In the Caffe root directory, run the following command:
./build/tools/caffe.bin
which prints the usage information below:
usage: caffe <command> <args>
# i.e., the invocation format: caffe followed by one command, then any further arguments
commands:  # the available commands are:
train           # train or finetune a model
test            # score a model
device_query    # show GPU diagnostic information
time            # benchmark model execution time
Flags from tools/caffe.cpp  # an overview of the remaining flags (typical invocations are shown after the list)
-gpu (optional; run in GPU mode on the given device IDs, separated by ',';
    '-gpu all' runs on every available GPU, in which case the effective training batch size is the number of GPU devices times batch_size)
-iterations (the number of iterations to run; default 50)
-level (optional; the network level, one of the NetState fields; together with stage it is matched against a layer's include/exclude rules to decide whether that layer is instantiated)
-model (the model definition text file, xxx.prototxt)
-phase (optional; whether the network runs in the TEST or TRAIN phase; when using the time command, specifying phase selects whether TEST or TRAIN execution is timed)
-sighup_effect (optional; the action to take when a SIGHUP signal is received: snapshot, stop, or none; default snapshot, i.e., save a snapshot)
-sigint_effect (optional; the action to take when a SIGINT signal is received; same options as above, default stop)
-snapshot (optional; the snapshot to resume training from; for example, if you interrupt training halfway with Ctrl+C (which in Linux terminates the current process rather than copying), you get a .solverstate file that you can pass here next time to pick up where you left off)
-solver (the solver.prototxt file; required by the train command)
-stage (optional; another NetState field, used together with level as described above)
-weights (the pretrained weights to finetune from, i.e., a .caffemodel file produced by earlier training; cannot be given together with snapshot)
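
For concreteness, here are some typical invocations of the flags above; the prototxt, caffemodel, and solverstate paths are placeholders to substitute with your own files:

# train a model from scratch
./build/tools/caffe train -solver examples/mnist/lenet_solver.prototxt -gpu 0
# resume an interrupted run from a snapshot
./build/tools/caffe train -solver examples/mnist/lenet_solver.prototxt -snapshot examples/mnist/lenet_iter_5000.solverstate
# finetune from pretrained weights
./build/tools/caffe train -solver examples/mnist/lenet_solver.prototxt -weights bvlc_reference_caffenet.caffemodel
# score a trained model over 100 batches
./build/tools/caffe test -model examples/mnist/lenet_train_test.prototxt -weights examples/mnist/lenet_iter_10000.caffemodel -iterations 100 -gpu 0
# benchmark per-layer forward/backward times
./build/tools/caffe time -model examples/mnist/lenet_train_test.prototxt -iterations 10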
Note: this file is fairly long, so I have only read it selectively and annotated parts of it.
#ifdef WITH_PYTHON_LAYER
#include "boost/python.hpp"
namespace bp = boost::python;
#endif

#include <gflags/gflags.h>
#include <glog/logging.h>

#include <cstring>
#include <map>
#include <string>
#include <vector>

#include "boost/algorithm/string.hpp"
#include "caffe/caffe.hpp"
#include "caffe/util/signal_handler.h"

using caffe::Blob;
using caffe::Caffe;
using caffe::Net;
using caffe::Layer;
using caffe::Solver;
using caffe::shared_ptr;
using caffe::string;
using caffe::Timer;
using caffe::vector;
using std::ostringstream;

DEFINE_string(gpu, "",
    "Optional; run in GPU mode on given device IDs separated by ','."
    "Use '-gpu all' to run on all available GPUs. The effective training "
    "batch size is multiplied by the number of devices.");
DEFINE_string(solver, "",
    "The solver definition protocol buffer text file.");
DEFINE_string(model, "",
    "The model definition protocol buffer text file.");
DEFINE_string(phase, "",
    "Optional; network phase (TRAIN or TEST). Only used for 'time'.");
DEFINE_int32(level, 0,
    "Optional; network level.");
DEFINE_string(stage, "",
    "Optional; network stages (not to be confused with phase), "
    "separated by ','.");
DEFINE_string(snapshot, "",
    "Optional; the snapshot solver state to resume training.");
DEFINE_string(weights, "",
    "Optional; the pretrained weights to initialize finetuning, "
    "separated by ','. Cannot be set simultaneously with snapshot.");
DEFINE_int32(iterations, 50,
    "The number of iterations to run.");
DEFINE_string(sigint_effect, "stop",
    "Optional; action to take when a SIGINT signal is received: "
    "snapshot, stop or none.");
DEFINE_string(sighup_effect, "snapshot",
    "Optional; action to take when a SIGHUP signal is received: "
    "snapshot, stop or none.");

// A simple registry for caffe commands.
typedef int (*BrewFunction)();
typedef std::map<caffe::string, BrewFunction> BrewMap;
BrewMap g_brew_map;

#define RegisterBrewFunction(func) \
namespace { \
class __Registerer_##func { \
 public: /* NOLINT */ \
  __Registerer_##func() { \
    g_brew_map[#func] = &func; \
  } \
}; \
__Registerer_##func g_registerer_##func; \
}

static BrewFunction GetBrewFunction(const caffe::string& name) {
  if (g_brew_map.count(name)) {
    return g_brew_map[name];
  } else {
    LOG(ERROR) << "Available caffe actions:";
    for (BrewMap::iterator it = g_brew_map.begin();
         it != g_brew_map.end(); ++it) {
      LOG(ERROR) << "\t" << it->first;
    }
    LOG(FATAL) << "Unknown action: " << name;
    return NULL;  // not reachable, just to suppress old compiler warnings.
  }
}
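
The RegisterBrewFunction macro above is a static-registration idiom: each global __Registerer_ object's constructor runs before main() and adds one entry to g_brew_map, so main() can later dispatch by command name. A minimal standalone sketch of the same idiom (the hello command is made up for illustration):

#include <cstdio>
#include <map>
#include <string>

typedef int (*BrewFunction)();
typedef std::map<std::string, BrewFunction> BrewMap;
BrewMap g_brew_map;

// Same macro shape as caffe.cpp: an anonymous-namespace class whose global
// instance registers the function before main() runs.
#define RegisterBrewFunction(func)                        \
  namespace {                                             \
  class __Registerer_##func {                             \
   public:                                                \
    __Registerer_##func() { g_brew_map[#func] = &func; }  \
  };                                                      \
  __Registerer_##func g_registerer_##func;                \
  }

int hello() { std::printf("running hello\n"); return 0; }
RegisterBrewFunction(hello);  // adds "hello" -> &hello to the map

int main(int argc, char** argv) {
  // Dispatch by name, the way caffe.cpp's GetBrewFunction/main do.
  if (argc == 2 && g_brew_map.count(argv[1])) {
    return g_brew_map[argv[1]]();
  }
  std::printf("usage: %s <command>\n", argv[0]);
  return 1;
}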
// Parse GPU ids or use all available devices
static void get_gpus(vector<int>* gpus) {
  if (FLAGS_gpu == "all") {
    int count = 0;
#ifndef CPU_ONLY
    CUDA_CHECK(cudaGetDeviceCount(&count));
#else
    NO_GPU;
#endif
    for (int i = 0; i < count; ++i) {
      gpus->push_back(i);
    }
  } else if (FLAGS_gpu.size()) {
    vector<string> strings;
    boost::split(strings, FLAGS_gpu, boost::is_any_of(","));
    for (int i = 0; i < strings.size(); ++i) {
      gpus->push_back(boost::lexical_cast<int>(strings[i]));
    }
  } else {
    CHECK_EQ(gpus->size(), 0);
  }
}

// Parse phase from flags
caffe::Phase get_phase_from_flags(caffe::Phase default_value) {
  if (FLAGS_phase == "")
    return default_value;
  if (FLAGS_phase == "TRAIN")
    return caffe::TRAIN;
  if (FLAGS_phase == "TEST")
    return caffe::TEST;
  LOG(FATAL) << "phase must be \"TRAIN\" or \"TEST\"";
  return caffe::TRAIN;  // Avoid warning
}

// Parse stages from flags
vector<string> get_stages_from_flags() {
  vector<string> stages;
  boost::split(stages, FLAGS_stage, boost::is_any_of(","));
  return stages;
}

// caffe commands to call by  ## the caffe command format:
//     caffe <command> <args>
//
// To add a command, define a function "int command()" and register it with
// RegisterBrewFunction(action);

// Device Query: show diagnostic information for a GPU device.
int device_query() {
  LOG(INFO) << "Querying GPUs " << FLAGS_gpu;
  vector<int> gpus;
  get_gpus(&gpus);
  for (int i = 0; i < gpus.size(); ++i) {
    caffe::Caffe::SetDevice(gpus[i]);
    caffe::Caffe::DeviceQuery();
  }
  return 0;
}
RegisterBrewFunction(device_query);

// Load the weights from the specified caffemodel(s) into the train and
// test nets.
// ## Copy the trained weights from the given caffemodel(s) into both the train and the test nets.
void CopyLayers(caffe::Solver<float>* solver, const std::string& model_list) {
  std::vector<std::string> model_names;
  boost::split(model_names, model_list, boost::is_any_of(","));
  for (int i = 0; i < model_names.size(); ++i) {
    LOG(INFO) << "Finetuning from " << model_names[i];
    solver->net()->CopyTrainedLayersFrom(model_names[i]);
    for (int j = 0; j < solver->test_nets().size(); ++j) {
      solver->test_nets()[j]->CopyTrainedLayersFrom(model_names[i]);
    }
  }
}

// Translate the signal effect the user specified on the command-line to the
// corresponding enumeration.
// ## Map the signal effect given on the command line to the matching enum value.
caffe::SolverAction::Enum GetRequestedAction(const std::string& flag_value) {
  if (flag_value == "stop") {
    return caffe::SolverAction::STOP;
  }
  if (flag_value == "snapshot") {
    return caffe::SolverAction::SNAPSHOT;
  }
  if (flag_value == "none") {
    return caffe::SolverAction::NONE;
  }
  LOG(FATAL) << "Invalid signal effect \"" << flag_value << "\" was specified";
  return caffe::SolverAction::NONE;
}
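
get_gpus() leans on boost::split and boost::lexical_cast; as a sketch of the same comma-separated parsing for the non-"all" case using only the standard library (parse_gpu_ids is a hypothetical helper, not part of Caffe):

#include <iostream>
#include <sstream>
#include <string>
#include <vector>

// Hypothetical helper mirroring get_gpus() for the non-"all" case:
// split "0,2,3" on commas and convert each token to an int.
std::vector<int> parse_gpu_ids(const std::string& flag) {
  std::vector<int> gpus;
  std::stringstream ss(flag);
  std::string token;
  while (std::getline(ss, token, ',')) {
    gpus.push_back(std::stoi(token));  // plays the role of boost::lexical_cast<int>
  }
  return gpus;
}

int main() {
  for (int id : parse_gpu_ids("0,2,3")) {
    std::cout << "GPU " << id << "\n";  // prints GPU 0, GPU 2, GPU 3
  }
  return 0;
}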
//======================================== Train / finetune a model ========================================//
// Train / Finetune a model.
int train() {
  // ## Check that the user passed a solver file.
  CHECK_GT(FLAGS_solver.size(), 0) << "Need a solver definition to train.";
  // ## Check that --weights and --snapshot are not both given: --weights is for
  // ## a fresh run that finetunes from a pretrained model, while --snapshot
  // ## resumes a training run that was interrupted earlier.
  CHECK(!FLAGS_snapshot.size() || !FLAGS_weights.size())
      << "Give a snapshot to resume training or weights to finetune "
         "but not both.";
  vector<string> stages = get_stages_from_flags();

  // ## Read and parse the user-defined solver.prototxt.
  caffe::SolverParameter solver_param;
  caffe::ReadSolverParamsFromTextFileOrDie(FLAGS_solver, &solver_param);

  solver_param.mutable_train_state()->set_level(FLAGS_level);
  for (int i = 0; i < stages.size(); i++) {
    solver_param.mutable_train_state()->add_stage(stages[i]);
  }

  // If the gpus flag is not provided, allow the mode and device to be set
  // in the solver prototxt.
  // ## This block is the key to GPU selection: GPU info can come either from
  // ## the command line or from solver.prototxt. The command line is checked
  // ## first; if it says nothing, the solver.prototxt settings apply, and if
  // ## solver.prototxt selects GPU mode without giving a device id, GPU 0 is used.
  if (FLAGS_gpu.size() == 0
      && solver_param.solver_mode() == caffe::SolverParameter_SolverMode_GPU) {
    if (solver_param.has_device_id()) {
      FLAGS_gpu = "" + boost::lexical_cast<string>(solver_param.device_id());
    } else {  // Set default GPU if unspecified
      FLAGS_gpu = "" + boost::lexical_cast<string>(0);
    }
  }

  // ## The next lines verify the GPU detection result: with no GPU available,
  // ## fall back to CPU training; otherwise initialize the devices for GPU training.
  vector<int> gpus;
  get_gpus(&gpus);
  if (gpus.size() == 0) {
    LOG(INFO) << "Use CPU.";
    Caffe::set_mode(Caffe::CPU);
  } else {
    ostringstream s;
    for (int i = 0; i < gpus.size(); ++i) {
      s << (i ? ", " : "") << gpus[i];
    }
    LOG(INFO) << "Using GPUs " << s.str();
#ifndef CPU_ONLY
    cudaDeviceProp device_prop;
    for (int i = 0; i < gpus.size(); ++i) {
      cudaGetDeviceProperties(&device_prop, gpus[i]);
      LOG(INFO) << "GPU " << gpus[i] << ": " << device_prop.name;
    }
#endif
    solver_param.set_device_id(gpus[0]);
    Caffe::SetDevice(gpus[0]);
    Caffe::set_mode(Caffe::GPU);
    Caffe::set_solver_count(gpus.size());
  }

  caffe::SignalHandler signal_handler(
      GetRequestedAction(FLAGS_sigint_effect),
      GetRequestedAction(FLAGS_sighup_effect));

  shared_ptr<caffe::Solver<float> >
      solver(caffe::SolverRegistry<float>::CreateSolver(solver_param));

  solver->SetActionFunction(signal_handler.GetActionFunction());

  // ## Check whether the user supplied the snapshot or weights flag: either one
  // ## means resuming an earlier interrupted run or initializing the net from
  // ## another model, and both cases are handled through the solver pointer.
  if (FLAGS_snapshot.size()) {
    LOG(INFO) << "Resuming from " << FLAGS_snapshot;
    solver->Restore(FLAGS_snapshot.c_str());
  } else if (FLAGS_weights.size()) {
    CopyLayers(solver.get(), FLAGS_weights);
  }

  // ## If more than one GPU takes part in training, start multi-GPU mode.
  if (gpus.size() > 1) {
    caffe::P2PSync<float> sync(solver, NULL, solver->param());
    sync.Run(gpus);
  } else {
    LOG(INFO) << "Starting Optimization";
    solver->Solve();  // ## Solve() is the entry point that actually optimizes the net.
  }
  LOG(INFO) << "Optimization Done.";
  return 0;
}
RegisterBrewFunction(train);
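
To see where the solver-side GPU settings consulted above come from, here is a hypothetical solver.prototxt fragment (paths and hyperparameters are placeholders); solver_mode and device_id are the two SolverParameter fields train() reads when no -gpu flag is given:

net: "examples/mnist/lenet_train_test.prototxt"
base_lr: 0.01
max_iter: 10000
snapshot: 5000
snapshot_prefix: "examples/mnist/lenet"
solver_mode: GPU   # consulted by train() only when no -gpu flag was given
device_id: 1       # optional; if absent, train() falls back to GPU 0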
//======================================== Test: score a model ========================================//
// Test: score a model.
int test() {
  // ## A model definition and weights are both required.
  CHECK_GT(FLAGS_model.size(), 0) << "Need a model definition to score.";
  CHECK_GT(FLAGS_weights.size(), 0) << "Need model weights to score.";
  vector<string> stages = get_stages_from_flags();

  // Set device id and mode
  // ## Set the device id and mode; if no GPU is configured, testing falls back
  // ## to the CPU, as mentioned in my previous post.
  vector<int> gpus;
  get_gpus(&gpus);
  if (gpus.size() != 0) {
    LOG(INFO) << "Use GPU with device ID " << gpus[0];
#ifndef CPU_ONLY
    cudaDeviceProp device_prop;
    cudaGetDeviceProperties(&device_prop, gpus[0]);
    LOG(INFO) << "GPU device name: " << device_prop.name;
#endif
    Caffe::SetDevice(gpus[0]);
    Caffe::set_mode(Caffe::GPU);
  } else {
    LOG(INFO) << "Use CPU.";
    Caffe::set_mode(Caffe::CPU);
  }

  // Instantiate the caffe net.
  Net<float> caffe_net(FLAGS_model, caffe::TEST, FLAGS_level, &stages);
  caffe_net.CopyTrainedLayersFrom(FLAGS_weights);
  LOG(INFO) << "Running for " << FLAGS_iterations << " iterations.";

  vector<int> test_score_output_id;
  vector<float> test_score;
  float loss = 0;
  for (int i = 0; i < FLAGS_iterations; ++i) {
    float iter_loss;
    const vector<Blob<float>*>& result = caffe_net.Forward(&iter_loss);
    loss += iter_loss;
    int idx = 0;
    for (int j = 0; j < result.size(); ++j) {
      const float* result_vec = result[j]->cpu_data();
      for (int k = 0; k < result[j]->count(); ++k, ++idx) {
        const float score = result_vec[k];
        if (i == 0) {
          test_score.push_back(score);
          test_score_output_id.push_back(j);
        } else {
          test_score[idx] += score;
        }
        const std::string& output_name = caffe_net.blob_names()[
            caffe_net.output_blob_indices()[j]];
        LOG(INFO) << "Batch " << i << ", " << output_name << " = " << score;
      }
    }
  }
  loss /= FLAGS_iterations;
  LOG(INFO) << "Loss: " << loss;
  for (int i = 0; i < test_score.size(); ++i) {
    const std::string& output_name = caffe_net.blob_names()[
        caffe_net.output_blob_indices()[test_score_output_id[i]]];
    const float loss_weight = caffe_net.blob_loss_weights()[
        caffe_net.output_blob_indices()[test_score_output_id[i]]];
    std::ostringstream loss_msg_stream;
    const float mean_score = test_score[i] / FLAGS_iterations;
    if (loss_weight) {
      loss_msg_stream << " (* " << loss_weight
                      << " = " << loss_weight * mean_score << " loss)";
    }
    LOG(INFO) << output_name << " = " << mean_score << loss_msg_stream.str();
  }
  return 0;
}
RegisterBrewFunction(test);

//======================================== Time: benchmark model execution time ========================================//
// Time: benchmark the execution time of a model.
int time() {
  CHECK_GT(FLAGS_model.size(), 0) << "Need a model definition to time.";
  caffe::Phase phase = get_phase_from_flags(caffe::TRAIN);
  vector<string> stages = get_stages_from_flags();

  // Set device id and mode  ## if no GPU is configured, the CPU is used by default.
  vector<int> gpus;
  get_gpus(&gpus);
  if (gpus.size() != 0) {
    LOG(INFO) << "Use GPU with device ID " << gpus[0];
    Caffe::SetDevice(gpus[0]);
    Caffe::set_mode(Caffe::GPU);
  } else {
    LOG(INFO) << "Use CPU.";
    Caffe::set_mode(Caffe::CPU);
  }

  // Instantiate the caffe net.
  Net<float> caffe_net(FLAGS_model, phase, FLAGS_level, &stages);

  // Do a clean forward and backward pass, so that memory allocation are done
  // and future iterations will be more stable.
  // ## One clean forward and backward pass so that memory allocation is finished.
  LOG(INFO) << "Performing Forward";
  // Note that for the speed benchmark, we will assume that the network does
  // not take any input blobs.
  float initial_loss;
  caffe_net.Forward(&initial_loss);
  LOG(INFO) << "Initial loss: " << initial_loss;
  LOG(INFO) << "Performing Backward";
  caffe_net.Backward();

  const vector<shared_ptr<Layer<float> > >& layers = caffe_net.layers();
  const vector<vector<Blob<float>*> >& bottom_vecs = caffe_net.bottom_vecs();
  const vector<vector<Blob<float>*> >& top_vecs = caffe_net.top_vecs();
  const vector<vector<bool> >& bottom_need_backward =
      caffe_net.bottom_need_backward();
  LOG(INFO) << "*** Benchmark begins ***";
  LOG(INFO) << "Testing for " << FLAGS_iterations << " iterations.";
  Timer total_timer;
  total_timer.Start();
  Timer forward_timer;
  Timer backward_timer;
  Timer timer;
  std::vector<double> forward_time_per_layer(layers.size(), 0.0);
  std::vector<double> backward_time_per_layer(layers.size(), 0.0);
  double forward_time = 0.0;
  double backward_time = 0.0;
  for (int j = 0; j < FLAGS_iterations; ++j) {
    Timer iter_timer;
    iter_timer.Start();
    forward_timer.Start();
    for (int i = 0; i < layers.size(); ++i) {
      timer.Start();
      layers[i]->Forward(bottom_vecs[i], top_vecs[i]);
      forward_time_per_layer[i] += timer.MicroSeconds();
    }
    forward_time += forward_timer.MicroSeconds();
    backward_timer.Start();
    for (int i = layers.size() - 1; i >= 0; --i) {
      timer.Start();
      layers[i]->Backward(top_vecs[i], bottom_need_backward[i],
                          bottom_vecs[i]);
      backward_time_per_layer[i] += timer.MicroSeconds();
    }
    backward_time += backward_timer.MicroSeconds();
    LOG(INFO) << "Iteration: " << j + 1 << " forward-backward time: "
              << iter_timer.MilliSeconds() << " ms.";
  }
  LOG(INFO) << "Average time per layer: ";
  for (int i = 0; i < layers.size(); ++i) {
    const caffe::string& layername = layers[i]->layer_param().name();
    LOG(INFO) << std::setfill(' ') << std::setw(10) << layername
              << "\tforward: "
              << forward_time_per_layer[i] / 1000 / FLAGS_iterations << " ms.";
    LOG(INFO) << std::setfill(' ') << std::setw(10) << layername
              << "\tbackward: "
              << backward_time_per_layer[i] / 1000 / FLAGS_iterations << " ms.";
  }
  total_timer.Stop();
  LOG(INFO) << "Average Forward pass: "
            << forward_time / 1000 / FLAGS_iterations << " ms.";
  LOG(INFO) << "Average Backward pass: "
            << backward_time / 1000 / FLAGS_iterations << " ms.";
  LOG(INFO) << "Average Forward-Backward: "
            << total_timer.MilliSeconds() / FLAGS_iterations << " ms.";
  LOG(INFO) << "Total Time: " << total_timer.MilliSeconds() << " ms.";
  LOG(INFO) << "*** Benchmark ends ***";
  return 0;
}
RegisterBrewFunction(time);

//======================================== main ========================================//
int main(int argc, char** argv) {
  // Print output to stderr (while still logging).
  FLAGS_alsologtostderr = 1;
  // Set version
  gflags::SetVersionString(AS_STRING(CAFFE_VERSION));
  // Usage message.
  gflags::SetUsageMessage("command line brew\n"
      "usage: caffe <command> <args>\n\n"
      "commands:\n"
      "  train           train or finetune a model\n"
      "  test            score a model\n"
      "  device_query    show GPU diagnostic information\n"
      "  time            benchmark model execution time");
  // Run tool or show usage.
  caffe::GlobalInit(&argc, &argv);
  if (argc == 2) {
#ifdef WITH_PYTHON_LAYER
    try {
#endif
      return GetBrewFunction(caffe::string(argv[1]))();
#ifdef WITH_PYTHON_LAYER
    } catch (bp::error_already_set) {
      PyErr_Print();
      return 1;
    }
#endif
  } else {
    gflags::ShowUsageWithFlagsRestrict(argv[0], "tools/caffe");
  }
}
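
As a final aside, the benchmark in time() follows a plain accumulate-then-average pattern: start a timer around each layer call, add up microseconds per layer across all iterations, then divide by the iteration count. A rough standalone sketch of that pattern with std::chrono in place of caffe::Timer (the inner loop is a stand-in workload, not a real layer):

#include <chrono>
#include <cstdio>
#include <vector>

int main() {
  const int kIterations = 50;   // mirrors the -iterations flag's default
  const int kNumStages = 3;     // stand-in for the network's layer count
  std::vector<double> stage_us(kNumStages, 0.0);
  for (int j = 0; j < kIterations; ++j) {
    for (int i = 0; i < kNumStages; ++i) {
      auto t0 = std::chrono::high_resolution_clock::now();
      volatile double sink = 0;  // stand-in workload for layers[i]->Forward(...)
      for (int k = 0; k < 100000; ++k) sink += k * 0.5;
      auto t1 = std::chrono::high_resolution_clock::now();
      // accumulate elapsed microseconds, like forward_time_per_layer[i] += timer.MicroSeconds();
      stage_us[i] += std::chrono::duration<double, std::micro>(t1 - t0).count();
    }
  }
  // report the per-stage average in milliseconds, like the "Average time per layer" log
  for (int i = 0; i < kNumStages; ++i)
    std::printf("stage %d: %.3f ms avg\n", i, stage_us[i] / 1000.0 / kIterations);
  return 0;
}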