Original post: http://blog.csdn.net/xizero00/article/details/50914471
I. Brief Overview of the Layer Class
The Layer class defines the basic operations every layer supports: setting up the layer, forward propagation, and backward propagation. In the forward pass a layer computes its top blobs from its bottom blobs; in the backward pass it propagates gradients from top back down to bottom. The forward pass can also compute a loss: although every layer has this capability, normally only the final layer(s) actually produce a loss. When a layer does not provide GPU implementations of the forward and backward passes, the base class automatically falls back to the CPU implementations. A detailed description of the Layer class follows.
(Figure: a generated class diagram giving an intuitive view of the Layer class hierarchy.)
II. Detailed Description of the Layer Class
1) Constructor
The constructor initializes the layer's parameters and records whether the layer may be shared (a data layer, for example, can share its data with multiple networks).
Here blobs_ is declared as vector<shared_ptr<Blob<Dtype> > > blobs_; that is, it is a container of (shared) pointers to Blob objects.
[cpp]
explicit Layer(const LayerParameter& param)
    : layer_param_(param), is_shared_(false) {
  // Store the phase (TRAIN or TEST) and copy any blobs that were
  // serialized in the layer parameter (e.g. trained weights).
  phase_ = param.phase();
  if (layer_param_.blobs_size() > 0) {
    blobs_.resize(layer_param_.blobs_size());
    for (int i = 0; i < layer_param_.blobs_size(); ++i) {
      blobs_[i].reset(new Blob<Dtype>());
      blobs_[i]->FromProto(layer_param_.blobs(i));
    }
  }
}
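As a hedged illustration (the layer name, shape, and values below are made up, not taken from the post), this is roughly what a LayerParameter looks like when it already carries serialized weights, e.g. after a net has been restored from a .caffemodel; the constructor above then copies each BlobProto into blobs_ via Blob::FromProto():
[cpp]
#include "caffe/proto/caffe.pb.h"

// Hypothetical sketch: build a LayerParameter that already carries one
// serialized weight blob, as it would after loading a trained model.
caffe::LayerParameter MakeParamWithWeights() {
  caffe::LayerParameter param;
  param.set_name("ip1");                       // made-up layer name
  param.set_type("InnerProduct");
  caffe::BlobProto* w = param.add_blobs();     // serialized weight blob
  w->mutable_shape()->add_dim(10);             // arbitrary 10 x 4 shape
  w->mutable_shape()->add_dim(4);
  for (int i = 0; i < 10 * 4; ++i) w->add_data(0.0f);  // placeholder values
  // The Layer constructor would copy this proto into blobs_[0]
  // via Blob<Dtype>::FromProto().
  return param;
}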
2) Member variables
Protected member variables:
[cpp]
LayerParameter layer_param_;               // the protobuf that configures this layer
Phase phase_;                              // TRAIN or TEST
vector<shared_ptr<Blob<Dtype> > > blobs_;  // the layer's learnable parameter blobs
vector<bool> param_propagate_down_;        // whether to compute the diff of each parameter blob
vector<Dtype> loss_;                       // loss weight assigned to each top blob
Private member variables:
[cpp]
bool is_shared_;                           // whether this layer is shared by multiple nets
shared_ptr<boost::mutex> forward_mutex_;   // serializes Forward() calls when the layer is shared
3) Member functions
3-1 Non-inline functions:
[cpp]
void InitMutex();
void Lock();
void Unlock();
3-2 Inline functions:
[cpp]
// Whether this layer is currently being shared by multiple nets.
inline bool IsShared() const { return is_shared_; }

// Mark the layer as shared; only layers that support sharing may be shared.
inline void SetShared(bool is_shared) {
  CHECK(ShareInParallel() || !is_shared)
      << type() << "Layer does not support sharing.";
  is_shared_ = is_shared;
}

// Forward pass: compute the top blobs from the bottom blobs and return
// the weighted loss contributed by this layer.
inline Dtype Forward(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top);

// Backward pass: given the top diffs, compute the bottom diffs for every
// bottom blob whose propagate_down entry is true.
inline void Backward(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down,
    const vector<Blob<Dtype>*>& bottom);

// Loss weight associated with a given top blob (0 if it produces no loss).
inline Dtype loss(const int top_index) const {
  return (loss_.size() > top_index) ? loss_[top_index] : Dtype(0);
}

inline void set_loss(const int top_index, const Dtype value) {
  if (loss_.size() <= top_index) {
    loss_.resize(top_index + 1, Dtype(0));
  }
  loss_[top_index] = value;
}

// Whether to compute the gradient for a given parameter blob.
inline bool param_propagate_down(const int param_id) {
  return (param_propagate_down_.size() > param_id) ?
      param_propagate_down_[param_id] : false;
}

inline void set_param_propagate_down(const int param_id, const bool value) {
  if (param_propagate_down_.size() <= param_id) {
    param_propagate_down_.resize(param_id + 1, true);
  }
  param_propagate_down_[param_id] = value;
}

// Called by SetUp(): record each top blob's loss_weight in loss_ and also
// write it into that blob's diff, so Forward() can obtain the weighted loss
// with a single dot product of data and diff.
inline void SetLossWeights(const vector<Blob<Dtype>*>& top) {
  const int num_loss_weights = layer_param_.loss_weight_size();
  if (num_loss_weights) {
    CHECK_EQ(top.size(), num_loss_weights) << "loss_weight must be "
        "unspecified or specified once per top blob.";
    for (int top_id = 0; top_id < top.size(); ++top_id) {
      const Dtype loss_weight = layer_param_.loss_weight(top_id);
      if (loss_weight == Dtype(0)) { continue; }
      this->set_loss(top_id, loss_weight);
      const int count = top[top_id]->count();
      Dtype* loss_multiplier = top[top_id]->mutable_cpu_diff();
      caffe_set(count, loss_weight, loss_multiplier);
    }
  }
}
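To see how SetLossWeights() interacts with the loss computed later in Forward(): for a top blob whose loss_weight is $w$, the diff is filled with the constant $w$, so the per-blob weighted loss reduces to a single dot product,

$$\text{loss} = \sum_i \text{diff}_i \cdot \text{data}_i = w \sum_i \text{data}_i,$$

which is exactly what caffe_cpu_dot / caffe_gpu_dot evaluate in Forward() below.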
3-3 Other functions defined in the class body:
[cpp]
// SetUp: the fixed skeleton every layer goes through at initialization time.
void SetUp(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  InitMutex();                   // create the forward mutex
  CheckBlobCounts(bottom, top);  // verify the numbers of bottom/top blobs
  LayerSetUp(bottom, top);       // layer-specific setup
  Reshape(bottom, top);          // shape the top blobs and internal buffers
  SetLossWeights(top);           // install any non-zero loss weights
}

// Accessors for the learnable parameter blobs and the layer parameter.
vector<shared_ptr<Blob<Dtype> > >& blobs() { return blobs_; }
const LayerParameter& layer_param() const { return layer_param_; }
3-4 Virtual functions (the pure virtual ones must be implemented by every derived layer!):
[cpp]
// Layer-specific setup, called once by SetUp() after the blob counts have
// been checked. The default does nothing.
virtual void LayerSetUp(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {}

// Whether this layer may be shared by multiple nets (data layers return true).
virtual inline bool ShareInParallel() const { return false; }

// Pure virtual: every layer must reshape its top blobs (and any internal
// buffers) to match the shapes of the bottom blobs.
virtual void Reshape(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) = 0;

// Serialize the layer parameter (and optionally the diffs) to a protobuf.
virtual void ToProto(LayerParameter* param, bool write_diff = false);

// The layer type as a string, e.g. "Convolution".
virtual inline const char* type() const { return ""; }

// Blob-count constraints used by CheckBlobCounts(); -1 means "no constraint".
virtual inline int ExactNumBottomBlobs() const { return -1; }
virtual inline int MinBottomBlobs() const { return -1; }
virtual inline int MaxBottomBlobs() const { return -1; }
virtual inline int ExactNumTopBlobs() const { return -1; }
virtual inline int MinTopBlobs() const { return -1; }
virtual inline int MaxTopBlobs() const { return -1; }
virtual inline bool EqualNumBottomTopBlobs() const { return false; }

// Whether the net should automatically create anonymous top blobs to
// satisfy the declared top-blob counts.
virtual inline bool AutoTopBlobs() const { return false; }

// Whether force_backward may be applied to a given bottom blob.
virtual inline bool AllowForceBackward(const int bottom_index) const {
  return true;
}

// CPU forward pass: pure virtual, must be implemented.
virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) = 0;

// GPU forward pass: falls back to the CPU implementation by default.
virtual void Forward_gpu(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  return Forward_cpu(bottom, top);
}

// CPU backward pass: pure virtual, must be implemented.
virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down,
    const vector<Blob<Dtype>*>& bottom) = 0;

// GPU backward pass: falls back to the CPU implementation by default.
virtual void Backward_gpu(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down,
    const vector<Blob<Dtype>*>& bottom) {
  Backward_cpu(top, propagate_down, bottom);
}

// Verify that the numbers of bottom and top blobs satisfy the constraints
// declared by the derived layer.
virtual void CheckBlobCounts(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  if (ExactNumBottomBlobs() >= 0) {
    CHECK_EQ(ExactNumBottomBlobs(), bottom.size())
        << type() << " Layer takes " << ExactNumBottomBlobs()
        << " bottom blob(s) as input.";
  }
  if (MinBottomBlobs() >= 0) {
    CHECK_LE(MinBottomBlobs(), bottom.size())
        << type() << " Layer takes at least " << MinBottomBlobs()
        << " bottom blob(s) as input.";
  }
  if (MaxBottomBlobs() >= 0) {
    CHECK_GE(MaxBottomBlobs(), bottom.size())
        << type() << " Layer takes at most " << MaxBottomBlobs()
        << " bottom blob(s) as input.";
  }
  if (ExactNumTopBlobs() >= 0) {
    CHECK_EQ(ExactNumTopBlobs(), top.size())
        << type() << " Layer produces " << ExactNumTopBlobs()
        << " top blob(s) as output.";
  }
  if (MinTopBlobs() >= 0) {
    CHECK_LE(MinTopBlobs(), top.size())
        << type() << " Layer produces at least " << MinTopBlobs()
        << " top blob(s) as output.";
  }
  if (MaxTopBlobs() >= 0) {
    CHECK_GE(MaxTopBlobs(), top.size())
        << type() << " Layer produces at most " << MaxTopBlobs()
        << " top blob(s) as output.";
  }
  if (EqualNumBottomTopBlobs()) {
    CHECK_EQ(bottom.size(), top.size())
        << type() << " Layer produces one top blob as output for each "
        << "bottom blob input.";
  }
}
Concrete implementations of some of these functions are given below. The most important ones are the forward and backward passes: Forward() dispatches to the corresponding Forward_cpu or Forward_gpu. Forward_cpu is pure virtual and must be implemented, whereas Forward_gpu is an ordinary virtual function; if it is not overridden, the CPU version is used instead.
The forward pass (you must implement your own Forward_cpu; implementing Forward_gpu is optional):
[cpp]
template <typename Dtype>
inline Dtype Layer<Dtype>::Forward(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  // Lock so that Forward runs sequentially if the layer is shared.
  Lock();
  Dtype loss = 0;
  Reshape(bottom, top);
  switch (Caffe::mode()) {
  case Caffe::CPU:
    Forward_cpu(bottom, top);
    // Accumulate the loss of every top blob with a non-zero loss weight:
    // the weight was written into the blob's diff by SetLossWeights(), so
    // the weighted loss is the dot product of data and diff.
    for (int top_id = 0; top_id < top.size(); ++top_id) {
      if (!this->loss(top_id)) { continue; }
      const int count = top[top_id]->count();
      const Dtype* data = top[top_id]->cpu_data();
      const Dtype* loss_weights = top[top_id]->cpu_diff();
      loss += caffe_cpu_dot(count, data, loss_weights);
    }
    break;
  case Caffe::GPU:
    Forward_gpu(bottom, top);
#ifndef CPU_ONLY
    for (int top_id = 0; top_id < top.size(); ++top_id) {
      if (!this->loss(top_id)) { continue; }
      const int count = top[top_id]->count();
      const Dtype* data = top[top_id]->gpu_data();
      const Dtype* loss_weights = top[top_id]->gpu_diff();
      Dtype blob_loss = 0;
      caffe_gpu_dot(count, data, loss_weights, &blob_loss);
      loss += blob_loss;
    }
#endif
    break;
  default:
    LOG(FATAL) << "Unknown caffe mode.";
  }
  Unlock();
  return loss;
}
The backward pass follows the same pattern as the forward pass:
[cpp]
template <typename Dtype>
inline void Layer<Dtype>::Backward(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down,
    const vector<Blob<Dtype>*>& bottom) {
  switch (Caffe::mode()) {
  case Caffe::CPU:
    Backward_cpu(top, propagate_down, bottom);
    break;
  case Caffe::GPU:
    Backward_gpu(top, propagate_down, bottom);
    break;
  default:
    LOG(FATAL) << "Unknown caffe mode.";
  }
}

// Serialize the layer to a LayerParameter protobuf, copying the learnable
// blobs (and optionally their diffs) along with it.
template <typename Dtype>
void Layer<Dtype>::ToProto(LayerParameter* param, bool write_diff) {
  param->Clear();
  param->CopyFrom(layer_param_);
  param->clear_blobs();
  for (int i = 0; i < blobs_.size(); ++i) {
    blobs_[i]->ToProto(param->add_blobs(), write_diff);
  }
}

Implementation of the remaining parts (the mutex helpers used when a layer is shared):

// The mutex is only taken when the layer is shared, so unshared layers pay
// no locking cost in Forward().
template <typename Dtype>
void Layer<Dtype>::InitMutex() {
  forward_mutex_.reset(new boost::mutex());
}

template <typename Dtype>
void Layer<Dtype>::Lock() {
  if (IsShared()) { forward_mutex_->lock(); }
}

template <typename Dtype>
void Layer<Dtype>::Unlock() {
  if (IsShared()) { forward_mutex_->unlock(); }
}
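For context, this is roughly how a net drives these calls. The sketch below is a simplified stand-in, not Caffe's actual Net code (the function name and parameter names are made up, though they mirror Net's members): walk the layers forward while summing the returned losses, then walk them backward.
[cpp]
// Simplified sketch of how a net-like driver would use Layer<Dtype>.
template <typename Dtype>
Dtype ForwardBackwardSketch(
    const vector<shared_ptr<Layer<Dtype> > >& layers,
    const vector<vector<Blob<Dtype>*> >& bottom_vecs,
    const vector<vector<Blob<Dtype>*> >& top_vecs,
    const vector<vector<bool> >& bottom_need_backward) {
  Dtype loss = 0;
  // Forward pass over all layers, accumulating each layer's weighted loss.
  for (int i = 0; i < layers.size(); ++i) {
    loss += layers[i]->Forward(bottom_vecs[i], top_vecs[i]);
  }
  // Backward pass in reverse order.
  for (int i = layers.size() - 1; i >= 0; --i) {
    layers[i]->Backward(top_vecs[i], bottom_need_backward[i], bottom_vecs[i]);
  }
  return loss;
}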
III. Code Related to the Layer Class
(1) device_alternate.hpp
This header only defines a few macros for checking whether CUDA calls succeeded, together with several convenience macros for stubbing out GPU code. They are described below:
[cpp]
// Stub out a layer's GPU forward/backward so a CPU-only build still links:
// the GPU versions simply abort with NO_GPU.
#define STUB_GPU(classname) \
template <typename Dtype> \
void classname<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, \
    const vector<Blob<Dtype>*>& top) { NO_GPU; } \
template <typename Dtype> \
void classname<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, \
    const vector<bool>& propagate_down, \
    const vector<Blob<Dtype>*>& bottom) { NO_GPU; }

#define STUB_GPU_FORWARD(classname, funcname) \
template <typename Dtype> \
void classname<Dtype>::funcname##_##gpu(const vector<Blob<Dtype>*>& bottom, \
    const vector<Blob<Dtype>*>& top) { NO_GPU; }

#define STUB_GPU_BACKWARD(classname, funcname) \
template <typename Dtype> \
void classname<Dtype>::funcname##_##gpu(const vector<Blob<Dtype>*>& top, \
    const vector<bool>& propagate_down, \
    const vector<Blob<Dtype>*>& bottom) { NO_GPU; }
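These macros are used in a layer's .cpp file when no .cu implementation exists. A hedged example with a made-up layer name (MyLayer is hypothetical):
[cpp]
// my_layer.cpp (hypothetical): the CPU implementations live above; in a
// CPU_ONLY build the GPU entry points are stubbed out so linking succeeds.
#ifdef CPU_ONLY
STUB_GPU(MyLayer);
#endif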
CUDA error-checking macros (the same block also defines the kernel-loop helper):
[cpp]
// Wrap a CUDA/cuBLAS/cuRAND call and abort with the error string on failure.
#define CUDA_CHECK(condition) \
  do { \
    cudaError_t error = condition; \
    CHECK_EQ(error, cudaSuccess) << " " << cudaGetErrorString(error); \
  } while (0)

#define CUBLAS_CHECK(condition) \
  do { \
    cublasStatus_t status = condition; \
    CHECK_EQ(status, CUBLAS_STATUS_SUCCESS) << " " << caffe::cublasGetErrorString(status); \
  } while (0)

#define CURAND_CHECK(condition) \
  do { \
    curandStatus_t status = condition; \
    CHECK_EQ(status, CURAND_STATUS_SUCCESS) << " " << caffe::curandGetErrorString(status); \
  } while (0)

// Grid-stride loop used by Caffe CUDA kernels.
#define CUDA_KERNEL_LOOP(i, n) \
  for (int i = blockIdx.x * blockDim.x + threadIdx.x; \
       i < (n); \
       i += blockDim.x * gridDim.x)
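CUDA_KERNEL_LOOP is a grid-stride loop: each thread starts at its global index and strides by the total number of launched threads, so one launch handles any n. A hedged sketch of a typical Caffe-style kernel (the kernel name and the scaling operation are made up for illustration):
[cpp]
// Hypothetical element-wise kernel in the usual Caffe style: every thread
// processes indices index, index + blockDim.x * gridDim.x, and so on.
template <typename Dtype>
__global__ void ScaleKernel(const int n, const Dtype alpha,
    const Dtype* in, Dtype* out) {
  CUDA_KERNEL_LOOP(index, n) {
    out[index] = alpha * in[index];
  }
}
// Typical launch, using the grid/block helpers from device_alternate.hpp:
//   ScaleKernel<Dtype><<<CAFFE_GET_BLOCKS(n), CAFFE_CUDA_NUM_THREADS>>>(
//       n, alpha, in, out);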
IV. Summary
The design of Layer revolves around three functions: SetUp, Forward, and Backward (one-time setup of the layer, then the forward and backward passes).
SetUp in turn relies on CheckBlobCounts, LayerSetUp, Reshape, and so on; of these, Reshape must be implemented because it is pure virtual.
Forward dispatches to Forward_cpu / Forward_gpu, and Forward_cpu must be implemented.
Backward dispatches to Backward_cpu / Backward_gpu, and Backward_cpu must be implemented. A minimal skeleton is sketched below.
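Putting the summary into practice, a minimal custom layer only has to provide the pure virtual functions. The sketch below is a made-up pass-through layer (not part of Caffe), showing the smallest useful set of overrides; GPU code and error handling are omitted:
[cpp]
// Hypothetical "pass-through" layer: copies bottom data to top in Forward
// and copies top diffs back to bottom in Backward.
template <typename Dtype>
class PassThroughLayer : public Layer<Dtype> {
 public:
  explicit PassThroughLayer(const LayerParameter& param)
      : Layer<Dtype>(param) {}
  virtual inline const char* type() const { return "PassThrough"; }
  virtual inline int ExactNumBottomBlobs() const { return 1; }
  virtual inline int ExactNumTopBlobs() const { return 1; }

  // Required (pure virtual): give the top blob the bottom blob's shape.
  virtual void Reshape(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top) {
    top[0]->ReshapeLike(*bottom[0]);
  }

 protected:
  // Required (pure virtual): CPU forward pass.
  virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top) {
    caffe_copy(bottom[0]->count(), bottom[0]->cpu_data(),
               top[0]->mutable_cpu_data());
  }
  // Required (pure virtual): CPU backward pass.
  virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
      const vector<bool>& propagate_down,
      const vector<Blob<Dtype>*>& bottom) {
    if (propagate_down[0]) {
      caffe_copy(top[0]->count(), top[0]->cpu_diff(),
                 bottom[0]->mutable_cpu_diff());
    }
  }
  // Forward_gpu / Backward_gpu are optional: the base class falls back to
  // the CPU versions when they are not overridden.
};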
References:
You may want to review forward and backward propagation in a multilayer perceptron.
See the relevant UFLDL tutorial material for details.
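As a quick refresher in UFLDL-style notation (added here for convenience; see the UFLDL tutorial for the derivation), the forward and backward passes of a fully connected layer with activation $f$ are

$$z^{(l+1)} = W^{(l)} a^{(l)} + b^{(l)}, \qquad a^{(l+1)} = f\big(z^{(l+1)}\big),$$

$$\delta^{(l)} = \big((W^{(l)})^{T} \delta^{(l+1)}\big) \odot f'\big(z^{(l)}\big), \qquad \nabla_{W^{(l)}} J = \delta^{(l+1)} \big(a^{(l)}\big)^{T}, \qquad \nabla_{b^{(l)}} J = \delta^{(l+1)},$$

which is exactly the bottom-to-top computation done in Forward and the top-to-bottom gradient flow done in Backward.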