Before introducing Blob, the cornerstone of caffe, let's first look at the closely related BlobProto message defined in caffe.proto.
message BlobProto {
optional BlobShape shape = 7;
repeated float data = 5 [packed = true];
repeated float diff = 6 [packed = true];
repeated double double_data = 8 [packed = true];
repeated double double_diff = 9 [packed = true];
// 4D dimensions -- deprecated. Use "shape" instead.
optional int32 num = 1 [default = 0];
optional int32 channels = 2 [default = 0];
optional int32 height = 3 [default = 0];
optional int32 width = 4 [default = 0];
}
From the definition of BlobProto you can see that it is the unit for storing data: it essentially describes a hypercube holding both the data and the diff (used for back-propagating residuals and the like). Note that the documentation on Google's protobuf site strongly advises against inheriting from generated message classes such as BlobProto; even though it is a class, subclassing it can cause problems. Interested readers can dig into why.
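To stay on the safe side, hold a BlobProto by composition rather than inheritance. A minimal sketch (this BlobArchive wrapper is hypothetical, not part of caffe):
#include "caffe/proto/caffe.pb.h"
// Hypothetical wrapper: has-a BlobProto instead of is-a.
class BlobArchive {
 public:
  explicit BlobArchive(const caffe::BlobProto& proto) : proto_(proto) {}
  int num_axes() const { return proto_.shape().dim_size(); }
  float data(int i) const { return proto_.data(i); }
 private:
  caffe::BlobProto proto_;
};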
Before that, though, let's see what SyncedMemory, which Blob uses internally, actually is:
#ifndef CAFFE_SYNCEDMEM_HPP_
#define CAFFE_SYNCEDMEM_HPP_
#include <cstdlib>
#include "caffe/common.hpp"
namespace caffe {
// If CUDA is available and in GPU mode, host memory will be allocated pinned,
// using cudaMallocHost. It avoids dynamic pinning for transfers (DMA).
// The improvement in performance seems negligible in the single GPU case,
// but might be more significant for parallel training. Most importantly,
// it improved stability for large models on many GPUs.
inline void CaffeMallocHost(void** ptr, size_t size, bool* use_cuda) {
#ifndef CPU_ONLY
if (Caffe::mode() == Caffe::GPU) {
CUDA_CHECK(cudaMallocHost(ptr, size));
*use_cuda = true;
return;
}
#endif
*ptr = malloc(size);
*use_cuda = false;
CHECK(*ptr) << "host allocation of size " << size << " failed";
}
inline void CaffeFreeHost(void* ptr, bool use_cuda) {
#ifndef CPU_ONLY
if (use_cuda) {
CUDA_CHECK(cudaFreeHost(ptr));
return;
}
#endif
free(ptr);
}
The header first defines host-side malloc and free helpers; these two functions allocate and release the memory behind cpu_ptr_. When CUDA is available, cudaMallocHost is the recommended way to allocate host memory: it returns pinned (page-locked) memory, which speeds up host-device transfers.
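A minimal usage sketch of this pair (hypothetical caller code, not from the caffe sources); note that the use_cuda flag produced by the allocator must be handed back to the matching free:
void* buf = NULL;
bool used_cuda = false;
caffe::CaffeMallocHost(&buf, 1024, &used_cuda);  // pinned iff in GPU mode
// ... fill the 1024-byte host buffer ...
caffe::CaffeFreeHost(buf, used_cuda);            // free with the matching deallocator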
/**
* @brief Manages memory allocation and synchronization between the host (CPU)
* and device (GPU).
*
* TODO(dox): more thorough description.
*/
class SyncedMemory {
public:
SyncedMemory()
: cpu_ptr_(NULL), gpu_ptr_(NULL), size_(0), head_(UNINITIALIZED),
own_cpu_data_(false), cpu_malloc_use_cuda_(false), own_gpu_data_(false),
gpu_device_(-1) {}
explicit SyncedMemory(size_t size)
: cpu_ptr_(NULL), gpu_ptr_(NULL), size_(size), head_(UNINITIALIZED),
own_cpu_data_(false), cpu_malloc_use_cuda_(false), own_gpu_data_(false),
gpu_device_(-1) {}
~SyncedMemory();
const void* cpu_data();
void set_cpu_data(void* data);
const void* gpu_data();
void set_gpu_data(void* data);
void* mutable_cpu_data();
void* mutable_gpu_data();
enum SyncedHead { UNINITIALIZED, HEAD_AT_CPU, HEAD_AT_GPU, SYNCED };
SyncedHead head() { return head_; }
size_t size() { return size_; }
#ifndef CPU_ONLY
void async_gpu_push(const cudaStream_t& stream);
#endif
private:
void to_cpu();
void to_gpu();
void* cpu_ptr_;
void* gpu_ptr_;
size_t size_;
SyncedHead head_;
bool own_cpu_data_;
bool cpu_malloc_use_cuda_;
bool own_gpu_data_;
int gpu_device_;
DISABLE_COPY_AND_ASSIGN(SyncedMemory);
}; // class SyncedMemory
} // namespace caffe
#endif // CAFFE_SYNCEDMEM_HPP_
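Before moving on to the .cpp, here is a sketch of how the class is meant to be driven. Allocation is lazy, and each accessor performs whatever synchronization it needs; the state annotations are my reading of the implementation below:
caffe::SyncedMemory mem(16 * sizeof(float));                // nothing allocated yet (UNINITIALIZED)
float* host = static_cast<float*>(mem.mutable_cpu_data());  // host alloc + zero fill (HEAD_AT_CPU)
host[0] = 1.0f;
const void* dev = mem.gpu_data();                           // device alloc + host-to-device copy (SYNCED)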
Next, let's look at the concrete implementations in the .cpp file:
#include "caffe/common.hpp"
#include "caffe/syncedmem.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
SyncedMemory::~SyncedMemory() {
if (cpu_ptr_ && own_cpu_data_) {
CaffeFreeHost(cpu_ptr_, cpu_malloc_use_cuda_);
}
In the destructor, if the CPU data pointer is non-NULL and the own_cpu_data_ flag is true, the host memory is released.
#ifndef CPU_ONLY
if (gpu_ptr_ && own_gpu_data_) {
int initial_device;
cudaGetDevice(&initial_device);
if (gpu_device_ != -1) {
CUDA_CHECK(cudaSetDevice(gpu_device_));
}
CUDA_CHECK(cudaFree(gpu_ptr_));
cudaSetDevice(initial_device);
}
#endif // CPU_ONLY
}
This part is the corresponding GPU-side free. These are all CUDA API calls worth noting; in particular, the code saves the current device, switches to gpu_device_ before calling cudaFree, and then restores the original device.
inline void SyncedMemory::to_cpu() {
switch (head_) {
case UNINITIALIZED:
CaffeMallocHost(&cpu_ptr_, size_, &cpu_malloc_use_cuda_);
caffe_memset(size_, 0, cpu_ptr_);
head_ = HEAD_AT_CPU;
own_cpu_data_ = true;
break;
This private helper moves the data to the CPU side. The head_ flag records where the freshest copy lives, somewhat like git's HEAD pointing at a branch such as master or dev. It has four states: UNINITIALIZED, HEAD_AT_CPU, HEAD_AT_GPU, and SYNCED. When the state is UNINITIALIZED, the inline CaffeMallocHost allocates host memory and caffe_memset zeroes it all;
case HEAD_AT_GPU:
#ifndef CPU_ONLY
if (cpu_ptr_ == NULL) {
CaffeMallocHost(&cpu_ptr_, size_, &cpu_malloc_use_cuda_);
own_cpu_data_ = true;
}
caffe_gpu_memcpy(size_, gpu_ptr_, cpu_ptr_);
head_ = SYNCED;
#else
NO_GPU;
#endif
break;
When the freshest copy is on the GPU, caffe_gpu_memcpy copies the data from GPU to CPU and head_ moves to the SYNCED state. If CPU_ONLY was defined at compile time, the NO_GPU macro (defined in device_alternate.hpp) raises an error instead.
case HEAD_AT_CPU:
case SYNCED:
break;
}
}
If head_ is HEAD_AT_CPU or SYNCED, the CPU side already holds a valid copy and nothing needs to be done; after all, this function's job is only to bring data to the CPU.
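As a quick summary, the state transitions performed by to_cpu() are:
// UNINITIALIZED -> HEAD_AT_CPU  (allocate host memory, zero it)
// HEAD_AT_GPU   -> SYNCED       (copy device -> host)
// HEAD_AT_CPU   -> unchanged    (host copy already fresh)
// SYNCED        -> unchanged    (both copies fresh)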
inline void SyncedMemory::to_gpu() {
#ifndef CPU_ONLY
switch (head_) {
case UNINITIALIZED:
CUDA_CHECK(cudaGetDevice(&gpu_device_));
CUDA_CHECK(cudaMalloc(&gpu_ptr_, size_));
caffe_gpu_memset(size_, 0, gpu_ptr_);
head_ = HEAD_AT_GPU;
own_gpu_data_ = true;
break;
When the memory is still uninitialized, initializing the GPU side takes two steps (cudaGetDevice, then cudaMalloc), as dictated by the CUDA API; caffe_gpu_memset then zeroes the buffer;
case HEAD_AT_CPU:
if (gpu_ptr_ == NULL) {
CUDA_CHECK(cudaGetDevice(&gpu_device_));
CUDA_CHECK(cudaMalloc(&gpu_ptr_, size_));
own_gpu_data_ = true;
}
caffe_gpu_memcpy(size_, cpu_ptr_, gpu_ptr_);
head_ = SYNCED;
break;
If the freshest copy is on the CPU, caffe_gpu_memcpy copies it over to the GPU and the state is set to SYNCED;
case HEAD_AT_GPU:
case SYNCED:
break;
}
#else
NO_GPU;
#endif
}
const void* SyncedMemory::cpu_data() {
to_cpu();
return (const void*)cpu_ptr_;
}
When reading the CPU data, the const qualifier makes the returned pointer read-only, which matters for protecting the data. Before handing out the pointer, the data is first migrated to the CPU via to_cpu().
void SyncedMemory::set_cpu_data(void* data) {
CHECK(data);
if (own_cpu_data_) {
CaffeFreeHost(cpu_ptr_, cpu_malloc_use_cuda_);
}
cpu_ptr_ = data;
head_ = HEAD_AT_CPU;
own_cpu_data_ = false;
}
Note that the CHECK macros throughout the code come from glog; they verify that a value matches your expectation and abort otherwise. Here we first check that the incoming data pointer is non-NULL; if we currently own the CPU buffer, we free it before pointing cpu_ptr_ at the caller's buffer. One detail deserves attention: own_cpu_data_ is set to false, meaning this SyncedMemory does not own the externally supplied data, so freeing it remains the caller's responsibility.
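A sketch of lending an externally owned buffer (the std::vector here is hypothetical); because own_cpu_data_ stays false, the buffer must outlive the SyncedMemory's use of it and be freed by the caller:
std::vector<float> external(256, 0.0f);                    // caller-owned storage
caffe::SyncedMemory mem(external.size() * sizeof(float));
mem.set_cpu_data(external.data());                         // no copy; mem will never free it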
const void* SyncedMemory::gpu_data() {
#ifndef CPU_ONLY
to_gpu();
return (const void*)gpu_ptr_;
#else
NO_GPU;
return NULL;
#endif
}
To get gpu_data, the CPU_ONLY preprocessor symbol must be undefined; only then can the GPU data be used. Otherwise NO_GPU fires and NULL is returned.
void SyncedMemory::set_gpu_data(void* data) {
#ifndef CPU_ONLY
CHECK(data);
if (own_gpu_data_) {
int initial_device;
cudaGetDevice(&initial_device);
if (gpu_device_ != -1) {
CUDA_CHECK(cudaSetDevice(gpu_device_));
}
CUDA_CHECK(cudaFree(gpu_ptr_));
cudaSetDevice(initial_device);
}
gpu_ptr_ = data;
head_ = HEAD_AT_GPU;
own_gpu_data_ = false;
#else
NO_GPU;
#endif
}
This function mirrors the CPU version: first CHECK the data, then, if own_gpu_data_ is set, free the old buffer to prevent a memory leak; finally point gpu_ptr_ at the supplied data pointer. As before, own_gpu_data_ ends up false, since the data comes from outside.
void* SyncedMemory::mutable_cpu_data() {
to_cpu();
head_ = HEAD_AT_CPU;
return cpu_ptr_;
}
This is the mutable version of the pointer getter. Note one detail: setting head_ to HEAD_AT_CPU here is essential, because the caller may now edit the host copy, which invalidates the GPU copy. Think of it as checking out an editable working copy.
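A small sketch of why this matters: after a mutable CPU access, the next GPU access re-copies, so the edit is not lost (variable names hypothetical):
caffe::SyncedMemory mem(4 * sizeof(float));
float* host = static_cast<float*>(mem.mutable_cpu_data());  // head_ = HEAD_AT_CPU
host[0] = 42.0f;                                            // edit the host copy
const void* dev = mem.gpu_data();                           // to_gpu() sees HEAD_AT_CPU and copies 42.0f over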
void* SyncedMemory::mutable_gpu_data() {
#ifndef CPU_ONLY
to_gpu();
head_ = HEAD_AT_GPU;
return gpu_ptr_;
#else
NO_GPU;
return NULL;
#endif
}
Same story as the CPU version. Also note that the return type is void*, which can be cast to a pointer of any type; that genericity is exactly what a raw memory holder needs.
#ifndef CPU_ONLY
void SyncedMemory::async_gpu_push(const cudaStream_t& stream) {
CHECK(head_ == HEAD_AT_CPU);
if (gpu_ptr_ == NULL) {
CUDA_CHECK(cudaGetDevice(&gpu_device_));
CUDA_CHECK(cudaMalloc(&gpu_ptr_, size_));
own_gpu_data_ = true;
}
const cudaMemcpyKind put = cudaMemcpyHostToDevice;
CUDA_CHECK(cudaMemcpyAsync(gpu_ptr_, cpu_ptr_, size_, put, stream));
// Assume caller will synchronize on the stream before use
head_ = SYNCED;
}
#endif
} // namespace caffe
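async_gpu_push got no commentary above, so here is a hypothetical usage sketch (CUDA build only). The copy is merely enqueued on the stream, and, per the comment in the source, the caller must synchronize on the stream before using the GPU data:
cudaStream_t stream;
CUDA_CHECK(cudaStreamCreate(&stream));
caffe::SyncedMemory mem(1024);
mem.mutable_cpu_data();                     // ensure head_ == HEAD_AT_CPU
mem.async_gpu_push(stream);                 // enqueue host->device copy; head_ becomes SYNCED
CUDA_CHECK(cudaStreamSynchronize(stream));  // required before touching the GPU data
CUDA_CHECK(cudaStreamDestroy(stream));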
Good. Now that we have seen what the foundation looks like, let's look at the Blob class itself.
#ifndef CAFFE_BLOB_HPP_
#define CAFFE_BLOB_HPP_
#include <algorithm>
#include <string>
#include <vector>
#include "caffe/common.hpp"
#include "caffe/proto/caffe.pb.h"
#include "caffe/syncedmem.hpp"
const int kMaxBlobAxes = 32;
This constant caps the number of axes a Blob may have (at 32);
namespace caffe {
/**
* @brief A wrapper around SyncedMemory holders serving as the basic
* computational unit through which Layer%s, Net%s, and Solver%s
* interact.
*
* TODO(dox): more thorough description.
*/
template <typename Dtype>
class Blob {
public:
Blob()
: data_(), diff_(), count_(0), capacity_(0) {}
The default constructor simply zero-initializes everything;
/// @brief Deprecated; use <code>Blob(const vector<int>& shape)</code>.
explicit Blob(const int num, const int channels, const int height,
const int width);
This four-argument form is deprecated; presumably a leftover from Yangqing Jia's early code.
explicit Blob(const vector<int>& shape);
/// @brief Deprecated; use <code>Reshape(const vector<int>& shape)</code>.
void Reshape(const int num, const int channels, const int height,
const int width);
/**
* @brief Change the dimensions of the blob, allocating new memory if
* necessary.
*
* This function can be called both to create an initial allocation
* of memory, and to adjust the dimensions of a top blob during Layer::Reshape
* or Layer::Forward. When changing the size of blob, memory will only be
* reallocated if sufficient memory does not already exist, and excess memory
* will never be freed.
*
* Note that reshaping an input blob and immediately calling Net::Backward is
* an error; either Net::Forward or Net::Reshape need to be called to
* propagate the new input shape to higher layers.
*/
void Reshape(const vector<int>& shape);
void Reshape(const BlobShape& shape);
void ReshapeLike(const Blob& other);
inline string shape_string() const {
ostringstream stream;
for (int i = 0; i < shape_.size(); ++i) {
stream << shape_[i] << " ";
}
stream << "(" << count_ << ")";
return stream.str();
}
I will come back to the Reshape functions later. shape_string() is a display helper; note that it is declared inline, which may speed up calls a little.
inline const vector<int>& shape() const { return shape_; }
/**
* @brief Returns the dimension of the index-th axis (or the negative index-th
* axis from the end, if index is negative).
*
* @param index the axis index, which may be negative as it will be
* "canonicalized" using CanonicalAxisIndex.
* Dies on out of range index.
*/
inline int shape(int index) const {
return shape_[CanonicalAxisIndex(index)];
}
These two overloaded accessors return the Blob's shape; the second one returns the extent of a particular axis, canonicalized via CanonicalAxisIndex so that negative indices work — very Python-like.
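For example (shape values hypothetical):
caffe::Blob<float> b(2, 3, 4, 5);  // shape: 2 3 4 5
b.shape(-1);                       // 5, same as b.shape(3)
b.shape(-2);                       // 4, same as b.shape(2)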
inline int num_axes() const { return shape_.size(); }
inline int count() const { return count_; }
/**
* @brief Compute the volume of a slice; i.e., the product of dimensions
* among a range of axes.
*
* @param start_axis The first axis to include in the slice.
*
* @param end_axis The first axis to exclude from the slice.
*/
inline int count(int start_axis, int end_axis) const {
CHECK_LE(start_axis, end_axis);
CHECK_GE(start_axis, 0);
CHECK_GE(end_axis, 0);
CHECK_LE(start_axis, num_axes());
CHECK_LE(end_axis, num_axes());
int count = 1;
for (int i = start_axis; i < end_axis; ++i) {
count *= shape(i);
}
return count;
}
/**
* @brief Compute the volume of a slice spanning from a particular first
* axis to the final axis.
*
* @param start_axis The first axis to include in the slice.
*/
inline int count(int start_axis) const {
return count(start_axis, num_axes());
}
These four functions all count elements: num_axes() returns the number of axes (at most 32, per the constant above), and the count() overloads return the product of the extents over a range of axes — picture slicing a high-dimensional array.
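A worked example on a hypothetical 2x3x4x5 blob:
caffe::Blob<float> b(2, 3, 4, 5);
b.count();      // 120 = 2*3*4*5
b.count(1);     // 60  = 3*4*5, from axis 1 to the end
b.count(1, 3);  // 12  = 3*4, axes 1 and 2 (the end axis is excluded)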
/**
* @brief Returns the 'canonical' version of a (usually) user-specified axis,
* allowing for negative indexing (e.g., -1 for the last axis).
*
* @param axis_index the axis index.
* If 0 <= index < num_axes(), return index.
* If -num_axes <= index <= -1, return (num_axes() - (-index)),
* e.g., the last axis index (num_axes() - 1) if index == -1,
* the second to last if index == -2, etc.
* Dies on out of range index.
*/
inline int CanonicalAxisIndex(int axis_index) const {
CHECK_GE(axis_index, -num_axes())
<< "axis " << axis_index << " out of range for " << num_axes()
<< "-D Blob with shape " << shape_string();
CHECK_LT(axis_index, num_axes())
<< "axis " << axis_index << " out of range for " << num_axes()
<< "-D Blob with shape " << shape_string();
if (axis_index < 0) {
return axis_index + num_axes();
}
return axis_index;
}
This follows the array-indexing convention of matlab or python, where -1 denotes the last axis, and so on;
/// @brief Deprecated legacy shape accessor num: use shape(0) instead.
inline int num() const { return LegacyShape(0); }
/// @brief Deprecated legacy shape accessor channels: use shape(1) instead.
inline int channels() const { return LegacyShape(1); }
/// @brief Deprecated legacy shape accessor height: use shape(2) instead.
inline int height() const { return LegacyShape(2); }
/// @brief Deprecated legacy shape accessor width: use shape(3) instead.
inline int width() const { return LegacyShape(3); }
inline int LegacyShape(int index) const {
CHECK_LE(num_axes(), 4)
<< "Cannot use legacy accessors on Blobs with > 4 axes.";
CHECK_LT(index, 4);
CHECK_GE(index, -4);
if (index >= num_axes() || index < -num_axes()) {
// Axis is out of range, but still in [0, 3] (or [-4, -1] for reverse
// indexing) -- this special case simulates the one-padding used to fill
// extraneous axes of legacy blobs.
return 1;
}
return shape(index);
}
These accessors are all deprecated and best avoided; caffe recommends the shape() functions for dimension queries. Note the interesting if in LegacyShape: when the index is out of range for this blob but still within [0, 3] (or [-4, -1]), it returns 1, simulating the one-padding that legacy 4D blobs used to fill their missing trailing axes — a nod to tradition.
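To see the one-padding in action, consider a hypothetical 2-axis blob:
std::vector<int> shape(2);
shape[0] = 10; shape[1] = 20;
caffe::Blob<float> b(shape);
b.num();       // 10 = shape(0)
b.channels();  // 20 = shape(1)
b.height();    // 1  -- axis 2 is out of range, so it one-pads
b.width();     // 1  -- axis 3 likewise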
inline int offset(const int n, const int c = 0, const int h = 0,
const int w = 0) const {
CHECK_GE(n, 0);
CHECK_LE(n, num());
CHECK_GE(channels(), 0);
CHECK_LE(c, channels());
CHECK_GE(height(), 0);
CHECK_LE(h, height());
CHECK_GE(width(), 0);
CHECK_LE(w, width());
return ((n * channels() + c) * height() + h) * width() + w;
}
You can see how handy glog's CHECK macros are: every coordinate is validated against the blob's extents before the row-major offset is computed.
inline int offset(const vector<int>& indices) const {
CHECK_LE(indices.size(), num_axes());
int offset = 0;
for (int i = 0; i < num_axes(); ++i) {
offset *= shape(i);
if (indices.size() > i) {
CHECK_GE(indices[i], 0);
CHECK_LT(indices[i], shape(i));
offset += indices[i];
}
}
return offset;
}
Both offset() overloads are really indexing helpers. The second one takes a vector of coordinates that may have fewer entries than the blob has axes; in that case the missing trailing coordinates are treated as 0. That is slightly ambiguous, so supplying the full coordinate vector is recommended — the intent reads more clearly.
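Worked arithmetic on a hypothetical 2x3x4x5 blob:
// offset(n, c, h, w) = ((n*channels + c)*height + h)*width + w
// offset(1, 2, 3, 4) = ((1*3 + 2)*4 + 3)*5 + 4 = 119  (the last element)
// offset({1, 2})     = ((1*3 + 2)*4 + 0)*5 + 0 = 100  (missing h, w read as 0)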
/**
* @brief Copy from a source Blob.
*
* @param source the Blob to copy from
* @param copy_diff if false, copy the data; if true, copy the diff
* @param reshape if false, require this Blob to be pre-shaped to the shape
* of other (and die otherwise); if true, Reshape this Blob to other's
* shape if necessary
*/
void CopyFrom(const Blob<Dtype>& source, bool copy_diff = false,
bool reshape = false);
We will study this CopyFrom function shortly, in the .cpp file.
inline Dtype data_at(const int n, const int c, const int h,
const int w) const {
return cpu_data()[offset(n, c, h, w)];
}
inline Dtype diff_at(const int n, const int c, const int h,
const int w) const {
return cpu_diff()[offset(n, c, h, w)];
}
inline Dtype data_at(const vector<int>& index) const {
return cpu_data()[offset(index)];
}
inline Dtype diff_at(const vector<int>& index) const {
return cpu_diff()[offset(index)];
}
These accessors each read back a single element from the CPU copy of the data or diff.
inline const shared_ptr<SyncedMemory>& data() const {
CHECK(data_);
return data_;
}
inline const shared_ptr<SyncedMemory>& diff() const {
CHECK(diff_);
return diff_;
}
These two functions hand out the data and diff pointers directly, and here the benefit of smart pointers shows: no manual destruction is needed. With a raw pointer, a caller could delete it after obtaining it, which would be thoroughly unsafe.
const Dtype* cpu_data() const;
void set_cpu_data(Dtype* data);
const int* gpu_shape() const;
const Dtype* gpu_data() const;
const Dtype* cpu_diff() const;
const Dtype* gpu_diff() const;
Dtype* mutable_cpu_data();
Dtype* mutable_gpu_data();
Dtype* mutable_cpu_diff();
Dtype* mutable_gpu_diff();
void Update();
These declarations are all about data access, in both mutable and immutable flavors.
void FromProto(const BlobProto& proto, bool reshape = true);
void ToProto(BlobProto* proto, bool write_diff = false) const;
These two are the all-important I/O interfaces: they connect to the BlobProto type defined in caffe.proto, which is what stores the data on disk — absolutely crucial.
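A minimal round-trip sketch (file serialization omitted):
caffe::Blob<float> src(1, 1, 2, 2);
src.mutable_cpu_data()[0] = 3.14f;
caffe::BlobProto proto;
src.ToProto(&proto, /*write_diff=*/false);  // blob -> protobuf message
caffe::Blob<float> dst;
dst.FromProto(proto);                       // reshape defaults to true
// dst now holds the same shape and data as src.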
/// @brief Compute the sum of absolute values (L1 norm) of the data.
Dtype asum_data() const;
/// @brief Compute the sum of absolute values (L1 norm) of the diff.
Dtype asum_diff() const;
/// @brief Compute the sum of squares (L2 norm squared) of the data.
Dtype sumsq_data() const;
/// @brief Compute the sum of squares (L2 norm squared) of the diff.
Dtype sumsq_diff() const;
/// @brief Scale the blob data by a constant factor.
void scale_data(Dtype scale_factor);
/// @brief Scale the blob diff by a constant factor.
void scale_diff(Dtype scale_factor);
These are the math helpers; the names say it all: L1 norm, squared L2 norm, and scaling.
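A quick sketch of what they compute (values hypothetical):
caffe::Blob<float> b(1, 1, 1, 3);
float* d = b.mutable_cpu_data();
d[0] = -1.f; d[1] = 2.f; d[2] = -3.f;
b.asum_data();       // 6  = |-1| + |2| + |-3|  (L1 norm)
b.sumsq_data();      // 14 = 1 + 4 + 9          (squared L2 norm)
b.scale_data(0.5f);  // data becomes -0.5, 1, -1.5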
/**
* @brief Set the data_ shared_ptr to point to the SyncedMemory holding the
* data_ of Blob other -- useful in Layer%s which simply perform a copy
* in their Forward pass.
*
* This deallocates the SyncedMemory holding this Blob's data_, as
* shared_ptr calls its destructor when reset with the "=" operator.
*/
void ShareData(const Blob& other);
/**
* @brief Set the diff_ shared_ptr to point to the SyncedMemory holding the
* diff_ of Blob other -- useful in Layer%s which simply perform a copy
* in their Forward pass.
*
* This deallocates the SyncedMemory holding this Blob's diff_, as
* shared_ptr calls its destructor when reset with the "=" operator.
*/
void ShareDiff(const Blob& other);
These two functions make full use of boost's shared_ptr: the pointer is redirected to the other blob's SyncedMemory, and this blob's own SyncedMemory is destroyed automatically once its reference count drops. Elegant.
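A sketch of the aliasing behavior:
caffe::Blob<float> a(1, 1, 1, 4), b(1, 1, 1, 4);
b.ShareData(a);                  // b's data_ now aliases a's SyncedMemory
a.mutable_cpu_data()[0] = 7.0f;
// b.cpu_data()[0] == 7.0f as well; no copy was made.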
bool ShapeEquals(const BlobProto& other);
protected:
shared_ptr<SyncedMemory> data_;
shared_ptr<SyncedMemory> diff_;
shared_ptr<SyncedMemory> shape_data_;
vector<int> shape_;
int count_;
int capacity_;
DISABLE_COPY_AND_ASSIGN(Blob);
The protected members use boost's shared_ptr, which has since been standardized in C++11; it lets members like data_ be used as ordinary pointers without worrying about deallocation.
}; // class Blob
} // namespace caffe
#endif // CAFFE_BLOB_HPP_
Finally, let's see how the non-inline functions left undefined so far are implemented in blob.cpp — a file of 500-plus lines.
#include <climits>
#include <vector>
#include "caffe/blob.hpp"
#include "caffe/common.hpp"
#include "caffe/syncedmem.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
template <typename Dtype>
void Blob<Dtype>::Reshape(const int num, const int channels, const int height,
const int width) {
vector<int> shape(4);
shape[0] = num;
shape[1] = channels;
shape[2] = height;
shape[3] = width;
Reshape(shape);
}
All the Reshape overloads funnel into Reshape(const vector<int>&).
template <typename Dtype>
void Blob<Dtype>::Reshape(const vector<int>& shape) {
CHECK_LE(shape.size(), kMaxBlobAxes);
count_ = 1;
shape_.resize(shape.size());
if (!shape_data_ || shape_data_->size() < shape.size() * sizeof(int)) {
shape_data_.reset(new SyncedMemory(shape.size() * sizeof(int)));
}
Let's analyze this function carefully. First, the input vector's length may not exceed 32, the hard limit on axes; then the protected member shape_ is resized, which the vector's resize method handles. The if statement that follows is key. My reading: when the shape_data_ pointer is null, or its byte size is smaller than what the int entries of the new shape require, it is reset to a fresh SyncedMemory. In other words, the SyncedMemory that shape_data_ points to is dedicated to storing the shape itself, and whenever it is too small it gets reset.
int* shape_data = static_cast<int*>(shape_data_->mutable_cpu_data());
The mutable CPU data pointer inside shape_data_ is then fetched;
for (int i = 0; i < shape.size(); ++i) {
CHECK_GE(shape[i], 0);
The CHECK_GE here means zero-sized axes are allowed: if some shape[i] is 0, then count_ becomes 0, and we end up allocating data_ and diff_ of size 0 — each still pointing at a valid SyncedMemory — which is perfectly well-defined.
CHECK_LE(shape[i], INT_MAX / count_) << "blob size exceeds INT_MAX";
count_ *= shape[i];
shape_[i] = shape[i];
shape_data[i] = shape[i];
}
if (count_ > capacity_) {
capacity_ = count_;
data_.reset(new SyncedMemory(capacity_ * sizeof(Dtype)));
diff_.reset(new SyncedMemory(capacity_ * sizeof(Dtype)));
}
}
Using capacity_ alongside count_ means memory is reallocated only when the blob grows and is never freed on shrink, which avoids repeated allocation overhead.
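A sketch of this never-shrink policy:
caffe::Blob<float> b(1, 1, 10, 10);    // count_ = 100, capacity_ = 100
b.Reshape(std::vector<int>(1, 50));    // count_ = 50: existing buffers are kept
b.Reshape(std::vector<int>(1, 200));   // count_ > capacity_: fresh SyncedMemory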
template <typename Dtype>
void Blob<Dtype>::Reshape(const BlobShape& shape) {
CHECK_LE(shape.dim_size(), kMaxBlobAxes);
vector<int> shape_vec(shape.dim_size());
for (int i = 0; i < shape.dim_size(); ++i) {
shape_vec[i] = shape.dim(i);
}
Reshape(shape_vec);
}
template <typename Dtype>
void Blob<Dtype>::ReshapeLike(const Blob<Dtype>& other) {
Reshape(other.shape());
}
Note that the BlobShape overload works with a protobuffer type; observe how its size is obtained via dim_size().
template <typename Dtype>
Blob<Dtype>::Blob(const int num, const int channels, const int height,
const int width)
// capacity_ must be initialized before calling Reshape
: capacity_(0) {
Reshape(num, channels, height, width);
}
template <typename Dtype>
Blob<Dtype>::Blob(const vector<int>& shape)
// capacity_ must be initialized before calling Reshape
: capacity_(0) {
Reshape(shape);
}
Two constructors: both set capacity_ to 0 (which must happen before Reshape) and then delegate to Reshape.
template <typename Dtype>
const int* Blob<Dtype>::gpu_shape() const {
CHECK(shape_data_);
return (const int*)shape_data_->gpu_data();
}
template <typename Dtype>
const Dtype* Blob<Dtype>::cpu_data() const {
CHECK(data_);
return (const Dtype*)data_->cpu_data();
}
template <typename Dtype>
void Blob<Dtype>::set_cpu_data(Dtype* data) {
CHECK(data);
data_->set_cpu_data(data);
}
template <typename Dtype>
const Dtype* Blob<Dtype>::gpu_data() const {
CHECK(data_);
return (const Dtype*)data_->gpu_data();
}
template <typename Dtype>
const Dtype* Blob<Dtype>::cpu_diff() const {
CHECK(diff_);
return (const Dtype*)diff_->cpu_data();
}
template <typename Dtype>
const Dtype* Blob<Dtype>::gpu_diff() const {
CHECK(diff_);
return (const Dtype*)diff_->gpu_data();
}
template <typename Dtype>
Dtype* Blob<Dtype>::mutable_cpu_data() {
CHECK(data_);
return static_cast<Dtype*>(data_->mutable_cpu_data());
}
template <typename Dtype>
Dtype* Blob<Dtype>::mutable_gpu_data() {
CHECK(data_);
return static_cast<Dtype*>(data_->mutable_gpu_data());
}
template <typename Dtype>
Dtype* Blob<Dtype>::mutable_cpu_diff() {
CHECK(diff_);
return static_cast<Dtype*>(diff_->mutable_cpu_data());
}
template <typename Dtype>
Dtype* Blob<Dtype>::mutable_gpu_diff() {
CHECK(diff_);
return static_cast<Dtype*>(diff_->mutable_gpu_data());
}
The interface functions above all fetch the blob's data or diff pointers; since data_ and diff_ are smart pointers to SyncedMemory, they simply delegate to its member functions to obtain the raw pointers.
template <typename Dtype>
void Blob<Dtype>::ShareData(const Blob& other) {
CHECK_EQ(count_, other.count());
data_ = other.data();
}
template <typename Dtype>
void Blob<Dtype>::ShareDiff(const Blob& other) {
CHECK_EQ(count_, other.count());
diff_ = other.diff();
}
Two blobs can share data only if they have the same count_. shared_ptr shows its strength here: no new allocation is needed, and the assignment takes care of everything, including the old memory's lifetime.
// The "update" method is used for parameter blobs in a Net, which are stored
// as Blob<float> or Blob<double> -- hence we do not define it for
// Blob<int> or Blob<unsigned int>.
template <> void Blob<unsigned int>::Update() { NOT_IMPLEMENTED; }
template <> void Blob<int>::Update() { NOT_IMPLEMENTED; }
template <typename Dtype>
void Blob<Dtype>::Update() {
// We will perform update based on where the data is located.
switch (data_->head()) {
case SyncedMemory::HEAD_AT_CPU:
// perform computation on CPU
caffe_axpy<Dtype>(count_, Dtype(-1),
static_cast<const Dtype*>(diff_->cpu_data()),
static_cast<Dtype*>(data_->mutable_cpu_data()));
break;
case SyncedMemory::HEAD_AT_GPU:
case SyncedMemory::SYNCED:
#ifndef CPU_ONLY
// perform computation on GPU
caffe_gpu_axpy<Dtype>(count_, Dtype(-1),
static_cast<const Dtype*>(diff_->gpu_data()),
static_cast<Dtype*>(data_->mutable_gpu_data()));
#else
NO_GPU;
#endif
break;
default:
LOG(FATAL) << "Syncedmem not initialized.";
}
}
This is the Update step for parameter blobs. Since parameters can only be floating point, the <int> and <unsigned int> specializations are stubbed out with the NOT_IMPLEMENTED macro at the top. The axpy call computes data := data + (-1) * diff, i.e. data -= diff — one plain descent step with the scaled gradient already stored in diff;
template <> unsigned int Blob<unsigned int>::asum_data() const {
NOT_IMPLEMENTED;
return 0;
}
template <> int Blob<int>::asum_data() const {
NOT_IMPLEMENTED;
return 0;
}
template <typename Dtype>
Dtype Blob<Dtype>::asum_data() const {
if (!data_) { return 0; }
The data_ smart pointer is checked first: if it is null, there is nothing to sum and the result would be 0 anyway, so 0 is returned immediately;
switch (data_->head()) {
case SyncedMemory::HEAD_AT_CPU:
return caffe_cpu_asum(count_, cpu_data());
case SyncedMemory::HEAD_AT_GPU:
case SyncedMemory::SYNCED:
#ifndef CPU_ONLY
{
Dtype asum;
caffe_gpu_asum(count_, gpu_data(), &asum);
return asum;
}
#else
NO_GPU;
#endif
case SyncedMemory::UNINITIALIZED:
return 0;
default:
LOG(FATAL) << "Unknown SyncedMemory head state: " << data_->head();
}
return 0;
}
You can see that the head() information is central here. Also note that the CPU and GPU asum routines differ in signature: the GPU version takes an extra output-pointer argument through which it returns the result.
template <> unsigned int Blob<unsigned int>::asum_diff() const {
NOT_IMPLEMENTED;
return 0;
}
template <> int Blob<int>::asum_diff() const {
NOT_IMPLEMENTED;
return 0;
}
template <typename Dtype>
Dtype Blob<Dtype>::asum_diff() const {
if (!diff_) { return 0; }
switch (diff_->head()) {
case SyncedMemory::HEAD_AT_CPU:
return caffe_cpu_asum(count_, cpu_diff());
case SyncedMemory::HEAD_AT_GPU:
case SyncedMemory::SYNCED:
#ifndef CPU_ONLY
{
Dtype asum;
caffe_gpu_asum(count_, gpu_diff(), &asum);
return asum;
}
#else
NO_GPU;
#endif
case SyncedMemory::UNINITIALIZED:
return 0;
default:
LOG(FATAL) << "Unknown SyncedMemory head state: " << diff_->head();
}
return 0;
}
The diff version follows exactly the same logic as the data version.
template <> unsigned int Blob<unsigned int>::sumsq_data() const {
NOT_IMPLEMENTED;
return 0;
}
template <> int Blob<int>::sumsq_data() const {
NOT_IMPLEMENTED;
return 0;
}
template <typename Dtype>
Dtype Blob<Dtype>::sumsq_data() const {
Dtype sumsq;
const Dtype* data;
if (!data_) { return 0; }
switch (data_->head()) {
case SyncedMemory::HEAD_AT_CPU:
data = cpu_data();
sumsq = caffe_cpu_dot(count_, data, data);
break;
case SyncedMemory::HEAD_AT_GPU:
case SyncedMemory::SYNCED:
#ifndef CPU_ONLY
data = gpu_data();
caffe_gpu_dot(count_, data, data, &sumsq);
#else
NO_GPU;
#endif
break;
case SyncedMemory::UNINITIALIZED:
return 0;
default:
LOG(FATAL) << "Unknown SyncedMemory head state: " << data_->head();
}
return sumsq;
}
template <> unsigned int Blob<unsigned int>::sumsq_diff() const {
NOT_IMPLEMENTED;
return 0;
}
template <> int Blob<int>::sumsq_diff() const {
NOT_IMPLEMENTED;
return 0;
}
template <typename Dtype>
Dtype Blob<Dtype>::sumsq_diff() const {
Dtype sumsq;
const Dtype* diff;
if (!diff_) { return 0; }
switch (diff_->head()) {
case SyncedMemory::HEAD_AT_CPU:
diff = cpu_diff();
sumsq = caffe_cpu_dot(count_, diff, diff);
break;
case SyncedMemory::HEAD_AT_GPU:
case SyncedMemory::SYNCED:
#ifndef CPU_ONLY
diff = gpu_diff();
caffe_gpu_dot(count_, diff, diff, &sumsq);
break;
#else
NO_GPU;
#endif
case SyncedMemory::UNINITIALIZED:
return 0;
default:
LOG(FATAL) << "Unknown SyncedMemory head state: " << data_->head();
}
return sumsq;
}
The sumsq computation for data and diff follows the same pattern as before. This is the squared L2 norm, which is needed when computing errors and regularization terms.
template <> void Blob<unsigned int>::scale_data(unsigned int scale_factor) {
NOT_IMPLEMENTED;
}
template <> void Blob<int>::scale_data(int scale_factor) {
NOT_IMPLEMENTED;
}
template <typename Dtype>
void Blob<Dtype>::scale_data(Dtype scale_factor) {
Dtype* data;
if (!data_) { return; }
switch (data_->head()) {
case SyncedMemory::HEAD_AT_CPU:
data = mutable_cpu_data();
caffe_scal(count_, scale_factor, data);
return;
case SyncedMemory::HEAD_AT_GPU:
case SyncedMemory::SYNCED:
#ifndef CPU_ONLY
data = mutable_gpu_data();
caffe_gpu_scal(count_, scale_factor, data);
return;
#else
NO_GPU;
#endif
case SyncedMemory::UNINITIALIZED:
return;
default:
LOG(FATAL) << "Unknown SyncedMemory head state: " << data_->head();
}
}
template <> void Blob<unsigned int>::scale_diff(unsigned int scale_factor) {
NOT_IMPLEMENTED;
}
template <> void Blob<int>::scale_diff(int scale_factor) {
NOT_IMPLEMENTED;
}
template <typename Dtype>
void Blob<Dtype>::scale_diff(Dtype scale_factor) {
Dtype* diff;
if (!diff_) { return; }
switch (diff_->head()) {
case SyncedMemory::HEAD_AT_CPU:
diff = mutable_cpu_diff();
caffe_scal(count_, scale_factor, diff);
return;
case SyncedMemory::HEAD_AT_GPU:
case SyncedMemory::SYNCED:
#ifndef CPU_ONLY
diff = mutable_gpu_diff();
caffe_gpu_scal(count_, scale_factor, diff);
return;
#else
NO_GPU;
#endif
case SyncedMemory::UNINITIALIZED:
return;
default:
LOG(FATAL) << "Unknown SyncedMemory head state: " << diff_->head();
}
}
These two functions scale data or diff by a constant factor, and they earn their keep: for example, input images often need the data blob rescaled from 0-255 down to 0-1 by multiplying with a scale factor — a trick used with the MNIST data.
template <typename Dtype>
bool Blob<Dtype>::ShapeEquals(const BlobProto& other) {
if (other.has_num() || other.has_channels() ||
other.has_height() || other.has_width()) {
// Using deprecated 4D Blob dimensions --
// shape is (num, channels, height, width).
// Note: we do not use the normal Blob::num(), Blob::channels(), etc.
// methods as these index from the beginning of the blob shape, where legacy
// parameter blobs were indexed from the end of the blob shape (e.g., bias
// Blob shape (1 x 1 x 1 x N), IP layer weight Blob shape (1 x 1 x M x N)).
return shape_.size() <= 4 &&
LegacyShape(-4) == other.num() &&
LegacyShape(-3) == other.channels() &&
LegacyShape(-2) == other.height() &&
LegacyShape(-1) == other.width();
}
vector<int> other_shape(other.shape().dim_size());
for (int i = 0; i < other.shape().dim_size(); ++i) {
other_shape[i] = other.shape().dim(i);
}
return shape_ == other_shape;
}
This part takes some thought. For legacy 4D protos the comparison is aligned from the end of the shape: a bias blob, say, effectively has only one meaningful axis and was stored as (1 x 1 x 1 x N), so the real dimension sits in width, not num — hence the LegacyShape(-4..-1) comparisons.
template <typename Dtype>
void Blob<Dtype>::CopyFrom(const Blob& source, bool copy_diff, bool reshape) {
if (source.count() != count_ || source.shape() != shape_) {
if (reshape) {
ReshapeLike(source);
} else {
LOG(FATAL) << "Trying to copy blobs of different sizes.";
}
}
switch (Caffe::mode()) {
case Caffe::GPU:
if (copy_diff) {
caffe_copy(count_, source.gpu_diff(),
static_cast<Dtype*>(diff_->mutable_gpu_data()));
} else {
caffe_copy(count_, source.gpu_data(),
static_cast<Dtype*>(data_->mutable_gpu_data()));
}
break;
case Caffe::CPU:
if (copy_diff) {
caffe_copy(count_, source.cpu_diff(),
static_cast<Dtype*>(diff_->mutable_cpu_data()));
} else {
caffe_copy(count_, source.cpu_data(),
static_cast<Dtype*>(data_->mutable_cpu_data()));
}
break;
default:
LOG(FATAL) << "Unknown caffe mode.";
}
}
CopyFrom copies the data or diff from an existing Blob; setting reshape to true makes this blob re-allocate its memory to match the source blob's shape first.
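A usage sketch:
caffe::Blob<float> src(2, 3, 4, 5), dst;
dst.CopyFrom(src, /*copy_diff=*/false, /*reshape=*/true);  // dst reshaped to match src
// Had the shapes differed with reshape left false, this would LOG(FATAL).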
template <typename Dtype>
void Blob<Dtype>::FromProto(const BlobProto& proto, bool reshape) {
if (reshape) {
vector<int> shape;
if (proto.has_num() || proto.has_channels() ||
proto.has_height() || proto.has_width()) {
// Using deprecated 4D Blob dimensions --
// shape is (num, channels, height, width).
shape.resize(4);
shape[0] = proto.num();
shape[1] = proto.channels();
shape[2] = proto.height();
shape[3] = proto.width();
} else {
shape.resize(proto.shape().dim_size());
for (int i = 0; i < proto.shape().dim_size(); ++i) {
shape[i] = proto.shape().dim(i);
}
}
Reshape(shape);
} else {
CHECK(ShapeEquals(proto)) << "shape mismatch (reshape not set)";
}
When copying from a BlobProto, the first requirement is that the shapes match; if they don't, either reshape must be requested or the CHECK fails.
// copy data
Dtype* data_vec = mutable_cpu_data();
if (proto.double_data_size() > 0) {
CHECK_EQ(count_, proto.double_data_size());
for (int i = 0; i < count_; ++i) {
data_vec[i] = proto.double_data(i);
}
} else {
CHECK_EQ(count_, proto.data_size());
for (int i = 0; i < count_; ++i) {
data_vec[i] = proto.data(i);
}
}
if (proto.double_diff_size() > 0) {
CHECK_EQ(count_, proto.double_diff_size());
Dtype* diff_vec = mutable_cpu_diff();
for (int i = 0; i < count_; ++i) {
diff_vec[i] = proto.double_diff(i);
}
} else if (proto.diff_size() > 0) {
CHECK_EQ(count_, proto.diff_size());
Dtype* diff_vec = mutable_cpu_diff();
for (int i = 0; i < count_; ++i) {
diff_vec[i] = proto.diff(i);
}
}
}
When copying the payload, double-precision data takes priority; if absent, the lower-precision float data is copied instead, which is entirely reasonable. The diff, when present, is copied the same way.
template <>
void Blob<double>::ToProto(BlobProto* proto, bool write_diff) const {
proto->clear_shape();
for (int i = 0; i < shape_.size(); ++i) {
proto->mutable_shape()->add_dim(shape_[i]);
}
proto->clear_double_data();
proto->clear_double_diff();
const double* data_vec = cpu_data();
for (int i = 0; i < count_; ++i) {
proto->add_double_data(data_vec[i]);
}
if (write_diff) {
const double* diff_vec = cpu_diff();
for (int i = 0; i < count_; ++i) {
proto->add_double_diff(diff_vec[i]);
}
}
}
template <>
void Blob<float>::ToProto(BlobProto* proto, bool write_diff) const {
proto->clear_shape();
for (int i = 0; i < shape_.size(); ++i) {
proto->mutable_shape()->add_dim(shape_[i]);
}
proto->clear_data();
proto->clear_diff();
const float* data_vec = cpu_data();
for (int i = 0; i < count_; ++i) {
proto->add_data(data_vec[i]);
}
if (write_diff) {
const float* diff_vec = cpu_diff();
for (int i = 0; i < count_; ++i) {
proto->add_diff(diff_vec[i]);
}
}
}
When writing data into a BlobProto, the template is explicitly specialized for the two types, double and float. Two things are written. First the shape: protobuffer's generated clear_shape() empties it, then add_dim() appends each extent — after obtaining the writable pointer via mutable_shape(), of course. Then the data (and optionally the diff), again cleared and re-added element by element. For the details of these generated methods, consult the protobuffer documentation.
INSTANTIATE_CLASS(Blob);
This provides the explicit instantiations for float and double via a #define in common.hpp — a technique worth learning; the two lines below then add the integer variants by hand.
template class Blob<int>;
template class Blob<unsigned int>;
} // namespace caffe