Upgrading the MATLAB interface by adding new methods to the MATLAB class Net #4996

Closed · wants to merge 17 commits
Makefile: 14 additions & 4 deletions
@@ -192,12 +192,12 @@ ifeq ($(USE_LMDB), 1)
LIBRARIES += lmdb
endif
ifeq ($(USE_OPENCV), 1)
LIBRARIES += opencv_core opencv_highgui opencv_imgproc

ifeq ($(OPENCV_VERSION), 3)
LIBRARIES += opencv_imgcodecs
endif

endif
PYTHON_LIBRARIES ?= boost_python python2.7
WARNINGS := -Wall -Wno-sign-compare
@@ -385,7 +385,7 @@ else
XCODE_CLT_GEQ_7 := $(shell [ $(XCODE_CLT_VER) -gt 6 ] && echo 1)
XCODE_CLT_GEQ_6 := $(shell [ $(XCODE_CLT_VER) -gt 5 ] && echo 1)
ifeq ($(XCODE_CLT_GEQ_7), 1)
-BLAS_INCLUDE ?= /Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX10.11.sdk/System/Library/Frameworks/Accelerate.framework/Versions/A/Frameworks/vecLib.framework/Versions/A/Headers
+BLAS_INCLUDE ?= /Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/$(shell ls /Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/ | sort | tail -1)/System/Library/Frameworks/Accelerate.framework/Versions/A/Frameworks/vecLib.framework/Versions/A/Headers
else ifeq ($(XCODE_CLT_GEQ_6), 1)
BLAS_INCLUDE ?= /System/Library/Frameworks/Accelerate.framework/Versions/Current/Frameworks/vecLib.framework/Headers/
LDFLAGS += -framework Accelerate
@@ -409,6 +409,14 @@ CXXFLAGS += -pthread -fPIC $(COMMON_FLAGS) $(WARNINGS)
NVCCFLAGS += -ccbin=$(CXX) -Xcompiler -fPIC $(COMMON_FLAGS)
# mex may invoke an older gcc that is too liberal with -Wuninitialized
MATLAB_CXXFLAGS := $(CXXFLAGS) -Wno-uninitialized
+ifneq ($(CPU_ONLY), 1)
+# required for compiling with support for mxGPUArray
+MATLAB_INCLUDE := -I$(MATLAB_DIR)/toolbox/distcomp/gpu/extern/include
+MATLAB_LIBS := -lmwgpu
+else
+MATLAB_INCLUDE :=
+MATLAB_LIBS :=
+endif
LINKFLAGS += -pthread -fPIC $(COMMON_FLAGS) $(WARNINGS)

USE_PKG_CONFIG ?= 0
@@ -517,7 +525,9 @@ $(MAT$(PROJECT)_SO): $(MAT$(PROJECT)_SRC) $(STATIC_NAME)
$(Q)$(MATLAB_DIR)/bin/mex $(MAT$(PROJECT)_SRC) \
CXX="$(CXX)" \
CXXFLAGS="\$$CXXFLAGS $(MATLAB_CXXFLAGS)" \
-CXXLIBS="\$$CXXLIBS $(STATIC_LINK_COMMAND) $(LDFLAGS)" -output $@
+INCLUDE="\$$INCLUDE $(MATLAB_INCLUDE)" \
+CXXLIBS="\$$CXXLIBS $(STATIC_LINK_COMMAND) $(LDFLAGS) $(MATLAB_LIBS)" \
+-output $@
@ if [ -f "$(PROJECT)_.d" ]; then \
mv -f $(PROJECT)_.d $(BUILD_DIR)/${MAT$(PROJECT)_SO:.$(MAT_SO_EXT)=.d}; \
fi
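Since the mex step now links against MATLAB's GPU runtime (-lmwgpu) whenever CPU_ONLY is unset, a quick smoke test of the resulting matcaffe binary can be run from MATLAB. This is a hypothetical check, assuming a CUDA device 0 and the Parallel Computing Toolbox:

```matlab
% Hypothetical smoke test after `make matcaffe` (GPU build assumed)
addpath('matlab');                 % matcaffe lives under <caffe_root>/matlab
caffe.set_mode_gpu();
caffe.set_device(0);               % assumes CUDA device 0 exists
g = gpuArray(single(magic(4)));    % exercises the mxGPUArray machinery
disp(gather(g));
```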
include/caffe/layer.hpp: 7 additions & 0 deletions
@@ -316,6 +316,13 @@ class Layer {
param_propagate_down_[param_id] = value;
}

+/**
+ * @brief Set the layer's phase.
+ * Enables training and testing with a single network, to save memory.
+ */
+virtual inline void set_phase(Phase phase) {
+  phase_ = phase;
+}

protected:
/** The protobuf that stores the layer parameters */
include/caffe/layers/sigmoid_cross_entropy_loss_layer.hpp: 16 additions & 0 deletions
@@ -97,6 +97,13 @@ class SigmoidCrossEntropyLossLayer : public LossLayer<Dtype> {
virtual void Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);

+/// Read the normalization mode parameter and compute the normalizer based
+/// on the blob size. If normalization_mode is VALID, the count of valid
+/// outputs will be read from valid_count, unless it is -1 in which case
+/// all outputs are assumed to be valid.
+virtual Dtype get_normalizer(
+    LossParameter_NormalizationMode normalization_mode, int valid_count);

/// The internal SigmoidLayer used to map predictions to probabilities.
shared_ptr<SigmoidLayer<Dtype> > sigmoid_layer_;
/// sigmoid_output stores the output of the SigmoidLayer.
@@ -105,6 +112,15 @@ class SigmoidCrossEntropyLossLayer : public LossLayer<Dtype> {
vector<Blob<Dtype>*> sigmoid_bottom_vec_;
/// top vector holder to call the underlying SigmoidLayer::Forward
vector<Blob<Dtype>*> sigmoid_top_vec_;

+/// Whether to ignore instances with a certain label.
+bool has_ignore_label_;
+/// The label indicating that an instance should be ignored.
+int ignore_label_;
+/// How to normalize the loss.
+LossParameter_NormalizationMode normalization_;
+Dtype normalizer_;
+int outer_num_, inner_num_;
};

} // namespace caffe
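The header only declares get_normalizer; its semantics are assumed here to mirror the normalization modes already used by SoftmaxWithLossLayer. The following is an illustrative MATLAB transcription, not the PR's C++ implementation; outer_num and inner_num stand in for the member variables above:

```matlab
function normalizer = get_normalizer(mode, valid_count, outer_num, inner_num)
% Illustrative sketch only; assumed to mirror SoftmaxWithLossLayer's modes.
switch mode
  case 'FULL'        % normalize over every output
    normalizer = outer_num * inner_num;
  case 'VALID'       % normalize over outputs not matching ignore_label
    if valid_count == -1
      normalizer = outer_num * inner_num;  % no ignore_label: all valid
    else
      normalizer = valid_count;
    end
  case 'BATCH_SIZE'  % normalize by the batch size only
    normalizer = outer_num;
  case 'NONE'        % no normalization
    normalizer = 1;
end
normalizer = max(normalizer, 1);  % guard against division by zero
end
```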
include/caffe/net.hpp: 12 additions & 0 deletions
@@ -32,6 +32,10 @@ class Net {
/// @brief Initialize a network with a NetParameter.
void Init(const NetParameter& param);

+/// @brief Set the phase.
+/// Enables training and testing with a single network, to save memory.
+void SetPhase(Phase phase);

/**
* @brief Run Forward and return the result.
*
@@ -150,6 +154,14 @@
inline const vector<vector<Blob<Dtype>*> >& top_vecs() const {
return top_vecs_;
}
+/// @brief returns the bottom blob ids for each layer
+inline const vector<vector<int> >& bottom_id_vecs() const {
+  return bottom_id_vecs_;
+}
+/// @brief returns the top blob ids for each layer
+inline const vector<vector<int> >& top_id_vecs() const {
+  return top_id_vecs_;
+}
/// @brief returns the ids of the top blobs of layer i
inline const vector<int> & top_ids(int i) const {
CHECK_GE(i, 0) << "Invalid layer id";
include/caffe/solver.hpp: 3 additions & 1 deletion
@@ -73,7 +73,7 @@ class Solver {
return test_nets_;
}
int iter() { return iter_; }

+int max_iter() const { return param_.max_iter(); }
// Invoked at specific points during an iteration
class Callback {
protected:
@@ -108,6 +108,8 @@
virtual void RestoreSolverStateFromBinaryProto(const string& state_file) = 0;
void DisplayOutputBlobs(const int net_id);
void UpdateSmoothedLoss(Dtype loss, int start_iter, int average_loss);
+/// Harmonize solver class type with configured proto type.
+void CheckType(SolverParameter* param);

SolverParameter param_;
int iter_;
matlab/+caffe/Layer.m: 3 additions & 0 deletions
@@ -25,6 +25,9 @@
self.params(n) = caffe.Blob(self.attributes.hBlob_blobs(n));
end
end
+function set_params_data(self, blob_index, params)
+caffe.Blob(self.attributes.hBlob_blobs(blob_index)).set_data(params);
+end
function layer_type = type(self)
layer_type = caffe_('layer_get_type', self.hLayer_self);
end
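For illustration, a hedged usage sketch of the new layer-level setter; the net and the layer name 'conv1' are hypothetical, and the Net class below adds a name-based wrapper with the same effect:

```matlab
% Hypothetical: overwrite the bias blob (param 2) of a conv layer
layer = net.layers('conv1');
bias  = layer.params(2).get_data();
layer.set_params_data(2, zeros(size(bias), 'single'));
```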
matlab/+caffe/Net.m: 60 additions & 0 deletions
@@ -21,6 +21,8 @@
name2blob_index
layer_names
blob_names
+bottom_id_vecs
+top_id_vecs
end

methods
@@ -67,6 +69,23 @@
% expose layer_names and blob_names for public read access
self.layer_names = self.attributes.layer_names;
self.blob_names = self.attributes.blob_names;

+% expose bottom_id_vecs and top_id_vecs for public read access;
+% shift the C++ 0-based blob ids to MATLAB's 1-based indexing
+self.attributes.bottom_id_vecs = cellfun(@(x) x+1, self.attributes.bottom_id_vecs, 'UniformOutput', false);
+self.bottom_id_vecs = self.attributes.bottom_id_vecs;
+self.attributes.top_id_vecs = cellfun(@(x) x+1, self.attributes.top_id_vecs, 'UniformOutput', false);
+self.top_id_vecs = self.attributes.top_id_vecs;
end
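As an illustration, the 1-based id vectors can be used to look up blob names per layer. A hedged sketch; the layer index k is hypothetical:

```matlab
% Names of the blobs consumed and produced by layer k (1-based indices)
k = 2;  % hypothetical layer index
bottom_names = net.blob_names(net.bottom_id_vecs{k});
top_names    = net.blob_names(net.top_id_vecs{k});
```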
+function set_phase(self, phase_name)
+CHECK(ischar(phase_name), 'phase_name must be a string');
+CHECK(strcmp(phase_name, 'train') || strcmp(phase_name, 'test'), ...
+  'phase_name can only be ''train'' or ''test''');
+caffe_('net_set_phase', self.hNet_self, phase_name);
+end
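A hedged usage sketch of phase switching; the model and weight files are hypothetical:

```matlab
% Hypothetical: train and test with a single net to save memory
net = caffe.Net('train_val.prototxt', 'weights.caffemodel', 'train');
net.set_phase('test');    % switch e.g. Dropout to inference behavior
% ... run validation ...
net.set_phase('train');   % and back to training
```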
+function share_weights_with(self, net)
+CHECK(is_valid_handle(net.hNet_self), 'invalid Net handle');
+caffe_('net_share_trained_layers_with', self.hNet_self, net.hNet_self);
+end
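A hedged sketch of weight sharing between a training net and a test net; the prototxt file is hypothetical:

```matlab
% Hypothetical: test net reuses the training net's parameter memory
train_net = caffe.Net('train_val.prototxt', 'train');
test_net  = caffe.Net('train_val.prototxt', 'test');
test_net.share_weights_with(train_net);
```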
function layer = layers(self, layer_name)
CHECK(ischar(layer_name), 'layer_name must be a string');
@@ -81,18 +100,43 @@
CHECK(isscalar(blob_index), 'blob_index must be a scalar');
blob = self.layer_vec(self.name2layer_index(layer_name)).params(blob_index);
end
+function set_params_data(self, layer_name, blob_index, data)
+CHECK(ischar(layer_name), 'layer_name must be a string');
+CHECK(isscalar(blob_index), 'blob_index must be a scalar');
+self.layer_vec(self.name2layer_index(layer_name)).set_params_data(blob_index, data);
+end
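The same thing by layer name, via this Net-level wrapper; the layer name is hypothetical:

```matlab
% Hypothetical: zero the bias (param 2) of layer 'conv1' by name
sz = size(net.params('conv1', 2).get_data());
net.set_params_data('conv1', 2, zeros(sz, 'single'));
```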
function forward_prefilled(self)
caffe_('net_forward', self.hNet_self);
end
function backward_prefilled(self)
caffe_('net_backward', self.hNet_self);
end
+function set_input_data(self, input_data)
+CHECK(iscell(input_data), 'input_data must be a cell array');
+CHECK(length(input_data) == length(self.inputs), ...
+  'input data cell length must match input blob number');
+% copy data to input blobs
+for n = 1:length(self.inputs)
+self.blobs(self.inputs{n}).set_data(input_data{n});
+end
+end
+function res = get_output(self)
+% get output blobs
+res = struct('blob_name', '', 'data', []);
+for n = 1:length(self.outputs)
+res(n).blob_name = self.outputs{n};
+res(n).data = self.blobs(self.outputs{n}).get_data();
+end
+end
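Together these support a fill-run-read workflow without calling forward() directly. A hedged sketch with a hypothetical input shape; note also that forward() below now skips empty cells, leaving those input blobs' current contents in place:

```matlab
% Hypothetical: fill input blobs, run the net, read all output blobs
net.set_input_data({single(rand(227, 227, 3, 1))});  % assumes one input blob
net.forward_prefilled();
res = net.get_output();        % struct array with fields blob_name, data
disp(res(1).blob_name);
```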
function res = forward(self, input_data)
CHECK(iscell(input_data), 'input_data must be a cell array');
CHECK(length(input_data) == length(self.inputs), ...
'input data cell length must match input blob number');
% copy data to input blobs
for n = 1:length(self.inputs)
+if isempty(input_data{n})
+continue;
+end
self.blobs(self.inputs{n}).set_data(input_data{n});
end
self.forward_prefilled();
@@ -122,9 +166,25 @@ function copy_from(self, weights_file)
CHECK_FILE_EXIST(weights_file);
caffe_('net_copy_from', self.hNet_self, weights_file);
end

function reshape(self)
caffe_('net_reshape', self.hNet_self);
end
+function reshape_as_input(self, input_data)
+CHECK(iscell(input_data), 'input_data must be a cell array');
+CHECK(length(input_data) == length(self.inputs), ...
+  'input data cell length must match input blob number');
+% reshape input blobs
+for n = 1:length(self.inputs)
+if isempty(input_data{n})
+continue;
+end
+input_data_size = size(input_data{n});
+input_data_size_extended = [input_data_size, ones(1, 4 - length(input_data_size))];
+self.blobs(self.inputs{n}).reshape(input_data_size_extended);
+end
+self.reshape();
+end
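A hedged sketch of running variable-sized inputs; the image and net are hypothetical. The size vector is padded with ones up to 4-D before the blob reshape:

```matlab
% Hypothetical: adapt the net to a new input size, then run it
im = single(rand(384, 512, 3));   % 3-D input, padded to [384 512 3 1]
net.reshape_as_input({im});
out = net.forward({im});
```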
function save(self, weights_file)
CHECK(ischar(weights_file), 'weights_file must be a string');
caffe_('net_save', self.hNet_self, weights_file);
Expand Down
matlab/+caffe/Solver.m: 3 additions & 0 deletions
@@ -39,6 +39,9 @@
function iter = iter(self)
iter = caffe_('solver_get_iter', self.hSolver_self);
end
+function max_iter = max_iter(self)
+max_iter = caffe_('solver_get_max_iter', self.hSolver_self);
+end
function restore(self, snapshot_filename)
CHECK(ischar(snapshot_filename), 'snapshot_filename must be a string');
CHECK_FILE_EXIST(snapshot_filename);
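With iter() and the new max_iter(), a resumable training loop can be sketched; the solver file is hypothetical:

```matlab
% Hypothetical: step in chunks until the configured max_iter is reached
solver = caffe.Solver('solver.prototxt');
while solver.iter() < solver.max_iter()
  solver.step(100);   % run 100 iterations
  % ... e.g. snapshot weights or log progress here ...
end
```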
matlab/+caffe/init_log.m: 15 additions & 0 deletions (new file)
@@ -0,0 +1,15 @@
function init_log(log_base_filename)
% init_log(log_base_filename)
% initialize Caffe's logging; log files are written under log_base_filename

CHECK(ischar(log_base_filename) && ~isempty(log_base_filename), ...
'log_base_filename must be a string');

log_base_dir = fileparts(log_base_filename);
if ~isempty(log_base_dir) && ~exist(log_base_dir, 'dir')
mkdir(log_base_dir);
end

caffe_('init_log', log_base_filename);

end
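A hedged usage sketch with a hypothetical log path; init_log should run before other caffe calls so that early messages are captured:

```matlab
% Hypothetical: send Caffe's glog output to files under ./log/
caffe.init_log(fullfile(pwd, 'log', 'matcaffe'));
caffe.set_mode_cpu();
```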