Skip to content
This repository has been archived by the owner on Nov 17, 2023. It is now read-only.

Commit

Permalink
Merge pull request #3 from dmlc/master
Browse files Browse the repository at this point in the history
Update dev branch
  • Loading branch information
antinucleon committed Jul 1, 2015
2 parents b55212f + 480604f commit a8f9ee3
Show file tree
Hide file tree
Showing 28 changed files with 1,488 additions and 99 deletions.
11 changes: 11 additions & 0 deletions .gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -30,3 +30,14 @@
dmlc-core
mshadow
config.mk

*.pyc
.Rhistory
*log
Debug
*suo

# vim
*.swp
*.swo
*.swn
19 changes: 14 additions & 5 deletions Makefile
Original file line number Diff line number Diff line change
Expand Up @@ -48,20 +48,23 @@ endif

BIN = test/api_registry_test
OBJ = storage.o narray_op_cpu.o operator.o operator_cpu.o
OBJCXX11 = engine.o narray.o mxnet_api.o api_registry.o
# add threaded_engine.o here once the threaded engine is done
# NOTE: engine.o was accidentally listed twice; duplicate removed so the
# archive/link lists do not carry the same object file two times
OBJCXX11 = engine.o narray.o mxnet_api.o api_registry.o
CUOBJ = narray_op_gpu.o operator_gpu.o

SLIB = api/libmxnet.so
ALIB = api/libmxnet.a
LIB_DEP = $(DMLC_CORE)/libdmlc.a

.PHONY: clean all

all: $(OBJ) $(OBJCXX11) $(CUOBJ) $(BIN)
all: $(ALIB) $(SLIB) $(BIN)

# build dmlc-core by delegating to its own Makefile
# `+` runs the sub-make even under `make -n`; $(MAKE) (not literal `make`)
# propagates -j/jobserver and command-line flags; `&&` stops on cd failure.
# The old trailing `cd $(ROOTDIR)` was a no-op: each recipe line gets its own shell.
$(DMLC_CORE)/libdmlc.a:
	+cd $(DMLC_CORE) && $(MAKE) libdmlc.a config=$(ROOTDIR)/$(config)

storage.o: src/storage/storage.cc
engine.o: src/dag_engine/simple_engine.cc
threaded_engine.o: src/dag_engine/threaded_engine.cc src/common/concurrent_blocking_queue.h src/common/spin_lock.h
narray.o: src/narray/narray.cc
narray_op_cpu.o: src/narray/narray_op_cpu.cc src/narray/narray_op-inl.h
narray_op_gpu.o: src/narray/narray_op_gpu.cu src/narray/narray_op-inl.h
Expand All @@ -71,7 +74,10 @@ operator_gpu.o: src/operator/operator_gpu.cu
api_registry.o: src/api_registry.cc
mxnet_api.o: api/mxnet_api.cc

test/api_registry_test: test/api_registry_test.cc $(OBJ) $(OBJCXX11) $(CUOBJ)
api/libmxnet.a: $(OBJ) $(OBJCXX11) $(CUOBJ)
api/libmxnet.so: $(OBJ) $(OBJCXX11) $(CUOBJ)

test/api_registry_test: test/api_registry_test.cc api/libmxnet.a

# link each test binary: pick compilable/linkable inputs out of the
# per-target prerequisite lists declared above
$(BIN) :
$(CXX) $(CFLAGS) -std=c++11 -o $@ $(filter %.cpp %.o %.c %.a %.cc, $^) $(LDFLAGS)
Expand All @@ -85,12 +91,15 @@ $(OBJCXX11) :
# link the shared library from all object files (and any .a/.cc prerequisites)
# NOTE(review): assumes -fPIC is already part of $(CFLAGS) — confirm in config.mk
$(SLIB) :
$(CXX) $(CFLAGS) -shared -o $@ $(filter %.cpp %.o %.c %.a %.cc, $^) $(LDFLAGS)

# archive all objects into the static library.
# $(AR) (default: ar) instead of a hard-coded `ar` so users can override it;
# $+ keeps prerequisites in order with duplicates preserved
$(ALIB):
	$(AR) cr $@ $+

# compile CUDA objects; host-compiler flags are forwarded via -Xcompiler
$(CUOBJ) :
$(NVCC) -c -o $@ $(NVCCFLAGS) -Xcompiler "$(CFLAGS)" $(filter %.cu, $^)

# link CUDA binaries; host compile and link flags forwarded to nvcc
$(CUBIN) :
$(NVCC) -o $@ $(NVCCFLAGS) -Xcompiler "$(CFLAGS)" -Xlinker "$(LDFLAGS)" $(filter %.cu %.cpp %.o, $^)

# remove every build product (objects, binaries, libraries) plus editor
# backup files, then ask dmlc-core to clean itself.
# $(MAKE) (not literal `make`) keeps -n/-j propagation working; `&&` replaces
# the old `;` chain and the vestigial `cd -` (each line runs in its own shell).
clean:
	$(RM) $(OBJ) $(OBJCXX11) $(BIN) $(CUBIN) $(CUOBJ) $(SLIB) $(ALIB) *~ */*~ */*/*~ */*/*/*~
	cd $(DMLC_CORE) && $(MAKE) clean
7 changes: 3 additions & 4 deletions README.md
Original file line number Diff line number Diff line change
@@ -1,16 +1,15 @@
# MXNet
This is an experimental project to put cxxnet and minerva together, nothing is working yet.
This is a project that combines lessons and ideas we learnt from [cxxnet](https://github.com/dmlc/cxxnet), [minerva](https://github.com/dmlc/minerva) and [purine2](https://github.com/purine/purine2).
- The interface is designed in collaboration by authors of three projects.
- Nothing is yet working

# Guidelines
* Use google c style
* Put module header in [include](include)
- move them to ```project-name/include``` when we finalized the name
* Depend on [dmlc-core](https://github.com/dmlc/dmlc-core)
* Doxygen comment every function, class and variable for the module headers
- Ref headers in [dmlc-core/include](https://github.com/dmlc/dmlc-core/tree/master/include/dmlc)
- Use the same style as dmlc-core
* Try to write some use-cases of the interface in [test](test)
- They do not need to link, but need to pass compile
* Minimize dependency, if possible only depend on dmlc-core
* Macro Guard CXX11 code by
- Try to make the interface compile when C++11 is not available (but with some functionality missing)
Expand Down
187 changes: 187 additions & 0 deletions api/mxnet_api.cc
Original file line number Diff line number Diff line change
@@ -1,4 +1,191 @@
#include <dmlc/base.h>
#include <dmlc/logging.h>
#include <mxnet/base.h>
#include <mxnet/narray.h>
#include <mxnet/api_registry.h>
#include "./mxnet_api.h"

// NOTE: all functions return 0 upon success
// consider adding a try/catch block for user error
// handling in the future
using namespace mxnet;

// macro to guard beginning and end section of all functions
// every function starts with API_BEGIN(); and finishes with API_END();
// API_END converts a dmlc::Error escaping the body into a -1 return
// (after stashing the message via MXHandleException); otherwise returns 0
#define API_BEGIN() try {
#define API_END() } catch(dmlc::Error &e) { return MXHandleException(e); } return 0;

/*!
 * \brief a helper function for error handling
 * will set the last error to be str_set when it is not NULL
 * \param str_set the error to set
 * \return a pointer message to last error
 */
const char *MXSetGetLastError_(const char *str_set) {
  // one string per thread: recording an error never races with other threads
  static thread_local std::string last_error;
  if (str_set == NULL) {
    // query-only call: leave the stored message untouched
    return last_error.c_str();
  }
  last_error.assign(str_set);
  return last_error.c_str();
}

/*! \brief return str message of the last error recorded on this thread
 *  (empty string when no error has been recorded yet) */
const char *MXGetLastError() {
return MXSetGetLastError_(NULL);
}

/*!
 * \brief handle an exception thrown by an API function body
 * records the message via MXSetGetLastError_ so MXGetLastError can report it
 * \param e the exception
 * \return the return value of the API after the exception is handled (-1)
 */
int MXHandleException(const dmlc::Error &e) {
MXSetGetLastError_(e.what());
return -1;
}

// NOTE: return value is added in API_END
/*!
 * \brief create a new, default-constructed ("none") NArray
 * \param out receives the heap-allocated handle; release with MXNArrayFree
 */
int MXNArrayCreateNone(NArrayHandle *out) {
API_BEGIN();
*out = new NArray();
API_END();
}

/*!
 * \brief create an NArray wrapping an existing CPU buffer via a TBlob
 * (the raw pointer is handed to TBlob, so the buffer is presumably shared,
 * not copied — caller must keep `data` alive; TODO confirm against TBlob)
 * \param data pointer to the user-owned float buffer
 * \param shape array of dimension sizes
 * \param ndim number of dimensions in shape
 * \param out receives the new handle; release with MXNArrayFree
 */
int MXNArrayCreateShareMem(mx_float *data,
mx_uint *shape,
mx_uint ndim,
NArrayHandle *out) {
API_BEGIN();
// trailing 0 is presumably the device id — verify against NArray's constructor
*out = new NArray(TBlob(data, TShape(shape, shape + ndim),
cpu::kDevMask), 0);
API_END();
}

/*!
 * \brief create a new NArray with the given shape on the given device
 * \param shape array of dimension sizes
 * \param ndim number of dimensions in shape
 * \param dev_mask device mask of the target device
 * \param dev_id device id of the target device
 * \param delay_alloc nonzero to delay allocation (converted to bool)
 * \param out receives the new handle; release with MXNArrayFree
 */
int MXNArrayCreate(const mx_uint *shape,
mx_uint ndim,
int dev_mask,
int dev_id,
int delay_alloc,
NArrayHandle *out) {
API_BEGIN();
*out = new NArray(TShape(shape, shape + ndim),
Context(dev_mask, dev_id),
delay_alloc != 0);
API_END();
}

/*!
 * \brief wait on a single NArray (delegates to NArray::Wait)
 * \param handle the narray to wait on
 */
int MXNArrayWait(NArrayHandle handle) {
API_BEGIN();
static_cast<NArray*>(handle)->Wait();
API_END();
}

/*! \brief wait on everything in the global engine
 *  (delegates to DAGEngine::Get()->WaitForAll) */
int MXNArrayWaitAll() {
API_BEGIN();
DAGEngine::Get()->WaitForAll();
API_END();
}

/*!
 * \brief destroy the NArray behind the handle
 * the handle is assumed to point at a heap-allocated NArray
 * (i.e. one produced by the MXNArrayCreate* calls above)
 * \param handle the narray to free
 */
int MXNArrayFree(NArrayHandle handle) {
API_BEGIN();
delete static_cast<NArray*>(handle);
API_END();
}

/*!
 * \brief report the shape of an NArray
 * for a "none" narray, *out_dim is set to 0 and *out_pdata is left untouched
 */
int MXNArrayGetShape(NArrayHandle handle,
                     mx_uint *out_dim,
                     const mx_uint **out_pdata) {
  API_BEGIN();
  NArray *narr = static_cast<NArray*>(handle);
  if (narr->is_none()) {
    *out_dim = 0;
  } else {
    // pointer into the narray's own shape storage — no copy is made
    const TShape &shape = narr->shape();
    *out_dim = shape.ndim();
    *out_pdata = shape.data();
  }
  API_END();
}

/*!
 * \brief expose the raw float data pointer of a CPU-resident NArray
 * \param handle the narray
 * \param out_pdata receives the data pointer; nullptr when the narray is none
 */
int MXNArrayGetData(NArrayHandle handle,
mx_float **out_pdata) {
API_BEGIN();
NArray *arr = static_cast<NArray*>(handle);
if (!arr->is_none()) {
// only CPU data can be handed out directly; CHECK raises on a GPU narray
CHECK(arr->ctx().dev_mask == cpu::kDevMask)
<< "MXNArrayGetData can only be called for NArray on CPU";
const TBlob &b = arr->data();
CHECK(b.CheckContiguous());
*out_pdata = b.FlatTo2D<cpu, mx_float>().dptr_;
} else {
*out_pdata = nullptr;
}
API_END();
}

/*!
 * \brief report the device context (mask and id) of an NArray
 * a "none" narray reports mask 0 and id 0
 */
int MXNArrayGetContext(NArrayHandle handle,
                       int *out_dev_mask,
                       int *out_dev_id) {
  API_BEGIN();
  NArray *narr = static_cast<NArray*>(handle);
  if (narr->is_none()) {
    *out_dev_mask = 0;
    *out_dev_id = 0;
  } else {
    const Context &ctx = narr->ctx();
    *out_dev_mask = ctx.dev_mask;
    *out_dev_id = ctx.dev_id;
  }
  API_END();
}

/*!
 * \brief list all registered narray functions
 * \param out_size receives the number of entries
 * \param out_array receives a pointer into the registry's own storage
 * (no copy, no ownership transferred to the caller)
 */
int MXListFunctions(mx_uint *out_size,
FunctionHandle **out_array) {
API_BEGIN();
auto &vec = FunctionRegistry::List();
*out_size = static_cast<mx_uint>(vec.size());
// registry entry pointers are exposed as opaque FunctionHandle values
*out_array = (FunctionHandle*)(dmlc::BeginPtr(vec));
API_END();
}

/*!
 * \brief look up a registered function by name
 * \param name the function name to search for
 * \param out receives the handle (result of FunctionRegistry::Find)
 */
int MXGetFunction(const char *name,
FunctionHandle *out) {
API_BEGIN();
*out = FunctionRegistry::Find(name);
API_END();
}

/*!
 * \brief get the name of a function handle
 * \param fun handle to a FunctionRegistry::Entry
 * \param out_name receives a pointer to the entry's own name string (no copy)
 */
int MXFuncGetName(FunctionHandle fun,
const char **out_name) {
API_BEGIN();
auto *f = static_cast<const FunctionRegistry::Entry *>(fun);
*out_name = f->name.c_str();
API_END();
}

/*!
 * \brief describe the argument counts and type mask of a registered function
 * \param fun handle to a FunctionRegistry::Entry
 * \param num_use_vars receives how many NArrays are passed in as use_vars
 * \param num_scalars receives how many scalar arguments are needed
 * \param num_mutate_vars receives how many NArrays are passed in as mutate_vars
 * \param type_mask receives the type mask of this function
 */
int MXFuncDescribe(FunctionHandle fun,
mx_uint *num_use_vars,
mx_uint *num_scalars,
mx_uint *num_mutate_vars,
int *type_mask) {
API_BEGIN();
auto *f = static_cast<const FunctionRegistry::Entry *>(fun);
*num_use_vars = f->num_use_vars;
*num_scalars = f->num_scalars;
*num_mutate_vars = f->num_mutate_vars;
*type_mask = f->type_mask;
API_END();
}

/*!
 * \brief invoke a registered function
 * array sizes must match what MXFuncDescribe reports for this entry
 * \param fun handle to a FunctionRegistry::Entry
 * \param use_vars narrays passed as use_vars
 * \param scalar_args scalar arguments
 * \param mutate_vars narrays passed as mutate_vars
 */
int MXFuncInvoke(FunctionHandle fun,
NArrayHandle *use_vars,
mx_float *scalar_args,
NArrayHandle *mutate_vars) {
API_BEGIN();
auto *f = static_cast<const FunctionRegistry::Entry *>(fun);
// opaque handles are reinterpreted back to NArray pointers; Entry is callable
(*f)((NArray**)(use_vars),
scalar_args,
(NArray**)(mutate_vars));
API_END();
}
41 changes: 29 additions & 12 deletions api/mxnet_api.h
Original file line number Diff line number Diff line change
Expand Up @@ -26,14 +26,24 @@ typedef float mx_float;
/*! \brief handle to NArray */
typedef void *NArrayHandle;
/*! \brief handle to a mxnet narray function that changes NArray */
typedef void *FunctionHandle;
typedef const void *FunctionHandle;
/*! \brief handle to a symbol that can be bind as operator */
typedef void *SymbolHandle;
/*! \brief handle to a NArrayOperator */
typedef void *OperatorHandle;
/*! \brief handle to a DataIterator */
typedef void *DataIterHandle;

/*!
* \brief return str message of the last error
 * all functions in this file will return 0 on success
 * and -1 when an error occurs;
 * MXGetLastError can then be called to retrieve the error message
 *
 * this function is thread-safe and can be called from different threads
*/
MXNET_DLL const char *MXGetLastError();

//--------------------------------
// Part 1: NArray creation and deletion
//--------------------------------
Expand Down Expand Up @@ -71,13 +81,16 @@ MXNET_DLL int MXNArrayCreateShareMem(mx_float *data,
* \param ndim the dimension of the shape
* \param dev_mask device mask, specify device we want to take
* \param dev_id the device id of the specific device
* \param delay_alloc whether to delay allocation until
* the narray is first mutated
* \param out the returning handle
* \return 0 when success, -1 when failure happens
*/
MXNET_DLL int MXNArrayCreate(const mx_uint *shape,
mx_uint ndim,
int dev_mask,
int dev_id,
int delay_alloc,
NArrayHandle *out);
/*!
* \brief wait until all the operation with respect NArray
Expand Down Expand Up @@ -105,25 +118,27 @@ MXNET_DLL int MXNArrayFree(NArrayHandle handle);
* \param out_pdata pointer holder to get data pointer of the shape
* \return 0 when success, -1 when failure happens
*/
MXNET_DLL int MXNArrayGetShape(NArrayHandle *handle,
MXNET_DLL int MXNArrayGetShape(NArrayHandle handle,
mx_uint *out_dim,
mx_uint **out_pdata);
const mx_uint **out_pdata);
/*!
* \brief get the content of the data in NArray
* \param handle the handle to the narray
* \param out_pdata pointer holder to get pointer of data
* \return 0 when success, -1 when failure happens
*/
MXNET_DLL int MXNArrayGetData(NArrayHandle *handle,
MXNET_DLL int MXNArrayGetData(NArrayHandle handle,
mx_float **out_pdata);
/*!
* \brief get the device of the NArray
* \brief get the context of the NArray
* \param handle the handle to the narray
* \param out_device the output device mask
* \param out_dev_mask the output device mask
* \param out_dev_id the output device id
* \return 0 when success, -1 when failure happens
*/
MXNET_DLL int MXNArrayGetDevice(NArrayHandle *handle,
int *out_device);
MXNET_DLL int MXNArrayGetContext(NArrayHandle handle,
int *out_dev_mask,
int *out_dev_id);

//--------------------------------
// Part 2: functions on NArray
Expand Down Expand Up @@ -158,13 +173,15 @@ MXNET_DLL int MXFuncGetName(FunctionHandle fun,
* \param num_use_vars how many NArrays to be passed in as used_vars
* \param num_scalars scalar variable is needed
* \param num_mutate_vars how many NArrays to be passed in as mutate_vars
* \param type_mask the type mask of this function
* \return 0 when success, -1 when failure happens
* \sa MXFuncInvoke
*/
MXNET_DLL int MXFuncDescribeArgs(FunctionHandle fun,
mx_uint *num_use_vars,
mx_uint *num_scalars,
mx_uint *num_mutate_vars);
MXNET_DLL int MXFuncDescribe(FunctionHandle fun,
mx_uint *num_use_vars,
mx_uint *num_scalars,
mx_uint *num_mutate_vars,
int *type_mask);

/*!
* \brief invoke a function, the array size of passed in arguments
Expand Down
17 changes: 17 additions & 0 deletions api/python/mxnet/__init__.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,17 @@
#!/usr/bin/env python
# coding: utf-8
"""MXNet: a concise, fast and flexible framework for deep learning
MXNet is a project that evolves from cxxnet, minerva and purine2.
The interface is designed in collaboration by authors of three projects.
Version : 0.10
"""
from __future__ import absolute_import

from .context import Context, current_context
from .narray import NArray, _init_function_registry
from .function import _FunctionRegistry

# this is a global function registry that can be used to invoke functions;
# built once at import time so `mxnet.op` is usable as soon as the package loads
op = _init_function_registry(_FunctionRegistry())
Loading

0 comments on commit a8f9ee3

Please sign in to comment.