
fix crash when profiler not enabled (#10306)
* fix crash when profiler not enabled

* fix

* Update graph_executor.cc

* Update graph_executor.cc

* use nosetests to try and prevent the hang

* shutdown after GPU pass

* remove temp

* remove temp
cjolivier01 authored Mar 31, 2018
1 parent c69a071 commit 4ad0bba
Showing 4 changed files with 29 additions and 24 deletions.
3 changes: 2 additions & 1 deletion src/engine/threaded_engine.h
@@ -398,7 +398,8 @@ class ThreadedEngine : public Engine {
  }

  int bulk_size() const override {
-    return profiler::Profiler::Get()->AggregateRunning() ? 0 : BulkStatusStore::Get()->bulk_size;
+    const profiler::Profiler *prof = profiler::Profiler::Get();
+    return (prof && prof->AggregateRunning()) ? 0 : BulkStatusStore::Get()->bulk_size;
  }

  int set_bulk_size(int bulk_size) override {
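Note on the change above: the new null check implies that profiler::Profiler::Get() can return a null pointer when profiling was never enabled, and the previous code dereferenced the result unconditionally. The following self-contained C++ sketch reproduces the shape of the fix; the Profiler class here is a stand-in written for this example only, not MXNet's real profiler API.

// null_guard_sketch.cc -- minimal illustration of the cache-then-check
// pattern applied in ThreadedEngine::bulk_size(). Profiler below is a
// hypothetical stand-in; only the guard pattern mirrors the real fix.
#include <iostream>

class Profiler {
 public:
  // Models the assumption behind the fix: the singleton accessor can
  // return nullptr when profiling was never enabled.
  static Profiler* Get() { return instance_; }
  bool AggregateRunning() const { return aggregate_running_; }

 private:
  static Profiler* instance_;
  bool aggregate_running_ = false;
};

Profiler* Profiler::instance_ = nullptr;  // profiler never enabled

int bulk_size(int stored_bulk_size) {
  const Profiler* prof = Profiler::Get();
  // Pre-fix shape -- a null dereference whenever prof is nullptr:
  //   return prof->AggregateRunning() ? 0 : stored_bulk_size;
  return (prof && prof->AggregateRunning()) ? 0 : stored_bulk_size;
}

int main() {
  std::cout << bulk_size(15) << std::endl;  // prints 15 instead of crashing
  return 0;
}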
4 changes: 3 additions & 1 deletion src/executor/graph_executor.cc
@@ -1348,8 +1348,9 @@ void GraphExecutor::InitOpSegs() {
  // Generate segments based on the graph structure
  bool prefer_bulk_exec_inference = dmlc::GetEnv("MXNET_EXEC_BULK_EXEC_INFERENCE", true);
  // Whether to perform bulk exec for training
+  const profiler::Profiler *prof = profiler::Profiler::Get();
  bool prefer_bulk_exec = dmlc::GetEnv("MXNET_EXEC_BULK_EXEC_TRAIN", 1)
-                         && !profiler::Profiler::Get()->AggregateEnabled();
+                         && (!prof || !prof->AggregateEnabled());

  bool is_training = num_forward_nodes_ != total_num_nodes;

@@ -1362,6 +1363,7 @@ void GraphExecutor::InitOpSegs() {
    }
  }

+
void GraphExecutor::BulkTrainingOpSegs(size_t total_num_nodes) {
  // The maximum number of node in a segment executed in bulk
  size_t num_nodes_threshold = dmlc::GetEnv("MXNET_EXEC_BULK_EXEC_MAX_NODE_TRAIN", 15);
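The same null guard appears in InitOpSegs() above, this time combined with an environment switch. As a rough self-contained illustration of the resulting decision, the sketch below assumes the profiler singleton is absent; getenv_with_default is a local stand-in for dmlc::GetEnv, written only so the example compiles on its own.

// bulk_exec_decision_sketch.cc -- the guarded decision from
// GraphExecutor::InitOpSegs(), reduced to a runnable example.
#include <cstdlib>
#include <iostream>

// Hypothetical stand-in for dmlc::GetEnv: read an int env var with a default.
static int getenv_with_default(const char* name, int default_val) {
  const char* value = std::getenv(name);
  return value ? std::atoi(value) : default_val;
}

// Hypothetical stand-in for MXNet's profiler.
struct Profiler {
  bool AggregateEnabled() const { return false; }
};

// Returns nullptr to model the "profiler not enabled" case the commit fixes.
static const Profiler* GetProfiler() { return nullptr; }

int main() {
  const Profiler* prof = GetProfiler();
  // Bulk execution is preferred when the env switch is on (default 1)
  // AND either no profiler exists or aggregation is disabled.
  const bool prefer_bulk_exec =
      getenv_with_default("MXNET_EXEC_BULK_EXEC_TRAIN", 1) &&
      (!prof || !prof->AggregateEnabled());
  std::cout << std::boolalpha << prefer_bulk_exec << std::endl;  // true
  return 0;
}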
43 changes: 22 additions & 21 deletions tests/python/gpu/test_operator_gpu.py
@@ -904,81 +904,81 @@ def test_1d_pooling(pool_type):
    kernel = (4,)
    pad = (2,)
    stride = (2,)

    ctx_list = []
    sym_list = []

    pooling_convention = 'valid'

    ctx_list.append({'ctx': mx.cpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
    sym_list.append(mx.sym.Pooling(kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
                                   pooling_convention=pooling_convention, global_pool=True, name='pool'))

    ctx_list.append({'ctx': mx.cpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
    sym_list.append(mx.sym.Pooling(kernel=kernel, pool_type=pool_type,
                                   pooling_convention=pooling_convention, global_pool=True, name='pool'))

    ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
    sym_list.append(mx.sym.Pooling(kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
                                   pooling_convention=pooling_convention, global_pool=True, cudnn_off=False, name='pool'))

    ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
    sym_list.append(mx.sym.Pooling(kernel=kernel, pool_type=pool_type,
                                   pooling_convention=pooling_convention, global_pool=True, cudnn_off=False, name='pool'))

    ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
    sym_list.append(mx.sym.Pooling(kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
                                   pooling_convention=pooling_convention, global_pool=True, cudnn_off=True, name='pool'))

    ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
    sym_list.append(mx.sym.Pooling(kernel=kernel, pool_type=pool_type,
                                   pooling_convention=pooling_convention, global_pool=True, cudnn_off=True, name='pool'))

    check_consistency(sym_list, ctx_list)

def test_2d_pooling(pool_type):
    data = (2, 3, 20, 20)
    kernel = (4, 4)
    pad = (2, 2)
    stride = (2, 2)

    ctx_list = []
    sym_list = []

    pooling_convention = 'valid'

    ctx_list.append({'ctx': mx.cpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
    sym_list.append(mx.sym.Pooling_v1(kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
                                      pooling_convention=pooling_convention, global_pool=True, name='pool'))

    ctx_list.append({'ctx': mx.cpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
    sym_list.append(mx.sym.Pooling_v1(kernel=kernel, pool_type=pool_type,
                                      pooling_convention=pooling_convention, global_pool=True, name='pool'))

    ctx_list.append({'ctx': mx.cpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
    sym_list.append(mx.sym.Pooling(kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
                                   pooling_convention=pooling_convention, global_pool=True, name='pool'))

    ctx_list.append({'ctx': mx.cpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
    sym_list.append(mx.sym.Pooling(kernel=kernel, pool_type=pool_type,
                                   pooling_convention=pooling_convention, global_pool=True, name='pool'))

    ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
    sym_list.append(mx.sym.Pooling(kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
                                   pooling_convention=pooling_convention, global_pool=True, cudnn_off=False, name='pool'))

    ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
    sym_list.append(mx.sym.Pooling(kernel=kernel, pool_type=pool_type,
                                   pooling_convention=pooling_convention, global_pool=True, cudnn_off=False, name='pool'))

    ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
    sym_list.append(mx.sym.Pooling(kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
                                   pooling_convention=pooling_convention, global_pool=True, cudnn_off=True, name='pool'))

    ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
    sym_list.append(mx.sym.Pooling(kernel=kernel, pool_type=pool_type,
                                   pooling_convention=pooling_convention, global_pool=True, cudnn_off=True, name='pool'))

    check_consistency(sym_list, ctx_list)

test_1d_pooling('max')
@@ -1784,3 +1784,4 @@ def test_kernel_error_checking():
if __name__ == '__main__':
    import nose
    nose.runmodule()
+
3 changes: 2 additions & 1 deletion tests/python/gpu/test_tvm_bridge.py
@@ -62,4 +62,5 @@ def check(target, dtype):


if __name__ == "__main__":
test_tvm_bridge()
import nose
nose.runmodule()
