// profiler_cuda.cpp (forked from pytorch/pytorch)
#include <torch/csrc/autograd/profiler.h>
#include <c10/cuda/CUDAGuard.h>
#include <nvToolsExt.h>

#include <sstream>

namespace torch { namespace autograd { namespace profiler {
namespace {
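// Convert a CUDA runtime error into a std::runtime_error that carries the
// file and line of the failing call. cudaErrorInitializationError gets a
// dedicated message because it commonly shows up when the profiler is run
// in CUDA mode together with a multi-worker DataLoader.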
static inline void cudaCheck(cudaError_t result, const char * file, int line) {
  if(result != cudaSuccess) {
    std::stringstream ss;
    ss << file << ":" << line << ": ";
    if (result == cudaErrorInitializationError) {
      // It is common for users to use DataLoader with multiple workers
      // and the autograd profiler. Throw a nice error message here.
      ss << "CUDA initialization error. "
         << "This can occur if one runs the profiler in CUDA mode on code "
         << "that creates a DataLoader with num_workers > 0. This operation "
         << "is currently unsupported; potential workarounds are: "
         << "(1) don't use the profiler in CUDA mode or (2) use num_workers=0 "
         << "in the DataLoader or (3) don't profile the data loading portion "
         << "of your code. https://github.com/pytorch/pytorch/issues/6313 "
         << "tracks profiler support for multi-worker DataLoader.";
    } else {
      ss << cudaGetErrorString(result);
    }
    throw std::runtime_error(ss.str());
  }
}
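// Check the result of a CUDA call and report the call site on failure.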
#define TORCH_CUDA_CHECK(result) cudaCheck(result,__FILE__,__LINE__);
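// Concrete implementation of the CUDAStubs interface. The profiler core is
// built without a hard CUDA dependency and calls into whichever stubs were
// registered; this translation unit supplies the real CUDA-backed ones.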
struct CUDAMethods : public CUDAStubs {
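  // Record a (CPU timestamp, CUDA event) pair: create an event and enqueue it
  // on the current stream so GPU-side timing can later be read via elapsed().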
  void record(int* device, CUDAEventStub* event, int64_t* cpu_ns) override {
    TORCH_CUDA_CHECK(cudaGetDevice(device));
    TORCH_CUDA_CHECK(cudaEventCreate(event));
    auto stream = at::cuda::getCurrentCUDAStream();
    *cpu_ns = getTime();
    TORCH_CUDA_CHECK(cudaEventRecord(*event, stream));
  }
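  // Wait for both events to complete, then return the time between them in
  // microseconds (cudaEventElapsedTime reports milliseconds).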
  float elapsed(CUDAEventStub event, CUDAEventStub event2) override {
    TORCH_CUDA_CHECK(cudaEventSynchronize(event));
    TORCH_CUDA_CHECK(cudaEventSynchronize(event2));
    float ms;
    TORCH_CUDA_CHECK(cudaEventElapsedTime(&ms, event, event2));
    return ms*1000.0;
  }
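  // Thin wrappers around NVTX so profiled ranges show up in nvprof/Nsight.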
  void nvtxMarkA(const char* name) override {
    ::nvtxMark(name);
  }
  void nvtxRangePushA(const char* name) override {
    ::nvtxRangePushA(name);
  }
  void nvtxRangePop() override {
    ::nvtxRangePop();
  }
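  // Run `op` once per visible CUDA device, switching the current device with
  // an OptionalCUDAGuard so the caller's device is restored afterwards.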
  void onEachDevice(std::function<void(int)> op) override {
    at::cuda::OptionalCUDAGuard device_guard;
    int count = at::cuda::device_count();
    for(int i = 0; i < count; i++) {
      device_guard.set_index(i);
      op(i);
    }
  }
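  // Block until all outstanding GPU work on the current device has finished.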
  void synchronize() override {
    cudaDeviceSynchronize();
  }
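  // Signals that real CUDA stubs have been registered with the profiler.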
  bool enabled() override {
    return true;
  }
};
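// Static-registration idiom: constructing this file-scope object at program
// startup registers the CUDA methods with the profiler, so merely linking
// this translation unit enables CUDA support.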
struct RegisterCUDAMethods {
  RegisterCUDAMethods() {
    static CUDAMethods methods;
    registerCUDAMethods(&methods);
  }
};
RegisterCUDAMethods reg;
} // namespaces
} // namespace profiler
} // namespace autograd
} // namespace torch
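// Usage sketch (an illustration, not part of this file): assuming the
// Python-side autograd profiler of this PyTorch version accepts
// use_cuda=True, timing requests are routed through the stubs registered
// above:
//
//   import torch
//   with torch.autograd.profiler.profile(use_cuda=True) as prof:
//       x = torch.randn(1024, 1024, device="cuda")
//       y = x @ x
//   print(prof.key_averages().table(sort_by="cuda_time_total"))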