// adder_cuda.cpp
#include <torch/extension.h>
#include <vector>
// Forward declarations of the CUDA launchers, defined in the accompanying
// CUDA source file compiled alongside this one.
// KW/KH: kernel width/height; SW/SH: stride; PW/PH: padding.
int adder_cuda_forward(
    const at::Tensor &input,
    const at::Tensor &weight,
    // const at::Tensor &bias,
    at::Tensor &output,
    int KW, int KH,
    int SW, int SH,
    int PW, int PH);

int adder_cuda_backward_grad_in(
    at::Tensor &grad_out,
    at::Tensor &input,
    at::Tensor &weight,
    at::Tensor &grad_in,
    int KW, int KH,
    int SW, int SH,
    int PW, int PH);

int adder_cuda_backward_grad_weight(
    at::Tensor &grad_out,
    at::Tensor &input,
    at::Tensor &weight,
    at::Tensor &grad_weight,
    int KW, int KH,
    int SW, int SH,
    int PW, int PH);
// Argument validation: every tensor handed to the CUDA launchers must live on
// the GPU and be contiguous in memory.
#define CHECK_CUDA(x) TORCH_CHECK((x).is_cuda(), #x " must be a CUDA tensor")
#define CHECK_CONTIGUOUS(x) TORCH_CHECK((x).is_contiguous(), #x " must be contiguous")
#define CHECK_INPUT(x) \
    CHECK_CUDA(x);     \
    CHECK_CONTIGUOUS(x)
int adder_forward(
    const at::Tensor &input,
    const at::Tensor &weight,
    // const at::Tensor &bias,
    at::Tensor &output,
    int KW, int KH,
    int SW, int SH,
    int PW, int PH)
{
    CHECK_INPUT(input);
    CHECK_INPUT(weight);
    CHECK_INPUT(output);
    return adder_cuda_forward(
        input,
        weight,
        // bias,
        output,
        KW, KH,
        SW, SH,
        PW, PH);
}
int adder_backward_input(
    at::Tensor &grad_out,
    at::Tensor &input,
    at::Tensor &weight,
    at::Tensor &grad_in,
    int KW, int KH,
    int SW, int SH,
    int PW, int PH)
{
    CHECK_INPUT(grad_out);
    CHECK_INPUT(input);
    CHECK_INPUT(weight);
    CHECK_INPUT(grad_in);
    return adder_cuda_backward_grad_in(
        grad_out,
        input,
        weight,
        grad_in,
        KW, KH,
        SW, SH,
        PW, PH);
}
int adder_backward_weight(
    at::Tensor &grad_out,
    at::Tensor &input,
    at::Tensor &weight,
    at::Tensor &grad_weight,
    int KW, int KH,
    int SW, int SH,
    int PW, int PH)
{
    CHECK_INPUT(grad_out);
    CHECK_INPUT(input);
    CHECK_INPUT(weight);
    CHECK_INPUT(grad_weight);
    return adder_cuda_backward_grad_weight(
        grad_out,
        input,
        weight,
        grad_weight,
        KW, KH,
        SW, SH,
        PW, PH);
}
PYBIND11_MODULE(TORCH_EXTENSION_NAME, m)
{
    m.def("forward", &adder_forward, "adder forward (CUDA)");
    m.def("backward_input", &adder_backward_input, "adder backward input (CUDA)");
    m.def("backward_weight", &adder_backward_weight, "adder backward weight (CUDA)");
}
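
// A minimal usage sketch (not part of this file): the extension is typically
// JIT-compiled and called from Python. The kernel file name
// "adder_cuda_kernel.cu" below is an assumption, not confirmed by this file;
// substitute the repo's actual CUDA source. Note that `output` must be
// pre-allocated, since the extension writes into it in place.
//
//   from torch.utils.cpp_extension import load
//
//   adder = load(name="adder_cuda",
//                sources=["adder_cuda.cpp", "adder_cuda_kernel.cu"])
//
//   # hypothetical configuration: 3x3 kernel, stride 1, padding 1
//   adder.forward(input, weight, output, 3, 3, 1, 1, 1, 1)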