// MiscUtils.h (forked from pytorch/pytorch)

#include "ATen/ATen.h"
#include "THC.h" // for USE_MAGMA
#ifdef USE_MAGMA
#include <magma.h>
#include <magma_types.h>
#endif

namespace at {
namespace native {

#ifdef USE_MAGMA

// RAII wrapper for a MAGMA queue
struct MAGMAQueue {

  // No default constructor: destroying a default-constructed object would
  // tear down a queue that was never initialized.
  MAGMAQueue() = delete;

  // Constructor
  explicit MAGMAQueue(int64_t device_id) {
    // Touch the global context (otherwise unused here), presumably to make
    // sure ATen's global state exists before the queue is created.
    auto& context = at::globalContext();
    (void)context;
    magma_queue_create_from_cuda(
        device_id,
        at::cuda::getCurrentCUDAStream(),
        at::cuda::getCurrentCUDABlasHandle(),
        at::cuda::getCurrentCUDASparseHandle(),
        &magma_queue_);
  }

  // Getter
  magma_queue_t get_queue() const { return magma_queue_; }

  // Destructor
  ~MAGMAQueue() {
    magma_queue_destroy(magma_queue_);
  }

 private:
  magma_queue_t magma_queue_;
};
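
// Illustrative usage sketch (not from the original file): a solver would
// typically create the queue per call so it is destroyed automatically,
// even if an AT_ERROR throws midway. `exampleMagmaCall` and the magma_*
// routine it stands in for are hypothetical placeholders.
//
//   static void exampleMagmaCall(const Tensor& self) {
//     MAGMAQueue magma_queue(self.get_device());
//     // ... hand magma_queue.get_queue() to a magma_* routine ...
//   }   // queue destroyed here by ~MAGMAQueue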

static inline magma_int_t magma_int_cast(int64_t value, const char* varname) {
  auto result = static_cast<magma_int_t>(value);
  if (static_cast<int64_t>(result) != value) {
    AT_ERROR("magma: The value of ", varname, "(", (long long)value,
             ") is too large to fit into a magma_int_t (", sizeof(magma_int_t), " bytes)");
  }
  return result;
}
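
// Illustrative usage sketch (not from the original file): magma_int_cast
// guards the narrowing from ATen's int64_t sizes to MAGMA's possibly 32-bit
// magma_int_t, raising a readable error instead of silently truncating.
// A hypothetical caller converting tensor dimensions:
//
//   magma_int_t n   = magma_int_cast(self.size(-1), "n");
//   magma_int_t lda = magma_int_cast(std::max<int64_t>(1, n), "lda");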
#endif

// Creates a Storage holding `size` elements of type T, backed by pinned
// (page-locked) host memory. `dummy` is only used to reach a CPU byte-typed
// backend through its Type; its data is never touched.
template<class T>
static inline Storage pin_memory(int64_t size, Tensor dummy) {
  int64_t adjusted_size = size * sizeof(T);
  auto* allocator = cuda::getPinnedMemoryAllocator();
  auto& backend = dummy.type().toBackend(Backend::CPU).toScalarType(kByte);
  return backend.storageWithAllocator(adjusted_size, allocator);
}
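
// Illustrative usage sketch (not from the original file): a hypothetical
// batched-solver caller allocating pinned host scratch for MAGMA info codes.
// `batch_size` is an assumed variable; `self` is any tensor whose Type seeds
// the CPU byte backend used for the allocation.
//
//   auto infos = pin_memory<magma_int_t>(batch_size, self);
//   auto* infos_data = static_cast<magma_int_t*>(infos.data());
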
} // namespace native
} // namespace at