ParallelNativeTBB.h
#pragma once

#include <ATen/ATen.h>

#include <atomic>
#include <cstddef>
#include <exception>

#ifdef _WIN32
#ifndef WIN32_LEAN_AND_MEAN
#define WIN32_LEAN_AND_MEAN
#endif
#endif
#include <tbb/tbb.h>

#define INTRA_OP_PARALLEL
namespace at {

template <class F>
inline void parallel_for(
    const int64_t begin,
    const int64_t end,
    const int64_t grain_size,
    const F& f) {
  TORCH_CHECK(grain_size >= 0);
  at::internal::lazy_init_num_threads();
  if (begin >= end) {
    return;
  }
  if ((end - begin) < grain_size || get_num_threads() == 1) {
    f(begin, end);
    return;
  }

  // Choose number of tasks based on grain size and number of threads.
  int64_t chunk_size = divup((end - begin), get_num_threads());
  // Make sure each task is at least grain_size size.
  chunk_size = std::max(grain_size, chunk_size);

  // Capture only the first exception thrown by a worker and rethrow it
  // on the calling thread once the parallel region has finished.
  std::atomic_flag err_flag = ATOMIC_FLAG_INIT;
  std::exception_ptr eptr;
  tbb::parallel_for(tbb::blocked_range<int64_t>(begin, end, chunk_size),
    [&eptr, &err_flag, f](const tbb::blocked_range<int64_t>& r) {
      try {
        f(r.begin(), r.end());
      } catch (...) {
        if (!err_flag.test_and_set()) {
          eptr = std::current_exception();
        }
      }
    });
  if (eptr) {
    std::rethrow_exception(eptr);
  }
}
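// Usage sketch (illustrative, not part of the original header): ATen code
// typically reaches this overload through ATen/Parallel.h rather than by
// including this file directly. The buffer name, size, and grain size in
// the commented example below are assumptions made for illustration.
//
//   void scale_inplace(float* data, int64_t n, float alpha) {
//     at::parallel_for(0, n, /*grain_size=*/2048,
//         [&](int64_t begin, int64_t end) {
//       for (int64_t i = begin; i < end; ++i) {
//         data[i] *= alpha;
//       }
//     });
//   }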
template <class scalar_t, class F, class SF>
inline scalar_t parallel_reduce(
    const int64_t begin,
    const int64_t end,
    const int64_t grain_size,
    const scalar_t ident,
    const F& f,
    const SF& sf) {
  TORCH_CHECK(grain_size >= 0);
  at::internal::lazy_init_num_threads();
  if (begin >= end) {
    return ident;
  }
  if ((end - begin) < grain_size || get_num_threads() == 1) {
    return f(begin, end, ident);
  }

  // Choose number of tasks based on grain size and number of threads.
  int64_t chunk_size = divup((end - begin), get_num_threads());
  // Make sure each task is at least grain_size size.
  chunk_size = std::max(grain_size, chunk_size);

  scalar_t result;
  std::atomic_flag err_flag = ATOMIC_FLAG_INIT;
  std::exception_ptr eptr;
  result = tbb::parallel_reduce(
    tbb::blocked_range<int64_t>(begin, end, chunk_size), ident,
    [&eptr, &err_flag, f]
      (const tbb::blocked_range<int64_t>& r, scalar_t ident) {
      try {
        return f(r.begin(), r.end(), ident);
      } catch (...) {
        if (!err_flag.test_and_set()) {
          eptr = std::current_exception();
        }
        return ident;
      }
    },
    sf
  );
  if (eptr) {
    std::rethrow_exception(eptr);
  }
  return result;
}

template<typename F0, typename F1>
void intraop_invoke(const F0& f0, const F1& f1) {
  tbb::parallel_invoke(f0, f1);
}

} // namespace at
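// Usage sketch (illustrative, not part of the original header): reducing over
// a range with the parallel_reduce overload above. `f` folds a sub-range into
// a partial result starting from `ident`; `sf` combines two partial results.
// The buffer, grain size, and std::plus combiner are assumptions made for the
// example; <functional> would be needed for std::plus.
//
//   float sum_buffer(const float* data, int64_t n) {
//     return at::parallel_reduce(
//         0, n, /*grain_size=*/2048, /*ident=*/0.0f,
//         [&](int64_t begin, int64_t end, float partial) {
//           for (int64_t i = begin; i < end; ++i) {
//             partial += data[i];
//           }
//           return partial;
//         },
//         std::plus<float>());
//   }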