-
Notifications
You must be signed in to change notification settings - Fork 3
Expand file tree
/
Copy pathcudatools.h
More file actions
234 lines (209 loc) · 7.31 KB
/
cudatools.h
File metadata and controls
234 lines (209 loc) · 7.31 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
/*
Copyright (c) 2019 Sven Willner <sven.willner@gmail.com>
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation files
(the "Software"), to deal in the Software without restriction,
including without limitation the rights to use, copy, modify, merge,
publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
#ifndef CUDATOOLS_H
#define CUDATOOLS_H
#if defined(USE_CUDA) && defined(__CUDACC__)
#include <cooperative_groups.h>
#include <cuda.h>
#include <cuda_runtime_api.h>
#include <device_functions.h>
#endif
#include <algorithm>
#include <cstddef>
#include <cstdlib>
#include <cstring>
#include <fstream>
#include <new>
#include <stdexcept>
#include <tuple>
#include <utility>
#include "nvector.h"
namespace cudatools {
// Error type for cudatools failures (allocation, memcpy, kernel launch),
// so callers can catch them separately from generic std::runtime_error.
class exception : public std::runtime_error {
  public:
    // Inherit runtime_error's explicit (const char*) and (const std::string&)
    // constructors -- identical interface to spelling them out by hand.
    using std::runtime_error::runtime_error;
};
// Forward declaration (defined below). `only_device` selects plain device
// memory (cudaMalloc) vs. managed memory reachable from host and device
// (cudaMallocManaged) -- see vector::allocate().
template<typename T, bool only_device = false>
class vector;
// Opaque handle around a raw device pointer. Only cudatools::vector can
// construct one (the constructor is protected and both vector
// instantiations are friends), so bare device pointers cannot be fabricated
// elsewhere; consumers get the underlying T* back via the implicit
// conversion operator.
template<typename T>
class device_pointer {
friend class vector<T, true>;
friend class vector<T, false>;
protected:
// raw pointer into device (or managed) memory, owned by the vector
T* p;
explicit device_pointer(T* p_) : p(p_) {}
public:
// Implicit conversion back to the raw pointer, e.g. for kernel arguments.
operator T*() { return p; }
};
template<typename T, bool only_device>
class vector {
protected:
T* data = nullptr;
std::size_t size_m = 0;
inline void allocate(std::size_t size_p) {
#if defined(USE_CUDA) && defined(__CUDACC__)
cudaError_t res;
if (only_device) {
res = cudaMalloc(&data, size_p * sizeof(T));
} else {
res = cudaMallocManaged(&data, size_p * sizeof(T));
}
if (res != cudaSuccess) {
throw cudatools::exception(cudaGetErrorString(res));
}
#else
data = static_cast<T*>(std::malloc(size_p * sizeof(T)));
if (data == nullptr) {
throw std::bad_alloc();
}
#endif
size_m = size_p;
}
public:
using iterator = T*;
vector() = default;
explicit vector(std::size_t size_p) { allocate(size_p); }
vector(const vector&) = delete;
vector(vector&&) noexcept = default;
~vector() { reset(); }
const T* begin() const { return data; }
T* begin() { return data; }
const T* end() const { return data + size_m; }
T* end() { return data + size_m; }
inline void resize(std::size_t size_p) {
reset();
allocate(size_p);
}
inline T* pointer() { return device_pointer<T>(data); }
inline void resize(std::size_t size_p, const T& value) {
resize(size_p);
if (only_device) {
(void)value;
} else {
std::fill(data, data + size_p, value);
}
}
inline void reset() {
if (size_m > 0) {
#if defined(USE_CUDA) && defined(__CUDACC__)
cudaFree(data);
#else
std::free(data);
#endif
size_m = 0;
}
}
inline std::size_t size() const { return size_m; }
inline void get(T* dest) {
#if defined(USE_CUDA) && defined(__CUDACC__)
if (only_device) {
const auto res = cudaMemcpy(dest, data, size_m * sizeof(T), cudaMemcpyDeviceToHost);
if (res != cudaSuccess) {
throw cudatools::exception(cudaGetErrorString(res));
}
} else {
#else
{
#endif
std::memcpy(dest, data, size_m * sizeof(T));
}
}
inline void set(const T* src) {
#if defined(USE_CUDA) && defined(__CUDACC__)
if (only_device) {
const auto res = cudaMemcpy(data, src, size_m * sizeof(T), cudaMemcpyHostToDevice);
if (res != cudaSuccess) {
throw cudatools::exception(cudaGetErrorString(res));
}
} else {
#else
{
#endif
std::memcpy(data, src, size_m * sizeof(T));
}
}
inline void read(std::ifstream& file) {
#if defined(USE_CUDA) && defined(__CUDACC__)
if (only_device) {
auto tmp = static_cast<T*>(std::malloc(size_m * sizeof(T)));
if (tmp == nullptr) {
throw std::bad_alloc();
}
file.read(reinterpret_cast<char*>(tmp), size_m * sizeof(T));
set(tmp);
std::free(tmp);
} else {
#else
{
#endif
file.read(reinterpret_cast<char*>(data), size_m * sizeof(T));
}
}
inline T& operator[](std::size_t i) { return data[i]; }
inline const T& operator[](std::size_t i) const { return data[i]; }
};
} // namespace cudatools
#if defined(USE_CUDA) && defined(__CUDACC__)
#define CUDA_DEVICE __device__
#define CUDA_GLOBAL __global__
#else
#define CUDA_DEVICE
#define CUDA_GLOBAL
#endif
namespace nvector {
namespace detail_gpu {
#if defined(USE_CUDA) && defined(__CUDACC__)
// One thread per element i < n: calls func(i, it[i], its[i]...).
// Expects a 1D launch with gridDim.x * blockDim.x >= n; the bounds check
// handles the ragged last block.
template<typename Function, typename Arg, typename... Args>
__global__ void device_func(Function func, std::size_t n, Arg* it, Args*... its) {
    // Widen before multiplying: blockIdx.x * blockDim.x is evaluated in
    // unsigned int and can silently overflow even though n is std::size_t.
    const auto i = static_cast<std::size_t>(blockIdx.x) * blockDim.x + threadIdx.x;
    if (i < n) {
        func(i, it[i], its[i]...);
    }
}
#endif
// Apply func(i, elem...) element-wise across one or more aligned views.
// CUDA builds launch device_func over all elements; host builds fall back
// to the parallel CPU loop. All views must have identical slices.
// Throws std::runtime_error on mismatched slices or launch failure.
template<typename Function, typename Arg, typename... Args>
inline void loop_foreach_aligned_view_gpu(Function&& func, Arg&& view, Args&&... views) {
#if defined(USE_CUDA) && defined(__CUDACC__)
    if (!nvector::detail::all_values_equal(view.slices(), views.slices()...)) {
        throw std::runtime_error("views have different slices");
    }
    const auto n = view.total_size();
    if (n == 0) {
        return;  // a 0-block grid would itself be a launch error
    }
    constexpr auto block_size = 256;
    device_func<<<(n + block_size - 1) / block_size, block_size>>>(func, n, &view[0], &views[0]...);
    // Kernel launches fail silently; surface bad-configuration errors here
    // instead of letting them poison a later, unrelated CUDA call.
    const auto res = cudaGetLastError();
    if (res != cudaSuccess) {
        throw std::runtime_error(cudaGetErrorString(res));
    }
#else
    nvector::detail::loop_foreach_aligned_view_parallel(std::forward<Function>(func), std::forward<Arg>(view), std::forward<Args>(views)...);
#endif
}
// Recursive step that builds the compile-time index pack Ns... = 0..n-1
// for the tuple of views: each instantiation appends the current index `i`
// to Ns and recurses with i + 1 until the base case (i == n) below.
template<std::size_t i, std::size_t n, typename... Args>
struct foreach_helper {
template<typename Function, std::size_t... Ns>
static constexpr void foreach_aligned_gpu(Function&& func, const std::tuple<Args...>& views) {
// `template` keyword required: foreach_aligned_gpu is a dependent template member.
foreach_helper<i + 1, n, Args...>::template foreach_aligned_gpu<Function, Ns..., i>(std::forward<Function>(func), views);
}
};
// Base case (i == n): the index pack Ns... is complete, so unpack every
// view from the tuple with std::get<Ns> and hand them all to the
// element-wise GPU loop in one call.
template<std::size_t n, typename... Args>
struct foreach_helper<n, n, Args...> {
template<typename Function, std::size_t... Ns>
static constexpr void foreach_aligned_gpu(Function&& func, const std::tuple<Args...>& views) {
loop_foreach_aligned_view_gpu(std::forward<Function>(func), std::forward<Args>(std::get<Ns>(views))...);
}
};
} // namespace detail_gpu
// Entry point: run func element-wise over every view in `views` on the GPU
// (or the CPU fallback). Kicks off the foreach_helper recursion, which
// expands the tuple into one argument per view.
template<typename... Args, typename Function>
constexpr void foreach_aligned_gpu(const std::tuple<Args...>& views, Function&& func) {
    // sizeof...(Args) is the tuple arity, identical to
    // std::tuple_size<std::tuple<Args...>>::value but without the detour.
    detail_gpu::foreach_helper<0, sizeof...(Args), Args...>::foreach_aligned_gpu(std::forward<Function>(func), views);
}
} // namespace nvector
#endif