// Forked from bshoshany/thread-pool (BS_thread_pool.hpp, 516 lines / 460 loc, ~20 KB).
// NOTE: this capture originally contained GitHub page chrome and a scraped
// line-number gutter (1-516) here, which were extraction artifacts, not source;
// they have been condensed into this comment.
#pragma once
/**
* @file BS_thread_pool.hpp
* @author Barak Shoshany (baraksh@gmail.com) (http://baraksh.com)
* @version 3.0.0
* @date 2022-05-30
* @copyright Copyright (c) 2022 Barak Shoshany. Licensed under the MIT license. If you use this library in software of any kind, please provide a link to the GitHub repository https://github.com/bshoshany/thread-pool in the source code and documentation. If you use this library in published research, please cite it as follows: Barak Shoshany, "A C++17 Thread Pool for High-Performance Scientific Computing", doi:10.5281/zenodo.4742687, arXiv:2105.00613 (May 2021)
*
* @brief BS::thread_pool: a fast, lightweight, and easy-to-use C++17 thread pool library. This header file contains the entire library, including the main BS::thread_pool class and the helper classes BS::multi_future, BS::synced_stream, and BS::timer.
*/
#define BS_THREAD_POOL_VERSION "v3.0.0 (2022-05-30)"
#include <atomic> // std::atomic
#include <chrono> // std::chrono
#include <condition_variable> // std::condition_variable
#include <exception> // std::current_exception
#include <functional> // std::function
#include <future> // std::future, std::promise
#include <iostream> // std::cout, std::ostream
#include <memory> // std::make_shared, std::make_unique, std::shared_ptr, std::unique_ptr
#include <mutex> // std::mutex, std::scoped_lock, std::unique_lock
#include <queue> // std::queue
#include <thread> // std::thread
#include <type_traits> // std::common_type_t, std::decay_t, std::is_void_v, std::invoke_result_t
#include <utility> // std::move, std::swap
#include <vector> // std::vector
namespace BS
{
using concurrency_t = std::invoke_result_t<decltype(std::thread::hardware_concurrency)>;
// ============================================================================================= //
// Begin class multi_future //
/**
* @brief A helper class to facilitate waiting for and/or getting the results of multiple futures at once.
*/
template <typename T>
class multi_future
{
public:
    /**
     * @brief Construct a multi_future object with the given number of futures.
     *
     * @param num_futures_ The desired number of futures to store.
     */
    explicit multi_future(const size_t num_futures_ = 0) : f(num_futures_) {}

    /**
     * @brief Get the results from all the futures stored in this multi_future object, rethrowing any stored exceptions.
     *
     * @return If T is void, this function returns nothing (the futures are merely waited on); otherwise, a vector containing the results.
     */
    std::conditional_t<std::is_void_v<T>, void, std::vector<T>> get()
    {
        if constexpr (std::is_void_v<T>)
        {
            // std::vector<void> is ill-formed, so for void futures we simply wait on
            // (and propagate exceptions from) each future. Without this branch,
            // multi_future<void>::get() - e.g. from parallelize_loop() with a void
            // loop function - would fail to compile.
            for (size_t i = 0; i < f.size(); ++i)
                f[i].get();
            return;
        }
        else
        {
            std::vector<T> results(f.size());
            for (size_t i = 0; i < f.size(); ++i)
                results[i] = f[i].get();
            return results;
        }
    }

    /**
     * @brief Wait for all the futures stored in this multi_future object.
     */
    void wait() const
    {
        for (size_t i = 0; i < f.size(); ++i)
            f[i].wait();
    }

    /**
     * @brief A vector to store the futures.
     */
    std::vector<std::future<T>> f;
};
// End class multi_future //
// ============================================================================================= //
// ============================================================================================= //
// Begin class thread_pool //
/**
* @brief A fast, lightweight, and easy-to-use C++17 thread pool class.
*/
class thread_pool
{
public:
// ============================
// Constructors and destructors
// ============================
/**
* @brief Construct a new thread pool.
*
* @param thread_count_ The number of threads to use. The default value is the total number of hardware threads available, as reported by the implementation. This is usually determined by the number of cores in the CPU. If a core is hyperthreaded, it will count as two threads.
*/
explicit thread_pool(const concurrency_t thread_count_ = std::thread::hardware_concurrency()) : thread_count(thread_count_ ? thread_count_ : std::thread::hardware_concurrency()), threads(std::make_unique<std::thread[]>(thread_count_ ? thread_count_ : std::thread::hardware_concurrency()))
{
create_threads();
}
/**
* @brief Destruct the thread pool. Waits for all tasks to complete, then destroys all threads. Note that if the variable paused is set to true, then any tasks still in the queue will never be executed.
*/
~thread_pool()
{
wait_for_tasks();
destroy_threads();
}
// =======================
// Public member functions
// =======================
/**
* @brief Get the number of tasks currently waiting in the queue to be executed by the threads.
*
* @return The number of queued tasks.
*/
size_t get_tasks_queued() const
{
const std::scoped_lock tasks_lock(tasks_mutex);
return tasks.size();
}
/**
* @brief Get the number of tasks currently being executed by the threads.
*
* @return The number of running tasks.
*/
size_t get_tasks_running() const
{
const std::scoped_lock tasks_lock(tasks_mutex);
return tasks_total - tasks.size();
}
/**
* @brief Get the total number of unfinished tasks: either still in the queue, or running in a thread. Note that get_tasks_total() == get_tasks_queued() + get_tasks_running().
*
* @return The total number of tasks.
*/
size_t get_tasks_total() const
{
return tasks_total;
}
/**
* @brief Get the number of threads in the pool.
*
* @return The number of threads.
*/
concurrency_t get_thread_count() const
{
return thread_count;
}
/**
* @brief Parallelize a loop by automatically splitting it into blocks and submitting each block separately to the queue.
*
* @tparam F The type of the function to loop through.
* @tparam T1 The type of the first index in the loop. Should be a signed or unsigned integer.
* @tparam T2 The type of the index after the last index in the loop. Should be a signed or unsigned integer. If T1 is not the same as T2, a common type will be automatically inferred.
* @tparam T The common type of T1 and T2.
* @tparam R The return value of the loop function F (can be void).
* @param first_index The first index in the loop.
* @param index_after_last The index after the last index in the loop. The loop will iterate from first_index to (index_after_last - 1) inclusive. In other words, it will be equivalent to "for (T i = first_index; i < index_after_last; ++i)". Note that if first_index == index_after_last, no blocks will be submitted.
* @param loop The function to loop through. Will be called once per block. Should take exactly two arguments: the first index in the block and the index after the last index in the block. loop(start, end) should typically involve a loop of the form "for (T i = start; i < end; ++i)".
* @param num_blocks The maximum number of blocks to split the loop into. The default is to use the number of threads in the pool.
* @return A multi_future object that can be used to wait for all the blocks to finish. If the loop function returns a value, the multi_future object can be used to obtain the values returned by each block.
*/
template <typename F, typename T1, typename T2, typename T = std::common_type_t<T1, T2>, typename R = std::invoke_result_t<std::decay_t<F>, T, T>>
multi_future<R> parallelize_loop(const T1& first_index, const T2& index_after_last, const F& loop, size_t num_blocks = 0)
{
T first_index_T = static_cast<T>(first_index);
T index_after_last_T = static_cast<T>(index_after_last);
if (first_index_T == index_after_last_T)
return multi_future<R>();
if (index_after_last_T < first_index_T)
std::swap(index_after_last_T, first_index_T);
if (num_blocks == 0)
num_blocks = thread_count;
const size_t total_size = static_cast<size_t>(index_after_last_T - first_index_T);
size_t block_size = static_cast<size_t>(total_size / num_blocks);
if (block_size == 0)
{
block_size = 1;
num_blocks = total_size > 1 ? total_size : 1;
}
multi_future<R> mf(num_blocks);
for (size_t i = 0; i < num_blocks; ++i)
{
const T start = (static_cast<T>(i * block_size) + first_index_T);
const T end = (i == num_blocks - 1) ? index_after_last_T : (static_cast<T>((i + 1) * block_size) + first_index_T);
mf.f[i] = submit(loop, start, end);
}
return mf;
}
/**
* @brief Push a function with zero or more arguments, but no return value, into the task queue.
*
* @tparam F The type of the function.
* @tparam A The types of the arguments.
* @param task The function to push.
* @param args The arguments to pass to the function.
*/
template <typename F, typename... A>
void push_task(const F& task, const A&... args)
{
{
const std::scoped_lock tasks_lock(tasks_mutex);
if constexpr (sizeof...(args) == 0)
tasks.push(std::function<void()>(task));
else
tasks.push(std::function<void()>([task, args...] { task(args...); }));
}
++tasks_total;
task_available_cv.notify_one();
}
/**
* @brief Reset the number of threads in the pool. Waits for all currently running tasks to be completed, then destroys all threads in the pool and creates a new thread pool with the new number of threads. Any tasks that were waiting in the queue before the pool was reset will then be executed by the new threads. If the pool was paused before resetting it, the new pool will be paused as well.
*
* @param thread_count_ The number of threads to use. The default value is the total number of hardware threads available, as reported by the implementation. This is usually determined by the number of cores in the CPU. If a core is hyperthreaded, it will count as two threads.
*/
void reset(const concurrency_t thread_count_ = std::thread::hardware_concurrency())
{
const bool was_paused = paused;
paused = true;
wait_for_tasks();
destroy_threads();
thread_count = thread_count_ ? thread_count_ : std::thread::hardware_concurrency();
threads = std::make_unique<std::thread[]>(thread_count);
paused = was_paused;
create_threads();
}
/**
* @brief Submit a function with zero or more arguments into the task queue. If the function has a return value, get a future for the eventual returned value. If the function has no return value, get an std::future<void> which can be used to wait until the task finishes.
*
* @tparam F The type of the function.
* @tparam A The types of the zero or more arguments to pass to the function.
* @tparam R The return type of the function (can be void).
* @param task The function to submit.
* @param args The zero or more arguments to pass to the function.
* @return A future to be used later to wait for the function to finish executing and/or obtain its returned value if it has one.
*/
template <typename F, typename... A, typename R = std::invoke_result_t<std::decay_t<F>, std::decay_t<A>...>>
std::future<R> submit(const F& task, const A&... args)
{
std::shared_ptr<std::promise<R>> task_promise = std::make_shared<std::promise<R>>();
push_task(
[task, args..., task_promise]
{
try
{
if constexpr (std::is_void_v<R>)
{
task(args...);
task_promise->set_value();
}
else
{
task_promise->set_value(task(args...));
}
}
catch (...)
{
try
{
task_promise->set_exception(std::current_exception());
}
catch (...)
{
}
}
});
return task_promise->get_future();
}
/**
* @brief Wait for tasks to be completed. Normally, this function waits for all tasks, both those that are currently running in the threads and those that are still waiting in the queue. However, if the pool is paused, this function only waits for the currently running tasks (otherwise it would wait forever). Note: To wait for just one specific task, use submit() instead, and call the wait() member function of the generated future.
*/
void wait_for_tasks()
{
waiting = true;
std::unique_lock<std::mutex> tasks_lock(tasks_mutex);
task_done_cv.wait(tasks_lock, [this] { return (tasks_total == (paused ? tasks.size() : 0)); });
waiting = false;
}
// ===========
// Public data
// ===========
/**
* @brief An atomic variable indicating whether the workers should pause. When set to true, the workers temporarily stop retrieving new tasks out of the queue, although any tasks already executed will keep running until they are finished. Set to false again to resume retrieving tasks.
*/
std::atomic<bool> paused = false;
private:
// ========================
// Private member functions
// ========================
/**
* @brief Create the threads in the pool and assign a worker to each thread.
*/
void create_threads()
{
running = true;
for (concurrency_t i = 0; i < thread_count; ++i)
{
threads[i] = std::thread(&thread_pool::worker, this);
}
}
/**
* @brief Destroy the threads in the pool.
*/
void destroy_threads()
{
running = false;
task_available_cv.notify_all();
for (concurrency_t i = 0; i < thread_count; ++i)
{
threads[i].join();
}
}
/**
* @brief A worker function to be assigned to each thread in the pool. Waits until it is notified by push_task() that a task is available, and then retrieves the task from the queue and executes it. Once the task finishes, the worker notifies wait_for_tasks() in case it is waiting.
*/
void worker()
{
while (running)
{
std::function<void()> task;
std::unique_lock<std::mutex> tasks_lock(tasks_mutex);
task_available_cv.wait(tasks_lock, [&] { return !tasks.empty() || !running; });
if (running && !paused)
{
task = std::move(tasks.front());
tasks.pop();
tasks_lock.unlock();
task();
--tasks_total;
if (waiting)
task_done_cv.notify_one();
}
}
}
// ============
// Private data
// ============
/**
* @brief An atomic variable indicating to the workers to keep running. When set to false, the workers permanently stop working.
*/
std::atomic<bool> running = false;
/**
* @brief A condition variable used to notify worker() that a new task has become available.
*/
std::condition_variable task_available_cv = {};
/**
* @brief A condition variable used to notify wait_for_tasks() that a tasks is done.
*/
std::condition_variable task_done_cv = {};
/**
* @brief A queue of tasks to be executed by the threads.
*/
std::queue<std::function<void()>> tasks = {};
/**
* @brief An atomic variable to keep track of the total number of unfinished tasks - either still in the queue, or running in a thread.
*/
std::atomic<size_t> tasks_total = 0;
/**
* @brief A mutex to synchronize access to the task queue by different threads.
*/
mutable std::mutex tasks_mutex = {};
/**
* @brief The number of threads in the pool.
*/
concurrency_t thread_count = 0;
/**
* @brief A smart pointer to manage the memory allocated for the threads.
*/
std::unique_ptr<std::thread[]> threads = nullptr;
/**
* @brief An atomic variable indicating that wait_for_tasks() is active and expects to be notified whenever a task is done.
*/
std::atomic<bool> waiting = false;
};
// End class thread_pool //
// ============================================================================================= //
// ============================================================================================= //
// Begin class synced_stream //
/**
* @brief A helper class to synchronize printing to an output stream by different threads.
*/
class synced_stream
{
public:
    /**
     * @brief Construct a new synced stream.
     *
     * @param out_stream_ The output stream to print to. The default value is std::cout.
     */
    explicit synced_stream(std::ostream& out_stream_ = std::cout) : out_stream(out_stream_) {}

    /**
     * @brief Print any number of items into the output stream. Ensures that no other threads print to this stream simultaneously, as long as they all exclusively use the same synced_stream object to print.
     *
     * @tparam T The types of the items.
     * @param items The items to print.
     */
    template <typename... T>
    void print(const T&... items)
    {
        // Hold the mutex for the duration of the whole fold expression, so the
        // items of a single call are never interleaved with another thread's.
        const std::scoped_lock stream_guard(stream_mutex);
        (out_stream << ... << items);
    }

    /**
     * @brief Print any number of items into the output stream, followed by a newline character. Ensures that no other threads print to this stream simultaneously, as long as they all exclusively use the same synced_stream object to print.
     *
     * @tparam T The types of the items.
     * @param items The items to print.
     */
    template <typename... T>
    void println(const T&... items)
    {
        const std::scoped_lock stream_guard(stream_mutex);
        ((out_stream << ... << items) << '\n');
    }

private:
    /**
     * @brief The output stream to print to.
     */
    std::ostream& out_stream;

    /**
     * @brief A mutex to synchronize printing.
     */
    mutable std::mutex stream_mutex;
};
// End class synced_stream //
// ============================================================================================= //
// ============================================================================================= //
// Begin class timer //
/**
* @brief A helper class to measure execution time for benchmarking purposes.
*/
class timer
{
    // The monotonic clock used for all measurements; immune to wall-clock changes.
    using clock_type = std::chrono::steady_clock;

public:
    /**
     * @brief Start (or restart) measuring time.
     */
    void start()
    {
        start_time = clock_type::now();
    }

    /**
     * @brief Stop measuring time and store the elapsed time since start().
     */
    void stop()
    {
        elapsed_time = clock_type::now() - start_time;
    }

    /**
     * @brief Get the number of milliseconds that have elapsed between start() and stop().
     *
     * @return The number of milliseconds.
     */
    std::chrono::milliseconds::rep ms() const
    {
        return std::chrono::duration_cast<std::chrono::milliseconds>(elapsed_time).count();
    }

private:
    /**
     * @brief The time point when measuring started.
     */
    std::chrono::time_point<clock_type> start_time = clock_type::now();

    /**
     * @brief The duration that has elapsed between start() and stop().
     */
    std::chrono::duration<double> elapsed_time = std::chrono::duration<double>::zero();
};
// End class timer //
// ============================================================================================= //
} // namespace BS