Add a C++11 ThreadPool implementation in LLVM

This is a very simple implementation of a thread pool using C++11 threads. It accepts any std::function<void()> for asynchronous execution. Individual tasks can be synchronized on through the returned future, or the client can block until the full queue has completed. When LLVM is configured with threading disabled, it falls back to sequential execution using std::async with std::launch::deferred.

This is intended to support parallelism for ThinLTO processing in the linker plugin, but is generic enough for other uses.

This is a recommit of r255444, working around a bug in the MSVC 2013 standard library. I think I was hit by: http://connect.microsoft.com/VisualStudio/feedbackdetail/view/791185/std-packaged-task-t-where-t-is-void-or-a-reference-class-are-not-movable

Recommit of r255589, trying to please g++ as well.

Differential Revision: http://reviews.llvm.org/D15464

From: mehdi_amini <mehdi_amini@91177308-0d34-0410-b5e6-96231b3b80d8>

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@255593 91177308-0d34-0410-b5e6-96231b3b80d8
6 changed files with 356 additions and 0 deletions.
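For readers unfamiliar with the new interface, here is a minimal usage sketch. It exercises only what the header below declares (the default constructor, async(), wait(), and the returned std::shared_future); the function name example() and the counter are hypothetical and chosen to mirror the unit tests at the end of this patch.

#include "llvm/Support/ThreadPool.h"
#include <atomic>

// Hypothetical example: submit a few tasks, synchronize on one of them,
// then block until the whole queue has drained.
void example() {
  std::atomic<int> Count{0};
  llvm::ThreadPool Pool;                           // one thread per hardware core by default
  for (int i = 0; i < 4; ++i)
    Pool.async([&Count] { ++Count; });             // fire-and-forget tasks
  auto Future = Pool.async([&Count] { ++Count; }); // keep the future of one task
  Future.get();                                    // wait for that individual task
  Pool.wait();                                     // block until every queued task is done
}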
//===-- llvm/Support/ThreadPool.h - A ThreadPool implementation -*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines a crude C++11 based thread pool.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_SUPPORT_THREAD_POOL_H
#define LLVM_SUPPORT_THREAD_POOL_H

#include "llvm/Support/thread.h"

#include <atomic>
#include <condition_variable>
#include <functional>
#include <future>
#include <memory>
#include <mutex>
#include <queue>
#include <utility>

namespace llvm {

/// A ThreadPool for asynchronous parallel execution on a defined number of
/// threads.
///
/// The pool keeps a vector of threads alive, waiting on a condition variable
/// for some work to become available.
class ThreadPool {
public:
#ifndef _MSC_VER
  using VoidTy = void;
  using TaskTy = std::function<void()>;
  using PackagedTaskTy = std::packaged_task<void()>;
#else
  // MSVC 2013 has a bug and can't use std::packaged_task<void()>;
  // We force it to use bool(bool) instead.
  using VoidTy = bool;
  using TaskTy = std::function<bool(bool)>;
  using PackagedTaskTy = std::packaged_task<bool(bool)>;
#endif

  /// Construct a pool with the number of cores available on the system (or
  /// whatever the value returned by std::thread::hardware_concurrency() is).
  ThreadPool();

  /// Construct a pool of \p ThreadCount threads.
  ThreadPool(unsigned ThreadCount);

  /// Blocking destructor: the pool will wait for all the threads to complete.
  ~ThreadPool();

  /// Asynchronous submission of a task to the pool. The returned future can be
  /// used to wait for the task to finish and is *non-blocking* on destruction.
  template <typename Function, typename... Args>
  inline std::shared_future<VoidTy> async(Function &&F, Args &&... ArgList) {
    auto Task =
        std::bind(std::forward<Function>(F), std::forward<Args>(ArgList)...);
#ifndef _MSC_VER
    return asyncImpl(std::move(Task));
#else
    return asyncImpl([Task](VoidTy) -> VoidTy { Task(); return VoidTy(); });
#endif
  }

  /// Asynchronous submission of a task to the pool. The returned future can be
  /// used to wait for the task to finish and is *non-blocking* on destruction.
  template <typename Function>
  inline std::shared_future<VoidTy> async(Function &&F) {
#ifndef _MSC_VER
    return asyncImpl(std::forward<Function>(F));
#else
    return asyncImpl([F](VoidTy) -> VoidTy { F(); return VoidTy(); });
#endif
  }

  /// Blocking wait for all the threads to complete and the queue to be empty.
  /// It is an error to try to add new tasks while blocking on this call.
  void wait();

private:
  /// Asynchronous submission of a task to the pool. The returned future can be
  /// used to wait for the task to finish and is *non-blocking* on destruction.
  std::shared_future<VoidTy> asyncImpl(TaskTy F);

  /// Threads in flight.
  std::vector<llvm::thread> Threads;

  /// Tasks waiting for execution in the pool.
  std::queue<PackagedTaskTy> Tasks;

  /// Locking and signaling for accessing the Tasks queue.
  std::mutex QueueLock;
  std::condition_variable QueueCondition;

  /// Locking and signaling for job completion.
  std::mutex CompletionLock;
  std::condition_variable CompletionCondition;

  /// Keep track of the number of threads actually busy.
  std::atomic<unsigned> ActiveThreads;

#if LLVM_ENABLE_THREADS // avoids warning for unused variable
  /// Signal for the destruction of the pool, asking threads to exit.
  bool EnableFlag;
#endif
};
}

#endif // LLVM_SUPPORT_THREAD_POOL_H
include/llvm/Support/thread.h

#else // !LLVM_ENABLE_THREADS

#include <utility>

namespace llvm {

struct thread {
lib/Support/CMakeLists.txt

  StringRef.cpp
  SystemUtils.cpp
  TargetParser.cpp
  ThreadPool.cpp
  Timer.cpp
  ToolOutputFile.cpp
  Triple.cpp
//==-- llvm/Support/ThreadPool.cpp - A ThreadPool implementation -*- C++ -*-==//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements a crude C++11 based thread pool.
//
//===----------------------------------------------------------------------===//

#include "llvm/Support/ThreadPool.h"

#include "llvm/Config/llvm-config.h"
#include "llvm/Support/raw_ostream.h"

#include <cassert>

using namespace llvm;

#if LLVM_ENABLE_THREADS

// Default to std::thread::hardware_concurrency.
ThreadPool::ThreadPool() : ThreadPool(std::thread::hardware_concurrency()) {}

ThreadPool::ThreadPool(unsigned ThreadCount)
    : ActiveThreads(0), EnableFlag(true) {
  // Create ThreadCount threads that will loop forever, waiting on
  // QueueCondition for tasks to be queued or for the pool to be destroyed.
  Threads.reserve(ThreadCount);
  for (unsigned ThreadID = 0; ThreadID < ThreadCount; ++ThreadID) {
    Threads.emplace_back([&] {
      while (true) {
        PackagedTaskTy Task;
        {
          std::unique_lock<std::mutex> LockGuard(QueueLock);
          // Wait for tasks to be pushed in the queue.
          QueueCondition.wait(LockGuard,
                              [&] { return !EnableFlag || !Tasks.empty(); });
          // Exit condition.
          if (!EnableFlag && Tasks.empty())
            return;
          // We have a task: grab it and release the lock on the queue.

          // We first need to signal that we are active before popping the
          // queue in order for wait() to properly detect that, even if the
          // queue is empty, there is still a task in flight.
          {
            ++ActiveThreads;
            std::unique_lock<std::mutex> LockGuard(CompletionLock);
          }
          Task = std::move(Tasks.front());
          Tasks.pop();
        }
        // Run the task we just grabbed.
#ifndef _MSC_VER
        Task();
#else
        Task(/* unused */ false);
#endif

        {
          // Adjust `ActiveThreads`, in case someone waits on ThreadPool::wait().
          std::unique_lock<std::mutex> LockGuard(CompletionLock);
          --ActiveThreads;
        }

        // Notify task completion, in case someone waits on ThreadPool::wait().
        CompletionCondition.notify_all();
      }
    });
  }
}

void ThreadPool::wait() {
  // Wait for all threads to complete and the queue to be empty.
  std::unique_lock<std::mutex> LockGuard(CompletionLock);
  CompletionCondition.wait(LockGuard,
                           [&] { return Tasks.empty() && !ActiveThreads; });
}

std::shared_future<ThreadPool::VoidTy> ThreadPool::asyncImpl(TaskTy Task) {
  // Wrap the Task in a packaged_task to return a future object.
  PackagedTaskTy PackagedTask(std::move(Task));
  auto Future = PackagedTask.get_future();
  {
    // Lock the queue and push the new task.
    std::unique_lock<std::mutex> LockGuard(QueueLock);

    // Don't allow enqueueing after disabling the pool.
    assert(EnableFlag && "Queuing a thread during ThreadPool destruction");

    Tasks.push(std::move(PackagedTask));
  }
  QueueCondition.notify_one();
  return Future.share();
}

// The destructor joins all threads, waiting for completion.
ThreadPool::~ThreadPool() {
  {
    std::unique_lock<std::mutex> LockGuard(QueueLock);
    EnableFlag = false;
  }
  QueueCondition.notify_all();
  for (auto &Worker : Threads)
    Worker.join();
}

#else // LLVM_ENABLE_THREADS Disabled

ThreadPool::ThreadPool() : ThreadPool(0) {}

// No threads are launched; issue a warning if ThreadCount is not 0.
ThreadPool::ThreadPool(unsigned ThreadCount)
    : ActiveThreads(0) {
  if (ThreadCount) {
    errs() << "Warning: requested a ThreadPool with " << ThreadCount
           << " threads, but LLVM_ENABLE_THREADS has been turned off\n";
  }
}

void ThreadPool::wait() {
  // Sequential implementation running the tasks.
  while (!Tasks.empty()) {
    auto Task = std::move(Tasks.front());
    Tasks.pop();
    Task();
  }
}

std::shared_future<ThreadPool::VoidTy> ThreadPool::asyncImpl(TaskTy Task) {
  // Get a future with launch::deferred execution using std::async.
  auto Future = std::async(std::launch::deferred, std::move(Task)).share();
  // Wrap the future so that both ThreadPool::wait() can operate and the
  // returned future can be sync'ed on.
  PackagedTaskTy PackagedTask([Future]() { Future.get(); });
  Tasks.push(std::move(PackagedTask));
  return Future;
}

ThreadPool::~ThreadPool() {
  wait();
}

#endif
unittests/Support/CMakeLists.txt

  SwapByteOrderTest.cpp
  TargetRegistry.cpp
  ThreadLocalTest.cpp
  ThreadPool.cpp
  TimeValueTest.cpp
  TrailingObjectsTest.cpp
  UnicodeTest.cpp
//=========- unittests/Support/ThreadPool.cpp - ThreadPool.h tests -=========//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//

#include "llvm/Support/ThreadPool.h"

#include "llvm/ADT/STLExtras.h"

#include "gtest/gtest.h"

using namespace llvm;
using namespace std::chrono;

/// Do our best to make sure this thread does not progress faster than the
/// main thread.
static void yield() {
#if LLVM_ENABLE_THREADS
  std::this_thread::yield();
#endif
  std::this_thread::sleep_for(milliseconds(200));
#if LLVM_ENABLE_THREADS
  std::this_thread::yield();
#endif
}

TEST(ThreadPoolTest, AsyncBarrier) {
  // Test that async & barrier work together properly.

  std::atomic_int checked_in{0};

  ThreadPool Pool;
  for (size_t i = 0; i < 5; ++i) {
    Pool.async([&checked_in, i] {
      yield();
      ++checked_in;
    });
  }
  ASSERT_EQ(0, checked_in);
  Pool.wait();
  ASSERT_EQ(5, checked_in);
}

TEST(ThreadPoolTest, Async) {
  ThreadPool Pool;
  std::atomic_int i{0};
  // Sleep in the first task to ensure that the not-equal check below is
  // meaningful.
  Pool.async([&i] {
    yield();
    ++i;
  });
  Pool.async([&i] { ++i; });
  ASSERT_NE(2, i.load());
  Pool.wait();
  ASSERT_EQ(2, i.load());
}

TEST(ThreadPoolTest, GetFuture) {
  ThreadPool Pool;
  std::atomic_int i{0};
  // Sleep in the first task to ensure that the not-equal check below is
  // meaningful.
  Pool.async([&i] {
    yield();
    ++i;
  });
  // Force the future using get().
  Pool.async([&i] { ++i; }).get();
  ASSERT_NE(2, i.load());
  Pool.wait();
  ASSERT_EQ(2, i.load());
}

TEST(ThreadPoolTest, PoolDestruction) {
  // Test that the destructor waits for queued tasks to complete.
  std::atomic_int checked_in{0};

  {
    ThreadPool Pool;
    for (size_t i = 0; i < 5; ++i) {
      Pool.async([&checked_in, i] {
        yield();
        ++checked_in;
      });
    }
    ASSERT_EQ(0, checked_in);
  }
  ASSERT_EQ(5, checked_in);
}