/* Parallel for loops

   Copyright (C) 2019-2022 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#ifndef GDBSUPPORT_PARALLEL_FOR_H
#define GDBSUPPORT_PARALLEL_FOR_H

#include <algorithm>
#include <type_traits>
#include "gdbsupport/thread-pool.h"

namespace gdb
{

namespace detail
{

/* This is a helper class that is used to accumulate results for
   parallel_for.  There is a specialization for 'void', below.  (A
   usage sketch follows this class.)  */
template<typename T>
struct par_for_accumulator
{
public:

  explicit par_for_accumulator (size_t n_threads)
    : m_futures (n_threads)
  {
  }

  /* The result type that is accumulated.  */
  typedef std::vector<T> result_type;

  /* Post the Ith task to a background thread, and store a future for
     later.  */
  void post (size_t i, std::function<T ()> task)
  {
    m_futures[i]
      = gdb::thread_pool::g_thread_pool->post_task (std::move (task));
  }

  /* Invoke TASK in the current thread, then compute all the results
     from all background tasks and put them into a result vector,
     which is returned.  */
  result_type finish (gdb::function_view<T ()> task)
  {
    result_type result (m_futures.size () + 1);

    result.back () = task ();

    for (size_t i = 0; i < m_futures.size (); ++i)
      result[i] = m_futures[i].get ();

    return result;
  }

private:

  /* A vector of futures coming from the tasks run in the
     background.  */
  std::vector<std::future<T>> m_futures;
};
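
/* Usage sketch for the accumulator above (illustrative only; the
   variable names and values here are hypothetical and not part of
   this interface):

     par_for_accumulator<int> accum (2);
     accum.post (0, [] () { return 1; });
     accum.post (1, [] () { return 2; });
     std::vector<int> all = accum.finish ([] () { return 3; });

   Here 'all[0]' is 1, 'all[1]' is 2, and 'all.back ()' is 3: the
   background results come first, and the calling thread's own result
   is stored last.  */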

/* See the generic template.  */
template<>
struct par_for_accumulator<void>
{
public:

  explicit par_for_accumulator (size_t n_threads)
    : m_futures (n_threads)
  {
  }

  /* This specialization does not compute results.  */
  typedef void result_type;

  void post (size_t i, std::function<void ()> task)
  {
    m_futures[i]
      = gdb::thread_pool::g_thread_pool->post_task (std::move (task));
  }

  result_type finish (gdb::function_view<void ()> task)
  {
    task ();

    for (auto &future : m_futures)
      {
        /* Use 'get' and not 'wait', to propagate any exception.  */
        future.get ();
      }
  }

private:

  std::vector<std::future<void>> m_futures;
};

}

/* A very simple "parallel for".  This splits the range of iterators
   into subranges, and then passes each subrange to the callback.  The
   work may or may not be done in separate threads.

   This approach was chosen over having the callback work on single
   items because it makes it simple for the caller to do
   once-per-subrange initialization and destruction.

   The parameter N says how batching ought to be done -- there will be
   at least N elements processed per thread.  Setting N to 0 is not
   allowed.

   If the function returns a non-void type, then a vector of the
   results is returned.  The size of the resulting vector depends on
   the number of threads that were used.  (See the usage sketch
   following this function.)  */

template<class RandomIt, class RangeFunction>
typename gdb::detail::par_for_accumulator<
    typename std::result_of<RangeFunction (RandomIt, RandomIt)>::type
  >::result_type
parallel_for_each (unsigned n, RandomIt first, RandomIt last,
                   RangeFunction callback)
{
  using result_type
    = typename std::result_of<RangeFunction (RandomIt, RandomIt)>::type;

  size_t n_threads = thread_pool::g_thread_pool->thread_count ();
  size_t n_elements = last - first;
  size_t elts_per_thread = 0;
  if (n_threads > 1)
    {
      /* Require that there should be at least N elements in a
         thread.  */
      gdb_assert (n > 0);
      if (n_elements / n_threads < n)
        n_threads = std::max (n_elements / n, (size_t) 1);
      elts_per_thread = n_elements / n_threads;
    }
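
  /* For example (illustrative numbers, not taken from the source):
     with 8 worker threads, 1000 elements, and N == 200, 1000 / 8 ==
     125 falls below the minimum batch size, so n_threads drops to
     1000 / 200 == 5 and elts_per_thread is 200.  Four batches are
     then posted to the pool below, and the final batch runs on the
     calling thread.  */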

  size_t count = n_threads == 0 ? 0 : n_threads - 1;
  gdb::detail::par_for_accumulator<result_type> results (count);

  for (int i = 0; i < count; ++i)
    {
      RandomIt end = first + elts_per_thread;
      results.post (i, [=] ()
        {
          return callback (first, end);
        });
      first = end;
    }

  /* Process all the remaining elements in the main thread.  */
  return results.finish ([=] ()
    {
      return callback (first, last);
    });
}
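
/* Example use (a sketch only; 'obj', 'items', and 'process_one' are
   hypothetical names, not part of GDB):

     std::vector<obj *> items = ...;
     gdb::parallel_for_each (10, items.begin (), items.end (),
                             [] (std::vector<obj *>::iterator start,
                                 std::vector<obj *>::iterator end)
                             {
                               for (auto it = start; it != end; ++it)
                                 process_one (*it);
                             });

   Each callback invocation receives one contiguous subrange, so any
   per-batch setup or teardown can be done once per call rather than
   once per element.  If the callback instead returned a value, the
   per-subrange results would come back as a std::vector.  */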

}

#endif /* GDBSUPPORT_PARALLEL_FOR_H */