// libstdc++-v3/include/parallel/balanced_quicksort.h (GCC, parallel mode)
1 // -*- C++ -*-
2
3 // Copyright (C) 2007, 2008, 2009 Free Software Foundation, Inc.
4 //
5 // This file is part of the GNU ISO C++ Library. This library is free
6 // software; you can redistribute it and/or modify it under the terms
7 // of the GNU General Public License as published by the Free Software
8 // Foundation; either version 3, or (at your option) any later
9 // version.
10
11 // This library is distributed in the hope that it will be useful, but
12 // WITHOUT ANY WARRANTY; without even the implied warranty of
13 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 // General Public License for more details.
15
16 // Under Section 7 of GPL version 3, you are granted additional
17 // permissions described in the GCC Runtime Library Exception, version
18 // 3.1, as published by the Free Software Foundation.
19
20 // You should have received a copy of the GNU General Public License and
21 // a copy of the GCC Runtime Library Exception along with this program;
22 // see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
23 // <http://www.gnu.org/licenses/>.
24
25 /** @file parallel/balanced_quicksort.h
26 * @brief Implementation of a dynamically load-balanced parallel quicksort.
27 *
28 * It works in-place and needs only logarithmic extra memory.
29 * The algorithm is similar to the one proposed in
30 *
31 * P. Tsigas and Y. Zhang.
32 * A simple, fast parallel implementation of quicksort and
33 * its performance evaluation on SUN enterprise 10000.
34 * In 11th Euromicro Conference on Parallel, Distributed and
35 * Network-Based Processing, page 372, 2003.
36 *
37 * This file is a GNU parallel extension to the Standard C++ Library.
38 */
39
40 // Written by Johannes Singler.
41
42 #ifndef _GLIBCXX_PARALLEL_BALANCED_QUICKSORT_H
43 #define _GLIBCXX_PARALLEL_BALANCED_QUICKSORT_H 1
44
45 #include <parallel/basic_iterator.h>
46 #include <bits/stl_algo.h>
47
48 #include <parallel/settings.h>
49 #include <parallel/partition.h>
50 #include <parallel/random_number.h>
51 #include <parallel/queue.h>
52 #include <functional>
53
54 #if _GLIBCXX_ASSERTIONS
55 #include <parallel/checkers.h>
56 #endif
57
58 namespace __gnu_parallel
59 {
  /** @brief Information local to one thread in the parallel quicksort run.
   *
   *  One instance exists per participating thread; other threads access
   *  only the work-stealing queue (stealing from its back) and read the
   *  shared leftover-element counter through @c _M_elements_leftover. */
  template<typename _RAIter>
    struct _QSBThreadLocal
    {
      typedef std::iterator_traits<_RAIter> _TraitsType;
      typedef typename _TraitsType::difference_type _DifferenceType;

      /** @brief Continuous part of the sequence, described by an
      iterator pair. */
      typedef std::pair<_RAIter, _RAIter> _Piece;

      /** @brief Initial piece to work on. */
      _Piece _M_initial;

      /** @brief Work-stealing queue.
       *
       *  The owner pushes/pops at the front; thieves pop large pieces
       *  from the back. */
      _RestrictedBoundedConcurrentQueue<_Piece> _M_leftover_parts;

      /** @brief Number of threads involved in this algorithm. */
      _ThreadIndex _M_num_threads;

      /** @brief Pointer to a counter of elements left over to sort.
       *
       *  Points to a single counter shared by all threads; updated with
       *  @c omp @c atomic, hence @c volatile. */
      volatile _DifferenceType* _M_elements_leftover;

      /** @brief The complete sequence to sort. */
      _Piece _M_global;

      /** @brief Constructor.
       *  @param __queue_size Size of the work-stealing queue. */
      _QSBThreadLocal(int __queue_size) : _M_leftover_parts(__queue_size) { }
    };
90
91 /** @brief Balanced quicksort divide step.
92 * @param __begin Begin iterator of subsequence.
93 * @param __end End iterator of subsequence.
94 * @param __comp Comparator.
95 * @param __num_threads Number of threads that are allowed to work on
96 * this part.
97 * @pre @__c (__end-__begin)>=1 */
98 template<typename _RAIter, typename _Compare>
99 typename std::iterator_traits<_RAIter>::difference_type
100 __qsb_divide(_RAIter __begin, _RAIter __end,
101 _Compare __comp, _ThreadIndex __num_threads)
102 {
103 _GLIBCXX_PARALLEL_ASSERT(__num_threads > 0);
104
105 typedef std::iterator_traits<_RAIter> _TraitsType;
106 typedef typename _TraitsType::value_type _ValueType;
107 typedef typename _TraitsType::difference_type _DifferenceType;
108
109 _RAIter __pivot_pos =
110 __median_of_three_iterators(__begin, __begin + (__end - __begin) / 2,
111 __end - 1, __comp);
112
113 #if defined(_GLIBCXX_ASSERTIONS)
114 // Must be in between somewhere.
115 _DifferenceType __n = __end - __begin;
116
117 _GLIBCXX_PARALLEL_ASSERT(
118 (!__comp(*__pivot_pos, *__begin) &&
119 !__comp(*(__begin + __n / 2), *__pivot_pos))
120 || (!__comp(*__pivot_pos, *__begin) &&
121 !__comp(*(__end - 1), *__pivot_pos))
122 || (!__comp(*__pivot_pos, *(__begin + __n / 2)) &&
123 !__comp(*__begin, *__pivot_pos))
124 || (!__comp(*__pivot_pos, *(__begin + __n / 2)) &&
125 !__comp(*(__end - 1), *__pivot_pos))
126 || (!__comp(*__pivot_pos, *(__end - 1)) &&
127 !__comp(*__begin, *__pivot_pos))
128 || (!__comp(*__pivot_pos, *(__end - 1)) &&
129 !__comp(*(__begin + __n / 2), *__pivot_pos)));
130 #endif
131
132 // Swap pivot value to end.
133 if (__pivot_pos != (__end - 1))
134 std::swap(*__pivot_pos, *(__end - 1));
135 __pivot_pos = __end - 1;
136
137 __gnu_parallel::binder2nd<_Compare, _ValueType, _ValueType, bool>
138 __pred(__comp, *__pivot_pos);
139
140 // Divide, returning __end - __begin - 1 in the worst case.
141 _DifferenceType __split_pos = __parallel_partition(
142 __begin, __end - 1, __pred, __num_threads);
143
144 // Swap back pivot to middle.
145 std::swap(*(__begin + __split_pos), *__pivot_pos);
146 __pivot_pos = __begin + __split_pos;
147
148 #if _GLIBCXX_ASSERTIONS
149 _RAIter __r;
150 for (__r = __begin; __r != __pivot_pos; ++__r)
151 _GLIBCXX_PARALLEL_ASSERT(__comp(*__r, *__pivot_pos));
152 for (; __r != __end; ++__r)
153 _GLIBCXX_PARALLEL_ASSERT(!__comp(*__r, *__pivot_pos));
154 #endif
155
156 return __split_pos;
157 }
158
  /** @brief Quicksort conquer step.
   *
   *  Recursively splits the thread team in two, proportionally to the
   *  sizes of the two sides produced by the divide step; recursion
   *  bottoms out in __qsb_local_sort_with_helping() once only one
   *  thread (or at most one element) remains.
   *
   *  @param __tls Array of thread-local storages.
   *  @param __begin Begin iterator of subsequence.
   *  @param __end End iterator of subsequence.
   *  @param __comp Comparator.
   *  @param __iam Number of the thread processing this function.
   *  @param __num_threads
   *            Number of threads that are allowed to work on this part.
   *  @param __parent_wait Whether threads finishing early may keep
   *            waiting/stealing while other threads still hold work. */
  template<typename _RAIter, typename _Compare>
    void
    __qsb_conquer(_QSBThreadLocal<_RAIter>** __tls,
                  _RAIter __begin, _RAIter __end,
                  _Compare __comp,
                  _ThreadIndex __iam, _ThreadIndex __num_threads,
                  bool __parent_wait)
    {
      typedef std::iterator_traits<_RAIter> _TraitsType;
      typedef typename _TraitsType::value_type _ValueType;
      typedef typename _TraitsType::difference_type _DifferenceType;

      _DifferenceType __n = __end - __begin;

      // Base case: a single thread (or a trivially small range) sorts
      // its assigned piece locally, possibly stealing work afterwards.
      if (__num_threads <= 1 || __n <= 1)
        {
          __tls[__iam]->_M_initial.first = __begin;
          __tls[__iam]->_M_initial.second = __end;

          __qsb_local_sort_with_helping(__tls, __comp, __iam, __parent_wait);

          return;
        }

      // Divide step.
      _DifferenceType __split_pos =
        __qsb_divide(__begin, __end, __comp, __num_threads);

#if _GLIBCXX_ASSERTIONS
      _GLIBCXX_PARALLEL_ASSERT(0 <= __split_pos &&
                               __split_pos < (__end - __begin));
#endif

      // Threads for the left side, proportional to its share of the
      // elements, but at least one thread on each side.
      _ThreadIndex __num_threads_leftside =
        std::max<_ThreadIndex>(1, std::min<_ThreadIndex>(
                          __num_threads - 1, __split_pos * __num_threads / __n));

      // The pivot is already in its final position after the divide
      // step; account for it as done on the shared counter.
#     pragma omp atomic
      *__tls[__iam]->_M_elements_leftover -= (_DifferenceType)1;

      // Conquer step.
#     pragma omp parallel num_threads(2)
      {
        bool __wait;
        // If the runtime granted only one thread, both sections run on
        // it sequentially, so waiting for steals would self-deadlock.
        if(omp_get_num_threads() < 2)
          __wait = false;
        else
          __wait = __parent_wait;

#       pragma omp sections
        {
#         pragma omp section
          {
            __qsb_conquer(__tls, __begin, __begin + __split_pos, __comp,
                          __iam,
                          __num_threads_leftside,
                          __wait);
            __wait = __parent_wait;
          }
          // The pivot_pos is left in place, to ensure termination.
#         pragma omp section
          {
            __qsb_conquer(__tls, __begin + __split_pos + 1, __end, __comp,
                          __iam + __num_threads_leftside,
                          __num_threads - __num_threads_leftside,
                          __wait);
            __wait = __parent_wait;
          }
        }
      }
    }
238
  /**
   *  @brief Quicksort step doing load-balanced local sort.
   *
   *  Sequential quicksort on the thread's own piece: at each split the
   *  larger side is pushed onto the thread's own work-stealing queue
   *  and the smaller side is processed next.  Base cases are sorted
   *  with the sequential sort.  When the local queue runs dry, the
   *  thread subtracts its finished elements from the shared counter
   *  and, if @c __wait is set, tries to steal large pieces from the
   *  back of random victims' queues until no unsorted elements remain.
   *
   *  @param __tls Array of thread-local storages.
   *  @param __comp Comparator.
   *  @param __iam Number of the thread processing this function.
   *  @param __wait Whether to keep stealing while other threads still
   *  hold unsorted elements.
   */
  template<typename _RAIter, typename _Compare>
    void
    __qsb_local_sort_with_helping(_QSBThreadLocal<_RAIter>** __tls,
                                  _Compare& __comp, int __iam, bool __wait)
    {
      typedef std::iterator_traits<_RAIter> _TraitsType;
      typedef typename _TraitsType::value_type _ValueType;
      typedef typename _TraitsType::difference_type _DifferenceType;
      typedef std::pair<_RAIter, _RAIter> _Piece;

      _QSBThreadLocal<_RAIter>& __tl = *__tls[__iam];

      // Ranges at most this long are handled by the sequential sort.
      _DifferenceType __base_case_n =
        _Settings::get().sort_qsb_base_case_maximal_n;
      if (__base_case_n < 2)
        __base_case_n = 2;
      _ThreadIndex __num_threads = __tl._M_num_threads;

      // Every thread has its own random number generator.
      _RandomNumber __rng(__iam + 1);

      _Piece __current = __tl._M_initial;

      // Elements finished since the last update of the shared counter.
      _DifferenceType __elements_done = 0;
#if _GLIBCXX_ASSERTIONS
      _DifferenceType __total_elements_done = 0;
#endif

      for (;;)
        {
          // Invariant: __current must be a valid (maybe empty) range.
          _RAIter __begin = __current.first, __end = __current.second;
          _DifferenceType __n = __end - __begin;

          if (__n > __base_case_n)
            {
              // Divide at a uniformly random pivot position.
              _RAIter __pivot_pos = __begin + __rng(__n);

              // Swap __pivot_pos value to end.
              if (__pivot_pos != (__end - 1))
                std::swap(*__pivot_pos, *(__end - 1));
              __pivot_pos = __end - 1;

              // __pred(__x) <=> __comp(__x, pivot).
              __gnu_parallel::binder2nd
                <_Compare, _ValueType, _ValueType, bool>
                __pred(__comp, *__pivot_pos);

              // Divide, leave pivot unchanged in last place.
              _RAIter __split_pos1, __split_pos2;
              __split_pos1 =
                __gnu_sequential::partition(__begin, __end - 1, __pred);

              // Left side: < __pivot_pos; __right side: >= __pivot_pos.
#if _GLIBCXX_ASSERTIONS
              _GLIBCXX_PARALLEL_ASSERT(__begin <= __split_pos1
                                       && __split_pos1 < __end);
#endif
              // Swap pivot back to middle.
              if (__split_pos1 != __pivot_pos)
                std::swap(*__split_pos1, *__pivot_pos);
              __pivot_pos = __split_pos1;

              // In case all elements are equal, __split_pos1 == 0.
              if ((__split_pos1 + 1 - __begin) < (__n >> 7)
                  || (__end - __split_pos1) < (__n >> 7))
                {
                  // Very unequal split, one part smaller than one 128th;
                  // group the elements equal to the pivot together so
                  // duplicate-heavy inputs don't degenerate.
                  // __pred(__x) <=> !__comp(pivot, __x), i.e. __x <= pivot.
                  __gnu_parallel::__unary_negate<__gnu_parallel::__binder1st
                    <_Compare, _ValueType, _ValueType, bool>, _ValueType>
                    __pred(__gnu_parallel::__binder1st
                           <_Compare, _ValueType, _ValueType, bool>(
                             __comp, *__pivot_pos));

                  // Find other end of pivot-equal range.
                  __split_pos2 = __gnu_sequential::partition(__split_pos1 + 1,
                                                             __end, __pred);
                }
              else
                // Only skip the pivot.
                __split_pos2 = __split_pos1 + 1;

              // Elements equal to pivot are done.
              __elements_done += (__split_pos2 - __split_pos1);
#if _GLIBCXX_ASSERTIONS
              __total_elements_done += (__split_pos2 - __split_pos1);
#endif
              // Always push larger part onto stack (smaller part is
              // processed next, keeping the queue depth logarithmic).
              if (((__split_pos1 + 1) - __begin) < (__end - (__split_pos2)))
                {
                  // Right side larger.
                  if ((__split_pos2) != __end)
                    __tl._M_leftover_parts.push_front(
                      std::make_pair(__split_pos2, __end));

                  //__current.first = __begin; //already set anyway
                  __current.second = __split_pos1;
                  continue;
                }
              else
                {
                  // Left side larger.
                  if (__begin != __split_pos1)
                    __tl._M_leftover_parts.push_front(std::make_pair(__begin,
                                                                __split_pos1));

                  __current.first = __split_pos2;
                  //__current.second = __end; //already set anyway
                  continue;
                }
            }
          else
            {
              // Base case: sort sequentially.
              __gnu_sequential::sort(__begin, __end, __comp);
              __elements_done += __n;
#if _GLIBCXX_ASSERTIONS
              __total_elements_done += __n;
#endif

              // Prefer own stack, small pieces.
              if (__tl._M_leftover_parts.pop_front(__current))
                continue;

              // Own queue empty: publish finished work on the shared
              // counter before looking elsewhere.
#             pragma omp atomic
              *__tl._M_elements_leftover -= __elements_done;

              __elements_done = 0;

#if _GLIBCXX_ASSERTIONS
              double __search_start = omp_get_wtime();
#endif

              // Look for new work: steal a large piece from the back of
              // a random victim's queue.
              bool __successfully_stolen = false;
              while (__wait && *__tl._M_elements_leftover > 0
                     && !__successfully_stolen
#if _GLIBCXX_ASSERTIONS
                      // Possible dead-lock: give up after one second.
                     && (omp_get_wtime() < (__search_start + 1.0))
#endif
                     )
                {
                  _ThreadIndex __victim;
                  __victim = __rng(__num_threads);

                  // Large pieces.
                  __successfully_stolen = (__victim != __iam)
                    && __tls[__victim]->_M_leftover_parts.pop_back(__current);
                  if (!__successfully_stolen)
                    __yield();
#if !defined(__ICC) && !defined(__ECC)
#                 pragma omp flush
#endif
                }

#if _GLIBCXX_ASSERTIONS
              // Watchdog (assertions only): stealing took too long,
              // most likely a dead-lock; fail with an assertion.
              if (omp_get_wtime() >= (__search_start + 1.0))
                {
                  sleep(1);
                  _GLIBCXX_PARALLEL_ASSERT(omp_get_wtime()
                                           < (__search_start + 1.0));
                }
#endif
              if (!__successfully_stolen)
                {
#if _GLIBCXX_ASSERTIONS
                  _GLIBCXX_PARALLEL_ASSERT(*__tl._M_elements_leftover == 0);
#endif
                  return;
                }
            }
        }
    }
419
420 /** @brief Top-level quicksort routine.
421 * @param __begin Begin iterator of sequence.
422 * @param __end End iterator of sequence.
423 * @param __comp Comparator.
424 * @param __num_threads Number of threads that are allowed to work on
425 * this part.
426 */
427 template<typename _RAIter, typename _Compare>
428 void
429 __parallel_sort_qsb(_RAIter __begin, _RAIter __end,
430 _Compare __comp,
431 _ThreadIndex __num_threads)
432 {
433 _GLIBCXX_CALL(__end - __begin)
434
435 typedef std::iterator_traits<_RAIter> _TraitsType;
436 typedef typename _TraitsType::value_type _ValueType;
437 typedef typename _TraitsType::difference_type _DifferenceType;
438 typedef std::pair<_RAIter, _RAIter> _Piece;
439
440 typedef _QSBThreadLocal<_RAIter> _TLSType;
441
442 _DifferenceType __n = __end - __begin;
443
444 if (__n <= 1)
445 return;
446
447 // At least one element per processor.
448 if (__num_threads > __n)
449 __num_threads = static_cast<_ThreadIndex>(__n);
450
451 // Initialize thread local storage
452 _TLSType** __tls = new _TLSType*[__num_threads];
453 _DifferenceType __queue_size =
454 __num_threads * (_ThreadIndex)(log2(__n) + 1);
455 for (_ThreadIndex __t = 0; __t < __num_threads; ++__t)
456 __tls[__t] = new _QSBThreadLocal<_RAIter>(__queue_size);
457
458 // There can never be more than ceil(log2(__n)) ranges on the stack,
459 // because
460 // 1. Only one processor pushes onto the stack
461 // 2. The largest range has at most length __n
462 // 3. Each range is larger than half of the range remaining
463 volatile _DifferenceType _M_elements_leftover = __n;
464 for (int __i = 0; __i < __num_threads; ++__i)
465 {
466 __tls[__i]->_M_elements_leftover = &_M_elements_leftover;
467 __tls[__i]->_M_num_threads = __num_threads;
468 __tls[__i]->_M_global = std::make_pair(__begin, __end);
469
470 // Just in case nothing is left to assign.
471 __tls[__i]->_M_initial = std::make_pair(__end, __end);
472 }
473
474 // Main recursion call.
475 __qsb_conquer(
476 __tls, __begin, __begin + __n, __comp, 0, __num_threads, true);
477
478 #if _GLIBCXX_ASSERTIONS
479 // All stack must be empty.
480 _Piece __dummy;
481 for (int __i = 1; __i < __num_threads; ++__i)
482 _GLIBCXX_PARALLEL_ASSERT(
483 !__tls[__i]->_M_leftover_parts.pop_back(__dummy));
484 #endif
485
486 for (int __i = 0; __i < __num_threads; ++__i)
487 delete __tls[__i];
488 delete[] __tls;
489 }
490 } // namespace __gnu_parallel
491
492 #endif /* _GLIBCXX_PARALLEL_BALANCED_QUICKSORT_H */