/* Copyright (C) 2005-2018 Free Software Foundation, Inc.
   Contributed by Richard Henderson <rth@redhat.com>.

   This file is part of the GNU Offloading and Multi Processing Library
   (libgomp).

   Libgomp is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3, or (at your option)
   any later version.

   Libgomp is distributed in the hope that it will be useful, but WITHOUT ANY
   WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
   FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
   more details.

   Under Section 7 of GPL version 3, you are granted additional
   permissions described in the GCC Runtime Library Exception, version
   3.1, as published by the Free Software Foundation.

   You should have received a copy of the GNU General Public License and
   a copy of the GCC Runtime Library Exception along with this program;
   see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
   <http://www.gnu.org/licenses/>.  */

/* This file handles the LOOP (FOR/DO) construct.  */

#include <limits.h>
#include <stdlib.h>
#include "libgomp.h"

typedef unsigned long long gomp_ull;

/* Initialize the given work share construct from the given arguments.  */

static inline void
gomp_loop_ull_init (struct gomp_work_share *ws, bool up, gomp_ull start,
                    gomp_ull end, gomp_ull incr, enum gomp_schedule_type sched,
                    gomp_ull chunk_size)
{
  ws->sched = sched;
  ws->chunk_size_ull = chunk_size;
  /* Canonicalize loops that have zero iterations to ->next == ->end.  */
  ws->end_ull = ((up && start > end) || (!up && start < end))
                ? start : end;
  ws->incr_ull = incr;
  ws->next_ull = start;
  ws->mode = 0;
  if (sched == GFS_DYNAMIC)
    {
      ws->chunk_size_ull *= incr;

#if defined HAVE_SYNC_BUILTINS && defined __LP64__
      {
        /* For dynamic scheduling prepare things to make each iteration
           faster.  */
        struct gomp_thread *thr = gomp_thread ();
        struct gomp_team *team = thr->ts.team;
        long nthreads = team ? team->nthreads : 1;

        if (__builtin_expect (up, 1))
          {
            /* Cheap overflow protection.  */
            if (__builtin_expect ((nthreads | ws->chunk_size_ull)
                                  < 1ULL << (sizeof (gomp_ull)
                                             * __CHAR_BIT__ / 2 - 1), 1))
              ws->mode = ws->end_ull < (__LONG_LONG_MAX__ * 2ULL + 1
                                        - (nthreads + 1) * ws->chunk_size_ull);
          }
        /* Cheap overflow protection.  */
        else if (__builtin_expect ((nthreads | -ws->chunk_size_ull)
                                   < 1ULL << (sizeof (gomp_ull)
                                              * __CHAR_BIT__ / 2 - 1), 1))
          ws->mode = ws->end_ull > ((nthreads + 1) * -ws->chunk_size_ull
                                    - (__LONG_LONG_MAX__ * 2ULL + 1));
      }
#endif
    }
  if (!up)
    ws->mode |= 2;
}
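
/* Worked example of the "cheap overflow protection" above (illustrative
   numbers, not taken from the code): with a 64-bit gomp_ull the guard
   admits NTHREADS and the scaled chunk size only when both are below
   1ULL << 31.  Take nthreads == 8 and chunk_size_ull == 1000 on an
   upward loop: __LONG_LONG_MAX__ * 2ULL + 1 is ULLONG_MAX, so ws->mode
   becomes 1 whenever end_ull < ULLONG_MAX - 9000, i.e. whenever even
   (nthreads + 1) chunk grabs past end_ull cannot wrap the counter.  The
   dynamic iterator can then advance next_ull with a bare fetch-and-add
   instead of checking each increment for wraparound.  */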

/* The *_start routines are called when first encountering a loop construct
   that is not bound directly to a parallel construct.  The first thread
   that arrives will create the work-share construct; subsequent threads
   will see that the construct exists and allocate work from it.

   START, END, INCR are the bounds of the loop; due to the restrictions of
   OpenMP, these values must be the same in every thread.  This is not
   verified (nor is it entirely verifiable, since START is not necessarily
   retained intact in the work-share data structure).  CHUNK_SIZE is the
   scheduling parameter; again this must be identical in all threads.

   Returns true if there's any work for this thread to perform.  If so,
   *ISTART and *IEND are filled with the bounds of the iteration block
   allocated to this thread.  Returns false if all work was assigned to
   other threads prior to this thread's arrival.  */
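
/* Illustrative sketch (assumed compiler output shape, not code from this
   file): GCC lowers a loop such as

       #pragma omp for schedule(dynamic, 4)
       for (unsigned long long i = 0; i < n; i++)
         body (i);

   into roughly the following calls, where body stands in for the user's
   loop body; the exact sequence varies between GCC versions.

       gomp_ull istart, iend;
       if (GOMP_loop_ull_dynamic_start (true, 0, n, 1, 4, &istart, &iend))
         do
           for (gomp_ull i = istart; i < iend; i++)
             body (i);
         while (GOMP_loop_ull_dynamic_next (&istart, &iend));
       GOMP_loop_end ();  */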

static bool
gomp_loop_ull_static_start (bool up, gomp_ull start, gomp_ull end,
                            gomp_ull incr, gomp_ull chunk_size,
                            gomp_ull *istart, gomp_ull *iend)
{
  struct gomp_thread *thr = gomp_thread ();

  thr->ts.static_trip = 0;
  if (gomp_work_share_start (false))
    {
      gomp_loop_ull_init (thr->ts.work_share, up, start, end, incr,
                          GFS_STATIC, chunk_size);
      gomp_work_share_init_done ();
    }

  return !gomp_iter_ull_static_next (istart, iend);
}

static bool
gomp_loop_ull_dynamic_start (bool up, gomp_ull start, gomp_ull end,
                             gomp_ull incr, gomp_ull chunk_size,
                             gomp_ull *istart, gomp_ull *iend)
{
  struct gomp_thread *thr = gomp_thread ();
  bool ret;

  if (gomp_work_share_start (false))
    {
      gomp_loop_ull_init (thr->ts.work_share, up, start, end, incr,
                          GFS_DYNAMIC, chunk_size);
      gomp_work_share_init_done ();
    }

#if defined HAVE_SYNC_BUILTINS && defined __LP64__
  ret = gomp_iter_ull_dynamic_next (istart, iend);
#else
  gomp_mutex_lock (&thr->ts.work_share->lock);
  ret = gomp_iter_ull_dynamic_next_locked (istart, iend);
  gomp_mutex_unlock (&thr->ts.work_share->lock);
#endif

  return ret;
}

static bool
gomp_loop_ull_guided_start (bool up, gomp_ull start, gomp_ull end,
                            gomp_ull incr, gomp_ull chunk_size,
                            gomp_ull *istart, gomp_ull *iend)
{
  struct gomp_thread *thr = gomp_thread ();
  bool ret;

  if (gomp_work_share_start (false))
    {
      gomp_loop_ull_init (thr->ts.work_share, up, start, end, incr,
                          GFS_GUIDED, chunk_size);
      gomp_work_share_init_done ();
    }

#if defined HAVE_SYNC_BUILTINS && defined __LP64__
  ret = gomp_iter_ull_guided_next (istart, iend);
#else
  gomp_mutex_lock (&thr->ts.work_share->lock);
  ret = gomp_iter_ull_guided_next_locked (istart, iend);
  gomp_mutex_unlock (&thr->ts.work_share->lock);
#endif

  return ret;
}

bool
GOMP_loop_ull_runtime_start (bool up, gomp_ull start, gomp_ull end,
                             gomp_ull incr, gomp_ull *istart, gomp_ull *iend)
{
  struct gomp_task_icv *icv = gomp_icv (false);
  switch (icv->run_sched_var)
    {
    case GFS_STATIC:
      return gomp_loop_ull_static_start (up, start, end, incr,
                                         icv->run_sched_chunk_size,
                                         istart, iend);
    case GFS_DYNAMIC:
      return gomp_loop_ull_dynamic_start (up, start, end, incr,
                                          icv->run_sched_chunk_size,
                                          istart, iend);
    case GFS_GUIDED:
      return gomp_loop_ull_guided_start (up, start, end, incr,
                                         icv->run_sched_chunk_size,
                                         istart, iend);
    case GFS_AUTO:
      /* For now map to schedule(static); later on we could play with
         feedback-driven choice.  */
      return gomp_loop_ull_static_start (up, start, end, incr,
                                         0, istart, iend);
    default:
      abort ();
    }
}
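
/* Example of the dispatch above: running a program with

       OMP_SCHEDULE="guided,64" ./a.out

   sets run_sched_var to GFS_GUIDED and run_sched_chunk_size to 64, so
   GOMP_loop_ull_runtime_start forwards to gomp_loop_ull_guided_start.
   The same ICV can be set from code with
   omp_set_schedule (omp_sched_guided, 64).  */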

/* The *_ordered_*_start routines are similar.  The only difference is that
   this work-share construct is initialized to expect an ORDERED section.  */

static bool
gomp_loop_ull_ordered_static_start (bool up, gomp_ull start, gomp_ull end,
                                    gomp_ull incr, gomp_ull chunk_size,
                                    gomp_ull *istart, gomp_ull *iend)
{
  struct gomp_thread *thr = gomp_thread ();

  thr->ts.static_trip = 0;
  if (gomp_work_share_start (true))
    {
      gomp_loop_ull_init (thr->ts.work_share, up, start, end, incr,
                          GFS_STATIC, chunk_size);
      gomp_ordered_static_init ();
      gomp_work_share_init_done ();
    }

  return !gomp_iter_ull_static_next (istart, iend);
}

static bool
gomp_loop_ull_ordered_dynamic_start (bool up, gomp_ull start, gomp_ull end,
                                     gomp_ull incr, gomp_ull chunk_size,
                                     gomp_ull *istart, gomp_ull *iend)
{
  struct gomp_thread *thr = gomp_thread ();
  bool ret;

  if (gomp_work_share_start (true))
    {
      gomp_loop_ull_init (thr->ts.work_share, up, start, end, incr,
                          GFS_DYNAMIC, chunk_size);
      gomp_mutex_lock (&thr->ts.work_share->lock);
      gomp_work_share_init_done ();
    }
  else
    gomp_mutex_lock (&thr->ts.work_share->lock);

  ret = gomp_iter_ull_dynamic_next_locked (istart, iend);
  if (ret)
    gomp_ordered_first ();
  gomp_mutex_unlock (&thr->ts.work_share->lock);

  return ret;
}

static bool
gomp_loop_ull_ordered_guided_start (bool up, gomp_ull start, gomp_ull end,
                                    gomp_ull incr, gomp_ull chunk_size,
                                    gomp_ull *istart, gomp_ull *iend)
{
  struct gomp_thread *thr = gomp_thread ();
  bool ret;

  if (gomp_work_share_start (true))
    {
      gomp_loop_ull_init (thr->ts.work_share, up, start, end, incr,
                          GFS_GUIDED, chunk_size);
      gomp_mutex_lock (&thr->ts.work_share->lock);
      gomp_work_share_init_done ();
    }
  else
    gomp_mutex_lock (&thr->ts.work_share->lock);

  ret = gomp_iter_ull_guided_next_locked (istart, iend);
  if (ret)
    gomp_ordered_first ();
  gomp_mutex_unlock (&thr->ts.work_share->lock);

  return ret;
}

bool
GOMP_loop_ull_ordered_runtime_start (bool up, gomp_ull start, gomp_ull end,
                                     gomp_ull incr, gomp_ull *istart,
                                     gomp_ull *iend)
{
  struct gomp_task_icv *icv = gomp_icv (false);
  switch (icv->run_sched_var)
    {
    case GFS_STATIC:
      return gomp_loop_ull_ordered_static_start (up, start, end, incr,
                                                 icv->run_sched_chunk_size,
                                                 istart, iend);
    case GFS_DYNAMIC:
      return gomp_loop_ull_ordered_dynamic_start (up, start, end, incr,
                                                  icv->run_sched_chunk_size,
                                                  istart, iend);
    case GFS_GUIDED:
      return gomp_loop_ull_ordered_guided_start (up, start, end, incr,
                                                 icv->run_sched_chunk_size,
                                                 istart, iend);
    case GFS_AUTO:
      /* For now map to schedule(static); later on we could play with
         feedback-driven choice.  */
      return gomp_loop_ull_ordered_static_start (up, start, end, incr,
                                                 0, istart, iend);
    default:
      abort ();
    }
}
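
/* Illustrative sketch of an ordered lowering (assumed shape; exact
   codegen varies by GCC version): for

       #pragma omp for ordered schedule(dynamic)
       for (unsigned long long i = 0; i < n; i++)
         {
           compute (i);
       #pragma omp ordered
           emit (i);
         }

   the compiler emits roughly

       if (GOMP_loop_ull_ordered_dynamic_start (true, 0, n, 1, 1,
                                                &istart, &iend))
         do
           for (gomp_ull i = istart; i < iend; i++)
             {
               compute (i);
               GOMP_ordered_start ();
               emit (i);
               GOMP_ordered_end ();
             }
         while (GOMP_loop_ull_ordered_dynamic_next (&istart, &iend));
       GOMP_loop_end ();

   where compute and emit stand in for the user's loop body.  */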

/* The *_doacross_*_start routines are similar.  The only difference is that
   this work-share construct is initialized to expect an ORDERED(N) - DOACROSS
   section, and the worksharing loop always iterates from 0 to COUNTS[0] - 1;
   the remaining COUNTS array elements tell the library the number of
   iterations of the ordered inner loops.  */
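
/* Illustrative doacross example (assumed shape): for a loop such as

       #pragma omp for ordered(1)
       for (unsigned long long i = 1; i < n; i++)
         {
       #pragma omp ordered depend(sink: i - 1)
           consume (i - 1);
       #pragma omp ordered depend(source)
           produce (i);
         }

   the compiler passes ncounts == 1 and counts[0] == n - 1 (the logical
   iteration count) to one of the *_doacross_*_start routines, and the
   depend points are synchronized through GOMP_doacross_ull_wait and
   GOMP_doacross_ull_post on logical iteration numbers.  consume and
   produce stand in for the user's code.  */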

static bool
gomp_loop_ull_doacross_static_start (unsigned ncounts, gomp_ull *counts,
                                     gomp_ull chunk_size, gomp_ull *istart,
                                     gomp_ull *iend)
{
  struct gomp_thread *thr = gomp_thread ();

  thr->ts.static_trip = 0;
  if (gomp_work_share_start (false))
    {
      gomp_loop_ull_init (thr->ts.work_share, true, 0, counts[0], 1,
                          GFS_STATIC, chunk_size);
      gomp_doacross_ull_init (ncounts, counts, chunk_size);
      gomp_work_share_init_done ();
    }

  return !gomp_iter_ull_static_next (istart, iend);
}

static bool
gomp_loop_ull_doacross_dynamic_start (unsigned ncounts, gomp_ull *counts,
                                      gomp_ull chunk_size, gomp_ull *istart,
                                      gomp_ull *iend)
{
  struct gomp_thread *thr = gomp_thread ();
  bool ret;

  if (gomp_work_share_start (false))
    {
      gomp_loop_ull_init (thr->ts.work_share, true, 0, counts[0], 1,
                          GFS_DYNAMIC, chunk_size);
      gomp_doacross_ull_init (ncounts, counts, chunk_size);
      gomp_work_share_init_done ();
    }

#if defined HAVE_SYNC_BUILTINS && defined __LP64__
  ret = gomp_iter_ull_dynamic_next (istart, iend);
#else
  gomp_mutex_lock (&thr->ts.work_share->lock);
  ret = gomp_iter_ull_dynamic_next_locked (istart, iend);
  gomp_mutex_unlock (&thr->ts.work_share->lock);
#endif

  return ret;
}

static bool
gomp_loop_ull_doacross_guided_start (unsigned ncounts, gomp_ull *counts,
                                     gomp_ull chunk_size, gomp_ull *istart,
                                     gomp_ull *iend)
{
  struct gomp_thread *thr = gomp_thread ();
  bool ret;

  if (gomp_work_share_start (false))
    {
      gomp_loop_ull_init (thr->ts.work_share, true, 0, counts[0], 1,
                          GFS_GUIDED, chunk_size);
      gomp_doacross_ull_init (ncounts, counts, chunk_size);
      gomp_work_share_init_done ();
    }

#if defined HAVE_SYNC_BUILTINS && defined __LP64__
  ret = gomp_iter_ull_guided_next (istart, iend);
#else
  gomp_mutex_lock (&thr->ts.work_share->lock);
  ret = gomp_iter_ull_guided_next_locked (istart, iend);
  gomp_mutex_unlock (&thr->ts.work_share->lock);
#endif

  return ret;
}

bool
GOMP_loop_ull_doacross_runtime_start (unsigned ncounts, gomp_ull *counts,
                                      gomp_ull *istart, gomp_ull *iend)
{
  struct gomp_task_icv *icv = gomp_icv (false);
  switch (icv->run_sched_var)
    {
    case GFS_STATIC:
      return gomp_loop_ull_doacross_static_start (ncounts, counts,
                                                  icv->run_sched_chunk_size,
                                                  istart, iend);
    case GFS_DYNAMIC:
      return gomp_loop_ull_doacross_dynamic_start (ncounts, counts,
                                                   icv->run_sched_chunk_size,
                                                   istart, iend);
    case GFS_GUIDED:
      return gomp_loop_ull_doacross_guided_start (ncounts, counts,
                                                  icv->run_sched_chunk_size,
                                                  istart, iend);
    case GFS_AUTO:
      /* For now map to schedule(static); later on we could play with
         feedback-driven choice.  */
      return gomp_loop_ull_doacross_static_start (ncounts, counts,
                                                  0, istart, iend);
    default:
      abort ();
    }
}

/* The *_next routines are called when the thread completes processing of
   the iteration block currently assigned to it.  If the work-share
   construct is bound directly to a parallel construct, then the iteration
   bounds may have been set up before the parallel, in which case this
   may be the first iteration for the thread.

   Returns true if there is work remaining to be performed; *ISTART and
   *IEND are filled with a new iteration block.  Returns false if all work
   has been assigned.  */
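
/* Sketch of the parallel-bound case described above (assumed shape):
   when the work share was initialized before the team started, a
   thread's outlined function can begin directly with the *_next calls,
   e.g.

       gomp_ull istart, iend;
       while (GOMP_loop_ull_runtime_next (&istart, &iend))
         for (gomp_ull i = istart; i < iend; i++)
           body (i);

   so a thread's first block is handed out by the same routine that
   hands out every later block.  body stands in for the loop body.  */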

static bool
gomp_loop_ull_static_next (gomp_ull *istart, gomp_ull *iend)
{
  return !gomp_iter_ull_static_next (istart, iend);
}

static bool
gomp_loop_ull_dynamic_next (gomp_ull *istart, gomp_ull *iend)
{
  bool ret;

#if defined HAVE_SYNC_BUILTINS && defined __LP64__
  ret = gomp_iter_ull_dynamic_next (istart, iend);
#else
  struct gomp_thread *thr = gomp_thread ();
  gomp_mutex_lock (&thr->ts.work_share->lock);
  ret = gomp_iter_ull_dynamic_next_locked (istart, iend);
  gomp_mutex_unlock (&thr->ts.work_share->lock);
#endif

  return ret;
}

static bool
gomp_loop_ull_guided_next (gomp_ull *istart, gomp_ull *iend)
{
  bool ret;

#if defined HAVE_SYNC_BUILTINS && defined __LP64__
  ret = gomp_iter_ull_guided_next (istart, iend);
#else
  struct gomp_thread *thr = gomp_thread ();
  gomp_mutex_lock (&thr->ts.work_share->lock);
  ret = gomp_iter_ull_guided_next_locked (istart, iend);
  gomp_mutex_unlock (&thr->ts.work_share->lock);
#endif

  return ret;
}

bool
GOMP_loop_ull_runtime_next (gomp_ull *istart, gomp_ull *iend)
{
  struct gomp_thread *thr = gomp_thread ();

  switch (thr->ts.work_share->sched)
    {
    case GFS_STATIC:
    case GFS_AUTO:
      return gomp_loop_ull_static_next (istart, iend);
    case GFS_DYNAMIC:
      return gomp_loop_ull_dynamic_next (istart, iend);
    case GFS_GUIDED:
      return gomp_loop_ull_guided_next (istart, iend);
    default:
      abort ();
    }
}

/* The *_ordered_*_next routines are called when the thread completes
   processing of the iteration block currently assigned to it.

   Returns true if there is work remaining to be performed; *ISTART and
   *IEND are filled with a new iteration block.  Returns false if all work
   has been assigned.  */

static bool
gomp_loop_ull_ordered_static_next (gomp_ull *istart, gomp_ull *iend)
{
  struct gomp_thread *thr = gomp_thread ();
  int test;

  gomp_ordered_sync ();
  gomp_mutex_lock (&thr->ts.work_share->lock);
  test = gomp_iter_ull_static_next (istart, iend);
  if (test >= 0)
    gomp_ordered_static_next ();
  gomp_mutex_unlock (&thr->ts.work_share->lock);

  return test == 0;
}

static bool
gomp_loop_ull_ordered_dynamic_next (gomp_ull *istart, gomp_ull *iend)
{
  struct gomp_thread *thr = gomp_thread ();
  bool ret;

  gomp_ordered_sync ();
  gomp_mutex_lock (&thr->ts.work_share->lock);
  ret = gomp_iter_ull_dynamic_next_locked (istart, iend);
  if (ret)
    gomp_ordered_next ();
  else
    gomp_ordered_last ();
  gomp_mutex_unlock (&thr->ts.work_share->lock);

  return ret;
}

static bool
gomp_loop_ull_ordered_guided_next (gomp_ull *istart, gomp_ull *iend)
{
  struct gomp_thread *thr = gomp_thread ();
  bool ret;

  gomp_ordered_sync ();
  gomp_mutex_lock (&thr->ts.work_share->lock);
  ret = gomp_iter_ull_guided_next_locked (istart, iend);
  if (ret)
    gomp_ordered_next ();
  else
    gomp_ordered_last ();
  gomp_mutex_unlock (&thr->ts.work_share->lock);

  return ret;
}

bool
GOMP_loop_ull_ordered_runtime_next (gomp_ull *istart, gomp_ull *iend)
{
  struct gomp_thread *thr = gomp_thread ();

  switch (thr->ts.work_share->sched)
    {
    case GFS_STATIC:
    case GFS_AUTO:
      return gomp_loop_ull_ordered_static_next (istart, iend);
    case GFS_DYNAMIC:
      return gomp_loop_ull_ordered_dynamic_next (istart, iend);
    case GFS_GUIDED:
      return gomp_loop_ull_ordered_guided_next (istart, iend);
    default:
      abort ();
    }
}

/* We use static functions above so that we're sure that the "runtime"
   function can defer to the proper routine without interposition.  We
   export the static function with a strong alias when possible, or with
   a wrapper function otherwise.  */

#ifdef HAVE_ATTRIBUTE_ALIAS
extern __typeof(gomp_loop_ull_static_start) GOMP_loop_ull_static_start
        __attribute__((alias ("gomp_loop_ull_static_start")));
extern __typeof(gomp_loop_ull_dynamic_start) GOMP_loop_ull_dynamic_start
        __attribute__((alias ("gomp_loop_ull_dynamic_start")));
extern __typeof(gomp_loop_ull_guided_start) GOMP_loop_ull_guided_start
        __attribute__((alias ("gomp_loop_ull_guided_start")));
extern __typeof(gomp_loop_ull_dynamic_start) GOMP_loop_ull_nonmonotonic_dynamic_start
        __attribute__((alias ("gomp_loop_ull_dynamic_start")));
extern __typeof(gomp_loop_ull_guided_start) GOMP_loop_ull_nonmonotonic_guided_start
        __attribute__((alias ("gomp_loop_ull_guided_start")));

extern __typeof(gomp_loop_ull_ordered_static_start) GOMP_loop_ull_ordered_static_start
        __attribute__((alias ("gomp_loop_ull_ordered_static_start")));
extern __typeof(gomp_loop_ull_ordered_dynamic_start) GOMP_loop_ull_ordered_dynamic_start
        __attribute__((alias ("gomp_loop_ull_ordered_dynamic_start")));
extern __typeof(gomp_loop_ull_ordered_guided_start) GOMP_loop_ull_ordered_guided_start
        __attribute__((alias ("gomp_loop_ull_ordered_guided_start")));

extern __typeof(gomp_loop_ull_doacross_static_start) GOMP_loop_ull_doacross_static_start
        __attribute__((alias ("gomp_loop_ull_doacross_static_start")));
extern __typeof(gomp_loop_ull_doacross_dynamic_start) GOMP_loop_ull_doacross_dynamic_start
        __attribute__((alias ("gomp_loop_ull_doacross_dynamic_start")));
extern __typeof(gomp_loop_ull_doacross_guided_start) GOMP_loop_ull_doacross_guided_start
        __attribute__((alias ("gomp_loop_ull_doacross_guided_start")));

extern __typeof(gomp_loop_ull_static_next) GOMP_loop_ull_static_next
        __attribute__((alias ("gomp_loop_ull_static_next")));
extern __typeof(gomp_loop_ull_dynamic_next) GOMP_loop_ull_dynamic_next
        __attribute__((alias ("gomp_loop_ull_dynamic_next")));
extern __typeof(gomp_loop_ull_guided_next) GOMP_loop_ull_guided_next
        __attribute__((alias ("gomp_loop_ull_guided_next")));
extern __typeof(gomp_loop_ull_dynamic_next) GOMP_loop_ull_nonmonotonic_dynamic_next
        __attribute__((alias ("gomp_loop_ull_dynamic_next")));
extern __typeof(gomp_loop_ull_guided_next) GOMP_loop_ull_nonmonotonic_guided_next
        __attribute__((alias ("gomp_loop_ull_guided_next")));

extern __typeof(gomp_loop_ull_ordered_static_next) GOMP_loop_ull_ordered_static_next
        __attribute__((alias ("gomp_loop_ull_ordered_static_next")));
extern __typeof(gomp_loop_ull_ordered_dynamic_next) GOMP_loop_ull_ordered_dynamic_next
        __attribute__((alias ("gomp_loop_ull_ordered_dynamic_next")));
extern __typeof(gomp_loop_ull_ordered_guided_next) GOMP_loop_ull_ordered_guided_next
        __attribute__((alias ("gomp_loop_ull_ordered_guided_next")));
#else
bool
GOMP_loop_ull_static_start (bool up, gomp_ull start, gomp_ull end,
                            gomp_ull incr, gomp_ull chunk_size,
                            gomp_ull *istart, gomp_ull *iend)
{
  return gomp_loop_ull_static_start (up, start, end, incr, chunk_size, istart,
                                     iend);
}

bool
GOMP_loop_ull_dynamic_start (bool up, gomp_ull start, gomp_ull end,
                             gomp_ull incr, gomp_ull chunk_size,
                             gomp_ull *istart, gomp_ull *iend)
{
  return gomp_loop_ull_dynamic_start (up, start, end, incr, chunk_size, istart,
                                      iend);
}

bool
GOMP_loop_ull_guided_start (bool up, gomp_ull start, gomp_ull end,
                            gomp_ull incr, gomp_ull chunk_size,
                            gomp_ull *istart, gomp_ull *iend)
{
  return gomp_loop_ull_guided_start (up, start, end, incr, chunk_size, istart,
                                     iend);
}

bool
GOMP_loop_ull_nonmonotonic_dynamic_start (bool up, gomp_ull start,
                                          gomp_ull end, gomp_ull incr,
                                          gomp_ull chunk_size,
                                          gomp_ull *istart, gomp_ull *iend)
{
  return gomp_loop_ull_dynamic_start (up, start, end, incr, chunk_size, istart,
                                      iend);
}

bool
GOMP_loop_ull_nonmonotonic_guided_start (bool up, gomp_ull start, gomp_ull end,
                                         gomp_ull incr, gomp_ull chunk_size,
                                         gomp_ull *istart, gomp_ull *iend)
{
  return gomp_loop_ull_guided_start (up, start, end, incr, chunk_size, istart,
                                     iend);
}

bool
GOMP_loop_ull_ordered_static_start (bool up, gomp_ull start, gomp_ull end,
                                    gomp_ull incr, gomp_ull chunk_size,
                                    gomp_ull *istart, gomp_ull *iend)
{
  return gomp_loop_ull_ordered_static_start (up, start, end, incr, chunk_size,
                                             istart, iend);
}

bool
GOMP_loop_ull_ordered_dynamic_start (bool up, gomp_ull start, gomp_ull end,
                                     gomp_ull incr, gomp_ull chunk_size,
                                     gomp_ull *istart, gomp_ull *iend)
{
  return gomp_loop_ull_ordered_dynamic_start (up, start, end, incr, chunk_size,
                                              istart, iend);
}

bool
GOMP_loop_ull_ordered_guided_start (bool up, gomp_ull start, gomp_ull end,
                                    gomp_ull incr, gomp_ull chunk_size,
                                    gomp_ull *istart, gomp_ull *iend)
{
  return gomp_loop_ull_ordered_guided_start (up, start, end, incr, chunk_size,
                                             istart, iend);
}

bool
GOMP_loop_ull_doacross_static_start (unsigned ncounts, gomp_ull *counts,
                                     gomp_ull chunk_size, gomp_ull *istart,
                                     gomp_ull *iend)
{
  return gomp_loop_ull_doacross_static_start (ncounts, counts, chunk_size,
                                              istart, iend);
}

bool
GOMP_loop_ull_doacross_dynamic_start (unsigned ncounts, gomp_ull *counts,
                                      gomp_ull chunk_size, gomp_ull *istart,
                                      gomp_ull *iend)
{
  return gomp_loop_ull_doacross_dynamic_start (ncounts, counts, chunk_size,
                                               istart, iend);
}

bool
GOMP_loop_ull_doacross_guided_start (unsigned ncounts, gomp_ull *counts,
                                     gomp_ull chunk_size, gomp_ull *istart,
                                     gomp_ull *iend)
{
  return gomp_loop_ull_doacross_guided_start (ncounts, counts, chunk_size,
                                              istart, iend);
}

bool
GOMP_loop_ull_static_next (gomp_ull *istart, gomp_ull *iend)
{
  return gomp_loop_ull_static_next (istart, iend);
}

bool
GOMP_loop_ull_dynamic_next (gomp_ull *istart, gomp_ull *iend)
{
  return gomp_loop_ull_dynamic_next (istart, iend);
}

bool
GOMP_loop_ull_guided_next (gomp_ull *istart, gomp_ull *iend)
{
  return gomp_loop_ull_guided_next (istart, iend);
}

bool
GOMP_loop_ull_nonmonotonic_dynamic_next (gomp_ull *istart, gomp_ull *iend)
{
  return gomp_loop_ull_dynamic_next (istart, iend);
}

bool
GOMP_loop_ull_nonmonotonic_guided_next (gomp_ull *istart, gomp_ull *iend)
{
  return gomp_loop_ull_guided_next (istart, iend);
}

bool
GOMP_loop_ull_ordered_static_next (gomp_ull *istart, gomp_ull *iend)
{
  return gomp_loop_ull_ordered_static_next (istart, iend);
}

bool
GOMP_loop_ull_ordered_dynamic_next (gomp_ull *istart, gomp_ull *iend)
{
  return gomp_loop_ull_ordered_dynamic_next (istart, iend);
}

bool
GOMP_loop_ull_ordered_guided_next (gomp_ull *istart, gomp_ull *iend)
{
  return gomp_loop_ull_ordered_guided_next (istart, iend);
}
#endif