]> git.ipfire.org Git - thirdparty/gcc.git/blob - gcc/omp-general.c
[arm] Fix testsuite nit when compiling for thumb2
[thirdparty/gcc.git] / gcc / omp-general.c
/* General types and functions that are useful for processing of OpenMP,
   OpenACC and similar directives at various stages of compilation.
3
4 Copyright (C) 2005-2019 Free Software Foundation, Inc.
5
6 This file is part of GCC.
7
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 3, or (at your option) any later
11 version.
12
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
21
/* See omp_find_clause below: find an OMP clause of type KIND within
   CLAUSES.  */
23
24 #include "config.h"
25 #include "system.h"
26 #include "coretypes.h"
27 #include "backend.h"
28 #include "target.h"
29 #include "tree.h"
30 #include "gimple.h"
31 #include "ssa.h"
32 #include "diagnostic-core.h"
33 #include "fold-const.h"
34 #include "langhooks.h"
35 #include "omp-general.h"
36 #include "stringpool.h"
37 #include "attribs.h"
38
39 enum omp_requires omp_requires_mask;
40
41 tree
42 omp_find_clause (tree clauses, enum omp_clause_code kind)
43 {
44 for (; clauses ; clauses = OMP_CLAUSE_CHAIN (clauses))
45 if (OMP_CLAUSE_CODE (clauses) == kind)
46 return clauses;
47
48 return NULL_TREE;
49 }
50
51 /* True if OpenMP should regard this DECL as being a scalar which has Fortran's
52 allocatable or pointer attribute. */
53 bool
54 omp_is_allocatable_or_ptr (tree decl)
55 {
56 return lang_hooks.decls.omp_is_allocatable_or_ptr (decl);
57 }
58
59 /* Return true if DECL is a Fortran optional argument. */
60
61 bool
62 omp_is_optional_argument (tree decl)
63 {
64 return lang_hooks.decls.omp_is_optional_argument (decl);
65 }
66
67 /* Return true if DECL is a reference type. */
68
69 bool
70 omp_is_reference (tree decl)
71 {
72 return lang_hooks.decls.omp_privatize_by_reference (decl);
73 }
74
/* Adjust *COND_CODE and *N2 so that the former is either LT_EXPR or GT_EXPR,
   given that V is the loop index variable and STEP is loop step.

   NE_EXPR is canonicalized by inspecting the (constant) STEP: +1 (or, for
   pointer iteration, + element size) counts up and becomes LT_EXPR; the
   negated step becomes GT_EXPR.  LE_EXPR / GE_EXPR are made strict by
   bumping *N2 by one (element).  */

void
omp_adjust_for_condition (location_t loc, enum tree_code *cond_code, tree *n2,
			  tree v, tree step)
{
  switch (*cond_code)
    {
    case LT_EXPR:
    case GT_EXPR:
      /* Already canonical.  */
      break;

    case NE_EXPR:
      /* The front ends only produce != with a constant unit step, hence
	 the asserts below.  */
      gcc_assert (TREE_CODE (step) == INTEGER_CST);
      if (TREE_CODE (TREE_TYPE (v)) == INTEGER_TYPE)
	{
	  if (integer_onep (step))
	    *cond_code = LT_EXPR;
	  else
	    {
	      gcc_assert (integer_minus_onep (step));
	      *cond_code = GT_EXPR;
	    }
	}
      else
	{
	  /* Pointer iteration: STEP must be +/- the size of the
	     pointed-to type.  */
	  tree unit = TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (v)));
	  gcc_assert (TREE_CODE (unit) == INTEGER_CST);
	  if (tree_int_cst_equal (unit, step))
	    *cond_code = LT_EXPR;
	  else
	    {
	      gcc_assert (wi::neg (wi::to_widest (unit))
			  == wi::to_widest (step));
	      *cond_code = GT_EXPR;
	    }
	}

      break;

    case LE_EXPR:
      /* V <= N2 becomes V < N2 + 1.  */
      if (POINTER_TYPE_P (TREE_TYPE (*n2)))
	*n2 = fold_build_pointer_plus_hwi_loc (loc, *n2, 1);
      else
	*n2 = fold_build2_loc (loc, PLUS_EXPR, TREE_TYPE (*n2), *n2,
			       build_int_cst (TREE_TYPE (*n2), 1));
      *cond_code = LT_EXPR;
      break;
    case GE_EXPR:
      /* V >= N2 becomes V > N2 - 1.  */
      if (POINTER_TYPE_P (TREE_TYPE (*n2)))
	*n2 = fold_build_pointer_plus_hwi_loc (loc, *n2, -1);
      else
	*n2 = fold_build2_loc (loc, MINUS_EXPR, TREE_TYPE (*n2), *n2,
			       build_int_cst (TREE_TYPE (*n2), 1));
      *cond_code = GT_EXPR;
      break;
    default:
      gcc_unreachable ();
    }
}
136
137 /* Return the looping step from INCR, extracted from the step of a gimple omp
138 for statement. */
139
140 tree
141 omp_get_for_step_from_incr (location_t loc, tree incr)
142 {
143 tree step;
144 switch (TREE_CODE (incr))
145 {
146 case PLUS_EXPR:
147 step = TREE_OPERAND (incr, 1);
148 break;
149 case POINTER_PLUS_EXPR:
150 step = fold_convert (ssizetype, TREE_OPERAND (incr, 1));
151 break;
152 case MINUS_EXPR:
153 step = TREE_OPERAND (incr, 1);
154 step = fold_build1_loc (loc, NEGATE_EXPR, TREE_TYPE (step), step);
155 break;
156 default:
157 gcc_unreachable ();
158 }
159 return step;
160 }
161
/* Extract the header elements of parallel loop FOR_STMT and store
   them into *FD.

   *FD receives the canonicalized bounds, step and condition of each
   dimension plus all schedule/clause information.  LOOPS, when non-NULL,
   supplies per-dimension storage for collapsed or ordered loop nests.  */

void
omp_extract_for_data (gomp_for *for_stmt, struct omp_for_data *fd,
		      struct omp_for_data_loop *loops)
{
  tree t, var, *collapse_iter, *collapse_count;
  tree count = NULL_TREE, iter_type = long_integer_type_node;
  struct omp_for_data_loop *loop;
  int i;
  struct omp_for_data_loop dummy_loop;
  location_t loc = gimple_location (for_stmt);
  bool simd = gimple_omp_for_kind (for_stmt) == GF_OMP_FOR_KIND_SIMD;
  bool distribute = gimple_omp_for_kind (for_stmt)
		    == GF_OMP_FOR_KIND_DISTRIBUTE;
  bool taskloop = gimple_omp_for_kind (for_stmt)
		  == GF_OMP_FOR_KIND_TASKLOOP;
  tree iterv, countv;

  /* Establish defaults before scanning the clause chain.  */
  fd->for_stmt = for_stmt;
  fd->pre = NULL;
  fd->have_nowait = distribute || simd;
  fd->have_ordered = false;
  fd->have_reductemp = false;
  fd->have_pointer_condtemp = false;
  fd->have_scantemp = false;
  fd->have_nonctrl_scantemp = false;
  fd->lastprivate_conditional = 0;
  fd->tiling = NULL_TREE;
  fd->collapse = 1;
  fd->ordered = 0;
  fd->sched_kind = OMP_CLAUSE_SCHEDULE_STATIC;
  fd->sched_modifiers = 0;
  fd->chunk_size = NULL_TREE;
  fd->simd_schedule = false;
  collapse_iter = NULL;
  collapse_count = NULL;

  /* Single pass over the clauses, recording everything relevant.  */
  for (t = gimple_omp_for_clauses (for_stmt); t ; t = OMP_CLAUSE_CHAIN (t))
    switch (OMP_CLAUSE_CODE (t))
      {
      case OMP_CLAUSE_NOWAIT:
	fd->have_nowait = true;
	break;
      case OMP_CLAUSE_ORDERED:
	fd->have_ordered = true;
	if (OMP_CLAUSE_ORDERED_EXPR (t))
	  fd->ordered = tree_to_shwi (OMP_CLAUSE_ORDERED_EXPR (t));
	break;
      case OMP_CLAUSE_SCHEDULE:
	gcc_assert (!distribute && !taskloop);
	/* Kind and modifier bits share one value; split them here.  */
	fd->sched_kind
	  = (enum omp_clause_schedule_kind)
	    (OMP_CLAUSE_SCHEDULE_KIND (t) & OMP_CLAUSE_SCHEDULE_MASK);
	fd->sched_modifiers = (OMP_CLAUSE_SCHEDULE_KIND (t)
			       & ~OMP_CLAUSE_SCHEDULE_MASK);
	fd->chunk_size = OMP_CLAUSE_SCHEDULE_CHUNK_EXPR (t);
	fd->simd_schedule = OMP_CLAUSE_SCHEDULE_SIMD (t);
	break;
      case OMP_CLAUSE_DIST_SCHEDULE:
	gcc_assert (distribute);
	fd->chunk_size = OMP_CLAUSE_DIST_SCHEDULE_CHUNK_EXPR (t);
	break;
      case OMP_CLAUSE_COLLAPSE:
	fd->collapse = tree_to_shwi (OMP_CLAUSE_COLLAPSE_EXPR (t));
	if (fd->collapse > 1)
	  {
	    collapse_iter = &OMP_CLAUSE_COLLAPSE_ITERVAR (t);
	    collapse_count = &OMP_CLAUSE_COLLAPSE_COUNT (t);
	  }
	break;
      case OMP_CLAUSE_TILE:
	/* Tiling implies collapsing over the full tile list.  */
	fd->tiling = OMP_CLAUSE_TILE_LIST (t);
	fd->collapse = list_length (fd->tiling);
	gcc_assert (fd->collapse);
	collapse_iter = &OMP_CLAUSE_TILE_ITERVAR (t);
	collapse_count = &OMP_CLAUSE_TILE_COUNT (t);
	break;
      case OMP_CLAUSE__REDUCTEMP_:
	fd->have_reductemp = true;
	break;
      case OMP_CLAUSE_LASTPRIVATE:
	if (OMP_CLAUSE_LASTPRIVATE_CONDITIONAL (t))
	  fd->lastprivate_conditional++;
	break;
      case OMP_CLAUSE__CONDTEMP_:
	if (POINTER_TYPE_P (TREE_TYPE (OMP_CLAUSE_DECL (t))))
	  fd->have_pointer_condtemp = true;
	break;
      case OMP_CLAUSE__SCANTEMP_:
	fd->have_scantemp = true;
	if (!OMP_CLAUSE__SCANTEMP__ALLOC (t)
	    && !OMP_CLAUSE__SCANTEMP__CONTROL (t))
	  fd->have_nonctrl_scantemp = true;
	break;
      default:
	break;
      }

  /* Collapsed/tiled nests use the caller's LOOPS array; the common
     single-loop case stores the data directly in FD.  */
  if (fd->collapse > 1 || fd->tiling)
    fd->loops = loops;
  else
    fd->loops = &fd->loop;

  if (fd->ordered && fd->collapse == 1 && loops != NULL)
    {
      fd->loops = loops;
      iterv = NULL_TREE;
      countv = NULL_TREE;
      collapse_iter = &iterv;
      collapse_count = &countv;
    }

  /* FIXME: for now map schedule(auto) to schedule(static).
     There should be analysis to determine whether all iterations
     are approximately the same amount of work (then schedule(static)
     is best) or if it varies (then schedule(dynamic,N) is better).  */
  if (fd->sched_kind == OMP_CLAUSE_SCHEDULE_AUTO)
    {
      fd->sched_kind = OMP_CLAUSE_SCHEDULE_STATIC;
      gcc_assert (fd->chunk_size == NULL);
    }
  gcc_assert ((fd->collapse == 1 && !fd->tiling) || collapse_iter != NULL);
  if (taskloop)
    fd->sched_kind = OMP_CLAUSE_SCHEDULE_RUNTIME;
  if (fd->sched_kind == OMP_CLAUSE_SCHEDULE_RUNTIME)
    gcc_assert (fd->chunk_size == NULL);
  else if (fd->chunk_size == NULL)
    {
      /* We only need to compute a default chunk size for ordered
	 static loops and dynamic loops.  */
      if (fd->sched_kind != OMP_CLAUSE_SCHEDULE_STATIC
	  || fd->have_ordered)
	fd->chunk_size = (fd->sched_kind == OMP_CLAUSE_SCHEDULE_STATIC)
			 ? integer_zero_node : integer_one_node;
    }

  /* Canonicalize each dimension: condition to LT/GT, extract the step,
     widen the iteration type as needed and, when possible, fold the
     total iteration count to a constant.  */
  int cnt = fd->ordered ? fd->ordered : fd->collapse;
  for (i = 0; i < cnt; i++)
    {
      if (i == 0
	  && fd->collapse == 1
	  && !fd->tiling
	  && (fd->ordered == 0 || loops == NULL))
	loop = &fd->loop;
      else if (loops != NULL)
	loop = loops + i;
      else
	loop = &dummy_loop;

      loop->v = gimple_omp_for_index (for_stmt, i);
      gcc_assert (SSA_VAR_P (loop->v));
      gcc_assert (TREE_CODE (TREE_TYPE (loop->v)) == INTEGER_TYPE
		  || TREE_CODE (TREE_TYPE (loop->v)) == POINTER_TYPE);
      var = TREE_CODE (loop->v) == SSA_NAME ? SSA_NAME_VAR (loop->v) : loop->v;
      loop->n1 = gimple_omp_for_initial (for_stmt, i);

      loop->cond_code = gimple_omp_for_cond (for_stmt, i);
      loop->n2 = gimple_omp_for_final (for_stmt, i);
      /* OpenACC loops never use a != condition.  */
      gcc_assert (loop->cond_code != NE_EXPR
		  || (gimple_omp_for_kind (for_stmt)
		      != GF_OMP_FOR_KIND_OACC_LOOP));

      t = gimple_omp_for_incr (for_stmt, i);
      gcc_assert (TREE_OPERAND (t, 0) == var);
      loop->step = omp_get_for_step_from_incr (loc, t);

      omp_adjust_for_condition (loc, &loop->cond_code, &loop->n2, loop->v,
				loop->step);

      /* Choose ITER_TYPE.  SIMD and unordered static schedules can keep
	 the index variable's own type (widened across a collapse);
	 everything else must fit the runtime's long/ulonglong API.  */
      if (simd
	  || (fd->sched_kind == OMP_CLAUSE_SCHEDULE_STATIC
	      && !fd->have_ordered))
	{
	  if (fd->collapse == 1 && !fd->tiling)
	    iter_type = TREE_TYPE (loop->v);
	  else if (i == 0
		   || TYPE_PRECISION (iter_type)
		      < TYPE_PRECISION (TREE_TYPE (loop->v)))
	    iter_type
	      = build_nonstandard_integer_type
		  (TYPE_PRECISION (TREE_TYPE (loop->v)), 1);
	}
      else if (iter_type != long_long_unsigned_type_node)
	{
	  if (POINTER_TYPE_P (TREE_TYPE (loop->v)))
	    iter_type = long_long_unsigned_type_node;
	  else if (TYPE_UNSIGNED (TREE_TYPE (loop->v))
		   && TYPE_PRECISION (TREE_TYPE (loop->v))
		      >= TYPE_PRECISION (iter_type))
	    {
	      /* An unsigned index at least as wide as ITER_TYPE may
		 overflow it; widen unless the bound is provably small.  */
	      tree n;

	      if (loop->cond_code == LT_EXPR)
		n = fold_build2_loc (loc, PLUS_EXPR, TREE_TYPE (loop->v),
				     loop->n2, loop->step);
	      else
		n = loop->n1;
	      if (TREE_CODE (n) != INTEGER_CST
		  || tree_int_cst_lt (TYPE_MAX_VALUE (iter_type), n))
		iter_type = long_long_unsigned_type_node;
	    }
	  else if (TYPE_PRECISION (TREE_TYPE (loop->v))
		   > TYPE_PRECISION (iter_type))
	    {
	      /* Wider signed index: widen unless both ends provably fit.  */
	      tree n1, n2;

	      if (loop->cond_code == LT_EXPR)
		{
		  n1 = loop->n1;
		  n2 = fold_build2_loc (loc, PLUS_EXPR, TREE_TYPE (loop->v),
					loop->n2, loop->step);
		}
	      else
		{
		  n1 = fold_build2_loc (loc, MINUS_EXPR, TREE_TYPE (loop->v),
					loop->n2, loop->step);
		  n2 = loop->n1;
		}
	      if (TREE_CODE (n1) != INTEGER_CST
		  || TREE_CODE (n2) != INTEGER_CST
		  || !tree_int_cst_lt (TYPE_MIN_VALUE (iter_type), n1)
		  || !tree_int_cst_lt (n2, TYPE_MAX_VALUE (iter_type)))
		iter_type = long_long_unsigned_type_node;
	    }
	}

      if (i >= fd->collapse)
	continue;

      /* Accumulate a compile-time product of the per-dimension iteration
	 counts into COUNT when all bounds and steps are constants.  */
      if (collapse_count && *collapse_count == NULL)
	{
	  t = fold_binary (loop->cond_code, boolean_type_node,
			   fold_convert (TREE_TYPE (loop->v), loop->n1),
			   fold_convert (TREE_TYPE (loop->v), loop->n2));
	  if (t && integer_zerop (t))
	    count = build_zero_cst (long_long_unsigned_type_node);
	  else if ((i == 0 || count != NULL_TREE)
		   && TREE_CODE (TREE_TYPE (loop->v)) == INTEGER_TYPE
		   && TREE_CONSTANT (loop->n1)
		   && TREE_CONSTANT (loop->n2)
		   && TREE_CODE (loop->step) == INTEGER_CST)
	    {
	      tree itype = TREE_TYPE (loop->v);

	      if (POINTER_TYPE_P (itype))
		itype = signed_type_for (itype);
	      /* Rounded-up count: (n2 - n1 + step -/+ 1) / step.  */
	      t = build_int_cst (itype, (loop->cond_code == LT_EXPR ? -1 : 1));
	      t = fold_build2_loc (loc, PLUS_EXPR, itype,
				   fold_convert_loc (loc, itype, loop->step),
				   t);
	      t = fold_build2_loc (loc, PLUS_EXPR, itype, t,
				   fold_convert_loc (loc, itype, loop->n2));
	      t = fold_build2_loc (loc, MINUS_EXPR, itype, t,
				   fold_convert_loc (loc, itype, loop->n1));
	      if (TYPE_UNSIGNED (itype) && loop->cond_code == GT_EXPR)
		{
		  /* Negate both operands so the unsigned division rounds
		     the downward-counting loop correctly.  */
		  tree step = fold_convert_loc (loc, itype, loop->step);
		  t = fold_build2_loc (loc, TRUNC_DIV_EXPR, itype,
				       fold_build1_loc (loc, NEGATE_EXPR,
							itype, t),
				       fold_build1_loc (loc, NEGATE_EXPR,
							itype, step));
		}
	      else
		t = fold_build2_loc (loc, TRUNC_DIV_EXPR, itype, t,
				     fold_convert_loc (loc, itype,
						       loop->step));
	      t = fold_convert_loc (loc, long_long_unsigned_type_node, t);
	      if (count != NULL_TREE)
		count = fold_build2_loc (loc, MULT_EXPR,
					 long_long_unsigned_type_node,
					 count, t);
	      else
		count = t;
	      if (TREE_CODE (count) != INTEGER_CST)
		count = NULL_TREE;
	    }
	  else if (count && !integer_zerop (count))
	    count = NULL_TREE;
	}
    }

  /* Settle the final iteration type, preferring long when the constant
     count is known to fit.  */
  if (count
      && !simd
      && (fd->sched_kind != OMP_CLAUSE_SCHEDULE_STATIC
	  || fd->have_ordered))
    {
      if (!tree_int_cst_lt (count, TYPE_MAX_VALUE (long_integer_type_node)))
	iter_type = long_long_unsigned_type_node;
      else
	iter_type = long_integer_type_node;
    }
  else if (collapse_iter && *collapse_iter != NULL)
    iter_type = TREE_TYPE (*collapse_iter);
  fd->iter_type = iter_type;
  if (collapse_iter && *collapse_iter == NULL)
    *collapse_iter = create_tmp_var (iter_type, ".iter");
  if (collapse_count && *collapse_count == NULL)
    {
      if (count)
	*collapse_count = fold_convert_loc (loc, iter_type, count);
      else
	*collapse_count = create_tmp_var (iter_type, ".count");
    }

  /* Synthesize the single logical loop 0 <= .iter < .count that drives a
     collapsed or ordered nest.  */
  if (fd->collapse > 1 || fd->tiling || (fd->ordered && loops))
    {
      fd->loop.v = *collapse_iter;
      fd->loop.n1 = build_int_cst (TREE_TYPE (fd->loop.v), 0);
      fd->loop.n2 = *collapse_count;
      fd->loop.step = build_int_cst (TREE_TYPE (fd->loop.v), 1);
      fd->loop.cond_code = LT_EXPR;
    }
  else if (loops)
    loops[0] = fd->loop;
}
480
481 /* Build a call to GOMP_barrier. */
482
483 gimple *
484 omp_build_barrier (tree lhs)
485 {
486 tree fndecl = builtin_decl_explicit (lhs ? BUILT_IN_GOMP_BARRIER_CANCEL
487 : BUILT_IN_GOMP_BARRIER);
488 gcall *g = gimple_build_call (fndecl, 0);
489 if (lhs)
490 gimple_call_set_lhs (g, lhs);
491 return g;
492 }
493
/* Return maximum possible vectorization factor for the target.  */

poly_uint64
omp_max_vf (void)
{
  /* No vectorization when optimization is off, debug-friendly, loop
     optimization is disabled, or loop vectorization was explicitly
     disabled on the command line (the global_options_set check).  */
  if (!optimize
      || optimize_debug
      || !flag_tree_loop_optimize
      || (!flag_tree_loop_vectorize
	  && global_options_set.x_flag_tree_loop_vectorize))
    return 1;

  /* Prefer the target's advertised autovectorization sizes; return the
     largest of them.  */
  auto_vector_sizes sizes;
  targetm.vectorize.autovectorize_vector_sizes (&sizes, true);
  if (!sizes.is_empty ())
    {
      poly_uint64 vf = 0;
      for (unsigned int i = 0; i < sizes.length (); ++i)
	vf = ordered_max (vf, sizes[i]);
      return vf;
    }

  /* Otherwise fall back to the preferred SIMD mode for QImode, when it
     really is an integer vector mode.  */
  machine_mode vqimode = targetm.vectorize.preferred_simd_mode (QImode);
  if (GET_MODE_CLASS (vqimode) == MODE_VECTOR_INT)
    return GET_MODE_NUNITS (vqimode);

  return 1;
}
522
523 /* Return maximum SIMT width if offloading may target SIMT hardware. */
524
525 int
526 omp_max_simt_vf (void)
527 {
528 if (!optimize)
529 return 0;
530 if (ENABLE_OFFLOADING)
531 for (const char *c = getenv ("OFFLOAD_TARGET_NAMES"); c;)
532 {
533 if (!strncmp (c, "nvptx", strlen ("nvptx")))
534 return 32;
535 else if ((c = strchr (c, ',')))
536 c++;
537 }
538 return 0;
539 }
540
541 /* Encode an oacc launch argument. This matches the GOMP_LAUNCH_PACK
542 macro on gomp-constants.h. We do not check for overflow. */
543
544 tree
545 oacc_launch_pack (unsigned code, tree device, unsigned op)
546 {
547 tree res;
548
549 res = build_int_cst (unsigned_type_node, GOMP_LAUNCH_PACK (code, 0, op));
550 if (device)
551 {
552 device = fold_build2 (LSHIFT_EXPR, unsigned_type_node,
553 device, build_int_cst (unsigned_type_node,
554 GOMP_LAUNCH_DEVICE_SHIFT));
555 res = fold_build2 (BIT_IOR_EXPR, unsigned_type_node, res, device);
556 }
557 return res;
558 }
559
560 /* FIXME: What is the following comment for? */
561 /* Look for compute grid dimension clauses and convert to an attribute
562 attached to FN. This permits the target-side code to (a) massage
563 the dimensions, (b) emit that data and (c) optimize. Non-constant
564 dimensions are pushed onto ARGS.
565
566 The attribute value is a TREE_LIST. A set of dimensions is
567 represented as a list of INTEGER_CST. Those that are runtime
568 exprs are represented as an INTEGER_CST of zero.
569
570 TODO: Normally the attribute will just contain a single such list. If
571 however it contains a list of lists, this will represent the use of
572 device_type. Each member of the outer list is an assoc list of
573 dimensions, keyed by the device type. The first entry will be the
574 default. Well, that's the plan. */
575
576 /* Replace any existing oacc fn attribute with updated dimensions. */
577
578 /* Variant working on a list of attributes. */
579
580 tree
581 oacc_replace_fn_attrib_attr (tree attribs, tree dims)
582 {
583 tree ident = get_identifier (OACC_FN_ATTRIB);
584
585 /* If we happen to be present as the first attrib, drop it. */
586 if (attribs && TREE_PURPOSE (attribs) == ident)
587 attribs = TREE_CHAIN (attribs);
588 return tree_cons (ident, dims, attribs);
589 }
590
591 /* Variant working on a function decl. */
592
593 void
594 oacc_replace_fn_attrib (tree fn, tree dims)
595 {
596 DECL_ATTRIBUTES (fn)
597 = oacc_replace_fn_attrib_attr (DECL_ATTRIBUTES (fn), dims);
598 }
599
/* Scan CLAUSES for launch dimensions and attach them to the oacc
   function attribute.  Push any that are non-constant onto the ARGS
   list, along with an appropriate GOMP_LAUNCH_DIM tag.  */

void
oacc_set_fn_attrib (tree fn, tree clauses, vec<tree> *args)
{
  /* Must match GOMP_DIM ordering.  */
  static const omp_clause_code ids[]
    = { OMP_CLAUSE_NUM_GANGS, OMP_CLAUSE_NUM_WORKERS,
	OMP_CLAUSE_VECTOR_LENGTH };
  unsigned ix;
  tree dims[GOMP_DIM_MAX];

  tree attr = NULL_TREE;
  unsigned non_const = 0;

  /* Walk the axes in reverse so consing onto ATTR produces the list in
     GOMP_DIM order.  */
  for (ix = GOMP_DIM_MAX; ix--;)
    {
      tree clause = omp_find_clause (clauses, ids[ix]);
      tree dim = NULL_TREE;

      if (clause)
	dim = OMP_CLAUSE_EXPR (clause, ids[ix]);
      dims[ix] = dim;
      if (dim && TREE_CODE (dim) != INTEGER_CST)
	{
	  /* Runtime-computed dimension: store a zero placeholder in the
	     attribute and record the axis in NON_CONST.  */
	  dim = integer_zero_node;
	  non_const |= GOMP_DIM_MASK (ix);
	}
      attr = tree_cons (NULL_TREE, dim, attr);
    }

  oacc_replace_fn_attrib (fn, attr);

  if (non_const)
    {
      /* Push a dynamic argument set.  */
      args->safe_push (oacc_launch_pack (GOMP_LAUNCH_DIM,
					 NULL_TREE, non_const));
      for (unsigned ix = 0; ix != GOMP_DIM_MAX; ix++)
	if (non_const & GOMP_DIM_MASK (ix))
	  args->safe_push (dims[ix]);
    }
}
645
/* Verify OpenACC routine clauses.

   Returns 0 if FNDECL should be marked with an OpenACC 'routine' directive, 1
   if it has already been marked in compatible way, and -1 if incompatible.
   Upon returning, the chain of clauses will contain exactly one clause
   specifying the level of parallelism.  */

int
oacc_verify_routine_clauses (tree fndecl, tree *clauses, location_t loc,
			     const char *routine_str)
{
  /* Scan the clause chain, keeping exactly one level-of-parallelism
     clause.  C_P trails C by one node so duplicates/conflicts can be
     unlinked in place.  */
  tree c_level = NULL_TREE;
  tree c_p = NULL_TREE;
  for (tree c = *clauses; c; c_p = c, c = OMP_CLAUSE_CHAIN (c))
    switch (OMP_CLAUSE_CODE (c))
      {
      case OMP_CLAUSE_GANG:
      case OMP_CLAUSE_WORKER:
      case OMP_CLAUSE_VECTOR:
      case OMP_CLAUSE_SEQ:
	if (c_level == NULL_TREE)
	  c_level = c;
	else if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_CODE (c_level))
	  {
	    /* This has already been diagnosed in the front ends.  */
	    /* Drop the duplicate clause.  */
	    gcc_checking_assert (c_p != NULL_TREE);
	    OMP_CLAUSE_CHAIN (c_p) = OMP_CLAUSE_CHAIN (c);
	    c = c_p;
	  }
	else
	  {
	    error_at (OMP_CLAUSE_LOCATION (c),
		      "%qs specifies a conflicting level of parallelism",
		      omp_clause_code_name[OMP_CLAUSE_CODE (c)]);
	    inform (OMP_CLAUSE_LOCATION (c_level),
		    "... to the previous %qs clause here",
		    omp_clause_code_name[OMP_CLAUSE_CODE (c_level)]);
	    /* Drop the conflicting clause.  */
	    gcc_checking_assert (c_p != NULL_TREE);
	    OMP_CLAUSE_CHAIN (c_p) = OMP_CLAUSE_CHAIN (c);
	    c = c_p;
	  }
	break;
      default:
	gcc_unreachable ();
      }
  if (c_level == NULL_TREE)
    {
      /* Default to an implicit 'seq' clause.  */
      c_level = build_omp_clause (loc, OMP_CLAUSE_SEQ);
      OMP_CLAUSE_CHAIN (c_level) = *clauses;
      *clauses = c_level;
    }
  /* In *clauses, we now have exactly one clause specifying the level of
     parallelism.  */

  tree attr
    = lookup_attribute ("omp declare target", DECL_ATTRIBUTES (fndecl));
  if (attr != NULL_TREE)
    {
      /* If a "#pragma acc routine" has already been applied, just verify
	 this one for compatibility.  */
      /* Collect previous directive's clauses.  */
      tree c_level_p = NULL_TREE;
      for (tree c = TREE_VALUE (attr); c; c = OMP_CLAUSE_CHAIN (c))
	switch (OMP_CLAUSE_CODE (c))
	  {
	  case OMP_CLAUSE_GANG:
	  case OMP_CLAUSE_WORKER:
	  case OMP_CLAUSE_VECTOR:
	  case OMP_CLAUSE_SEQ:
	    gcc_checking_assert (c_level_p == NULL_TREE);
	    c_level_p = c;
	    break;
	  default:
	    gcc_unreachable ();
	  }
      gcc_checking_assert (c_level_p != NULL_TREE);
      /* ..., and compare to current directive's, which we've already collected
	 above.  */
      tree c_diag;
      tree c_diag_p;
      /* Matching level of parallelism?  */
      if (OMP_CLAUSE_CODE (c_level) != OMP_CLAUSE_CODE (c_level_p))
	{
	  c_diag = c_level;
	  c_diag_p = c_level_p;
	  goto incompatible;
	}
      /* Compatible.  */
      return 1;

      /* Shared diagnostic path: C_DIAG is the offending new clause,
	 C_DIAG_P its counterpart from the earlier directive.  */
    incompatible:
      if (c_diag != NULL_TREE)
	error_at (OMP_CLAUSE_LOCATION (c_diag),
		  "incompatible %qs clause when applying"
		  " %<%s%> to %qD, which has already been"
		  " marked with an OpenACC 'routine' directive",
		  omp_clause_code_name[OMP_CLAUSE_CODE (c_diag)],
		  routine_str, fndecl);
      else if (c_diag_p != NULL_TREE)
	error_at (loc,
		  "missing %qs clause when applying"
		  " %<%s%> to %qD, which has already been"
		  " marked with an OpenACC 'routine' directive",
		  omp_clause_code_name[OMP_CLAUSE_CODE (c_diag_p)],
		  routine_str, fndecl);
      else
	gcc_unreachable ();
      if (c_diag_p != NULL_TREE)
	inform (OMP_CLAUSE_LOCATION (c_diag_p),
		"... with %qs clause here",
		omp_clause_code_name[OMP_CLAUSE_CODE (c_diag_p)]);
      else
	{
	  /* In the front ends, we don't preserve location information for the
	     OpenACC routine directive itself.  However, that of c_level_p
	     should be close.  */
	  location_t loc_routine = OMP_CLAUSE_LOCATION (c_level_p);
	  inform (loc_routine, "... without %qs clause near to here",
		  omp_clause_code_name[OMP_CLAUSE_CODE (c_diag)]);
	}
      /* Incompatible.  */
      return -1;
    }

  return 0;
}
775
/* Process the OpenACC 'routine' directive clauses to generate an attribute
   for the level of parallelism.  All dimensions have a size of zero
   (dynamic).  TREE_PURPOSE is set to indicate whether that dimension
   can have a loop partitioned on it.  non-zero indicates
   yes, zero indicates no.  By construction once a non-zero has been
   reached, further inner dimensions must also be non-zero.  We set
   TREE_VALUE to zero for the dimensions that may be partitioned and
   1 for the other ones -- if a loop is (erroneously) spawned at
   an outer level, we don't want to try and partition it.  */

tree
oacc_build_routine_dims (tree clauses)
{
  /* Must match GOMP_DIM ordering.  IDS has one extra entry because
     'seq' (no partitioning at any level) follows the real axes.  */
  static const omp_clause_code ids[]
    = {OMP_CLAUSE_GANG, OMP_CLAUSE_WORKER, OMP_CLAUSE_VECTOR, OMP_CLAUSE_SEQ};
  int ix;
  int level = -1;

  /* Find the outermost level-of-parallelism clause present.  */
  for (; clauses; clauses = OMP_CLAUSE_CHAIN (clauses))
    for (ix = GOMP_DIM_MAX + 1; ix--;)
      if (OMP_CLAUSE_CODE (clauses) == ids[ix])
	{
	  level = ix;
	  break;
	}
  gcc_checking_assert (level >= 0);

  tree dims = NULL_TREE;

  /* Build the per-axis list described in the function comment.  */
  for (ix = GOMP_DIM_MAX; ix--;)
    dims = tree_cons (build_int_cst (boolean_type_node, ix >= level),
		      build_int_cst (integer_type_node, ix < level), dims);

  return dims;
}
812
813 /* Retrieve the oacc function attrib and return it. Non-oacc
814 functions will return NULL. */
815
816 tree
817 oacc_get_fn_attrib (tree fn)
818 {
819 return lookup_attribute (OACC_FN_ATTRIB, DECL_ATTRIBUTES (fn));
820 }
821
822 /* Return true if FN is an OpenMP or OpenACC offloading function. */
823
824 bool
825 offloading_function_p (tree fn)
826 {
827 tree attrs = DECL_ATTRIBUTES (fn);
828 return (lookup_attribute ("omp declare target", attrs)
829 || lookup_attribute ("omp target entrypoint", attrs));
830 }
831
832 /* Extract an oacc execution dimension from FN. FN must be an
833 offloaded function or routine that has already had its execution
834 dimensions lowered to the target-specific values. */
835
836 int
837 oacc_get_fn_dim_size (tree fn, int axis)
838 {
839 tree attrs = oacc_get_fn_attrib (fn);
840
841 gcc_assert (axis < GOMP_DIM_MAX);
842
843 tree dims = TREE_VALUE (attrs);
844 while (axis--)
845 dims = TREE_CHAIN (dims);
846
847 int size = TREE_INT_CST_LOW (TREE_VALUE (dims));
848
849 return size;
850 }
851
852 /* Extract the dimension axis from an IFN_GOACC_DIM_POS or
853 IFN_GOACC_DIM_SIZE call. */
854
855 int
856 oacc_get_ifn_dim_arg (const gimple *stmt)
857 {
858 gcc_checking_assert (gimple_call_internal_fn (stmt) == IFN_GOACC_DIM_SIZE
859 || gimple_call_internal_fn (stmt) == IFN_GOACC_DIM_POS);
860 tree arg = gimple_call_arg (stmt, 0);
861 HOST_WIDE_INT axis = TREE_INT_CST_LOW (arg);
862
863 gcc_checking_assert (axis >= 0 && axis < GOMP_DIM_MAX);
864 return (int) axis;
865 }