/* gcc/c-family/c-omp.c  */
1 /* This file contains routines to construct OpenACC and OpenMP constructs,
2 called from parsing in the C and C++ front ends.
3
4 Copyright (C) 2005-2015 Free Software Foundation, Inc.
5 Contributed by Richard Henderson <rth@redhat.com>,
6 Diego Novillo <dnovillo@redhat.com>.
7
8 This file is part of GCC.
9
10 GCC is free software; you can redistribute it and/or modify it under
11 the terms of the GNU General Public License as published by the Free
12 Software Foundation; either version 3, or (at your option) any later
13 version.
14
15 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
16 WARRANTY; without even the implied warranty of MERCHANTABILITY or
17 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
18 for more details.
19
20 You should have received a copy of the GNU General Public License
21 along with GCC; see the file COPYING3. If not see
22 <http://www.gnu.org/licenses/>. */
23
24 #include "config.h"
25 #include "system.h"
26 #include "coretypes.h"
27 #include "tm.h"
28 #include "alias.h"
29 #include "tree.h"
30 #include "c-common.h"
31 #include "c-pragma.h"
32 #include "gimple-expr.h"
33 #include "langhooks.h"
34 #include "omp-low.h"
35 #include "gomp-constants.h"
36
37
38 /* Complete a #pragma oacc wait construct. LOC is the location of
39 the #pragma. */
40
41 tree
42 c_finish_oacc_wait (location_t loc, tree parms, tree clauses)
43 {
44 const int nparms = list_length (parms);
45 tree stmt, t;
46 vec<tree, va_gc> *args;
47
48 vec_alloc (args, nparms + 2);
49 stmt = builtin_decl_explicit (BUILT_IN_GOACC_WAIT);
50
51 if (find_omp_clause (clauses, OMP_CLAUSE_ASYNC))
52 t = OMP_CLAUSE_ASYNC_EXPR (clauses);
53 else
54 t = build_int_cst (integer_type_node, GOMP_ASYNC_SYNC);
55
56 args->quick_push (t);
57 args->quick_push (build_int_cst (integer_type_node, nparms));
58
59 for (t = parms; t; t = TREE_CHAIN (t))
60 {
61 if (TREE_CODE (OMP_CLAUSE_WAIT_EXPR (t)) == INTEGER_CST)
62 args->quick_push (build_int_cst (integer_type_node,
63 TREE_INT_CST_LOW (OMP_CLAUSE_WAIT_EXPR (t))));
64 else
65 args->quick_push (OMP_CLAUSE_WAIT_EXPR (t));
66 }
67
68 stmt = build_call_expr_loc_vec (loc, stmt, args);
69 add_stmt (stmt);
70
71 vec_free (args);
72
73 return stmt;
74 }
75
76 /* Complete a #pragma omp master construct. STMT is the structured-block
77 that follows the pragma. LOC is the l*/
78
79 tree
80 c_finish_omp_master (location_t loc, tree stmt)
81 {
82 tree t = add_stmt (build1 (OMP_MASTER, void_type_node, stmt));
83 SET_EXPR_LOCATION (t, loc);
84 return t;
85 }
86
87 /* Complete a #pragma omp taskgroup construct. STMT is the structured-block
88 that follows the pragma. LOC is the l*/
89
90 tree
91 c_finish_omp_taskgroup (location_t loc, tree stmt)
92 {
93 tree t = add_stmt (build1 (OMP_TASKGROUP, void_type_node, stmt));
94 SET_EXPR_LOCATION (t, loc);
95 return t;
96 }
97
98 /* Complete a #pragma omp critical construct. STMT is the structured-block
99 that follows the pragma, NAME is the identifier in the pragma, or null
100 if it was omitted. LOC is the location of the #pragma. */
101
102 tree
103 c_finish_omp_critical (location_t loc, tree body, tree name)
104 {
105 tree stmt = make_node (OMP_CRITICAL);
106 TREE_TYPE (stmt) = void_type_node;
107 OMP_CRITICAL_BODY (stmt) = body;
108 OMP_CRITICAL_NAME (stmt) = name;
109 SET_EXPR_LOCATION (stmt, loc);
110 return add_stmt (stmt);
111 }
112
113 /* Complete a #pragma omp ordered construct. STMT is the structured-block
114 that follows the pragma. LOC is the location of the #pragma. */
115
116 tree
117 c_finish_omp_ordered (location_t loc, tree stmt)
118 {
119 tree t = build1 (OMP_ORDERED, void_type_node, stmt);
120 SET_EXPR_LOCATION (t, loc);
121 return add_stmt (t);
122 }
123
124
125 /* Complete a #pragma omp barrier construct. LOC is the location of
126 the #pragma. */
127
128 void
129 c_finish_omp_barrier (location_t loc)
130 {
131 tree x;
132
133 x = builtin_decl_explicit (BUILT_IN_GOMP_BARRIER);
134 x = build_call_expr_loc (loc, x, 0);
135 add_stmt (x);
136 }
137
138
139 /* Complete a #pragma omp taskwait construct. LOC is the location of the
140 pragma. */
141
142 void
143 c_finish_omp_taskwait (location_t loc)
144 {
145 tree x;
146
147 x = builtin_decl_explicit (BUILT_IN_GOMP_TASKWAIT);
148 x = build_call_expr_loc (loc, x, 0);
149 add_stmt (x);
150 }
151
152
153 /* Complete a #pragma omp taskyield construct. LOC is the location of the
154 pragma. */
155
156 void
157 c_finish_omp_taskyield (location_t loc)
158 {
159 tree x;
160
161 x = builtin_decl_explicit (BUILT_IN_GOMP_TASKYIELD);
162 x = build_call_expr_loc (loc, x, 0);
163 add_stmt (x);
164 }
165
166
/* Complete a #pragma omp atomic construct.  For CODE OMP_ATOMIC
   the expression to be implemented atomically is LHS opcode= RHS. 
   For OMP_ATOMIC_READ V = LHS, for OMP_ATOMIC_CAPTURE_{NEW,OLD} LHS
   opcode= RHS with the new or old content of LHS returned.
   LOC is the location of the atomic statement.  The value returned
   is either error_mark_node (if the construct was erroneous) or an
   OMP_ATOMIC* node which should be added to the current statement
   tree with add_stmt.  */

tree
c_finish_omp_atomic (location_t loc, enum tree_code code,
		     enum tree_code opcode, tree lhs, tree rhs,
		     tree v, tree lhs1, tree rhs1, bool swapped, bool seq_cst)
{
  tree x, type, addr, pre = NULL_TREE;

  /* Bail out if any operand was already diagnosed as erroneous.  */
  if (lhs == error_mark_node || rhs == error_mark_node
      || v == error_mark_node || lhs1 == error_mark_node
      || rhs1 == error_mark_node)
    return error_mark_node;

  /* ??? According to one reading of the OpenMP spec, complex type are
     supported, but there are no atomic stores for any architecture.
     But at least icc 9.0 doesn't support complex types here either.
     And lets not even talk about vector types...  */
  type = TREE_TYPE (lhs);
  if (!INTEGRAL_TYPE_P (type)
      && !POINTER_TYPE_P (type)
      && !SCALAR_FLOAT_TYPE_P (type))
    {
      error_at (loc, "invalid expression type for %<#pragma omp atomic%>");
      return error_mark_node;
    }

  /* Canonicalize the division opcode (presumably build_modify_expr
     below re-selects the proper division form from TYPE — confirm).  */
  if (opcode == RDIV_EXPR)
    opcode = TRUNC_DIV_EXPR;

  /* ??? Validate that rhs does not overlap lhs.  */

  /* Take and save the address of the lhs.  From then on we'll reference it
     via indirection.  */
  addr = build_unary_op (loc, ADDR_EXPR, lhs, 0);
  if (addr == error_mark_node)
    return error_mark_node;
  addr = save_expr (addr);
  if (TREE_CODE (addr) != SAVE_EXPR
      && (TREE_CODE (addr) != ADDR_EXPR
	  || !VAR_P (TREE_OPERAND (addr, 0))))
    {
      /* Make sure LHS is simple enough so that goa_lhs_expr_p can recognize
	 it even after unsharing function body.  */
      tree var = create_tmp_var_raw (TREE_TYPE (addr));
      DECL_CONTEXT (var) = current_function_decl;
      addr = build4 (TARGET_EXPR, TREE_TYPE (addr), var, addr, NULL, NULL);
    }
  lhs = build_indirect_ref (loc, addr, RO_NULL);

  /* Atomic read: just V = *ADDR, no update expression involved.  */
  if (code == OMP_ATOMIC_READ)
    {
      x = build1 (OMP_ATOMIC_READ, type, addr);
      SET_EXPR_LOCATION (x, loc);
      OMP_ATOMIC_SEQ_CST (x) = seq_cst;
      return build_modify_expr (loc, v, NULL_TREE, NOP_EXPR,
				loc, x, NULL_TREE);
    }

  /* There are lots of warnings, errors, and conversions that need to happen
     in the course of interpreting a statement.  Use the normal mechanisms
     to do this, and then take it apart again.  */
  if (swapped)
    {
      /* For LHS = RHS op LHS, fold the binary operation up front and
	 treat the whole thing as a plain store.  */
      rhs = build_binary_op (loc, opcode, rhs, lhs, 1);
      opcode = NOP_EXPR;
    }
  bool save = in_late_binary_op;
  in_late_binary_op = true;
  x = build_modify_expr (loc, lhs, NULL_TREE, opcode, loc, rhs, NULL_TREE);
  in_late_binary_op = save;
  if (x == error_mark_node)
    return error_mark_node;
  if (TREE_CODE (x) == COMPOUND_EXPR)
    {
      /* build_modify_expr may have pre-evaluated part of the RHS into a
	 SAVE_EXPR; remember it so it can be emitted before the atomic
	 node at the end of this function.  */
      pre = TREE_OPERAND (x, 0);
      gcc_assert (TREE_CODE (pre) == SAVE_EXPR);
      x = TREE_OPERAND (x, 1);
    }
  gcc_assert (TREE_CODE (x) == MODIFY_EXPR);
  rhs = TREE_OPERAND (x, 1);

  /* Punt the actual generation of atomic operations to common code.  */
  if (code == OMP_ATOMIC)
    type = void_type_node;
  x = build2 (code, type, addr, rhs);
  SET_EXPR_LOCATION (x, loc);
  OMP_ATOMIC_SEQ_CST (x) = seq_cst;

  /* Generally it is hard to prove lhs1 and lhs are the same memory
     location, just diagnose different variables.  */
  if (rhs1
      && VAR_P (rhs1)
      && VAR_P (lhs)
      && rhs1 != lhs)
    {
      if (code == OMP_ATOMIC)
	error_at (loc, "%<#pragma omp atomic update%> uses two different variables for memory");
      else
	error_at (loc, "%<#pragma omp atomic capture%> uses two different variables for memory");
      return error_mark_node;
    }

  if (code != OMP_ATOMIC)
    {
      /* Generally it is hard to prove lhs1 and lhs are the same memory
	 location, just diagnose different variables.  */
      if (lhs1 && VAR_P (lhs1) && VAR_P (lhs))
	{
	  if (lhs1 != lhs)
	    {
	      error_at (loc, "%<#pragma omp atomic capture%> uses two different variables for memory");
	      return error_mark_node;
	    }
	}
      /* Capture forms: store the captured value of LHS into V.  */
      x = build_modify_expr (loc, v, NULL_TREE, NOP_EXPR,
			     loc, x, NULL_TREE);
      if (rhs1 && rhs1 != lhs)
	{
	  /* Keep RHS1's side effects while discarding its value.  */
	  tree rhs1addr = build_unary_op (loc, ADDR_EXPR, rhs1, 0);
	  if (rhs1addr == error_mark_node)
	    return error_mark_node;
	  x = omit_one_operand_loc (loc, type, x, rhs1addr);
	}
      if (lhs1 && lhs1 != lhs)
	{
	  tree lhs1addr = build_unary_op (loc, ADDR_EXPR, lhs1, 0);
	  if (lhs1addr == error_mark_node)
	    return error_mark_node;
	  if (code == OMP_ATOMIC_CAPTURE_OLD)
	    x = omit_one_operand_loc (loc, type, x, lhs1addr);
	  else
	    {
	      x = save_expr (x);
	      x = omit_two_operands_loc (loc, type, x, x, lhs1addr);
	    }
	}
    }
  else if (rhs1 && rhs1 != lhs)
    {
      tree rhs1addr = build_unary_op (loc, ADDR_EXPR, rhs1, 0);
      if (rhs1addr == error_mark_node)
	return error_mark_node;
      x = omit_one_operand_loc (loc, type, x, rhs1addr);
    }

  /* Emit any pre-evaluated RHS before the atomic operation itself.  */
  if (pre)
    x = omit_one_operand_loc (loc, type, x, pre);
  return x;
}
324
325
326 /* Complete a #pragma omp flush construct. We don't do anything with
327 the variable list that the syntax allows. LOC is the location of
328 the #pragma. */
329
330 void
331 c_finish_omp_flush (location_t loc)
332 {
333 tree x;
334
335 x = builtin_decl_explicit (BUILT_IN_SYNC_SYNCHRONIZE);
336 x = build_call_expr_loc (loc, x, 0);
337 add_stmt (x);
338 }
339
340
/* Check and canonicalize OMP_FOR increment expression.
   Helper function for c_finish_omp_for.

   EXP is the right-hand side of the rewritten increment statement and
   DECL is the iteration variable; LOC is used for any folded
   subexpressions.  Returns EXP with the single reference to DECL
   replaced by zero, so the caller can rebuild the increment as
   DECL = DECL + result, or error_mark_node if EXP is not of a form
   this routine can decompose.  */

static tree
check_omp_for_incr_expr (location_t loc, tree exp, tree decl)
{
  tree t;

  /* Only integral expressions at least as wide as DECL are handled.  */
  if (!INTEGRAL_TYPE_P (TREE_TYPE (exp))
      || TYPE_PRECISION (TREE_TYPE (exp)) < TYPE_PRECISION (TREE_TYPE (decl)))
    return error_mark_node;

  /* DECL itself contributes a zero step.  */
  if (exp == decl)
    return build_int_cst (TREE_TYPE (exp), 0);

  switch (TREE_CODE (exp))
    {
    CASE_CONVERT:
      /* Look through conversions, reapplying them to the reduced
	 operand.  */
      t = check_omp_for_incr_expr (loc, TREE_OPERAND (exp, 0), decl);
      if (t != error_mark_node)
	return fold_convert_loc (loc, TREE_TYPE (exp), t);
      break;
    case MINUS_EXPR:
      /* Only the minuend is searched for DECL.  */
      t = check_omp_for_incr_expr (loc, TREE_OPERAND (exp, 0), decl);
      if (t != error_mark_node)
	return fold_build2_loc (loc, MINUS_EXPR,
				TREE_TYPE (exp), t, TREE_OPERAND (exp, 1));
      break;
    case PLUS_EXPR:
      /* DECL may appear in either addend.  */
      t = check_omp_for_incr_expr (loc, TREE_OPERAND (exp, 0), decl);
      if (t != error_mark_node)
	return fold_build2_loc (loc, PLUS_EXPR,
				TREE_TYPE (exp), t, TREE_OPERAND (exp, 1));
      t = check_omp_for_incr_expr (loc, TREE_OPERAND (exp, 1), decl);
      if (t != error_mark_node)
	return fold_build2_loc (loc, PLUS_EXPR,
				TREE_TYPE (exp), TREE_OPERAND (exp, 0), t);
      break;
    case COMPOUND_EXPR:
      {
	/* cp_build_modify_expr forces preevaluation of the RHS to make
	   sure that it is evaluated before the lvalue-rvalue conversion
	   is applied to the LHS.  Reconstruct the original expression.  */
	tree op0 = TREE_OPERAND (exp, 0);
	if (TREE_CODE (op0) == TARGET_EXPR
	    && !VOID_TYPE_P (TREE_TYPE (op0)))
	  {
	    tree op1 = TREE_OPERAND (exp, 1);
	    tree temp = TARGET_EXPR_SLOT (op0);
	    if (BINARY_CLASS_P (op1)
		&& TREE_OPERAND (op1, 1) == temp)
	      {
		op1 = copy_node (op1);
		TREE_OPERAND (op1, 1) = TARGET_EXPR_INITIAL (op0);
		return check_omp_for_incr_expr (loc, op1, decl);
	      }
	  }
	break;
      }
    default:
      break;
    }

  return error_mark_node;
}
406
407 /* If the OMP_FOR increment expression in INCR is of pointer type,
408 canonicalize it into an expression handled by gimplify_omp_for()
409 and return it. DECL is the iteration variable. */
410
411 static tree
412 c_omp_for_incr_canonicalize_ptr (location_t loc, tree decl, tree incr)
413 {
414 if (POINTER_TYPE_P (TREE_TYPE (decl))
415 && TREE_OPERAND (incr, 1))
416 {
417 tree t = fold_convert_loc (loc,
418 sizetype, TREE_OPERAND (incr, 1));
419
420 if (TREE_CODE (incr) == POSTDECREMENT_EXPR
421 || TREE_CODE (incr) == PREDECREMENT_EXPR)
422 t = fold_build1_loc (loc, NEGATE_EXPR, sizetype, t);
423 t = fold_build_pointer_plus (decl, t);
424 incr = build2 (MODIFY_EXPR, void_type_node, decl, t);
425 }
426 return incr;
427 }
428
/* Validate and generate OMP_FOR.
   DECLV is a vector of iteration variables, for each collapsed loop.
   INITV, CONDV and INCRV are vectors containing initialization
   expressions, controlling predicates and increment expressions.
   BODY is the body of the loop and PRE_BODY statements that go before
   the loop.  LOCUS is the location of the construct and CODE the
   tree code (OMP_FOR, OMP_SIMD, CILK_SIMD, ...) to build.  Returns
   the added statement, or NULL if any loop failed validation.  */

tree
c_finish_omp_for (location_t locus, enum tree_code code, tree declv,
		  tree initv, tree condv, tree incrv, tree body, tree pre_body)
{
  location_t elocus;
  bool fail = false;
  int i;

  /* Cilk loops get an extra check on their control variable first.  */
  if ((code == CILK_SIMD || code == CILK_FOR)
      && !c_check_cilk_loop (locus, TREE_VEC_ELT (declv, 0)))
    fail = true;

  /* All four vectors carry one entry per collapsed loop.  */
  gcc_assert (TREE_VEC_LENGTH (declv) == TREE_VEC_LENGTH (initv));
  gcc_assert (TREE_VEC_LENGTH (declv) == TREE_VEC_LENGTH (condv));
  gcc_assert (TREE_VEC_LENGTH (declv) == TREE_VEC_LENGTH (incrv));
  for (i = 0; i < TREE_VEC_LENGTH (declv); i++)
    {
      tree decl = TREE_VEC_ELT (declv, i);
      tree init = TREE_VEC_ELT (initv, i);
      tree cond = TREE_VEC_ELT (condv, i);
      tree incr = TREE_VEC_ELT (incrv, i);

      elocus = locus;
      if (EXPR_HAS_LOCATION (init))
	elocus = EXPR_LOCATION (init);

      /* Validate the iteration variable.  */
      if (!INTEGRAL_TYPE_P (TREE_TYPE (decl))
	  && TREE_CODE (TREE_TYPE (decl)) != POINTER_TYPE)
	{
	  error_at (elocus, "invalid type for iteration variable %qE", decl);
	  fail = true;
	}

      /* In the case of "for (int i = 0...)", init will be a decl.  It should
	 have a DECL_INITIAL that we can turn into an assignment.  */
      if (init == decl)
	{
	  elocus = DECL_SOURCE_LOCATION (decl);

	  init = DECL_INITIAL (decl);
	  if (init == NULL)
	    {
	      error_at (elocus, "%qE is not initialized", decl);
	      init = integer_zero_node;
	      fail = true;
	    }

	  init = build_modify_expr (elocus, decl, NULL_TREE, NOP_EXPR,
	  			    /* FIXME diagnostics: This should
				       be the location of the INIT.  */
	  			    elocus,
				    init,
				    NULL_TREE);
	}
      if (init != error_mark_node)
	{
	  gcc_assert (TREE_CODE (init) == MODIFY_EXPR);
	  gcc_assert (TREE_OPERAND (init, 0) == decl);
	}

      if (cond == NULL_TREE)
	{
	  error_at (elocus, "missing controlling predicate");
	  fail = true;
	}
      else
	{
	  bool cond_ok = false;

	  if (EXPR_HAS_LOCATION (cond))
	    elocus = EXPR_LOCATION (cond);

	  if (TREE_CODE (cond) == LT_EXPR
	      || TREE_CODE (cond) == LE_EXPR
	      || TREE_CODE (cond) == GT_EXPR
	      || TREE_CODE (cond) == GE_EXPR
	      || TREE_CODE (cond) == NE_EXPR
	      || TREE_CODE (cond) == EQ_EXPR)
	    {
	      tree op0 = TREE_OPERAND (cond, 0);
	      tree op1 = TREE_OPERAND (cond, 1);

	      /* 2.5.1.  The comparison in the condition is computed in
		 the type of DECL, otherwise the behavior is undefined.

		 For example:
		 long n; int i;
		 i < n;

		 according to ISO will be evaluated as:
		 (long)i < n;

		 We want to force:
		 i < (int)n;  */
	      if (TREE_CODE (op0) == NOP_EXPR
		  && decl == TREE_OPERAND (op0, 0))
		{
		  TREE_OPERAND (cond, 0) = TREE_OPERAND (op0, 0);
		  TREE_OPERAND (cond, 1)
		    = fold_build1_loc (elocus, NOP_EXPR, TREE_TYPE (decl),
				       TREE_OPERAND (cond, 1));
		}
	      else if (TREE_CODE (op1) == NOP_EXPR
		       && decl == TREE_OPERAND (op1, 0))
		{
		  TREE_OPERAND (cond, 1) = TREE_OPERAND (op1, 0);
		  TREE_OPERAND (cond, 0)
		    = fold_build1_loc (elocus, NOP_EXPR, TREE_TYPE (decl),
				       TREE_OPERAND (cond, 0));
		}

	      /* Canonicalize the condition so DECL is always operand 0,
		 swapping the comparison when it was operand 1.  */
	      if (decl == TREE_OPERAND (cond, 0))
		cond_ok = true;
	      else if (decl == TREE_OPERAND (cond, 1))
		{
		  TREE_SET_CODE (cond,
				 swap_tree_comparison (TREE_CODE (cond)));
		  TREE_OPERAND (cond, 1) = TREE_OPERAND (cond, 0);
		  TREE_OPERAND (cond, 0) = decl;
		  cond_ok = true;
		}

	      /* NE/EQ are only valid against TYPE_MIN/TYPE_MAX of the
		 iterator's integral type (they are then rewritten into
		 an equivalent ordered comparison), or on Cilk loops.  */
	      if (TREE_CODE (cond) == NE_EXPR
		  || TREE_CODE (cond) == EQ_EXPR)
		{
		  if (!INTEGRAL_TYPE_P (TREE_TYPE (decl)))
		    {
		      if (code != CILK_SIMD && code != CILK_FOR)
			cond_ok = false;
		    }
		  else if (operand_equal_p (TREE_OPERAND (cond, 1),
					    TYPE_MIN_VALUE (TREE_TYPE (decl)),
					    0))
		    TREE_SET_CODE (cond, TREE_CODE (cond) == NE_EXPR
					 ? GT_EXPR : LE_EXPR);
		  else if (operand_equal_p (TREE_OPERAND (cond, 1),
					    TYPE_MAX_VALUE (TREE_TYPE (decl)),
					    0))
		    TREE_SET_CODE (cond, TREE_CODE (cond) == NE_EXPR
					 ? LT_EXPR : GE_EXPR);
		  else if (code != CILK_SIMD && code != CILK_FOR)
		    cond_ok = false;
		}
	    }

	  if (!cond_ok)
	    {
	      error_at (elocus, "invalid controlling predicate");
	      fail = true;
	    }
	}

      if (incr == NULL_TREE)
	{
	  error_at (elocus, "missing increment expression");
	  fail = true;
	}
      else
	{
	  bool incr_ok = false;

	  if (EXPR_HAS_LOCATION (incr))
	    elocus = EXPR_LOCATION (incr);

	  /* Check all the valid increment expressions: v++, v--, ++v, --v,
	     v = v + incr, v = incr + v and v = v - incr.  */
	  switch (TREE_CODE (incr))
	    {
	    case POSTINCREMENT_EXPR:
	    case PREINCREMENT_EXPR:
	    case POSTDECREMENT_EXPR:
	    case PREDECREMENT_EXPR:
	      if (TREE_OPERAND (incr, 0) != decl)
		break;

	      incr_ok = true;
	      incr = c_omp_for_incr_canonicalize_ptr (elocus, decl, incr);
	      break;

	    case COMPOUND_EXPR:
	      /* Look through the pre-evaluation wrapper around a
		 MODIFY_EXPR.  */
	      if (TREE_CODE (TREE_OPERAND (incr, 0)) != SAVE_EXPR
		  || TREE_CODE (TREE_OPERAND (incr, 1)) != MODIFY_EXPR)
		break;
	      incr = TREE_OPERAND (incr, 1);
	      /* FALLTHRU */
	    case MODIFY_EXPR:
	      if (TREE_OPERAND (incr, 0) != decl)
		break;
	      if (TREE_OPERAND (incr, 1) == decl)
		break;
	      if (TREE_CODE (TREE_OPERAND (incr, 1)) == PLUS_EXPR
		  && (TREE_OPERAND (TREE_OPERAND (incr, 1), 0) == decl
		      || TREE_OPERAND (TREE_OPERAND (incr, 1), 1) == decl))
		incr_ok = true;
	      else if ((TREE_CODE (TREE_OPERAND (incr, 1)) == MINUS_EXPR
			|| (TREE_CODE (TREE_OPERAND (incr, 1))
			    == POINTER_PLUS_EXPR))
		       && TREE_OPERAND (TREE_OPERAND (incr, 1), 0) == decl)
		incr_ok = true;
	      else
		{
		  /* Try to decompose a more complex RHS into
		     DECL = DECL + step.  */
		  tree t = check_omp_for_incr_expr (elocus,
						    TREE_OPERAND (incr, 1),
						    decl);
		  if (t != error_mark_node)
		    {
		      incr_ok = true;
		      t = build2 (PLUS_EXPR, TREE_TYPE (decl), decl, t);
		      incr = build2 (MODIFY_EXPR, void_type_node, decl, t);
		    }
		}
	      break;

	    default:
	      break;
	    }
	  if (!incr_ok)
	    {
	      error_at (elocus, "invalid increment expression");
	      fail = true;
	    }
	}

      /* Store the possibly canonicalized expressions back.  */
      TREE_VEC_ELT (initv, i) = init;
      TREE_VEC_ELT (incrv, i) = incr;
    }

  if (fail)
    return NULL;
  else
    {
      tree t = make_node (code);

      TREE_TYPE (t) = void_type_node;
      OMP_FOR_INIT (t) = initv;
      OMP_FOR_COND (t) = condv;
      OMP_FOR_INCR (t) = incrv;
      OMP_FOR_BODY (t) = body;
      OMP_FOR_PRE_BODY (t) = pre_body;

      SET_EXPR_LOCATION (t, locus);
      return add_stmt (t);
    }
}
681
/* Right now we have 14 different combined constructs, this
   function attempts to split or duplicate clauses for combined
   constructs.  CODE is the innermost construct in the combined construct,
   and MASK allows to determine which constructs are combined together,
   as every construct has at least one clause that no other construct
   has (except for OMP_SECTIONS, but that can be only combined with parallel).
   On return CCLAUSES (indexed by enum c_omp_clause_split) holds one
   clause chain per leaf construct; CLAUSES is consumed by relinking
   its clauses onto those chains.
   Combined constructs are:
   #pragma omp parallel for
   #pragma omp parallel sections
   #pragma omp parallel for simd
   #pragma omp for simd
   #pragma omp distribute simd
   #pragma omp distribute parallel for
   #pragma omp distribute parallel for simd
   #pragma omp teams distribute
   #pragma omp teams distribute parallel for
   #pragma omp teams distribute parallel for simd
   #pragma omp target teams
   #pragma omp target teams distribute
   #pragma omp target teams distribute parallel for
   #pragma omp target teams distribute parallel for simd  */

void
c_omp_split_clauses (location_t loc, enum tree_code code,
		     omp_clause_mask mask, tree clauses, tree *cclauses)
{
  tree next, c;
  enum c_omp_clause_split s;
  int i;

  for (i = 0; i < C_OMP_CLAUSE_SPLIT_COUNT; i++)
    cclauses[i] = NULL;
  /* Add implicit nowait clause on
     #pragma omp parallel {for,for simd,sections}.  */
  if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_THREADS)) != 0)
    switch (code)
      {
      case OMP_FOR:
      case OMP_SIMD:
	cclauses[C_OMP_CLAUSE_SPLIT_FOR]
	  = build_omp_clause (loc, OMP_CLAUSE_NOWAIT);
	break;
      case OMP_SECTIONS:
	cclauses[C_OMP_CLAUSE_SPLIT_SECTIONS]
	  = build_omp_clause (loc, OMP_CLAUSE_NOWAIT);
	break;
      default:
	break;
      }

  /* Walk CLAUSES, deciding for each clause which leaf chain S it goes
     on (duplicating where a clause belongs to several leaves).  */
  for (; clauses ; clauses = next)
    {
      next = OMP_CLAUSE_CHAIN (clauses);

      switch (OMP_CLAUSE_CODE (clauses))
	{
	/* First the clauses that are unique to some constructs.  */
	case OMP_CLAUSE_DEVICE:
	case OMP_CLAUSE_MAP:
	  s = C_OMP_CLAUSE_SPLIT_TARGET;
	  break;
	case OMP_CLAUSE_NUM_TEAMS:
	case OMP_CLAUSE_THREAD_LIMIT:
	  s = C_OMP_CLAUSE_SPLIT_TEAMS;
	  break;
	case OMP_CLAUSE_DIST_SCHEDULE:
	  s = C_OMP_CLAUSE_SPLIT_DISTRIBUTE;
	  break;
	case OMP_CLAUSE_COPYIN:
	case OMP_CLAUSE_NUM_THREADS:
	case OMP_CLAUSE_PROC_BIND:
	  s = C_OMP_CLAUSE_SPLIT_PARALLEL;
	  break;
	case OMP_CLAUSE_ORDERED:
	case OMP_CLAUSE_SCHEDULE:
	case OMP_CLAUSE_NOWAIT:
	  s = C_OMP_CLAUSE_SPLIT_FOR;
	  break;
	case OMP_CLAUSE_SAFELEN:
	case OMP_CLAUSE_LINEAR:
	case OMP_CLAUSE_ALIGNED:
	  s = C_OMP_CLAUSE_SPLIT_SIMD;
	  break;
	/* Duplicate this to all of distribute, for and simd.  */
	case OMP_CLAUSE_COLLAPSE:
	  if (code == OMP_SIMD)
	    {
	      c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
				    OMP_CLAUSE_COLLAPSE);
	      OMP_CLAUSE_COLLAPSE_EXPR (c)
		= OMP_CLAUSE_COLLAPSE_EXPR (clauses);
	      OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_SIMD];
	      cclauses[C_OMP_CLAUSE_SPLIT_SIMD] = c;
	    }
	  if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_SCHEDULE)) != 0)
	    {
	      if ((mask & (OMP_CLAUSE_MASK_1
			   << PRAGMA_OMP_CLAUSE_DIST_SCHEDULE)) != 0)
		{
		  c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
					OMP_CLAUSE_COLLAPSE);
		  OMP_CLAUSE_COLLAPSE_EXPR (c)
		    = OMP_CLAUSE_COLLAPSE_EXPR (clauses);
		  OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_FOR];
		  cclauses[C_OMP_CLAUSE_SPLIT_FOR] = c;
		  s = C_OMP_CLAUSE_SPLIT_DISTRIBUTE;
		}
	      else
		s = C_OMP_CLAUSE_SPLIT_FOR;
	    }
	  else
	    s = C_OMP_CLAUSE_SPLIT_DISTRIBUTE;
	  break;
	/* Private clause is supported on all constructs but target,
	   it is enough to put it on the innermost one.  For
	   #pragma omp {for,sections} put it on parallel though,
	   as that's what we did for OpenMP 3.1.  */
	case OMP_CLAUSE_PRIVATE:
	  switch (code)
	    {
	    case OMP_SIMD: s = C_OMP_CLAUSE_SPLIT_SIMD; break;
	    case OMP_FOR: case OMP_SECTIONS:
	    case OMP_PARALLEL: s = C_OMP_CLAUSE_SPLIT_PARALLEL; break;
	    case OMP_DISTRIBUTE: s = C_OMP_CLAUSE_SPLIT_DISTRIBUTE; break;
	    case OMP_TEAMS: s = C_OMP_CLAUSE_SPLIT_TEAMS; break;
	    default: gcc_unreachable ();
	    }
	  break;
	/* Firstprivate clause is supported on all constructs but
	   target and simd.  Put it on the outermost of those and
	   duplicate on parallel.  */
	case OMP_CLAUSE_FIRSTPRIVATE:
	  if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_THREADS))
	      != 0)
	    {
	      if ((mask & ((OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_TEAMS)
			   | (OMP_CLAUSE_MASK_1
			      << PRAGMA_OMP_CLAUSE_DIST_SCHEDULE))) != 0)
		{
		  c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
					OMP_CLAUSE_FIRSTPRIVATE);
		  OMP_CLAUSE_DECL (c) = OMP_CLAUSE_DECL (clauses);
		  OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_PARALLEL];
		  cclauses[C_OMP_CLAUSE_SPLIT_PARALLEL] = c;
		  if ((mask & (OMP_CLAUSE_MASK_1
			       << PRAGMA_OMP_CLAUSE_NUM_TEAMS)) != 0)
		    s = C_OMP_CLAUSE_SPLIT_TEAMS;
		  else
		    s = C_OMP_CLAUSE_SPLIT_DISTRIBUTE;
		}
	      else
		/* This must be
		   #pragma omp parallel{, for{, simd}, sections}.  */
		s = C_OMP_CLAUSE_SPLIT_PARALLEL;
	    }
	  else if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_TEAMS))
		   != 0)
	    {
	      /* This must be one of
		 #pragma omp {,target }teams distribute
		 #pragma omp target teams
		 #pragma omp {,target }teams distribute simd.  */
	      gcc_assert (code == OMP_DISTRIBUTE
			  || code == OMP_TEAMS
			  || code == OMP_SIMD);
	      s = C_OMP_CLAUSE_SPLIT_TEAMS;
	    }
	  else if ((mask & (OMP_CLAUSE_MASK_1
			    << PRAGMA_OMP_CLAUSE_DIST_SCHEDULE)) != 0)
	    {
	      /* This must be #pragma omp distribute simd.  */
	      gcc_assert (code == OMP_SIMD);
	      s = C_OMP_CLAUSE_SPLIT_TEAMS;
	    }
	  else
	    {
	      /* This must be #pragma omp for simd.  */
	      gcc_assert (code == OMP_SIMD);
	      s = C_OMP_CLAUSE_SPLIT_FOR;
	    }
	  break;
	/* Lastprivate is allowed on for, sections and simd.  In
	   parallel {for{, simd},sections} we actually want to put it on
	   parallel rather than for or sections.  */
	case OMP_CLAUSE_LASTPRIVATE:
	  if (code == OMP_FOR || code == OMP_SECTIONS)
	    {
	      if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_THREADS))
		  != 0)
		s = C_OMP_CLAUSE_SPLIT_PARALLEL;
	      else
		s = C_OMP_CLAUSE_SPLIT_FOR;
	      break;
	    }
	  gcc_assert (code == OMP_SIMD);
	  if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_SCHEDULE)) != 0)
	    {
	      c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
				    OMP_CLAUSE_LASTPRIVATE);
	      OMP_CLAUSE_DECL (c) = OMP_CLAUSE_DECL (clauses);
	      if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_THREADS))
		  != 0)
		s = C_OMP_CLAUSE_SPLIT_PARALLEL;
	      else
		s = C_OMP_CLAUSE_SPLIT_FOR;
	      OMP_CLAUSE_CHAIN (c) = cclauses[s];
	      cclauses[s] = c;
	    }
	  s = C_OMP_CLAUSE_SPLIT_SIMD;
	  break;
	/* Shared and default clauses are allowed on parallel and teams.  */
	case OMP_CLAUSE_SHARED:
	case OMP_CLAUSE_DEFAULT:
	  if (code == OMP_TEAMS)
	    {
	      s = C_OMP_CLAUSE_SPLIT_TEAMS;
	      break;
	    }
	  if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_TEAMS))
	      != 0)
	    {
	      /* Duplicate onto teams when teams is combined in.  */
	      c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
				    OMP_CLAUSE_CODE (clauses));
	      if (OMP_CLAUSE_CODE (clauses) == OMP_CLAUSE_SHARED)
		OMP_CLAUSE_DECL (c) = OMP_CLAUSE_DECL (clauses);
	      else
		OMP_CLAUSE_DEFAULT_KIND (c)
		  = OMP_CLAUSE_DEFAULT_KIND (clauses);
	      OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_TEAMS];
	      cclauses[C_OMP_CLAUSE_SPLIT_TEAMS] = c;
	    }
	  s = C_OMP_CLAUSE_SPLIT_PARALLEL;
	  break;
	/* Reduction is allowed on simd, for, parallel, sections and teams.
	   Duplicate it on all of them, but omit on for or sections if
	   parallel is present.  */
	case OMP_CLAUSE_REDUCTION:
	  if (code == OMP_SIMD)
	    {
	      c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
				    OMP_CLAUSE_REDUCTION);
	      OMP_CLAUSE_DECL (c) = OMP_CLAUSE_DECL (clauses);
	      OMP_CLAUSE_REDUCTION_CODE (c)
		= OMP_CLAUSE_REDUCTION_CODE (clauses);
	      OMP_CLAUSE_REDUCTION_PLACEHOLDER (c)
		= OMP_CLAUSE_REDUCTION_PLACEHOLDER (clauses);
	      OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_SIMD];
	      cclauses[C_OMP_CLAUSE_SPLIT_SIMD] = c;
	    }
	  if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_SCHEDULE)) != 0)
	    {
	      if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_TEAMS))
		  != 0)
		{
		  c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
					OMP_CLAUSE_REDUCTION);
		  OMP_CLAUSE_DECL (c) = OMP_CLAUSE_DECL (clauses);
		  OMP_CLAUSE_REDUCTION_CODE (c)
		    = OMP_CLAUSE_REDUCTION_CODE (clauses);
		  OMP_CLAUSE_REDUCTION_PLACEHOLDER (c)
		    = OMP_CLAUSE_REDUCTION_PLACEHOLDER (clauses);
		  OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_PARALLEL];
		  cclauses[C_OMP_CLAUSE_SPLIT_PARALLEL] = c;
		  s = C_OMP_CLAUSE_SPLIT_TEAMS;
		}
	      else if ((mask & (OMP_CLAUSE_MASK_1
				<< PRAGMA_OMP_CLAUSE_NUM_THREADS)) != 0)
		s = C_OMP_CLAUSE_SPLIT_PARALLEL;
	      else
		s = C_OMP_CLAUSE_SPLIT_FOR;
	    }
	  else if (code == OMP_SECTIONS)
	    s = C_OMP_CLAUSE_SPLIT_PARALLEL;
	  else
	    s = C_OMP_CLAUSE_SPLIT_TEAMS;
	  break;
	case OMP_CLAUSE_IF:
	  /* FIXME: This is currently being discussed.  */
	  if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_THREADS))
	      != 0)
	    s = C_OMP_CLAUSE_SPLIT_PARALLEL;
	  else
	    s = C_OMP_CLAUSE_SPLIT_TARGET;
	  break;
	default:
	  gcc_unreachable ();
	}
      /* Relink the original clause onto the chosen leaf's chain.  */
      OMP_CLAUSE_CHAIN (clauses) = cclauses[s];
      cclauses[s] = clauses;
    }
}
974
975
976 /* qsort callback to compare #pragma omp declare simd clauses. */
977
978 static int
979 c_omp_declare_simd_clause_cmp (const void *p, const void *q)
980 {
981 tree a = *(const tree *) p;
982 tree b = *(const tree *) q;
983 if (OMP_CLAUSE_CODE (a) != OMP_CLAUSE_CODE (b))
984 {
985 if (OMP_CLAUSE_CODE (a) > OMP_CLAUSE_CODE (b))
986 return -1;
987 return 1;
988 }
989 if (OMP_CLAUSE_CODE (a) != OMP_CLAUSE_SIMDLEN
990 && OMP_CLAUSE_CODE (a) != OMP_CLAUSE_INBRANCH
991 && OMP_CLAUSE_CODE (a) != OMP_CLAUSE_NOTINBRANCH)
992 {
993 int c = tree_to_shwi (OMP_CLAUSE_DECL (a));
994 int d = tree_to_shwi (OMP_CLAUSE_DECL (b));
995 if (c < d)
996 return 1;
997 if (c > d)
998 return -1;
999 }
1000 return 0;
1001 }
1002
1003 /* Change PARM_DECLs in OMP_CLAUSE_DECL of #pragma omp declare simd
1004 CLAUSES on FNDECL into argument indexes and sort them. */
1005
1006 tree
1007 c_omp_declare_simd_clauses_to_numbers (tree parms, tree clauses)
1008 {
1009 tree c;
1010 vec<tree> clvec = vNULL;
1011
1012 for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
1013 {
1014 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_SIMDLEN
1015 && OMP_CLAUSE_CODE (c) != OMP_CLAUSE_INBRANCH
1016 && OMP_CLAUSE_CODE (c) != OMP_CLAUSE_NOTINBRANCH)
1017 {
1018 tree decl = OMP_CLAUSE_DECL (c);
1019 tree arg;
1020 int idx;
1021 for (arg = parms, idx = 0; arg;
1022 arg = TREE_CHAIN (arg), idx++)
1023 if (arg == decl)
1024 break;
1025 if (arg == NULL_TREE)
1026 {
1027 error_at (OMP_CLAUSE_LOCATION (c),
1028 "%qD is not an function argument", decl);
1029 continue;
1030 }
1031 OMP_CLAUSE_DECL (c) = build_int_cst (integer_type_node, idx);
1032 }
1033 clvec.safe_push (c);
1034 }
1035 if (!clvec.is_empty ())
1036 {
1037 unsigned int len = clvec.length (), i;
1038 clvec.qsort (c_omp_declare_simd_clause_cmp);
1039 clauses = clvec[0];
1040 for (i = 0; i < len; i++)
1041 OMP_CLAUSE_CHAIN (clvec[i]) = (i < len - 1) ? clvec[i + 1] : NULL_TREE;
1042 }
1043 clvec.release ();
1044 return clauses;
1045 }
1046
1047 /* Change argument indexes in CLAUSES of FNDECL back to PARM_DECLs. */
1048
1049 void
1050 c_omp_declare_simd_clauses_to_decls (tree fndecl, tree clauses)
1051 {
1052 tree c;
1053
1054 for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
1055 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_SIMDLEN
1056 && OMP_CLAUSE_CODE (c) != OMP_CLAUSE_INBRANCH
1057 && OMP_CLAUSE_CODE (c) != OMP_CLAUSE_NOTINBRANCH)
1058 {
1059 int idx = tree_to_shwi (OMP_CLAUSE_DECL (c)), i;
1060 tree arg;
1061 for (arg = DECL_ARGUMENTS (fndecl), i = 0; arg;
1062 arg = TREE_CHAIN (arg), i++)
1063 if (i == idx)
1064 break;
1065 gcc_assert (arg);
1066 OMP_CLAUSE_DECL (c) = arg;
1067 }
1068 }
1069
1070 /* True if OpenMP sharing attribute of DECL is predetermined. */
1071
1072 enum omp_clause_default_kind
1073 c_omp_predetermined_sharing (tree decl)
1074 {
1075 /* Variables with const-qualified type having no mutable member
1076 are predetermined shared. */
1077 if (TREE_READONLY (decl))
1078 return OMP_CLAUSE_DEFAULT_SHARED;
1079
1080 return OMP_CLAUSE_DEFAULT_UNSPECIFIED;
1081 }