/* This file contains routines to construct OpenACC and OpenMP constructs,
   called from parsing in the C and C++ front ends.

   Copyright (C) 2005-2015 Free Software Foundation, Inc.
   Contributed by Richard Henderson <rth@redhat.com>,
   Diego Novillo <dnovillo@redhat.com>.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
26 #include "coretypes.h"
32 #include "gimple-expr.h"
33 #include "langhooks.h"
35 #include "gomp-constants.h"
38 /* Complete a #pragma oacc wait construct. LOC is the location of
42 c_finish_oacc_wait (location_t loc
, tree parms
, tree clauses
)
44 const int nparms
= list_length (parms
);
46 vec
<tree
, va_gc
> *args
;
48 vec_alloc (args
, nparms
+ 2);
49 stmt
= builtin_decl_explicit (BUILT_IN_GOACC_WAIT
);
51 if (find_omp_clause (clauses
, OMP_CLAUSE_ASYNC
))
52 t
= OMP_CLAUSE_ASYNC_EXPR (clauses
);
54 t
= build_int_cst (integer_type_node
, GOMP_ASYNC_SYNC
);
57 args
->quick_push (build_int_cst (integer_type_node
, nparms
));
59 for (t
= parms
; t
; t
= TREE_CHAIN (t
))
61 if (TREE_CODE (OMP_CLAUSE_WAIT_EXPR (t
)) == INTEGER_CST
)
62 args
->quick_push (build_int_cst (integer_type_node
,
63 TREE_INT_CST_LOW (OMP_CLAUSE_WAIT_EXPR (t
))));
65 args
->quick_push (OMP_CLAUSE_WAIT_EXPR (t
));
68 stmt
= build_call_expr_loc_vec (loc
, stmt
, args
);
76 /* Complete a #pragma omp master construct. STMT is the structured-block
77 that follows the pragma. LOC is the l*/
80 c_finish_omp_master (location_t loc
, tree stmt
)
82 tree t
= add_stmt (build1 (OMP_MASTER
, void_type_node
, stmt
));
83 SET_EXPR_LOCATION (t
, loc
);
87 /* Complete a #pragma omp taskgroup construct. STMT is the structured-block
88 that follows the pragma. LOC is the l*/
91 c_finish_omp_taskgroup (location_t loc
, tree stmt
)
93 tree t
= add_stmt (build1 (OMP_TASKGROUP
, void_type_node
, stmt
));
94 SET_EXPR_LOCATION (t
, loc
);
98 /* Complete a #pragma omp critical construct. STMT is the structured-block
99 that follows the pragma, NAME is the identifier in the pragma, or null
100 if it was omitted. LOC is the location of the #pragma. */
103 c_finish_omp_critical (location_t loc
, tree body
, tree name
)
105 tree stmt
= make_node (OMP_CRITICAL
);
106 TREE_TYPE (stmt
) = void_type_node
;
107 OMP_CRITICAL_BODY (stmt
) = body
;
108 OMP_CRITICAL_NAME (stmt
) = name
;
109 SET_EXPR_LOCATION (stmt
, loc
);
110 return add_stmt (stmt
);
113 /* Complete a #pragma omp ordered construct. STMT is the structured-block
114 that follows the pragma. LOC is the location of the #pragma. */
117 c_finish_omp_ordered (location_t loc
, tree stmt
)
119 tree t
= build1 (OMP_ORDERED
, void_type_node
, stmt
);
120 SET_EXPR_LOCATION (t
, loc
);
125 /* Complete a #pragma omp barrier construct. LOC is the location of
129 c_finish_omp_barrier (location_t loc
)
133 x
= builtin_decl_explicit (BUILT_IN_GOMP_BARRIER
);
134 x
= build_call_expr_loc (loc
, x
, 0);
139 /* Complete a #pragma omp taskwait construct. LOC is the location of the
143 c_finish_omp_taskwait (location_t loc
)
147 x
= builtin_decl_explicit (BUILT_IN_GOMP_TASKWAIT
);
148 x
= build_call_expr_loc (loc
, x
, 0);
153 /* Complete a #pragma omp taskyield construct. LOC is the location of the
157 c_finish_omp_taskyield (location_t loc
)
161 x
= builtin_decl_explicit (BUILT_IN_GOMP_TASKYIELD
);
162 x
= build_call_expr_loc (loc
, x
, 0);
167 /* Complete a #pragma omp atomic construct. For CODE OMP_ATOMIC
168 the expression to be implemented atomically is LHS opcode= RHS.
169 For OMP_ATOMIC_READ V = LHS, for OMP_ATOMIC_CAPTURE_{NEW,OLD} LHS
170 opcode= RHS with the new or old content of LHS returned.
171 LOC is the location of the atomic statement. The value returned
172 is either error_mark_node (if the construct was erroneous) or an
173 OMP_ATOMIC* node which should be added to the current statement
174 tree with add_stmt. */
177 c_finish_omp_atomic (location_t loc
, enum tree_code code
,
178 enum tree_code opcode
, tree lhs
, tree rhs
,
179 tree v
, tree lhs1
, tree rhs1
, bool swapped
, bool seq_cst
)
181 tree x
, type
, addr
, pre
= NULL_TREE
;
183 if (lhs
== error_mark_node
|| rhs
== error_mark_node
184 || v
== error_mark_node
|| lhs1
== error_mark_node
185 || rhs1
== error_mark_node
)
186 return error_mark_node
;
188 /* ??? According to one reading of the OpenMP spec, complex type are
189 supported, but there are no atomic stores for any architecture.
190 But at least icc 9.0 doesn't support complex types here either.
191 And lets not even talk about vector types... */
192 type
= TREE_TYPE (lhs
);
193 if (!INTEGRAL_TYPE_P (type
)
194 && !POINTER_TYPE_P (type
)
195 && !SCALAR_FLOAT_TYPE_P (type
))
197 error_at (loc
, "invalid expression type for %<#pragma omp atomic%>");
198 return error_mark_node
;
201 if (opcode
== RDIV_EXPR
)
202 opcode
= TRUNC_DIV_EXPR
;
204 /* ??? Validate that rhs does not overlap lhs. */
206 /* Take and save the address of the lhs. From then on we'll reference it
208 addr
= build_unary_op (loc
, ADDR_EXPR
, lhs
, 0);
209 if (addr
== error_mark_node
)
210 return error_mark_node
;
211 addr
= save_expr (addr
);
212 if (TREE_CODE (addr
) != SAVE_EXPR
213 && (TREE_CODE (addr
) != ADDR_EXPR
214 || !VAR_P (TREE_OPERAND (addr
, 0))))
216 /* Make sure LHS is simple enough so that goa_lhs_expr_p can recognize
217 it even after unsharing function body. */
218 tree var
= create_tmp_var_raw (TREE_TYPE (addr
));
219 DECL_CONTEXT (var
) = current_function_decl
;
220 addr
= build4 (TARGET_EXPR
, TREE_TYPE (addr
), var
, addr
, NULL
, NULL
);
222 lhs
= build_indirect_ref (loc
, addr
, RO_NULL
);
224 if (code
== OMP_ATOMIC_READ
)
226 x
= build1 (OMP_ATOMIC_READ
, type
, addr
);
227 SET_EXPR_LOCATION (x
, loc
);
228 OMP_ATOMIC_SEQ_CST (x
) = seq_cst
;
229 return build_modify_expr (loc
, v
, NULL_TREE
, NOP_EXPR
,
233 /* There are lots of warnings, errors, and conversions that need to happen
234 in the course of interpreting a statement. Use the normal mechanisms
235 to do this, and then take it apart again. */
238 rhs
= build_binary_op (loc
, opcode
, rhs
, lhs
, 1);
241 bool save
= in_late_binary_op
;
242 in_late_binary_op
= true;
243 x
= build_modify_expr (loc
, lhs
, NULL_TREE
, opcode
, loc
, rhs
, NULL_TREE
);
244 in_late_binary_op
= save
;
245 if (x
== error_mark_node
)
246 return error_mark_node
;
247 if (TREE_CODE (x
) == COMPOUND_EXPR
)
249 pre
= TREE_OPERAND (x
, 0);
250 gcc_assert (TREE_CODE (pre
) == SAVE_EXPR
);
251 x
= TREE_OPERAND (x
, 1);
253 gcc_assert (TREE_CODE (x
) == MODIFY_EXPR
);
254 rhs
= TREE_OPERAND (x
, 1);
256 /* Punt the actual generation of atomic operations to common code. */
257 if (code
== OMP_ATOMIC
)
258 type
= void_type_node
;
259 x
= build2 (code
, type
, addr
, rhs
);
260 SET_EXPR_LOCATION (x
, loc
);
261 OMP_ATOMIC_SEQ_CST (x
) = seq_cst
;
263 /* Generally it is hard to prove lhs1 and lhs are the same memory
264 location, just diagnose different variables. */
270 if (code
== OMP_ATOMIC
)
271 error_at (loc
, "%<#pragma omp atomic update%> uses two different variables for memory");
273 error_at (loc
, "%<#pragma omp atomic capture%> uses two different variables for memory");
274 return error_mark_node
;
277 if (code
!= OMP_ATOMIC
)
279 /* Generally it is hard to prove lhs1 and lhs are the same memory
280 location, just diagnose different variables. */
281 if (lhs1
&& VAR_P (lhs1
) && VAR_P (lhs
))
285 error_at (loc
, "%<#pragma omp atomic capture%> uses two different variables for memory");
286 return error_mark_node
;
289 x
= build_modify_expr (loc
, v
, NULL_TREE
, NOP_EXPR
,
291 if (rhs1
&& rhs1
!= lhs
)
293 tree rhs1addr
= build_unary_op (loc
, ADDR_EXPR
, rhs1
, 0);
294 if (rhs1addr
== error_mark_node
)
295 return error_mark_node
;
296 x
= omit_one_operand_loc (loc
, type
, x
, rhs1addr
);
298 if (lhs1
&& lhs1
!= lhs
)
300 tree lhs1addr
= build_unary_op (loc
, ADDR_EXPR
, lhs1
, 0);
301 if (lhs1addr
== error_mark_node
)
302 return error_mark_node
;
303 if (code
== OMP_ATOMIC_CAPTURE_OLD
)
304 x
= omit_one_operand_loc (loc
, type
, x
, lhs1addr
);
308 x
= omit_two_operands_loc (loc
, type
, x
, x
, lhs1addr
);
312 else if (rhs1
&& rhs1
!= lhs
)
314 tree rhs1addr
= build_unary_op (loc
, ADDR_EXPR
, rhs1
, 0);
315 if (rhs1addr
== error_mark_node
)
316 return error_mark_node
;
317 x
= omit_one_operand_loc (loc
, type
, x
, rhs1addr
);
321 x
= omit_one_operand_loc (loc
, type
, x
, pre
);
326 /* Complete a #pragma omp flush construct. We don't do anything with
327 the variable list that the syntax allows. LOC is the location of
331 c_finish_omp_flush (location_t loc
)
335 x
= builtin_decl_explicit (BUILT_IN_SYNC_SYNCHRONIZE
);
336 x
= build_call_expr_loc (loc
, x
, 0);
341 /* Check and canonicalize OMP_FOR increment expression.
342 Helper function for c_finish_omp_for. */
345 check_omp_for_incr_expr (location_t loc
, tree exp
, tree decl
)
349 if (!INTEGRAL_TYPE_P (TREE_TYPE (exp
))
350 || TYPE_PRECISION (TREE_TYPE (exp
)) < TYPE_PRECISION (TREE_TYPE (decl
)))
351 return error_mark_node
;
354 return build_int_cst (TREE_TYPE (exp
), 0);
356 switch (TREE_CODE (exp
))
359 t
= check_omp_for_incr_expr (loc
, TREE_OPERAND (exp
, 0), decl
);
360 if (t
!= error_mark_node
)
361 return fold_convert_loc (loc
, TREE_TYPE (exp
), t
);
364 t
= check_omp_for_incr_expr (loc
, TREE_OPERAND (exp
, 0), decl
);
365 if (t
!= error_mark_node
)
366 return fold_build2_loc (loc
, MINUS_EXPR
,
367 TREE_TYPE (exp
), t
, TREE_OPERAND (exp
, 1));
370 t
= check_omp_for_incr_expr (loc
, TREE_OPERAND (exp
, 0), decl
);
371 if (t
!= error_mark_node
)
372 return fold_build2_loc (loc
, PLUS_EXPR
,
373 TREE_TYPE (exp
), t
, TREE_OPERAND (exp
, 1));
374 t
= check_omp_for_incr_expr (loc
, TREE_OPERAND (exp
, 1), decl
);
375 if (t
!= error_mark_node
)
376 return fold_build2_loc (loc
, PLUS_EXPR
,
377 TREE_TYPE (exp
), TREE_OPERAND (exp
, 0), t
);
381 /* cp_build_modify_expr forces preevaluation of the RHS to make
382 sure that it is evaluated before the lvalue-rvalue conversion
383 is applied to the LHS. Reconstruct the original expression. */
384 tree op0
= TREE_OPERAND (exp
, 0);
385 if (TREE_CODE (op0
) == TARGET_EXPR
386 && !VOID_TYPE_P (TREE_TYPE (op0
)))
388 tree op1
= TREE_OPERAND (exp
, 1);
389 tree temp
= TARGET_EXPR_SLOT (op0
);
390 if (BINARY_CLASS_P (op1
)
391 && TREE_OPERAND (op1
, 1) == temp
)
393 op1
= copy_node (op1
);
394 TREE_OPERAND (op1
, 1) = TARGET_EXPR_INITIAL (op0
);
395 return check_omp_for_incr_expr (loc
, op1
, decl
);
404 return error_mark_node
;
407 /* If the OMP_FOR increment expression in INCR is of pointer type,
408 canonicalize it into an expression handled by gimplify_omp_for()
409 and return it. DECL is the iteration variable. */
412 c_omp_for_incr_canonicalize_ptr (location_t loc
, tree decl
, tree incr
)
414 if (POINTER_TYPE_P (TREE_TYPE (decl
))
415 && TREE_OPERAND (incr
, 1))
417 tree t
= fold_convert_loc (loc
,
418 sizetype
, TREE_OPERAND (incr
, 1));
420 if (TREE_CODE (incr
) == POSTDECREMENT_EXPR
421 || TREE_CODE (incr
) == PREDECREMENT_EXPR
)
422 t
= fold_build1_loc (loc
, NEGATE_EXPR
, sizetype
, t
);
423 t
= fold_build_pointer_plus (decl
, t
);
424 incr
= build2 (MODIFY_EXPR
, void_type_node
, decl
, t
);
429 /* Validate and generate OMP_FOR.
430 DECLV is a vector of iteration variables, for each collapsed loop.
431 INITV, CONDV and INCRV are vectors containing initialization
432 expressions, controlling predicates and increment expressions.
433 BODY is the body of the loop and PRE_BODY statements that go before
437 c_finish_omp_for (location_t locus
, enum tree_code code
, tree declv
,
438 tree initv
, tree condv
, tree incrv
, tree body
, tree pre_body
)
444 if ((code
== CILK_SIMD
|| code
== CILK_FOR
)
445 && !c_check_cilk_loop (locus
, TREE_VEC_ELT (declv
, 0)))
448 gcc_assert (TREE_VEC_LENGTH (declv
) == TREE_VEC_LENGTH (initv
));
449 gcc_assert (TREE_VEC_LENGTH (declv
) == TREE_VEC_LENGTH (condv
));
450 gcc_assert (TREE_VEC_LENGTH (declv
) == TREE_VEC_LENGTH (incrv
));
451 for (i
= 0; i
< TREE_VEC_LENGTH (declv
); i
++)
453 tree decl
= TREE_VEC_ELT (declv
, i
);
454 tree init
= TREE_VEC_ELT (initv
, i
);
455 tree cond
= TREE_VEC_ELT (condv
, i
);
456 tree incr
= TREE_VEC_ELT (incrv
, i
);
459 if (EXPR_HAS_LOCATION (init
))
460 elocus
= EXPR_LOCATION (init
);
462 /* Validate the iteration variable. */
463 if (!INTEGRAL_TYPE_P (TREE_TYPE (decl
))
464 && TREE_CODE (TREE_TYPE (decl
)) != POINTER_TYPE
)
466 error_at (elocus
, "invalid type for iteration variable %qE", decl
);
470 /* In the case of "for (int i = 0...)", init will be a decl. It should
471 have a DECL_INITIAL that we can turn into an assignment. */
474 elocus
= DECL_SOURCE_LOCATION (decl
);
476 init
= DECL_INITIAL (decl
);
479 error_at (elocus
, "%qE is not initialized", decl
);
480 init
= integer_zero_node
;
484 init
= build_modify_expr (elocus
, decl
, NULL_TREE
, NOP_EXPR
,
485 /* FIXME diagnostics: This should
486 be the location of the INIT. */
491 if (init
!= error_mark_node
)
493 gcc_assert (TREE_CODE (init
) == MODIFY_EXPR
);
494 gcc_assert (TREE_OPERAND (init
, 0) == decl
);
497 if (cond
== NULL_TREE
)
499 error_at (elocus
, "missing controlling predicate");
504 bool cond_ok
= false;
506 if (EXPR_HAS_LOCATION (cond
))
507 elocus
= EXPR_LOCATION (cond
);
509 if (TREE_CODE (cond
) == LT_EXPR
510 || TREE_CODE (cond
) == LE_EXPR
511 || TREE_CODE (cond
) == GT_EXPR
512 || TREE_CODE (cond
) == GE_EXPR
513 || TREE_CODE (cond
) == NE_EXPR
514 || TREE_CODE (cond
) == EQ_EXPR
)
516 tree op0
= TREE_OPERAND (cond
, 0);
517 tree op1
= TREE_OPERAND (cond
, 1);
519 /* 2.5.1. The comparison in the condition is computed in
520 the type of DECL, otherwise the behavior is undefined.
526 according to ISO will be evaluated as:
531 if (TREE_CODE (op0
) == NOP_EXPR
532 && decl
== TREE_OPERAND (op0
, 0))
534 TREE_OPERAND (cond
, 0) = TREE_OPERAND (op0
, 0);
535 TREE_OPERAND (cond
, 1)
536 = fold_build1_loc (elocus
, NOP_EXPR
, TREE_TYPE (decl
),
537 TREE_OPERAND (cond
, 1));
539 else if (TREE_CODE (op1
) == NOP_EXPR
540 && decl
== TREE_OPERAND (op1
, 0))
542 TREE_OPERAND (cond
, 1) = TREE_OPERAND (op1
, 0);
543 TREE_OPERAND (cond
, 0)
544 = fold_build1_loc (elocus
, NOP_EXPR
, TREE_TYPE (decl
),
545 TREE_OPERAND (cond
, 0));
548 if (decl
== TREE_OPERAND (cond
, 0))
550 else if (decl
== TREE_OPERAND (cond
, 1))
553 swap_tree_comparison (TREE_CODE (cond
)));
554 TREE_OPERAND (cond
, 1) = TREE_OPERAND (cond
, 0);
555 TREE_OPERAND (cond
, 0) = decl
;
559 if (TREE_CODE (cond
) == NE_EXPR
560 || TREE_CODE (cond
) == EQ_EXPR
)
562 if (!INTEGRAL_TYPE_P (TREE_TYPE (decl
)))
564 if (code
!= CILK_SIMD
&& code
!= CILK_FOR
)
567 else if (operand_equal_p (TREE_OPERAND (cond
, 1),
568 TYPE_MIN_VALUE (TREE_TYPE (decl
)),
570 TREE_SET_CODE (cond
, TREE_CODE (cond
) == NE_EXPR
571 ? GT_EXPR
: LE_EXPR
);
572 else if (operand_equal_p (TREE_OPERAND (cond
, 1),
573 TYPE_MAX_VALUE (TREE_TYPE (decl
)),
575 TREE_SET_CODE (cond
, TREE_CODE (cond
) == NE_EXPR
576 ? LT_EXPR
: GE_EXPR
);
577 else if (code
!= CILK_SIMD
&& code
!= CILK_FOR
)
584 error_at (elocus
, "invalid controlling predicate");
589 if (incr
== NULL_TREE
)
591 error_at (elocus
, "missing increment expression");
596 bool incr_ok
= false;
598 if (EXPR_HAS_LOCATION (incr
))
599 elocus
= EXPR_LOCATION (incr
);
601 /* Check all the valid increment expressions: v++, v--, ++v, --v,
602 v = v + incr, v = incr + v and v = v - incr. */
603 switch (TREE_CODE (incr
))
605 case POSTINCREMENT_EXPR
:
606 case PREINCREMENT_EXPR
:
607 case POSTDECREMENT_EXPR
:
608 case PREDECREMENT_EXPR
:
609 if (TREE_OPERAND (incr
, 0) != decl
)
613 incr
= c_omp_for_incr_canonicalize_ptr (elocus
, decl
, incr
);
617 if (TREE_CODE (TREE_OPERAND (incr
, 0)) != SAVE_EXPR
618 || TREE_CODE (TREE_OPERAND (incr
, 1)) != MODIFY_EXPR
)
620 incr
= TREE_OPERAND (incr
, 1);
623 if (TREE_OPERAND (incr
, 0) != decl
)
625 if (TREE_OPERAND (incr
, 1) == decl
)
627 if (TREE_CODE (TREE_OPERAND (incr
, 1)) == PLUS_EXPR
628 && (TREE_OPERAND (TREE_OPERAND (incr
, 1), 0) == decl
629 || TREE_OPERAND (TREE_OPERAND (incr
, 1), 1) == decl
))
631 else if ((TREE_CODE (TREE_OPERAND (incr
, 1)) == MINUS_EXPR
632 || (TREE_CODE (TREE_OPERAND (incr
, 1))
633 == POINTER_PLUS_EXPR
))
634 && TREE_OPERAND (TREE_OPERAND (incr
, 1), 0) == decl
)
638 tree t
= check_omp_for_incr_expr (elocus
,
639 TREE_OPERAND (incr
, 1),
641 if (t
!= error_mark_node
)
644 t
= build2 (PLUS_EXPR
, TREE_TYPE (decl
), decl
, t
);
645 incr
= build2 (MODIFY_EXPR
, void_type_node
, decl
, t
);
655 error_at (elocus
, "invalid increment expression");
660 TREE_VEC_ELT (initv
, i
) = init
;
661 TREE_VEC_ELT (incrv
, i
) = incr
;
668 tree t
= make_node (code
);
670 TREE_TYPE (t
) = void_type_node
;
671 OMP_FOR_INIT (t
) = initv
;
672 OMP_FOR_COND (t
) = condv
;
673 OMP_FOR_INCR (t
) = incrv
;
674 OMP_FOR_BODY (t
) = body
;
675 OMP_FOR_PRE_BODY (t
) = pre_body
;
677 SET_EXPR_LOCATION (t
, locus
);
682 /* Right now we have 14 different combined constructs, this
683 function attempts to split or duplicate clauses for combined
684 constructs. CODE is the innermost construct in the combined construct,
685 and MASK allows to determine which constructs are combined together,
686 as every construct has at least one clause that no other construct
687 has (except for OMP_SECTIONS, but that can be only combined with parallel).
688 Combined constructs are:
689 #pragma omp parallel for
690 #pragma omp parallel sections
691 #pragma omp parallel for simd
693 #pragma omp distribute simd
694 #pragma omp distribute parallel for
695 #pragma omp distribute parallel for simd
696 #pragma omp teams distribute
697 #pragma omp teams distribute parallel for
698 #pragma omp teams distribute parallel for simd
699 #pragma omp target teams
700 #pragma omp target teams distribute
701 #pragma omp target teams distribute parallel for
702 #pragma omp target teams distribute parallel for simd */
705 c_omp_split_clauses (location_t loc
, enum tree_code code
,
706 omp_clause_mask mask
, tree clauses
, tree
*cclauses
)
709 enum c_omp_clause_split s
;
712 for (i
= 0; i
< C_OMP_CLAUSE_SPLIT_COUNT
; i
++)
714 /* Add implicit nowait clause on
715 #pragma omp parallel {for,for simd,sections}. */
716 if ((mask
& (OMP_CLAUSE_MASK_1
<< PRAGMA_OMP_CLAUSE_NUM_THREADS
)) != 0)
721 cclauses
[C_OMP_CLAUSE_SPLIT_FOR
]
722 = build_omp_clause (loc
, OMP_CLAUSE_NOWAIT
);
725 cclauses
[C_OMP_CLAUSE_SPLIT_SECTIONS
]
726 = build_omp_clause (loc
, OMP_CLAUSE_NOWAIT
);
732 for (; clauses
; clauses
= next
)
734 next
= OMP_CLAUSE_CHAIN (clauses
);
736 switch (OMP_CLAUSE_CODE (clauses
))
738 /* First the clauses that are unique to some constructs. */
739 case OMP_CLAUSE_DEVICE
:
741 s
= C_OMP_CLAUSE_SPLIT_TARGET
;
743 case OMP_CLAUSE_NUM_TEAMS
:
744 case OMP_CLAUSE_THREAD_LIMIT
:
745 s
= C_OMP_CLAUSE_SPLIT_TEAMS
;
747 case OMP_CLAUSE_DIST_SCHEDULE
:
748 s
= C_OMP_CLAUSE_SPLIT_DISTRIBUTE
;
750 case OMP_CLAUSE_COPYIN
:
751 case OMP_CLAUSE_NUM_THREADS
:
752 case OMP_CLAUSE_PROC_BIND
:
753 s
= C_OMP_CLAUSE_SPLIT_PARALLEL
;
755 case OMP_CLAUSE_ORDERED
:
756 case OMP_CLAUSE_SCHEDULE
:
757 case OMP_CLAUSE_NOWAIT
:
758 s
= C_OMP_CLAUSE_SPLIT_FOR
;
760 case OMP_CLAUSE_SAFELEN
:
761 case OMP_CLAUSE_LINEAR
:
762 case OMP_CLAUSE_ALIGNED
:
763 s
= C_OMP_CLAUSE_SPLIT_SIMD
;
765 /* Duplicate this to all of distribute, for and simd. */
766 case OMP_CLAUSE_COLLAPSE
:
767 if (code
== OMP_SIMD
)
769 c
= build_omp_clause (OMP_CLAUSE_LOCATION (clauses
),
770 OMP_CLAUSE_COLLAPSE
);
771 OMP_CLAUSE_COLLAPSE_EXPR (c
)
772 = OMP_CLAUSE_COLLAPSE_EXPR (clauses
);
773 OMP_CLAUSE_CHAIN (c
) = cclauses
[C_OMP_CLAUSE_SPLIT_SIMD
];
774 cclauses
[C_OMP_CLAUSE_SPLIT_SIMD
] = c
;
776 if ((mask
& (OMP_CLAUSE_MASK_1
<< PRAGMA_OMP_CLAUSE_SCHEDULE
)) != 0)
778 if ((mask
& (OMP_CLAUSE_MASK_1
779 << PRAGMA_OMP_CLAUSE_DIST_SCHEDULE
)) != 0)
781 c
= build_omp_clause (OMP_CLAUSE_LOCATION (clauses
),
782 OMP_CLAUSE_COLLAPSE
);
783 OMP_CLAUSE_COLLAPSE_EXPR (c
)
784 = OMP_CLAUSE_COLLAPSE_EXPR (clauses
);
785 OMP_CLAUSE_CHAIN (c
) = cclauses
[C_OMP_CLAUSE_SPLIT_FOR
];
786 cclauses
[C_OMP_CLAUSE_SPLIT_FOR
] = c
;
787 s
= C_OMP_CLAUSE_SPLIT_DISTRIBUTE
;
790 s
= C_OMP_CLAUSE_SPLIT_FOR
;
793 s
= C_OMP_CLAUSE_SPLIT_DISTRIBUTE
;
795 /* Private clause is supported on all constructs but target,
796 it is enough to put it on the innermost one. For
797 #pragma omp {for,sections} put it on parallel though,
798 as that's what we did for OpenMP 3.1. */
799 case OMP_CLAUSE_PRIVATE
:
802 case OMP_SIMD
: s
= C_OMP_CLAUSE_SPLIT_SIMD
; break;
803 case OMP_FOR
: case OMP_SECTIONS
:
804 case OMP_PARALLEL
: s
= C_OMP_CLAUSE_SPLIT_PARALLEL
; break;
805 case OMP_DISTRIBUTE
: s
= C_OMP_CLAUSE_SPLIT_DISTRIBUTE
; break;
806 case OMP_TEAMS
: s
= C_OMP_CLAUSE_SPLIT_TEAMS
; break;
807 default: gcc_unreachable ();
810 /* Firstprivate clause is supported on all constructs but
811 target and simd. Put it on the outermost of those and
812 duplicate on parallel. */
813 case OMP_CLAUSE_FIRSTPRIVATE
:
814 if ((mask
& (OMP_CLAUSE_MASK_1
<< PRAGMA_OMP_CLAUSE_NUM_THREADS
))
817 if ((mask
& ((OMP_CLAUSE_MASK_1
<< PRAGMA_OMP_CLAUSE_NUM_TEAMS
)
819 << PRAGMA_OMP_CLAUSE_DIST_SCHEDULE
))) != 0)
821 c
= build_omp_clause (OMP_CLAUSE_LOCATION (clauses
),
822 OMP_CLAUSE_FIRSTPRIVATE
);
823 OMP_CLAUSE_DECL (c
) = OMP_CLAUSE_DECL (clauses
);
824 OMP_CLAUSE_CHAIN (c
) = cclauses
[C_OMP_CLAUSE_SPLIT_PARALLEL
];
825 cclauses
[C_OMP_CLAUSE_SPLIT_PARALLEL
] = c
;
826 if ((mask
& (OMP_CLAUSE_MASK_1
827 << PRAGMA_OMP_CLAUSE_NUM_TEAMS
)) != 0)
828 s
= C_OMP_CLAUSE_SPLIT_TEAMS
;
830 s
= C_OMP_CLAUSE_SPLIT_DISTRIBUTE
;
834 #pragma omp parallel{, for{, simd}, sections}. */
835 s
= C_OMP_CLAUSE_SPLIT_PARALLEL
;
837 else if ((mask
& (OMP_CLAUSE_MASK_1
<< PRAGMA_OMP_CLAUSE_NUM_TEAMS
))
840 /* This must be one of
841 #pragma omp {,target }teams distribute
842 #pragma omp target teams
843 #pragma omp {,target }teams distribute simd. */
844 gcc_assert (code
== OMP_DISTRIBUTE
846 || code
== OMP_SIMD
);
847 s
= C_OMP_CLAUSE_SPLIT_TEAMS
;
849 else if ((mask
& (OMP_CLAUSE_MASK_1
850 << PRAGMA_OMP_CLAUSE_DIST_SCHEDULE
)) != 0)
852 /* This must be #pragma omp distribute simd. */
853 gcc_assert (code
== OMP_SIMD
);
854 s
= C_OMP_CLAUSE_SPLIT_TEAMS
;
858 /* This must be #pragma omp for simd. */
859 gcc_assert (code
== OMP_SIMD
);
860 s
= C_OMP_CLAUSE_SPLIT_FOR
;
863 /* Lastprivate is allowed on for, sections and simd. In
864 parallel {for{, simd},sections} we actually want to put it on
865 parallel rather than for or sections. */
866 case OMP_CLAUSE_LASTPRIVATE
:
867 if (code
== OMP_FOR
|| code
== OMP_SECTIONS
)
869 if ((mask
& (OMP_CLAUSE_MASK_1
<< PRAGMA_OMP_CLAUSE_NUM_THREADS
))
871 s
= C_OMP_CLAUSE_SPLIT_PARALLEL
;
873 s
= C_OMP_CLAUSE_SPLIT_FOR
;
876 gcc_assert (code
== OMP_SIMD
);
877 if ((mask
& (OMP_CLAUSE_MASK_1
<< PRAGMA_OMP_CLAUSE_SCHEDULE
)) != 0)
879 c
= build_omp_clause (OMP_CLAUSE_LOCATION (clauses
),
880 OMP_CLAUSE_LASTPRIVATE
);
881 OMP_CLAUSE_DECL (c
) = OMP_CLAUSE_DECL (clauses
);
882 if ((mask
& (OMP_CLAUSE_MASK_1
<< PRAGMA_OMP_CLAUSE_NUM_THREADS
))
884 s
= C_OMP_CLAUSE_SPLIT_PARALLEL
;
886 s
= C_OMP_CLAUSE_SPLIT_FOR
;
887 OMP_CLAUSE_CHAIN (c
) = cclauses
[s
];
890 s
= C_OMP_CLAUSE_SPLIT_SIMD
;
892 /* Shared and default clauses are allowed on private and teams. */
893 case OMP_CLAUSE_SHARED
:
894 case OMP_CLAUSE_DEFAULT
:
895 if (code
== OMP_TEAMS
)
897 s
= C_OMP_CLAUSE_SPLIT_TEAMS
;
900 if ((mask
& (OMP_CLAUSE_MASK_1
<< PRAGMA_OMP_CLAUSE_NUM_TEAMS
))
903 c
= build_omp_clause (OMP_CLAUSE_LOCATION (clauses
),
904 OMP_CLAUSE_CODE (clauses
));
905 if (OMP_CLAUSE_CODE (clauses
) == OMP_CLAUSE_SHARED
)
906 OMP_CLAUSE_DECL (c
) = OMP_CLAUSE_DECL (clauses
);
908 OMP_CLAUSE_DEFAULT_KIND (c
)
909 = OMP_CLAUSE_DEFAULT_KIND (clauses
);
910 OMP_CLAUSE_CHAIN (c
) = cclauses
[C_OMP_CLAUSE_SPLIT_TEAMS
];
911 cclauses
[C_OMP_CLAUSE_SPLIT_TEAMS
] = c
;
914 s
= C_OMP_CLAUSE_SPLIT_PARALLEL
;
916 /* Reduction is allowed on simd, for, parallel, sections and teams.
917 Duplicate it on all of them, but omit on for or sections if
918 parallel is present. */
919 case OMP_CLAUSE_REDUCTION
:
920 if (code
== OMP_SIMD
)
922 c
= build_omp_clause (OMP_CLAUSE_LOCATION (clauses
),
923 OMP_CLAUSE_REDUCTION
);
924 OMP_CLAUSE_DECL (c
) = OMP_CLAUSE_DECL (clauses
);
925 OMP_CLAUSE_REDUCTION_CODE (c
)
926 = OMP_CLAUSE_REDUCTION_CODE (clauses
);
927 OMP_CLAUSE_REDUCTION_PLACEHOLDER (c
)
928 = OMP_CLAUSE_REDUCTION_PLACEHOLDER (clauses
);
929 OMP_CLAUSE_CHAIN (c
) = cclauses
[C_OMP_CLAUSE_SPLIT_SIMD
];
930 cclauses
[C_OMP_CLAUSE_SPLIT_SIMD
] = c
;
932 if ((mask
& (OMP_CLAUSE_MASK_1
<< PRAGMA_OMP_CLAUSE_SCHEDULE
)) != 0)
934 if ((mask
& (OMP_CLAUSE_MASK_1
<< PRAGMA_OMP_CLAUSE_NUM_TEAMS
))
937 c
= build_omp_clause (OMP_CLAUSE_LOCATION (clauses
),
938 OMP_CLAUSE_REDUCTION
);
939 OMP_CLAUSE_DECL (c
) = OMP_CLAUSE_DECL (clauses
);
940 OMP_CLAUSE_REDUCTION_CODE (c
)
941 = OMP_CLAUSE_REDUCTION_CODE (clauses
);
942 OMP_CLAUSE_REDUCTION_PLACEHOLDER (c
)
943 = OMP_CLAUSE_REDUCTION_PLACEHOLDER (clauses
);
944 OMP_CLAUSE_CHAIN (c
) = cclauses
[C_OMP_CLAUSE_SPLIT_PARALLEL
];
945 cclauses
[C_OMP_CLAUSE_SPLIT_PARALLEL
] = c
;
946 s
= C_OMP_CLAUSE_SPLIT_TEAMS
;
948 else if ((mask
& (OMP_CLAUSE_MASK_1
949 << PRAGMA_OMP_CLAUSE_NUM_THREADS
)) != 0)
950 s
= C_OMP_CLAUSE_SPLIT_PARALLEL
;
952 s
= C_OMP_CLAUSE_SPLIT_FOR
;
954 else if (code
== OMP_SECTIONS
)
955 s
= C_OMP_CLAUSE_SPLIT_PARALLEL
;
957 s
= C_OMP_CLAUSE_SPLIT_TEAMS
;
960 /* FIXME: This is currently being discussed. */
961 if ((mask
& (OMP_CLAUSE_MASK_1
<< PRAGMA_OMP_CLAUSE_NUM_THREADS
))
963 s
= C_OMP_CLAUSE_SPLIT_PARALLEL
;
965 s
= C_OMP_CLAUSE_SPLIT_TARGET
;
970 OMP_CLAUSE_CHAIN (clauses
) = cclauses
[s
];
971 cclauses
[s
] = clauses
;
976 /* qsort callback to compare #pragma omp declare simd clauses. */
979 c_omp_declare_simd_clause_cmp (const void *p
, const void *q
)
981 tree a
= *(const tree
*) p
;
982 tree b
= *(const tree
*) q
;
983 if (OMP_CLAUSE_CODE (a
) != OMP_CLAUSE_CODE (b
))
985 if (OMP_CLAUSE_CODE (a
) > OMP_CLAUSE_CODE (b
))
989 if (OMP_CLAUSE_CODE (a
) != OMP_CLAUSE_SIMDLEN
990 && OMP_CLAUSE_CODE (a
) != OMP_CLAUSE_INBRANCH
991 && OMP_CLAUSE_CODE (a
) != OMP_CLAUSE_NOTINBRANCH
)
993 int c
= tree_to_shwi (OMP_CLAUSE_DECL (a
));
994 int d
= tree_to_shwi (OMP_CLAUSE_DECL (b
));
1003 /* Change PARM_DECLs in OMP_CLAUSE_DECL of #pragma omp declare simd
1004 CLAUSES on FNDECL into argument indexes and sort them. */
1007 c_omp_declare_simd_clauses_to_numbers (tree parms
, tree clauses
)
1010 vec
<tree
> clvec
= vNULL
;
1012 for (c
= clauses
; c
; c
= OMP_CLAUSE_CHAIN (c
))
1014 if (OMP_CLAUSE_CODE (c
) != OMP_CLAUSE_SIMDLEN
1015 && OMP_CLAUSE_CODE (c
) != OMP_CLAUSE_INBRANCH
1016 && OMP_CLAUSE_CODE (c
) != OMP_CLAUSE_NOTINBRANCH
)
1018 tree decl
= OMP_CLAUSE_DECL (c
);
1021 for (arg
= parms
, idx
= 0; arg
;
1022 arg
= TREE_CHAIN (arg
), idx
++)
1025 if (arg
== NULL_TREE
)
1027 error_at (OMP_CLAUSE_LOCATION (c
),
1028 "%qD is not an function argument", decl
);
1031 OMP_CLAUSE_DECL (c
) = build_int_cst (integer_type_node
, idx
);
1033 clvec
.safe_push (c
);
1035 if (!clvec
.is_empty ())
1037 unsigned int len
= clvec
.length (), i
;
1038 clvec
.qsort (c_omp_declare_simd_clause_cmp
);
1040 for (i
= 0; i
< len
; i
++)
1041 OMP_CLAUSE_CHAIN (clvec
[i
]) = (i
< len
- 1) ? clvec
[i
+ 1] : NULL_TREE
;
1047 /* Change argument indexes in CLAUSES of FNDECL back to PARM_DECLs. */
1050 c_omp_declare_simd_clauses_to_decls (tree fndecl
, tree clauses
)
1054 for (c
= clauses
; c
; c
= OMP_CLAUSE_CHAIN (c
))
1055 if (OMP_CLAUSE_CODE (c
) != OMP_CLAUSE_SIMDLEN
1056 && OMP_CLAUSE_CODE (c
) != OMP_CLAUSE_INBRANCH
1057 && OMP_CLAUSE_CODE (c
) != OMP_CLAUSE_NOTINBRANCH
)
1059 int idx
= tree_to_shwi (OMP_CLAUSE_DECL (c
)), i
;
1061 for (arg
= DECL_ARGUMENTS (fndecl
), i
= 0; arg
;
1062 arg
= TREE_CHAIN (arg
), i
++)
1066 OMP_CLAUSE_DECL (c
) = arg
;
1070 /* True if OpenMP sharing attribute of DECL is predetermined. */
1072 enum omp_clause_default_kind
1073 c_omp_predetermined_sharing (tree decl
)
1075 /* Variables with const-qualified type having no mutable member
1076 are predetermined shared. */
1077 if (TREE_READONLY (decl
))
1078 return OMP_CLAUSE_DEFAULT_SHARED
;
1080 return OMP_CLAUSE_DEFAULT_UNSPECIFIED
;