/* Lowering pass for OpenMP directives.  Converts OpenMP directives
   into explicit calls to the runtime library (libgomp) and data
   marshalling to implement data sharing and copying clauses.
   Contributed by Diego Novillo <dnovillo@redhat.com>

   Copyright (C) 2005-2013 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "coretypes.h"
#include "tree-iterator.h"
#include "tree-inline.h"
#include "langhooks.h"
#include "diagnostic-core.h"
#include "tree-flow.h"
#include "tree-pass.h"
#include "splay-tree.h"
/* Lowering of OpenMP parallel and workshare constructs proceeds in two
   phases.  The first phase scans the function looking for OMP statements
   and then for variables that must be replaced to satisfy data sharing
   clauses.  The second phase expands code for the constructs, as well as
   re-gimplifying things when variables have been replaced with complex
   expressions.

   Final code generation is done by pass_expand_omp.  The flowgraph is
   scanned for parallel regions which are then moved to a new
   function, to be invoked by the thread library.  */
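
/* As an illustrative sketch only (the child function name and the exact
   libgomp entry points and .omp_data_s marshalling are decided during
   scanning and expansion), a directive such as

	#pragma omp parallel shared (x)
	  body;

   inside a function foo is outlined into a child function and lowered
   into runtime calls roughly like

	.omp_data_o.x = x;
	__builtin_GOMP_parallel_start (foo._omp_fn.0, &.omp_data_o, 0);
	foo._omp_fn.0 (&.omp_data_o);
	__builtin_GOMP_parallel_end ();  */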
/* Context structure.  Used to store information about each parallel
   directive in the code.  */

typedef struct omp_context
{
  /* This field must be at the beginning, as we do "inheritance":  Some
     callback functions for tree-inline.c (e.g., omp_copy_decl)
     receive a copy_body_data pointer that is up-casted to an
     omp_context pointer.  */
  copy_body_data cb;

  /* The tree of contexts corresponding to the encountered constructs.  */
  struct omp_context *outer;
  gimple stmt;

  /* Map variables to fields in a structure that allows communication
     between sending and receiving threads.  */
  splay_tree field_map;
  tree record_type;
  tree sender_decl;
  tree receiver_decl;

  /* These are used just by task contexts, if task firstprivate fn is
     needed.  srecord_type is used to communicate from the thread
     that encountered the task construct to task firstprivate fn,
     record_type is allocated by GOMP_task, initialized by task firstprivate
     fn and passed to the task body fn.  */
  splay_tree sfield_map;
  tree srecord_type;

  /* A chain of variables to add to the top-level block surrounding the
     construct.  In the case of a parallel, this is in the child function.  */
  tree block_vars;

  /* What to do with variables with implicitly determined sharing
     attributes.  */
  enum omp_clause_default_kind default_kind;

  /* Nesting depth of this context.  Used to beautify error messages re
     invalid gotos.  The outermost ctx is depth 1, with depth 0 being
     reserved for the main body of the function.  */
  int depth;

  /* True if this parallel directive is nested within another.  */
  bool is_nested;
} omp_context;
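
/* One loop of a (possibly collapsed) loop nest: the iteration variable V
   runs from N1 towards N2 under COND_CODE, advancing by STEP.  */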
struct omp_for_data_loop
{
  tree v, n1, n2, step;
  enum tree_code cond_code;
};

/* A structure describing the main elements of a parallel loop.  */

struct omp_for_data
{
  struct omp_for_data_loop loop;
  tree chunk_size;
  gimple for_stmt;
  tree pre, iter_type;
  int collapse;
  bool have_nowait, have_ordered;
  enum omp_clause_schedule_kind sched_kind;
  struct omp_for_data_loop *loops;
};
static splay_tree all_contexts;
static int taskreg_nesting_level;
struct omp_region *root_omp_region;
static bitmap task_shared_vars;

static void scan_omp (gimple_seq *, omp_context *);
static tree scan_omp_1_op (tree *, int *, void *);
#define WALK_SUBSTMTS  \
    case GIMPLE_BIND: \
    case GIMPLE_TRY: \
    case GIMPLE_CATCH: \
    case GIMPLE_EH_FILTER: \
    case GIMPLE_TRANSACTION: \
      /* The sub-statements for these should be walked.  */ \
      *handled_ops_p = false; \
      break;
/* Convenience function for calling scan_omp_1_op on tree operands.  */

static inline tree
scan_omp_op (tree *tp, omp_context *ctx)
{
  struct walk_stmt_info wi;

  memset (&wi, 0, sizeof (wi));
  wi.info = ctx;
  wi.want_locations = true;

  return walk_tree (tp, scan_omp_1_op, &wi, NULL);
}
static void lower_omp (gimple_seq *, omp_context *);
static tree lookup_decl_in_outer_ctx (tree, omp_context *);
static tree maybe_lookup_decl_in_outer_ctx (tree, omp_context *);
/* Find an OpenMP clause of type KIND within CLAUSES.  */

tree
find_omp_clause (tree clauses, enum omp_clause_code kind)
{
  for (; clauses ; clauses = OMP_CLAUSE_CHAIN (clauses))
    if (OMP_CLAUSE_CODE (clauses) == kind)
      return clauses;

  return NULL_TREE;
}
/* Return true if CTX is for an omp parallel.  */

static inline bool
is_parallel_ctx (omp_context *ctx)
{
  return gimple_code (ctx->stmt) == GIMPLE_OMP_PARALLEL;
}

/* Return true if CTX is for an omp task.  */

static inline bool
is_task_ctx (omp_context *ctx)
{
  return gimple_code (ctx->stmt) == GIMPLE_OMP_TASK;
}

/* Return true if CTX is for an omp parallel or omp task.  */

static inline bool
is_taskreg_ctx (omp_context *ctx)
{
  return gimple_code (ctx->stmt) == GIMPLE_OMP_PARALLEL
	 || gimple_code (ctx->stmt) == GIMPLE_OMP_TASK;
}

/* Return true if REGION is a combined parallel+workshare region.  */

static inline bool
is_combined_parallel (struct omp_region *region)
{
  return region->is_combined_parallel;
}
/* Extract the header elements of parallel loop FOR_STMT and store
   them into *FD.  */

static void
extract_omp_for_data (gimple for_stmt, struct omp_for_data *fd,
		      struct omp_for_data_loop *loops)
{
  tree t, var, *collapse_iter, *collapse_count;
  tree count = NULL_TREE, iter_type = long_integer_type_node;
  struct omp_for_data_loop *loop;
  int i;
  struct omp_for_data_loop dummy_loop;
  location_t loc = gimple_location (for_stmt);

  fd->for_stmt = for_stmt;
  fd->pre = NULL;
  fd->collapse = gimple_omp_for_collapse (for_stmt);
  if (fd->collapse > 1)
    fd->loops = loops;
  else
    fd->loops = &fd->loop;

  fd->have_nowait = fd->have_ordered = false;
  fd->sched_kind = OMP_CLAUSE_SCHEDULE_STATIC;
  fd->chunk_size = NULL_TREE;
  collapse_iter = NULL;
  collapse_count = NULL;

  for (t = gimple_omp_for_clauses (for_stmt); t ; t = OMP_CLAUSE_CHAIN (t))
    switch (OMP_CLAUSE_CODE (t))
      {
      case OMP_CLAUSE_NOWAIT:
	fd->have_nowait = true;
	break;
      case OMP_CLAUSE_ORDERED:
	fd->have_ordered = true;
	break;
      case OMP_CLAUSE_SCHEDULE:
	fd->sched_kind = OMP_CLAUSE_SCHEDULE_KIND (t);
	fd->chunk_size = OMP_CLAUSE_SCHEDULE_CHUNK_EXPR (t);
	break;
      case OMP_CLAUSE_COLLAPSE:
	if (fd->collapse > 1)
	  {
	    collapse_iter = &OMP_CLAUSE_COLLAPSE_ITERVAR (t);
	    collapse_count = &OMP_CLAUSE_COLLAPSE_COUNT (t);
	  }
	break;
      default:
	break;
      }

  /* FIXME: for now map schedule(auto) to schedule(static).
     There should be analysis to determine whether all iterations
     are approximately the same amount of work (then schedule(static)
     is best) or if it varies (then schedule(dynamic,N) is better).  */
  if (fd->sched_kind == OMP_CLAUSE_SCHEDULE_AUTO)
    {
      fd->sched_kind = OMP_CLAUSE_SCHEDULE_STATIC;
      gcc_assert (fd->chunk_size == NULL);
    }
  gcc_assert (fd->collapse == 1 || collapse_iter != NULL);
  if (fd->sched_kind == OMP_CLAUSE_SCHEDULE_RUNTIME)
    gcc_assert (fd->chunk_size == NULL);
  else if (fd->chunk_size == NULL)
    {
      /* We only need to compute a default chunk size for ordered
	 static loops and dynamic loops.  */
      if (fd->sched_kind != OMP_CLAUSE_SCHEDULE_STATIC
	  || fd->have_ordered
	  || fd->collapse > 1)
	fd->chunk_size = (fd->sched_kind == OMP_CLAUSE_SCHEDULE_STATIC)
			 ? integer_zero_node : integer_one_node;
    }

  for (i = 0; i < fd->collapse; i++)
    {
      if (fd->collapse == 1)
	loop = &fd->loop;
      else if (loops != NULL)
	loop = loops + i;
      else
	loop = &dummy_loop;

      loop->v = gimple_omp_for_index (for_stmt, i);
      gcc_assert (SSA_VAR_P (loop->v));
      gcc_assert (TREE_CODE (TREE_TYPE (loop->v)) == INTEGER_TYPE
		  || TREE_CODE (TREE_TYPE (loop->v)) == POINTER_TYPE);
      var = TREE_CODE (loop->v) == SSA_NAME ? SSA_NAME_VAR (loop->v) : loop->v;
      loop->n1 = gimple_omp_for_initial (for_stmt, i);

      loop->cond_code = gimple_omp_for_cond (for_stmt, i);
      loop->n2 = gimple_omp_for_final (for_stmt, i);
      switch (loop->cond_code)
	{
	case LT_EXPR:
	case GT_EXPR:
	  break;
	case LE_EXPR:
	  if (POINTER_TYPE_P (TREE_TYPE (loop->n2)))
	    loop->n2 = fold_build_pointer_plus_hwi_loc (loc, loop->n2, 1);
	  else
	    loop->n2 = fold_build2_loc (loc,
			    PLUS_EXPR, TREE_TYPE (loop->n2), loop->n2,
			    build_int_cst (TREE_TYPE (loop->n2), 1));
	  loop->cond_code = LT_EXPR;
	  break;
	case GE_EXPR:
	  if (POINTER_TYPE_P (TREE_TYPE (loop->n2)))
	    loop->n2 = fold_build_pointer_plus_hwi_loc (loc, loop->n2, -1);
	  else
	    loop->n2 = fold_build2_loc (loc,
			    MINUS_EXPR, TREE_TYPE (loop->n2), loop->n2,
			    build_int_cst (TREE_TYPE (loop->n2), 1));
	  loop->cond_code = GT_EXPR;
	  break;
	default:
	  gcc_unreachable ();
	}

      t = gimple_omp_for_incr (for_stmt, i);
      gcc_assert (TREE_OPERAND (t, 0) == var);
      switch (TREE_CODE (t))
	{
	case PLUS_EXPR:
	  loop->step = TREE_OPERAND (t, 1);
	  break;
	case POINTER_PLUS_EXPR:
	  loop->step = fold_convert (ssizetype, TREE_OPERAND (t, 1));
	  break;
	case MINUS_EXPR:
	  loop->step = TREE_OPERAND (t, 1);
	  loop->step = fold_build1_loc (loc,
			    NEGATE_EXPR, TREE_TYPE (loop->step),
			    loop->step);
	  break;
	default:
	  gcc_unreachable ();
	}

      if (iter_type != long_long_unsigned_type_node)
	{
	  if (POINTER_TYPE_P (TREE_TYPE (loop->v)))
	    iter_type = long_long_unsigned_type_node;
	  else if (TYPE_UNSIGNED (TREE_TYPE (loop->v))
		   && TYPE_PRECISION (TREE_TYPE (loop->v))
		      >= TYPE_PRECISION (iter_type))
	    {
	      tree n;

	      if (loop->cond_code == LT_EXPR)
		n = fold_build2_loc (loc,
				 PLUS_EXPR, TREE_TYPE (loop->v),
				 loop->n2, loop->step);
	      else
		n = loop->n1;
	      if (TREE_CODE (n) != INTEGER_CST
		  || tree_int_cst_lt (TYPE_MAX_VALUE (iter_type), n))
		iter_type = long_long_unsigned_type_node;
	    }
	  else if (TYPE_PRECISION (TREE_TYPE (loop->v))
		   > TYPE_PRECISION (iter_type))
	    {
	      tree n1, n2;

	      if (loop->cond_code == LT_EXPR)
		{
		  n1 = loop->n1;
		  n2 = fold_build2_loc (loc,
				    PLUS_EXPR, TREE_TYPE (loop->v),
				    loop->n2, loop->step);
		}
	      else
		{
		  n1 = fold_build2_loc (loc,
				    MINUS_EXPR, TREE_TYPE (loop->v),
				    loop->n2, loop->step);
		  n2 = loop->n1;
		}
	      if (TREE_CODE (n1) != INTEGER_CST
		  || TREE_CODE (n2) != INTEGER_CST
		  || !tree_int_cst_lt (TYPE_MIN_VALUE (iter_type), n1)
		  || !tree_int_cst_lt (n2, TYPE_MAX_VALUE (iter_type)))
		iter_type = long_long_unsigned_type_node;
	    }
	}

      if (collapse_count && *collapse_count == NULL)
	{
	  if ((i == 0 || count != NULL_TREE)
	      && TREE_CODE (TREE_TYPE (loop->v)) == INTEGER_TYPE
	      && TREE_CONSTANT (loop->n1)
	      && TREE_CONSTANT (loop->n2)
	      && TREE_CODE (loop->step) == INTEGER_CST)
	    {
	      tree itype = TREE_TYPE (loop->v);

	      if (POINTER_TYPE_P (itype))
		itype = signed_type_for (itype);
	      t = build_int_cst (itype, (loop->cond_code == LT_EXPR ? -1 : 1));
	      t = fold_build2_loc (loc,
			       PLUS_EXPR, itype,
			       fold_convert_loc (loc, itype, loop->step), t);
	      t = fold_build2_loc (loc, PLUS_EXPR, itype, t,
			       fold_convert_loc (loc, itype, loop->n2));
	      t = fold_build2_loc (loc, MINUS_EXPR, itype, t,
			       fold_convert_loc (loc, itype, loop->n1));
	      if (TYPE_UNSIGNED (itype) && loop->cond_code == GT_EXPR)
		t = fold_build2_loc (loc, TRUNC_DIV_EXPR, itype,
				 fold_build1_loc (loc, NEGATE_EXPR, itype, t),
				 fold_build1_loc (loc, NEGATE_EXPR, itype,
					      fold_convert_loc (loc, itype,
							    loop->step)));
	      else
		t = fold_build2_loc (loc, TRUNC_DIV_EXPR, itype, t,
				 fold_convert_loc (loc, itype, loop->step));
	      t = fold_convert_loc (loc, long_long_unsigned_type_node, t);
	      if (count != NULL_TREE)
		count = fold_build2_loc (loc,
				     MULT_EXPR, long_long_unsigned_type_node,
				     count, t);
	      else
		count = t;
	      if (TREE_CODE (count) != INTEGER_CST)
		count = NULL_TREE;
	    }
	  else
	    count = NULL_TREE;
	}
    }

  if (count)
    {
      if (!tree_int_cst_lt (count, TYPE_MAX_VALUE (long_integer_type_node)))
	iter_type = long_long_unsigned_type_node;
      else
	iter_type = long_integer_type_node;
    }
  else if (collapse_iter && *collapse_iter != NULL)
    iter_type = TREE_TYPE (*collapse_iter);
  fd->iter_type = iter_type;
  if (collapse_iter && *collapse_iter == NULL)
    *collapse_iter = create_tmp_var (iter_type, ".iter");
  if (collapse_count && *collapse_count == NULL)
    {
      if (count)
	*collapse_count = fold_convert_loc (loc, iter_type, count);
      else
	*collapse_count = create_tmp_var (iter_type, ".count");
    }

  if (fd->collapse > 1)
    {
      fd->loop.v = *collapse_iter;
      fd->loop.n1 = build_int_cst (TREE_TYPE (fd->loop.v), 0);
      fd->loop.n2 = *collapse_count;
      fd->loop.step = build_int_cst (TREE_TYPE (fd->loop.v), 1);
      fd->loop.cond_code = LT_EXPR;
    }
}
/* Given two blocks PAR_ENTRY_BB and WS_ENTRY_BB such that WS_ENTRY_BB
   is the immediate dominator of PAR_ENTRY_BB, return true if there
   are no data dependencies that would prevent expanding the parallel
   directive at PAR_ENTRY_BB as a combined parallel+workshare region.

   When expanding a combined parallel+workshare region, the call to
   the child function may need additional arguments in the case of
   GIMPLE_OMP_FOR regions.  In some cases, these arguments are
   computed out of variables passed in from the parent to the child
   via 'struct .omp_data_s'.  For instance:

	#pragma omp parallel for schedule (guided, i * 4)
	for (j ...)

   is lowered into:

	# BLOCK 2 (PAR_ENTRY_BB)
	.omp_data_o.i = i;
	#pragma omp parallel [child fn: bar.omp_fn.0 ( ..., D.1598)

	# BLOCK 3 (WS_ENTRY_BB)
	.omp_data_i = &.omp_data_o;
	D.1667 = .omp_data_i->i;
	D.1598 = D.1667 * 4;
	#pragma omp for schedule (guided, D.1598)

   When we outline the parallel region, the call to the child function
   'bar.omp_fn.0' will need the value D.1598 in its argument list, but
   that value is computed *after* the call site.  So, in principle we
   cannot do the transformation.

   To see whether the code in WS_ENTRY_BB blocks the combined
   parallel+workshare call, we collect all the variables used in the
   GIMPLE_OMP_FOR header and check whether they appear on the LHS of any
   statement in WS_ENTRY_BB.  If so, then we cannot emit the combined
   call.

   FIXME.  If we had the SSA form built at this point, we could merely
   hoist the code in block 3 into block 2 and be done with it.  But at
   this point we don't have dataflow information and though we could
   hack something up here, it is really not worth the aggravation.  */

static bool
workshare_safe_to_combine_p (basic_block ws_entry_bb)
{
  struct omp_for_data fd;
  gimple ws_stmt = last_stmt (ws_entry_bb);

  if (gimple_code (ws_stmt) == GIMPLE_OMP_SECTIONS)
    return true;

  gcc_assert (gimple_code (ws_stmt) == GIMPLE_OMP_FOR);

  extract_omp_for_data (ws_stmt, &fd, NULL);

  if (fd.collapse > 1 && TREE_CODE (fd.loop.n2) != INTEGER_CST)
    return false;
  if (fd.iter_type != long_integer_type_node)
    return false;

  /* FIXME.  We give up too easily here.  If any of these arguments
     are not constants, they will likely involve variables that have
     been mapped into fields of .omp_data_s for sharing with the child
     function.  With appropriate data flow, it would be possible to
     see through this.  */
  if (!is_gimple_min_invariant (fd.loop.n1)
      || !is_gimple_min_invariant (fd.loop.n2)
      || !is_gimple_min_invariant (fd.loop.step)
      || (fd.chunk_size && !is_gimple_min_invariant (fd.chunk_size)))
    return false;

  return true;
}
/* Collect additional arguments needed to emit a combined
   parallel+workshare call.  WS_STMT is the workshare directive being
   expanded.  */

static vec<tree, va_gc> *
get_ws_args_for (gimple ws_stmt)
{
  tree t;
  location_t loc = gimple_location (ws_stmt);
  vec<tree, va_gc> *ws_args;

  if (gimple_code (ws_stmt) == GIMPLE_OMP_FOR)
    {
      struct omp_for_data fd;

      extract_omp_for_data (ws_stmt, &fd, NULL);

      vec_alloc (ws_args, 3 + (fd.chunk_size != 0));

      t = fold_convert_loc (loc, long_integer_type_node, fd.loop.n1);
      ws_args->quick_push (t);

      t = fold_convert_loc (loc, long_integer_type_node, fd.loop.n2);
      ws_args->quick_push (t);

      t = fold_convert_loc (loc, long_integer_type_node, fd.loop.step);
      ws_args->quick_push (t);

      if (fd.chunk_size)
	{
	  t = fold_convert_loc (loc, long_integer_type_node, fd.chunk_size);
	  ws_args->quick_push (t);
	}

      return ws_args;
    }
  else if (gimple_code (ws_stmt) == GIMPLE_OMP_SECTIONS)
    {
      /* Number of sections is equal to the number of edges from the
	 GIMPLE_OMP_SECTIONS_SWITCH statement, except for the one to
	 the exit of the sections region.  */
      basic_block bb = single_succ (gimple_bb (ws_stmt));
      t = build_int_cst (unsigned_type_node, EDGE_COUNT (bb->succs) - 1);
      vec_alloc (ws_args, 1);
      ws_args->quick_push (t);
      return ws_args;
    }

  gcc_unreachable ();
}
/* Discover whether REGION is a combined parallel+workshare region.  */

static void
determine_parallel_type (struct omp_region *region)
{
  basic_block par_entry_bb, par_exit_bb;
  basic_block ws_entry_bb, ws_exit_bb;

  if (region == NULL || region->inner == NULL
      || region->exit == NULL || region->inner->exit == NULL
      || region->inner->cont == NULL)
    return;

  /* We only support parallel+for and parallel+sections.  */
  if (region->type != GIMPLE_OMP_PARALLEL
      || (region->inner->type != GIMPLE_OMP_FOR
	  && region->inner->type != GIMPLE_OMP_SECTIONS))
    return;

  /* Check for perfect nesting PAR_ENTRY_BB -> WS_ENTRY_BB and
     WS_EXIT_BB -> PAR_EXIT_BB.  */
  par_entry_bb = region->entry;
  par_exit_bb = region->exit;
  ws_entry_bb = region->inner->entry;
  ws_exit_bb = region->inner->exit;

  if (single_succ (par_entry_bb) == ws_entry_bb
      && single_succ (ws_exit_bb) == par_exit_bb
      && workshare_safe_to_combine_p (ws_entry_bb)
      && (gimple_omp_parallel_combined_p (last_stmt (par_entry_bb))
	  || (last_and_only_stmt (ws_entry_bb)
	      && last_and_only_stmt (par_exit_bb))))
    {
      gimple ws_stmt = last_stmt (ws_entry_bb);

      if (region->inner->type == GIMPLE_OMP_FOR)
	{
	  /* If this is a combined parallel loop, we need to determine
	     whether or not to use the combined library calls.  There
	     are two cases where we do not apply the transformation:
	     static loops and any kind of ordered loop.  In the first
	     case, we already open code the loop so there is no need
	     to do anything else.  In the latter case, the combined
	     parallel loop call would still need extra synchronization
	     to implement ordered semantics, so there would not be any
	     gain in using the combined call.  */
	  tree clauses = gimple_omp_for_clauses (ws_stmt);
	  tree c = find_omp_clause (clauses, OMP_CLAUSE_SCHEDULE);
	  if (c == NULL
	      || OMP_CLAUSE_SCHEDULE_KIND (c) == OMP_CLAUSE_SCHEDULE_STATIC
	      || find_omp_clause (clauses, OMP_CLAUSE_ORDERED))
	    {
	      region->is_combined_parallel = false;
	      region->inner->is_combined_parallel = false;
	      return;
	    }
	}

      region->is_combined_parallel = true;
      region->inner->is_combined_parallel = true;
      region->ws_args = get_ws_args_for (ws_stmt);
    }
}
/* Return true if EXPR is variable sized.  */

static inline bool
is_variable_sized (const_tree expr)
{
  return !TREE_CONSTANT (TYPE_SIZE_UNIT (TREE_TYPE (expr)));
}

/* Return true if DECL is a reference type.  */

static inline bool
is_reference (tree decl)
{
  return lang_hooks.decls.omp_privatize_by_reference (decl);
}
/* Lookup variables in the decl or field splay trees.  The "maybe" form
   allows for the variable form to not have been entered, otherwise we
   assert that the variable must have been entered.  */

static inline tree
lookup_decl (tree var, omp_context *ctx)
{
  tree *n;
  n = (tree *) pointer_map_contains (ctx->cb.decl_map, var);
  return *n;
}

static inline tree
maybe_lookup_decl (const_tree var, omp_context *ctx)
{
  tree *n;
  n = (tree *) pointer_map_contains (ctx->cb.decl_map, var);
  return n ? *n : NULL_TREE;
}

static inline tree
lookup_field (tree var, omp_context *ctx)
{
  splay_tree_node n;
  n = splay_tree_lookup (ctx->field_map, (splay_tree_key) var);
  return (tree) n->value;
}

static inline tree
lookup_sfield (tree var, omp_context *ctx)
{
  splay_tree_node n;
  n = splay_tree_lookup (ctx->sfield_map
			 ? ctx->sfield_map : ctx->field_map,
			 (splay_tree_key) var);
  return (tree) n->value;
}

static inline tree
maybe_lookup_field (tree var, omp_context *ctx)
{
  splay_tree_node n;
  n = splay_tree_lookup (ctx->field_map, (splay_tree_key) var);
  return n ? (tree) n->value : NULL_TREE;
}
/* Return true if DECL should be copied by pointer.  SHARED_CTX is
   the parallel context if DECL is to be shared.  */

static bool
use_pointer_for_field (tree decl, omp_context *shared_ctx)
{
  if (AGGREGATE_TYPE_P (TREE_TYPE (decl)))
    return true;

  /* We can only use copy-in/copy-out semantics for shared variables
     when we know the value is not accessible from an outer scope.  */
  if (shared_ctx)
    {
      /* ??? Trivially accessible from anywhere.  But why would we even
	 be passing an address in this case?  Should we simply assert
	 this to be false, or should we have a cleanup pass that removes
	 these from the list of mappings?  */
      if (TREE_STATIC (decl) || DECL_EXTERNAL (decl))
	return true;

      /* For variables with DECL_HAS_VALUE_EXPR_P set, we cannot tell
	 without analyzing the expression whether or not its location
	 is accessible to anyone else.  In the case of nested parallel
	 regions it certainly may be.  */
      if (TREE_CODE (decl) != RESULT_DECL && DECL_HAS_VALUE_EXPR_P (decl))
	return true;

      /* Do not use copy-in/copy-out for variables that have their
	 address taken.  */
      if (TREE_ADDRESSABLE (decl))
	return true;

      /* lower_send_shared_vars only uses copy-in, but not copy-out
	 for these.  */
      if (TREE_READONLY (decl)
	  || ((TREE_CODE (decl) == RESULT_DECL
	       || TREE_CODE (decl) == PARM_DECL)
	      && DECL_BY_REFERENCE (decl)))
	return false;

      /* Disallow copy-in/out in nested parallel if
	 decl is shared in outer parallel, otherwise
	 each thread could store the shared variable
	 in its own copy-in location, making the
	 variable no longer really shared.  */
      if (shared_ctx->is_nested)
	{
	  omp_context *up;

	  for (up = shared_ctx->outer; up; up = up->outer)
	    if (is_taskreg_ctx (up) && maybe_lookup_decl (decl, up))
	      break;

	  if (up)
	    {
	      tree c;

	      for (c = gimple_omp_taskreg_clauses (up->stmt);
		   c; c = OMP_CLAUSE_CHAIN (c))
		if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_SHARED
		    && OMP_CLAUSE_DECL (c) == decl)
		  break;

	      if (c)
		goto maybe_mark_addressable_and_ret;
	    }
	}

      /* For tasks avoid using copy-in/out.  As tasks can be
	 deferred or executed in a different thread, when GOMP_task
	 returns, the task hasn't necessarily terminated.  */
      if (is_task_ctx (shared_ctx))
	{
	  tree outer;
	maybe_mark_addressable_and_ret:
	  outer = maybe_lookup_decl_in_outer_ctx (decl, shared_ctx);
	  if (is_gimple_reg (outer))
	    {
	      /* Taking address of OUTER in lower_send_shared_vars
		 might need regimplification of everything that uses the
		 variable.  */
	      if (!task_shared_vars)
		task_shared_vars = BITMAP_ALLOC (NULL);
	      bitmap_set_bit (task_shared_vars, DECL_UID (outer));
	      TREE_ADDRESSABLE (outer) = 1;
	    }
	  return true;
	}
    }

  return false;
}
/* Create a new VAR_DECL and copy information from VAR to it.  */

tree
copy_var_decl (tree var, tree name, tree type)
{
  tree copy = build_decl (DECL_SOURCE_LOCATION (var), VAR_DECL, name, type);

  TREE_ADDRESSABLE (copy) = TREE_ADDRESSABLE (var);
  TREE_THIS_VOLATILE (copy) = TREE_THIS_VOLATILE (var);
  DECL_GIMPLE_REG_P (copy) = DECL_GIMPLE_REG_P (var);
  DECL_ARTIFICIAL (copy) = DECL_ARTIFICIAL (var);
  DECL_IGNORED_P (copy) = DECL_IGNORED_P (var);
  DECL_CONTEXT (copy) = DECL_CONTEXT (var);
  TREE_USED (copy) = 1;
  DECL_SEEN_IN_BIND_EXPR_P (copy) = 1;

  return copy;
}
/* Construct a new automatic decl similar to VAR.  */

static tree
omp_copy_decl_2 (tree var, tree name, tree type, omp_context *ctx)
{
  tree copy = copy_var_decl (var, name, type);

  DECL_CONTEXT (copy) = current_function_decl;
  DECL_CHAIN (copy) = ctx->block_vars;
  ctx->block_vars = copy;

  return copy;
}
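
/* As above, reusing VAR's own name and type.  */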
static tree
omp_copy_decl_1 (tree var, omp_context *ctx)
{
  return omp_copy_decl_2 (var, DECL_NAME (var), TREE_TYPE (var), ctx);
}
/* Build COMPONENT_REF and set TREE_THIS_VOLATILE and TREE_READONLY on it
   as appropriate.  */

static tree
omp_build_component_ref (tree obj, tree field)
{
  tree ret = build3 (COMPONENT_REF, TREE_TYPE (field), obj, field, NULL);
  if (TREE_THIS_VOLATILE (field))
    TREE_THIS_VOLATILE (ret) |= 1;
  if (TREE_READONLY (field))
    TREE_READONLY (ret) |= 1;

  return ret;
}
/* Build tree nodes to access the field for VAR on the receiver side.  */

static tree
build_receiver_ref (tree var, bool by_ref, omp_context *ctx)
{
  tree x, field = lookup_field (var, ctx);

  /* If the receiver record type was remapped in the child function,
     remap the field into the new record type.  */
  x = maybe_lookup_field (field, ctx);
  if (x != NULL)
    field = x;

  x = build_simple_mem_ref (ctx->receiver_decl);
  x = omp_build_component_ref (x, field);
  if (by_ref)
    x = build_simple_mem_ref (x);

  return x;
}
/* Build tree nodes to access VAR in the scope outer to CTX.  In the case
   of a parallel, this is a component reference; for workshare constructs
   this is some variable.  */

static tree
build_outer_var_ref (tree var, omp_context *ctx)
{
  tree x;

  if (is_global_var (maybe_lookup_decl_in_outer_ctx (var, ctx)))
    x = var;
  else if (is_variable_sized (var))
    {
      x = TREE_OPERAND (DECL_VALUE_EXPR (var), 0);
      x = build_outer_var_ref (x, ctx);
      x = build_simple_mem_ref (x);
    }
  else if (is_taskreg_ctx (ctx))
    {
      bool by_ref = use_pointer_for_field (var, NULL);
      x = build_receiver_ref (var, by_ref, ctx);
    }
  else if (ctx->outer)
    x = lookup_decl (var, ctx->outer);
  else if (is_reference (var))
    /* This can happen with orphaned constructs.  If var is reference, it is
       possible it is shared and as such valid.  */
    x = var;
  else
    gcc_unreachable ();

  if (is_reference (var))
    x = build_simple_mem_ref (x);

  return x;
}
/* Build tree nodes to access the field for VAR on the sender side.  */

static tree
build_sender_ref (tree var, omp_context *ctx)
{
  tree field = lookup_sfield (var, ctx);
  return omp_build_component_ref (ctx->sender_decl, field);
}
/* Add a new field for VAR inside the structure CTX->SENDER_DECL.  */

static void
install_var_field (tree var, bool by_ref, int mask, omp_context *ctx)
{
  tree field, type, sfield = NULL_TREE;

  gcc_assert ((mask & 1) == 0
	      || !splay_tree_lookup (ctx->field_map, (splay_tree_key) var));
  gcc_assert ((mask & 2) == 0 || !ctx->sfield_map
	      || !splay_tree_lookup (ctx->sfield_map, (splay_tree_key) var));

  type = TREE_TYPE (var);
  if (by_ref)
    type = build_pointer_type (type);
  else if ((mask & 3) == 1 && is_reference (var))
    type = TREE_TYPE (type);

  field = build_decl (DECL_SOURCE_LOCATION (var),
		      FIELD_DECL, DECL_NAME (var), type);

  /* Remember what variable this field was created for.  This does have a
     side effect of making dwarf2out ignore this member, so for helpful
     debugging we clear it later in delete_omp_context.  */
  DECL_ABSTRACT_ORIGIN (field) = var;
  if (type == TREE_TYPE (var))
    {
      DECL_ALIGN (field) = DECL_ALIGN (var);
      DECL_USER_ALIGN (field) = DECL_USER_ALIGN (var);
      TREE_THIS_VOLATILE (field) = TREE_THIS_VOLATILE (var);
    }
  else
    DECL_ALIGN (field) = TYPE_ALIGN (type);

  if ((mask & 3) == 3)
    {
      insert_field_into_struct (ctx->record_type, field);
      if (ctx->srecord_type)
	{
	  sfield = build_decl (DECL_SOURCE_LOCATION (var),
			       FIELD_DECL, DECL_NAME (var), type);
	  DECL_ABSTRACT_ORIGIN (sfield) = var;
	  DECL_ALIGN (sfield) = DECL_ALIGN (field);
	  DECL_USER_ALIGN (sfield) = DECL_USER_ALIGN (field);
	  TREE_THIS_VOLATILE (sfield) = TREE_THIS_VOLATILE (field);
	  insert_field_into_struct (ctx->srecord_type, sfield);
	}
    }
  else
    {
      if (ctx->srecord_type == NULL_TREE)
	{
	  tree t;

	  ctx->srecord_type = lang_hooks.types.make_type (RECORD_TYPE);
	  ctx->sfield_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
	  for (t = TYPE_FIELDS (ctx->record_type); t ; t = TREE_CHAIN (t))
	    {
	      sfield = build_decl (DECL_SOURCE_LOCATION (var),
				   FIELD_DECL, DECL_NAME (t), TREE_TYPE (t));
	      DECL_ABSTRACT_ORIGIN (sfield) = DECL_ABSTRACT_ORIGIN (t);
	      insert_field_into_struct (ctx->srecord_type, sfield);
	      splay_tree_insert (ctx->sfield_map,
				 (splay_tree_key) DECL_ABSTRACT_ORIGIN (t),
				 (splay_tree_value) sfield);
	    }
	}
      sfield = field;
      insert_field_into_struct ((mask & 1) ? ctx->record_type
				: ctx->srecord_type, field);
    }

  if (mask & 1)
    splay_tree_insert (ctx->field_map, (splay_tree_key) var,
		       (splay_tree_value) field);
  if ((mask & 2) && ctx->sfield_map)
    splay_tree_insert (ctx->sfield_map, (splay_tree_key) var,
		       (splay_tree_value) sfield);
}
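
/* Create a local replacement for VAR inside CTX and record the mapping.  */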
static tree
install_var_local (tree var, omp_context *ctx)
{
  tree new_var = omp_copy_decl_1 (var, ctx);
  insert_decl_map (&ctx->cb, var, new_var);
  return new_var;
}
/* Adjust the replacement for DECL in CTX for the new context.  This means
   copying the DECL_VALUE_EXPR, and fixing up the type.  */

static void
fixup_remapped_decl (tree decl, omp_context *ctx, bool private_debug)
{
  tree new_decl, size;

  new_decl = lookup_decl (decl, ctx);

  TREE_TYPE (new_decl) = remap_type (TREE_TYPE (decl), &ctx->cb);

  if ((!TREE_CONSTANT (DECL_SIZE (new_decl)) || private_debug)
      && DECL_HAS_VALUE_EXPR_P (decl))
    {
      tree ve = DECL_VALUE_EXPR (decl);
      walk_tree (&ve, copy_tree_body_r, &ctx->cb, NULL);
      SET_DECL_VALUE_EXPR (new_decl, ve);
      DECL_HAS_VALUE_EXPR_P (new_decl) = 1;
    }

  if (!TREE_CONSTANT (DECL_SIZE (new_decl)))
    {
      size = remap_decl (DECL_SIZE (decl), &ctx->cb);
      if (size == error_mark_node)
	size = TYPE_SIZE (TREE_TYPE (new_decl));
      DECL_SIZE (new_decl) = size;

      size = remap_decl (DECL_SIZE_UNIT (decl), &ctx->cb);
      if (size == error_mark_node)
	size = TYPE_SIZE_UNIT (TREE_TYPE (new_decl));
      DECL_SIZE_UNIT (new_decl) = size;
    }
}
/* The callback for remap_decl.  Search all containing contexts for a
   mapping of the variable; this avoids having to duplicate the splay
   tree ahead of time.  We know a mapping doesn't already exist in the
   given context.  Create new mappings to implement default semantics.  */

static tree
omp_copy_decl (tree var, copy_body_data *cb)
{
  omp_context *ctx = (omp_context *) cb;
  tree new_var;

  if (TREE_CODE (var) == LABEL_DECL)
    {
      new_var = create_artificial_label (DECL_SOURCE_LOCATION (var));
      DECL_CONTEXT (new_var) = current_function_decl;
      insert_decl_map (&ctx->cb, var, new_var);
      return new_var;
    }

  while (!is_taskreg_ctx (ctx))
    {
      ctx = ctx->outer;
      if (ctx == NULL)
	return var;
      new_var = maybe_lookup_decl (var, ctx);
      if (new_var)
	return new_var;
    }

  if (is_global_var (var) || decl_function_context (var) != ctx->cb.src_fn)
    return var;

  return error_mark_node;
}
/* Return the parallel region associated with STMT.  */

/* Debugging dumps for parallel regions.  */

void dump_omp_region (FILE *, struct omp_region *, int);
void debug_omp_region (struct omp_region *);
void debug_all_omp_regions (void);

/* Dump the parallel region tree rooted at REGION.  */

void
dump_omp_region (FILE *file, struct omp_region *region, int indent)
{
  fprintf (file, "%*sbb %d: %s\n", indent, "", region->entry->index,
	   gimple_code_name[region->type]);

  if (region->inner)
    dump_omp_region (file, region->inner, indent + 4);

  if (region->cont)
    {
      fprintf (file, "%*sbb %d: GIMPLE_OMP_CONTINUE\n", indent, "",
	       region->cont->index);
    }

  if (region->exit)
    fprintf (file, "%*sbb %d: GIMPLE_OMP_RETURN\n", indent, "",
	     region->exit->index);
  else
    fprintf (file, "%*s[no exit marker]\n", indent, "");

  if (region->next)
    dump_omp_region (file, region->next, indent);
}

DEBUG_FUNCTION void
debug_omp_region (struct omp_region *region)
{
  dump_omp_region (stderr, region, 0);
}

DEBUG_FUNCTION void
debug_all_omp_regions (void)
{
  dump_omp_region (stderr, root_omp_region, 0);
}
/* Create a new parallel region starting at STMT inside region PARENT.  */

static struct omp_region *
new_omp_region (basic_block bb, enum gimple_code type,
		struct omp_region *parent)
{
  struct omp_region *region = XCNEW (struct omp_region);

  region->outer = parent;
  region->entry = bb;
  region->type = type;

  if (parent)
    {
      /* This is a nested region.  Add it to the list of inner
	 regions in PARENT.  */
      region->next = parent->inner;
      parent->inner = region;
    }
  else
    {
      /* This is a toplevel region.  Add it to the list of toplevel
	 regions in ROOT_OMP_REGION.  */
      region->next = root_omp_region;
      root_omp_region = region;
    }

  return region;
}
1179 free_omp_region_1 (struct omp_region
*region
)
1181 struct omp_region
*i
, *n
;
1183 for (i
= region
->inner
; i
; i
= n
)
1186 free_omp_region_1 (i
);
1192 /* Release the memory for the entire omp region tree. */
1195 free_omp_regions (void)
1197 struct omp_region
*r
, *n
;
1198 for (r
= root_omp_region
; r
; r
= n
)
1201 free_omp_region_1 (r
);
1203 root_omp_region
= NULL
;
/* Create a new context, with OUTER_CTX being the surrounding context.  */

static omp_context *
new_omp_context (gimple stmt, omp_context *outer_ctx)
{
  omp_context *ctx = XCNEW (omp_context);

  splay_tree_insert (all_contexts, (splay_tree_key) stmt,
		     (splay_tree_value) ctx);
  ctx->stmt = stmt;

  if (outer_ctx)
    {
      ctx->outer = outer_ctx;
      ctx->cb = outer_ctx->cb;
      ctx->cb.block = NULL;
      ctx->depth = outer_ctx->depth + 1;
    }
  else
    {
      ctx->cb.src_fn = current_function_decl;
      ctx->cb.dst_fn = current_function_decl;
      ctx->cb.src_node = cgraph_get_node (current_function_decl);
      gcc_checking_assert (ctx->cb.src_node);
      ctx->cb.dst_node = ctx->cb.src_node;
      ctx->cb.src_cfun = cfun;
      ctx->cb.copy_decl = omp_copy_decl;
      ctx->cb.eh_lp_nr = 0;
      ctx->cb.transform_call_graph_edges = CB_CGE_MOVE;
      ctx->depth = 1;
    }

  ctx->cb.decl_map = pointer_map_create ();

  return ctx;
}

static gimple_seq maybe_catch_exception (gimple_seq);
/* Finalize task copyfn.  */

static void
finalize_task_copyfn (gimple task_stmt)
{
  struct function *child_cfun;
  tree child_fn;
  gimple_seq seq = NULL, new_seq;
  gimple bind;

  child_fn = gimple_omp_task_copy_fn (task_stmt);
  if (child_fn == NULL_TREE)
    return;

  child_cfun = DECL_STRUCT_FUNCTION (child_fn);

  /* Inform the callgraph about the new function.  */
  DECL_STRUCT_FUNCTION (child_fn)->curr_properties
    = cfun->curr_properties & ~PROP_loops;

  push_cfun (child_cfun);
  bind = gimplify_body (child_fn, false);
  gimple_seq_add_stmt (&seq, bind);
  new_seq = maybe_catch_exception (seq);
  if (new_seq != seq)
    {
      bind = gimple_build_bind (NULL, new_seq, NULL);
      seq = NULL;
      gimple_seq_add_stmt (&seq, bind);
    }
  gimple_set_body (child_fn, seq);
  pop_cfun ();

  cgraph_add_new_function (child_fn, false);
}
/* Destroy an omp_context data structure.  Called through the splay tree
   value delete callback.  */

static void
delete_omp_context (splay_tree_value value)
{
  omp_context *ctx = (omp_context *) value;

  pointer_map_destroy (ctx->cb.decl_map);

  if (ctx->field_map)
    splay_tree_delete (ctx->field_map);
  if (ctx->sfield_map)
    splay_tree_delete (ctx->sfield_map);

  /* We hijacked DECL_ABSTRACT_ORIGIN earlier.  We need to clear it before
     it produces corrupt debug information.  */
  if (ctx->record_type)
    {
      tree t;
      for (t = TYPE_FIELDS (ctx->record_type); t ; t = DECL_CHAIN (t))
	DECL_ABSTRACT_ORIGIN (t) = NULL;
    }
  if (ctx->srecord_type)
    {
      tree t;
      for (t = TYPE_FIELDS (ctx->srecord_type); t ; t = DECL_CHAIN (t))
	DECL_ABSTRACT_ORIGIN (t) = NULL;
    }

  if (is_task_ctx (ctx))
    finalize_task_copyfn (ctx->stmt);

  XDELETE (ctx);
}
/* Fix up RECEIVER_DECL with a type that has been remapped to the child
   context.  */

static void
fixup_child_record_type (omp_context *ctx)
{
  tree f, type = ctx->record_type;

  /* ??? It isn't sufficient to just call remap_type here, because
     variably_modified_type_p doesn't work the way we expect for
     record types.  Testing each field for whether it needs remapping
     and creating a new record by hand works, however.  */
  for (f = TYPE_FIELDS (type); f ; f = DECL_CHAIN (f))
    if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
      break;
  if (f)
    {
      tree name, new_fields = NULL;

      type = lang_hooks.types.make_type (RECORD_TYPE);
      name = DECL_NAME (TYPE_NAME (ctx->record_type));
      name = build_decl (DECL_SOURCE_LOCATION (ctx->receiver_decl),
			 TYPE_DECL, name, type);
      TYPE_NAME (type) = name;

      for (f = TYPE_FIELDS (ctx->record_type); f ; f = DECL_CHAIN (f))
	{
	  tree new_f = copy_node (f);
	  DECL_CONTEXT (new_f) = type;
	  TREE_TYPE (new_f) = remap_type (TREE_TYPE (f), &ctx->cb);
	  DECL_CHAIN (new_f) = new_fields;
	  walk_tree (&DECL_SIZE (new_f), copy_tree_body_r, &ctx->cb, NULL);
	  walk_tree (&DECL_SIZE_UNIT (new_f), copy_tree_body_r,
		     &ctx->cb, NULL);
	  walk_tree (&DECL_FIELD_OFFSET (new_f), copy_tree_body_r,
		     &ctx->cb, NULL);
	  new_fields = new_f;

	  /* Arrange to be able to look up the receiver field
	     given the sender field.  */
	  splay_tree_insert (ctx->field_map, (splay_tree_key) f,
			     (splay_tree_value) new_f);
	}
      TYPE_FIELDS (type) = nreverse (new_fields);
      layout_type (type);
    }

  TREE_TYPE (ctx->receiver_decl) = build_pointer_type (type);
}
/* Instantiate decls as necessary in CTX to satisfy the data sharing
   specified by CLAUSES.  */

static void
scan_sharing_clauses (tree clauses, omp_context *ctx)
{
  tree c, decl;
  bool scan_array_reductions = false;

  for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
    {
      bool by_ref;

      switch (OMP_CLAUSE_CODE (c))
	{
	case OMP_CLAUSE_PRIVATE:
	  decl = OMP_CLAUSE_DECL (c);
	  if (OMP_CLAUSE_PRIVATE_OUTER_REF (c))
	    goto do_private;
	  else if (!is_variable_sized (decl))
	    install_var_local (decl, ctx);
	  break;

	case OMP_CLAUSE_SHARED:
	  gcc_assert (is_taskreg_ctx (ctx));
	  decl = OMP_CLAUSE_DECL (c);
	  gcc_assert (!COMPLETE_TYPE_P (TREE_TYPE (decl))
		      || !is_variable_sized (decl));
	  /* Global variables don't need to be copied,
	     the receiver side will use them directly.  */
	  if (is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx)))
	    break;
	  by_ref = use_pointer_for_field (decl, ctx);
	  if (! TREE_READONLY (decl)
	      || TREE_ADDRESSABLE (decl)
	      || by_ref
	      || is_reference (decl))
	    {
	      install_var_field (decl, by_ref, 3, ctx);
	      install_var_local (decl, ctx);
	      break;
	    }
	  /* We don't need to copy const scalar vars back.  */
	  OMP_CLAUSE_SET_CODE (c, OMP_CLAUSE_FIRSTPRIVATE);
	  goto do_private;

	case OMP_CLAUSE_LASTPRIVATE:
	  /* Let the corresponding firstprivate clause create
	     the variable.  */
	  if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
	    break;
	  /* FALLTHRU */

	case OMP_CLAUSE_FIRSTPRIVATE:
	case OMP_CLAUSE_REDUCTION:
	  decl = OMP_CLAUSE_DECL (c);
	do_private:
	  if (is_variable_sized (decl))
	    {
	      if (is_task_ctx (ctx))
		install_var_field (decl, false, 1, ctx);
	      break;
	    }
	  else if (is_taskreg_ctx (ctx))
	    {
	      bool global
		= is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx));
	      by_ref = use_pointer_for_field (decl, NULL);

	      if (is_task_ctx (ctx)
		  && (global || by_ref || is_reference (decl)))
		{
		  install_var_field (decl, false, 1, ctx);
		  if (!global)
		    install_var_field (decl, by_ref, 2, ctx);
		}
	      else if (!global)
		install_var_field (decl, by_ref, 3, ctx);
	    }
	  install_var_local (decl, ctx);
	  break;

	case OMP_CLAUSE_COPYPRIVATE:
	case OMP_CLAUSE_COPYIN:
	  decl = OMP_CLAUSE_DECL (c);
	  by_ref = use_pointer_for_field (decl, NULL);
	  install_var_field (decl, by_ref, 3, ctx);
	  break;

	case OMP_CLAUSE_DEFAULT:
	  ctx->default_kind = OMP_CLAUSE_DEFAULT_KIND (c);
	  break;

	case OMP_CLAUSE_FINAL:
	case OMP_CLAUSE_IF:
	case OMP_CLAUSE_NUM_THREADS:
	case OMP_CLAUSE_SCHEDULE:
	  if (ctx->outer)
	    scan_omp_op (&OMP_CLAUSE_OPERAND (c, 0), ctx->outer);
	  break;

	case OMP_CLAUSE_NOWAIT:
	case OMP_CLAUSE_ORDERED:
	case OMP_CLAUSE_COLLAPSE:
	case OMP_CLAUSE_UNTIED:
	case OMP_CLAUSE_MERGEABLE:
	  break;

	default:
	  gcc_unreachable ();
	}
    }

  for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
    {
      switch (OMP_CLAUSE_CODE (c))
	{
	case OMP_CLAUSE_LASTPRIVATE:
	  /* Let the corresponding firstprivate clause create
	     the variable.  */
	  if (OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c))
	    scan_array_reductions = true;
	  if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
	    break;
	  /* FALLTHRU */

	case OMP_CLAUSE_PRIVATE:
	case OMP_CLAUSE_FIRSTPRIVATE:
	case OMP_CLAUSE_REDUCTION:
	  decl = OMP_CLAUSE_DECL (c);
	  if (is_variable_sized (decl))
	    install_var_local (decl, ctx);
	  fixup_remapped_decl (decl, ctx,
			       OMP_CLAUSE_CODE (c) == OMP_CLAUSE_PRIVATE
			       && OMP_CLAUSE_PRIVATE_DEBUG (c));
	  if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
	      && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
	    scan_array_reductions = true;
	  break;

	case OMP_CLAUSE_SHARED:
	  decl = OMP_CLAUSE_DECL (c);
	  if (! is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx)))
	    fixup_remapped_decl (decl, ctx, false);
	  break;

	case OMP_CLAUSE_COPYPRIVATE:
	case OMP_CLAUSE_COPYIN:
	case OMP_CLAUSE_DEFAULT:
	case OMP_CLAUSE_IF:
	case OMP_CLAUSE_NUM_THREADS:
	case OMP_CLAUSE_SCHEDULE:
	case OMP_CLAUSE_NOWAIT:
	case OMP_CLAUSE_ORDERED:
	case OMP_CLAUSE_COLLAPSE:
	case OMP_CLAUSE_UNTIED:
	case OMP_CLAUSE_FINAL:
	case OMP_CLAUSE_MERGEABLE:
	  break;

	default:
	  gcc_unreachable ();
	}
    }

  if (scan_array_reductions)
    for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
      if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
	  && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
	{
	  scan_omp (&OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c), ctx);
	  scan_omp (&OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c), ctx);
	}
      else if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE
	       && OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c))
	scan_omp (&OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c), ctx);
}
/* Create a new name for omp child function.  Returns an identifier.  */

static GTY(()) unsigned int tmp_ompfn_id_num;

static tree
create_omp_child_function_name (bool task_copy)
{
  return (clone_function_name (current_function_decl,
			       task_copy ? "_omp_cpyfn" : "_omp_fn"));
}
/* Build a decl for the omp child function.  It'll not contain a body
   yet, just the bare decl.  */

static void
create_omp_child_function (omp_context *ctx, bool task_copy)
{
  tree decl, type, name, t;

  name = create_omp_child_function_name (task_copy);
  if (task_copy)
    type = build_function_type_list (void_type_node, ptr_type_node,
				     ptr_type_node, NULL_TREE);
  else
    type = build_function_type_list (void_type_node, ptr_type_node, NULL_TREE);

  decl = build_decl (gimple_location (ctx->stmt),
		     FUNCTION_DECL, name, type);

  if (!task_copy)
    ctx->cb.dst_fn = decl;
  else
    gimple_omp_task_set_copy_fn (ctx->stmt, decl);

  TREE_STATIC (decl) = 1;
  TREE_USED (decl) = 1;
  DECL_ARTIFICIAL (decl) = 1;
  DECL_NAMELESS (decl) = 1;
  DECL_IGNORED_P (decl) = 0;
  TREE_PUBLIC (decl) = 0;
  DECL_UNINLINABLE (decl) = 1;
  DECL_EXTERNAL (decl) = 0;
  DECL_CONTEXT (decl) = NULL_TREE;
  DECL_INITIAL (decl) = make_node (BLOCK);

  t = build_decl (DECL_SOURCE_LOCATION (decl),
		  RESULT_DECL, NULL_TREE, void_type_node);
  DECL_ARTIFICIAL (t) = 1;
  DECL_IGNORED_P (t) = 1;
  DECL_CONTEXT (t) = decl;
  DECL_RESULT (decl) = t;

  t = build_decl (DECL_SOURCE_LOCATION (decl),
		  PARM_DECL, get_identifier (".omp_data_i"), ptr_type_node);
  DECL_ARTIFICIAL (t) = 1;
  DECL_NAMELESS (t) = 1;
  DECL_ARG_TYPE (t) = ptr_type_node;
  DECL_CONTEXT (t) = current_function_decl;
  TREE_USED (t) = 1;
  DECL_ARGUMENTS (decl) = t;
  if (!task_copy)
    ctx->receiver_decl = t;
  else
    {
      t = build_decl (DECL_SOURCE_LOCATION (decl),
		      PARM_DECL, get_identifier (".omp_data_o"),
		      ptr_type_node);
      DECL_ARTIFICIAL (t) = 1;
      DECL_NAMELESS (t) = 1;
      DECL_ARG_TYPE (t) = ptr_type_node;
      DECL_CONTEXT (t) = current_function_decl;
      TREE_USED (t) = 1;
      TREE_ADDRESSABLE (t) = 1;
      DECL_CHAIN (t) = DECL_ARGUMENTS (decl);
      DECL_ARGUMENTS (decl) = t;
    }

  /* Allocate memory for the function structure.  The call to
     allocate_struct_function clobbers CFUN, so we need to restore
     it afterward.  */
  push_struct_function (decl);
  cfun->function_end_locus = gimple_location (ctx->stmt);
  pop_cfun ();
}
1635 scan_omp_parallel (gimple_stmt_iterator
*gsi
, omp_context
*outer_ctx
)
1639 gimple stmt
= gsi_stmt (*gsi
);
1641 /* Ignore parallel directives with empty bodies, unless there
1642 are copyin clauses. */
1644 && empty_body_p (gimple_omp_body (stmt
))
1645 && find_omp_clause (gimple_omp_parallel_clauses (stmt
),
1646 OMP_CLAUSE_COPYIN
) == NULL
)
1648 gsi_replace (gsi
, gimple_build_nop (), false);
1652 ctx
= new_omp_context (stmt
, outer_ctx
);
1653 if (taskreg_nesting_level
> 1)
1654 ctx
->is_nested
= true;
1655 ctx
->field_map
= splay_tree_new (splay_tree_compare_pointers
, 0, 0);
1656 ctx
->default_kind
= OMP_CLAUSE_DEFAULT_SHARED
;
1657 ctx
->record_type
= lang_hooks
.types
.make_type (RECORD_TYPE
);
1658 name
= create_tmp_var_name (".omp_data_s");
1659 name
= build_decl (gimple_location (stmt
),
1660 TYPE_DECL
, name
, ctx
->record_type
);
1661 DECL_ARTIFICIAL (name
) = 1;
1662 DECL_NAMELESS (name
) = 1;
1663 TYPE_NAME (ctx
->record_type
) = name
;
1664 create_omp_child_function (ctx
, false);
1665 gimple_omp_parallel_set_child_fn (stmt
, ctx
->cb
.dst_fn
);
1667 scan_sharing_clauses (gimple_omp_parallel_clauses (stmt
), ctx
);
1668 scan_omp (gimple_omp_body_ptr (stmt
), ctx
);
1670 if (TYPE_FIELDS (ctx
->record_type
) == NULL
)
1671 ctx
->record_type
= ctx
->receiver_decl
= NULL
;
1674 layout_type (ctx
->record_type
);
1675 fixup_child_record_type (ctx
);
1679 /* Scan an OpenMP task directive. */
1682 scan_omp_task (gimple_stmt_iterator
*gsi
, omp_context
*outer_ctx
)
1686 gimple stmt
= gsi_stmt (*gsi
);
1687 location_t loc
= gimple_location (stmt
);
1689 /* Ignore task directives with empty bodies. */
1691 && empty_body_p (gimple_omp_body (stmt
)))
1693 gsi_replace (gsi
, gimple_build_nop (), false);
1697 ctx
= new_omp_context (stmt
, outer_ctx
);
1698 if (taskreg_nesting_level
> 1)
1699 ctx
->is_nested
= true;
1700 ctx
->field_map
= splay_tree_new (splay_tree_compare_pointers
, 0, 0);
1701 ctx
->default_kind
= OMP_CLAUSE_DEFAULT_SHARED
;
1702 ctx
->record_type
= lang_hooks
.types
.make_type (RECORD_TYPE
);
1703 name
= create_tmp_var_name (".omp_data_s");
1704 name
= build_decl (gimple_location (stmt
),
1705 TYPE_DECL
, name
, ctx
->record_type
);
1706 DECL_ARTIFICIAL (name
) = 1;
1707 DECL_NAMELESS (name
) = 1;
1708 TYPE_NAME (ctx
->record_type
) = name
;
1709 create_omp_child_function (ctx
, false);
1710 gimple_omp_task_set_child_fn (stmt
, ctx
->cb
.dst_fn
);
1712 scan_sharing_clauses (gimple_omp_task_clauses (stmt
), ctx
);
1714 if (ctx
->srecord_type
)
1716 name
= create_tmp_var_name (".omp_data_a");
1717 name
= build_decl (gimple_location (stmt
),
1718 TYPE_DECL
, name
, ctx
->srecord_type
);
1719 DECL_ARTIFICIAL (name
) = 1;
1720 DECL_NAMELESS (name
) = 1;
1721 TYPE_NAME (ctx
->srecord_type
) = name
;
1722 create_omp_child_function (ctx
, true);
1725 scan_omp (gimple_omp_body_ptr (stmt
), ctx
);
1727 if (TYPE_FIELDS (ctx
->record_type
) == NULL
)
1729 ctx
->record_type
= ctx
->receiver_decl
= NULL
;
1730 t
= build_int_cst (long_integer_type_node
, 0);
1731 gimple_omp_task_set_arg_size (stmt
, t
);
1732 t
= build_int_cst (long_integer_type_node
, 1);
1733 gimple_omp_task_set_arg_align (stmt
, t
);
1737 tree
*p
, vla_fields
= NULL_TREE
, *q
= &vla_fields
;
1738 /* Move VLA fields to the end. */
1739 p
= &TYPE_FIELDS (ctx
->record_type
);
1741 if (!TYPE_SIZE_UNIT (TREE_TYPE (*p
))
1742 || ! TREE_CONSTANT (TYPE_SIZE_UNIT (TREE_TYPE (*p
))))
1745 *p
= TREE_CHAIN (*p
);
1746 TREE_CHAIN (*q
) = NULL_TREE
;
1747 q
= &TREE_CHAIN (*q
);
1750 p
= &DECL_CHAIN (*p
);
1752 layout_type (ctx
->record_type
);
1753 fixup_child_record_type (ctx
);
1754 if (ctx
->srecord_type
)
1755 layout_type (ctx
->srecord_type
);
1756 t
= fold_convert_loc (loc
, long_integer_type_node
,
1757 TYPE_SIZE_UNIT (ctx
->record_type
));
1758 gimple_omp_task_set_arg_size (stmt
, t
);
1759 t
= build_int_cst (long_integer_type_node
,
1760 TYPE_ALIGN_UNIT (ctx
->record_type
));
1761 gimple_omp_task_set_arg_align (stmt
, t
);
1766 /* Scan an OpenMP loop directive. */
1769 scan_omp_for (gimple stmt
, omp_context
*outer_ctx
)
1774 ctx
= new_omp_context (stmt
, outer_ctx
);
1776 scan_sharing_clauses (gimple_omp_for_clauses (stmt
), ctx
);
1778 scan_omp (gimple_omp_for_pre_body_ptr (stmt
), ctx
);
1779 for (i
= 0; i
< gimple_omp_for_collapse (stmt
); i
++)
1781 scan_omp_op (gimple_omp_for_index_ptr (stmt
, i
), ctx
);
1782 scan_omp_op (gimple_omp_for_initial_ptr (stmt
, i
), ctx
);
1783 scan_omp_op (gimple_omp_for_final_ptr (stmt
, i
), ctx
);
1784 scan_omp_op (gimple_omp_for_incr_ptr (stmt
, i
), ctx
);
1786 scan_omp (gimple_omp_body_ptr (stmt
), ctx
);
1789 /* Scan an OpenMP sections directive. */
1792 scan_omp_sections (gimple stmt
, omp_context
*outer_ctx
)
1796 ctx
= new_omp_context (stmt
, outer_ctx
);
1797 scan_sharing_clauses (gimple_omp_sections_clauses (stmt
), ctx
);
1798 scan_omp (gimple_omp_body_ptr (stmt
), ctx
);
/* Scan an OpenMP single directive.  */

static void
scan_omp_single (gimple stmt, omp_context *outer_ctx)
{
  omp_context *ctx;
  tree name;

  ctx = new_omp_context (stmt, outer_ctx);
  ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
  ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
  name = create_tmp_var_name (".omp_copy_s");
  name = build_decl (gimple_location (stmt),
		     TYPE_DECL, name, ctx->record_type);
  TYPE_NAME (ctx->record_type) = name;

  scan_sharing_clauses (gimple_omp_single_clauses (stmt), ctx);
  scan_omp (gimple_omp_body_ptr (stmt), ctx);

  if (TYPE_FIELDS (ctx->record_type) == NULL)
    ctx->record_type = NULL;
  else
    layout_type (ctx->record_type);
}
/* Check OpenMP nesting restrictions.  */

static bool
check_omp_nesting_restrictions (gimple stmt, omp_context *ctx)
{
  switch (gimple_code (stmt))
    {
    case GIMPLE_OMP_FOR:
    case GIMPLE_OMP_SECTIONS:
    case GIMPLE_OMP_SINGLE:
    case GIMPLE_CALL:
      for (; ctx != NULL; ctx = ctx->outer)
	switch (gimple_code (ctx->stmt))
	  {
	  case GIMPLE_OMP_FOR:
	  case GIMPLE_OMP_SECTIONS:
	  case GIMPLE_OMP_SINGLE:
	  case GIMPLE_OMP_ORDERED:
	  case GIMPLE_OMP_MASTER:
	  case GIMPLE_OMP_TASK:
	    if (is_gimple_call (stmt))
	      {
		error_at (gimple_location (stmt),
			  "barrier region may not be closely nested inside "
			  "of work-sharing, critical, ordered, master or "
			  "explicit task region");
		return false;
	      }
	    error_at (gimple_location (stmt),
		      "work-sharing region may not be closely nested inside "
		      "of work-sharing, critical, ordered, master or explicit "
		      "task region");
	    return false;
	  case GIMPLE_OMP_PARALLEL:
	    return true;
	  default:
	    break;
	  }
      break;
    case GIMPLE_OMP_MASTER:
      for (; ctx != NULL; ctx = ctx->outer)
	switch (gimple_code (ctx->stmt))
	  {
	  case GIMPLE_OMP_FOR:
	  case GIMPLE_OMP_SECTIONS:
	  case GIMPLE_OMP_SINGLE:
	  case GIMPLE_OMP_TASK:
	    error_at (gimple_location (stmt),
		      "master region may not be closely nested inside "
		      "of work-sharing or explicit task region");
	    return false;
	  case GIMPLE_OMP_PARALLEL:
	    return true;
	  default:
	    break;
	  }
      break;
    case GIMPLE_OMP_ORDERED:
      for (; ctx != NULL; ctx = ctx->outer)
	switch (gimple_code (ctx->stmt))
	  {
	  case GIMPLE_OMP_CRITICAL:
	  case GIMPLE_OMP_TASK:
	    error_at (gimple_location (stmt),
		      "ordered region may not be closely nested inside "
		      "of critical or explicit task region");
	    return false;
	  case GIMPLE_OMP_FOR:
	    if (find_omp_clause (gimple_omp_for_clauses (ctx->stmt),
				 OMP_CLAUSE_ORDERED) == NULL)
	      {
		error_at (gimple_location (stmt),
			  "ordered region must be closely nested inside "
			  "a loop region with an ordered clause");
		return false;
	      }
	    return true;
	  case GIMPLE_OMP_PARALLEL:
	    return true;
	  default:
	    break;
	  }
      break;
    case GIMPLE_OMP_CRITICAL:
      for (; ctx != NULL; ctx = ctx->outer)
	if (gimple_code (ctx->stmt) == GIMPLE_OMP_CRITICAL
	    && (gimple_omp_critical_name (stmt)
		== gimple_omp_critical_name (ctx->stmt)))
	  {
	    error_at (gimple_location (stmt),
		      "critical region may not be nested inside a critical "
		      "region with the same name");
	    return false;
	  }
      break;
    default:
      break;
    }

  return true;
}
1930 Callback for walk_tree or operators in walk_gimple_stmt used to
1931 scan for OpenMP directives in TP. */
1934 scan_omp_1_op (tree
*tp
, int *walk_subtrees
, void *data
)
1936 struct walk_stmt_info
*wi
= (struct walk_stmt_info
*) data
;
1937 omp_context
*ctx
= (omp_context
*) wi
->info
;
1940 switch (TREE_CODE (t
))
1947 *tp
= remap_decl (t
, &ctx
->cb
);
1951 if (ctx
&& TYPE_P (t
))
1952 *tp
= remap_type (t
, &ctx
->cb
);
1953 else if (!DECL_P (t
))
1958 tree tem
= remap_type (TREE_TYPE (t
), &ctx
->cb
);
1959 if (tem
!= TREE_TYPE (t
))
1961 if (TREE_CODE (t
) == INTEGER_CST
)
1962 *tp
= build_int_cst_wide (tem
,
1963 TREE_INT_CST_LOW (t
),
1964 TREE_INT_CST_HIGH (t
));
1966 TREE_TYPE (t
) = tem
;
/* Helper function for scan_omp.

   Callback for walk_gimple_stmt used to scan for OpenMP directives in
   the current statement in GSI.  */

static tree
scan_omp_1_stmt (gimple_stmt_iterator *gsi, bool *handled_ops_p,
		 struct walk_stmt_info *wi)
{
  gimple stmt = gsi_stmt (*gsi);
  omp_context *ctx = (omp_context *) wi->info;

  if (gimple_has_location (stmt))
    input_location = gimple_location (stmt);

  /* Check the OpenMP nesting restrictions.  */
  if (ctx != NULL)
    {
      bool remove = false;
      if (is_gimple_omp (stmt))
	remove = !check_omp_nesting_restrictions (stmt, ctx);
      else if (is_gimple_call (stmt))
	{
	  tree fndecl = gimple_call_fndecl (stmt);
	  if (fndecl && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL
	      && DECL_FUNCTION_CODE (fndecl) == BUILT_IN_GOMP_BARRIER)
	    remove = !check_omp_nesting_restrictions (stmt, ctx);
	}
      if (remove)
	{
	  stmt = gimple_build_nop ();
	  gsi_replace (gsi, stmt, false);
	}
    }

  *handled_ops_p = true;

  switch (gimple_code (stmt))
    {
    case GIMPLE_OMP_PARALLEL:
      taskreg_nesting_level++;
      scan_omp_parallel (gsi, ctx);
      taskreg_nesting_level--;
      break;

    case GIMPLE_OMP_TASK:
      taskreg_nesting_level++;
      scan_omp_task (gsi, ctx);
      taskreg_nesting_level--;
      break;

    case GIMPLE_OMP_FOR:
      scan_omp_for (stmt, ctx);
      break;

    case GIMPLE_OMP_SECTIONS:
      scan_omp_sections (stmt, ctx);
      break;

    case GIMPLE_OMP_SINGLE:
      scan_omp_single (stmt, ctx);
      break;

    case GIMPLE_OMP_SECTION:
    case GIMPLE_OMP_MASTER:
    case GIMPLE_OMP_ORDERED:
    case GIMPLE_OMP_CRITICAL:
      ctx = new_omp_context (stmt, ctx);
      scan_omp (gimple_omp_body_ptr (stmt), ctx);
      break;

    case GIMPLE_BIND:
      {
	tree var;

	*handled_ops_p = false;
	if (ctx)
	  for (var = gimple_bind_vars (stmt); var ; var = DECL_CHAIN (var))
	    insert_decl_map (&ctx->cb, var, var);
      }
      break;
    default:
      *handled_ops_p = false;
      break;
    }

  return NULL_TREE;
}
/* Scan all the statements starting at the current statement.  CTX
   contains context information about the OpenMP directives and
   clauses found during the scan.  */

static void
scan_omp (gimple_seq *body_p, omp_context *ctx)
{
  location_t saved_location;
  struct walk_stmt_info wi;

  memset (&wi, 0, sizeof (wi));
  wi.info = ctx;
  wi.want_locations = true;

  saved_location = input_location;
  walk_gimple_seq_mod (body_p, scan_omp_1_stmt, scan_omp_1_op, &wi);
  input_location = saved_location;
}
/* Re-gimplification and code generation routines.  */

/* Build a call to GOMP_barrier.  */

static tree
build_omp_barrier (void)
{
  return build_call_expr (builtin_decl_explicit (BUILT_IN_GOMP_BARRIER), 0);
}
/* If a context was created for STMT when it was scanned, return it.  */

static omp_context *
maybe_lookup_ctx (gimple stmt)
{
  splay_tree_node n;
  n = splay_tree_lookup (all_contexts, (splay_tree_key) stmt);
  return n ? (omp_context *) n->value : NULL;
}
/* Find the mapping for DECL in CTX or the immediately enclosing
   context that has a mapping for DECL.

   If CTX is a nested parallel directive, we may have to use the decl
   mappings created in CTX's parent context.  Suppose that we have the
   following parallel nesting (variable UIDs shown for clarity):

	iD.1562 = 0;
	#omp parallel shared(iD.1562)		-> outer parallel
	  iD.1562 = iD.1562 + 1;

	  #omp parallel shared (iD.1562)	-> inner parallel
	    iD.1562 = iD.1562 - 1;

   Each parallel structure will create a distinct .omp_data_s structure
   for copying iD.1562 in/out of the directive:

	outer parallel	.omp_data_s.1.i -> iD.1562
	inner parallel	.omp_data_s.2.i -> iD.1562

   A shared variable mapping will produce a copy-out operation before
   the parallel directive and a copy-in operation after it.  So, in
   this case we would have:

	iD.1562 = 0;
	.omp_data_o.1.i = iD.1562;
	#omp parallel shared(iD.1562)		-> outer parallel
	  .omp_data_i.1 = &.omp_data_o.1
	  .omp_data_i.1->i = .omp_data_i.1->i + 1;

	  .omp_data_o.2.i = iD.1562;		-> **
	  #omp parallel shared(iD.1562)		-> inner parallel
	    .omp_data_i.2 = &.omp_data_o.2
	    .omp_data_i.2->i = .omp_data_i.2->i - 1;

	    ** This is a problem.  The symbol iD.1562 cannot be referenced
	       inside the body of the outer parallel region.  But since we are
	       emitting this copy operation while expanding the inner parallel
	       directive, we need to access the CTX structure of the outer
	       parallel directive to get the correct mapping:

	       .omp_data_o.2.i = .omp_data_i.1->i

   Since there may be other workshare or parallel directives enclosing
   the parallel directive, it may be necessary to walk up the context
   parent chain.  This is not a problem in general because nested
   parallelism happens only rarely.  */

static tree
lookup_decl_in_outer_ctx (tree decl, omp_context *ctx)
{
  tree t;
  omp_context *up;

  for (up = ctx->outer, t = NULL; up && t == NULL; up = up->outer)
    t = maybe_lookup_decl (decl, up);

  gcc_assert (!ctx->is_nested || t || is_global_var (decl));

  return t ? t : decl;
}
2172 in outer contexts. */
2175 maybe_lookup_decl_in_outer_ctx (tree decl
, omp_context
*ctx
)
2180 for (up
= ctx
->outer
, t
= NULL
; up
&& t
== NULL
; up
= up
->outer
)
2181 t
= maybe_lookup_decl (decl
, up
);
2183 return t
? t
: decl
;
/* Construct the initialization value for reduction CLAUSE.  */

tree
omp_reduction_init (tree clause, tree type)
{
  location_t loc = OMP_CLAUSE_LOCATION (clause);
  switch (OMP_CLAUSE_REDUCTION_CODE (clause))
    {
    case PLUS_EXPR:
    case MINUS_EXPR:
    case BIT_IOR_EXPR:
    case BIT_XOR_EXPR:
    case TRUTH_OR_EXPR:
    case TRUTH_ORIF_EXPR:
    case TRUTH_XOR_EXPR:
    case NE_EXPR:
      return build_zero_cst (type);

    case MULT_EXPR:
    case TRUTH_AND_EXPR:
    case TRUTH_ANDIF_EXPR:
    case EQ_EXPR:
      return fold_convert_loc (loc, type, integer_one_node);

    case BIT_AND_EXPR:
      return fold_convert_loc (loc, type, integer_minus_one_node);

    case MAX_EXPR:
      if (SCALAR_FLOAT_TYPE_P (type))
	{
	  REAL_VALUE_TYPE max, min;
	  if (HONOR_INFINITIES (TYPE_MODE (type)))
	    {
	      real_inf (&max);
	      real_arithmetic (&min, NEGATE_EXPR, &max, NULL);
	    }
	  else
	    real_maxval (&min, 1, TYPE_MODE (type));
	  return build_real (type, min);
	}
      else
	{
	  gcc_assert (INTEGRAL_TYPE_P (type));
	  return TYPE_MIN_VALUE (type);
	}

    case MIN_EXPR:
      if (SCALAR_FLOAT_TYPE_P (type))
	{
	  REAL_VALUE_TYPE max;
	  if (HONOR_INFINITIES (TYPE_MODE (type)))
	    real_inf (&max);
	  else
	    real_maxval (&max, 0, TYPE_MODE (type));
	  return build_real (type, max);
	}
      else
	{
	  gcc_assert (INTEGRAL_TYPE_P (type));
	  return TYPE_MAX_VALUE (type);
	}

    default:
      gcc_unreachable ();
    }
}
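/* Illustrative sketch only (these constants come from the switch above;
   the primed names are made up for the example).  For a private copy x'
   of the reduced variable:

       reduction(+:x)    ->  x' = 0;          // PLUS_EXPR and friends
       reduction(*:x)    ->  x' = 1;          // MULT_EXPR, TRUTH_AND*
       reduction(&:x)    ->  x' = ~0;         // BIT_AND_EXPR, all-ones mask
       reduction(max:x)  ->  x' = INT_MIN;    // or -inf for floats with
                                              // HONOR_INFINITIES
       reduction(min:x)  ->  x' = INT_MAX;    // or +inf, symmetrically

   i.e. each private copy starts at the identity element of the
   reduction operator.  */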
/* Generate code to implement the input clauses, FIRSTPRIVATE and COPYIN,
   from the receiver (aka child) side and initializers for REFERENCE_TYPE
   private variables.  Initialization statements go in ILIST, while calls
   to destructors go in DLIST.  */

static void
lower_rec_input_clauses (tree clauses, gimple_seq *ilist, gimple_seq *dlist,
			 omp_context *ctx)
{
  tree c, dtor, copyin_seq, x, ptr;
  bool copyin_by_ref = false;
  bool lastprivate_firstprivate = false;
  int pass;

  copyin_seq = NULL;

  /* Do all the fixed sized types in the first pass, and the variable sized
     types in the second pass.  This makes sure that the scalar arguments to
     the variable sized types are processed before we use them in the
     variable sized operations.  */
  for (pass = 0; pass < 2; ++pass)
    {
      for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
	{
	  enum omp_clause_code c_kind = OMP_CLAUSE_CODE (c);
	  tree var, new_var;
	  bool by_ref;
	  location_t clause_loc = OMP_CLAUSE_LOCATION (c);

	  switch (c_kind)
	    {
	    case OMP_CLAUSE_PRIVATE:
	      if (OMP_CLAUSE_PRIVATE_DEBUG (c))
		continue;
	      break;
	    case OMP_CLAUSE_SHARED:
	      if (maybe_lookup_decl (OMP_CLAUSE_DECL (c), ctx) == NULL)
		{
		  gcc_assert (is_global_var (OMP_CLAUSE_DECL (c)));
		  continue;
		}
	    case OMP_CLAUSE_FIRSTPRIVATE:
	    case OMP_CLAUSE_COPYIN:
	    case OMP_CLAUSE_REDUCTION:
	      break;
	    case OMP_CLAUSE_LASTPRIVATE:
	      if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
		{
		  lastprivate_firstprivate = true;
		  if (pass != 0)
		    continue;
		}
	      break;
	    default:
	      continue;
	    }

	  new_var = var = OMP_CLAUSE_DECL (c);
	  if (c_kind != OMP_CLAUSE_COPYIN)
	    new_var = lookup_decl (var, ctx);

	  if (c_kind == OMP_CLAUSE_SHARED || c_kind == OMP_CLAUSE_COPYIN)
	    {
	      if (pass != 0)
		continue;
	    }
	  else if (is_variable_sized (var))
	    {
	      /* For variable sized types, we need to allocate the
		 actual storage here.  Call alloca and store the
		 result in the pointer decl that we created elsewhere.  */
	      if (pass == 0)
		continue;

	      if (c_kind != OMP_CLAUSE_FIRSTPRIVATE || !is_task_ctx (ctx))
		{
		  gimple stmt;
		  tree tmp, atmp;

		  ptr = DECL_VALUE_EXPR (new_var);
		  gcc_assert (TREE_CODE (ptr) == INDIRECT_REF);
		  ptr = TREE_OPERAND (ptr, 0);
		  gcc_assert (DECL_P (ptr));
		  x = TYPE_SIZE_UNIT (TREE_TYPE (new_var));

		  /* void *tmp = __builtin_alloca */
		  atmp = builtin_decl_explicit (BUILT_IN_ALLOCA);
		  stmt = gimple_build_call (atmp, 1, x);
		  tmp = create_tmp_var_raw (ptr_type_node, NULL);
		  gimple_add_tmp_var (tmp);
		  gimple_call_set_lhs (stmt, tmp);

		  gimple_seq_add_stmt (ilist, stmt);

		  x = fold_convert_loc (clause_loc, TREE_TYPE (ptr), tmp);
		  gimplify_assign (ptr, x, ilist);
		}
	    }
	  else if (is_reference (var))
	    {
	      /* For references that are being privatized for Fortran,
		 allocate new backing storage for the new pointer
		 variable.  This allows us to avoid changing all the
		 code that expects a pointer to something that expects
		 a direct variable.  Note that this doesn't apply to
		 C++, since reference types are disallowed in data
		 sharing clauses there, except for NRV optimized
		 return values.  */
	      if (pass == 0)
		continue;

	      x = TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (new_var)));
	      if (c_kind == OMP_CLAUSE_FIRSTPRIVATE && is_task_ctx (ctx))
		{
		  x = build_receiver_ref (var, false, ctx);
		  x = build_fold_addr_expr_loc (clause_loc, x);
		}
	      else if (TREE_CONSTANT (x))
		{
		  const char *name = NULL;
		  if (DECL_NAME (var))
		    name = IDENTIFIER_POINTER (DECL_NAME (new_var));

		  x = create_tmp_var_raw (TREE_TYPE (TREE_TYPE (new_var)),
					  name);
		  gimple_add_tmp_var (x);
		  TREE_ADDRESSABLE (x) = 1;
		  x = build_fold_addr_expr_loc (clause_loc, x);
		}
	      else
		{
		  tree atmp = builtin_decl_explicit (BUILT_IN_ALLOCA);
		  x = build_call_expr_loc (clause_loc, atmp, 1, x);
		}

	      x = fold_convert_loc (clause_loc, TREE_TYPE (new_var), x);
	      gimplify_assign (new_var, x, ilist);

	      new_var = build_simple_mem_ref_loc (clause_loc, new_var);
	    }
	  else if (c_kind == OMP_CLAUSE_REDUCTION
		   && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
	    {
	      if (pass == 0)
		continue;
	    }
	  else if (pass != 0)
	    continue;

	  switch (OMP_CLAUSE_CODE (c))
	    {
	    case OMP_CLAUSE_SHARED:
	      /* Shared global vars are just accessed directly.  */
	      if (is_global_var (new_var))
		break;
	      /* Set up the DECL_VALUE_EXPR for shared variables now.  This
		 needs to be delayed until after fixup_child_record_type so
		 that we get the correct type during the dereference.  */
	      by_ref = use_pointer_for_field (var, ctx);
	      x = build_receiver_ref (var, by_ref, ctx);
	      SET_DECL_VALUE_EXPR (new_var, x);
	      DECL_HAS_VALUE_EXPR_P (new_var) = 1;

	      /* ??? If VAR is not passed by reference, and the variable
		 hasn't been initialized yet, then we'll get a warning for
		 the store into the omp_data_s structure.  Ideally, we'd be
		 able to notice this and not store anything at all, but
		 we're generating code too early.  Suppress the warning.  */
	      if (!by_ref)
		TREE_NO_WARNING (var) = 1;
	      break;

	    case OMP_CLAUSE_LASTPRIVATE:
	      if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
		break;
	      /* FALLTHRU */

	    case OMP_CLAUSE_PRIVATE:
	      if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_PRIVATE)
		x = build_outer_var_ref (var, ctx);
	      else if (OMP_CLAUSE_PRIVATE_OUTER_REF (c))
		{
		  if (is_task_ctx (ctx))
		    x = build_receiver_ref (var, false, ctx);
		  else
		    x = build_outer_var_ref (var, ctx);
		}
	      else
		x = NULL;
	      x = lang_hooks.decls.omp_clause_default_ctor (c, new_var, x);
	      if (x)
		gimplify_and_add (x, ilist);
	      /* FALLTHRU */

	    do_dtor:
	      x = lang_hooks.decls.omp_clause_dtor (c, new_var);
	      if (x)
		{
		  gimple_seq tseq = NULL;

		  dtor = x;
		  gimplify_stmt (&dtor, &tseq);
		  gimple_seq_add_seq (dlist, tseq);
		}
	      break;

	    case OMP_CLAUSE_FIRSTPRIVATE:
	      if (is_task_ctx (ctx))
		{
		  if (is_reference (var) || is_variable_sized (var))
		    goto do_dtor;
		  else if (is_global_var (maybe_lookup_decl_in_outer_ctx (var,
									  ctx))
			   || use_pointer_for_field (var, NULL))
		    {
		      x = build_receiver_ref (var, false, ctx);
		      SET_DECL_VALUE_EXPR (new_var, x);
		      DECL_HAS_VALUE_EXPR_P (new_var) = 1;
		      goto do_dtor;
		    }
		}
	      x = build_outer_var_ref (var, ctx);
	      x = lang_hooks.decls.omp_clause_copy_ctor (c, new_var, x);
	      gimplify_and_add (x, ilist);
	      goto do_dtor;

	    case OMP_CLAUSE_COPYIN:
	      by_ref = use_pointer_for_field (var, NULL);
	      x = build_receiver_ref (var, by_ref, ctx);
	      x = lang_hooks.decls.omp_clause_assign_op (c, new_var, x);
	      append_to_statement_list (x, &copyin_seq);
	      copyin_by_ref |= by_ref;
	      break;

	    case OMP_CLAUSE_REDUCTION:
	      if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
		{
		  tree placeholder = OMP_CLAUSE_REDUCTION_PLACEHOLDER (c);
		  x = build_outer_var_ref (var, ctx);

		  if (is_reference (var))
		    x = build_fold_addr_expr_loc (clause_loc, x);
		  SET_DECL_VALUE_EXPR (placeholder, x);
		  DECL_HAS_VALUE_EXPR_P (placeholder) = 1;
		  lower_omp (&OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c), ctx);
		  gimple_seq_add_seq (ilist,
				      OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c));
		  OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c) = NULL;
		  DECL_HAS_VALUE_EXPR_P (placeholder) = 0;
		}
	      else
		{
		  x = omp_reduction_init (c, TREE_TYPE (new_var));
		  gcc_assert (TREE_CODE (TREE_TYPE (new_var)) != ARRAY_TYPE);
		  gimplify_assign (new_var, x, ilist);
		}
	      break;

	    default:
	      gcc_unreachable ();
	    }
	}
    }

  /* The copyin sequence is not to be executed by the main thread, since
     that would result in self-copies.  Perhaps not visible to scalars,
     but it certainly is to C++ operator=.  */
  if (copyin_seq)
    {
      x = build_call_expr (builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM),
			   0);
      x = build2 (NE_EXPR, boolean_type_node, x,
		  build_int_cst (TREE_TYPE (x), 0));
      x = build3 (COND_EXPR, void_type_node, x, copyin_seq, NULL);
      gimplify_and_add (x, ilist);
    }

  /* If any copyin variable is passed by reference, we must ensure the
     master thread doesn't modify it before it is copied over in all
     threads.  Similarly for variables in both firstprivate and
     lastprivate clauses we need to ensure the lastprivate copying
     happens after firstprivate copying in all threads.  */
  if (copyin_by_ref || lastprivate_firstprivate)
    gimplify_and_add (build_omp_barrier (), ilist);
}
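/* A hedged sketch of the receiver-side code produced above for a simple
   case (variable names are illustrative, not generated verbatim):

       #pragma omp parallel firstprivate (a) copyin (t)

   yields, in the child function (ILIST):

       a' = .omp_data_i->a;                     // firstprivate copy-in
       if (__builtin_omp_get_thread_num () != 0)
	 t = <receiver ref for t>;              // copyin, skipped on master
       GOMP_barrier ();                         // only when a copyin var is
					        // passed by reference

   DLIST collects the matching destructor calls, if the language
   hooks provide any.  */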
/* Generate code to implement the LASTPRIVATE clauses.  This is used for
   both parallel and workshare constructs.  PREDICATE may be NULL if it's
   always true.  */

static void
lower_lastprivate_clauses (tree clauses, tree predicate, gimple_seq *stmt_list,
			   omp_context *ctx)
{
  tree x, c, label = NULL;
  bool par_clauses = false;

  /* Early exit if there are no lastprivate clauses.  */
  clauses = find_omp_clause (clauses, OMP_CLAUSE_LASTPRIVATE);
  if (clauses == NULL)
    {
      /* If this was a workshare clause, see if it had been combined
	 with its parallel.  In that case, look for the clauses on the
	 parallel statement itself.  */
      if (is_parallel_ctx (ctx))
	return;

      ctx = ctx->outer;
      if (ctx == NULL || !is_parallel_ctx (ctx))
	return;

      clauses = find_omp_clause (gimple_omp_parallel_clauses (ctx->stmt),
				 OMP_CLAUSE_LASTPRIVATE);
      if (clauses == NULL)
	return;
      par_clauses = true;
    }

  if (predicate)
    {
      gimple stmt;
      tree label_true, arm1, arm2;

      label = create_artificial_label (UNKNOWN_LOCATION);
      label_true = create_artificial_label (UNKNOWN_LOCATION);
      arm1 = TREE_OPERAND (predicate, 0);
      arm2 = TREE_OPERAND (predicate, 1);
      gimplify_expr (&arm1, stmt_list, NULL, is_gimple_val, fb_rvalue);
      gimplify_expr (&arm2, stmt_list, NULL, is_gimple_val, fb_rvalue);
      stmt = gimple_build_cond (TREE_CODE (predicate), arm1, arm2,
				label_true, label);
      gimple_seq_add_stmt (stmt_list, stmt);
      gimple_seq_add_stmt (stmt_list, gimple_build_label (label_true));
    }

  for (c = clauses; c ;)
    {
      tree var, new_var;
      location_t clause_loc = OMP_CLAUSE_LOCATION (c);

      if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE)
	{
	  var = OMP_CLAUSE_DECL (c);
	  new_var = lookup_decl (var, ctx);

	  if (OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c))
	    {
	      lower_omp (&OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c), ctx);
	      gimple_seq_add_seq (stmt_list,
				  OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c));
	      OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c) = NULL;
	    }

	  x = build_outer_var_ref (var, ctx);
	  if (is_reference (var))
	    new_var = build_simple_mem_ref_loc (clause_loc, new_var);
	  x = lang_hooks.decls.omp_clause_assign_op (c, x, new_var);
	  gimplify_and_add (x, stmt_list);
	}
      c = OMP_CLAUSE_CHAIN (c);
      if (c == NULL && !par_clauses)
	{
	  /* If this was a workshare clause, see if it had been combined
	     with its parallel.  In that case, continue looking for the
	     clauses also on the parallel statement itself.  */
	  if (is_parallel_ctx (ctx))
	    break;

	  ctx = ctx->outer;
	  if (ctx == NULL || !is_parallel_ctx (ctx))
	    break;

	  c = find_omp_clause (gimple_omp_parallel_clauses (ctx->stmt),
			       OMP_CLAUSE_LASTPRIVATE);
	  par_clauses = true;
	}
    }

  if (label)
    gimple_seq_add_stmt (stmt_list, gimple_build_label (label));
}
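/* Illustrative shape of the emitted sequence when PREDICATE is, say,
   V == LASTITER (names made up for the example):

       if (V == LASTITER) goto label_true; else goto label;
     label_true:
       orig_var = new_var;      // one assignment per lastprivate clause
     label:

   With a NULL predicate the assignments are emitted unconditionally.  */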
/* Generate code to implement the REDUCTION clauses.  */

static void
lower_reduction_clauses (tree clauses, gimple_seq *stmt_seqp, omp_context *ctx)
{
  gimple_seq sub_seq = NULL;
  gimple stmt;
  tree x, c;
  int count = 0;

  /* First see if there is exactly one reduction clause.  Use OMP_ATOMIC
     update in that case, otherwise use a lock.  */
  for (c = clauses; c && count < 2; c = OMP_CLAUSE_CHAIN (c))
    if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION)
      {
	if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
	  {
	    /* Never use OMP_ATOMIC for array reductions.  */
	    count = -1;
	    break;
	  }
	count++;
      }

  if (count == 0)
    return;

  for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
    {
      tree var, ref, new_var;
      enum tree_code code;
      location_t clause_loc = OMP_CLAUSE_LOCATION (c);

      if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_REDUCTION)
	continue;

      var = OMP_CLAUSE_DECL (c);
      new_var = lookup_decl (var, ctx);
      if (is_reference (var))
	new_var = build_simple_mem_ref_loc (clause_loc, new_var);
      ref = build_outer_var_ref (var, ctx);
      code = OMP_CLAUSE_REDUCTION_CODE (c);

      /* reduction(-:var) sums up the partial results, so it acts
	 identically to reduction(+:var).  */
      if (code == MINUS_EXPR)
	code = PLUS_EXPR;

      if (count == 1)
	{
	  tree addr = build_fold_addr_expr_loc (clause_loc, ref);

	  addr = save_expr (addr);
	  ref = build1 (INDIRECT_REF, TREE_TYPE (TREE_TYPE (addr)), addr);
	  x = fold_build2_loc (clause_loc, code, TREE_TYPE (ref), ref, new_var);
	  x = build2 (OMP_ATOMIC, void_type_node, addr, x);
	  gimplify_and_add (x, stmt_seqp);
	  return;
	}

      if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
	{
	  tree placeholder = OMP_CLAUSE_REDUCTION_PLACEHOLDER (c);

	  if (is_reference (var))
	    ref = build_fold_addr_expr_loc (clause_loc, ref);
	  SET_DECL_VALUE_EXPR (placeholder, ref);
	  DECL_HAS_VALUE_EXPR_P (placeholder) = 1;
	  lower_omp (&OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c), ctx);
	  gimple_seq_add_seq (&sub_seq, OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c));
	  OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c) = NULL;
	  OMP_CLAUSE_REDUCTION_PLACEHOLDER (c) = NULL;
	}
      else
	{
	  x = build2 (code, TREE_TYPE (ref), ref, new_var);
	  ref = build_outer_var_ref (var, ctx);
	  gimplify_assign (ref, x, &sub_seq);
	}
    }

  stmt = gimple_build_call (builtin_decl_explicit (BUILT_IN_GOMP_ATOMIC_START),
			    0);
  gimple_seq_add_stmt (stmt_seqp, stmt);

  gimple_seq_add_seq (stmt_seqp, sub_seq);

  stmt = gimple_build_call (builtin_decl_explicit (BUILT_IN_GOMP_ATOMIC_END),
			    0);
  gimple_seq_add_stmt (stmt_seqp, stmt);
}
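/* Illustrative result (sketch only; primed names stand for the private
   copies).  With a single scalar reduction clause the merge is one
   atomic update:

       #pragma omp atomic
       s += s';

   With two or more clauses the merges are wrapped in the runtime lock:

       GOMP_atomic_start ();
       s1 += s1';
       s2 *= s2';
       GOMP_atomic_end ();  */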
/* Generate code to implement the COPYPRIVATE clauses.  */

static void
lower_copyprivate_clauses (tree clauses, gimple_seq *slist, gimple_seq *rlist,
			   omp_context *ctx)
{
  tree c;

  for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
    {
      tree var, new_var, ref, x;
      bool by_ref;
      location_t clause_loc = OMP_CLAUSE_LOCATION (c);

      if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_COPYPRIVATE)
	continue;

      var = OMP_CLAUSE_DECL (c);
      by_ref = use_pointer_for_field (var, NULL);

      ref = build_sender_ref (var, ctx);
      x = new_var = lookup_decl_in_outer_ctx (var, ctx);
      if (by_ref)
	{
	  x = build_fold_addr_expr_loc (clause_loc, new_var);
	  x = fold_convert_loc (clause_loc, TREE_TYPE (ref), x);
	}
      gimplify_assign (ref, x, slist);

      ref = build_receiver_ref (var, false, ctx);
      if (by_ref)
	{
	  ref = fold_convert_loc (clause_loc,
				  build_pointer_type (TREE_TYPE (new_var)),
				  ref);
	  ref = build_fold_indirect_ref_loc (clause_loc, ref);
	}
      if (is_reference (var))
	{
	  ref = fold_convert_loc (clause_loc, TREE_TYPE (new_var), ref);
	  ref = build_simple_mem_ref_loc (clause_loc, ref);
	  new_var = build_simple_mem_ref_loc (clause_loc, new_var);
	}
      x = lang_hooks.decls.omp_clause_assign_op (c, new_var, ref);
      gimplify_and_add (x, rlist);
    }
}
/* Generate code to implement the clauses, FIRSTPRIVATE, COPYIN, LASTPRIVATE,
   and REDUCTION from the sender (aka parent) side.  */

static void
lower_send_clauses (tree clauses, gimple_seq *ilist, gimple_seq *olist,
		    omp_context *ctx)
{
  tree c;

  for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
    {
      tree val, ref, x, var;
      bool by_ref, do_in = false, do_out = false;
      location_t clause_loc = OMP_CLAUSE_LOCATION (c);

      switch (OMP_CLAUSE_CODE (c))
	{
	case OMP_CLAUSE_PRIVATE:
	  if (OMP_CLAUSE_PRIVATE_OUTER_REF (c))
	    break;
	  continue;
	case OMP_CLAUSE_FIRSTPRIVATE:
	case OMP_CLAUSE_COPYIN:
	case OMP_CLAUSE_LASTPRIVATE:
	case OMP_CLAUSE_REDUCTION:
	  break;
	default:
	  continue;
	}

      val = OMP_CLAUSE_DECL (c);
      var = lookup_decl_in_outer_ctx (val, ctx);

      if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_COPYIN
	  && is_global_var (var))
	continue;
      if (is_variable_sized (val))
	continue;
      by_ref = use_pointer_for_field (val, NULL);

      switch (OMP_CLAUSE_CODE (c))
	{
	case OMP_CLAUSE_PRIVATE:
	case OMP_CLAUSE_FIRSTPRIVATE:
	case OMP_CLAUSE_COPYIN:
	  do_in = true;
	  break;

	case OMP_CLAUSE_LASTPRIVATE:
	  if (by_ref || is_reference (val))
	    {
	      if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
		continue;
	      do_in = true;
	    }
	  else
	    {
	      do_out = true;
	      if (lang_hooks.decls.omp_private_outer_ref (val))
		do_in = true;
	    }
	  break;

	case OMP_CLAUSE_REDUCTION:
	  do_in = true;
	  do_out = !(by_ref || is_reference (val));
	  break;

	default:
	  gcc_unreachable ();
	}

      if (do_in)
	{
	  ref = build_sender_ref (val, ctx);
	  x = by_ref ? build_fold_addr_expr_loc (clause_loc, var) : var;
	  gimplify_assign (ref, x, ilist);
	  if (is_task_ctx (ctx))
	    DECL_ABSTRACT_ORIGIN (TREE_OPERAND (ref, 1)) = NULL;
	}

      if (do_out)
	{
	  ref = build_sender_ref (val, ctx);
	  gimplify_assign (var, ref, olist);
	}
    }
}
/* Generate code to implement SHARED from the sender (aka parent)
   side.  This is trickier, since GIMPLE_OMP_PARALLEL_CLAUSES doesn't
   list things that got automatically shared.  */

static void
lower_send_shared_vars (gimple_seq *ilist, gimple_seq *olist, omp_context *ctx)
{
  tree var, ovar, nvar, f, x, record_type;

  if (ctx->record_type == NULL)
    return;

  record_type = ctx->srecord_type ? ctx->srecord_type : ctx->record_type;
  for (f = TYPE_FIELDS (record_type); f; f = DECL_CHAIN (f))
    {
      ovar = DECL_ABSTRACT_ORIGIN (f);
      nvar = maybe_lookup_decl (ovar, ctx);
      if (!nvar || !DECL_HAS_VALUE_EXPR_P (nvar))
	continue;

      /* If CTX is a nested parallel directive, find the immediately
	 enclosing parallel or workshare construct that contains a
	 mapping for OVAR.  */
      var = lookup_decl_in_outer_ctx (ovar, ctx);

      if (use_pointer_for_field (ovar, ctx))
	{
	  x = build_sender_ref (ovar, ctx);
	  var = build_fold_addr_expr (var);
	  gimplify_assign (x, var, ilist);
	}
      else
	{
	  x = build_sender_ref (ovar, ctx);
	  gimplify_assign (x, var, ilist);

	  if (!TREE_READONLY (var)
	      /* We don't need to receive a new reference to a result
		 or parm decl.  In fact we may not store to it as we will
		 invalidate any pending RSO and generate wrong gimple
		 during inlining.  */
	      && !((TREE_CODE (var) == RESULT_DECL
		    || TREE_CODE (var) == PARM_DECL)
		   && DECL_BY_REFERENCE (var)))
	    {
	      x = build_sender_ref (ovar, ctx);
	      gimplify_assign (var, x, olist);
	    }
	}
    }
}
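/* Sketch of the sender-side copies for an automatically shared VAR
   (illustrative field names):

       .omp_data_o.var = var;      // or = &var when passed by reference
       <parallel region runs>
       var = .omp_data_o.var;      // copy-out, skipped for read-only vars
				   // and by-reference RESULT/PARM decls  */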
/* A convenience function to build an empty GIMPLE_COND with just the
   condition.  */

static gimple
gimple_build_cond_empty (tree cond)
{
  enum tree_code pred_code;
  tree lhs, rhs;

  gimple_cond_get_ops_from_tree (cond, &pred_code, &lhs, &rhs);
  return gimple_build_cond (pred_code, lhs, rhs, NULL_TREE, NULL_TREE);
}
/* Build the function calls to GOMP_parallel_start etc to actually
   generate the parallel operation.  REGION is the parallel region
   being expanded.  BB is the block where to insert the code.  WS_ARGS
   will be set if this is a call to a combined parallel+workshare
   construct, it contains the list of additional arguments needed by
   the workshare construct.  */

static void
expand_parallel_call (struct omp_region *region, basic_block bb,
		      gimple entry_stmt, vec<tree, va_gc> *ws_args)
{
  tree t, t1, t2, val, cond, c, clauses;
  gimple_stmt_iterator gsi;
  gimple stmt;
  enum built_in_function start_ix;
  int start_ix2;
  location_t clause_loc;
  vec<tree, va_gc> *args;

  clauses = gimple_omp_parallel_clauses (entry_stmt);

  /* Determine what flavor of GOMP_parallel_start we will be
     emitting.  */
  start_ix = BUILT_IN_GOMP_PARALLEL_START;
  if (is_combined_parallel (region))
    {
      switch (region->inner->type)
	{
	case GIMPLE_OMP_FOR:
	  gcc_assert (region->inner->sched_kind != OMP_CLAUSE_SCHEDULE_AUTO);
	  start_ix2 = ((int)BUILT_IN_GOMP_PARALLEL_LOOP_STATIC_START
		       + (region->inner->sched_kind
			  == OMP_CLAUSE_SCHEDULE_RUNTIME
			  ? 3 : region->inner->sched_kind));
	  start_ix = (enum built_in_function)start_ix2;
	  break;
	case GIMPLE_OMP_SECTIONS:
	  start_ix = BUILT_IN_GOMP_PARALLEL_SECTIONS_START;
	  break;
	default:
	  gcc_unreachable ();
	}
    }

  /* By default, the value of NUM_THREADS is zero (selected at run time)
     and there is no conditional.  */
  cond = NULL_TREE;
  val = build_int_cst (unsigned_type_node, 0);

  c = find_omp_clause (clauses, OMP_CLAUSE_IF);
  if (c)
    cond = OMP_CLAUSE_IF_EXPR (c);

  c = find_omp_clause (clauses, OMP_CLAUSE_NUM_THREADS);
  if (c)
    {
      val = OMP_CLAUSE_NUM_THREADS_EXPR (c);
      clause_loc = OMP_CLAUSE_LOCATION (c);
    }
  else
    clause_loc = gimple_location (entry_stmt);

  /* Ensure 'val' is of the correct type.  */
  val = fold_convert_loc (clause_loc, unsigned_type_node, val);

  /* If we found the clause 'if (cond)', build either
     (cond != 0) or (cond ? val : 1u).  */
  if (cond)
    {
      gimple_stmt_iterator gsi;

      cond = gimple_boolify (cond);

      if (integer_zerop (val))
	val = fold_build2_loc (clause_loc,
			       EQ_EXPR, unsigned_type_node, cond,
			       build_int_cst (TREE_TYPE (cond), 0));
      else
	{
	  basic_block cond_bb, then_bb, else_bb;
	  edge e, e_then, e_else;
	  tree tmp_then, tmp_else, tmp_join, tmp_var;

	  tmp_var = create_tmp_var (TREE_TYPE (val), NULL);
	  if (gimple_in_ssa_p (cfun))
	    {
	      tmp_then = make_ssa_name (tmp_var, NULL);
	      tmp_else = make_ssa_name (tmp_var, NULL);
	      tmp_join = make_ssa_name (tmp_var, NULL);
	    }
	  else
	    {
	      tmp_then = tmp_var;
	      tmp_else = tmp_var;
	      tmp_join = tmp_var;
	    }

	  e = split_block (bb, NULL);
	  cond_bb = e->src;
	  bb = e->dest;
	  remove_edge (e);

	  then_bb = create_empty_bb (cond_bb);
	  else_bb = create_empty_bb (then_bb);
	  set_immediate_dominator (CDI_DOMINATORS, then_bb, cond_bb);
	  set_immediate_dominator (CDI_DOMINATORS, else_bb, cond_bb);

	  stmt = gimple_build_cond_empty (cond);
	  gsi = gsi_start_bb (cond_bb);
	  gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);

	  gsi = gsi_start_bb (then_bb);
	  stmt = gimple_build_assign (tmp_then, val);
	  gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);

	  gsi = gsi_start_bb (else_bb);
	  stmt = gimple_build_assign
		   (tmp_else, build_int_cst (unsigned_type_node, 1));
	  gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);

	  make_edge (cond_bb, then_bb, EDGE_TRUE_VALUE);
	  make_edge (cond_bb, else_bb, EDGE_FALSE_VALUE);
	  if (current_loops)
	    {
	      add_bb_to_loop (then_bb, cond_bb->loop_father);
	      add_bb_to_loop (else_bb, cond_bb->loop_father);
	    }
	  e_then = make_edge (then_bb, bb, EDGE_FALLTHRU);
	  e_else = make_edge (else_bb, bb, EDGE_FALLTHRU);

	  if (gimple_in_ssa_p (cfun))
	    {
	      gimple phi = create_phi_node (tmp_join, bb);
	      add_phi_arg (phi, tmp_then, e_then, UNKNOWN_LOCATION);
	      add_phi_arg (phi, tmp_else, e_else, UNKNOWN_LOCATION);
	    }

	  val = tmp_join;
	}

      gsi = gsi_start_bb (bb);
      val = force_gimple_operand_gsi (&gsi, val, true, NULL_TREE,
				      false, GSI_CONTINUE_LINKING);
    }

  gsi = gsi_last_bb (bb);
  t = gimple_omp_parallel_data_arg (entry_stmt);
  if (t == NULL)
    t1 = null_pointer_node;
  else
    t1 = build_fold_addr_expr (t);
  t2 = build_fold_addr_expr (gimple_omp_parallel_child_fn (entry_stmt));

  vec_alloc (args, 3 + vec_safe_length (ws_args));
  args->quick_push (t2);
  args->quick_push (t1);
  args->quick_push (val);
  if (ws_args)
    args->splice (*ws_args);

  t = build_call_expr_loc_vec (UNKNOWN_LOCATION,
			       builtin_decl_explicit (start_ix), args);

  force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
			    false, GSI_CONTINUE_LINKING);

  t = gimple_omp_parallel_data_arg (entry_stmt);
  if (t == NULL)
    t = null_pointer_node;
  else
    t = build_fold_addr_expr (t);
  t = build_call_expr_loc (gimple_location (entry_stmt),
			   gimple_omp_parallel_child_fn (entry_stmt), 1, t);
  force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
			    false, GSI_CONTINUE_LINKING);

  t = build_call_expr_loc (gimple_location (entry_stmt),
			   builtin_decl_explicit (BUILT_IN_GOMP_PARALLEL_END),
			   0);
  force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
			    false, GSI_CONTINUE_LINKING);
}
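/* For reference, the call sequence built above has this shape
   (illustrative, assuming no combined workshare and no if/num_threads
   clauses):

       GOMP_parallel_start (child_fn, &.omp_data_o, 0);
       child_fn (&.omp_data_o);
       GOMP_parallel_end ();

   A combined parallel loop calls e.g. GOMP_parallel_loop_static_start
   instead, with the extra workshare arguments spliced onto the end.  */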
/* Build the function call to GOMP_task to actually
   generate the task operation.  BB is the block where to insert the code.  */

static void
expand_task_call (basic_block bb, gimple entry_stmt)
{
  tree t, t1, t2, t3, flags, cond, c, c2, clauses;
  gimple_stmt_iterator gsi;
  location_t loc = gimple_location (entry_stmt);

  clauses = gimple_omp_task_clauses (entry_stmt);

  c = find_omp_clause (clauses, OMP_CLAUSE_IF);
  if (c)
    cond = gimple_boolify (OMP_CLAUSE_IF_EXPR (c));
  else
    cond = boolean_true_node;

  c = find_omp_clause (clauses, OMP_CLAUSE_UNTIED);
  c2 = find_omp_clause (clauses, OMP_CLAUSE_MERGEABLE);
  flags = build_int_cst (unsigned_type_node,
			 (c ? 1 : 0) + (c2 ? 4 : 0));

  c = find_omp_clause (clauses, OMP_CLAUSE_FINAL);
  if (c)
    {
      c = gimple_boolify (OMP_CLAUSE_FINAL_EXPR (c));
      c = fold_build3_loc (loc, COND_EXPR, unsigned_type_node, c,
			   build_int_cst (unsigned_type_node, 2),
			   build_int_cst (unsigned_type_node, 0));
      flags = fold_build2_loc (loc, PLUS_EXPR, unsigned_type_node, flags, c);
    }

  gsi = gsi_last_bb (bb);
  t = gimple_omp_task_data_arg (entry_stmt);
  if (t == NULL)
    t2 = null_pointer_node;
  else
    t2 = build_fold_addr_expr_loc (loc, t);
  t1 = build_fold_addr_expr_loc (loc, gimple_omp_task_child_fn (entry_stmt));
  t = gimple_omp_task_copy_fn (entry_stmt);
  if (t == NULL)
    t3 = null_pointer_node;
  else
    t3 = build_fold_addr_expr_loc (loc, t);

  t = build_call_expr (builtin_decl_explicit (BUILT_IN_GOMP_TASK),
		       7, t1, t2, t3,
		       gimple_omp_task_arg_size (entry_stmt),
		       gimple_omp_task_arg_align (entry_stmt), cond, flags);

  force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
			    false, GSI_CONTINUE_LINKING);
}
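/* The built call has the shape (illustrative):

       GOMP_task (child_fn, &.omp_data_o, copy_fn, arg_size, arg_align,
		  if_cond, flags);

   where FLAGS packs the untied (bit 0), final (bit 1) and mergeable
   (bit 2) clauses as computed above.  */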
/* If exceptions are enabled, wrap the statements in BODY in a MUST_NOT_THROW
   catch handler and return it.  This prevents programs from violating the
   structured block semantics with throws.  */

static gimple_seq
maybe_catch_exception (gimple_seq body)
{
  gimple g;
  tree decl;

  if (!flag_exceptions)
    return body;

  if (lang_hooks.eh_protect_cleanup_actions != NULL)
    decl = lang_hooks.eh_protect_cleanup_actions ();
  else
    decl = builtin_decl_explicit (BUILT_IN_TRAP);

  g = gimple_build_eh_must_not_throw (decl);
  g = gimple_build_try (body, gimple_seq_alloc_with_stmt (g),
			GIMPLE_TRY_CATCH);

  return gimple_seq_alloc_with_stmt (g);
}
/* Chain all the DECLs in LIST by their TREE_CHAIN fields.  */

static tree
vec2chain (vec<tree, va_gc> *v)
{
  tree chain = NULL_TREE, t;
  unsigned ix;

  FOR_EACH_VEC_SAFE_ELT_REVERSE (v, ix, t)
    {
      DECL_CHAIN (t) = chain;
      chain = t;
    }

  return chain;
}
/* Remove barriers in REGION->EXIT's block.  Note that this is only
   valid for GIMPLE_OMP_PARALLEL regions.  Since the end of a parallel region
   is an implicit barrier, any workshare inside the GIMPLE_OMP_PARALLEL that
   left a barrier at the end of the GIMPLE_OMP_PARALLEL region can now be
   removed.  */

static void
remove_exit_barrier (struct omp_region *region)
{
  gimple_stmt_iterator gsi;
  basic_block exit_bb;
  edge_iterator ei;
  edge e;
  gimple stmt;
  int any_addressable_vars = -1;

  exit_bb = region->exit;

  /* If the parallel region doesn't return, we don't have REGION->EXIT
     block at all.  */
  if (! exit_bb)
    return;

  /* The last insn in the block will be the parallel's GIMPLE_OMP_RETURN.  The
     workshare's GIMPLE_OMP_RETURN will be in a preceding block.  The kinds of
     statements that can appear in between are extremely limited -- no
     memory operations at all.  Here, we allow nothing at all, so the
     only thing we allow to precede this GIMPLE_OMP_RETURN is a label.  */
  gsi = gsi_last_bb (exit_bb);
  gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_RETURN);
  gsi_prev (&gsi);
  if (!gsi_end_p (gsi) && gimple_code (gsi_stmt (gsi)) != GIMPLE_LABEL)
    return;

  FOR_EACH_EDGE (e, ei, exit_bb->preds)
    {
      gsi = gsi_last_bb (e->src);
      if (gsi_end_p (gsi))
	continue;
      stmt = gsi_stmt (gsi);
      if (gimple_code (stmt) == GIMPLE_OMP_RETURN
	  && !gimple_omp_return_nowait_p (stmt))
	{
	  /* OpenMP 3.0 tasks unfortunately prevent this optimization
	     in many cases.  If there could be tasks queued, the barrier
	     might be needed to let the tasks run before some local
	     variable of the parallel that the task uses as shared
	     runs out of scope.  The task can be spawned either
	     from within current function (this would be easy to check)
	     or from some function it calls and gets passed an address
	     of such a variable.  */
	  if (any_addressable_vars < 0)
	    {
	      gimple parallel_stmt = last_stmt (region->entry);
	      tree child_fun = gimple_omp_parallel_child_fn (parallel_stmt);
	      tree local_decls, block, decl;
	      unsigned ix;

	      any_addressable_vars = 0;
	      FOR_EACH_LOCAL_DECL (DECL_STRUCT_FUNCTION (child_fun), ix, decl)
		if (TREE_ADDRESSABLE (decl))
		  {
		    any_addressable_vars = 1;
		    break;
		  }
	      for (block = gimple_block (stmt);
		   !any_addressable_vars
		   && block
		   && TREE_CODE (block) == BLOCK;
		   block = BLOCK_SUPERCONTEXT (block))
		{
		  for (local_decls = BLOCK_VARS (block);
		       local_decls;
		       local_decls = DECL_CHAIN (local_decls))
		    if (TREE_ADDRESSABLE (local_decls))
		      {
			any_addressable_vars = 1;
			break;
		      }
		  if (block == gimple_block (parallel_stmt))
		    break;
		}
	    }
	  if (!any_addressable_vars)
	    gimple_omp_return_set_nowait (stmt);
	}
    }
}
static void
remove_exit_barriers (struct omp_region *region)
{
  if (region->type == GIMPLE_OMP_PARALLEL)
    remove_exit_barrier (region);

  if (region->inner)
    {
      region = region->inner;
      remove_exit_barriers (region);
      while (region->next)
	{
	  region = region->next;
	  remove_exit_barriers (region);
	}
    }
}
/* Optimize omp_get_thread_num () and omp_get_num_threads ()
   calls.  These can't be declared as const functions, but
   within one parallel body they are constant, so they can be
   transformed there into __builtin_omp_get_{thread_num,num_threads} ()
   which are declared const.  Similarly for task body, except
   that in untied task omp_get_thread_num () can change at any task
   scheduling point.  */

static void
optimize_omp_library_calls (gimple entry_stmt)
{
  basic_block bb;
  gimple_stmt_iterator gsi;
  tree thr_num_tree = builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM);
  tree thr_num_id = DECL_ASSEMBLER_NAME (thr_num_tree);
  tree num_thr_tree = builtin_decl_explicit (BUILT_IN_OMP_GET_NUM_THREADS);
  tree num_thr_id = DECL_ASSEMBLER_NAME (num_thr_tree);
  bool untied_task = (gimple_code (entry_stmt) == GIMPLE_OMP_TASK
		      && find_omp_clause (gimple_omp_task_clauses (entry_stmt),
					  OMP_CLAUSE_UNTIED) != NULL);

  FOR_EACH_BB (bb)
    for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
      {
	gimple call = gsi_stmt (gsi);
	tree decl;

	if (is_gimple_call (call)
	    && (decl = gimple_call_fndecl (call))
	    && DECL_EXTERNAL (decl)
	    && TREE_PUBLIC (decl)
	    && DECL_INITIAL (decl) == NULL)
	  {
	    tree built_in;

	    if (DECL_NAME (decl) == thr_num_id)
	      {
		/* In #pragma omp task untied omp_get_thread_num () can change
		   during the execution of the task region.  */
		if (untied_task)
		  continue;
		built_in = builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM);
	      }
	    else if (DECL_NAME (decl) == num_thr_id)
	      built_in = builtin_decl_explicit (BUILT_IN_OMP_GET_NUM_THREADS);
	    else
	      continue;

	    if (DECL_ASSEMBLER_NAME (decl) != DECL_ASSEMBLER_NAME (built_in)
		|| gimple_call_num_args (call) != 0)
	      continue;

	    if (flag_exceptions && !TREE_NOTHROW (decl))
	      continue;

	    if (TREE_CODE (TREE_TYPE (decl)) != FUNCTION_TYPE
		|| !types_compatible_p (TREE_TYPE (TREE_TYPE (decl)),
					TREE_TYPE (TREE_TYPE (built_in))))
	      continue;

	    gimple_call_set_fndecl (call, built_in);
	  }
      }
}
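/* Illustrative effect: inside a (tied) parallel or task body a call

       i = omp_get_thread_num ();

   is rewritten to the const builtin

       i = __builtin_omp_get_thread_num ();

   letting later passes CSE repeated queries within the region.  */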
/* Expand the OpenMP parallel or task directive starting at REGION.  */

static void
expand_omp_taskreg (struct omp_region *region)
{
  basic_block entry_bb, exit_bb, new_bb;
  struct function *child_cfun;
  tree child_fn, block, t;
  gimple_stmt_iterator gsi;
  gimple entry_stmt, stmt;
  edge e;
  vec<tree, va_gc> *ws_args;

  entry_stmt = last_stmt (region->entry);
  child_fn = gimple_omp_taskreg_child_fn (entry_stmt);
  child_cfun = DECL_STRUCT_FUNCTION (child_fn);

  entry_bb = region->entry;
  exit_bb = region->exit;

  if (is_combined_parallel (region))
    ws_args = region->ws_args;
  else
    ws_args = NULL;

  if (child_cfun->cfg)
    {
      /* Due to inlining, it may happen that we have already outlined
	 the region, in which case all we need to do is make the
	 sub-graph unreachable and emit the parallel call.  */
      edge entry_succ_e, exit_succ_e;
      gimple_stmt_iterator gsi;

      entry_succ_e = single_succ_edge (entry_bb);

      gsi = gsi_last_bb (entry_bb);
      gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_PARALLEL
		  || gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_TASK);
      gsi_remove (&gsi, true);

      new_bb = entry_bb;
      if (exit_bb)
	{
	  exit_succ_e = single_succ_edge (exit_bb);
	  make_edge (new_bb, exit_succ_e->dest, EDGE_FALLTHRU);
	}
      remove_edge_and_dominated_blocks (entry_succ_e);
    }
  else
    {
      unsigned srcidx, dstidx, num;

      /* If the parallel region needs data sent from the parent
	 function, then the very first statement (except possible
	 tree profile counter updates) of the parallel body
	 is a copy assignment .OMP_DATA_I = &.OMP_DATA_O.  Since
	 &.OMP_DATA_O is passed as an argument to the child function,
	 we need to replace it with the argument as seen by the child
	 function.

	 In most cases, this will end up being the identity assignment
	 .OMP_DATA_I = .OMP_DATA_I.  However, if the parallel body had
	 a function call that has been inlined, the original PARM_DECL
	 .OMP_DATA_I may have been converted into a different local
	 variable.  In which case, we need to keep the assignment.  */
      if (gimple_omp_taskreg_data_arg (entry_stmt))
	{
	  basic_block entry_succ_bb = single_succ (entry_bb);
	  gimple_stmt_iterator gsi;
	  tree arg, narg;
	  gimple parcopy_stmt = NULL;

	  for (gsi = gsi_start_bb (entry_succ_bb); ; gsi_next (&gsi))
	    {
	      gcc_assert (!gsi_end_p (gsi));
	      stmt = gsi_stmt (gsi);
	      if (gimple_code (stmt) != GIMPLE_ASSIGN)
		continue;

	      if (gimple_num_ops (stmt) == 2)
		{
		  tree arg = gimple_assign_rhs1 (stmt);

		  /* We're ignoring the subcode because we're
		     effectively doing a STRIP_NOPS.  */

		  if (TREE_CODE (arg) == ADDR_EXPR
		      && TREE_OPERAND (arg, 0)
			 == gimple_omp_taskreg_data_arg (entry_stmt))
		    {
		      parcopy_stmt = stmt;
		      break;
		    }
		}
	    }

	  gcc_assert (parcopy_stmt != NULL);
	  arg = DECL_ARGUMENTS (child_fn);

	  if (!gimple_in_ssa_p (cfun))
	    {
	      if (gimple_assign_lhs (parcopy_stmt) == arg)
		gsi_remove (&gsi, true);
	      else
		{
		  /* ?? Is setting the subcode really necessary ??  */
		  gimple_omp_set_subcode (parcopy_stmt, TREE_CODE (arg));
		  gimple_assign_set_rhs1 (parcopy_stmt, arg);
		}
	    }
	  else
	    {
	      /* If we are in ssa form, we must load the value from the default
		 definition of the argument.  That should not be defined now,
		 since the argument is not used uninitialized.  */
	      gcc_assert (ssa_default_def (cfun, arg) == NULL);
	      narg = make_ssa_name (arg, gimple_build_nop ());
	      set_ssa_default_def (cfun, arg, narg);
	      /* ?? Is setting the subcode really necessary ??  */
	      gimple_omp_set_subcode (parcopy_stmt, TREE_CODE (narg));
	      gimple_assign_set_rhs1 (parcopy_stmt, narg);
	      update_stmt (parcopy_stmt);
	    }
	}

      /* Declare local variables needed in CHILD_CFUN.  */
      block = DECL_INITIAL (child_fn);
      BLOCK_VARS (block) = vec2chain (child_cfun->local_decls);
      /* The gimplifier could record temporaries in parallel/task block
	 rather than in containing function's local_decls chain,
	 which would mean cgraph missed finalizing them.  Do it now.  */
      for (t = BLOCK_VARS (block); t; t = DECL_CHAIN (t))
	if (TREE_CODE (t) == VAR_DECL
	    && TREE_STATIC (t)
	    && !DECL_EXTERNAL (t))
	  varpool_finalize_decl (t);
      DECL_SAVED_TREE (child_fn) = NULL;
      /* We'll create a CFG for child_fn, so no gimple body is needed.  */
      gimple_set_body (child_fn, NULL);
      TREE_USED (block) = 1;

      /* Reset DECL_CONTEXT on function arguments.  */
      for (t = DECL_ARGUMENTS (child_fn); t; t = DECL_CHAIN (t))
	DECL_CONTEXT (t) = child_fn;

      /* Split ENTRY_BB at GIMPLE_OMP_PARALLEL or GIMPLE_OMP_TASK,
	 so that it can be moved to the child function.  */
      gsi = gsi_last_bb (entry_bb);
      stmt = gsi_stmt (gsi);
      gcc_assert (stmt && (gimple_code (stmt) == GIMPLE_OMP_PARALLEL
			   || gimple_code (stmt) == GIMPLE_OMP_TASK));
      gsi_remove (&gsi, true);
      e = split_block (entry_bb, stmt);
      entry_bb = e->dest;
      single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;

      /* Convert GIMPLE_OMP_RETURN into a RETURN_EXPR.  */
      if (exit_bb)
	{
	  gsi = gsi_last_bb (exit_bb);
	  gcc_assert (!gsi_end_p (gsi)
		      && gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_RETURN);
	  stmt = gimple_build_return (NULL);
	  gsi_insert_after (&gsi, stmt, GSI_SAME_STMT);
	  gsi_remove (&gsi, true);
	}

      /* Move the parallel region into CHILD_CFUN.  */

      if (gimple_in_ssa_p (cfun))
	{
	  init_tree_ssa (child_cfun);
	  init_ssa_operands (child_cfun);
	  child_cfun->gimple_df->in_ssa_p = true;
	  block = NULL_TREE;
	}
      else
	block = gimple_block (entry_stmt);

      new_bb = move_sese_region_to_fn (child_cfun, entry_bb, exit_bb, block);
      if (exit_bb)
	single_succ_edge (new_bb)->flags = EDGE_FALLTHRU;

      /* Remove non-local VAR_DECLs from child_cfun->local_decls list.  */
      num = vec_safe_length (child_cfun->local_decls);
      for (srcidx = 0, dstidx = 0; srcidx < num; srcidx++)
	{
	  t = (*child_cfun->local_decls)[srcidx];
	  if (DECL_CONTEXT (t) == cfun->decl)
	    continue;
	  if (srcidx != dstidx)
	    (*child_cfun->local_decls)[dstidx] = t;
	  dstidx++;
	}
      if (dstidx != num)
	vec_safe_truncate (child_cfun->local_decls, dstidx);

      /* Inform the callgraph about the new function.  */
      DECL_STRUCT_FUNCTION (child_fn)->curr_properties
	= cfun->curr_properties & ~PROP_loops;
      cgraph_add_new_function (child_fn, true);

      /* Fix the callgraph edges for child_cfun.  Those for cfun will be
	 fixed in a following pass.  */
      push_cfun (child_cfun);
      if (optimize)
	optimize_omp_library_calls (entry_stmt);
      rebuild_cgraph_edges ();

      /* Some EH regions might become dead, see PR34608.  If
	 pass_cleanup_cfg isn't the first pass to happen with the
	 new child, these dead EH edges might cause problems.
	 Clean them up now.  */
      if (flag_exceptions)
	{
	  basic_block bb;
	  bool changed = false;

	  FOR_EACH_BB (bb)
	    changed |= gimple_purge_dead_eh_edges (bb);
	  if (changed)
	    cleanup_tree_cfg ();
	}
      if (gimple_in_ssa_p (cfun))
	update_ssa (TODO_update_ssa);
      pop_cfun ();
    }

  /* Emit a library call to launch the children threads.  */
  if (gimple_code (entry_stmt) == GIMPLE_OMP_PARALLEL)
    expand_parallel_call (region, new_bb, entry_stmt, ws_args);
  else
    expand_task_call (new_bb, entry_stmt);
  if (gimple_in_ssa_p (cfun))
    update_ssa (TODO_update_ssa_only_virtuals);
}
/* A subroutine of expand_omp_for.  Generate code for a parallel
   loop with any schedule.  Given parameters:

	for (V = N1; V cond N2; V += STEP) BODY;

   where COND is "<" or ">", we generate pseudocode

	more = GOMP_loop_foo_start (N1, N2, STEP, CHUNK, &istart0, &iend0);
	if (more) goto L0; else goto L3;
    L0:
	V = istart0;
	iend = iend0;
    L1:
	BODY;
	V += STEP;
	if (V cond iend) goto L1; else goto L2;
    L2:
	if (GOMP_loop_foo_next (&istart0, &iend0)) goto L0; else goto L3;
    L3:

    If this is a combined omp parallel loop, instead of the call to
    GOMP_loop_foo_start, we call GOMP_loop_foo_next.

    For collapsed loops, given parameters:
      collapse(3)
      for (V1 = N11; V1 cond1 N12; V1 += STEP1)
	for (V2 = N21; V2 cond2 N22; V2 += STEP2)
	  for (V3 = N31; V3 cond3 N32; V3 += STEP3)
	    BODY;

    we generate pseudocode

	if (cond3 is <)
	  adj = STEP3 - 1;
	else
	  adj = STEP3 + 1;
	count3 = (adj + N32 - N31) / STEP3;
	if (cond2 is <)
	  adj = STEP2 - 1;
	else
	  adj = STEP2 + 1;
	count2 = (adj + N22 - N21) / STEP2;
	if (cond1 is <)
	  adj = STEP1 - 1;
	else
	  adj = STEP1 + 1;
	count1 = (adj + N12 - N11) / STEP1;
	count = count1 * count2 * count3;
	more = GOMP_loop_foo_start (0, count, 1, CHUNK, &istart0, &iend0);
	if (more) goto L0; else goto L3;
    L0:
	V = istart0;
	T = V;
	V3 = N31 + (T % count3) * STEP3;
	T = T / count3;
	V2 = N21 + (T % count2) * STEP2;
	T = T / count2;
	V1 = N11 + T * STEP1;
	iend = iend0;
    L1:
	BODY;
	V += 1;
	if (V < iend) goto L10; else goto L2;
    L10:
	V3 += STEP3;
	if (V3 cond3 N32) goto L1; else goto L11;
    L11:
	V3 = N31;
	V2 += STEP2;
	if (V2 cond2 N22) goto L1; else goto L12;
    L12:
	V2 = N21;
	V1 += STEP1;
	goto L1;
    L2:
	if (GOMP_loop_foo_next (&istart0, &iend0)) goto L0; else goto L3;
    L3:

      */

static void
expand_omp_for_generic (struct omp_region *region,
			struct omp_for_data *fd,
			enum built_in_function start_fn,
			enum built_in_function next_fn)
{
  tree type, istart0, iend0, iend;
  tree t, vmain, vback, bias = NULL_TREE;
  basic_block entry_bb, cont_bb, exit_bb, l0_bb, l1_bb, collapse_bb;
  basic_block l2_bb = NULL, l3_bb = NULL;
  gimple_stmt_iterator gsi;
  gimple stmt;
  bool in_combined_parallel = is_combined_parallel (region);
  bool broken_loop = region->cont == NULL;
  edge e, ne;
  tree *counts = NULL;
  int i;

  gcc_assert (!broken_loop || !in_combined_parallel);
  gcc_assert (fd->iter_type == long_integer_type_node
	      || !in_combined_parallel);

  type = TREE_TYPE (fd->loop.v);
  istart0 = create_tmp_var (fd->iter_type, ".istart0");
  iend0 = create_tmp_var (fd->iter_type, ".iend0");
  TREE_ADDRESSABLE (istart0) = 1;
  TREE_ADDRESSABLE (iend0) = 1;

  /* See if we need to bias by LLONG_MIN.  */
  if (fd->iter_type == long_long_unsigned_type_node
      && TREE_CODE (type) == INTEGER_TYPE
      && !TYPE_UNSIGNED (type))
    {
      tree n1, n2;

      if (fd->loop.cond_code == LT_EXPR)
	{
	  n1 = fd->loop.n1;
	  n2 = fold_build2 (PLUS_EXPR, type, fd->loop.n2, fd->loop.step);
	}
      else
	{
	  n1 = fold_build2 (MINUS_EXPR, type, fd->loop.n2, fd->loop.step);
	  n2 = fd->loop.n1;
	}
      if (TREE_CODE (n1) != INTEGER_CST
	  || TREE_CODE (n2) != INTEGER_CST
	  || ((tree_int_cst_sgn (n1) < 0) ^ (tree_int_cst_sgn (n2) < 0)))
	bias = fold_convert (fd->iter_type, TYPE_MIN_VALUE (type));
    }

  entry_bb = region->entry;
  cont_bb = region->cont;
  collapse_bb = NULL;
  gcc_assert (EDGE_COUNT (entry_bb->succs) == 2);
  gcc_assert (broken_loop
	      || BRANCH_EDGE (entry_bb)->dest == FALLTHRU_EDGE (cont_bb)->dest);
  l0_bb = split_edge (FALLTHRU_EDGE (entry_bb));
  l1_bb = single_succ (l0_bb);
  if (!broken_loop)
    {
      l2_bb = create_empty_bb (cont_bb);
      gcc_assert (BRANCH_EDGE (cont_bb)->dest == l1_bb);
      gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
    }
  else
    l2_bb = NULL;
  l3_bb = BRANCH_EDGE (entry_bb)->dest;
  exit_bb = region->exit;

  gsi = gsi_last_bb (entry_bb);

  gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
  if (fd->collapse > 1)
    {
      /* collapsed loops need work for expansion in SSA form.  */
      gcc_assert (!gimple_in_ssa_p (cfun));
      counts = (tree *) alloca (fd->collapse * sizeof (tree));
      for (i = 0; i < fd->collapse; i++)
	{
	  tree itype = TREE_TYPE (fd->loops[i].v);

	  if (POINTER_TYPE_P (itype))
	    itype = signed_type_for (itype);
	  t = build_int_cst (itype, (fd->loops[i].cond_code == LT_EXPR
				     ? -1 : 1));
	  t = fold_build2 (PLUS_EXPR, itype,
			   fold_convert (itype, fd->loops[i].step), t);
	  t = fold_build2 (PLUS_EXPR, itype, t,
			   fold_convert (itype, fd->loops[i].n2));
	  t = fold_build2 (MINUS_EXPR, itype, t,
			   fold_convert (itype, fd->loops[i].n1));
	  if (TYPE_UNSIGNED (itype) && fd->loops[i].cond_code == GT_EXPR)
	    t = fold_build2 (TRUNC_DIV_EXPR, itype,
			     fold_build1 (NEGATE_EXPR, itype, t),
			     fold_build1 (NEGATE_EXPR, itype,
					  fold_convert (itype,
							fd->loops[i].step)));
	  else
	    t = fold_build2 (TRUNC_DIV_EXPR, itype, t,
			     fold_convert (itype, fd->loops[i].step));
	  t = fold_convert (type, t);
	  if (TREE_CODE (t) == INTEGER_CST)
	    counts[i] = t;
	  else
	    {
	      counts[i] = create_tmp_reg (type, ".count");
	      t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
					    true, GSI_SAME_STMT);
	      stmt = gimple_build_assign (counts[i], t);
	      gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
	    }
	  if (SSA_VAR_P (fd->loop.n2))
	    {
	      if (i == 0)
		t = counts[0];
	      else
		{
		  t = fold_build2 (MULT_EXPR, type, fd->loop.n2, counts[i]);
		  t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
						true, GSI_SAME_STMT);
		}
	      stmt = gimple_build_assign (fd->loop.n2, t);
	      gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
	    }
	}
    }
  if (in_combined_parallel)
    {
      /* In a combined parallel loop, emit a call to
	 GOMP_loop_foo_next.  */
      t = build_call_expr (builtin_decl_explicit (next_fn), 2,
			   build_fold_addr_expr (istart0),
			   build_fold_addr_expr (iend0));
    }
  else
    {
      tree t0, t1, t2, t3, t4;
      /* If this is not a combined parallel loop, emit a call to
	 GOMP_loop_foo_start in ENTRY_BB.  */
      t4 = build_fold_addr_expr (iend0);
      t3 = build_fold_addr_expr (istart0);
      t2 = fold_convert (fd->iter_type, fd->loop.step);
      if (POINTER_TYPE_P (type)
	  && TYPE_PRECISION (type) != TYPE_PRECISION (fd->iter_type))
	{
	  /* Avoid casting pointers to integer of a different size.  */
	  tree itype = signed_type_for (type);
	  t1 = fold_convert (fd->iter_type, fold_convert (itype, fd->loop.n2));
	  t0 = fold_convert (fd->iter_type, fold_convert (itype, fd->loop.n1));
	}
      else
	{
	  t1 = fold_convert (fd->iter_type, fd->loop.n2);
	  t0 = fold_convert (fd->iter_type, fd->loop.n1);
	}
      if (bias)
	{
	  t1 = fold_build2 (PLUS_EXPR, fd->iter_type, t1, bias);
	  t0 = fold_build2 (PLUS_EXPR, fd->iter_type, t0, bias);
	}
      if (fd->iter_type == long_integer_type_node)
	{
	  if (fd->chunk_size)
	    {
	      t = fold_convert (fd->iter_type, fd->chunk_size);
	      t = build_call_expr (builtin_decl_explicit (start_fn),
				   6, t0, t1, t2, t, t3, t4);
	    }
	  else
	    t = build_call_expr (builtin_decl_explicit (start_fn),
				 5, t0, t1, t2, t3, t4);
	}
      else
	{
	  tree t5;
	  tree c_bool_type;
	  tree bfn_decl;

	  /* The GOMP_loop_ull_*start functions have additional boolean
	     argument, true for < loops and false for > loops.
	     In Fortran, the C bool type can be different from
	     boolean_type_node.  */
	  bfn_decl = builtin_decl_explicit (start_fn);
	  c_bool_type = TREE_TYPE (TREE_TYPE (bfn_decl));
	  t5 = build_int_cst (c_bool_type,
			      fd->loop.cond_code == LT_EXPR ? 1 : 0);
	  if (fd->chunk_size)
	    {
	      tree bfn_decl = builtin_decl_explicit (start_fn);
	      t = fold_convert (fd->iter_type, fd->chunk_size);
	      t = build_call_expr (bfn_decl, 7, t5, t0, t1, t2, t, t3, t4);
	    }
	  else
	    t = build_call_expr (builtin_decl_explicit (start_fn),
				 6, t5, t0, t1, t2, t3, t4);
	}
    }
  if (TREE_TYPE (t) != boolean_type_node)
    t = fold_build2 (NE_EXPR, boolean_type_node,
		     t, build_int_cst (TREE_TYPE (t), 0));
  t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
				true, GSI_SAME_STMT);
  gsi_insert_after (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT);

  /* Remove the GIMPLE_OMP_FOR statement.  */
  gsi_remove (&gsi, true);

  /* Iteration setup for sequential loop goes in L0_BB.  */
  gsi = gsi_start_bb (l0_bb);
  t = istart0;
  if (bias)
    t = fold_build2 (MINUS_EXPR, fd->iter_type, t, bias);
  if (POINTER_TYPE_P (type))
    t = fold_convert (signed_type_for (type), t);
  t = fold_convert (type, t);
  t = force_gimple_operand_gsi (&gsi, t,
				DECL_P (fd->loop.v)
				&& TREE_ADDRESSABLE (fd->loop.v),
				NULL_TREE, false, GSI_CONTINUE_LINKING);
  stmt = gimple_build_assign (fd->loop.v, t);
  gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);

  t = iend0;
  if (bias)
    t = fold_build2 (MINUS_EXPR, fd->iter_type, t, bias);
  if (POINTER_TYPE_P (type))
    t = fold_convert (signed_type_for (type), t);
  t = fold_convert (type, t);
  iend = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
				   false, GSI_CONTINUE_LINKING);
  if (fd->collapse > 1)
    {
      tree tem = create_tmp_reg (type, ".tem");
      stmt = gimple_build_assign (tem, fd->loop.v);
      gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
      for (i = fd->collapse - 1; i >= 0; i--)
	{
	  tree vtype = TREE_TYPE (fd->loops[i].v), itype;
	  itype = vtype;
	  if (POINTER_TYPE_P (vtype))
	    itype = signed_type_for (vtype);
	  t = fold_build2 (TRUNC_MOD_EXPR, type, tem, counts[i]);
	  t = fold_convert (itype, t);
	  t = fold_build2 (MULT_EXPR, itype, t,
			   fold_convert (itype, fd->loops[i].step));
	  if (POINTER_TYPE_P (vtype))
	    t = fold_build_pointer_plus (fd->loops[i].n1, t);
	  else
	    t = fold_build2 (PLUS_EXPR, itype, fd->loops[i].n1, t);
	  t = force_gimple_operand_gsi (&gsi, t,
					DECL_P (fd->loops[i].v)
					&& TREE_ADDRESSABLE (fd->loops[i].v),
					NULL_TREE, false,
					GSI_CONTINUE_LINKING);
	  stmt = gimple_build_assign (fd->loops[i].v, t);
	  gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
	  if (i != 0)
	    {
	      t = fold_build2 (TRUNC_DIV_EXPR, type, tem, counts[i]);
	      t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
					    false, GSI_CONTINUE_LINKING);
	      stmt = gimple_build_assign (tem, t);
	      gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
	    }
	}
    }

  if (!broken_loop)
    {
      /* Code to control the increment and predicate for the sequential
	 loop goes in the CONT_BB.  */
      gsi = gsi_last_bb (cont_bb);
      stmt = gsi_stmt (gsi);
      gcc_assert (gimple_code (stmt) == GIMPLE_OMP_CONTINUE);
      vmain = gimple_omp_continue_control_use (stmt);
      vback = gimple_omp_continue_control_def (stmt);

      if (POINTER_TYPE_P (type))
	t = fold_build_pointer_plus (vmain, fd->loop.step);
      else
	t = fold_build2 (PLUS_EXPR, type, vmain, fd->loop.step);
      t = force_gimple_operand_gsi (&gsi, t,
				    DECL_P (vback) && TREE_ADDRESSABLE (vback),
				    NULL_TREE, true, GSI_SAME_STMT);
      stmt = gimple_build_assign (vback, t);
      gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);

      t = build2 (fd->loop.cond_code, boolean_type_node,
		  DECL_P (vback) && TREE_ADDRESSABLE (vback) ? t : vback,
		  iend);
      stmt = gimple_build_cond_empty (t);
      gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);

      /* Remove GIMPLE_OMP_CONTINUE.  */
      gsi_remove (&gsi, true);

      if (fd->collapse > 1)
	{
	  basic_block last_bb, bb;

	  last_bb = cont_bb;
	  for (i = fd->collapse - 1; i >= 0; i--)
	    {
	      tree vtype = TREE_TYPE (fd->loops[i].v);

	      bb = create_empty_bb (last_bb);
	      if (current_loops)
		add_bb_to_loop (bb, last_bb->loop_father);
	      gsi = gsi_start_bb (bb);

	      if (i < fd->collapse - 1)
		{
		  e = make_edge (last_bb, bb, EDGE_FALSE_VALUE);
		  e->probability = REG_BR_PROB_BASE / 8;

		  t = fd->loops[i + 1].n1;
		  t = force_gimple_operand_gsi (&gsi, t,
						DECL_P (fd->loops[i + 1].v)
						&& TREE_ADDRESSABLE
							(fd->loops[i + 1].v),
						NULL_TREE, false,
						GSI_CONTINUE_LINKING);
		  stmt = gimple_build_assign (fd->loops[i + 1].v, t);
		  gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
		}
	      else
		collapse_bb = bb;

	      set_immediate_dominator (CDI_DOMINATORS, bb, last_bb);

	      if (POINTER_TYPE_P (vtype))
		t = fold_build_pointer_plus (fd->loops[i].v,
					     fd->loops[i].step);
	      else
		t = fold_build2 (PLUS_EXPR, vtype, fd->loops[i].v,
				 fd->loops[i].step);
	      t = force_gimple_operand_gsi (&gsi, t,
					    DECL_P (fd->loops[i].v)
					    && TREE_ADDRESSABLE
						 (fd->loops[i].v),
					    NULL_TREE, false,
					    GSI_CONTINUE_LINKING);
	      stmt = gimple_build_assign (fd->loops[i].v, t);
	      gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);

	      if (i > 0)
		{
		  t = fd->loops[i].n2;
		  t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
						false, GSI_CONTINUE_LINKING);
		  tree v = fd->loops[i].v;
		  if (DECL_P (v) && TREE_ADDRESSABLE (v))
		    v = force_gimple_operand_gsi (&gsi, v, true, NULL_TREE,
						  false, GSI_CONTINUE_LINKING);
		  t = fold_build2 (fd->loops[i].cond_code, boolean_type_node,
				   v, t);
		  stmt = gimple_build_cond_empty (t);
		  gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
		  e = make_edge (bb, l1_bb, EDGE_TRUE_VALUE);
		  e->probability = REG_BR_PROB_BASE * 7 / 8;
		}
	      else
		make_edge (bb, l1_bb, EDGE_FALLTHRU);
	      last_bb = bb;
	    }
	}

      /* Emit code to get the next parallel iteration in L2_BB.  */
      gsi = gsi_start_bb (l2_bb);

      t = build_call_expr (builtin_decl_explicit (next_fn), 2,
			   build_fold_addr_expr (istart0),
			   build_fold_addr_expr (iend0));
      t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
				    false, GSI_CONTINUE_LINKING);
      if (TREE_TYPE (t) != boolean_type_node)
	t = fold_build2 (NE_EXPR, boolean_type_node,
			 t, build_int_cst (TREE_TYPE (t), 0));
      stmt = gimple_build_cond_empty (t);
      gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
    }

  /* Add the loop cleanup function.  */
  gsi = gsi_last_bb (exit_bb);
  if (gimple_omp_return_nowait_p (gsi_stmt (gsi)))
    t = builtin_decl_explicit (BUILT_IN_GOMP_LOOP_END_NOWAIT);
  else
    t = builtin_decl_explicit (BUILT_IN_GOMP_LOOP_END);
  stmt = gimple_build_call (t, 0);
  gsi_insert_after (&gsi, stmt, GSI_SAME_STMT);
  gsi_remove (&gsi, true);

  /* Connect the new blocks.  */
  find_edge (entry_bb, l0_bb)->flags = EDGE_TRUE_VALUE;
  find_edge (entry_bb, l3_bb)->flags = EDGE_FALSE_VALUE;

  if (!broken_loop)
    {
      gimple_seq phis;

      e = find_edge (cont_bb, l3_bb);
      ne = make_edge (l2_bb, l3_bb, EDGE_FALSE_VALUE);

      phis = phi_nodes (l3_bb);
      for (gsi = gsi_start (phis); !gsi_end_p (gsi); gsi_next (&gsi))
	{
	  gimple phi = gsi_stmt (gsi);
	  SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, ne),
		   PHI_ARG_DEF_FROM_EDGE (phi, e));
	}
      remove_edge (e);

      make_edge (cont_bb, l2_bb, EDGE_FALSE_VALUE);
      if (current_loops)
	add_bb_to_loop (l2_bb, cont_bb->loop_father);
      if (fd->collapse > 1)
	{
	  e = find_edge (cont_bb, l1_bb);
	  remove_edge (e);
	  e = make_edge (cont_bb, collapse_bb, EDGE_TRUE_VALUE);
	}
      else
	{
	  e = find_edge (cont_bb, l1_bb);
	  e->flags = EDGE_TRUE_VALUE;
	}
      e->probability = REG_BR_PROB_BASE * 7 / 8;
      find_edge (cont_bb, l2_bb)->probability = REG_BR_PROB_BASE / 8;
      make_edge (l2_bb, l0_bb, EDGE_TRUE_VALUE);

      set_immediate_dominator (CDI_DOMINATORS, l2_bb,
			       recompute_dominator (CDI_DOMINATORS, l2_bb));
      set_immediate_dominator (CDI_DOMINATORS, l3_bb,
			       recompute_dominator (CDI_DOMINATORS, l3_bb));
      set_immediate_dominator (CDI_DOMINATORS, l0_bb,
			       recompute_dominator (CDI_DOMINATORS, l0_bb));
      set_immediate_dominator (CDI_DOMINATORS, l1_bb,
			       recompute_dominator (CDI_DOMINATORS, l1_bb));
    }
}
4153 /* A subroutine of expand_omp_for. Generate code for a parallel
4154 loop with static schedule and no specified chunk size. Given
4157 for (V = N1; V cond N2; V += STEP) BODY;
4159 where COND is "<" or ">", we generate pseudocode
4165 if ((__typeof (V)) -1 > 0 && cond is >)
4166 n = -(adj + N2 - N1) / -STEP;
4168 n = (adj + N2 - N1) / STEP;
4171 if (threadid < tt) goto L3; else goto L4;
4176 s0 = q * threadid + tt;
4179 if (s0 >= e0) goto L2; else goto L0;
4185 if (V cond e) goto L1;
static void
expand_omp_for_static_nochunk (struct omp_region *region,
			       struct omp_for_data *fd)
{
  tree n, q, s0, e0, e, t, tt, nthreads, threadid;
  tree type, itype, vmain, vback;
  basic_block entry_bb, second_bb, third_bb, exit_bb, seq_start_bb;
  basic_block body_bb, cont_bb;
  basic_block fin_bb;
  gimple_stmt_iterator gsi;
  gimple stmt;
  edge ep;

  itype = type = TREE_TYPE (fd->loop.v);
  if (POINTER_TYPE_P (type))
    itype = signed_type_for (type);

  entry_bb = region->entry;
  cont_bb = region->cont;
  gcc_assert (EDGE_COUNT (entry_bb->succs) == 2);
  gcc_assert (BRANCH_EDGE (entry_bb)->dest == FALLTHRU_EDGE (cont_bb)->dest);
  seq_start_bb = split_edge (FALLTHRU_EDGE (entry_bb));
  body_bb = single_succ (seq_start_bb);
  gcc_assert (BRANCH_EDGE (cont_bb)->dest == body_bb);
  gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
  fin_bb = FALLTHRU_EDGE (cont_bb)->dest;
  exit_bb = region->exit;

  /* Iteration space partitioning goes in ENTRY_BB.  */
  gsi = gsi_last_bb (entry_bb);
  gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);

  t = build_call_expr (builtin_decl_explicit (BUILT_IN_OMP_GET_NUM_THREADS), 0);
  t = fold_convert (itype, t);
  nthreads = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
				       true, GSI_SAME_STMT);

  t = build_call_expr (builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM), 0);
  t = fold_convert (itype, t);
  threadid = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
				       true, GSI_SAME_STMT);

  fd->loop.n1
    = force_gimple_operand_gsi (&gsi, fold_convert (type, fd->loop.n1),
				true, NULL_TREE, true, GSI_SAME_STMT);
  fd->loop.n2
    = force_gimple_operand_gsi (&gsi, fold_convert (itype, fd->loop.n2),
				true, NULL_TREE, true, GSI_SAME_STMT);
  fd->loop.step
    = force_gimple_operand_gsi (&gsi, fold_convert (itype, fd->loop.step),
				true, NULL_TREE, true, GSI_SAME_STMT);

  t = build_int_cst (itype, (fd->loop.cond_code == LT_EXPR ? -1 : 1));
  t = fold_build2 (PLUS_EXPR, itype, fd->loop.step, t);
  t = fold_build2 (PLUS_EXPR, itype, t, fd->loop.n2);
  t = fold_build2 (MINUS_EXPR, itype, t, fold_convert (itype, fd->loop.n1));
  if (TYPE_UNSIGNED (itype) && fd->loop.cond_code == GT_EXPR)
    t = fold_build2 (TRUNC_DIV_EXPR, itype,
		     fold_build1 (NEGATE_EXPR, itype, t),
		     fold_build1 (NEGATE_EXPR, itype, fd->loop.step));
  else
    t = fold_build2 (TRUNC_DIV_EXPR, itype, t, fd->loop.step);
  t = fold_convert (itype, t);
  n = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);

  q = create_tmp_reg (itype, "q");
  t = fold_build2 (TRUNC_DIV_EXPR, itype, n, nthreads);
  t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE, true, GSI_SAME_STMT);
  gsi_insert_before (&gsi, gimple_build_assign (q, t), GSI_SAME_STMT);

  tt = create_tmp_reg (itype, "tt");
  t = fold_build2 (TRUNC_MOD_EXPR, itype, n, nthreads);
  t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE, true, GSI_SAME_STMT);
  gsi_insert_before (&gsi, gimple_build_assign (tt, t), GSI_SAME_STMT);

  t = build2 (LT_EXPR, boolean_type_node, threadid, tt);
  stmt = gimple_build_cond_empty (t);
  gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);

  second_bb = split_block (entry_bb, stmt)->dest;
  gsi = gsi_last_bb (second_bb);
  gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);

  gsi_insert_before (&gsi, gimple_build_assign (tt, build_int_cst (itype, 0)),
		     GSI_SAME_STMT);
  stmt = gimple_build_assign_with_ops (PLUS_EXPR, q, q,
				       build_int_cst (itype, 1));
  gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);

  third_bb = split_block (second_bb, stmt)->dest;
  gsi = gsi_last_bb (third_bb);
  gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);

  t = build2 (MULT_EXPR, itype, q, threadid);
  t = build2 (PLUS_EXPR, itype, t, tt);
  s0 = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);

  t = fold_build2 (PLUS_EXPR, itype, s0, q);
  e0 = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);

  t = build2 (GE_EXPR, boolean_type_node, s0, e0);
  gsi_insert_before (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT);

  /* Remove the GIMPLE_OMP_FOR statement.  */
  gsi_remove (&gsi, true);

  /* Setup code for sequential iteration goes in SEQ_START_BB.  */
  gsi = gsi_start_bb (seq_start_bb);

  t = fold_convert (itype, s0);
  t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
  if (POINTER_TYPE_P (type))
    t = fold_build_pointer_plus (fd->loop.n1, t);
  else
    t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
  t = force_gimple_operand_gsi (&gsi, t,
				DECL_P (fd->loop.v)
				&& TREE_ADDRESSABLE (fd->loop.v),
				NULL_TREE, false, GSI_CONTINUE_LINKING);
  stmt = gimple_build_assign (fd->loop.v, t);
  gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);

  t = fold_convert (itype, e0);
  t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
  if (POINTER_TYPE_P (type))
    t = fold_build_pointer_plus (fd->loop.n1, t);
  else
    t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
  e = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
				false, GSI_CONTINUE_LINKING);

  /* The code controlling the sequential loop replaces the
     GIMPLE_OMP_CONTINUE.  */
  gsi = gsi_last_bb (cont_bb);
  stmt = gsi_stmt (gsi);
  gcc_assert (gimple_code (stmt) == GIMPLE_OMP_CONTINUE);
  vmain = gimple_omp_continue_control_use (stmt);
  vback = gimple_omp_continue_control_def (stmt);

  if (POINTER_TYPE_P (type))
    t = fold_build_pointer_plus (vmain, fd->loop.step);
  else
    t = fold_build2 (PLUS_EXPR, type, vmain, fd->loop.step);
  t = force_gimple_operand_gsi (&gsi, t,
				DECL_P (vback) && TREE_ADDRESSABLE (vback),
				NULL_TREE, true, GSI_SAME_STMT);
  stmt = gimple_build_assign (vback, t);
  gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);

  t = build2 (fd->loop.cond_code, boolean_type_node,
	      DECL_P (vback) && TREE_ADDRESSABLE (vback) ? t : vback, e);
  gsi_insert_before (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT);

  /* Remove the GIMPLE_OMP_CONTINUE statement.  */
  gsi_remove (&gsi, true);

  /* Replace the GIMPLE_OMP_RETURN with a barrier, or nothing.  */
  gsi = gsi_last_bb (exit_bb);
  if (!gimple_omp_return_nowait_p (gsi_stmt (gsi)))
    force_gimple_operand_gsi (&gsi, build_omp_barrier (), false, NULL_TREE,
			      false, GSI_SAME_STMT);
  gsi_remove (&gsi, true);

  /* Connect all the blocks.  */
  ep = make_edge (entry_bb, third_bb, EDGE_FALSE_VALUE);
  ep->probability = REG_BR_PROB_BASE / 4 * 3;
  ep = find_edge (entry_bb, second_bb);
  ep->flags = EDGE_TRUE_VALUE;
  ep->probability = REG_BR_PROB_BASE / 4;
  find_edge (third_bb, seq_start_bb)->flags = EDGE_FALSE_VALUE;
  find_edge (third_bb, fin_bb)->flags = EDGE_TRUE_VALUE;

  find_edge (cont_bb, body_bb)->flags = EDGE_TRUE_VALUE;
  find_edge (cont_bb, fin_bb)->flags = EDGE_FALSE_VALUE;

  set_immediate_dominator (CDI_DOMINATORS, second_bb, entry_bb);
  set_immediate_dominator (CDI_DOMINATORS, third_bb, entry_bb);
  set_immediate_dominator (CDI_DOMINATORS, seq_start_bb, third_bb);
  set_immediate_dominator (CDI_DOMINATORS, body_bb,
			   recompute_dominator (CDI_DOMINATORS, body_bb));
  set_immediate_dominator (CDI_DOMINATORS, fin_bb,
			   recompute_dominator (CDI_DOMINATORS, fin_bb));
}

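/* Illustration (not used by the pass): a minimal, self-contained C model
   of the iteration-space partitioning that expand_omp_for_static_nochunk
   emits.  The function name and parameters below are hypothetical; only
   the arithmetic mirrors the pseudocode in the comment above.  Thread
   THREADID of NTHREADS is given the half-open iteration range
   [*ps0, *pe0) out of N total iterations.  */

static inline void
omp_static_nochunk_range_model (long n, long nthreads, long threadid,
				long *ps0, long *pe0)
{
  long q = n / nthreads;	/* base share per thread */
  long tt = n % nthreads;	/* the first TT threads get one extra */

  if (threadid < tt)
    {
      tt = 0;
      q = q + 1;
    }
  /* e.g. n = 10, nthreads = 4 yields shares 3, 3, 2, 2.  */
  *ps0 = q * threadid + tt;
  *pe0 = *ps0 + q;
}
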
/* A subroutine of expand_omp_for.  Generate code for a parallel
   loop with static schedule and a specified chunk size.  Given
   parameters:

	for (V = N1; V cond N2; V += STEP) BODY;

   where COND is "<" or ">", we generate pseudocode

	if (cond is <)
	  adj = STEP - 1;
	else
	  adj = STEP + 1;
	if ((__typeof (V)) -1 > 0 && cond is >)
	  n = -(adj + N2 - N1) / -STEP;
	else
	  n = (adj + N2 - N1) / STEP;
	trip = 0;
	V = threadid * CHUNK * STEP + N1;  -- this extra definition of V is
					      here so that V is defined
					      if the loop is not entered
    L0:
	s0 = (trip * nthreads + threadid) * CHUNK;
	e0 = min(s0 + CHUNK, n);
	if (s0 < n) goto L1; else goto L4;
    L1:
	V = s0 * STEP + N1;
	e = e0 * STEP + N1;
    L2:
	BODY;
	V += STEP;
	if (V cond e) goto L2; else goto L3;
    L3:
	trip += 1;
	goto L0;
    L4:
*/
static void
expand_omp_for_static_chunk (struct omp_region *region, struct omp_for_data *fd)
{
  tree n, s0, e0, e, t;
  tree trip_var, trip_init, trip_main, trip_back, nthreads, threadid;
  tree type, itype, v_main, v_back, v_extra;
  basic_block entry_bb, exit_bb, body_bb, seq_start_bb, iter_part_bb;
  basic_block trip_update_bb, cont_bb, fin_bb;
  gimple_stmt_iterator si;
  gimple stmt;
  edge se;

  itype = type = TREE_TYPE (fd->loop.v);
  if (POINTER_TYPE_P (type))
    itype = signed_type_for (type);

  entry_bb = region->entry;
  se = split_block (entry_bb, last_stmt (entry_bb));
  entry_bb = se->src;
  iter_part_bb = se->dest;
  cont_bb = region->cont;
  gcc_assert (EDGE_COUNT (iter_part_bb->succs) == 2);
  gcc_assert (BRANCH_EDGE (iter_part_bb)->dest
	      == FALLTHRU_EDGE (cont_bb)->dest);
  seq_start_bb = split_edge (FALLTHRU_EDGE (iter_part_bb));
  body_bb = single_succ (seq_start_bb);
  gcc_assert (BRANCH_EDGE (cont_bb)->dest == body_bb);
  gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
  fin_bb = FALLTHRU_EDGE (cont_bb)->dest;
  trip_update_bb = split_edge (FALLTHRU_EDGE (cont_bb));
  exit_bb = region->exit;

  /* Trip and adjustment setup goes in ENTRY_BB.  */
  si = gsi_last_bb (entry_bb);
  gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_FOR);

  t = build_call_expr (builtin_decl_explicit (BUILT_IN_OMP_GET_NUM_THREADS), 0);
  t = fold_convert (itype, t);
  nthreads = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
				       true, GSI_SAME_STMT);

  t = build_call_expr (builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM), 0);
  t = fold_convert (itype, t);
  threadid = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
				       true, GSI_SAME_STMT);

  fd->loop.n1
    = force_gimple_operand_gsi (&si, fold_convert (type, fd->loop.n1),
				true, NULL_TREE, true, GSI_SAME_STMT);
  fd->loop.n2
    = force_gimple_operand_gsi (&si, fold_convert (itype, fd->loop.n2),
				true, NULL_TREE, true, GSI_SAME_STMT);
  fd->loop.step
    = force_gimple_operand_gsi (&si, fold_convert (itype, fd->loop.step),
				true, NULL_TREE, true, GSI_SAME_STMT);
  fd->chunk_size
    = force_gimple_operand_gsi (&si, fold_convert (itype, fd->chunk_size),
				true, NULL_TREE, true, GSI_SAME_STMT);

  t = build_int_cst (itype, (fd->loop.cond_code == LT_EXPR ? -1 : 1));
  t = fold_build2 (PLUS_EXPR, itype, fd->loop.step, t);
  t = fold_build2 (PLUS_EXPR, itype, t, fd->loop.n2);
  t = fold_build2 (MINUS_EXPR, itype, t, fold_convert (itype, fd->loop.n1));
  if (TYPE_UNSIGNED (itype) && fd->loop.cond_code == GT_EXPR)
    t = fold_build2 (TRUNC_DIV_EXPR, itype,
		     fold_build1 (NEGATE_EXPR, itype, t),
		     fold_build1 (NEGATE_EXPR, itype, fd->loop.step));
  else
    t = fold_build2 (TRUNC_DIV_EXPR, itype, t, fd->loop.step);
  t = fold_convert (itype, t);
  n = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
				true, GSI_SAME_STMT);

  trip_var = create_tmp_reg (itype, ".trip");
  if (gimple_in_ssa_p (cfun))
    {
      trip_init = make_ssa_name (trip_var, NULL);
      trip_main = make_ssa_name (trip_var, NULL);
      trip_back = make_ssa_name (trip_var, NULL);
    }
  else
    {
      trip_init = trip_var;
      trip_main = trip_var;
      trip_back = trip_var;
    }

  stmt = gimple_build_assign (trip_init, build_int_cst (itype, 0));
  gsi_insert_before (&si, stmt, GSI_SAME_STMT);

  t = fold_build2 (MULT_EXPR, itype, threadid, fd->chunk_size);
  t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
  if (POINTER_TYPE_P (type))
    t = fold_build_pointer_plus (fd->loop.n1, t);
  else
    t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
  v_extra = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
				      true, GSI_SAME_STMT);

  /* Remove the GIMPLE_OMP_FOR.  */
  gsi_remove (&si, true);

  /* Iteration space partitioning goes in ITER_PART_BB.  */
  si = gsi_last_bb (iter_part_bb);

  t = fold_build2 (MULT_EXPR, itype, trip_main, nthreads);
  t = fold_build2 (PLUS_EXPR, itype, t, threadid);
  t = fold_build2 (MULT_EXPR, itype, t, fd->chunk_size);
  s0 = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
				 false, GSI_CONTINUE_LINKING);

  t = fold_build2 (PLUS_EXPR, itype, s0, fd->chunk_size);
  t = fold_build2 (MIN_EXPR, itype, t, n);
  e0 = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
				 false, GSI_CONTINUE_LINKING);

  t = build2 (LT_EXPR, boolean_type_node, s0, n);
  gsi_insert_after (&si, gimple_build_cond_empty (t), GSI_CONTINUE_LINKING);

  /* Setup code for sequential iteration goes in SEQ_START_BB.  */
  si = gsi_start_bb (seq_start_bb);

  t = fold_convert (itype, s0);
  t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
  if (POINTER_TYPE_P (type))
    t = fold_build_pointer_plus (fd->loop.n1, t);
  else
    t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
  t = force_gimple_operand_gsi (&si, t,
				DECL_P (fd->loop.v)
				&& TREE_ADDRESSABLE (fd->loop.v),
				NULL_TREE, false, GSI_CONTINUE_LINKING);
  stmt = gimple_build_assign (fd->loop.v, t);
  gsi_insert_after (&si, stmt, GSI_CONTINUE_LINKING);

  t = fold_convert (itype, e0);
  t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
  if (POINTER_TYPE_P (type))
    t = fold_build_pointer_plus (fd->loop.n1, t);
  else
    t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
  e = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
				false, GSI_CONTINUE_LINKING);

  /* The code controlling the sequential loop goes in CONT_BB,
     replacing the GIMPLE_OMP_CONTINUE.  */
  si = gsi_last_bb (cont_bb);
  stmt = gsi_stmt (si);
  gcc_assert (gimple_code (stmt) == GIMPLE_OMP_CONTINUE);
  v_main = gimple_omp_continue_control_use (stmt);
  v_back = gimple_omp_continue_control_def (stmt);

  if (POINTER_TYPE_P (type))
    t = fold_build_pointer_plus (v_main, fd->loop.step);
  else
    t = fold_build2 (PLUS_EXPR, type, v_main, fd->loop.step);
  if (DECL_P (v_back) && TREE_ADDRESSABLE (v_back))
    t = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
				  true, GSI_SAME_STMT);
  stmt = gimple_build_assign (v_back, t);
  gsi_insert_before (&si, stmt, GSI_SAME_STMT);

  t = build2 (fd->loop.cond_code, boolean_type_node,
	      DECL_P (v_back) && TREE_ADDRESSABLE (v_back)
	      ? t : v_back, e);
  gsi_insert_before (&si, gimple_build_cond_empty (t), GSI_SAME_STMT);

  /* Remove GIMPLE_OMP_CONTINUE.  */
  gsi_remove (&si, true);

  /* Trip update code goes into TRIP_UPDATE_BB.  */
  si = gsi_start_bb (trip_update_bb);

  t = build_int_cst (itype, 1);
  t = build2 (PLUS_EXPR, itype, trip_main, t);
  stmt = gimple_build_assign (trip_back, t);
  gsi_insert_after (&si, stmt, GSI_CONTINUE_LINKING);

  /* Replace the GIMPLE_OMP_RETURN with a barrier, or nothing.  */
  si = gsi_last_bb (exit_bb);
  if (!gimple_omp_return_nowait_p (gsi_stmt (si)))
    force_gimple_operand_gsi (&si, build_omp_barrier (), false, NULL_TREE,
			      false, GSI_SAME_STMT);
  gsi_remove (&si, true);

  /* Connect the new blocks.  */
  find_edge (iter_part_bb, seq_start_bb)->flags = EDGE_TRUE_VALUE;
  find_edge (iter_part_bb, fin_bb)->flags = EDGE_FALSE_VALUE;

  find_edge (cont_bb, body_bb)->flags = EDGE_TRUE_VALUE;
  find_edge (cont_bb, trip_update_bb)->flags = EDGE_FALSE_VALUE;

  redirect_edge_and_branch (single_succ_edge (trip_update_bb), iter_part_bb);

  if (gimple_in_ssa_p (cfun))
    {
      gimple_stmt_iterator psi;
      gimple phi;
      edge re, ene;
      edge_var_map_vector *head;
      edge_var_map *vm;
      size_t i;

      /* When we redirect the edge from trip_update_bb to iter_part_bb, we
	 remove arguments of the phi nodes in fin_bb.  We need to create
	 appropriate phi nodes in iter_part_bb instead.  */
      se = single_pred_edge (fin_bb);
      re = single_succ_edge (trip_update_bb);
      head = redirect_edge_var_map_vector (re);
      ene = single_succ_edge (entry_bb);

      psi = gsi_start_phis (fin_bb);
      for (i = 0; !gsi_end_p (psi) && head->iterate (i, &vm);
	   gsi_next (&psi), ++i)
	{
	  gimple nphi;
	  source_location locus;

	  phi = gsi_stmt (psi);
	  t = gimple_phi_result (phi);
	  gcc_assert (t == redirect_edge_var_map_result (vm));
	  nphi = create_phi_node (t, iter_part_bb);

	  t = PHI_ARG_DEF_FROM_EDGE (phi, se);
	  locus = gimple_phi_arg_location_from_edge (phi, se);

	  /* A special case -- fd->loop.v is not yet computed in
	     iter_part_bb, we need to use v_extra instead.  */
	  if (t == fd->loop.v)
	    t = v_extra;
	  add_phi_arg (nphi, t, ene, locus);
	  locus = redirect_edge_var_map_location (vm);
	  add_phi_arg (nphi, redirect_edge_var_map_def (vm), re, locus);
	}
      gcc_assert (!gsi_end_p (psi) && i == head->length ());
      redirect_edge_var_map_clear (re);
      while (1)
	{
	  psi = gsi_start_phis (fin_bb);
	  if (gsi_end_p (psi))
	    break;
	  remove_phi_node (&psi, false);
	}

      /* Make phi node for trip.  */
      phi = create_phi_node (trip_main, iter_part_bb);
      add_phi_arg (phi, trip_back, single_succ_edge (trip_update_bb),
		   UNKNOWN_LOCATION);
      add_phi_arg (phi, trip_init, single_succ_edge (entry_bb),
		   UNKNOWN_LOCATION);
    }

  set_immediate_dominator (CDI_DOMINATORS, trip_update_bb, cont_bb);
  set_immediate_dominator (CDI_DOMINATORS, iter_part_bb,
			   recompute_dominator (CDI_DOMINATORS, iter_part_bb));
  set_immediate_dominator (CDI_DOMINATORS, fin_bb,
			   recompute_dominator (CDI_DOMINATORS, fin_bb));
  set_immediate_dominator (CDI_DOMINATORS, seq_start_bb,
			   recompute_dominator (CDI_DOMINATORS, seq_start_bb));
  set_immediate_dominator (CDI_DOMINATORS, body_bb,
			   recompute_dominator (CDI_DOMINATORS, body_bb));
}

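/* Illustration (not used by the pass): a C model of the chunked static
   schedule built above.  On each trip, thread THREADID claims the
   half-open range [s0, e0) in iteration space, where
   s0 = (trip * nthreads + threadid) * chunk, until s0 runs past N.
   The driver runs BODY via a callback; all names are hypothetical.  */

static inline void
omp_static_chunk_model (long n, long nthreads, long threadid, long chunk,
			void (*body) (long iter))
{
  long trip, s0, e0, i;

  for (trip = 0; ; trip++)
    {
      s0 = (trip * nthreads + threadid) * chunk;
      if (s0 >= n)
	break;				/* no work left for this thread */
      /* e0 = min (s0 + chunk, n), as in the pseudocode above.  */
      e0 = s0 + chunk < n ? s0 + chunk : n;
      for (i = s0; i < e0; i++)
	body (i);
    }
}
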
/* Expand the OpenMP loop defined by REGION.  */

static void
expand_omp_for (struct omp_region *region)
{
  struct omp_for_data fd;
  struct omp_for_data_loop *loops;

  loops
    = (struct omp_for_data_loop *)
      alloca (gimple_omp_for_collapse (last_stmt (region->entry))
	      * sizeof (struct omp_for_data_loop));
  extract_omp_for_data (last_stmt (region->entry), &fd, loops);
  region->sched_kind = fd.sched_kind;

  gcc_assert (EDGE_COUNT (region->entry->succs) == 2);
  BRANCH_EDGE (region->entry)->flags &= ~EDGE_ABNORMAL;
  FALLTHRU_EDGE (region->entry)->flags &= ~EDGE_ABNORMAL;
  if (region->cont)
    {
      gcc_assert (EDGE_COUNT (region->cont->succs) == 2);
      BRANCH_EDGE (region->cont)->flags &= ~EDGE_ABNORMAL;
      FALLTHRU_EDGE (region->cont)->flags &= ~EDGE_ABNORMAL;
    }

  if (fd.sched_kind == OMP_CLAUSE_SCHEDULE_STATIC
      && !fd.have_ordered
      && fd.collapse == 1
      && region->cont != NULL)
    {
      if (fd.chunk_size == NULL)
	expand_omp_for_static_nochunk (region, &fd);
      else
	expand_omp_for_static_chunk (region, &fd);
    }
  else
    {
      int fn_index, start_ix, next_ix;

      if (fd.chunk_size == NULL
	  && fd.sched_kind == OMP_CLAUSE_SCHEDULE_STATIC)
	fd.chunk_size = integer_zero_node;
      gcc_assert (fd.sched_kind != OMP_CLAUSE_SCHEDULE_AUTO);
      fn_index = (fd.sched_kind == OMP_CLAUSE_SCHEDULE_RUNTIME)
		  ? 3 : fd.sched_kind;
      fn_index += fd.have_ordered * 4;
      start_ix = ((int)BUILT_IN_GOMP_LOOP_STATIC_START) + fn_index;
      next_ix = ((int)BUILT_IN_GOMP_LOOP_STATIC_NEXT) + fn_index;
      if (fd.iter_type == long_long_unsigned_type_node)
	{
	  start_ix += ((int)BUILT_IN_GOMP_LOOP_ULL_STATIC_START
		       - (int)BUILT_IN_GOMP_LOOP_STATIC_START);
	  next_ix += ((int)BUILT_IN_GOMP_LOOP_ULL_STATIC_NEXT
		      - (int)BUILT_IN_GOMP_LOOP_STATIC_NEXT);
	}
      expand_omp_for_generic (region, &fd, (enum built_in_function) start_ix,
			      (enum built_in_function) next_ix);
    }

  if (gimple_in_ssa_p (cfun))
    update_ssa (TODO_update_ssa_only_virtuals);
}

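/* Illustration (not used by the pass): for the non-static schedules the
   generic expansion selected above drives the loop through paired
   libgomp start/next calls.  For "schedule(dynamic, 4)" over [n1, n2)
   with a "<" condition, the emitted control flow is equivalent to the
   sketch below.  The extern declarations abbreviate libgomp's entry
   points; the driver shape is a sketch, not the pass's literal output.  */

extern _Bool GOMP_loop_dynamic_start (long, long, long, long, long *, long *);
extern _Bool GOMP_loop_dynamic_next (long *, long *);
extern void GOMP_loop_end (void);

static void
omp_dynamic_loop_model (long n1, long n2, long step, void (*body) (long))
{
  long istart, iend, v;

  if (GOMP_loop_dynamic_start (n1, n2, step, 4, &istart, &iend))
    do
      for (v = istart; v < iend; v += step)
	body (v);
    while (GOMP_loop_dynamic_next (&istart, &iend));
  GOMP_loop_end ();		/* includes the end-of-loop barrier */
}
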
/* Expand code for an OpenMP sections directive.  In pseudo code, we generate

	v = GOMP_sections_start (n);
    L0:
	switch (v)
	  {
	  case 0:
	    goto L2;
	  case 1:
	    section 1;
	    goto L1;
	  case 2:
	    ...
	  case n:
	    ...
	  default:
	    abort ();
	  }
    L1:
	v = GOMP_sections_next ();
	goto L0;
    L2:
	reduction;

    If this is a combined parallel sections, replace the call to
    GOMP_sections_start with call to GOMP_sections_next.  */
static void
expand_omp_sections (struct omp_region *region)
{
  tree t, u, vin = NULL, vmain, vnext, l2;
  vec<tree> label_vec;
  unsigned len;
  basic_block entry_bb, l0_bb, l1_bb, l2_bb, default_bb;
  gimple_stmt_iterator si, switch_si;
  gimple sections_stmt, stmt, cont;
  edge_iterator ei;
  edge e;
  struct omp_region *inner;
  unsigned i, casei;
  bool exit_reachable = region->cont != NULL;

  gcc_assert (region->exit != NULL);
  entry_bb = region->entry;
  l0_bb = single_succ (entry_bb);
  l1_bb = region->cont;
  l2_bb = region->exit;
  if (single_pred_p (l2_bb) && single_pred (l2_bb) == l0_bb)
    l2 = gimple_block_label (l2_bb);
  else
    {
      /* This can happen if there are reductions.  */
      len = EDGE_COUNT (l0_bb->succs);
      gcc_assert (len > 0);
      e = EDGE_SUCC (l0_bb, len - 1);
      si = gsi_last_bb (e->dest);
      l2 = NULL_TREE;
      if (gsi_end_p (si)
	  || gimple_code (gsi_stmt (si)) != GIMPLE_OMP_SECTION)
	l2 = gimple_block_label (e->dest);
      else
	FOR_EACH_EDGE (e, ei, l0_bb->succs)
	  {
	    si = gsi_last_bb (e->dest);
	    if (gsi_end_p (si)
		|| gimple_code (gsi_stmt (si)) != GIMPLE_OMP_SECTION)
	      {
		l2 = gimple_block_label (e->dest);
		break;
	      }
	  }
    }
  if (exit_reachable)
    default_bb = create_empty_bb (l1_bb->prev_bb);
  else
    default_bb = create_empty_bb (l0_bb);

  /* We will build a switch() with enough cases for all the
     GIMPLE_OMP_SECTION regions, a '0' case to handle the end of more work
     and a default case to abort if something goes wrong.  */
  len = EDGE_COUNT (l0_bb->succs);

  /* Use vec::quick_push on label_vec throughout, since we know the size
     in advance.  */
  label_vec.create (len);

  /* The call to GOMP_sections_start goes in ENTRY_BB, replacing the
     GIMPLE_OMP_SECTIONS statement.  */
  si = gsi_last_bb (entry_bb);
  sections_stmt = gsi_stmt (si);
  gcc_assert (gimple_code (sections_stmt) == GIMPLE_OMP_SECTIONS);
  vin = gimple_omp_sections_control (sections_stmt);
  if (!is_combined_parallel (region))
    {
      /* If we are not inside a combined parallel+sections region,
	 call GOMP_sections_start.  */
      t = build_int_cst (unsigned_type_node,
			 exit_reachable ? len - 1 : len);
      u = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_START);
      stmt = gimple_build_call (u, 1, t);
    }
  else
    {
      /* Otherwise, call GOMP_sections_next.  */
      u = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_NEXT);
      stmt = gimple_build_call (u, 0);
    }
  gimple_call_set_lhs (stmt, vin);
  gsi_insert_after (&si, stmt, GSI_SAME_STMT);
  gsi_remove (&si, true);

  /* The switch() statement replacing GIMPLE_OMP_SECTIONS_SWITCH goes in
     L0_BB.  */
  switch_si = gsi_last_bb (l0_bb);
  gcc_assert (gimple_code (gsi_stmt (switch_si)) == GIMPLE_OMP_SECTIONS_SWITCH);
  if (exit_reachable)
    {
      cont = last_stmt (l1_bb);
      gcc_assert (gimple_code (cont) == GIMPLE_OMP_CONTINUE);
      vmain = gimple_omp_continue_control_use (cont);
      vnext = gimple_omp_continue_control_def (cont);
    }
  else
    {
      vmain = vin;
      vnext = NULL_TREE;
    }

  t = build_case_label (build_int_cst (unsigned_type_node, 0), NULL, l2);
  label_vec.quick_push (t);
  i = 1;

  /* Convert each GIMPLE_OMP_SECTION into a CASE_LABEL_EXPR.  */
  for (inner = region->inner, casei = 1;
       inner;
       inner = inner->next, i++, casei++)
    {
      basic_block s_entry_bb, s_exit_bb;

      /* Skip optional reduction region.  */
      if (inner->type == GIMPLE_OMP_ATOMIC_LOAD)
	{
	  --i;
	  --casei;
	  continue;
	}

      s_entry_bb = inner->entry;
      s_exit_bb = inner->exit;

      t = gimple_block_label (s_entry_bb);
      u = build_int_cst (unsigned_type_node, casei);
      u = build_case_label (u, NULL, t);
      label_vec.quick_push (u);

      si = gsi_last_bb (s_entry_bb);
      gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SECTION);
      gcc_assert (i < len || gimple_omp_section_last_p (gsi_stmt (si)));
      gsi_remove (&si, true);
      single_succ_edge (s_entry_bb)->flags = EDGE_FALLTHRU;

      if (s_exit_bb == NULL)
	continue;

      si = gsi_last_bb (s_exit_bb);
      gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_RETURN);
      gsi_remove (&si, true);

      single_succ_edge (s_exit_bb)->flags = EDGE_FALLTHRU;
    }

  /* Error handling code goes in DEFAULT_BB.  */
  t = gimple_block_label (default_bb);
  u = build_case_label (NULL, NULL, t);
  make_edge (l0_bb, default_bb, 0);
  if (current_loops)
    add_bb_to_loop (default_bb, l0_bb->loop_father);

  stmt = gimple_build_switch (vmain, u, label_vec);
  gsi_insert_after (&switch_si, stmt, GSI_SAME_STMT);
  gsi_remove (&switch_si, true);
  label_vec.release ();

  si = gsi_start_bb (default_bb);
  stmt = gimple_build_call (builtin_decl_explicit (BUILT_IN_TRAP), 0);
  gsi_insert_after (&si, stmt, GSI_CONTINUE_LINKING);

  if (exit_reachable)
    {
      tree bfn_decl;

      /* Code to get the next section goes in L1_BB.  */
      si = gsi_last_bb (l1_bb);
      gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_CONTINUE);

      bfn_decl = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_NEXT);
      stmt = gimple_build_call (bfn_decl, 0);
      gimple_call_set_lhs (stmt, vnext);
      gsi_insert_after (&si, stmt, GSI_SAME_STMT);
      gsi_remove (&si, true);

      single_succ_edge (l1_bb)->flags = EDGE_FALLTHRU;
    }

  /* Cleanup function replaces GIMPLE_OMP_RETURN in EXIT_BB.  */
  si = gsi_last_bb (l2_bb);
  if (gimple_omp_return_nowait_p (gsi_stmt (si)))
    t = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_END_NOWAIT);
  else
    t = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_END);
  stmt = gimple_build_call (t, 0);
  gsi_insert_after (&si, stmt, GSI_SAME_STMT);
  gsi_remove (&si, true);

  set_immediate_dominator (CDI_DOMINATORS, default_bb, l0_bb);
}

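/* Illustration (not used by the pass): source-level equivalent of the
   switch-based expansion above, for a two-section construct.
   GOMP_sections_start/next/end are the libgomp entry points named in
   the code; the driver shape and function name are a sketch.  */

extern unsigned GOMP_sections_start (unsigned);
extern unsigned GOMP_sections_next (void);
extern void GOMP_sections_end (void);

static void
omp_sections_model (void (*section1) (void), void (*section2) (void))
{
  unsigned v;

  for (v = GOMP_sections_start (2); v; v = GOMP_sections_next ())
    switch (v)
      {
      case 1:
	section1 ();
	break;
      case 2:
	section2 ();
	break;
      default:
	__builtin_trap ();	/* matches the DEFAULT_BB error path */
      }
  GOMP_sections_end ();		/* includes the closing barrier */
}
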
/* Expand code for an OpenMP single directive.  We've already expanded
   much of the code, here we simply place the GOMP_barrier call.  */

static void
expand_omp_single (struct omp_region *region)
{
  basic_block entry_bb, exit_bb;
  gimple_stmt_iterator si;
  bool need_barrier = false;

  entry_bb = region->entry;
  exit_bb = region->exit;

  si = gsi_last_bb (entry_bb);
  /* The terminal barrier at the end of a GOMP_single_copy sequence cannot
     be removed.  We need to ensure that the thread that entered the single
     does not exit before the data is copied out by the other threads.  */
  if (find_omp_clause (gimple_omp_single_clauses (gsi_stmt (si)),
		       OMP_CLAUSE_COPYPRIVATE))
    need_barrier = true;
  gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SINGLE);
  gsi_remove (&si, true);
  single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;

  si = gsi_last_bb (exit_bb);
  if (!gimple_omp_return_nowait_p (gsi_stmt (si)) || need_barrier)
    force_gimple_operand_gsi (&si, build_omp_barrier (), false, NULL_TREE,
			      false, GSI_SAME_STMT);
  gsi_remove (&si, true);
  single_succ_edge (exit_bb)->flags = EDGE_FALLTHRU;
}

/* Generic expansion for OpenMP synchronization directives: master,
   ordered and critical.  All we need to do here is remove the entry
   and exit markers for REGION.  */

static void
expand_omp_synch (struct omp_region *region)
{
  basic_block entry_bb, exit_bb;
  gimple_stmt_iterator si;

  entry_bb = region->entry;
  exit_bb = region->exit;

  si = gsi_last_bb (entry_bb);
  gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SINGLE
	      || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_MASTER
	      || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ORDERED
	      || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_CRITICAL);
  gsi_remove (&si, true);
  single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;

  if (exit_bb)
    {
      si = gsi_last_bb (exit_bb);
      gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_RETURN);
      gsi_remove (&si, true);
      single_succ_edge (exit_bb)->flags = EDGE_FALLTHRU;
    }
}

/* A subroutine of expand_omp_atomic.  Attempt to implement the atomic
   operation as a normal volatile load.  */

static bool
expand_omp_atomic_load (basic_block load_bb, tree addr,
			tree loaded_val, int index)
{
  enum built_in_function tmpbase;
  gimple_stmt_iterator gsi;
  basic_block store_bb;
  location_t loc;
  gimple stmt;
  tree decl, call, type, itype;

  gsi = gsi_last_bb (load_bb);
  stmt = gsi_stmt (gsi);
  gcc_assert (gimple_code (stmt) == GIMPLE_OMP_ATOMIC_LOAD);
  loc = gimple_location (stmt);

  /* ??? If the target does not implement atomic_load_optab[mode], and mode
     is smaller than word size, then expand_atomic_load assumes that the load
     is atomic.  We could avoid the builtin entirely in this case.  */

  tmpbase = (enum built_in_function) (BUILT_IN_ATOMIC_LOAD_N + index + 1);
  decl = builtin_decl_explicit (tmpbase);
  if (decl == NULL_TREE)
    return false;

  type = TREE_TYPE (loaded_val);
  itype = TREE_TYPE (TREE_TYPE (decl));

  call = build_call_expr_loc (loc, decl, 2, addr,
			      build_int_cst (NULL, MEMMODEL_RELAXED));
  if (!useless_type_conversion_p (type, itype))
    call = fold_build1_loc (loc, VIEW_CONVERT_EXPR, type, call);
  call = build2_loc (loc, MODIFY_EXPR, void_type_node, loaded_val, call);

  force_gimple_operand_gsi (&gsi, call, true, NULL_TREE, true, GSI_SAME_STMT);
  gsi_remove (&gsi, true);

  store_bb = single_succ (load_bb);
  gsi = gsi_last_bb (store_bb);
  gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_ATOMIC_STORE);
  gsi_remove (&gsi, true);

  if (gimple_in_ssa_p (cfun))
    update_ssa (TODO_update_ssa_no_phi);

  return true;
}

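/* Illustration (not used by the pass): for a 4-byte type, the lowering
   above turns "#pragma omp atomic read" followed by "v = *addr;" into
   the equivalent of the single builtin call below (index 2 selects the
   4-byte variant of BUILT_IN_ATOMIC_LOAD_N).  */

static int
omp_atomic_read_model (int *addr)
{
  /* OpenMP atomics only require atomicity, not ordering, so the
     relaxed model suffices; see the MEMMODEL_RELAXED argument above.  */
  return __atomic_load_n (addr, __ATOMIC_RELAXED);
}
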
/* A subroutine of expand_omp_atomic.  Attempt to implement the atomic
   operation as a normal volatile store.  */

static bool
expand_omp_atomic_store (basic_block load_bb, tree addr,
			 tree loaded_val, tree stored_val, int index)
{
  enum built_in_function tmpbase;
  gimple_stmt_iterator gsi;
  basic_block store_bb = single_succ (load_bb);
  location_t loc;
  gimple stmt;
  tree decl, call, type, itype;
  enum machine_mode imode;
  bool exchange;

  gsi = gsi_last_bb (load_bb);
  stmt = gsi_stmt (gsi);
  gcc_assert (gimple_code (stmt) == GIMPLE_OMP_ATOMIC_LOAD);

  /* If the load value is needed, then this isn't a store but an exchange.  */
  exchange = gimple_omp_atomic_need_value_p (stmt);

  gsi = gsi_last_bb (store_bb);
  stmt = gsi_stmt (gsi);
  gcc_assert (gimple_code (stmt) == GIMPLE_OMP_ATOMIC_STORE);
  loc = gimple_location (stmt);

  /* ??? If the target does not implement atomic_store_optab[mode], and mode
     is smaller than word size, then expand_atomic_store assumes that the store
     is atomic.  We could avoid the builtin entirely in this case.  */

  tmpbase = (exchange ? BUILT_IN_ATOMIC_EXCHANGE_N : BUILT_IN_ATOMIC_STORE_N);
  tmpbase = (enum built_in_function) ((int) tmpbase + index + 1);
  decl = builtin_decl_explicit (tmpbase);
  if (decl == NULL_TREE)
    return false;

  type = TREE_TYPE (stored_val);

  /* Dig out the type of the function's second argument.  */
  itype = TREE_TYPE (decl);
  itype = TYPE_ARG_TYPES (itype);
  itype = TREE_CHAIN (itype);
  itype = TREE_VALUE (itype);
  imode = TYPE_MODE (itype);

  if (exchange && !can_atomic_exchange_p (imode, true))
    return false;

  if (!useless_type_conversion_p (itype, type))
    stored_val = fold_build1_loc (loc, VIEW_CONVERT_EXPR, itype, stored_val);
  call = build_call_expr_loc (loc, decl, 3, addr, stored_val,
			      build_int_cst (NULL, MEMMODEL_RELAXED));
  if (exchange)
    {
      if (!useless_type_conversion_p (type, itype))
	call = build1_loc (loc, VIEW_CONVERT_EXPR, type, call);
      call = build2_loc (loc, MODIFY_EXPR, void_type_node, loaded_val, call);
    }

  force_gimple_operand_gsi (&gsi, call, true, NULL_TREE, true, GSI_SAME_STMT);
  gsi_remove (&gsi, true);

  /* Remove the GIMPLE_OMP_ATOMIC_LOAD that we verified above.  */
  gsi = gsi_last_bb (load_bb);
  gsi_remove (&gsi, true);

  if (gimple_in_ssa_p (cfun))
    update_ssa (TODO_update_ssa_no_phi);

  return true;
}

/* A subroutine of expand_omp_atomic.  Attempt to implement the atomic
   operation as a __atomic_fetch_op builtin.  INDEX is log2 of the
   size of the data type, and thus usable to find the index of the builtin
   decl.  Returns false if the expression is not of the proper form.  */

static bool
expand_omp_atomic_fetch_op (basic_block load_bb,
			    tree addr, tree loaded_val,
			    tree stored_val, int index)
{
  enum built_in_function oldbase, newbase, tmpbase;
  tree decl, itype, call;
  tree lhs, rhs;
  basic_block store_bb = single_succ (load_bb);
  gimple_stmt_iterator gsi;
  gimple stmt;
  location_t loc;
  enum tree_code code;
  bool need_old, need_new;
  enum machine_mode imode;

  /* We expect to find the following sequences:

   load_bb:
       GIMPLE_OMP_ATOMIC_LOAD (tmp, mem)

   store_bb:
       val = tmp OP something; (or: something OP tmp)
       GIMPLE_OMP_STORE (val)

  ???FIXME: Allow a more flexible sequence.
  Perhaps use data flow to pick the statements.

  */

  gsi = gsi_after_labels (store_bb);
  stmt = gsi_stmt (gsi);
  loc = gimple_location (stmt);
  if (!is_gimple_assign (stmt))
    return false;
  gsi_next (&gsi);
  if (gimple_code (gsi_stmt (gsi)) != GIMPLE_OMP_ATOMIC_STORE)
    return false;
  need_new = gimple_omp_atomic_need_value_p (gsi_stmt (gsi));
  need_old = gimple_omp_atomic_need_value_p (last_stmt (load_bb));
  gcc_checking_assert (!need_old || !need_new);

  if (!operand_equal_p (gimple_assign_lhs (stmt), stored_val, 0))
    return false;

  /* Check for one of the supported fetch-op operations.  */
  code = gimple_assign_rhs_code (stmt);
  switch (code)
    {
    case PLUS_EXPR:
    case POINTER_PLUS_EXPR:
      oldbase = BUILT_IN_ATOMIC_FETCH_ADD_N;
      newbase = BUILT_IN_ATOMIC_ADD_FETCH_N;
      break;
    case MINUS_EXPR:
      oldbase = BUILT_IN_ATOMIC_FETCH_SUB_N;
      newbase = BUILT_IN_ATOMIC_SUB_FETCH_N;
      break;
    case BIT_AND_EXPR:
      oldbase = BUILT_IN_ATOMIC_FETCH_AND_N;
      newbase = BUILT_IN_ATOMIC_AND_FETCH_N;
      break;
    case BIT_IOR_EXPR:
      oldbase = BUILT_IN_ATOMIC_FETCH_OR_N;
      newbase = BUILT_IN_ATOMIC_OR_FETCH_N;
      break;
    case BIT_XOR_EXPR:
      oldbase = BUILT_IN_ATOMIC_FETCH_XOR_N;
      newbase = BUILT_IN_ATOMIC_XOR_FETCH_N;
      break;
    default:
      return false;
    }

  /* Make sure the expression is of the proper form.  */
  if (operand_equal_p (gimple_assign_rhs1 (stmt), loaded_val, 0))
    rhs = gimple_assign_rhs2 (stmt);
  else if (commutative_tree_code (gimple_assign_rhs_code (stmt))
	   && operand_equal_p (gimple_assign_rhs2 (stmt), loaded_val, 0))
    rhs = gimple_assign_rhs1 (stmt);
  else
    return false;

  tmpbase = ((enum built_in_function)
	     ((need_new ? newbase : oldbase) + index + 1));
  decl = builtin_decl_explicit (tmpbase);
  if (decl == NULL_TREE)
    return false;
  itype = TREE_TYPE (TREE_TYPE (decl));
  imode = TYPE_MODE (itype);

  /* We could test all of the various optabs involved, but the fact of the
     matter is that (with the exception of i486 vs i586 and xadd) all targets
     that support any atomic operation optab also implements compare-and-swap.
     Let optabs.c take care of expanding any compare-and-swap loop.  */
  if (!can_compare_and_swap_p (imode, true))
    return false;

  gsi = gsi_last_bb (load_bb);
  gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_ATOMIC_LOAD);

  /* OpenMP does not imply any barrier-like semantics on its atomic ops.
     It only requires that the operation happen atomically.  Thus we can
     use the RELAXED memory model.  */
  call = build_call_expr_loc (loc, decl, 3, addr,
			      fold_convert_loc (loc, itype, rhs),
			      build_int_cst (NULL, MEMMODEL_RELAXED));

  if (need_old || need_new)
    {
      lhs = need_old ? loaded_val : stored_val;
      call = fold_convert_loc (loc, TREE_TYPE (lhs), call);
      call = build2_loc (loc, MODIFY_EXPR, void_type_node, lhs, call);
    }
  else
    call = fold_convert_loc (loc, void_type_node, call);
  force_gimple_operand_gsi (&gsi, call, true, NULL_TREE, true, GSI_SAME_STMT);
  gsi_remove (&gsi, true);

  gsi = gsi_last_bb (store_bb);
  gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_ATOMIC_STORE);
  gsi_remove (&gsi, true);
  gsi = gsi_last_bb (store_bb);
  gsi_remove (&gsi, true);

  if (gimple_in_ssa_p (cfun))
    update_ssa (TODO_update_ssa_no_phi);

  return true;
}

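/* Illustration (not used by the pass): "#pragma omp atomic" over
   "x += n" matches the PLUS_EXPR case above and, when the captured
   value is not needed, collapses to a single fetch-op builtin with
   relaxed ordering.  */

static void
omp_atomic_add_model (int *x, int n)
{
  __atomic_fetch_add (x, n, __ATOMIC_RELAXED);
}
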
/* A subroutine of expand_omp_atomic.  Implement the atomic operation as:

      oldval = *addr;
      repeat:
	newval = rhs;	 // with oldval replacing *addr in rhs
	oldval = __sync_val_compare_and_swap (addr, oldval, newval);
	if (oldval != newval)
	  goto repeat;

   INDEX is log2 of the size of the data type, and thus usable to find the
   index of the builtin decl.  */

static bool
expand_omp_atomic_pipeline (basic_block load_bb, basic_block store_bb,
			    tree addr, tree loaded_val, tree stored_val,
			    int index)
{
  tree loadedi, storedi, initial, new_storedi, old_vali;
  tree type, itype, cmpxchg, iaddr;
  gimple_stmt_iterator si;
  basic_block loop_header = single_succ (load_bb);
  gimple phi, stmt;
  edge e;
  enum built_in_function fncode;

  /* ??? We need a non-pointer interface to __atomic_compare_exchange in
     order to use the RELAXED memory model effectively.  */
  fncode = (enum built_in_function)((int)BUILT_IN_SYNC_VAL_COMPARE_AND_SWAP_N
				    + index + 1);
  cmpxchg = builtin_decl_explicit (fncode);
  if (cmpxchg == NULL_TREE)
    return false;
  type = TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (addr)));
  itype = TREE_TYPE (TREE_TYPE (cmpxchg));

  if (!can_compare_and_swap_p (TYPE_MODE (itype), true))
    return false;

  /* Load the initial value, replacing the GIMPLE_OMP_ATOMIC_LOAD.  */
  si = gsi_last_bb (load_bb);
  gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_LOAD);

  /* For floating-point values, we'll need to view-convert them to integers
     so that we can perform the atomic compare and swap.  Simplify the
     following code by always setting up the "i"ntegral variables.  */
  if (!INTEGRAL_TYPE_P (type) && !POINTER_TYPE_P (type))
    {
      tree iaddr_val;

      iaddr = create_tmp_reg (build_pointer_type_for_mode (itype, ptr_mode,
							   true), NULL);
      iaddr_val
	= force_gimple_operand_gsi (&si,
				    fold_convert (TREE_TYPE (iaddr), addr),
				    false, NULL_TREE, true, GSI_SAME_STMT);
      stmt = gimple_build_assign (iaddr, iaddr_val);
      gsi_insert_before (&si, stmt, GSI_SAME_STMT);
      loadedi = create_tmp_var (itype, NULL);
      if (gimple_in_ssa_p (cfun))
	loadedi = make_ssa_name (loadedi, NULL);
    }
  else
    {
      iaddr = addr;
      loadedi = loaded_val;
    }

  initial
    = force_gimple_operand_gsi (&si,
				build2 (MEM_REF, TREE_TYPE (TREE_TYPE (iaddr)),
					iaddr,
					build_int_cst (TREE_TYPE (iaddr), 0)),
				true, NULL_TREE, true, GSI_SAME_STMT);

  /* Move the value to the LOADEDI temporary.  */
  if (gimple_in_ssa_p (cfun))
    {
      gcc_assert (gimple_seq_empty_p (phi_nodes (loop_header)));
      phi = create_phi_node (loadedi, loop_header);
      SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, single_succ_edge (load_bb)),
	       initial);
    }
  else
    gsi_insert_before (&si,
		       gimple_build_assign (loadedi, initial),
		       GSI_SAME_STMT);
  if (loadedi != loaded_val)
    {
      gimple_stmt_iterator gsi2;
      tree x;

      x = build1 (VIEW_CONVERT_EXPR, type, loadedi);
      gsi2 = gsi_start_bb (loop_header);
      if (gimple_in_ssa_p (cfun))
	{
	  gimple stmt;
	  x = force_gimple_operand_gsi (&gsi2, x, true, NULL_TREE,
					true, GSI_SAME_STMT);
	  stmt = gimple_build_assign (loaded_val, x);
	  gsi_insert_before (&gsi2, stmt, GSI_SAME_STMT);
	}
      else
	{
	  x = build2 (MODIFY_EXPR, TREE_TYPE (loaded_val), loaded_val, x);
	  force_gimple_operand_gsi (&gsi2, x, true, NULL_TREE,
				    true, GSI_SAME_STMT);
	}
    }
  gsi_remove (&si, true);

  si = gsi_last_bb (store_bb);
  gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_STORE);

  if (iaddr == addr)
    storedi = stored_val;
  else
    storedi =
      force_gimple_operand_gsi (&si,
				build1 (VIEW_CONVERT_EXPR, itype,
					stored_val), true, NULL_TREE, true,
				GSI_SAME_STMT);

  /* Build the compare&swap statement.  */
  new_storedi = build_call_expr (cmpxchg, 3, iaddr, loadedi, storedi);
  new_storedi = force_gimple_operand_gsi (&si,
					  fold_convert (TREE_TYPE (loadedi),
							new_storedi),
					  true, NULL_TREE,
					  true, GSI_SAME_STMT);

  if (gimple_in_ssa_p (cfun))
    old_vali = loadedi;
  else
    {
      old_vali = create_tmp_var (TREE_TYPE (loadedi), NULL);
      stmt = gimple_build_assign (old_vali, loadedi);
      gsi_insert_before (&si, stmt, GSI_SAME_STMT);

      stmt = gimple_build_assign (loadedi, new_storedi);
      gsi_insert_before (&si, stmt, GSI_SAME_STMT);
    }

  /* Note that we always perform the comparison as an integer, even for
     floating point.  This allows the atomic operation to properly
     succeed even with NaNs and -0.0.  */
  stmt = gimple_build_cond_empty
	   (build2 (NE_EXPR, boolean_type_node,
		    new_storedi, old_vali));
  gsi_insert_before (&si, stmt, GSI_SAME_STMT);

  /* Update cfg.  */
  e = single_succ_edge (store_bb);
  e->flags &= ~EDGE_FALLTHRU;
  e->flags |= EDGE_FALSE_VALUE;

  e = make_edge (store_bb, loop_header, EDGE_TRUE_VALUE);

  /* Copy the new value to loadedi (we already did that before the condition
     if we are not in SSA).  */
  if (gimple_in_ssa_p (cfun))
    {
      phi = gimple_seq_first_stmt (phi_nodes (loop_header));
      SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, e), new_storedi);
    }

  /* Remove GIMPLE_OMP_ATOMIC_STORE.  */
  gsi_remove (&si, true);

  if (gimple_in_ssa_p (cfun))
    update_ssa (TODO_update_ssa_no_phi);

  /* ??? The above could use loop construction primitives.  */
  if (current_loops)
    loops_state_set (LOOPS_NEED_FIXUP);

  return true;
}

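/* Illustration (not used by the pass): the compare-and-swap loop built
   above, written as plain C for a float operand.  The float bits are
   carried in a 4-byte integer so the comparison is exact even for NaNs
   and -0.0, just as the view-converted LOADEDI/STOREDI temporaries are.
   A sketch only: the function name is hypothetical, the __atomic form
   of the builtin is used instead of __sync_val_compare_and_swap for
   brevity, and the pointer pun is shown without the care real code
   would need.  */

static void
omp_atomic_float_update_model (float *addr, float rhs)
{
  unsigned int *iaddr = (unsigned int *) addr;
  unsigned int oldval, newval;
  union { float f; unsigned int i; } u;

  oldval = *iaddr;			/* initial load */
  do
    {
      u.i = oldval;			/* loadedi viewed as float */
      u.f = u.f + rhs;			/* the statement being made atomic */
      newval = u.i;			/* storedi */
      /* On failure, oldval is refreshed with the current *iaddr.  */
    }
  while (!__atomic_compare_exchange_n (iaddr, &oldval, newval, false,
				       __ATOMIC_RELAXED, __ATOMIC_RELAXED));
}
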
/* A subroutine of expand_omp_atomic.  Implement the atomic operation as:

				  GOMP_atomic_start ();
				  *addr = rhs;
				  GOMP_atomic_end ();

   The result is not globally atomic, but works so long as all parallel
   references are within #pragma omp atomic directives.  According to
   responses received from omp@openmp.org, appears to be within spec.
   Which makes sense, since that's how several other compilers handle
   this situation as well.
   LOADED_VAL and ADDR are the operands of GIMPLE_OMP_ATOMIC_LOAD we're
   expanding.  STORED_VAL is the operand of the matching
   GIMPLE_OMP_ATOMIC_STORE.

   We replace
   GIMPLE_OMP_ATOMIC_LOAD (loaded_val, addr) with
   loaded_val = *addr;

   and GIMPLE_OMP_ATOMIC_STORE (stored_val) with
   *addr = stored_val;
*/

static bool
expand_omp_atomic_mutex (basic_block load_bb, basic_block store_bb,
			 tree addr, tree loaded_val, tree stored_val)
{
  gimple_stmt_iterator si;
  gimple stmt;
  tree t;

  si = gsi_last_bb (load_bb);
  gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_LOAD);

  t = builtin_decl_explicit (BUILT_IN_GOMP_ATOMIC_START);
  t = build_call_expr (t, 0);
  force_gimple_operand_gsi (&si, t, true, NULL_TREE, true, GSI_SAME_STMT);

  stmt = gimple_build_assign (loaded_val, build_simple_mem_ref (addr));
  gsi_insert_before (&si, stmt, GSI_SAME_STMT);
  gsi_remove (&si, true);

  si = gsi_last_bb (store_bb);
  gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_STORE);

  stmt = gimple_build_assign (build_simple_mem_ref (unshare_expr (addr)),
			      stored_val);
  gsi_insert_before (&si, stmt, GSI_SAME_STMT);

  t = builtin_decl_explicit (BUILT_IN_GOMP_ATOMIC_END);
  t = build_call_expr (t, 0);
  force_gimple_operand_gsi (&si, t, true, NULL_TREE, true, GSI_SAME_STMT);
  gsi_remove (&si, true);

  if (gimple_in_ssa_p (cfun))
    update_ssa (TODO_update_ssa_no_phi);
  return true;
}

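/* Illustration (not used by the pass): the mutex fallback emitted
   above, at source level.  GOMP_atomic_start/end are the libgomp entry
   points named by the builtins; the function name is hypothetical.  */

extern void GOMP_atomic_start (void);
extern void GOMP_atomic_end (void);

static void
omp_atomic_mutex_model (double *addr, double rhs)
{
  GOMP_atomic_start ();		/* one global lock shared by all atomics */
  *addr = *addr + rhs;
  GOMP_atomic_end ();
}
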
/* Expand a GIMPLE_OMP_ATOMIC statement.  We try to expand
   using expand_omp_atomic_fetch_op.  If it failed, we try to
   call expand_omp_atomic_pipeline, and if it fails too, the
   ultimate fallback is wrapping the operation in a mutex
   (expand_omp_atomic_mutex).  REGION is the atomic region built
   by build_omp_regions_1().  */

static void
expand_omp_atomic (struct omp_region *region)
{
  basic_block load_bb = region->entry, store_bb = region->exit;
  gimple load = last_stmt (load_bb), store = last_stmt (store_bb);
  tree loaded_val = gimple_omp_atomic_load_lhs (load);
  tree addr = gimple_omp_atomic_load_rhs (load);
  tree stored_val = gimple_omp_atomic_store_val (store);
  tree type = TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (addr)));
  HOST_WIDE_INT index;

  /* Make sure the type is one of the supported sizes.  */
  index = tree_low_cst (TYPE_SIZE_UNIT (type), 1);
  index = exact_log2 (index);
  if (index >= 0 && index <= 4)
    {
      unsigned int align = TYPE_ALIGN_UNIT (type);

      /* __sync builtins require strict data alignment.  */
      if (exact_log2 (align) >= index)
	{
	  /* Atomic load.  */
	  if (loaded_val == stored_val
	      && (GET_MODE_CLASS (TYPE_MODE (type)) == MODE_INT
		  || GET_MODE_CLASS (TYPE_MODE (type)) == MODE_FLOAT)
	      && GET_MODE_BITSIZE (TYPE_MODE (type)) <= BITS_PER_WORD
	      && expand_omp_atomic_load (load_bb, addr, loaded_val, index))
	    return;

	  /* Atomic store.  */
	  if ((GET_MODE_CLASS (TYPE_MODE (type)) == MODE_INT
	       || GET_MODE_CLASS (TYPE_MODE (type)) == MODE_FLOAT)
	      && GET_MODE_BITSIZE (TYPE_MODE (type)) <= BITS_PER_WORD
	      && store_bb == single_succ (load_bb)
	      && first_stmt (store_bb) == store
	      && expand_omp_atomic_store (load_bb, addr, loaded_val,
					  stored_val, index))
	    return;

	  /* When possible, use specialized atomic update functions.  */
	  if ((INTEGRAL_TYPE_P (type) || POINTER_TYPE_P (type))
	      && store_bb == single_succ (load_bb)
	      && expand_omp_atomic_fetch_op (load_bb, addr,
					     loaded_val, stored_val, index))
	    return;

	  /* If we don't have specialized __sync builtins, try and implement
	     as a compare and swap loop.  */
	  if (expand_omp_atomic_pipeline (load_bb, store_bb, addr,
					  loaded_val, stored_val, index))
	    return;
	}
    }

  /* The ultimate fallback is wrapping the operation in a mutex.  */
  expand_omp_atomic_mutex (load_bb, store_bb, addr, loaded_val, stored_val);
}

/* Expand the parallel region tree rooted at REGION.  Expansion
   proceeds in depth-first order.  Innermost regions are expanded
   first.  This way, parallel regions that require a new function to
   be created (e.g., GIMPLE_OMP_PARALLEL) can be expanded without having any
   internal dependencies in their body.  */

static void
expand_omp (struct omp_region *region)
{
  while (region)
    {
      location_t saved_location;

      /* First, determine whether this is a combined parallel+workshare
	 region.  */
      if (region->type == GIMPLE_OMP_PARALLEL)
	determine_parallel_type (region);

      if (region->inner)
	expand_omp (region->inner);

      saved_location = input_location;
      if (gimple_has_location (last_stmt (region->entry)))
	input_location = gimple_location (last_stmt (region->entry));

      switch (region->type)
	{
	case GIMPLE_OMP_PARALLEL:
	case GIMPLE_OMP_TASK:
	  expand_omp_taskreg (region);
	  break;

	case GIMPLE_OMP_FOR:
	  expand_omp_for (region);
	  break;

	case GIMPLE_OMP_SECTIONS:
	  expand_omp_sections (region);
	  break;

	case GIMPLE_OMP_SECTION:
	  /* Individual omp sections are handled together with their
	     parent GIMPLE_OMP_SECTIONS region.  */
	  break;

	case GIMPLE_OMP_SINGLE:
	  expand_omp_single (region);
	  break;

	case GIMPLE_OMP_MASTER:
	case GIMPLE_OMP_ORDERED:
	case GIMPLE_OMP_CRITICAL:
	  expand_omp_synch (region);
	  break;

	case GIMPLE_OMP_ATOMIC_LOAD:
	  expand_omp_atomic (region);
	  break;

	default:
	  gcc_unreachable ();
	}

      input_location = saved_location;
      region = region->next;
    }
}

/* Helper for build_omp_regions.  Scan the dominator tree starting at
   block BB.  PARENT is the region that contains BB.  If SINGLE_TREE is
   true, the function ends once a single tree is built (otherwise, whole
   forest of OMP constructs may be built).  */

static void
build_omp_regions_1 (basic_block bb, struct omp_region *parent,
		     bool single_tree)
{
  gimple_stmt_iterator gsi;
  gimple stmt;
  basic_block son;

  gsi = gsi_last_bb (bb);
  if (!gsi_end_p (gsi) && is_gimple_omp (gsi_stmt (gsi)))
    {
      struct omp_region *region;
      enum gimple_code code;

      stmt = gsi_stmt (gsi);
      code = gimple_code (stmt);
      if (code == GIMPLE_OMP_RETURN)
	{
	  /* STMT is the return point out of region PARENT.  Mark it
	     as the exit point and make PARENT the immediately
	     enclosing region.  */
	  gcc_assert (parent);
	  region = parent;
	  region->exit = bb;
	  parent = parent->outer;
	}
      else if (code == GIMPLE_OMP_ATOMIC_STORE)
	{
	  /* GIMPLE_OMP_ATOMIC_STORE is analogous to
	     GIMPLE_OMP_RETURN, but matches with
	     GIMPLE_OMP_ATOMIC_LOAD.  */
	  gcc_assert (parent);
	  gcc_assert (parent->type == GIMPLE_OMP_ATOMIC_LOAD);
	  region = parent;
	  region->exit = bb;
	  parent = parent->outer;
	}

      else if (code == GIMPLE_OMP_CONTINUE)
	{
	  gcc_assert (parent);
	  parent->cont = bb;
	}
      else if (code == GIMPLE_OMP_SECTIONS_SWITCH)
	{
	  /* GIMPLE_OMP_SECTIONS_SWITCH is part of
	     GIMPLE_OMP_SECTIONS, and we do nothing for it.  */
	  ;
	}
      else
	{
	  /* Otherwise, this directive becomes the parent for a new
	     region.  */
	  region = new_omp_region (bb, code, parent);
	  parent = region;
	}
    }

  if (single_tree && !parent)
    return;

  for (son = first_dom_son (CDI_DOMINATORS, bb);
       son;
       son = next_dom_son (CDI_DOMINATORS, son))
    build_omp_regions_1 (son, parent, single_tree);
}

/* Builds the tree of OMP regions rooted at ROOT, storing it to
   root_omp_region.  */

static void
build_omp_regions_root (basic_block root)
{
  gcc_assert (root_omp_region == NULL);
  build_omp_regions_1 (root, NULL, true);
  gcc_assert (root_omp_region != NULL);
}

/* Expands omp construct (and its subconstructs) starting in HEAD.  */

void
omp_expand_local (basic_block head)
{
  build_omp_regions_root (head);
  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "\nOMP region tree\n\n");
      dump_omp_region (dump_file, root_omp_region, 0);
      fprintf (dump_file, "\n");
    }

  remove_exit_barriers (root_omp_region);
  expand_omp (root_omp_region);

  free_omp_regions ();
}

/* Scan the CFG and build a tree of OMP regions.  Return the root of
   the OMP region tree.  */

static void
build_omp_regions (void)
{
  gcc_assert (root_omp_region == NULL);
  calculate_dominance_info (CDI_DOMINATORS);
  build_omp_regions_1 (ENTRY_BLOCK_PTR, NULL, false);
}

/* Main entry point for expanding OMP-GIMPLE into runtime calls.  */

static unsigned int
execute_expand_omp (void)
{
  build_omp_regions ();

  if (!root_omp_region)
    return 0;

  if (dump_file)
    {
      fprintf (dump_file, "\nOMP region tree\n\n");
      dump_omp_region (dump_file, root_omp_region, 0);
      fprintf (dump_file, "\n");
    }

  remove_exit_barriers (root_omp_region);

  expand_omp (root_omp_region);

  cleanup_tree_cfg ();

  free_omp_regions ();

  return 0;
}

/* OMP expansion -- the default pass, run before creation of SSA form.  */

static bool
gate_expand_omp (void)
{
  return (flag_openmp != 0 && !seen_error ());
}
struct gimple_opt_pass pass_expand_omp =
{
 {
  GIMPLE_PASS,
  "ompexp",				/* name */
  OPTGROUP_NONE,			/* optinfo_flags */
  gate_expand_omp,			/* gate */
  execute_expand_omp,			/* execute */
  NULL,					/* sub */
  NULL,					/* next */
  0,					/* static_pass_number */
  TV_NONE,				/* tv_id */
  PROP_gimple_any,			/* properties_required */
  0,					/* properties_provided */
  0,					/* properties_destroyed */
  0,					/* todo_flags_start */
  0					/* todo_flags_finish */
 }
};

5825 /* Lower the OpenMP sections directive in the current statement in GSI_P.
5826 CTX is the enclosing OMP context for the current statement. */
5829 lower_omp_sections (gimple_stmt_iterator
*gsi_p
, omp_context
*ctx
)
5831 tree block
, control
;
5832 gimple_stmt_iterator tgsi
;
5833 gimple stmt
, new_stmt
, bind
, t
;
5834 gimple_seq ilist
, dlist
, olist
, new_body
;
5835 struct gimplify_ctx gctx
;
5837 stmt
= gsi_stmt (*gsi_p
);
5839 push_gimplify_context (&gctx
);
5843 lower_rec_input_clauses (gimple_omp_sections_clauses (stmt
),
5844 &ilist
, &dlist
, ctx
);
5846 new_body
= gimple_omp_body (stmt
);
5847 gimple_omp_set_body (stmt
, NULL
);
5848 tgsi
= gsi_start (new_body
);
5849 for (; !gsi_end_p (tgsi
); gsi_next (&tgsi
))
5854 sec_start
= gsi_stmt (tgsi
);
5855 sctx
= maybe_lookup_ctx (sec_start
);
5858 lower_omp (gimple_omp_body_ptr (sec_start
), sctx
);
5859 gsi_insert_seq_after (&tgsi
, gimple_omp_body (sec_start
),
5860 GSI_CONTINUE_LINKING
);
5861 gimple_omp_set_body (sec_start
, NULL
);
5863 if (gsi_one_before_end_p (tgsi
))
5865 gimple_seq l
= NULL
;
5866 lower_lastprivate_clauses (gimple_omp_sections_clauses (stmt
), NULL
,
5868 gsi_insert_seq_after (&tgsi
, l
, GSI_CONTINUE_LINKING
);
5869 gimple_omp_section_set_last (sec_start
);
5872 gsi_insert_after (&tgsi
, gimple_build_omp_return (false),
5873 GSI_CONTINUE_LINKING
);
5876 block
= make_node (BLOCK
);
5877 bind
= gimple_build_bind (NULL
, new_body
, block
);
5880 lower_reduction_clauses (gimple_omp_sections_clauses (stmt
), &olist
, ctx
);
5882 block
= make_node (BLOCK
);
5883 new_stmt
= gimple_build_bind (NULL
, NULL
, block
);
5884 gsi_replace (gsi_p
, new_stmt
, true);
5886 pop_gimplify_context (new_stmt
);
5887 gimple_bind_append_vars (new_stmt
, ctx
->block_vars
);
5888 BLOCK_VARS (block
) = gimple_bind_vars (bind
);
5889 if (BLOCK_VARS (block
))
5890 TREE_USED (block
) = 1;
5893 gimple_seq_add_seq (&new_body
, ilist
);
5894 gimple_seq_add_stmt (&new_body
, stmt
);
5895 gimple_seq_add_stmt (&new_body
, gimple_build_omp_sections_switch ());
5896 gimple_seq_add_stmt (&new_body
, bind
);
5898 control
= create_tmp_var (unsigned_type_node
, ".section");
5899 t
= gimple_build_omp_continue (control
, control
);
5900 gimple_omp_sections_set_control (stmt
, control
);
5901 gimple_seq_add_stmt (&new_body
, t
);
5903 gimple_seq_add_seq (&new_body
, olist
);
5904 gimple_seq_add_seq (&new_body
, dlist
);
5906 new_body
= maybe_catch_exception (new_body
);
5908 t
= gimple_build_omp_return
5909 (!!find_omp_clause (gimple_omp_sections_clauses (stmt
),
5910 OMP_CLAUSE_NOWAIT
));
5911 gimple_seq_add_stmt (&new_body
, t
);
5913 gimple_bind_set_body (new_stmt
, new_body
);
/* A subroutine of lower_omp_single.  Expand the simple form of
   a GIMPLE_OMP_SINGLE, without a copyprivate clause:

	if (GOMP_single_start ())
	  BODY;
	[ GOMP_barrier (); ]	-> unless 'nowait' is present.

  FIXME.  It may be better to delay expanding the logic of this until
  pass_expand_omp.  The expanded logic may make the job more difficult
  to a synchronization analysis pass.  */

static void
lower_omp_single_simple (gimple single_stmt, gimple_seq *pre_p)
{
  location_t loc = gimple_location (single_stmt);
  tree tlabel = create_artificial_label (loc);
  tree flabel = create_artificial_label (loc);
  gimple call, cond;
  tree lhs, decl;

  decl = builtin_decl_explicit (BUILT_IN_GOMP_SINGLE_START);
  lhs = create_tmp_var (TREE_TYPE (TREE_TYPE (decl)), NULL);
  call = gimple_build_call (decl, 0);
  gimple_call_set_lhs (call, lhs);
  gimple_seq_add_stmt (pre_p, call);

  cond = gimple_build_cond (EQ_EXPR, lhs,
			    fold_convert_loc (loc, TREE_TYPE (lhs),
					      boolean_true_node),
			    tlabel, flabel);
  gimple_seq_add_stmt (pre_p, cond);
  gimple_seq_add_stmt (pre_p, gimple_build_label (tlabel));
  gimple_seq_add_seq (pre_p, gimple_omp_body (single_stmt));
  gimple_seq_add_stmt (pre_p, gimple_build_label (flabel));
}

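/* Illustration (not used by the pass): the source-level shape produced
   by lower_omp_single_simple.  GOMP_single_start is the libgomp entry
   point named by the builtin; it returns true in exactly one of the
   encountering threads.  The function name is hypothetical.  */

extern _Bool GOMP_single_start (void);

static void
omp_single_simple_model (void (*body) (void))
{
  if (GOMP_single_start ())
    body ();			/* only one thread runs the region */
  /* A barrier follows unless the directive had 'nowait'; it is placed
     later, by expand_omp_single.  */
}
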
/* A subroutine of lower_omp_single.  Expand the simple form of
   a GIMPLE_OMP_SINGLE, with a copyprivate clause:

	#pragma omp single copyprivate (a, b, c)

   Create a new structure to hold copies of 'a', 'b' and 'c' and emit:

	{
	  if ((copyout_p = GOMP_single_copy_start ()) == NULL)
	    {
	      BODY;
	      copyout.a = a;
	      copyout.b = b;
	      copyout.c = c;
	      GOMP_single_copy_end (&copyout);
	    }
	  else
	    {
	      a = copyout_p->a;
	      b = copyout_p->b;
	      c = copyout_p->c;
	    }
	  GOMP_barrier ();
	}

  FIXME.  It may be better to delay expanding the logic of this until
  pass_expand_omp.  The expanded logic may make the job more difficult
  to a synchronization analysis pass.  */

static void
lower_omp_single_copy (gimple single_stmt, gimple_seq *pre_p, omp_context *ctx)
{
  tree ptr_type, t, l0, l1, l2, bfn_decl;
  gimple_seq copyin_seq;
  location_t loc = gimple_location (single_stmt);

  ctx->sender_decl = create_tmp_var (ctx->record_type, ".omp_copy_o");

  ptr_type = build_pointer_type (ctx->record_type);
  ctx->receiver_decl = create_tmp_var (ptr_type, ".omp_copy_i");

  l0 = create_artificial_label (loc);
  l1 = create_artificial_label (loc);
  l2 = create_artificial_label (loc);

  bfn_decl = builtin_decl_explicit (BUILT_IN_GOMP_SINGLE_COPY_START);
  t = build_call_expr_loc (loc, bfn_decl, 0);
  t = fold_convert_loc (loc, ptr_type, t);
  gimplify_assign (ctx->receiver_decl, t, pre_p);

  t = build2 (EQ_EXPR, boolean_type_node, ctx->receiver_decl,
	      build_int_cst (ptr_type, 0));
  t = build3 (COND_EXPR, void_type_node, t,
	      build_and_jump (&l0), build_and_jump (&l1));
  gimplify_and_add (t, pre_p);

  gimple_seq_add_stmt (pre_p, gimple_build_label (l0));

  gimple_seq_add_seq (pre_p, gimple_omp_body (single_stmt));

  copyin_seq = NULL;
  lower_copyprivate_clauses (gimple_omp_single_clauses (single_stmt), pre_p,
			     &copyin_seq, ctx);

  t = build_fold_addr_expr_loc (loc, ctx->sender_decl);
  bfn_decl = builtin_decl_explicit (BUILT_IN_GOMP_SINGLE_COPY_END);
  t = build_call_expr_loc (loc, bfn_decl, 1, t);
  gimplify_and_add (t, pre_p);

  t = build_and_jump (&l2);
  gimplify_and_add (t, pre_p);

  gimple_seq_add_stmt (pre_p, gimple_build_label (l1));

  gimple_seq_add_seq (pre_p, copyin_seq);

  gimple_seq_add_stmt (pre_p, gimple_build_label (l2));
}

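/* Illustration (not used by the pass): the copyprivate protocol built
   above, for a single copied variable 'a'.  GOMP_single_copy_start
   returns NULL in the thread that executes the body; every other
   thread receives a pointer to the executing thread's copyout
   structure.  The struct layout and function names are hypothetical.  */

extern void *GOMP_single_copy_start (void);
extern void GOMP_single_copy_end (void *);

struct omp_copy_model { int a; };

static void
omp_single_copyprivate_model (int *a, void (*body) (void))
{
  struct omp_copy_model copyout, *copyout_p;

  copyout_p = (struct omp_copy_model *) GOMP_single_copy_start ();
  if (copyout_p == NULL)
    {
      body ();			/* the single thread runs the region */
      copyout.a = *a;		/* ... then publishes its values */
      GOMP_single_copy_end (&copyout);
    }
  else
    *a = copyout_p->a;		/* everyone else copies them in */
  /* The terminal barrier is mandatory here; see expand_omp_single.  */
}
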
/* Expand code for an OpenMP single directive.  */

static void
lower_omp_single (gimple_stmt_iterator *gsi_p, omp_context *ctx)
{
  tree block;
  gimple t, bind, single_stmt = gsi_stmt (*gsi_p);
  gimple_seq bind_body, dlist;
  struct gimplify_ctx gctx;

  push_gimplify_context (&gctx);

  block = make_node (BLOCK);
  bind = gimple_build_bind (NULL, NULL, block);
  gsi_replace (gsi_p, bind, true);
  bind_body = NULL;
  dlist = NULL;
  lower_rec_input_clauses (gimple_omp_single_clauses (single_stmt),
			   &bind_body, &dlist, ctx);
  lower_omp (gimple_omp_body_ptr (single_stmt), ctx);

  gimple_seq_add_stmt (&bind_body, single_stmt);

  if (ctx->record_type)
    lower_omp_single_copy (single_stmt, &bind_body, ctx);
  else
    lower_omp_single_simple (single_stmt, &bind_body);

  gimple_omp_set_body (single_stmt, NULL);

  gimple_seq_add_seq (&bind_body, dlist);

  bind_body = maybe_catch_exception (bind_body);

  t = gimple_build_omp_return
	(!!find_omp_clause (gimple_omp_single_clauses (single_stmt),
			    OMP_CLAUSE_NOWAIT));
  gimple_seq_add_stmt (&bind_body, t);
  gimple_bind_set_body (bind, bind_body);

  pop_gimplify_context (bind);

  gimple_bind_append_vars (bind, ctx->block_vars);
  BLOCK_VARS (block) = ctx->block_vars;
  if (BLOCK_VARS (block))
    TREE_USED (block) = 1;
}

/* Expand code for an OpenMP master directive.  */

static void
lower_omp_master (gimple_stmt_iterator *gsi_p, omp_context *ctx)
{
  tree block, lab = NULL, x, bfn_decl;
  gimple stmt = gsi_stmt (*gsi_p), bind;
  location_t loc = gimple_location (stmt);
  gimple_seq tseq;
  struct gimplify_ctx gctx;

  push_gimplify_context (&gctx);

  block = make_node (BLOCK);
  bind = gimple_build_bind (NULL, NULL, block);
  gsi_replace (gsi_p, bind, true);
  gimple_bind_add_stmt (bind, stmt);

  bfn_decl = builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM);
  x = build_call_expr_loc (loc, bfn_decl, 0);
  x = build2 (EQ_EXPR, boolean_type_node, x, integer_zero_node);
  x = build3 (COND_EXPR, void_type_node, x, NULL, build_and_jump (&lab));
  tseq = NULL;
  gimplify_and_add (x, &tseq);
  gimple_bind_add_seq (bind, tseq);

  lower_omp (gimple_omp_body_ptr (stmt), ctx);
  gimple_omp_set_body (stmt, maybe_catch_exception (gimple_omp_body (stmt)));
  gimple_bind_add_seq (bind, gimple_omp_body (stmt));
  gimple_omp_set_body (stmt, NULL);

  gimple_bind_add_stmt (bind, gimple_build_label (lab));

  gimple_bind_add_stmt (bind, gimple_build_omp_return (true));

  pop_gimplify_context (bind);

  gimple_bind_append_vars (bind, ctx->block_vars);
  BLOCK_VARS (block) = ctx->block_vars;
}
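
/* Illustration, in source terms (a sketch; the comparison and the jump
   over the body are built directly as trees above):

       #pragma omp master
         BODY;

   becomes the moral equivalent of

       if (omp_get_thread_num () == 0)
         BODY;

   followed by an OMP_RETURN with nowait set, since master implies no
   barrier.  */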
/* Expand code for an OpenMP ordered directive.  */

static void
lower_omp_ordered (gimple_stmt_iterator *gsi_p, omp_context *ctx)
{
  tree block;
  gimple stmt = gsi_stmt (*gsi_p), bind, x;
  struct gimplify_ctx gctx;

  push_gimplify_context (&gctx);

  block = make_node (BLOCK);
  bind = gimple_build_bind (NULL, NULL, block);
  gsi_replace (gsi_p, bind, true);
  gimple_bind_add_stmt (bind, stmt);

  x = gimple_build_call (builtin_decl_explicit (BUILT_IN_GOMP_ORDERED_START),
                         0);
  gimple_bind_add_stmt (bind, x);

  lower_omp (gimple_omp_body_ptr (stmt), ctx);
  gimple_omp_set_body (stmt, maybe_catch_exception (gimple_omp_body (stmt)));
  gimple_bind_add_seq (bind, gimple_omp_body (stmt));
  gimple_omp_set_body (stmt, NULL);

  x = gimple_build_call (builtin_decl_explicit (BUILT_IN_GOMP_ORDERED_END), 0);
  gimple_bind_add_stmt (bind, x);

  gimple_bind_add_stmt (bind, gimple_build_omp_return (true));

  pop_gimplify_context (bind);

  gimple_bind_append_vars (bind, ctx->block_vars);
  BLOCK_VARS (block) = gimple_bind_vars (bind);
}
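
/* Sketch of the result:

       GOMP_ordered_start ();
       BODY;
       GOMP_ordered_end ();
       OMP_RETURN

   with BODY wrapped by maybe_catch_exception as needed.  */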
/* Gimplify a GIMPLE_OMP_CRITICAL statement.  This is a relatively simple
   substitution of a couple of function calls.  But in the NAMED case,
   requires that languages coordinate a symbol name.  It is therefore
   best put here in common code.  */

static GTY((param1_is (tree), param2_is (tree)))
  splay_tree critical_name_mutexes;

static void
lower_omp_critical (gimple_stmt_iterator *gsi_p, omp_context *ctx)
{
  tree block;
  tree name, lock, unlock;
  gimple stmt = gsi_stmt (*gsi_p), bind;
  location_t loc = gimple_location (stmt);
  gimple_seq tbody;
  struct gimplify_ctx gctx;

  name = gimple_omp_critical_name (stmt);
  if (name)
    {
      tree decl;
      splay_tree_node n;

      if (!critical_name_mutexes)
        critical_name_mutexes
          = splay_tree_new_ggc (splay_tree_compare_pointers,
                                ggc_alloc_splay_tree_tree_node_tree_node_splay_tree_s,
                                ggc_alloc_splay_tree_tree_node_tree_node_splay_tree_node_s);

      n = splay_tree_lookup (critical_name_mutexes, (splay_tree_key) name);
      if (n == NULL)
        {
          char *new_str;

          decl = create_tmp_var_raw (ptr_type_node, NULL);

          new_str = ACONCAT ((".gomp_critical_user_",
                              IDENTIFIER_POINTER (name), NULL));
          DECL_NAME (decl) = get_identifier (new_str);
          TREE_PUBLIC (decl) = 1;
          TREE_STATIC (decl) = 1;
          DECL_COMMON (decl) = 1;
          DECL_ARTIFICIAL (decl) = 1;
          DECL_IGNORED_P (decl) = 1;
          varpool_finalize_decl (decl);

          splay_tree_insert (critical_name_mutexes, (splay_tree_key) name,
                             (splay_tree_value) decl);
        }
      else
        decl = (tree) n->value;

      lock = builtin_decl_explicit (BUILT_IN_GOMP_CRITICAL_NAME_START);
      lock = build_call_expr_loc (loc, lock, 1, build_fold_addr_expr_loc (loc, decl));

      unlock = builtin_decl_explicit (BUILT_IN_GOMP_CRITICAL_NAME_END);
      unlock = build_call_expr_loc (loc, unlock, 1,
                                    build_fold_addr_expr_loc (loc, decl));
    }
  else
    {
      lock = builtin_decl_explicit (BUILT_IN_GOMP_CRITICAL_START);
      lock = build_call_expr_loc (loc, lock, 0);

      unlock = builtin_decl_explicit (BUILT_IN_GOMP_CRITICAL_END);
      unlock = build_call_expr_loc (loc, unlock, 0);
    }

  push_gimplify_context (&gctx);

  block = make_node (BLOCK);
  bind = gimple_build_bind (NULL, NULL, block);
  gsi_replace (gsi_p, bind, true);
  gimple_bind_add_stmt (bind, stmt);

  tbody = gimple_bind_body (bind);
  gimplify_and_add (lock, &tbody);
  gimple_bind_set_body (bind, tbody);

  lower_omp (gimple_omp_body_ptr (stmt), ctx);
  gimple_omp_set_body (stmt, maybe_catch_exception (gimple_omp_body (stmt)));
  gimple_bind_add_seq (bind, gimple_omp_body (stmt));
  gimple_omp_set_body (stmt, NULL);

  tbody = gimple_bind_body (bind);
  gimplify_and_add (unlock, &tbody);
  gimple_bind_set_body (bind, tbody);

  gimple_bind_add_stmt (bind, gimple_build_omp_return (true));

  pop_gimplify_context (bind);
  gimple_bind_append_vars (bind, ctx->block_vars);
  BLOCK_VARS (block) = gimple_bind_vars (bind);
}
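
/* Illustration of both forms (the GOMP_* names are the real libgomp
   entry points; the surrounding C is a sketch):

       #pragma omp critical        =>  GOMP_critical_start ();
         BODY;                         BODY;
                                       GOMP_critical_end ();

       #pragma omp critical (foo)  =>  GOMP_critical_name_start (&.gomp_critical_user_foo);
                                       BODY;
                                       GOMP_critical_name_end (&.gomp_critical_user_foo);

   .gomp_critical_user_foo is the TREE_PUBLIC, DECL_COMMON pointer
   variable created above, so every translation unit naming critical
   section "foo" shares a single mutex.  */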
/* A subroutine of lower_omp_for.  Generate code to emit the predicate
   for a lastprivate clause.  Given a loop control predicate of (V
   cond N2), we gate the clause on (!(V cond N2)).  The lowered form
   is appended to *DLIST, iterator initialization is appended to
   *BODY_P.  */

static void
lower_omp_for_lastprivate (struct omp_for_data *fd, gimple_seq *body_p,
                           gimple_seq *dlist, struct omp_context *ctx)
{
  tree clauses, cond, vinit;
  enum tree_code cond_code;
  gimple_seq stmts;

  cond_code = fd->loop.cond_code;
  cond_code = cond_code == LT_EXPR ? GE_EXPR : LE_EXPR;

  /* When possible, use a strict equality expression.  This can let VRP
     type optimizations deduce the value and remove a copy.  */
  if (host_integerp (fd->loop.step, 0))
    {
      HOST_WIDE_INT step = TREE_INT_CST_LOW (fd->loop.step);
      if (step == 1 || step == -1)
        cond_code = EQ_EXPR;
    }

  cond = build2 (cond_code, boolean_type_node, fd->loop.v, fd->loop.n2);

  clauses = gimple_omp_for_clauses (fd->for_stmt);
  stmts = NULL;
  lower_lastprivate_clauses (clauses, cond, &stmts, ctx);
  if (!gimple_seq_empty_p (stmts))
    {
      gimple_seq_add_seq (&stmts, *dlist);
      *dlist = stmts;

      /* Optimize: v = 0 is usually cheaper than v = some_other_constant.  */
      vinit = fd->loop.n1;
      if (cond_code == EQ_EXPR
          && host_integerp (fd->loop.n2, 0)
          && ! integer_zerop (fd->loop.n2))
        vinit = build_int_cst (TREE_TYPE (fd->loop.v), 0);

      /* Initialize the iterator variable, so that threads that don't execute
         any iterations don't execute the lastprivate clauses by accident.  */
      gimplify_assign (fd->loop.v, vinit, body_p);
    }
}
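
/* Example: for "#pragma omp for lastprivate (x)" over
   "for (i = 0; i < n; i++)", the step is 1 so the gate becomes a
   strict equality, and the code appended to *DLIST is roughly

       if (i == n)
         x = x_priv;

   while "i = 0;" is appended to *BODY_P so that a thread receiving no
   iterations cannot satisfy the gate by accident.  A sketch only:
   "x_priv" stands for the privatized copy created by
   lower_lastprivate_clauses.  */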
/* Lower code for an OpenMP loop directive.  */

static void
lower_omp_for (gimple_stmt_iterator *gsi_p, omp_context *ctx)
{
  tree *rhs_p, block;
  struct omp_for_data fd;
  gimple stmt = gsi_stmt (*gsi_p), new_stmt;
  gimple_seq omp_for_body, body, dlist;
  size_t i;
  struct gimplify_ctx gctx;

  push_gimplify_context (&gctx);

  lower_omp (gimple_omp_for_pre_body_ptr (stmt), ctx);
  lower_omp (gimple_omp_body_ptr (stmt), ctx);

  block = make_node (BLOCK);
  new_stmt = gimple_build_bind (NULL, NULL, block);
  /* Replace at gsi right away, so that 'stmt' is no member
     of a sequence anymore as we're going to add to a different
     one below.  */
  gsi_replace (gsi_p, new_stmt, true);

  /* Move declaration of temporaries in the loop body before we make
     it go away.  */
  omp_for_body = gimple_omp_body (stmt);
  if (!gimple_seq_empty_p (omp_for_body)
      && gimple_code (gimple_seq_first_stmt (omp_for_body)) == GIMPLE_BIND)
    {
      tree vars = gimple_bind_vars (gimple_seq_first_stmt (omp_for_body));
      gimple_bind_append_vars (new_stmt, vars);
    }

  /* The pre-body and input clauses go before the lowered GIMPLE_OMP_FOR.  */
  body = NULL;
  dlist = NULL;
  lower_rec_input_clauses (gimple_omp_for_clauses (stmt), &body, &dlist, ctx);
  gimple_seq_add_seq (&body, gimple_omp_for_pre_body (stmt));

  /* Lower the header expressions.  At this point, we can assume that
     the header is of the form:

        #pragma omp for (V = VAL1; V {<|>|<=|>=} VAL2; V = V [+-] VAL3)

     We just need to make sure that VAL1, VAL2 and VAL3 are lowered
     using the .omp_data_s mapping, if needed.  */
  for (i = 0; i < gimple_omp_for_collapse (stmt); i++)
    {
      rhs_p = gimple_omp_for_initial_ptr (stmt, i);
      if (!is_gimple_min_invariant (*rhs_p))
        *rhs_p = get_formal_tmp_var (*rhs_p, &body);

      rhs_p = gimple_omp_for_final_ptr (stmt, i);
      if (!is_gimple_min_invariant (*rhs_p))
        *rhs_p = get_formal_tmp_var (*rhs_p, &body);

      rhs_p = &TREE_OPERAND (gimple_omp_for_incr (stmt, i), 1);
      if (!is_gimple_min_invariant (*rhs_p))
        *rhs_p = get_formal_tmp_var (*rhs_p, &body);
    }

  /* Once lowered, extract the bounds and clauses.  */
  extract_omp_for_data (stmt, &fd, NULL);

  lower_omp_for_lastprivate (&fd, &body, &dlist, ctx);

  gimple_seq_add_stmt (&body, stmt);
  gimple_seq_add_seq (&body, gimple_omp_body (stmt));

  gimple_seq_add_stmt (&body, gimple_build_omp_continue (fd.loop.v,
                                                         fd.loop.v));

  /* After the loop, add exit clauses.  */
  lower_reduction_clauses (gimple_omp_for_clauses (stmt), &body, ctx);
  gimple_seq_add_seq (&body, dlist);

  body = maybe_catch_exception (body);

  /* Region exit marker goes at the end of the loop body.  */
  gimple_seq_add_stmt (&body, gimple_build_omp_return (fd.have_nowait));

  pop_gimplify_context (new_stmt);

  gimple_bind_append_vars (new_stmt, ctx->block_vars);
  BLOCK_VARS (block) = gimple_bind_vars (new_stmt);
  if (BLOCK_VARS (block))
    TREE_USED (block) = 1;

  gimple_bind_set_body (new_stmt, body);
  gimple_omp_set_body (stmt, NULL);
  gimple_omp_for_set_pre_body (stmt, NULL);
}
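
/* The overall shape assembled above, as a sketch:

       <bind>
         <input clauses>
         <pre-body>
         GIMPLE_OMP_FOR           <- header operands now invariants/temps
         <lowered loop body>
         GIMPLE_OMP_CONTINUE (v, v)
         <reduction merges>
         <dlist: lastprivate gate, destructors>
         OMP_RETURN [nowait?]
       </bind>

   The OMP_FOR/OMP_CONTINUE/OMP_RETURN markers are later turned into
   explicit iteration-dispatch code by pass_expand_omp.  */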
/* Callback for walk_stmts.  Check if the current statement only contains
   GIMPLE_OMP_FOR or GIMPLE_OMP_SECTIONS.  */

static tree
check_combined_parallel (gimple_stmt_iterator *gsi_p,
                         bool *handled_ops_p,
                         struct walk_stmt_info *wi)
{
  int *info = (int *) wi->info;
  gimple stmt = gsi_stmt (*gsi_p);

  *handled_ops_p = true;
  switch (gimple_code (stmt))
    {
    WALK_SUBSTMTS;

    case GIMPLE_OMP_FOR:
    case GIMPLE_OMP_SECTIONS:
      *info = *info == 0 ? 1 : -1;
      break;
    default:
      *info = -1;
      break;
    }
  return NULL;
}
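
/* Example: for a parallel whose body is exactly one worksharing
   construct, as in

       #pragma omp parallel
       #pragma omp for
       for (i = 0; i < n; i++) ...

   the walk sees a single GIMPLE_OMP_FOR, *info ends up as 1, and the
   caller marks the parallel as combined.  Any second construct or
   other statement forces *info to -1.  */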
struct omp_taskcopy_context
{
  /* This field must be at the beginning, as we do "inheritance": Some
     callback functions for tree-inline.c (e.g., omp_copy_decl)
     receive a copy_body_data pointer that is up-casted to an
     omp_context pointer.  */
  copy_body_data cb;
  omp_context *ctx;
};

static tree
task_copyfn_copy_decl (tree var, copy_body_data *cb)
{
  struct omp_taskcopy_context *tcctx = (struct omp_taskcopy_context *) cb;

  if (splay_tree_lookup (tcctx->ctx->sfield_map, (splay_tree_key) var))
    return create_tmp_var (TREE_TYPE (var), NULL);

  return var;
}
static tree
task_copyfn_remap_type (struct omp_taskcopy_context *tcctx, tree orig_type)
{
  tree name, new_fields = NULL, type, f;

  type = lang_hooks.types.make_type (RECORD_TYPE);
  name = DECL_NAME (TYPE_NAME (orig_type));
  name = build_decl (gimple_location (tcctx->ctx->stmt),
                     TYPE_DECL, name, type);
  TYPE_NAME (type) = name;

  for (f = TYPE_FIELDS (orig_type); f ; f = TREE_CHAIN (f))
    {
      tree new_f = copy_node (f);
      DECL_CONTEXT (new_f) = type;
      TREE_TYPE (new_f) = remap_type (TREE_TYPE (f), &tcctx->cb);
      TREE_CHAIN (new_f) = new_fields;
      walk_tree (&DECL_SIZE (new_f), copy_tree_body_r, &tcctx->cb, NULL);
      walk_tree (&DECL_SIZE_UNIT (new_f), copy_tree_body_r, &tcctx->cb, NULL);
      walk_tree (&DECL_FIELD_OFFSET (new_f), copy_tree_body_r,
                 &tcctx->cb, NULL);
      new_fields = new_f;
      *pointer_map_insert (tcctx->cb.decl_map, f) = new_f;
    }
  TYPE_FIELDS (type) = nreverse (new_fields);
  layout_type (type);
  return type;
}
/* Create task copyfn.  */

static void
create_task_copyfn (gimple task_stmt, omp_context *ctx)
{
  struct function *child_cfun;
  tree child_fn, t, c, src, dst, f, sf, arg, sarg, decl;
  tree record_type, srecord_type, bind, list;
  bool record_needs_remap = false, srecord_needs_remap = false;
  splay_tree_node n;
  struct omp_taskcopy_context tcctx;
  struct gimplify_ctx gctx;
  location_t loc = gimple_location (task_stmt);

  child_fn = gimple_omp_task_copy_fn (task_stmt);
  child_cfun = DECL_STRUCT_FUNCTION (child_fn);
  gcc_assert (child_cfun->cfg == NULL);
  DECL_SAVED_TREE (child_fn) = alloc_stmt_list ();

  /* Reset DECL_CONTEXT on function arguments.  */
  for (t = DECL_ARGUMENTS (child_fn); t; t = DECL_CHAIN (t))
    DECL_CONTEXT (t) = child_fn;

  /* Populate the function.  */
  push_gimplify_context (&gctx);
  push_cfun (child_cfun);

  bind = build3 (BIND_EXPR, void_type_node, NULL, NULL, NULL);
  TREE_SIDE_EFFECTS (bind) = 1;
  list = NULL;
  DECL_SAVED_TREE (child_fn) = bind;
  DECL_SOURCE_LOCATION (child_fn) = gimple_location (task_stmt);

  /* Remap src and dst argument types if needed.  */
  record_type = ctx->record_type;
  srecord_type = ctx->srecord_type;
  for (f = TYPE_FIELDS (record_type); f ; f = DECL_CHAIN (f))
    if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
      {
        record_needs_remap = true;
        break;
      }
  for (f = TYPE_FIELDS (srecord_type); f ; f = DECL_CHAIN (f))
    if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
      {
        srecord_needs_remap = true;
        break;
      }

  if (record_needs_remap || srecord_needs_remap)
    {
      memset (&tcctx, '\0', sizeof (tcctx));
      tcctx.cb.src_fn = ctx->cb.src_fn;
      tcctx.cb.dst_fn = child_fn;
      tcctx.cb.src_node = cgraph_get_node (tcctx.cb.src_fn);
      gcc_checking_assert (tcctx.cb.src_node);
      tcctx.cb.dst_node = tcctx.cb.src_node;
      tcctx.cb.src_cfun = ctx->cb.src_cfun;
      tcctx.cb.copy_decl = task_copyfn_copy_decl;
      tcctx.cb.eh_lp_nr = 0;
      tcctx.cb.transform_call_graph_edges = CB_CGE_MOVE;
      tcctx.cb.decl_map = pointer_map_create ();
      tcctx.ctx = ctx;

      if (record_needs_remap)
        record_type = task_copyfn_remap_type (&tcctx, record_type);
      if (srecord_needs_remap)
        srecord_type = task_copyfn_remap_type (&tcctx, srecord_type);
    }
  else
    tcctx.cb.decl_map = NULL;

  arg = DECL_ARGUMENTS (child_fn);
  TREE_TYPE (arg) = build_pointer_type (record_type);
  sarg = DECL_CHAIN (arg);
  TREE_TYPE (sarg) = build_pointer_type (srecord_type);

  /* First pass: initialize temporaries used in record_type and srecord_type
     sizes and field offsets.  */
  if (tcctx.cb.decl_map)
    for (c = gimple_omp_task_clauses (task_stmt); c; c = OMP_CLAUSE_CHAIN (c))
      if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_FIRSTPRIVATE)
        {
          tree *p;

          decl = OMP_CLAUSE_DECL (c);
          p = (tree *) pointer_map_contains (tcctx.cb.decl_map, decl);
          if (p == NULL)
            continue;
          n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
          sf = (tree) n->value;
          sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
          src = build_simple_mem_ref_loc (loc, sarg);
          src = omp_build_component_ref (src, sf);
          t = build2 (MODIFY_EXPR, TREE_TYPE (*p), *p, src);
          append_to_statement_list (t, &list);
        }

  /* Second pass: copy shared var pointers and copy construct non-VLA
     firstprivate vars.  */
  for (c = gimple_omp_task_clauses (task_stmt); c; c = OMP_CLAUSE_CHAIN (c))
    switch (OMP_CLAUSE_CODE (c))
      {
      case OMP_CLAUSE_SHARED:
        decl = OMP_CLAUSE_DECL (c);
        n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
        if (n == NULL)
          break;
        f = (tree) n->value;
        if (tcctx.cb.decl_map)
          f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
        n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
        sf = (tree) n->value;
        if (tcctx.cb.decl_map)
          sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
        src = build_simple_mem_ref_loc (loc, sarg);
        src = omp_build_component_ref (src, sf);
        dst = build_simple_mem_ref_loc (loc, arg);
        dst = omp_build_component_ref (dst, f);
        t = build2 (MODIFY_EXPR, TREE_TYPE (dst), dst, src);
        append_to_statement_list (t, &list);
        break;
      case OMP_CLAUSE_FIRSTPRIVATE:
        decl = OMP_CLAUSE_DECL (c);
        if (is_variable_sized (decl))
          break;
        n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
        if (n == NULL)
          break;
        f = (tree) n->value;
        if (tcctx.cb.decl_map)
          f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
        n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
        if (n != NULL)
          {
            sf = (tree) n->value;
            if (tcctx.cb.decl_map)
              sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
            src = build_simple_mem_ref_loc (loc, sarg);
            src = omp_build_component_ref (src, sf);
            if (use_pointer_for_field (decl, NULL) || is_reference (decl))
              src = build_simple_mem_ref_loc (loc, src);
          }
        else
          src = decl;
        dst = build_simple_mem_ref_loc (loc, arg);
        dst = omp_build_component_ref (dst, f);
        t = lang_hooks.decls.omp_clause_copy_ctor (c, dst, src);
        append_to_statement_list (t, &list);
        break;
      case OMP_CLAUSE_PRIVATE:
        if (! OMP_CLAUSE_PRIVATE_OUTER_REF (c))
          break;
        decl = OMP_CLAUSE_DECL (c);
        n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
        f = (tree) n->value;
        if (tcctx.cb.decl_map)
          f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
        n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
        if (n != NULL)
          {
            sf = (tree) n->value;
            if (tcctx.cb.decl_map)
              sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
            src = build_simple_mem_ref_loc (loc, sarg);
            src = omp_build_component_ref (src, sf);
            if (use_pointer_for_field (decl, NULL))
              src = build_simple_mem_ref_loc (loc, src);
          }
        else
          src = decl;
        dst = build_simple_mem_ref_loc (loc, arg);
        dst = omp_build_component_ref (dst, f);
        t = build2 (MODIFY_EXPR, TREE_TYPE (dst), dst, src);
        append_to_statement_list (t, &list);
        break;
      default:
        break;
      }

  /* Last pass: handle VLA firstprivates.  */
  if (tcctx.cb.decl_map)
    for (c = gimple_omp_task_clauses (task_stmt); c; c = OMP_CLAUSE_CHAIN (c))
      if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_FIRSTPRIVATE)
        {
          tree ind, ptr, df;

          decl = OMP_CLAUSE_DECL (c);
          if (!is_variable_sized (decl))
            continue;
          n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
          if (n == NULL)
            continue;
          f = (tree) n->value;
          f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
          gcc_assert (DECL_HAS_VALUE_EXPR_P (decl));
          ind = DECL_VALUE_EXPR (decl);
          gcc_assert (TREE_CODE (ind) == INDIRECT_REF);
          gcc_assert (DECL_P (TREE_OPERAND (ind, 0)));
          n = splay_tree_lookup (ctx->sfield_map,
                                 (splay_tree_key) TREE_OPERAND (ind, 0));
          sf = (tree) n->value;
          sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
          src = build_simple_mem_ref_loc (loc, sarg);
          src = omp_build_component_ref (src, sf);
          src = build_simple_mem_ref_loc (loc, src);
          dst = build_simple_mem_ref_loc (loc, arg);
          dst = omp_build_component_ref (dst, f);
          t = lang_hooks.decls.omp_clause_copy_ctor (c, dst, src);
          append_to_statement_list (t, &list);
          n = splay_tree_lookup (ctx->field_map,
                                 (splay_tree_key) TREE_OPERAND (ind, 0));
          df = (tree) n->value;
          df = *(tree *) pointer_map_contains (tcctx.cb.decl_map, df);
          ptr = build_simple_mem_ref_loc (loc, arg);
          ptr = omp_build_component_ref (ptr, df);
          t = build2 (MODIFY_EXPR, TREE_TYPE (ptr), ptr,
                      build_fold_addr_expr_loc (loc, dst));
          append_to_statement_list (t, &list);
        }

  t = build1 (RETURN_EXPR, void_type_node, NULL);
  append_to_statement_list (t, &list);

  if (tcctx.cb.decl_map)
    pointer_map_destroy (tcctx.cb.decl_map);
  pop_gimplify_context (NULL);
  BIND_EXPR_BODY (bind) = list;
  pop_cfun ();
}
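
/* In source terms the generated copyfn is, roughly,

       void task_copyfn (struct .omp_data_s *dst, struct .omp_data_s1 *src)
       {
         dst->shared_var = src->shared_var;   // shared: copy the pointer
         dst->fp = copy-ctor (src->fp);       // firstprivate: copy value
         dst->vla_ptr = &dst->vla_data;       // VLA: point into dst
       }

   A sketch only: the struct tags and field names shown here are
   illustrative, not the mangled names the pass actually creates.  */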
/* Lower the OpenMP parallel or task directive in the current statement
   in GSI_P.  CTX holds context information for the directive.  */

static void
lower_omp_taskreg (gimple_stmt_iterator *gsi_p, omp_context *ctx)
{
  tree clauses;
  tree child_fn, t;
  gimple stmt = gsi_stmt (*gsi_p);
  gimple par_bind, bind;
  gimple_seq par_body, olist, ilist, par_olist, par_ilist, new_body;
  struct gimplify_ctx gctx;
  location_t loc = gimple_location (stmt);

  clauses = gimple_omp_taskreg_clauses (stmt);
  par_bind = gimple_seq_first_stmt (gimple_omp_body (stmt));
  par_body = gimple_bind_body (par_bind);
  child_fn = ctx->cb.dst_fn;
  if (gimple_code (stmt) == GIMPLE_OMP_PARALLEL
      && !gimple_omp_parallel_combined_p (stmt))
    {
      struct walk_stmt_info wi;
      int ws_num = 0;

      memset (&wi, 0, sizeof (wi));
      wi.info = &ws_num;
      wi.val_only = true;
      walk_gimple_seq (par_body, check_combined_parallel, NULL, &wi);
      if (ws_num == 1)
        gimple_omp_parallel_set_combined_p (stmt, true);
    }
  if (ctx->srecord_type)
    create_task_copyfn (stmt, ctx);

  push_gimplify_context (&gctx);

  par_olist = NULL;
  par_ilist = NULL;
  lower_rec_input_clauses (clauses, &par_ilist, &par_olist, ctx);
  lower_omp (&par_body, ctx);
  if (gimple_code (stmt) == GIMPLE_OMP_PARALLEL)
    lower_reduction_clauses (clauses, &par_olist, ctx);

  /* Declare all the variables created by mapping and the variables
     declared in the scope of the parallel body.  */
  record_vars_into (ctx->block_vars, child_fn);
  record_vars_into (gimple_bind_vars (par_bind), child_fn);

  if (ctx->record_type)
    {
      ctx->sender_decl
        = create_tmp_var (ctx->srecord_type ? ctx->srecord_type
                          : ctx->record_type, ".omp_data_o");
      DECL_NAMELESS (ctx->sender_decl) = 1;
      TREE_ADDRESSABLE (ctx->sender_decl) = 1;
      gimple_omp_taskreg_set_data_arg (stmt, ctx->sender_decl);
    }

  olist = NULL;
  ilist = NULL;
  lower_send_clauses (clauses, &ilist, &olist, ctx);
  lower_send_shared_vars (&ilist, &olist, ctx);

  /* Once all the expansions are done, sequence all the different
     fragments inside gimple_omp_body.  */

  new_body = NULL;

  if (ctx->record_type)
    {
      t = build_fold_addr_expr_loc (loc, ctx->sender_decl);
      /* fixup_child_record_type might have changed receiver_decl's type.  */
      t = fold_convert_loc (loc, TREE_TYPE (ctx->receiver_decl), t);
      gimple_seq_add_stmt (&new_body,
                           gimple_build_assign (ctx->receiver_decl, t));
    }

  gimple_seq_add_seq (&new_body, par_ilist);
  gimple_seq_add_seq (&new_body, par_body);
  gimple_seq_add_seq (&new_body, par_olist);
  new_body = maybe_catch_exception (new_body);
  gimple_seq_add_stmt (&new_body, gimple_build_omp_return (false));
  gimple_omp_set_body (stmt, new_body);

  bind = gimple_build_bind (NULL, NULL, gimple_bind_block (par_bind));
  gsi_replace (gsi_p, bind, true);
  gimple_bind_add_seq (bind, ilist);
  gimple_bind_add_stmt (bind, stmt);
  gimple_bind_add_seq (bind, olist);

  pop_gimplify_context (NULL);
}
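
/* Sketch of the data-sharing plumbing set up above:

     encountering thread:              region body (future child fn):
       .omp_data_o.x = x;                .omp_data_i = &.omp_data_o;
       GIMPLE_OMP_PARALLEL / TASK        ... uses .omp_data_i->x ...
       <olist: copy-back>                OMP_RETURN

   .omp_data_o is the sender decl created above; the receiver
   assignment is the gimple_build_assign emitted into new_body.  The
   actual outlining into a separate function happens later, in
   pass_expand_omp.  */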
/* Callback for lower_omp_1.  Return non-NULL if *tp needs to be
   regimplified.  If DATA is non-NULL, lower_omp_1 is outside
   of OpenMP context, but with task_shared_vars set.  */

static tree
lower_omp_regimplify_p (tree *tp, int *walk_subtrees,
                        void *data)
{
  tree t = *tp;

  /* Any variable with DECL_VALUE_EXPR needs to be regimplified.  */
  if (TREE_CODE (t) == VAR_DECL && data == NULL && DECL_HAS_VALUE_EXPR_P (t))
    return t;

  if (task_shared_vars
      && DECL_P (t)
      && bitmap_bit_p (task_shared_vars, DECL_UID (t)))
    return t;

  /* If a global variable has been privatized, TREE_CONSTANT on
     ADDR_EXPR might be wrong.  */
  if (data == NULL && TREE_CODE (t) == ADDR_EXPR)
    recompute_tree_invariant_for_addr_expr (t);

  *walk_subtrees = !TYPE_P (t) && !DECL_P (t);
  return NULL_TREE;
}
static void
lower_omp_1 (gimple_stmt_iterator *gsi_p, omp_context *ctx)
{
  gimple stmt = gsi_stmt (*gsi_p);
  struct walk_stmt_info wi;

  if (gimple_has_location (stmt))
    input_location = gimple_location (stmt);

  if (task_shared_vars)
    memset (&wi, '\0', sizeof (wi));

  /* If we have issued syntax errors, avoid doing any heavy lifting.
     Just replace the OpenMP directives with a NOP to avoid
     confusing RTL expansion.  */
  if (seen_error () && is_gimple_omp (stmt))
    {
      gsi_replace (gsi_p, gimple_build_nop (), true);
      return;
    }

  switch (gimple_code (stmt))
    {
    case GIMPLE_COND:
      if ((ctx || task_shared_vars)
          && (walk_tree (gimple_cond_lhs_ptr (stmt), lower_omp_regimplify_p,
                         ctx ? NULL : &wi, NULL)
              || walk_tree (gimple_cond_rhs_ptr (stmt), lower_omp_regimplify_p,
                            ctx ? NULL : &wi, NULL)))
        gimple_regimplify_operands (stmt, gsi_p);
      break;
    case GIMPLE_CATCH:
      lower_omp (gimple_catch_handler_ptr (stmt), ctx);
      break;
    case GIMPLE_EH_FILTER:
      lower_omp (gimple_eh_filter_failure_ptr (stmt), ctx);
      break;
    case GIMPLE_TRY:
      lower_omp (gimple_try_eval_ptr (stmt), ctx);
      lower_omp (gimple_try_cleanup_ptr (stmt), ctx);
      break;
    case GIMPLE_TRANSACTION:
      lower_omp (gimple_transaction_body_ptr (stmt), ctx);
      break;
    case GIMPLE_BIND:
      lower_omp (gimple_bind_body_ptr (stmt), ctx);
      break;
    case GIMPLE_OMP_PARALLEL:
    case GIMPLE_OMP_TASK:
      ctx = maybe_lookup_ctx (stmt);
      lower_omp_taskreg (gsi_p, ctx);
      break;
    case GIMPLE_OMP_FOR:
      ctx = maybe_lookup_ctx (stmt);
      gcc_assert (ctx);
      lower_omp_for (gsi_p, ctx);
      break;
    case GIMPLE_OMP_SECTIONS:
      ctx = maybe_lookup_ctx (stmt);
      gcc_assert (ctx);
      lower_omp_sections (gsi_p, ctx);
      break;
    case GIMPLE_OMP_SINGLE:
      ctx = maybe_lookup_ctx (stmt);
      gcc_assert (ctx);
      lower_omp_single (gsi_p, ctx);
      break;
    case GIMPLE_OMP_MASTER:
      ctx = maybe_lookup_ctx (stmt);
      gcc_assert (ctx);
      lower_omp_master (gsi_p, ctx);
      break;
    case GIMPLE_OMP_ORDERED:
      ctx = maybe_lookup_ctx (stmt);
      gcc_assert (ctx);
      lower_omp_ordered (gsi_p, ctx);
      break;
    case GIMPLE_OMP_CRITICAL:
      ctx = maybe_lookup_ctx (stmt);
      gcc_assert (ctx);
      lower_omp_critical (gsi_p, ctx);
      break;
    case GIMPLE_OMP_ATOMIC_LOAD:
      if ((ctx || task_shared_vars)
          && walk_tree (gimple_omp_atomic_load_rhs_ptr (stmt),
                        lower_omp_regimplify_p, ctx ? NULL : &wi, NULL))
        gimple_regimplify_operands (stmt, gsi_p);
      break;
    default:
      if ((ctx || task_shared_vars)
          && walk_gimple_op (stmt, lower_omp_regimplify_p,
                             ctx ? NULL : &wi))
        gimple_regimplify_operands (stmt, gsi_p);
      break;
    }
}

static void
lower_omp (gimple_seq *body, omp_context *ctx)
{
  location_t saved_location = input_location;
  gimple_stmt_iterator gsi;
  for (gsi = gsi_start (*body); !gsi_end_p (gsi); gsi_next (&gsi))
    lower_omp_1 (&gsi, ctx);
  input_location = saved_location;
}
/* Main entry point.  */

static unsigned int
execute_lower_omp (void)
{
  gimple_seq body;

  /* This pass always runs, to provide PROP_gimple_lomp.
     But there is nothing to do unless -fopenmp is given.  */
  if (flag_openmp == 0)
    return 0;

  all_contexts = splay_tree_new (splay_tree_compare_pointers, 0,
                                 delete_omp_context);

  body = gimple_body (current_function_decl);
  scan_omp (&body, NULL);
  gcc_assert (taskreg_nesting_level == 0);

  if (all_contexts->root)
    {
      struct gimplify_ctx gctx;

      if (task_shared_vars)
        push_gimplify_context (&gctx);
      lower_omp (&body, NULL);
      if (task_shared_vars)
        pop_gimplify_context (NULL);
    }

  if (all_contexts)
    {
      splay_tree_delete (all_contexts);
      all_contexts = NULL;
    }
  BITMAP_FREE (task_shared_vars);
  return 0;
}

struct gimple_opt_pass pass_lower_omp =
{
 {
  GIMPLE_PASS,
  "omplower",				/* name */
  OPTGROUP_NONE,			/* optinfo_flags */
  NULL,					/* gate */
  execute_lower_omp,			/* execute */
  NULL,					/* sub */
  NULL,					/* next */
  0,					/* static_pass_number */
  TV_NONE,				/* tv_id */
  PROP_gimple_any,			/* properties_required */
  PROP_gimple_lomp,			/* properties_provided */
  0,					/* properties_destroyed */
  0,					/* todo_flags_start */
  0					/* todo_flags_finish */
 }
};
/* The following is a utility to diagnose OpenMP structured block violations.
   It is not part of the "omplower" pass, as that's invoked too late.  It
   should be invoked by the respective front ends after gimplification.  */

static splay_tree all_labels;

/* Check for mismatched contexts and generate an error if needed.  Return
   true if an error is detected.  */

static bool
diagnose_sb_0 (gimple_stmt_iterator *gsi_p,
               gimple branch_ctx, gimple label_ctx)
{
  if (label_ctx == branch_ctx)
    return false;

  /*
     Previously we kept track of the label's entire context in diagnose_sb_[12]
     so we could traverse it and issue a correct "exit" or "enter" error
     message upon a structured block violation.

     We built the context by building a list with tree_cons'ing, but there is
     no easy counterpart in gimple tuples.  It seems like far too much work
     for issuing exit/enter error messages.  If someone really misses the
     distinct error message... patches welcome.
   */

#if 0
  /* Try to avoid confusing the user by producing an error message
     with correct "exit" or "enter" verbiage.  We prefer "exit"
     unless we can show that LABEL_CTX is nested within BRANCH_CTX.  */
  if (branch_ctx == NULL)
    exit_p = false;
  else
    {
      while (label_ctx)
        {
          if (TREE_VALUE (label_ctx) == branch_ctx)
            {
              exit_p = false;
              break;
            }
          label_ctx = TREE_CHAIN (label_ctx);
        }
    }

  if (exit_p)
    error ("invalid exit from OpenMP structured block");
  else
    error ("invalid entry to OpenMP structured block");
#endif

  /* If it's obvious we have an invalid entry, be specific about the error.  */
  if (branch_ctx == NULL)
    error ("invalid entry to OpenMP structured block");
  else
    /* Otherwise, be vague and lazy, but efficient.  */
    error ("invalid branch to/from an OpenMP structured block");

  gsi_replace (gsi_p, gimple_build_nop (), false);
  return true;
}
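
/* Example of a violation this diagnoses:

       #pragma omp parallel
         {
           goto out;    // "invalid branch to/from an OpenMP
         }              //  structured block"
     out:;

   The offending branch is replaced by a NOP so that later passes see
   well-formed GIMPLE.  */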
/* Pass 1: Create a minimal tree of OpenMP structured blocks, and record
   where each label is found.  */

static tree
diagnose_sb_1 (gimple_stmt_iterator *gsi_p, bool *handled_ops_p,
               struct walk_stmt_info *wi)
{
  gimple context = (gimple) wi->info;
  gimple inner_context;
  gimple stmt = gsi_stmt (*gsi_p);

  *handled_ops_p = true;

  switch (gimple_code (stmt))
    {
    WALK_SUBSTMTS;

    case GIMPLE_OMP_PARALLEL:
    case GIMPLE_OMP_TASK:
    case GIMPLE_OMP_SECTIONS:
    case GIMPLE_OMP_SINGLE:
    case GIMPLE_OMP_SECTION:
    case GIMPLE_OMP_MASTER:
    case GIMPLE_OMP_ORDERED:
    case GIMPLE_OMP_CRITICAL:
      /* The minimal context here is just the current OMP construct.  */
      inner_context = stmt;
      wi->info = inner_context;
      walk_gimple_seq (gimple_omp_body (stmt), diagnose_sb_1, NULL, wi);
      wi->info = context;
      break;

    case GIMPLE_OMP_FOR:
      inner_context = stmt;
      wi->info = inner_context;
      /* gimple_omp_for_{index,initial,final} are all DECLs; no need to
         walk them.  */
      walk_gimple_seq (gimple_omp_for_pre_body (stmt),
                       diagnose_sb_1, NULL, wi);
      walk_gimple_seq (gimple_omp_body (stmt), diagnose_sb_1, NULL, wi);
      wi->info = context;
      break;

    case GIMPLE_LABEL:
      splay_tree_insert (all_labels, (splay_tree_key) gimple_label_label (stmt),
                         (splay_tree_value) context);
      break;

    default:
      break;
    }

  return NULL_TREE;
}
/* Pass 2: Check each branch and see if its context differs from that of
   the destination label's context.  */

static tree
diagnose_sb_2 (gimple_stmt_iterator *gsi_p, bool *handled_ops_p,
               struct walk_stmt_info *wi)
{
  gimple context = (gimple) wi->info;
  splay_tree_node n;
  gimple stmt = gsi_stmt (*gsi_p);

  *handled_ops_p = true;

  switch (gimple_code (stmt))
    {
    WALK_SUBSTMTS;

    case GIMPLE_OMP_PARALLEL:
    case GIMPLE_OMP_TASK:
    case GIMPLE_OMP_SECTIONS:
    case GIMPLE_OMP_SINGLE:
    case GIMPLE_OMP_SECTION:
    case GIMPLE_OMP_MASTER:
    case GIMPLE_OMP_ORDERED:
    case GIMPLE_OMP_CRITICAL:
      wi->info = stmt;
      walk_gimple_seq_mod (gimple_omp_body_ptr (stmt), diagnose_sb_2, NULL, wi);
      wi->info = context;
      break;

    case GIMPLE_OMP_FOR:
      wi->info = stmt;
      /* gimple_omp_for_{index,initial,final} are all DECLs; no need to
         walk them.  */
      walk_gimple_seq_mod (gimple_omp_for_pre_body_ptr (stmt),
                           diagnose_sb_2, NULL, wi);
      walk_gimple_seq_mod (gimple_omp_body_ptr (stmt), diagnose_sb_2, NULL, wi);
      wi->info = context;
      break;

    case GIMPLE_COND:
      {
        tree lab = gimple_cond_true_label (stmt);
        if (lab)
          {
            n = splay_tree_lookup (all_labels,
                                   (splay_tree_key) lab);
            diagnose_sb_0 (gsi_p, context,
                           n ? (gimple) n->value : NULL);
          }
        lab = gimple_cond_false_label (stmt);
        if (lab)
          {
            n = splay_tree_lookup (all_labels,
                                   (splay_tree_key) lab);
            diagnose_sb_0 (gsi_p, context,
                           n ? (gimple) n->value : NULL);
          }
      }
      break;

    case GIMPLE_GOTO:
      {
        tree lab = gimple_goto_dest (stmt);
        if (TREE_CODE (lab) != LABEL_DECL)
          break;

        n = splay_tree_lookup (all_labels, (splay_tree_key) lab);
        diagnose_sb_0 (gsi_p, context, n ? (gimple) n->value : NULL);
      }
      break;

    case GIMPLE_SWITCH:
      {
        unsigned int i;
        for (i = 0; i < gimple_switch_num_labels (stmt); ++i)
          {
            tree lab = CASE_LABEL (gimple_switch_label (stmt, i));
            n = splay_tree_lookup (all_labels, (splay_tree_key) lab);
            if (n && diagnose_sb_0 (gsi_p, context, (gimple) n->value))
              break;
          }
      }
      break;

    case GIMPLE_RETURN:
      diagnose_sb_0 (gsi_p, context, NULL);
      break;

    default:
      break;
    }

  return NULL_TREE;
}
static unsigned int
diagnose_omp_structured_block_errors (void)
{
  struct walk_stmt_info wi;
  gimple_seq body = gimple_body (current_function_decl);

  all_labels = splay_tree_new (splay_tree_compare_pointers, 0, 0);

  memset (&wi, 0, sizeof (wi));
  walk_gimple_seq (body, diagnose_sb_1, NULL, &wi);

  memset (&wi, 0, sizeof (wi));
  wi.want_locations = true;
  walk_gimple_seq_mod (&body, diagnose_sb_2, NULL, &wi);

  gimple_set_body (current_function_decl, body);

  splay_tree_delete (all_labels);
  all_labels = NULL;

  return 0;
}

static bool
gate_diagnose_omp_blocks (void)
{
  return flag_openmp != 0;
}

struct gimple_opt_pass pass_diagnose_omp_blocks =
{
 {
  GIMPLE_PASS,
  "*diagnose_omp_blocks",		/* name */
  OPTGROUP_NONE,			/* optinfo_flags */
  gate_diagnose_omp_blocks,		/* gate */
  diagnose_omp_structured_block_errors,	/* execute */
  NULL,					/* sub */
  NULL,					/* next */
  0,					/* static_pass_number */
  TV_NONE,				/* tv_id */
  PROP_gimple_any,			/* properties_required */
  0,					/* properties_provided */
  0,					/* properties_destroyed */
  0,					/* todo_flags_start */
  0,					/* todo_flags_finish */
 }
};

#include "gt-omp-low.h"