/* Lowering pass for OpenMP directives.  Converts OpenMP directives
   into explicit calls to the runtime library (libgomp) and data
   marshalling to implement data sharing and copying clauses.
   Contributed by Diego Novillo <dnovillo@redhat.com>

   Copyright (C) 2005, 2006, 2007, 2008, 2009, 2010
   Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "coretypes.h"
#include "tree-iterator.h"
#include "tree-inline.h"
#include "langhooks.h"
#include "diagnostic-core.h"
#include "tree-flow.h"
#include "tree-pass.h"
#include "splay-tree.h"
/* Lowering of OpenMP parallel and workshare constructs proceeds in two
   phases.  The first phase scans the function looking for OMP statements
   and then for variables that must be replaced to satisfy data sharing
   clauses.  The second phase expands code for the constructs, as well as
   re-gimplifying things when variables have been replaced with complex
   expressions.

   Final code generation is done by pass_expand_omp.  The flowgraph is
   scanned for parallel regions which are then moved to a new
   function, to be invoked by the thread library.  */
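
/* For illustration only (a rough sketch, not the exact GIMPLE produced;
   the names main.omp_fn.0 and the member A are made up): a directive
   such as

	#pragma omp parallel shared (a)
	  body;

   is outlined into a child function and replaced by calls into libgomp,
   approximately:

	struct .omp_data_s .omp_data_o;
	.omp_data_o.a = a;
	GOMP_parallel_start (main.omp_fn.0, &.omp_data_o, 0);
	main.omp_fn.0 (&.omp_data_o);
	GOMP_parallel_end ();

   where the child function accesses the shared variable through the
   .omp_data_s record:

	void main.omp_fn.0 (struct .omp_data_s *.omp_data_i)
	{
	  body;   /* with 'a' accessed as .omp_data_i->a */
	}  */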
/* Context structure.  Used to store information about each parallel
   directive in the code.  */

typedef struct omp_context
{
  /* This field must be at the beginning, as we do "inheritance": Some
     callback functions for tree-inline.c (e.g., omp_copy_decl)
     receive a copy_body_data pointer that is up-casted to an
     omp_context pointer.  */
  copy_body_data cb;

  /* The tree of contexts corresponding to the encountered constructs.  */
  struct omp_context *outer;
  gimple stmt;

  /* Map variables to fields in a structure that allows communication
     between sending and receiving threads.  */
  splay_tree field_map;
  tree record_type;
  tree sender_decl;
  tree receiver_decl;

  /* These are used just by task contexts, if task firstprivate fn is
     needed.  srecord_type is used to communicate from the thread
     that encountered the task construct to task firstprivate fn,
     record_type is allocated by GOMP_task, initialized by task firstprivate
     fn and passed to the task body fn.  */
  splay_tree sfield_map;
  tree srecord_type;

  /* A chain of variables to add to the top-level block surrounding the
     construct.  In the case of a parallel, this is in the child function.  */
  tree block_vars;

  /* What to do with variables with implicitly determined sharing
     attributes.  */
  enum omp_clause_default_kind default_kind;

  /* Nesting depth of this context.  Used to beautify error messages re
     invalid gotos.  The outermost ctx is depth 1, with depth 0 being
     reserved for the main body of the function.  */
  int depth;

  /* True if this parallel directive is nested within another.  */
  bool is_nested;
} omp_context;
struct omp_for_data_loop
{
  tree v, n1, n2, step;
  enum tree_code cond_code;
};

/* A structure describing the main elements of a parallel loop.  */

struct omp_for_data
{
  struct omp_for_data_loop loop;
  tree chunk_size;
  gimple for_stmt;
  tree iter_type;
  int collapse;
  bool have_nowait, have_ordered;
  enum omp_clause_schedule_kind sched_kind;
  struct omp_for_data_loop *loops;
};
static splay_tree all_contexts;
static int taskreg_nesting_level;
struct omp_region *root_omp_region;
static bitmap task_shared_vars;

static void scan_omp (gimple_seq, omp_context *);
static tree scan_omp_1_op (tree *, int *, void *);
#define WALK_SUBSTMTS  \
    case GIMPLE_BIND: \
    case GIMPLE_TRY: \
    case GIMPLE_CATCH: \
    case GIMPLE_EH_FILTER: \
      /* The sub-statements for these should be walked.  */ \
      *handled_ops_p = false; \
      break;
/* Convenience function for calling scan_omp_1_op on tree operands.  */

static tree
scan_omp_op (tree *tp, omp_context *ctx)
{
  struct walk_stmt_info wi;

  memset (&wi, 0, sizeof (wi));
  wi.info = ctx;
  wi.want_locations = true;

  return walk_tree (tp, scan_omp_1_op, &wi, NULL);
}
static void lower_omp (gimple_seq, omp_context *);
static tree lookup_decl_in_outer_ctx (tree, omp_context *);
static tree maybe_lookup_decl_in_outer_ctx (tree, omp_context *);
/* Find an OpenMP clause of type KIND within CLAUSES.  */

static tree
find_omp_clause (tree clauses, enum omp_clause_code kind)
{
  for (; clauses ; clauses = OMP_CLAUSE_CHAIN (clauses))
    if (OMP_CLAUSE_CODE (clauses) == kind)
      return clauses;

  return NULL_TREE;
}
/* Return true if CTX is for an omp parallel.  */

static inline bool
is_parallel_ctx (omp_context *ctx)
{
  return gimple_code (ctx->stmt) == GIMPLE_OMP_PARALLEL;
}


/* Return true if CTX is for an omp task.  */

static inline bool
is_task_ctx (omp_context *ctx)
{
  return gimple_code (ctx->stmt) == GIMPLE_OMP_TASK;
}


/* Return true if CTX is for an omp parallel or omp task.  */

static inline bool
is_taskreg_ctx (omp_context *ctx)
{
  return gimple_code (ctx->stmt) == GIMPLE_OMP_PARALLEL
	 || gimple_code (ctx->stmt) == GIMPLE_OMP_TASK;
}


/* Return true if REGION is a combined parallel+workshare region.  */

static inline bool
is_combined_parallel (struct omp_region *region)
{
  return region->is_combined_parallel;
}
/* Extract the header elements of parallel loop FOR_STMT and store
   them into *FD.  */

static void
extract_omp_for_data (gimple for_stmt, struct omp_for_data *fd,
		      struct omp_for_data_loop *loops)
{
  tree t, var, *collapse_iter, *collapse_count;
  tree count = NULL_TREE, iter_type = long_integer_type_node;
  struct omp_for_data_loop *loop;
  int i;
  struct omp_for_data_loop dummy_loop;
  location_t loc = gimple_location (for_stmt);

  fd->for_stmt = for_stmt;
  fd->collapse = gimple_omp_for_collapse (for_stmt);
  if (fd->collapse > 1)
    fd->loops = loops;
  else
    fd->loops = &fd->loop;

  fd->have_nowait = fd->have_ordered = false;
  fd->sched_kind = OMP_CLAUSE_SCHEDULE_STATIC;
  fd->chunk_size = NULL_TREE;
  collapse_iter = NULL;
  collapse_count = NULL;

  for (t = gimple_omp_for_clauses (for_stmt); t ; t = OMP_CLAUSE_CHAIN (t))
    switch (OMP_CLAUSE_CODE (t))
      {
      case OMP_CLAUSE_NOWAIT:
	fd->have_nowait = true;
	break;
      case OMP_CLAUSE_ORDERED:
	fd->have_ordered = true;
	break;
      case OMP_CLAUSE_SCHEDULE:
	fd->sched_kind = OMP_CLAUSE_SCHEDULE_KIND (t);
	fd->chunk_size = OMP_CLAUSE_SCHEDULE_CHUNK_EXPR (t);
	break;
      case OMP_CLAUSE_COLLAPSE:
	if (fd->collapse > 1)
	  {
	    collapse_iter = &OMP_CLAUSE_COLLAPSE_ITERVAR (t);
	    collapse_count = &OMP_CLAUSE_COLLAPSE_COUNT (t);
	  }
	break;
      default:
	break;
      }

  /* FIXME: for now map schedule(auto) to schedule(static).
     There should be analysis to determine whether all iterations
     are approximately the same amount of work (then schedule(static)
     is best) or if it varies (then schedule(dynamic,N) is better).  */
  if (fd->sched_kind == OMP_CLAUSE_SCHEDULE_AUTO)
    {
      fd->sched_kind = OMP_CLAUSE_SCHEDULE_STATIC;
      gcc_assert (fd->chunk_size == NULL);
    }

  gcc_assert (fd->collapse == 1 || collapse_iter != NULL);
  if (fd->sched_kind == OMP_CLAUSE_SCHEDULE_RUNTIME)
    gcc_assert (fd->chunk_size == NULL);
  else if (fd->chunk_size == NULL)
    {
      /* We only need to compute a default chunk size for ordered
	 static loops and dynamic loops.  */
      if (fd->sched_kind != OMP_CLAUSE_SCHEDULE_STATIC
	  || fd->have_ordered
	  || fd->collapse > 1)
	fd->chunk_size = (fd->sched_kind == OMP_CLAUSE_SCHEDULE_STATIC)
			 ? integer_zero_node : integer_one_node;
    }

  for (i = 0; i < fd->collapse; i++)
    {
      if (fd->collapse == 1)
	loop = &fd->loop;
      else if (loops != NULL)
	loop = loops + i;
      else
	loop = &dummy_loop;

      loop->v = gimple_omp_for_index (for_stmt, i);
      gcc_assert (SSA_VAR_P (loop->v));
      gcc_assert (TREE_CODE (TREE_TYPE (loop->v)) == INTEGER_TYPE
		  || TREE_CODE (TREE_TYPE (loop->v)) == POINTER_TYPE);
      var = TREE_CODE (loop->v) == SSA_NAME ? SSA_NAME_VAR (loop->v) : loop->v;
      loop->n1 = gimple_omp_for_initial (for_stmt, i);

      loop->cond_code = gimple_omp_for_cond (for_stmt, i);
      loop->n2 = gimple_omp_for_final (for_stmt, i);
      switch (loop->cond_code)
	{
	case LT_EXPR:
	case GT_EXPR:
	  break;
	case LE_EXPR:
	  if (POINTER_TYPE_P (TREE_TYPE (loop->n2)))
	    loop->n2 = fold_build2_loc (loc,
					POINTER_PLUS_EXPR, TREE_TYPE (loop->n2),
					loop->n2, size_one_node);
	  else
	    loop->n2 = fold_build2_loc (loc,
					PLUS_EXPR, TREE_TYPE (loop->n2), loop->n2,
					build_int_cst (TREE_TYPE (loop->n2), 1));
	  loop->cond_code = LT_EXPR;
	  break;
	case GE_EXPR:
	  if (POINTER_TYPE_P (TREE_TYPE (loop->n2)))
	    loop->n2 = fold_build2_loc (loc,
					POINTER_PLUS_EXPR, TREE_TYPE (loop->n2),
					loop->n2, size_int (-1));
	  else
	    loop->n2 = fold_build2_loc (loc,
					MINUS_EXPR, TREE_TYPE (loop->n2), loop->n2,
					build_int_cst (TREE_TYPE (loop->n2), 1));
	  loop->cond_code = GT_EXPR;
	  break;
	default:
	  gcc_unreachable ();
	}

      t = gimple_omp_for_incr (for_stmt, i);
      gcc_assert (TREE_OPERAND (t, 0) == var);
      switch (TREE_CODE (t))
	{
	case PLUS_EXPR:
	case POINTER_PLUS_EXPR:
	  loop->step = TREE_OPERAND (t, 1);
	  break;
	case MINUS_EXPR:
	  loop->step = TREE_OPERAND (t, 1);
	  loop->step = fold_build1_loc (loc,
					NEGATE_EXPR, TREE_TYPE (loop->step),
					loop->step);
	  break;
	default:
	  gcc_unreachable ();
	}

      if (iter_type != long_long_unsigned_type_node)
	{
	  if (POINTER_TYPE_P (TREE_TYPE (loop->v)))
	    iter_type = long_long_unsigned_type_node;
	  else if (TYPE_UNSIGNED (TREE_TYPE (loop->v))
		   && TYPE_PRECISION (TREE_TYPE (loop->v))
		      >= TYPE_PRECISION (iter_type))
	    {
	      tree n;

	      if (loop->cond_code == LT_EXPR)
		n = fold_build2_loc (loc,
				     PLUS_EXPR, TREE_TYPE (loop->v),
				     loop->n2, loop->step);
	      else
		n = loop->n1;
	      if (TREE_CODE (n) != INTEGER_CST
		  || tree_int_cst_lt (TYPE_MAX_VALUE (iter_type), n))
		iter_type = long_long_unsigned_type_node;
	    }
	  else if (TYPE_PRECISION (TREE_TYPE (loop->v))
		   > TYPE_PRECISION (iter_type))
	    {
	      tree n1, n2;

	      if (loop->cond_code == LT_EXPR)
		{
		  n1 = loop->n1;
		  n2 = fold_build2_loc (loc,
					PLUS_EXPR, TREE_TYPE (loop->v),
					loop->n2, loop->step);
		}
	      else
		{
		  n1 = fold_build2_loc (loc,
					MINUS_EXPR, TREE_TYPE (loop->v),
					loop->n2, loop->step);
		  n2 = loop->n1;
		}
	      if (TREE_CODE (n1) != INTEGER_CST
		  || TREE_CODE (n2) != INTEGER_CST
		  || !tree_int_cst_lt (TYPE_MIN_VALUE (iter_type), n1)
		  || !tree_int_cst_lt (n2, TYPE_MAX_VALUE (iter_type)))
		iter_type = long_long_unsigned_type_node;
	    }
	}

      if (collapse_count && *collapse_count == NULL)
	{
	  if ((i == 0 || count != NULL_TREE)
	      && TREE_CODE (TREE_TYPE (loop->v)) == INTEGER_TYPE
	      && TREE_CONSTANT (loop->n1)
	      && TREE_CONSTANT (loop->n2)
	      && TREE_CODE (loop->step) == INTEGER_CST)
	    {
	      tree itype = TREE_TYPE (loop->v);

	      if (POINTER_TYPE_P (itype))
		itype
		  = lang_hooks.types.type_for_size (TYPE_PRECISION (itype), 0);
	      t = build_int_cst (itype, (loop->cond_code == LT_EXPR ? -1 : 1));
	      t = fold_build2_loc (loc,
				   PLUS_EXPR, itype,
				   fold_convert_loc (loc, itype, loop->step), t);
	      t = fold_build2_loc (loc, PLUS_EXPR, itype, t,
				   fold_convert_loc (loc, itype, loop->n2));
	      t = fold_build2_loc (loc, MINUS_EXPR, itype, t,
				   fold_convert_loc (loc, itype, loop->n1));
	      if (TYPE_UNSIGNED (itype) && loop->cond_code == GT_EXPR)
		t = fold_build2_loc (loc, TRUNC_DIV_EXPR, itype,
				     fold_build1_loc (loc, NEGATE_EXPR, itype, t),
				     fold_build1_loc (loc, NEGATE_EXPR, itype,
						      fold_convert_loc (loc, itype,
									loop->step)));
	      else
		t = fold_build2_loc (loc, TRUNC_DIV_EXPR, itype, t,
				     fold_convert_loc (loc, itype, loop->step));
	      t = fold_convert_loc (loc, long_long_unsigned_type_node, t);
	      if (count != NULL_TREE)
		count = fold_build2_loc (loc,
					 MULT_EXPR, long_long_unsigned_type_node,
					 count, t);
	      else
		count = t;
	      if (TREE_CODE (count) != INTEGER_CST)
		count = NULL_TREE;
	    }
	  else
	    count = NULL_TREE;
	}
    }

  if (count)
    {
      if (!tree_int_cst_lt (count, TYPE_MAX_VALUE (long_integer_type_node)))
	iter_type = long_long_unsigned_type_node;
      else
	iter_type = long_integer_type_node;
    }
  else if (collapse_iter && *collapse_iter != NULL)
    iter_type = TREE_TYPE (*collapse_iter);
  fd->iter_type = iter_type;
  if (collapse_iter && *collapse_iter == NULL)
    *collapse_iter = create_tmp_var (iter_type, ".iter");
  if (collapse_count && *collapse_count == NULL)
    {
      if (count)
	*collapse_count = fold_convert_loc (loc, iter_type, count);
      else
	*collapse_count = create_tmp_var (iter_type, ".count");
    }

  if (fd->collapse > 1)
    {
      fd->loop.v = *collapse_iter;
      fd->loop.n1 = build_int_cst (TREE_TYPE (fd->loop.v), 0);
      fd->loop.n2 = *collapse_count;
      fd->loop.step = build_int_cst (TREE_TYPE (fd->loop.v), 1);
      fd->loop.cond_code = LT_EXPR;
    }
}
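
/* For intuition (an informal sketch of the code above): with a
   collapse(2) nest such as

	for (i = 0; i < N; i++)
	  for (j = 0; j < M; j++)

   the total iteration count .count = N * M is synthesized when it is
   computable, and FD->loop then describes the single linearized loop

	for (.iter = 0; .iter < .count; .iter = .iter + 1)

   using the artificial .iter and .count variables created above.  */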
/* Given two blocks PAR_ENTRY_BB and WS_ENTRY_BB such that WS_ENTRY_BB
   is the immediate dominator of PAR_ENTRY_BB, return true if there
   are no data dependencies that would prevent expanding the parallel
   directive at PAR_ENTRY_BB as a combined parallel+workshare region.

   When expanding a combined parallel+workshare region, the call to
   the child function may need additional arguments in the case of
   GIMPLE_OMP_FOR regions.  In some cases, these arguments are
   computed out of variables passed in from the parent to the child
   via 'struct .omp_data_s'.  For instance:

	#pragma omp parallel for schedule (guided, i * 4)

   Is lowered into:

	# BLOCK 2 (PAR_ENTRY_BB)
	.omp_data_o.i = i;
	#pragma omp parallel [child fn: bar.omp_fn.0 ( ..., D.1598)

	# BLOCK 3 (WS_ENTRY_BB)
	.omp_data_i = &.omp_data_o;
	D.1667 = .omp_data_i->i;
	D.1598 = D.1667 * 4;
	#pragma omp for schedule (guided, D.1598)

   When we outline the parallel region, the call to the child function
   'bar.omp_fn.0' will need the value D.1598 in its argument list, but
   that value is computed *after* the call site.  So, in principle we
   cannot do the transformation.

   To see whether the code in WS_ENTRY_BB blocks the combined
   parallel+workshare call, we collect all the variables used in the
   GIMPLE_OMP_FOR header and check whether they appear on the LHS of any
   statement in WS_ENTRY_BB.  If so, then we cannot emit the combined
   call.

   FIXME.  If we had the SSA form built at this point, we could merely
   hoist the code in block 3 into block 2 and be done with it.  But at
   this point we don't have dataflow information and though we could
   hack something up here, it is really not worth the aggravation.  */
static bool
workshare_safe_to_combine_p (basic_block ws_entry_bb)
{
  struct omp_for_data fd;
  gimple ws_stmt = last_stmt (ws_entry_bb);

  if (gimple_code (ws_stmt) == GIMPLE_OMP_SECTIONS)
    return true;

  gcc_assert (gimple_code (ws_stmt) == GIMPLE_OMP_FOR);

  extract_omp_for_data (ws_stmt, &fd, NULL);

  if (fd.collapse > 1 && TREE_CODE (fd.loop.n2) != INTEGER_CST)
    return false;
  if (fd.iter_type != long_integer_type_node)
    return false;

  /* FIXME.  We give up too easily here.  If any of these arguments
     are not constants, they will likely involve variables that have
     been mapped into fields of .omp_data_s for sharing with the child
     function.  With appropriate data flow, it would be possible to
     see through this.  */
  if (!is_gimple_min_invariant (fd.loop.n1)
      || !is_gimple_min_invariant (fd.loop.n2)
      || !is_gimple_min_invariant (fd.loop.step)
      || (fd.chunk_size && !is_gimple_min_invariant (fd.chunk_size)))
    return false;

  return true;
}
/* Collect additional arguments needed to emit a combined
   parallel+workshare call.  WS_STMT is the workshare directive being
   expanded.  */

static tree
get_ws_args_for (gimple ws_stmt)
{
  tree t;
  location_t loc = gimple_location (ws_stmt);

  if (gimple_code (ws_stmt) == GIMPLE_OMP_FOR)
    {
      struct omp_for_data fd;
      tree ws_args;

      extract_omp_for_data (ws_stmt, &fd, NULL);

      ws_args = NULL_TREE;
      if (fd.chunk_size)
	{
	  t = fold_convert_loc (loc, long_integer_type_node, fd.chunk_size);
	  ws_args = tree_cons (NULL, t, ws_args);
	}

      t = fold_convert_loc (loc, long_integer_type_node, fd.loop.step);
      ws_args = tree_cons (NULL, t, ws_args);

      t = fold_convert_loc (loc, long_integer_type_node, fd.loop.n2);
      ws_args = tree_cons (NULL, t, ws_args);

      t = fold_convert_loc (loc, long_integer_type_node, fd.loop.n1);
      ws_args = tree_cons (NULL, t, ws_args);

      return ws_args;
    }
  else if (gimple_code (ws_stmt) == GIMPLE_OMP_SECTIONS)
    {
      /* Number of sections is equal to the number of edges from the
	 GIMPLE_OMP_SECTIONS_SWITCH statement, except for the one to
	 the exit of the sections region.  */
      basic_block bb = single_succ (gimple_bb (ws_stmt));
      t = build_int_cst (unsigned_type_node, EDGE_COUNT (bb->succs) - 1);
      t = tree_cons (NULL, t, NULL);
      return t;
    }

  gcc_unreachable ();
}
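
/* For reference (an informal note, based on the argument order built
   above): in the GIMPLE_OMP_FOR case the list supplies n1, n2, step and,
   when present, the chunk size, in that order.  These become the trailing
   arguments of libgomp's combined entry points of roughly the shape
   GOMP_parallel_loop_*_start (fn, data, num_threads, start, end, incr,
   chunk_size).  */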
/* Discover whether REGION is a combined parallel+workshare region.  */

static void
determine_parallel_type (struct omp_region *region)
{
  basic_block par_entry_bb, par_exit_bb;
  basic_block ws_entry_bb, ws_exit_bb;

  if (region == NULL || region->inner == NULL
      || region->exit == NULL || region->inner->exit == NULL
      || region->inner->cont == NULL)
    return;

  /* We only support parallel+for and parallel+sections.  */
  if (region->type != GIMPLE_OMP_PARALLEL
      || (region->inner->type != GIMPLE_OMP_FOR
	  && region->inner->type != GIMPLE_OMP_SECTIONS))
    return;

  /* Check for perfect nesting PAR_ENTRY_BB -> WS_ENTRY_BB and
     WS_EXIT_BB -> PAR_EXIT_BB.  */
  par_entry_bb = region->entry;
  par_exit_bb = region->exit;
  ws_entry_bb = region->inner->entry;
  ws_exit_bb = region->inner->exit;

  if (single_succ (par_entry_bb) == ws_entry_bb
      && single_succ (ws_exit_bb) == par_exit_bb
      && workshare_safe_to_combine_p (ws_entry_bb)
      && (gimple_omp_parallel_combined_p (last_stmt (par_entry_bb))
	  || (last_and_only_stmt (ws_entry_bb)
	      && last_and_only_stmt (par_exit_bb))))
    {
      gimple ws_stmt = last_stmt (ws_entry_bb);

      if (region->inner->type == GIMPLE_OMP_FOR)
	{
	  /* If this is a combined parallel loop, we need to determine
	     whether or not to use the combined library calls.  There
	     are two cases where we do not apply the transformation:
	     static loops and any kind of ordered loop.  In the first
	     case, we already open code the loop so there is no need
	     to do anything else.  In the latter case, the combined
	     parallel loop call would still need extra synchronization
	     to implement ordered semantics, so there would not be any
	     gain in using the combined call.  */
	  tree clauses = gimple_omp_for_clauses (ws_stmt);
	  tree c = find_omp_clause (clauses, OMP_CLAUSE_SCHEDULE);
	  if (c == NULL
	      || OMP_CLAUSE_SCHEDULE_KIND (c) == OMP_CLAUSE_SCHEDULE_STATIC
	      || find_omp_clause (clauses, OMP_CLAUSE_ORDERED))
	    {
	      region->is_combined_parallel = false;
	      region->inner->is_combined_parallel = false;
	      return;
	    }
	}

      region->is_combined_parallel = true;
      region->inner->is_combined_parallel = true;
      region->ws_args = get_ws_args_for (ws_stmt);
    }
}
/* Return true if EXPR is variable sized.  */

static inline bool
is_variable_sized (const_tree expr)
{
  return !TREE_CONSTANT (TYPE_SIZE_UNIT (TREE_TYPE (expr)));
}

/* Return true if DECL is a reference type.  */

static inline bool
is_reference (tree decl)
{
  return lang_hooks.decls.omp_privatize_by_reference (decl);
}
/* Lookup variables in the decl or field splay trees.  The "maybe" form
   allows for the variable form to not have been entered, otherwise we
   assert that the variable must have been entered.  */

static inline tree
lookup_decl (tree var, omp_context *ctx)
{
  tree *n;
  n = (tree *) pointer_map_contains (ctx->cb.decl_map, var);
  return *n;
}

static inline tree
maybe_lookup_decl (const_tree var, omp_context *ctx)
{
  tree *n;
  n = (tree *) pointer_map_contains (ctx->cb.decl_map, var);
  return n ? *n : NULL_TREE;
}

static inline tree
lookup_field (tree var, omp_context *ctx)
{
  splay_tree_node n;
  n = splay_tree_lookup (ctx->field_map, (splay_tree_key) var);
  return (tree) n->value;
}

static inline tree
lookup_sfield (tree var, omp_context *ctx)
{
  splay_tree_node n;
  n = splay_tree_lookup (ctx->sfield_map
			 ? ctx->sfield_map : ctx->field_map,
			 (splay_tree_key) var);
  return (tree) n->value;
}

static inline tree
maybe_lookup_field (tree var, omp_context *ctx)
{
  splay_tree_node n;
  n = splay_tree_lookup (ctx->field_map, (splay_tree_key) var);
  return n ? (tree) n->value : NULL_TREE;
}
/* Return true if DECL should be copied by pointer.  SHARED_CTX is
   the parallel context if DECL is to be shared.  */

static bool
use_pointer_for_field (tree decl, omp_context *shared_ctx)
{
  if (AGGREGATE_TYPE_P (TREE_TYPE (decl)))
    return true;

  /* We can only use copy-in/copy-out semantics for shared variables
     when we know the value is not accessible from an outer scope.  */
  if (shared_ctx)
    {
      /* ??? Trivially accessible from anywhere.  But why would we even
	 be passing an address in this case?  Should we simply assert
	 this to be false, or should we have a cleanup pass that removes
	 these from the list of mappings?  */
      if (TREE_STATIC (decl) || DECL_EXTERNAL (decl))
	return true;

      /* For variables with DECL_HAS_VALUE_EXPR_P set, we cannot tell
	 without analyzing the expression whether or not its location
	 is accessible to anyone else.  In the case of nested parallel
	 regions it certainly may be.  */
      if (TREE_CODE (decl) != RESULT_DECL && DECL_HAS_VALUE_EXPR_P (decl))
	return true;

      /* Do not use copy-in/copy-out for variables that have their
	 address taken.  */
      if (TREE_ADDRESSABLE (decl))
	return true;

      /* Disallow copy-in/out in nested parallel if
	 decl is shared in outer parallel, otherwise
	 each thread could store the shared variable
	 in its own copy-in location, making the
	 variable no longer really shared.  */
      if (!TREE_READONLY (decl) && shared_ctx->is_nested)
	{
	  omp_context *up;

	  for (up = shared_ctx->outer; up; up = up->outer)
	    if (is_taskreg_ctx (up) && maybe_lookup_decl (decl, up))
	      break;

	  if (up)
	    {
	      tree c;

	      for (c = gimple_omp_taskreg_clauses (up->stmt);
		   c; c = OMP_CLAUSE_CHAIN (c))
		if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_SHARED
		    && OMP_CLAUSE_DECL (c) == decl)
		  break;

	      if (c)
		return true;
	    }
	}

      /* For tasks avoid using copy-in/out, unless they are readonly
	 (in which case just copy-in is used).  As tasks can be
	 deferred or executed in different thread, when GOMP_task
	 returns, the task hasn't necessarily terminated.  */
      if (!TREE_READONLY (decl) && is_task_ctx (shared_ctx))
	{
	  tree outer = maybe_lookup_decl_in_outer_ctx (decl, shared_ctx);
	  if (is_gimple_reg (outer))
	    {
	      /* Taking address of OUTER in lower_send_shared_vars
		 might need regimplification of everything that uses the
		 variable.  */
	      if (!task_shared_vars)
		task_shared_vars = BITMAP_ALLOC (NULL);
	      bitmap_set_bit (task_shared_vars, DECL_UID (outer));
	      TREE_ADDRESSABLE (outer) = 1;
	    }
	  return true;
	}
    }

  return false;
}
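
/* Informal examples of the decision above (not an exhaustive list): a
   shared 'int' that is never address-taken can use copy-in/copy-out; an
   array or struct is always passed by pointer; and a non-readonly shared
   scalar inside an 'omp task' is passed by pointer because the task may
   still be running after GOMP_task returns.  */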
/* Create a new VAR_DECL and copy information from VAR to it.  */

tree
copy_var_decl (tree var, tree name, tree type)
{
  tree copy = build_decl (DECL_SOURCE_LOCATION (var), VAR_DECL, name, type);

  TREE_ADDRESSABLE (copy) = TREE_ADDRESSABLE (var);
  TREE_THIS_VOLATILE (copy) = TREE_THIS_VOLATILE (var);
  DECL_GIMPLE_REG_P (copy) = DECL_GIMPLE_REG_P (var);
  DECL_ARTIFICIAL (copy) = DECL_ARTIFICIAL (var);
  DECL_IGNORED_P (copy) = DECL_IGNORED_P (var);
  DECL_CONTEXT (copy) = DECL_CONTEXT (var);
  TREE_USED (copy) = 1;
  DECL_SEEN_IN_BIND_EXPR_P (copy) = 1;

  return copy;
}

/* Construct a new automatic decl similar to VAR.  */

static tree
omp_copy_decl_2 (tree var, tree name, tree type, omp_context *ctx)
{
  tree copy = copy_var_decl (var, name, type);

  DECL_CONTEXT (copy) = current_function_decl;
  TREE_CHAIN (copy) = ctx->block_vars;
  ctx->block_vars = copy;

  return copy;
}

static tree
omp_copy_decl_1 (tree var, omp_context *ctx)
{
  return omp_copy_decl_2 (var, DECL_NAME (var), TREE_TYPE (var), ctx);
}
/* Build tree nodes to access the field for VAR on the receiver side.  */

static tree
build_receiver_ref (tree var, bool by_ref, omp_context *ctx)
{
  tree x, field = lookup_field (var, ctx);

  /* If the receiver record type was remapped in the child function,
     remap the field into the new record type.  */
  x = maybe_lookup_field (field, ctx);
  if (x != NULL)
    field = x;

  x = build_fold_indirect_ref (ctx->receiver_decl);
  x = build3 (COMPONENT_REF, TREE_TYPE (field), x, field, NULL);
  if (by_ref)
    x = build_fold_indirect_ref (x);

  return x;
}

/* Build tree nodes to access VAR in the scope outer to CTX.  In the case
   of a parallel, this is a component reference; for workshare constructs
   this is some variable.  */

static tree
build_outer_var_ref (tree var, omp_context *ctx)
{
  tree x;

  if (is_global_var (maybe_lookup_decl_in_outer_ctx (var, ctx)))
    x = var;
  else if (is_variable_sized (var))
    {
      x = TREE_OPERAND (DECL_VALUE_EXPR (var), 0);
      x = build_outer_var_ref (x, ctx);
      x = build_fold_indirect_ref (x);
    }
  else if (is_taskreg_ctx (ctx))
    {
      bool by_ref = use_pointer_for_field (var, NULL);
      x = build_receiver_ref (var, by_ref, ctx);
    }
  else if (ctx->outer)
    x = lookup_decl (var, ctx->outer);
  else if (is_reference (var))
    /* This can happen with orphaned constructs.  If var is reference, it is
       possible it is shared and as such valid.  */
    x = var;
  else
    gcc_unreachable ();

  if (is_reference (var))
    x = build_fold_indirect_ref (x);

  return x;
}

/* Build tree nodes to access the field for VAR on the sender side.  */

static tree
build_sender_ref (tree var, omp_context *ctx)
{
  tree field = lookup_sfield (var, ctx);
  return build3 (COMPONENT_REF, TREE_TYPE (field),
		 ctx->sender_decl, field, NULL);
}
/* Add a new field for VAR inside the structure CTX->SENDER_DECL.  */

static void
install_var_field (tree var, bool by_ref, int mask, omp_context *ctx)
{
  tree field, type, sfield = NULL_TREE;

  gcc_assert ((mask & 1) == 0
	      || !splay_tree_lookup (ctx->field_map, (splay_tree_key) var));
  gcc_assert ((mask & 2) == 0 || !ctx->sfield_map
	      || !splay_tree_lookup (ctx->sfield_map, (splay_tree_key) var));

  type = TREE_TYPE (var);
  if (by_ref)
    type = build_pointer_type (type);
  else if ((mask & 3) == 1 && is_reference (var))
    type = TREE_TYPE (type);

  field = build_decl (DECL_SOURCE_LOCATION (var),
		      FIELD_DECL, DECL_NAME (var), type);

  /* Remember what variable this field was created for.  This does have a
     side effect of making dwarf2out ignore this member, so for helpful
     debugging we clear it later in delete_omp_context.  */
  DECL_ABSTRACT_ORIGIN (field) = var;
  if (type == TREE_TYPE (var))
    {
      DECL_ALIGN (field) = DECL_ALIGN (var);
      DECL_USER_ALIGN (field) = DECL_USER_ALIGN (var);
      TREE_THIS_VOLATILE (field) = TREE_THIS_VOLATILE (var);
    }
  else
    DECL_ALIGN (field) = TYPE_ALIGN (type);

  if ((mask & 3) == 3)
    {
      insert_field_into_struct (ctx->record_type, field);
      if (ctx->srecord_type)
	{
	  sfield = build_decl (DECL_SOURCE_LOCATION (var),
			       FIELD_DECL, DECL_NAME (var), type);
	  DECL_ABSTRACT_ORIGIN (sfield) = var;
	  DECL_ALIGN (sfield) = DECL_ALIGN (field);
	  DECL_USER_ALIGN (sfield) = DECL_USER_ALIGN (field);
	  TREE_THIS_VOLATILE (sfield) = TREE_THIS_VOLATILE (field);
	  insert_field_into_struct (ctx->srecord_type, sfield);
	}
    }
  else
    {
      if (ctx->srecord_type == NULL_TREE)
	{
	  tree t;

	  ctx->srecord_type = lang_hooks.types.make_type (RECORD_TYPE);
	  ctx->sfield_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
	  for (t = TYPE_FIELDS (ctx->record_type); t ; t = TREE_CHAIN (t))
	    {
	      sfield = build_decl (DECL_SOURCE_LOCATION (var),
				   FIELD_DECL, DECL_NAME (t), TREE_TYPE (t));
	      DECL_ABSTRACT_ORIGIN (sfield) = DECL_ABSTRACT_ORIGIN (t);
	      insert_field_into_struct (ctx->srecord_type, sfield);
	      splay_tree_insert (ctx->sfield_map,
				 (splay_tree_key) DECL_ABSTRACT_ORIGIN (t),
				 (splay_tree_value) sfield);
	    }
	}
      sfield = field;
      insert_field_into_struct ((mask & 1) ? ctx->record_type
				: ctx->srecord_type, field);
    }

  if (mask & 1)
    splay_tree_insert (ctx->field_map, (splay_tree_key) var,
		       (splay_tree_value) field);
  if ((mask & 2) && ctx->sfield_map)
    splay_tree_insert (ctx->sfield_map, (splay_tree_key) var,
		       (splay_tree_value) sfield);
}

static tree
install_var_local (tree var, omp_context *ctx)
{
  tree new_var = omp_copy_decl_1 (var, ctx);
  insert_decl_map (&ctx->cb, var, new_var);
  return new_var;
}
/* Adjust the replacement for DECL in CTX for the new context.  This means
   copying the DECL_VALUE_EXPR, and fixing up the type.  */

static void
fixup_remapped_decl (tree decl, omp_context *ctx, bool private_debug)
{
  tree new_decl, size;

  new_decl = lookup_decl (decl, ctx);

  TREE_TYPE (new_decl) = remap_type (TREE_TYPE (decl), &ctx->cb);

  if ((!TREE_CONSTANT (DECL_SIZE (new_decl)) || private_debug)
      && DECL_HAS_VALUE_EXPR_P (decl))
    {
      tree ve = DECL_VALUE_EXPR (decl);
      walk_tree (&ve, copy_tree_body_r, &ctx->cb, NULL);
      SET_DECL_VALUE_EXPR (new_decl, ve);
      DECL_HAS_VALUE_EXPR_P (new_decl) = 1;
    }

  if (!TREE_CONSTANT (DECL_SIZE (new_decl)))
    {
      size = remap_decl (DECL_SIZE (decl), &ctx->cb);
      if (size == error_mark_node)
	size = TYPE_SIZE (TREE_TYPE (new_decl));
      DECL_SIZE (new_decl) = size;

      size = remap_decl (DECL_SIZE_UNIT (decl), &ctx->cb);
      if (size == error_mark_node)
	size = TYPE_SIZE_UNIT (TREE_TYPE (new_decl));
      DECL_SIZE_UNIT (new_decl) = size;
    }
}
/* The callback for remap_decl.  Search all containing contexts for a
   mapping of the variable; this avoids having to duplicate the splay
   tree ahead of time.  We know a mapping doesn't already exist in the
   given context.  Create new mappings to implement default semantics.  */

static tree
omp_copy_decl (tree var, copy_body_data *cb)
{
  omp_context *ctx = (omp_context *) cb;
  tree new_var;

  if (TREE_CODE (var) == LABEL_DECL)
    {
      new_var = create_artificial_label (DECL_SOURCE_LOCATION (var));
      DECL_CONTEXT (new_var) = current_function_decl;
      insert_decl_map (&ctx->cb, var, new_var);
      return new_var;
    }

  while (!is_taskreg_ctx (ctx))
    {
      ctx = ctx->outer;
      if (ctx == NULL)
	return var;
      new_var = maybe_lookup_decl (var, ctx);
      if (new_var)
	return new_var;
    }

  if (is_global_var (var) || decl_function_context (var) != ctx->cb.src_fn)
    return var;

  return error_mark_node;
}
/* Return the parallel region associated with STMT.  */

/* Debugging dumps for parallel regions.  */
void dump_omp_region (FILE *, struct omp_region *, int);
void debug_omp_region (struct omp_region *);
void debug_all_omp_regions (void);

/* Dump the parallel region tree rooted at REGION.  */

void
dump_omp_region (FILE *file, struct omp_region *region, int indent)
{
  fprintf (file, "%*sbb %d: %s\n", indent, "", region->entry->index,
	   gimple_code_name[region->type]);

  if (region->inner)
    dump_omp_region (file, region->inner, indent + 4);

  if (region->cont)
    fprintf (file, "%*sbb %d: GIMPLE_OMP_CONTINUE\n", indent, "",
	     region->cont->index);

  if (region->exit)
    fprintf (file, "%*sbb %d: GIMPLE_OMP_RETURN\n", indent, "",
	     region->exit->index);
  else
    fprintf (file, "%*s[no exit marker]\n", indent, "");

  if (region->next)
    dump_omp_region (file, region->next, indent);
}

void
debug_omp_region (struct omp_region *region)
{
  dump_omp_region (stderr, region, 0);
}

void
debug_all_omp_regions (void)
{
  dump_omp_region (stderr, root_omp_region, 0);
}
/* Create a new parallel region starting at STMT inside region PARENT.  */

static struct omp_region *
new_omp_region (basic_block bb, enum gimple_code type,
		struct omp_region *parent)
{
  struct omp_region *region = XCNEW (struct omp_region);

  region->outer = parent;
  region->entry = bb;
  region->type = type;

  if (parent)
    {
      /* This is a nested region.  Add it to the list of inner
	 regions in PARENT.  */
      region->next = parent->inner;
      parent->inner = region;
    }
  else
    {
      /* This is a toplevel region.  Add it to the list of toplevel
	 regions in ROOT_OMP_REGION.  */
      region->next = root_omp_region;
      root_omp_region = region;
    }

  return region;
}
/* Release the memory associated with the region tree rooted at REGION.  */

static void
free_omp_region_1 (struct omp_region *region)
{
  struct omp_region *i, *n;

  for (i = region->inner; i ; i = n)
    {
      n = i->next;
      free_omp_region_1 (i);
    }

  free (region);
}

/* Release the memory for the entire omp region tree.  */

void
free_omp_regions (void)
{
  struct omp_region *r, *n;
  for (r = root_omp_region; r ; r = n)
    {
      n = r->next;
      free_omp_region_1 (r);
    }
  root_omp_region = NULL;
}
/* Create a new context, with OUTER_CTX being the surrounding context.  */

static omp_context *
new_omp_context (gimple stmt, omp_context *outer_ctx)
{
  omp_context *ctx = XCNEW (omp_context);

  splay_tree_insert (all_contexts, (splay_tree_key) stmt,
		     (splay_tree_value) ctx);
  ctx->stmt = stmt;

  if (outer_ctx)
    {
      ctx->outer = outer_ctx;
      ctx->cb = outer_ctx->cb;
      ctx->cb.block = NULL;
      ctx->depth = outer_ctx->depth + 1;
    }
  else
    {
      ctx->cb.src_fn = current_function_decl;
      ctx->cb.dst_fn = current_function_decl;
      ctx->cb.src_node = cgraph_node (current_function_decl);
      ctx->cb.dst_node = ctx->cb.src_node;
      ctx->cb.src_cfun = cfun;
      ctx->cb.copy_decl = omp_copy_decl;
      ctx->cb.eh_lp_nr = 0;
      ctx->cb.transform_call_graph_edges = CB_CGE_MOVE;
      ctx->depth = 1;
    }

  ctx->cb.decl_map = pointer_map_create ();

  return ctx;
}
maybe_catch_exception (gimple_seq
);
1227 /* Finalize task copyfn. */
1230 finalize_task_copyfn (gimple task_stmt
)
1232 struct function
*child_cfun
;
1233 tree child_fn
, old_fn
;
1234 gimple_seq seq
, new_seq
;
1237 child_fn
= gimple_omp_task_copy_fn (task_stmt
);
1238 if (child_fn
== NULL_TREE
)
1241 child_cfun
= DECL_STRUCT_FUNCTION (child_fn
);
1243 /* Inform the callgraph about the new function. */
1244 DECL_STRUCT_FUNCTION (child_fn
)->curr_properties
1245 = cfun
->curr_properties
;
1247 old_fn
= current_function_decl
;
1248 push_cfun (child_cfun
);
1249 current_function_decl
= child_fn
;
1250 bind
= gimplify_body (&DECL_SAVED_TREE (child_fn
), child_fn
, false);
1251 seq
= gimple_seq_alloc ();
1252 gimple_seq_add_stmt (&seq
, bind
);
1253 new_seq
= maybe_catch_exception (seq
);
1256 bind
= gimple_build_bind (NULL
, new_seq
, NULL
);
1257 seq
= gimple_seq_alloc ();
1258 gimple_seq_add_stmt (&seq
, bind
);
1260 gimple_set_body (child_fn
, seq
);
1262 current_function_decl
= old_fn
;
1264 cgraph_add_new_function (child_fn
, false);
/* Destroy a omp_context data structures.  Called through the splay tree
   value delete callback.  */

static void
delete_omp_context (splay_tree_value value)
{
  omp_context *ctx = (omp_context *) value;

  pointer_map_destroy (ctx->cb.decl_map);

  if (ctx->field_map)
    splay_tree_delete (ctx->field_map);
  if (ctx->sfield_map)
    splay_tree_delete (ctx->sfield_map);

  /* We hijacked DECL_ABSTRACT_ORIGIN earlier.  We need to clear it before
     it produces corrupt debug information.  */
  if (ctx->record_type)
    {
      tree t;
      for (t = TYPE_FIELDS (ctx->record_type); t ; t = TREE_CHAIN (t))
	DECL_ABSTRACT_ORIGIN (t) = NULL;
    }
  if (ctx->srecord_type)
    {
      tree t;
      for (t = TYPE_FIELDS (ctx->srecord_type); t ; t = TREE_CHAIN (t))
	DECL_ABSTRACT_ORIGIN (t) = NULL;
    }

  if (is_task_ctx (ctx))
    finalize_task_copyfn (ctx->stmt);

  XDELETE (ctx);
}
/* Fix up RECEIVER_DECL with a type that has been remapped to the child
   context.  */

static void
fixup_child_record_type (omp_context *ctx)
{
  tree f, type = ctx->record_type;

  /* ??? It isn't sufficient to just call remap_type here, because
     variably_modified_type_p doesn't work the way we expect for
     record types.  Testing each field for whether it needs remapping
     and creating a new record by hand works, however.  */
  for (f = TYPE_FIELDS (type); f ; f = TREE_CHAIN (f))
    if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
      break;
  if (f)
    {
      tree name, new_fields = NULL;

      type = lang_hooks.types.make_type (RECORD_TYPE);
      name = DECL_NAME (TYPE_NAME (ctx->record_type));
      name = build_decl (DECL_SOURCE_LOCATION (ctx->receiver_decl),
			 TYPE_DECL, name, type);
      TYPE_NAME (type) = name;

      for (f = TYPE_FIELDS (ctx->record_type); f ; f = TREE_CHAIN (f))
	{
	  tree new_f = copy_node (f);
	  DECL_CONTEXT (new_f) = type;
	  TREE_TYPE (new_f) = remap_type (TREE_TYPE (f), &ctx->cb);
	  TREE_CHAIN (new_f) = new_fields;
	  walk_tree (&DECL_SIZE (new_f), copy_tree_body_r, &ctx->cb, NULL);
	  walk_tree (&DECL_SIZE_UNIT (new_f), copy_tree_body_r,
		     &ctx->cb, NULL);
	  walk_tree (&DECL_FIELD_OFFSET (new_f), copy_tree_body_r,
		     &ctx->cb, NULL);
	  new_fields = new_f;

	  /* Arrange to be able to look up the receiver field
	     given the sender field.  */
	  splay_tree_insert (ctx->field_map, (splay_tree_key) f,
			     (splay_tree_value) new_f);
	}
      TYPE_FIELDS (type) = nreverse (new_fields);
      layout_type (type);
    }

  TREE_TYPE (ctx->receiver_decl) = build_pointer_type (type);
}
/* Instantiate decls as necessary in CTX to satisfy the data sharing
   specified by CLAUSES.  */

static void
scan_sharing_clauses (tree clauses, omp_context *ctx)
{
  tree c, decl;
  bool scan_array_reductions = false;

  for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
    {
      bool by_ref;

      switch (OMP_CLAUSE_CODE (c))
	{
	case OMP_CLAUSE_PRIVATE:
	  decl = OMP_CLAUSE_DECL (c);
	  if (OMP_CLAUSE_PRIVATE_OUTER_REF (c))
	    goto do_private;
	  else if (!is_variable_sized (decl))
	    install_var_local (decl, ctx);
	  break;

	case OMP_CLAUSE_SHARED:
	  gcc_assert (is_taskreg_ctx (ctx));
	  decl = OMP_CLAUSE_DECL (c);
	  gcc_assert (!COMPLETE_TYPE_P (TREE_TYPE (decl))
		      || !is_variable_sized (decl));
	  /* Global variables don't need to be copied,
	     the receiver side will use them directly.  */
	  if (is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx)))
	    break;
	  by_ref = use_pointer_for_field (decl, ctx);
	  if (! TREE_READONLY (decl)
	      || TREE_ADDRESSABLE (decl)
	      || by_ref
	      || is_reference (decl))
	    {
	      install_var_field (decl, by_ref, 3, ctx);
	      install_var_local (decl, ctx);
	      break;
	    }
	  /* We don't need to copy const scalar vars back.  */
	  OMP_CLAUSE_SET_CODE (c, OMP_CLAUSE_FIRSTPRIVATE);
	  goto do_private;

	case OMP_CLAUSE_LASTPRIVATE:
	  /* Let the corresponding firstprivate clause create
	     the variable.  */
	  if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
	    break;
	  /* FALLTHRU */

	case OMP_CLAUSE_FIRSTPRIVATE:
	case OMP_CLAUSE_REDUCTION:
	  decl = OMP_CLAUSE_DECL (c);
	do_private:
	  if (is_variable_sized (decl))
	    {
	      if (is_task_ctx (ctx))
		install_var_field (decl, false, 1, ctx);
	      break;
	    }
	  else if (is_taskreg_ctx (ctx))
	    {
	      bool global
		= is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx));
	      by_ref = use_pointer_for_field (decl, NULL);

	      if (is_task_ctx (ctx)
		  && (global || by_ref || is_reference (decl)))
		{
		  install_var_field (decl, false, 1, ctx);
		  if (!global)
		    install_var_field (decl, by_ref, 2, ctx);
		}
	      else if (!global)
		install_var_field (decl, by_ref, 3, ctx);
	    }
	  install_var_local (decl, ctx);
	  break;

	case OMP_CLAUSE_COPYPRIVATE:
	case OMP_CLAUSE_COPYIN:
	  decl = OMP_CLAUSE_DECL (c);
	  by_ref = use_pointer_for_field (decl, NULL);
	  install_var_field (decl, by_ref, 3, ctx);
	  break;

	case OMP_CLAUSE_DEFAULT:
	  ctx->default_kind = OMP_CLAUSE_DEFAULT_KIND (c);
	  break;

	case OMP_CLAUSE_IF:
	case OMP_CLAUSE_NUM_THREADS:
	case OMP_CLAUSE_SCHEDULE:
	  if (ctx->outer)
	    scan_omp_op (&OMP_CLAUSE_OPERAND (c, 0), ctx->outer);
	  break;

	case OMP_CLAUSE_NOWAIT:
	case OMP_CLAUSE_ORDERED:
	case OMP_CLAUSE_COLLAPSE:
	case OMP_CLAUSE_UNTIED:
	  break;

	default:
	  gcc_unreachable ();
	}
    }

  for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
    {
      switch (OMP_CLAUSE_CODE (c))
	{
	case OMP_CLAUSE_LASTPRIVATE:
	  /* Let the corresponding firstprivate clause create
	     the variable.  */
	  if (OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c))
	    scan_array_reductions = true;
	  if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
	    break;
	  /* FALLTHRU */

	case OMP_CLAUSE_PRIVATE:
	case OMP_CLAUSE_FIRSTPRIVATE:
	case OMP_CLAUSE_REDUCTION:
	  decl = OMP_CLAUSE_DECL (c);
	  if (is_variable_sized (decl))
	    install_var_local (decl, ctx);
	  fixup_remapped_decl (decl, ctx,
			       OMP_CLAUSE_CODE (c) == OMP_CLAUSE_PRIVATE
			       && OMP_CLAUSE_PRIVATE_DEBUG (c));
	  if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
	      && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
	    scan_array_reductions = true;
	  break;

	case OMP_CLAUSE_SHARED:
	  decl = OMP_CLAUSE_DECL (c);
	  if (! is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx)))
	    fixup_remapped_decl (decl, ctx, false);
	  break;

	case OMP_CLAUSE_COPYPRIVATE:
	case OMP_CLAUSE_COPYIN:
	case OMP_CLAUSE_DEFAULT:
	case OMP_CLAUSE_IF:
	case OMP_CLAUSE_NUM_THREADS:
	case OMP_CLAUSE_SCHEDULE:
	case OMP_CLAUSE_NOWAIT:
	case OMP_CLAUSE_ORDERED:
	case OMP_CLAUSE_COLLAPSE:
	case OMP_CLAUSE_UNTIED:
	  break;

	default:
	  gcc_unreachable ();
	}
    }

  if (scan_array_reductions)
    for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
      if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
	  && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
	{
	  scan_omp (OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c), ctx);
	  scan_omp (OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c), ctx);
	}
      else if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE
	       && OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c))
	scan_omp (OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c), ctx);
}
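
/* As a concrete illustration (informal): for

	#pragma omp parallel shared (x) firstprivate (y)

   the first pass above installs fields for X and Y in .omp_data_s via
   install_var_field (X by pointer if use_pointer_for_field says so) and
   creates the child-side copies via install_var_local; the second pass
   then remaps the types and DECL_VALUE_EXPRs of those new copies with
   fixup_remapped_decl.  */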
/* Create a new name for omp child function.  Returns an identifier.  */

static GTY(()) unsigned int tmp_ompfn_id_num;

static tree
create_omp_child_function_name (bool task_copy)
{
  return (clone_function_name (current_function_decl,
			       task_copy ? "_omp_cpyfn" : "_omp_fn"));
}
1539 yet, just the bare decl. */
1542 create_omp_child_function (omp_context
*ctx
, bool task_copy
)
1544 tree decl
, type
, name
, t
;
1546 name
= create_omp_child_function_name (task_copy
);
1548 type
= build_function_type_list (void_type_node
, ptr_type_node
,
1549 ptr_type_node
, NULL_TREE
);
1551 type
= build_function_type_list (void_type_node
, ptr_type_node
, NULL_TREE
);
1553 decl
= build_decl (gimple_location (ctx
->stmt
),
1554 FUNCTION_DECL
, name
, type
);
1557 ctx
->cb
.dst_fn
= decl
;
1559 gimple_omp_task_set_copy_fn (ctx
->stmt
, decl
);
1561 TREE_STATIC (decl
) = 1;
1562 TREE_USED (decl
) = 1;
1563 DECL_ARTIFICIAL (decl
) = 1;
1564 DECL_IGNORED_P (decl
) = 0;
1565 TREE_PUBLIC (decl
) = 0;
1566 DECL_UNINLINABLE (decl
) = 1;
1567 DECL_EXTERNAL (decl
) = 0;
1568 DECL_CONTEXT (decl
) = NULL_TREE
;
1569 DECL_INITIAL (decl
) = make_node (BLOCK
);
1571 t
= build_decl (DECL_SOURCE_LOCATION (decl
),
1572 RESULT_DECL
, NULL_TREE
, void_type_node
);
1573 DECL_ARTIFICIAL (t
) = 1;
1574 DECL_IGNORED_P (t
) = 1;
1575 DECL_CONTEXT (t
) = decl
;
1576 DECL_RESULT (decl
) = t
;
1578 t
= build_decl (DECL_SOURCE_LOCATION (decl
),
1579 PARM_DECL
, get_identifier (".omp_data_i"), ptr_type_node
);
1580 DECL_ARTIFICIAL (t
) = 1;
1581 DECL_ARG_TYPE (t
) = ptr_type_node
;
1582 DECL_CONTEXT (t
) = current_function_decl
;
1584 DECL_ARGUMENTS (decl
) = t
;
1586 ctx
->receiver_decl
= t
;
1589 t
= build_decl (DECL_SOURCE_LOCATION (decl
),
1590 PARM_DECL
, get_identifier (".omp_data_o"),
1592 DECL_ARTIFICIAL (t
) = 1;
1593 DECL_ARG_TYPE (t
) = ptr_type_node
;
1594 DECL_CONTEXT (t
) = current_function_decl
;
1596 TREE_ADDRESSABLE (t
) = 1;
1597 TREE_CHAIN (t
) = DECL_ARGUMENTS (decl
);
1598 DECL_ARGUMENTS (decl
) = t
;
1601 /* Allocate memory for the function structure. The call to
1602 allocate_struct_function clobbers CFUN, so we need to restore
1604 push_struct_function (decl
);
1605 cfun
->function_end_locus
= gimple_location (ctx
->stmt
);
/* Scan an OpenMP parallel directive.  */

static void
scan_omp_parallel (gimple_stmt_iterator *gsi, omp_context *outer_ctx)
{
  omp_context *ctx;
  tree name;
  gimple stmt = gsi_stmt (*gsi);

  /* Ignore parallel directives with empty bodies, unless there
     are copyin clauses.  */
  if (optimize > 0
      && empty_body_p (gimple_omp_body (stmt))
      && find_omp_clause (gimple_omp_parallel_clauses (stmt),
			  OMP_CLAUSE_COPYIN) == NULL)
    {
      gsi_replace (gsi, gimple_build_nop (), false);
      return;
    }

  ctx = new_omp_context (stmt, outer_ctx);
  if (taskreg_nesting_level > 1)
    ctx->is_nested = true;
  ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
  ctx->default_kind = OMP_CLAUSE_DEFAULT_SHARED;
  ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
  name = create_tmp_var_name (".omp_data_s");
  name = build_decl (gimple_location (stmt),
		     TYPE_DECL, name, ctx->record_type);
  TYPE_NAME (ctx->record_type) = name;
  create_omp_child_function (ctx, false);
  gimple_omp_parallel_set_child_fn (stmt, ctx->cb.dst_fn);

  scan_sharing_clauses (gimple_omp_parallel_clauses (stmt), ctx);
  scan_omp (gimple_omp_body (stmt), ctx);

  if (TYPE_FIELDS (ctx->record_type) == NULL)
    ctx->record_type = ctx->receiver_decl = NULL;
  else
    {
      layout_type (ctx->record_type);
      fixup_child_record_type (ctx);
    }
}
/* Scan an OpenMP task directive.  */

static void
scan_omp_task (gimple_stmt_iterator *gsi, omp_context *outer_ctx)
{
  omp_context *ctx;
  tree name, t;
  gimple stmt = gsi_stmt (*gsi);
  location_t loc = gimple_location (stmt);

  /* Ignore task directives with empty bodies.  */
  if (optimize > 0
      && empty_body_p (gimple_omp_body (stmt)))
    {
      gsi_replace (gsi, gimple_build_nop (), false);
      return;
    }

  ctx = new_omp_context (stmt, outer_ctx);
  if (taskreg_nesting_level > 1)
    ctx->is_nested = true;
  ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
  ctx->default_kind = OMP_CLAUSE_DEFAULT_SHARED;
  ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
  name = create_tmp_var_name (".omp_data_s");
  name = build_decl (gimple_location (stmt),
		     TYPE_DECL, name, ctx->record_type);
  TYPE_NAME (ctx->record_type) = name;
  create_omp_child_function (ctx, false);
  gimple_omp_task_set_child_fn (stmt, ctx->cb.dst_fn);

  scan_sharing_clauses (gimple_omp_task_clauses (stmt), ctx);

  if (ctx->srecord_type)
    {
      name = create_tmp_var_name (".omp_data_a");
      name = build_decl (gimple_location (stmt),
			 TYPE_DECL, name, ctx->srecord_type);
      TYPE_NAME (ctx->srecord_type) = name;
      create_omp_child_function (ctx, true);
    }

  scan_omp (gimple_omp_body (stmt), ctx);

  if (TYPE_FIELDS (ctx->record_type) == NULL)
    {
      ctx->record_type = ctx->receiver_decl = NULL;
      t = build_int_cst (long_integer_type_node, 0);
      gimple_omp_task_set_arg_size (stmt, t);
      t = build_int_cst (long_integer_type_node, 1);
      gimple_omp_task_set_arg_align (stmt, t);
    }
  else
    {
      tree *p, vla_fields = NULL_TREE, *q = &vla_fields;
      /* Move VLA fields to the end.  */
      p = &TYPE_FIELDS (ctx->record_type);
      while (*p)
	if (!TYPE_SIZE_UNIT (TREE_TYPE (*p))
	    || ! TREE_CONSTANT (TYPE_SIZE_UNIT (TREE_TYPE (*p))))
	  {
	    *q = *p;
	    *p = TREE_CHAIN (*p);
	    TREE_CHAIN (*q) = NULL_TREE;
	    q = &TREE_CHAIN (*q);
	  }
	else
	  p = &TREE_CHAIN (*p);
      *p = vla_fields;
      layout_type (ctx->record_type);
      fixup_child_record_type (ctx);
      if (ctx->srecord_type)
	layout_type (ctx->srecord_type);
      t = fold_convert_loc (loc, long_integer_type_node,
			    TYPE_SIZE_UNIT (ctx->record_type));
      gimple_omp_task_set_arg_size (stmt, t);
      t = build_int_cst (long_integer_type_node,
			 TYPE_ALIGN_UNIT (ctx->record_type));
      gimple_omp_task_set_arg_align (stmt, t);
    }
}
1741 scan_omp_for (gimple stmt
, omp_context
*outer_ctx
)
1746 ctx
= new_omp_context (stmt
, outer_ctx
);
1748 scan_sharing_clauses (gimple_omp_for_clauses (stmt
), ctx
);
1750 scan_omp (gimple_omp_for_pre_body (stmt
), ctx
);
1751 for (i
= 0; i
< gimple_omp_for_collapse (stmt
); i
++)
1753 scan_omp_op (gimple_omp_for_index_ptr (stmt
, i
), ctx
);
1754 scan_omp_op (gimple_omp_for_initial_ptr (stmt
, i
), ctx
);
1755 scan_omp_op (gimple_omp_for_final_ptr (stmt
, i
), ctx
);
1756 scan_omp_op (gimple_omp_for_incr_ptr (stmt
, i
), ctx
);
1758 scan_omp (gimple_omp_body (stmt
), ctx
);
1761 /* Scan an OpenMP sections directive. */
1764 scan_omp_sections (gimple stmt
, omp_context
*outer_ctx
)
1768 ctx
= new_omp_context (stmt
, outer_ctx
);
1769 scan_sharing_clauses (gimple_omp_sections_clauses (stmt
), ctx
);
1770 scan_omp (gimple_omp_body (stmt
), ctx
);
1773 /* Scan an OpenMP single directive. */
1776 scan_omp_single (gimple stmt
, omp_context
*outer_ctx
)
1781 ctx
= new_omp_context (stmt
, outer_ctx
);
1782 ctx
->field_map
= splay_tree_new (splay_tree_compare_pointers
, 0, 0);
1783 ctx
->record_type
= lang_hooks
.types
.make_type (RECORD_TYPE
);
1784 name
= create_tmp_var_name (".omp_copy_s");
1785 name
= build_decl (gimple_location (stmt
),
1786 TYPE_DECL
, name
, ctx
->record_type
);
1787 TYPE_NAME (ctx
->record_type
) = name
;
1789 scan_sharing_clauses (gimple_omp_single_clauses (stmt
), ctx
);
1790 scan_omp (gimple_omp_body (stmt
), ctx
);
1792 if (TYPE_FIELDS (ctx
->record_type
) == NULL
)
1793 ctx
->record_type
= NULL
;
1795 layout_type (ctx
->record_type
);
/* Check OpenMP nesting restrictions.  */
static void
check_omp_nesting_restrictions (gimple stmt, omp_context *ctx)
{
  switch (gimple_code (stmt))
    {
    case GIMPLE_OMP_FOR:
    case GIMPLE_OMP_SECTIONS:
    case GIMPLE_OMP_SINGLE:
    case GIMPLE_CALL:
      for (; ctx != NULL; ctx = ctx->outer)
	switch (gimple_code (ctx->stmt))
	  {
	  case GIMPLE_OMP_FOR:
	  case GIMPLE_OMP_SECTIONS:
	  case GIMPLE_OMP_SINGLE:
	  case GIMPLE_OMP_ORDERED:
	  case GIMPLE_OMP_MASTER:
	  case GIMPLE_OMP_TASK:
	    if (is_gimple_call (stmt))
	      {
		warning (0, "barrier region may not be closely nested inside "
			    "of work-sharing, critical, ordered, master or "
			    "explicit task region");
		return;
	      }
	    warning (0, "work-sharing region may not be closely nested inside "
			"of work-sharing, critical, ordered, master or explicit "
			"task region");
	    return;
	  case GIMPLE_OMP_PARALLEL:
	    return;
	  default:
	    break;
	  }
      break;
    case GIMPLE_OMP_MASTER:
      for (; ctx != NULL; ctx = ctx->outer)
	switch (gimple_code (ctx->stmt))
	  {
	  case GIMPLE_OMP_FOR:
	  case GIMPLE_OMP_SECTIONS:
	  case GIMPLE_OMP_SINGLE:
	  case GIMPLE_OMP_TASK:
	    warning (0, "master region may not be closely nested inside "
			"of work-sharing or explicit task region");
	    return;
	  case GIMPLE_OMP_PARALLEL:
	    return;
	  default:
	    break;
	  }
      break;
    case GIMPLE_OMP_ORDERED:
      for (; ctx != NULL; ctx = ctx->outer)
	switch (gimple_code (ctx->stmt))
	  {
	  case GIMPLE_OMP_CRITICAL:
	  case GIMPLE_OMP_TASK:
	    warning (0, "ordered region may not be closely nested inside "
			"of critical or explicit task region");
	    return;
	  case GIMPLE_OMP_FOR:
	    if (find_omp_clause (gimple_omp_for_clauses (ctx->stmt),
				 OMP_CLAUSE_ORDERED) == NULL)
	      warning (0, "ordered region must be closely nested inside "
			  "a loop region with an ordered clause");
	    return;
	  case GIMPLE_OMP_PARALLEL:
	    return;
	  default:
	    break;
	  }
      break;
    case GIMPLE_OMP_CRITICAL:
      for (; ctx != NULL; ctx = ctx->outer)
	if (gimple_code (ctx->stmt) == GIMPLE_OMP_CRITICAL
	    && (gimple_omp_critical_name (stmt)
		== gimple_omp_critical_name (ctx->stmt)))
	  {
	    warning (0, "critical region may not be nested inside a critical "
			"region with the same name");
	    return;
	  }
      break;
    default:
      break;
    }
}
/* Helper function scan_omp.

   Callback for walk_tree or operators in walk_gimple_stmt used to
   scan for OpenMP directives in TP.  */

static tree
scan_omp_1_op (tree *tp, int *walk_subtrees, void *data)
{
  struct walk_stmt_info *wi = (struct walk_stmt_info *) data;
  omp_context *ctx = (omp_context *) wi->info;
  tree t = *tp;

  switch (TREE_CODE (t))
    {
    case VAR_DECL:
    case PARM_DECL:
    case LABEL_DECL:
    case RESULT_DECL:
      if (ctx)
	*tp = remap_decl (t, &ctx->cb);
      break;

    default:
      if (ctx && TYPE_P (t))
	*tp = remap_type (t, &ctx->cb);
      else if (!DECL_P (t))
	{
	  *walk_subtrees = 1;
	  if (ctx)
	    TREE_TYPE (t) = remap_type (TREE_TYPE (t), &ctx->cb);
	}
      break;
    }

  return NULL_TREE;
}
/* Helper function for scan_omp.

   Callback for walk_gimple_stmt used to scan for OpenMP directives in
   the current statement in GSI.  */

static tree
scan_omp_1_stmt (gimple_stmt_iterator *gsi, bool *handled_ops_p,
		 struct walk_stmt_info *wi)
{
  gimple stmt = gsi_stmt (*gsi);
  omp_context *ctx = (omp_context *) wi->info;

  if (gimple_has_location (stmt))
    input_location = gimple_location (stmt);

  /* Check the OpenMP nesting restrictions.  */
  if (ctx != NULL)
    {
      if (is_gimple_omp (stmt))
	check_omp_nesting_restrictions (stmt, ctx);
      else if (is_gimple_call (stmt))
	{
	  tree fndecl = gimple_call_fndecl (stmt);
	  if (fndecl && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL
	      && DECL_FUNCTION_CODE (fndecl) == BUILT_IN_GOMP_BARRIER)
	    check_omp_nesting_restrictions (stmt, ctx);
	}
    }

  *handled_ops_p = true;

  switch (gimple_code (stmt))
    {
    case GIMPLE_OMP_PARALLEL:
      taskreg_nesting_level++;
      scan_omp_parallel (gsi, ctx);
      taskreg_nesting_level--;
      break;

    case GIMPLE_OMP_TASK:
      taskreg_nesting_level++;
      scan_omp_task (gsi, ctx);
      taskreg_nesting_level--;
      break;

    case GIMPLE_OMP_FOR:
      scan_omp_for (stmt, ctx);
      break;

    case GIMPLE_OMP_SECTIONS:
      scan_omp_sections (stmt, ctx);
      break;

    case GIMPLE_OMP_SINGLE:
      scan_omp_single (stmt, ctx);
      break;

    case GIMPLE_OMP_SECTION:
    case GIMPLE_OMP_MASTER:
    case GIMPLE_OMP_ORDERED:
    case GIMPLE_OMP_CRITICAL:
      ctx = new_omp_context (stmt, ctx);
      scan_omp (gimple_omp_body (stmt), ctx);
      break;

    case GIMPLE_BIND:
      {
	tree var;

	*handled_ops_p = false;
	if (ctx)
	  for (var = gimple_bind_vars (stmt); var ; var = TREE_CHAIN (var))
	    insert_decl_map (&ctx->cb, var, var);
      }
      break;
    default:
      *handled_ops_p = false;
      break;
    }

  return NULL_TREE;
}
/* Scan all the statements starting at the current statement.  CTX
   contains context information about the OpenMP directives and
   clauses found during the scan.  */

static void
scan_omp (gimple_seq body, omp_context *ctx)
{
  location_t saved_location;
  struct walk_stmt_info wi;

  memset (&wi, 0, sizeof (wi));
  wi.info = ctx;
  wi.want_locations = true;

  saved_location = input_location;
  walk_gimple_seq (body, scan_omp_1_stmt, scan_omp_1_op, &wi);
  input_location = saved_location;
}
2033 /* Build a call to GOMP_barrier. */
2036 build_omp_barrier (void)
2038 return build_call_expr (built_in_decls
[BUILT_IN_GOMP_BARRIER
], 0);
2041 /* If a context was created for STMT when it was scanned, return it. */
2043 static omp_context
*
2044 maybe_lookup_ctx (gimple stmt
)
2047 n
= splay_tree_lookup (all_contexts
, (splay_tree_key
) stmt
);
2048 return n
? (omp_context
*) n
->value
: NULL
;
/* Find the mapping for DECL in CTX or the immediately enclosing
   context that has a mapping for DECL.

   If CTX is a nested parallel directive, we may have to use the decl
   mappings created in CTX's parent context.  Suppose that we have the
   following parallel nesting (variable UIDs showed for clarity):

	iD.1562 = 0;
	#omp parallel shared(iD.1562)		-> outer parallel
	  iD.1562 = iD.1562 + 1;

	  #omp parallel shared (iD.1562)	-> inner parallel
	     iD.1562 = iD.1562 - 1;

   Each parallel structure will create a distinct .omp_data_s structure
   for copying iD.1562 in/out of the directive:

	outer parallel		.omp_data_s.1.i -> iD.1562
	inner parallel		.omp_data_s.2.i -> iD.1562

   A shared variable mapping will produce a copy-out operation before
   the parallel directive and a copy-in operation after it.  So, in
   this case we would have:

	iD.1562 = 0;
	.omp_data_o.1.i = iD.1562;
	#omp parallel shared(iD.1562)		-> outer parallel
	  .omp_data_i.1 = &.omp_data_o.1
	  .omp_data_i.1->i = .omp_data_i.1->i + 1;

	  .omp_data_o.2.i = iD.1562;		-> **
	  #omp parallel shared(iD.1562)		-> inner parallel
	    .omp_data_i.2 = &.omp_data_o.2
	    .omp_data_i.2->i = .omp_data_i.2->i - 1;

   ** This is a problem.  The symbol iD.1562 cannot be referenced
   inside the body of the outer parallel region.  But since we are
   emitting this copy operation while expanding the inner parallel
   directive, we need to access the CTX structure of the outer
   parallel directive to get the correct mapping:

	.omp_data_o.2.i = .omp_data_i.1->i

   Since there may be other workshare or parallel directives enclosing
   the parallel directive, it may be necessary to walk up the context
   parent chain.  This is not a problem in general because nested
   parallelism happens only rarely.  */

static tree
lookup_decl_in_outer_ctx (tree decl, omp_context *ctx)
{
  tree t;
  omp_context *up;

  for (up = ctx->outer, t = NULL; up && t == NULL; up = up->outer)
    t = maybe_lookup_decl (decl, up);

  gcc_assert (!ctx->is_nested || t || is_global_var (decl));

  return t ? t : decl;
}


/* Similar to lookup_decl_in_outer_ctx, but return DECL if not found
   in outer contexts.  */

static tree
maybe_lookup_decl_in_outer_ctx (tree decl, omp_context *ctx)
{
  tree t = NULL;
  omp_context *up;

  for (up = ctx->outer, t = NULL; up && t == NULL; up = up->outer)
    t = maybe_lookup_decl (decl, up);

  return t ? t : decl;
}
/* Construct the initialization value for reduction CLAUSE.  */

tree
omp_reduction_init (tree clause, tree type)
{
  location_t loc = OMP_CLAUSE_LOCATION (clause);
  switch (OMP_CLAUSE_REDUCTION_CODE (clause))
    {
    case PLUS_EXPR:
    case MINUS_EXPR:
    case BIT_IOR_EXPR:
    case BIT_XOR_EXPR:
    case TRUTH_OR_EXPR:
    case TRUTH_ORIF_EXPR:
    case TRUTH_XOR_EXPR:
    case NE_EXPR:
      return fold_convert_loc (loc, type, integer_zero_node);

    case MULT_EXPR:
    case TRUTH_AND_EXPR:
    case TRUTH_ANDIF_EXPR:
    case EQ_EXPR:
      return fold_convert_loc (loc, type, integer_one_node);

    case BIT_AND_EXPR:
      return fold_convert_loc (loc, type, integer_minus_one_node);

    case MAX_EXPR:
      if (SCALAR_FLOAT_TYPE_P (type))
        {
          REAL_VALUE_TYPE max, min;
          if (HONOR_INFINITIES (TYPE_MODE (type)))
            {
              real_inf (&max);
              real_arithmetic (&min, NEGATE_EXPR, &max, NULL);
            }
          else
            real_maxval (&min, 1, TYPE_MODE (type));
          return build_real (type, min);
        }
      else
        {
          gcc_assert (INTEGRAL_TYPE_P (type));
          return TYPE_MIN_VALUE (type);
        }

    case MIN_EXPR:
      if (SCALAR_FLOAT_TYPE_P (type))
        {
          REAL_VALUE_TYPE max;
          if (HONOR_INFINITIES (TYPE_MODE (type)))
            real_inf (&max);
          else
            real_maxval (&max, 0, TYPE_MODE (type));
          return build_real (type, max);
        }
      else
        {
          gcc_assert (INTEGRAL_TYPE_P (type));
          return TYPE_MAX_VALUE (type);
        }

    default:
      gcc_unreachable ();
    }
}
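
/* For reference, the identity elements chosen above (assuming a plain
   arithmetic TYPE):

	+, -, |, ^, ||		->  0
	*, &&			->  1
	&			-> ~0 (all bits set)
	max			-> -Inf, or TYPE_MIN_VALUE for integral types
	min			-> +Inf, or TYPE_MAX_VALUE for integral types  */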
/* Generate code to implement the input clauses, FIRSTPRIVATE and COPYIN,
   from the receiver (aka child) side and initializers for REFERENCE_TYPE
   private variables.  Initialization statements go in ILIST, while calls
   to destructors go in DLIST.  */

static void
lower_rec_input_clauses (tree clauses, gimple_seq *ilist, gimple_seq *dlist,
			 omp_context *ctx)
{
  gimple_stmt_iterator diter;
  tree c, dtor, copyin_seq, x, ptr;
  bool copyin_by_ref = false;
  bool lastprivate_firstprivate = false;
  int pass;

  *dlist = gimple_seq_alloc ();
  diter = gsi_start (*dlist);
  copyin_seq = NULL;

  /* Do all the fixed sized types in the first pass, and the variable sized
     types in the second pass.  This makes sure that the scalar arguments to
     the variable sized types are processed before we use them in the
     variable sized operations.  */
  for (pass = 0; pass < 2; ++pass)
    {
      for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
	{
	  enum omp_clause_code c_kind = OMP_CLAUSE_CODE (c);
	  tree var, new_var;
	  bool by_ref;
	  location_t clause_loc = OMP_CLAUSE_LOCATION (c);

	  switch (c_kind)
	    {
	    case OMP_CLAUSE_PRIVATE:
	      if (OMP_CLAUSE_PRIVATE_DEBUG (c))
		continue;
	      break;
	    case OMP_CLAUSE_SHARED:
	      if (maybe_lookup_decl (OMP_CLAUSE_DECL (c), ctx) == NULL)
		{
		  gcc_assert (is_global_var (OMP_CLAUSE_DECL (c)));
		  continue;
		}
	    case OMP_CLAUSE_FIRSTPRIVATE:
	    case OMP_CLAUSE_COPYIN:
	    case OMP_CLAUSE_REDUCTION:
	      break;
	    case OMP_CLAUSE_LASTPRIVATE:
	      if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
		{
		  lastprivate_firstprivate = true;
		  if (pass != 0)
		    continue;
		}
	      break;
	    default:
	      continue;
	    }

	  new_var = var = OMP_CLAUSE_DECL (c);
	  if (c_kind != OMP_CLAUSE_COPYIN)
	    new_var = lookup_decl (var, ctx);

	  if (c_kind == OMP_CLAUSE_SHARED || c_kind == OMP_CLAUSE_COPYIN)
	    {
	      if (pass != 0)
		continue;
	    }
	  else if (is_variable_sized (var))
	    {
	      /* For variable sized types, we need to allocate the
		 actual storage here.  Call alloca and store the
		 result in the pointer decl that we created elsewhere.  */
	      if (pass == 0)
		continue;

	      if (c_kind != OMP_CLAUSE_FIRSTPRIVATE || !is_task_ctx (ctx))
		{
		  gimple stmt;
		  tree tmp;

		  ptr = DECL_VALUE_EXPR (new_var);
		  gcc_assert (TREE_CODE (ptr) == INDIRECT_REF);
		  ptr = TREE_OPERAND (ptr, 0);
		  gcc_assert (DECL_P (ptr));
		  x = TYPE_SIZE_UNIT (TREE_TYPE (new_var));

		  /* void *tmp = __builtin_alloca */
		  stmt
		    = gimple_build_call (built_in_decls[BUILT_IN_ALLOCA], 1, x);
		  tmp = create_tmp_var_raw (ptr_type_node, NULL);
		  gimple_add_tmp_var (tmp);
		  gimple_call_set_lhs (stmt, tmp);

		  gimple_seq_add_stmt (ilist, stmt);

		  x = fold_convert_loc (clause_loc, TREE_TYPE (ptr), tmp);
		  gimplify_assign (ptr, x, ilist);
		}
	    }
	  else if (is_reference (var))
	    {
	      /* For references that are being privatized for Fortran,
		 allocate new backing storage for the new pointer
		 variable.  This allows us to avoid changing all the
		 code that expects a pointer to something that expects
		 a direct variable.  Note that this doesn't apply to
		 C++, since reference types are disallowed in data
		 sharing clauses there, except for NRV optimized
		 return values.  */
	      if (pass == 0)
		continue;

	      x = TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (new_var)));
	      if (c_kind == OMP_CLAUSE_FIRSTPRIVATE && is_task_ctx (ctx))
		{
		  x = build_receiver_ref (var, false, ctx);
		  x = build_fold_addr_expr_loc (clause_loc, x);
		}
	      else if (TREE_CONSTANT (x))
		{
		  const char *name = NULL;
		  if (DECL_NAME (var))
		    name = IDENTIFIER_POINTER (DECL_NAME (new_var));

		  x = create_tmp_var_raw (TREE_TYPE (TREE_TYPE (new_var)),
					  name);
		  gimple_add_tmp_var (x);
		  TREE_ADDRESSABLE (x) = 1;
		  x = build_fold_addr_expr_loc (clause_loc, x);
		}
	      else
		x = build_call_expr_loc (clause_loc,
					 built_in_decls[BUILT_IN_ALLOCA], 1, x);

	      x = fold_convert_loc (clause_loc, TREE_TYPE (new_var), x);
	      gimplify_assign (new_var, x, ilist);

	      new_var = build_fold_indirect_ref_loc (clause_loc, new_var);
	    }
	  else if (c_kind == OMP_CLAUSE_REDUCTION
		   && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
	    {
	      if (pass == 0)
		continue;
	    }
	  else if (pass != 0)
	    continue;

	  switch (OMP_CLAUSE_CODE (c))
	    {
	    case OMP_CLAUSE_SHARED:
	      /* Shared global vars are just accessed directly.  */
	      if (is_global_var (new_var))
		break;
	      /* Set up the DECL_VALUE_EXPR for shared variables now.  This
		 needs to be delayed until after fixup_child_record_type so
		 that we get the correct type during the dereference.  */
	      by_ref = use_pointer_for_field (var, ctx);
	      x = build_receiver_ref (var, by_ref, ctx);
	      SET_DECL_VALUE_EXPR (new_var, x);
	      DECL_HAS_VALUE_EXPR_P (new_var) = 1;

	      /* ??? If VAR is not passed by reference, and the variable
		 hasn't been initialized yet, then we'll get a warning for
		 the store into the omp_data_s structure.  Ideally, we'd be
		 able to notice this and not store anything at all, but
		 we're generating code too early.  Suppress the warning.  */
	      if (!by_ref)
		TREE_NO_WARNING (var) = 1;
	      break;

	    case OMP_CLAUSE_LASTPRIVATE:
	      if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
		break;
	      /* FALLTHRU */

	    case OMP_CLAUSE_PRIVATE:
	      if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_PRIVATE)
		x = build_outer_var_ref (var, ctx);
	      else if (OMP_CLAUSE_PRIVATE_OUTER_REF (c))
		{
		  if (is_task_ctx (ctx))
		    x = build_receiver_ref (var, false, ctx);
		  else
		    x = build_outer_var_ref (var, ctx);
		}
	      else
		x = NULL;
	      x = lang_hooks.decls.omp_clause_default_ctor (c, new_var, x);
	      if (x)
		gimplify_and_add (x, ilist);
	      /* FALLTHRU */

	    do_dtor:
	      x = lang_hooks.decls.omp_clause_dtor (c, new_var);
	      if (x)
		{
		  gimple_seq tseq = NULL;

		  dtor = x;
		  gimplify_stmt (&dtor, &tseq);
		  gsi_insert_seq_before (&diter, tseq, GSI_SAME_STMT);
		}
	      break;

	    case OMP_CLAUSE_FIRSTPRIVATE:
	      if (is_task_ctx (ctx))
		{
		  if (is_reference (var) || is_variable_sized (var))
		    goto do_dtor;
		  else if (is_global_var (maybe_lookup_decl_in_outer_ctx (var,
									  ctx))
			   || use_pointer_for_field (var, NULL))
		    {
		      x = build_receiver_ref (var, false, ctx);
		      SET_DECL_VALUE_EXPR (new_var, x);
		      DECL_HAS_VALUE_EXPR_P (new_var) = 1;
		      goto do_dtor;
		    }
		}
	      x = build_outer_var_ref (var, ctx);
	      x = lang_hooks.decls.omp_clause_copy_ctor (c, new_var, x);
	      gimplify_and_add (x, ilist);
	      goto do_dtor;

	    case OMP_CLAUSE_COPYIN:
	      by_ref = use_pointer_for_field (var, NULL);
	      x = build_receiver_ref (var, by_ref, ctx);
	      x = lang_hooks.decls.omp_clause_assign_op (c, new_var, x);
	      append_to_statement_list (x, &copyin_seq);
	      copyin_by_ref |= by_ref;
	      break;

	    case OMP_CLAUSE_REDUCTION:
	      if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
		{
		  tree placeholder = OMP_CLAUSE_REDUCTION_PLACEHOLDER (c);
		  x = build_outer_var_ref (var, ctx);

		  if (is_reference (var))
		    x = build_fold_addr_expr_loc (clause_loc, x);
		  SET_DECL_VALUE_EXPR (placeholder, x);
		  DECL_HAS_VALUE_EXPR_P (placeholder) = 1;
		  lower_omp (OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c), ctx);
		  gimple_seq_add_seq (ilist,
				      OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c));
		  OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c) = NULL;
		  DECL_HAS_VALUE_EXPR_P (placeholder) = 0;
		}
	      else
		{
		  x = omp_reduction_init (c, TREE_TYPE (new_var));
		  gcc_assert (TREE_CODE (TREE_TYPE (new_var)) != ARRAY_TYPE);
		  gimplify_assign (new_var, x, ilist);
		}
	      break;

	    default:
	      gcc_unreachable ();
	    }
	}
    }

  /* The copyin sequence is not to be executed by the main thread, since
     that would result in self-copies.  Perhaps not visible to scalars,
     but it certainly is to C++ operator=.  */
  if (copyin_seq)
    {
      x = build_call_expr (built_in_decls[BUILT_IN_OMP_GET_THREAD_NUM], 0);
      x = build2 (NE_EXPR, boolean_type_node, x,
		  build_int_cst (TREE_TYPE (x), 0));
      x = build3 (COND_EXPR, void_type_node, x, copyin_seq, NULL);
      gimplify_and_add (x, ilist);
    }

  /* If any copyin variable is passed by reference, we must ensure the
     master thread doesn't modify it before it is copied over in all
     threads.  Similarly for variables in both firstprivate and
     lastprivate clauses we need to ensure the lastprivate copying
     happens after firstprivate copying in all threads.  */
  if (copyin_by_ref || lastprivate_firstprivate)
    gimplify_and_add (build_omp_barrier (), ilist);
}
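
/* For illustration (schematic, decl names invented), the receiver side
   built here for

	#pragma omp parallel firstprivate(a) copyin(t)

   looks roughly like

	a.priv = .omp_data_i->a;		copy in the initial value
	if (__builtin_omp_get_thread_num () != 0)
	  t = .omp_data_i->t;			copyin skips the master
	GOMP_barrier ();			only when copyin is by reference
						or firstprivate+lastprivate  */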
/* Generate code to implement the LASTPRIVATE clauses.  This is used for
   both parallel and workshare constructs.  PREDICATE may be NULL if it's
   always true.  */

static void
lower_lastprivate_clauses (tree clauses, tree predicate, gimple_seq *stmt_list,
			   omp_context *ctx)
{
  tree x, c, label = NULL;
  bool par_clauses = false;

  /* Early exit if there are no lastprivate clauses.  */
  clauses = find_omp_clause (clauses, OMP_CLAUSE_LASTPRIVATE);
  if (clauses == NULL)
    {
      /* If this was a workshare clause, see if it had been combined
	 with its parallel.  In that case, look for the clauses on the
	 parallel statement itself.  */
      if (is_parallel_ctx (ctx))
	return;

      ctx = ctx->outer;
      if (ctx == NULL || !is_parallel_ctx (ctx))
	return;

      clauses = find_omp_clause (gimple_omp_parallel_clauses (ctx->stmt),
				 OMP_CLAUSE_LASTPRIVATE);
      if (clauses == NULL)
	return;
      par_clauses = true;
    }

  if (predicate)
    {
      gimple stmt;
      tree label_true, arm1, arm2;

      label = create_artificial_label (UNKNOWN_LOCATION);
      label_true = create_artificial_label (UNKNOWN_LOCATION);
      arm1 = TREE_OPERAND (predicate, 0);
      arm2 = TREE_OPERAND (predicate, 1);
      gimplify_expr (&arm1, stmt_list, NULL, is_gimple_val, fb_rvalue);
      gimplify_expr (&arm2, stmt_list, NULL, is_gimple_val, fb_rvalue);
      stmt = gimple_build_cond (TREE_CODE (predicate), arm1, arm2,
				label_true, label);
      gimple_seq_add_stmt (stmt_list, stmt);
      gimple_seq_add_stmt (stmt_list, gimple_build_label (label_true));
    }

  for (c = clauses; c ;)
    {
      tree var, new_var;
      location_t clause_loc = OMP_CLAUSE_LOCATION (c);

      if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE)
	{
	  var = OMP_CLAUSE_DECL (c);
	  new_var = lookup_decl (var, ctx);

	  if (OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c))
	    {
	      lower_omp (OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c), ctx);
	      gimple_seq_add_seq (stmt_list,
				  OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c));
	    }
	  OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c) = NULL;

	  x = build_outer_var_ref (var, ctx);
	  if (is_reference (var))
	    new_var = build_fold_indirect_ref_loc (clause_loc, new_var);
	  x = lang_hooks.decls.omp_clause_assign_op (c, x, new_var);
	  gimplify_and_add (x, stmt_list);
	}
      c = OMP_CLAUSE_CHAIN (c);
      if (c == NULL && !par_clauses)
	{
	  /* If this was a workshare clause, see if it had been combined
	     with its parallel.  In that case, continue looking for the
	     clauses also on the parallel statement itself.  */
	  if (is_parallel_ctx (ctx))
	    break;

	  ctx = ctx->outer;
	  if (ctx == NULL || !is_parallel_ctx (ctx))
	    break;

	  c = find_omp_clause (gimple_omp_parallel_clauses (ctx->stmt),
			       OMP_CLAUSE_LASTPRIVATE);
	  par_clauses = true;
	}
    }

  if (label)
    gimple_seq_add_stmt (stmt_list, gimple_build_label (label));
}
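
/* Schematically (decl names invented), for "#pragma omp for lastprivate(v)"
   the predicate built above guards the copy-out so that only the thread
   which ran the logically last iteration publishes its value:

	if (D.iter == D.last) goto L.true; else goto L.done;
     L.true:
	v = v.priv;
     L.done:  */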
/* Generate code to implement the REDUCTION clauses.  */

static void
lower_reduction_clauses (tree clauses, gimple_seq *stmt_seqp, omp_context *ctx)
{
  gimple_seq sub_seq = NULL;
  gimple stmt;
  tree x, c;
  int count = 0;

  /* First see if there is exactly one reduction clause.  Use OMP_ATOMIC
     update in that case, otherwise use a lock.  */
  for (c = clauses; c && count < 2; c = OMP_CLAUSE_CHAIN (c))
    if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION)
      {
	if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
	  {
	    /* Never use OMP_ATOMIC for array reductions.  */
	    count = -1;
	    break;
	  }
	count++;
      }

  if (count == 0)
    return;

  for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
    {
      tree var, ref, new_var;
      enum tree_code code;
      location_t clause_loc = OMP_CLAUSE_LOCATION (c);

      if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_REDUCTION)
	continue;

      var = OMP_CLAUSE_DECL (c);
      new_var = lookup_decl (var, ctx);
      if (is_reference (var))
	new_var = build_fold_indirect_ref_loc (clause_loc, new_var);
      ref = build_outer_var_ref (var, ctx);
      code = OMP_CLAUSE_REDUCTION_CODE (c);

      /* reduction(-:var) sums up the partial results, so it acts
	 identically to reduction(+:var).  */
      if (code == MINUS_EXPR)
	code = PLUS_EXPR;

      if (count == 1)
	{
	  tree addr = build_fold_addr_expr_loc (clause_loc, ref);

	  addr = save_expr (addr);
	  ref = build1 (INDIRECT_REF, TREE_TYPE (TREE_TYPE (addr)), addr);
	  x = fold_build2_loc (clause_loc, code, TREE_TYPE (ref), ref, new_var);
	  x = build2 (OMP_ATOMIC, void_type_node, addr, x);
	  gimplify_and_add (x, stmt_seqp);
	  return;
	}

      if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
	{
	  tree placeholder = OMP_CLAUSE_REDUCTION_PLACEHOLDER (c);

	  if (is_reference (var))
	    ref = build_fold_addr_expr_loc (clause_loc, ref);
	  SET_DECL_VALUE_EXPR (placeholder, ref);
	  DECL_HAS_VALUE_EXPR_P (placeholder) = 1;
	  lower_omp (OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c), ctx);
	  gimple_seq_add_seq (&sub_seq, OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c));
	  OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c) = NULL;
	  OMP_CLAUSE_REDUCTION_PLACEHOLDER (c) = NULL;
	}
      else
	{
	  x = build2 (code, TREE_TYPE (ref), ref, new_var);
	  ref = build_outer_var_ref (var, ctx);
	  gimplify_assign (ref, x, &sub_seq);
	}
    }

  stmt = gimple_build_call (built_in_decls[BUILT_IN_GOMP_ATOMIC_START], 0);
  gimple_seq_add_stmt (stmt_seqp, stmt);

  gimple_seq_add_seq (stmt_seqp, sub_seq);

  stmt = gimple_build_call (built_in_decls[BUILT_IN_GOMP_ATOMIC_END], 0);
  gimple_seq_add_stmt (stmt_seqp, stmt);
}
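
/* Schematic result (invented decl names): a single reduction(+:s) becomes
   one atomic update,

	#pragma omp atomic
	  s += s.priv;

   while two or more reductions share a single locked section:

	GOMP_atomic_start ();
	s1 = s1 + s1.priv;
	s2 = s2 * s2.priv;
	GOMP_atomic_end ();  */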
/* Generate code to implement the COPYPRIVATE clauses.  */

static void
lower_copyprivate_clauses (tree clauses, gimple_seq *slist, gimple_seq *rlist,
			   omp_context *ctx)
{
  tree c;

  for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
    {
      tree var, new_var, ref, x;
      bool by_ref;
      location_t clause_loc = OMP_CLAUSE_LOCATION (c);

      if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_COPYPRIVATE)
	continue;

      var = OMP_CLAUSE_DECL (c);
      by_ref = use_pointer_for_field (var, NULL);

      ref = build_sender_ref (var, ctx);
      x = new_var = lookup_decl_in_outer_ctx (var, ctx);
      if (by_ref)
	{
	  x = build_fold_addr_expr_loc (clause_loc, new_var);
	  x = fold_convert_loc (clause_loc, TREE_TYPE (ref), x);
	}
      gimplify_assign (ref, x, slist);

      ref = build_receiver_ref (var, false, ctx);
      if (by_ref)
	{
	  ref = fold_convert_loc (clause_loc,
				  build_pointer_type (TREE_TYPE (new_var)),
				  ref);
	  ref = build_fold_indirect_ref_loc (clause_loc, ref);
	}
      if (is_reference (var))
	{
	  ref = fold_convert_loc (clause_loc, TREE_TYPE (new_var), ref);
	  ref = build_fold_indirect_ref_loc (clause_loc, ref);
	  new_var = build_fold_indirect_ref_loc (clause_loc, new_var);
	}
      x = lang_hooks.decls.omp_clause_assign_op (c, new_var, ref);
      gimplify_and_add (x, rlist);
    }
}
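
/* Schematically (invented decl names), "#pragma omp single copyprivate(x)"
   makes the executing thread publish its value,

	.omp_data_o.x = x;		sender side, or &x when by reference

   and every other thread then copies it back out of the broadcast
   structure on the receiver side:

	x = .omp_data_i->x;  */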
/* Generate code to implement the clauses, FIRSTPRIVATE, COPYIN, LASTPRIVATE,
   and REDUCTION from the sender (aka parent) side.  */

static void
lower_send_clauses (tree clauses, gimple_seq *ilist, gimple_seq *olist,
		    omp_context *ctx)
{
  tree c;

  for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
    {
      tree val, ref, x, var;
      bool by_ref, do_in = false, do_out = false;
      location_t clause_loc = OMP_CLAUSE_LOCATION (c);

      switch (OMP_CLAUSE_CODE (c))
	{
	case OMP_CLAUSE_PRIVATE:
	  if (OMP_CLAUSE_PRIVATE_OUTER_REF (c))
	    break;
	  continue;
	case OMP_CLAUSE_FIRSTPRIVATE:
	case OMP_CLAUSE_COPYIN:
	case OMP_CLAUSE_LASTPRIVATE:
	case OMP_CLAUSE_REDUCTION:
	  break;
	default:
	  continue;
	}

      val = OMP_CLAUSE_DECL (c);
      var = lookup_decl_in_outer_ctx (val, ctx);

      if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_COPYIN
	  && is_global_var (var))
	continue;
      if (is_variable_sized (val))
	continue;
      by_ref = use_pointer_for_field (val, NULL);

      switch (OMP_CLAUSE_CODE (c))
	{
	case OMP_CLAUSE_PRIVATE:
	case OMP_CLAUSE_FIRSTPRIVATE:
	case OMP_CLAUSE_COPYIN:
	  do_in = true;
	  break;

	case OMP_CLAUSE_LASTPRIVATE:
	  if (by_ref || is_reference (val))
	    {
	      if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
		continue;
	      do_in = true;
	    }
	  else
	    {
	      do_out = true;
	      if (lang_hooks.decls.omp_private_outer_ref (val))
		do_in = true;
	    }
	  break;

	case OMP_CLAUSE_REDUCTION:
	  do_in = true;
	  do_out = !(by_ref || is_reference (val));
	  break;

	default:
	  gcc_unreachable ();
	}

      if (do_in)
	{
	  ref = build_sender_ref (val, ctx);
	  x = by_ref ? build_fold_addr_expr_loc (clause_loc, var) : var;
	  gimplify_assign (ref, x, ilist);
	  if (is_task_ctx (ctx))
	    DECL_ABSTRACT_ORIGIN (TREE_OPERAND (ref, 1)) = NULL;
	}

      if (do_out)
	{
	  ref = build_sender_ref (val, ctx);
	  gimplify_assign (var, ref, olist);
	}
    }
}
/* Generate code to implement SHARED from the sender (aka parent)
   side.  This is trickier, since GIMPLE_OMP_PARALLEL_CLAUSES doesn't
   list things that got automatically shared.  */

static void
lower_send_shared_vars (gimple_seq *ilist, gimple_seq *olist, omp_context *ctx)
{
  tree var, ovar, nvar, f, x, record_type;

  if (ctx->record_type == NULL)
    return;

  record_type = ctx->srecord_type ? ctx->srecord_type : ctx->record_type;
  for (f = TYPE_FIELDS (record_type); f; f = TREE_CHAIN (f))
    {
      ovar = DECL_ABSTRACT_ORIGIN (f);
      nvar = maybe_lookup_decl (ovar, ctx);
      if (!nvar || !DECL_HAS_VALUE_EXPR_P (nvar))
	continue;

      /* If CTX is a nested parallel directive, find the immediately
	 enclosing parallel or workshare construct that contains a
	 mapping for OVAR.  */
      var = lookup_decl_in_outer_ctx (ovar, ctx);

      if (use_pointer_for_field (ovar, ctx))
	{
	  x = build_sender_ref (ovar, ctx);
	  var = build_fold_addr_expr (var);
	  gimplify_assign (x, var, ilist);
	}
      else
	{
	  x = build_sender_ref (ovar, ctx);
	  gimplify_assign (x, var, ilist);

	  if (!TREE_READONLY (var)
	      /* We don't need to receive a new reference to a result
		 or parm decl.  In fact we may not store to it as we will
		 invalidate any pending RSO and generate wrong gimple
		 during inlining.  */
	      && !((TREE_CODE (var) == RESULT_DECL
		    || TREE_CODE (var) == PARM_DECL)
		   && DECL_BY_REFERENCE (var)))
	    {
	      x = build_sender_ref (ovar, ctx);
	      gimplify_assign (var, x, olist);
	    }
	}
    }
}
/* A convenience function to build an empty GIMPLE_COND with just the
   condition.  */

static gimple
gimple_build_cond_empty (tree cond)
{
  enum tree_code pred_code;
  tree lhs, rhs;

  gimple_cond_get_ops_from_tree (cond, &pred_code, &lhs, &rhs);
  return gimple_build_cond (pred_code, lhs, rhs, NULL_TREE, NULL_TREE);
}
/* Build the function calls to GOMP_parallel_start etc to actually
   generate the parallel operation.  REGION is the parallel region
   being expanded.  BB is the block where to insert the code.  WS_ARGS
   will be set if this is a call to a combined parallel+workshare
   construct, it contains the list of additional arguments needed by
   the workshare construct.  */

static void
expand_parallel_call (struct omp_region *region, basic_block bb,
		      gimple entry_stmt, tree ws_args)
{
  tree t, t1, t2, val, cond, c, clauses;
  gimple_stmt_iterator gsi;
  gimple stmt;
  int start_ix;
  location_t clause_loc;

  clauses = gimple_omp_parallel_clauses (entry_stmt);

  /* Determine what flavor of GOMP_parallel_start we will be
     emitting.  */
  start_ix = BUILT_IN_GOMP_PARALLEL_START;
  if (is_combined_parallel (region))
    {
      switch (region->inner->type)
	{
	case GIMPLE_OMP_FOR:
	  gcc_assert (region->inner->sched_kind != OMP_CLAUSE_SCHEDULE_AUTO);
	  start_ix = BUILT_IN_GOMP_PARALLEL_LOOP_STATIC_START
		     + (region->inner->sched_kind
			== OMP_CLAUSE_SCHEDULE_RUNTIME
			? 3 : region->inner->sched_kind);
	  break;
	case GIMPLE_OMP_SECTIONS:
	  start_ix = BUILT_IN_GOMP_PARALLEL_SECTIONS_START;
	  break;
	default:
	  gcc_unreachable ();
	}
    }

  /* By default, the value of NUM_THREADS is zero (selected at run time)
     and there is no conditional.  */
  cond = NULL_TREE;
  val = build_int_cst (unsigned_type_node, 0);

  c = find_omp_clause (clauses, OMP_CLAUSE_IF);
  if (c)
    cond = OMP_CLAUSE_IF_EXPR (c);

  c = find_omp_clause (clauses, OMP_CLAUSE_NUM_THREADS);
  if (c)
    {
      val = OMP_CLAUSE_NUM_THREADS_EXPR (c);
      clause_loc = OMP_CLAUSE_LOCATION (c);
    }
  else
    clause_loc = gimple_location (entry_stmt);

  /* Ensure 'val' is of the correct type.  */
  val = fold_convert_loc (clause_loc, unsigned_type_node, val);

  /* If we found the clause 'if (cond)', build either
     (cond != 0) or (cond ? val : 1u).  */
  if (cond)
    {
      gimple_stmt_iterator gsi;

      cond = gimple_boolify (cond);

      if (integer_zerop (val))
	val = fold_build2_loc (clause_loc,
			       EQ_EXPR, unsigned_type_node, cond,
			       build_int_cst (TREE_TYPE (cond), 0));
      else
	{
	  basic_block cond_bb, then_bb, else_bb;
	  edge e, e_then, e_else;
	  tree tmp_then, tmp_else, tmp_join, tmp_var;

	  tmp_var = create_tmp_var (TREE_TYPE (val), NULL);
	  if (gimple_in_ssa_p (cfun))
	    {
	      tmp_then = make_ssa_name (tmp_var, NULL);
	      tmp_else = make_ssa_name (tmp_var, NULL);
	      tmp_join = make_ssa_name (tmp_var, NULL);
	    }
	  else
	    {
	      tmp_then = tmp_var;
	      tmp_else = tmp_var;
	      tmp_join = tmp_var;
	    }

	  e = split_block (bb, NULL);
	  cond_bb = e->src;
	  bb = e->dest;
	  remove_edge (e);

	  then_bb = create_empty_bb (cond_bb);
	  else_bb = create_empty_bb (then_bb);
	  set_immediate_dominator (CDI_DOMINATORS, then_bb, cond_bb);
	  set_immediate_dominator (CDI_DOMINATORS, else_bb, cond_bb);

	  stmt = gimple_build_cond_empty (cond);
	  gsi = gsi_start_bb (cond_bb);
	  gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);

	  gsi = gsi_start_bb (then_bb);
	  stmt = gimple_build_assign (tmp_then, val);
	  gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);

	  gsi = gsi_start_bb (else_bb);
	  stmt = gimple_build_assign
		   (tmp_else, build_int_cst (unsigned_type_node, 1));
	  gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);

	  make_edge (cond_bb, then_bb, EDGE_TRUE_VALUE);
	  make_edge (cond_bb, else_bb, EDGE_FALSE_VALUE);
	  e_then = make_edge (then_bb, bb, EDGE_FALLTHRU);
	  e_else = make_edge (else_bb, bb, EDGE_FALLTHRU);

	  if (gimple_in_ssa_p (cfun))
	    {
	      gimple phi = create_phi_node (tmp_join, bb);
	      SSA_NAME_DEF_STMT (tmp_join) = phi;
	      add_phi_arg (phi, tmp_then, e_then, UNKNOWN_LOCATION);
	      add_phi_arg (phi, tmp_else, e_else, UNKNOWN_LOCATION);
	    }

	  val = tmp_join;
	}

      gsi = gsi_start_bb (bb);
      val = force_gimple_operand_gsi (&gsi, val, true, NULL_TREE,
				      false, GSI_CONTINUE_LINKING);
    }

  gsi = gsi_last_bb (bb);
  t = gimple_omp_parallel_data_arg (entry_stmt);
  if (t == NULL)
    t1 = null_pointer_node;
  else
    t1 = build_fold_addr_expr (t);
  t2 = build_fold_addr_expr (gimple_omp_parallel_child_fn (entry_stmt));

  if (ws_args)
    {
      tree args = tree_cons (NULL, t2,
			     tree_cons (NULL, t1,
					tree_cons (NULL, val, ws_args)));
      t = build_function_call_expr (UNKNOWN_LOCATION,
				    built_in_decls[start_ix], args);
    }
  else
    t = build_call_expr (built_in_decls[start_ix], 3, t2, t1, val);

  force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
			    false, GSI_CONTINUE_LINKING);

  t = gimple_omp_parallel_data_arg (entry_stmt);
  if (t == NULL)
    t = null_pointer_node;
  else
    t = build_fold_addr_expr (t);
  t = build_call_expr_loc (gimple_location (entry_stmt),
			   gimple_omp_parallel_child_fn (entry_stmt), 1, t);
  force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
			    false, GSI_CONTINUE_LINKING);

  t = build_call_expr_loc (gimple_location (entry_stmt),
			   built_in_decls[BUILT_IN_GOMP_PARALLEL_END], 0);
  force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
			    false, GSI_CONTINUE_LINKING);
}
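
/* For reference, a plain "#pragma omp parallel num_threads(4)" thus ends
   up as roughly (schematic, decl names invented):

	.omp_data_o.1.x = x;
	GOMP_parallel_start (foo._omp_fn.0, &.omp_data_o.1, 4);
	foo._omp_fn.0 (&.omp_data_o.1);		the master joins the team
	GOMP_parallel_end ();
	x = .omp_data_o.1.x;  */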
/* Build the function call to GOMP_task to actually
   generate the task operation.  BB is the block where to insert the code.  */

static void
expand_task_call (basic_block bb, gimple entry_stmt)
{
  tree t, t1, t2, t3, flags, cond, c, clauses;
  gimple_stmt_iterator gsi;
  location_t loc = gimple_location (entry_stmt);

  clauses = gimple_omp_task_clauses (entry_stmt);

  c = find_omp_clause (clauses, OMP_CLAUSE_IF);
  if (c)
    cond = gimple_boolify (OMP_CLAUSE_IF_EXPR (c));
  else
    cond = boolean_true_node;

  c = find_omp_clause (clauses, OMP_CLAUSE_UNTIED);
  flags = build_int_cst (unsigned_type_node, (c ? 1 : 0));

  gsi = gsi_last_bb (bb);
  t = gimple_omp_task_data_arg (entry_stmt);
  if (t == NULL)
    t2 = null_pointer_node;
  else
    t2 = build_fold_addr_expr_loc (loc, t);
  t1 = build_fold_addr_expr_loc (loc, gimple_omp_task_child_fn (entry_stmt));
  t = gimple_omp_task_copy_fn (entry_stmt);
  if (t == NULL)
    t3 = null_pointer_node;
  else
    t3 = build_fold_addr_expr_loc (loc, t);

  t = build_call_expr (built_in_decls[BUILT_IN_GOMP_TASK], 7, t1, t2, t3,
		       gimple_omp_task_arg_size (entry_stmt),
		       gimple_omp_task_arg_align (entry_stmt), cond, flags);

  force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
			    false, GSI_CONTINUE_LINKING);
}
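
/* Schematically (decl names invented), "#pragma omp task untied if (p)"
   becomes a single runtime call matching the seven arguments built above:

	GOMP_task (bar._omp_fn.1, &.omp_data_o.2, .omp_cpyfn.3,
		   arg_size, arg_align, p, 1);

   where the final flags argument is 1 only when UNTIED is present.  */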
/* If exceptions are enabled, wrap the statements in BODY in a MUST_NOT_THROW
   catch handler and return it.  This prevents programs from violating the
   structured block semantics with throws.  */

static gimple_seq
maybe_catch_exception (gimple_seq body)
{
  gimple g;
  tree decl;

  if (!flag_exceptions)
    return body;

  if (lang_protect_cleanup_actions)
    decl = lang_protect_cleanup_actions ();
  else
    decl = built_in_decls[BUILT_IN_TRAP];

  g = gimple_build_eh_must_not_throw (decl);
  g = gimple_build_try (body, gimple_seq_alloc_with_stmt (g),
			GIMPLE_TRY_CATCH);

  return gimple_seq_alloc_with_stmt (g);
}
/* Chain all the DECLs in LIST by their TREE_CHAIN fields.  */

static tree
list2chain (tree list)
{
  tree t;

  for (t = list; t; t = TREE_CHAIN (t))
    {
      tree var = TREE_VALUE (t);
      if (TREE_CHAIN (t))
	TREE_CHAIN (var) = TREE_VALUE (TREE_CHAIN (t));
      else
	TREE_CHAIN (var) = NULL_TREE;
    }

  return list ? TREE_VALUE (list) : NULL_TREE;
}
/* Remove barriers in REGION->EXIT's block.  Note that this is only
   valid for GIMPLE_OMP_PARALLEL regions.  Since the end of a parallel region
   is an implicit barrier, any workshare inside the GIMPLE_OMP_PARALLEL that
   left a barrier at the end of the GIMPLE_OMP_PARALLEL region can now be
   removed.  */

static void
remove_exit_barrier (struct omp_region *region)
{
  gimple_stmt_iterator gsi;
  basic_block exit_bb;
  edge_iterator ei;
  edge e;
  gimple stmt;
  int any_addressable_vars = -1;

  exit_bb = region->exit;

  /* If the parallel region doesn't return, we don't have REGION->EXIT
     block at all.  */
  if (!exit_bb)
    return;

  /* The last insn in the block will be the parallel's GIMPLE_OMP_RETURN.  The
     workshare's GIMPLE_OMP_RETURN will be in a preceding block.  The kinds of
     statements that can appear in between are extremely limited -- no
     memory operations at all.  Here, we allow nothing at all, so the
     only thing we allow to precede this GIMPLE_OMP_RETURN is a label.  */
  gsi = gsi_last_bb (exit_bb);
  gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_RETURN);
  gsi_prev (&gsi);
  if (!gsi_end_p (gsi) && gimple_code (gsi_stmt (gsi)) != GIMPLE_LABEL)
    return;

  FOR_EACH_EDGE (e, ei, exit_bb->preds)
    {
      gsi = gsi_last_bb (e->src);
      if (gsi_end_p (gsi))
	continue;
      stmt = gsi_stmt (gsi);
      if (gimple_code (stmt) == GIMPLE_OMP_RETURN
	  && !gimple_omp_return_nowait_p (stmt))
	{
	  /* OpenMP 3.0 tasks unfortunately prevent this optimization
	     in many cases.  If there could be tasks queued, the barrier
	     might be needed to let the tasks run before some local
	     variable of the parallel that the task uses as shared
	     runs out of scope.  The task can be spawned either
	     from within current function (this would be easy to check)
	     or from some function it calls and gets passed an address
	     of such a variable.  */
	  if (any_addressable_vars < 0)
	    {
	      gimple parallel_stmt = last_stmt (region->entry);
	      tree child_fun = gimple_omp_parallel_child_fn (parallel_stmt);
	      tree local_decls = DECL_STRUCT_FUNCTION (child_fun)->local_decls;
	      tree block;

	      any_addressable_vars = 0;
	      for (; local_decls; local_decls = TREE_CHAIN (local_decls))
		if (TREE_ADDRESSABLE (TREE_VALUE (local_decls)))
		  {
		    any_addressable_vars = 1;
		    break;
		  }
	      for (block = gimple_block (stmt);
		   !any_addressable_vars
		   && block
		   && TREE_CODE (block) == BLOCK;
		   block = BLOCK_SUPERCONTEXT (block))
		{
		  for (local_decls = BLOCK_VARS (block);
		       local_decls;
		       local_decls = TREE_CHAIN (local_decls))
		    if (TREE_ADDRESSABLE (local_decls))
		      {
			any_addressable_vars = 1;
			break;
		      }
		  if (block == gimple_block (parallel_stmt))
		    break;
		}
	    }
	  if (!any_addressable_vars)
	    gimple_omp_return_set_nowait (stmt);
	}
    }
}
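
/* Illustration: in

	#pragma omp parallel
	  {
	    #pragma omp for		implicit barrier at the loop end
	      ...
	  }				implicit barrier at the region end

   the loop's trailing barrier is redundant with the one ending the
   parallel, so its GIMPLE_OMP_RETURN can be marked nowait -- unless
   queued tasks might still reference addressable locals, as checked
   above.  */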
static void
remove_exit_barriers (struct omp_region *region)
{
  if (region->type == GIMPLE_OMP_PARALLEL)
    remove_exit_barrier (region);

  if (region->inner)
    {
      region = region->inner;
      remove_exit_barriers (region);
      while (region->next)
	{
	  region = region->next;
	  remove_exit_barriers (region);
	}
    }
}
/* Optimize omp_get_thread_num () and omp_get_num_threads ()
   calls.  These can't be declared as const functions, but
   within one parallel body they are constant, so they can be
   transformed there into __builtin_omp_get_{thread_num,num_threads} ()
   which are declared const.  Similarly for task body, except
   that in untied task omp_get_thread_num () can change at any task
   scheduling point.  */

static void
optimize_omp_library_calls (gimple entry_stmt)
{
  basic_block bb;
  gimple_stmt_iterator gsi;
  tree thr_num_id
    = DECL_ASSEMBLER_NAME (built_in_decls[BUILT_IN_OMP_GET_THREAD_NUM]);
  tree num_thr_id
    = DECL_ASSEMBLER_NAME (built_in_decls[BUILT_IN_OMP_GET_NUM_THREADS]);
  bool untied_task = (gimple_code (entry_stmt) == GIMPLE_OMP_TASK
		      && find_omp_clause (gimple_omp_task_clauses (entry_stmt),
					  OMP_CLAUSE_UNTIED) != NULL);

  FOR_EACH_BB (bb)
    for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
      {
	gimple call = gsi_stmt (gsi);
	tree decl;

	if (is_gimple_call (call)
	    && (decl = gimple_call_fndecl (call))
	    && DECL_EXTERNAL (decl)
	    && TREE_PUBLIC (decl)
	    && DECL_INITIAL (decl) == NULL)
	  {
	    tree built_in;

	    if (DECL_NAME (decl) == thr_num_id)
	      {
		/* In #pragma omp task untied omp_get_thread_num () can change
		   during the execution of the task region.  */
		if (untied_task)
		  continue;
		built_in = built_in_decls[BUILT_IN_OMP_GET_THREAD_NUM];
	      }
	    else if (DECL_NAME (decl) == num_thr_id)
	      built_in = built_in_decls[BUILT_IN_OMP_GET_NUM_THREADS];
	    else
	      continue;

	    if (DECL_ASSEMBLER_NAME (decl) != DECL_ASSEMBLER_NAME (built_in)
		|| gimple_call_num_args (call) != 0)
	      continue;

	    if (flag_exceptions && !TREE_NOTHROW (decl))
	      continue;

	    if (TREE_CODE (TREE_TYPE (decl)) != FUNCTION_TYPE
		|| !types_compatible_p (TREE_TYPE (TREE_TYPE (decl)),
					TREE_TYPE (TREE_TYPE (built_in))))
	      continue;

	    gimple_call_set_fndecl (call, built_in);
	  }
      }
}
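
/* E.g. inside a (tied) parallel or task body,

	i = omp_get_thread_num ();

   is rewritten to call the const builtin instead,

	i = __builtin_omp_get_thread_num ();

   which lets later passes CSE repeated queries of the thread number.  */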
/* Expand the OpenMP parallel or task directive starting at REGION.  */

static void
expand_omp_taskreg (struct omp_region *region)
{
  basic_block entry_bb, exit_bb, new_bb;
  struct function *child_cfun;
  tree child_fn, block, t, ws_args, *tp;
  tree save_current;
  gimple_stmt_iterator gsi;
  gimple entry_stmt, stmt;
  edge e;

  entry_stmt = last_stmt (region->entry);
  child_fn = gimple_omp_taskreg_child_fn (entry_stmt);
  child_cfun = DECL_STRUCT_FUNCTION (child_fn);
  /* If this function has been already instrumented, make sure
     the child function isn't instrumented again.  */
  child_cfun->after_tree_profile = cfun->after_tree_profile;

  entry_bb = region->entry;
  exit_bb = region->exit;

  if (is_combined_parallel (region))
    ws_args = region->ws_args;
  else
    ws_args = NULL_TREE;

  if (child_cfun->cfg)
    {
      /* Due to inlining, it may happen that we have already outlined
	 the region, in which case all we need to do is make the
	 sub-graph unreachable and emit the parallel call.  */
      edge entry_succ_e, exit_succ_e;
      gimple_stmt_iterator gsi;

      entry_succ_e = single_succ_edge (entry_bb);

      gsi = gsi_last_bb (entry_bb);
      gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_PARALLEL
		  || gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_TASK);
      gsi_remove (&gsi, true);

      new_bb = entry_bb;
      if (exit_bb)
	{
	  exit_succ_e = single_succ_edge (exit_bb);
	  make_edge (new_bb, exit_succ_e->dest, EDGE_FALLTHRU);
	}
      remove_edge_and_dominated_blocks (entry_succ_e);
    }
  else
    {
      /* If the parallel region needs data sent from the parent
	 function, then the very first statement (except possible
	 tree profile counter updates) of the parallel body
	 is a copy assignment .OMP_DATA_I = &.OMP_DATA_O.  Since
	 &.OMP_DATA_O is passed as an argument to the child function,
	 we need to replace it with the argument as seen by the child
	 function.

	 In most cases, this will end up being the identity assignment
	 .OMP_DATA_I = .OMP_DATA_I.  However, if the parallel body had
	 a function call that has been inlined, the original PARM_DECL
	 .OMP_DATA_I may have been converted into a different local
	 variable.  In which case, we need to keep the assignment.  */
      if (gimple_omp_taskreg_data_arg (entry_stmt))
	{
	  basic_block entry_succ_bb = single_succ (entry_bb);
	  gimple_stmt_iterator gsi;
	  tree arg, narg;
	  gimple parcopy_stmt = NULL;

	  for (gsi = gsi_start_bb (entry_succ_bb); ; gsi_next (&gsi))
	    {
	      gimple stmt;

	      gcc_assert (!gsi_end_p (gsi));
	      stmt = gsi_stmt (gsi);
	      if (gimple_code (stmt) != GIMPLE_ASSIGN)
		continue;

	      if (gimple_num_ops (stmt) == 2)
		{
		  tree arg = gimple_assign_rhs1 (stmt);

		  /* We're ignoring the subcode because we're
		     effectively doing a STRIP_NOPS.  */

		  if (TREE_CODE (arg) == ADDR_EXPR
		      && TREE_OPERAND (arg, 0)
			 == gimple_omp_taskreg_data_arg (entry_stmt))
		    {
		      parcopy_stmt = stmt;
		      break;
		    }
		}
	    }

	  gcc_assert (parcopy_stmt != NULL);
	  arg = DECL_ARGUMENTS (child_fn);

	  if (!gimple_in_ssa_p (cfun))
	    {
	      if (gimple_assign_lhs (parcopy_stmt) == arg)
		gsi_remove (&gsi, true);
	      else
		{
		  /* ?? Is setting the subcode really necessary ??  */
		  gimple_omp_set_subcode (parcopy_stmt, TREE_CODE (arg));
		  gimple_assign_set_rhs1 (parcopy_stmt, arg);
		}
	    }
	  else
	    {
	      /* If we are in ssa form, we must load the value from the default
		 definition of the argument.  That should not be defined now,
		 since the argument is not used uninitialized.  */
	      gcc_assert (gimple_default_def (cfun, arg) == NULL);
	      narg = make_ssa_name (arg, gimple_build_nop ());
	      set_default_def (arg, narg);
	      /* ?? Is setting the subcode really necessary ??  */
	      gimple_omp_set_subcode (parcopy_stmt, TREE_CODE (narg));
	      gimple_assign_set_rhs1 (parcopy_stmt, narg);
	      update_stmt (parcopy_stmt);
	    }
	}

      /* Declare local variables needed in CHILD_CFUN.  */
      block = DECL_INITIAL (child_fn);
      BLOCK_VARS (block) = list2chain (child_cfun->local_decls);
      /* The gimplifier could record temporaries in parallel/task block
	 rather than in containing function's local_decls chain,
	 which would mean cgraph missed finalizing them.  Do it now.  */
      for (t = BLOCK_VARS (block); t; t = TREE_CHAIN (t))
	if (TREE_CODE (t) == VAR_DECL
	    && TREE_STATIC (t)
	    && !DECL_EXTERNAL (t))
	  varpool_finalize_decl (t);
      DECL_SAVED_TREE (child_fn) = NULL;
      gimple_set_body (child_fn, bb_seq (single_succ (entry_bb)));
      TREE_USED (block) = 1;

      /* Reset DECL_CONTEXT on function arguments.  */
      for (t = DECL_ARGUMENTS (child_fn); t; t = TREE_CHAIN (t))
	DECL_CONTEXT (t) = child_fn;

      /* Split ENTRY_BB at GIMPLE_OMP_PARALLEL or GIMPLE_OMP_TASK,
	 so that it can be moved to the child function.  */
      gsi = gsi_last_bb (entry_bb);
      stmt = gsi_stmt (gsi);
      gcc_assert (stmt && (gimple_code (stmt) == GIMPLE_OMP_PARALLEL
			   || gimple_code (stmt) == GIMPLE_OMP_TASK));
      gsi_remove (&gsi, true);
      e = split_block (entry_bb, stmt);
      entry_bb = e->dest;
      single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;

      /* Convert GIMPLE_OMP_RETURN into a RETURN_EXPR.  */
      if (exit_bb)
	{
	  gsi = gsi_last_bb (exit_bb);
	  gcc_assert (!gsi_end_p (gsi)
		      && gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_RETURN);
	  stmt = gimple_build_return (NULL);
	  gsi_insert_after (&gsi, stmt, GSI_SAME_STMT);
	  gsi_remove (&gsi, true);
	}

      /* Move the parallel region into CHILD_CFUN.  */

      if (gimple_in_ssa_p (cfun))
	{
	  push_cfun (child_cfun);
	  init_tree_ssa (child_cfun);
	  init_ssa_operands ();
	  cfun->gimple_df->in_ssa_p = true;
	  pop_cfun ();
	  block = NULL_TREE;
	}
      else
	block = gimple_block (entry_stmt);

      new_bb = move_sese_region_to_fn (child_cfun, entry_bb, exit_bb, block);
      if (exit_bb)
	single_succ_edge (new_bb)->flags = EDGE_FALLTHRU;

      /* Remove non-local VAR_DECLs from child_cfun->local_decls list.  */
      for (tp = &child_cfun->local_decls; *tp; )
	if (DECL_CONTEXT (TREE_VALUE (*tp)) != cfun->decl)
	  tp = &TREE_CHAIN (*tp);
	else
	  *tp = TREE_CHAIN (*tp);

      /* Inform the callgraph about the new function.  */
      DECL_STRUCT_FUNCTION (child_fn)->curr_properties
	= cfun->curr_properties;
      cgraph_add_new_function (child_fn, true);

      /* Fix the callgraph edges for child_cfun.  Those for cfun will be
	 fixed in a following pass.  */
      push_cfun (child_cfun);
      save_current = current_function_decl;
      current_function_decl = child_fn;
      if (optimize)
	optimize_omp_library_calls (entry_stmt);
      rebuild_cgraph_edges ();

      /* Some EH regions might become dead, see PR34608.  If
	 pass_cleanup_cfg isn't the first pass to happen with the
	 new child, these dead EH edges might cause problems.
	 Clean them up now.  */
      if (flag_exceptions)
	{
	  basic_block bb;
	  bool changed = false;

	  FOR_EACH_BB (bb)
	    changed |= gimple_purge_dead_eh_edges (bb);
	  if (changed)
	    cleanup_tree_cfg ();
	}
      if (gimple_in_ssa_p (cfun))
	update_ssa (TODO_update_ssa);
      current_function_decl = save_current;
      pop_cfun ();
    }

  /* Emit a library call to launch the children threads.  */
  if (gimple_code (entry_stmt) == GIMPLE_OMP_PARALLEL)
    expand_parallel_call (region, new_bb, entry_stmt, ws_args);
  else
    expand_task_call (new_bb, entry_stmt);
  update_ssa (TODO_update_ssa_only_virtuals);
}
/* A subroutine of expand_omp_for.  Generate code for a parallel
   loop with any schedule.  Given parameters:

	for (V = N1; V cond N2; V += STEP) BODY;

   where COND is "<" or ">", we generate pseudocode

	more = GOMP_loop_foo_start (N1, N2, STEP, CHUNK, &istart0, &iend0);
	if (more) goto L0; else goto L3;
    L0:
	V = istart0;
	iend = iend0;
    L1:
	BODY;
	V += STEP;
	if (V cond iend) goto L1; else goto L2;
    L2:
	if (GOMP_loop_foo_next (&istart0, &iend0)) goto L0; else goto L3;
    L3:

    If this is a combined omp parallel loop, instead of the call to
    GOMP_loop_foo_start, we call GOMP_loop_foo_next.

    For collapsed loops, given parameters:
      collapse(3)
      for (V1 = N11; V1 cond1 N12; V1 += STEP1)
	for (V2 = N21; V2 cond2 N22; V2 += STEP2)
	  for (V3 = N31; V3 cond3 N32; V3 += STEP3)
	    BODY;

    we generate pseudocode

	if (cond3 is <)
	  adj = STEP3 - 1;
	else
	  adj = STEP3 + 1;
	count3 = (adj + N32 - N31) / STEP3;
	if (cond2 is <)
	  adj = STEP2 - 1;
	else
	  adj = STEP2 + 1;
	count2 = (adj + N22 - N21) / STEP2;
	if (cond1 is <)
	  adj = STEP1 - 1;
	else
	  adj = STEP1 + 1;
	count1 = (adj + N12 - N11) / STEP1;
	count = count1 * count2 * count3;
	more = GOMP_loop_foo_start (0, count, 1, CHUNK, &istart0, &iend0);
	if (more) goto L0; else goto L3;
    L0:
	V = istart0;
	T = V;
	V3 = N31 + (T % count3) * STEP3;
	T = T / count3;
	V2 = N21 + (T % count2) * STEP2;
	T = T / count2;
	V1 = N11 + T * STEP1;
	iend = iend0;
    L1:
	BODY;
	V += 1;
	if (V < iend) goto L10; else goto L2;
    L10:
	V3 += STEP3;
	if (V3 cond3 N32) goto L1; else goto L11;
    L11:
	V3 = N31;
	V2 += STEP2;
	if (V2 cond2 N22) goto L1; else goto L12;
    L12:
	V2 = N21;
	V1 += STEP1;
	goto L1;
    L2:
	if (GOMP_loop_foo_next (&istart0, &iend0)) goto L0; else goto L3;
    L3:
      */
static void
expand_omp_for_generic (struct omp_region *region,
			struct omp_for_data *fd,
			enum built_in_function start_fn,
			enum built_in_function next_fn)
{
  tree type, istart0, iend0, iend;
  tree t, vmain, vback, bias = NULL_TREE;
  basic_block entry_bb, cont_bb, exit_bb, l0_bb, l1_bb, collapse_bb;
  basic_block l2_bb = NULL, l3_bb = NULL;
  gimple_stmt_iterator gsi;
  gimple stmt;
  bool in_combined_parallel = is_combined_parallel (region);
  bool broken_loop = region->cont == NULL;
  edge e, ne;
  tree *counts = NULL;
  int i;

  gcc_assert (!broken_loop || !in_combined_parallel);
  gcc_assert (fd->iter_type == long_integer_type_node
	      || !in_combined_parallel);

  type = TREE_TYPE (fd->loop.v);
  istart0 = create_tmp_var (fd->iter_type, ".istart0");
  iend0 = create_tmp_var (fd->iter_type, ".iend0");
  TREE_ADDRESSABLE (istart0) = 1;
  TREE_ADDRESSABLE (iend0) = 1;
  if (gimple_in_ssa_p (cfun))
    {
      add_referenced_var (istart0);
      add_referenced_var (iend0);
    }

  /* See if we need to bias by LLONG_MIN.  */
  if (fd->iter_type == long_long_unsigned_type_node
      && TREE_CODE (type) == INTEGER_TYPE
      && !TYPE_UNSIGNED (type))
    {
      tree n1, n2;

      if (fd->loop.cond_code == LT_EXPR)
	{
	  n1 = fd->loop.n1;
	  n2 = fold_build2 (PLUS_EXPR, type, fd->loop.n2, fd->loop.step);
	}
      else
	{
	  n1 = fold_build2 (MINUS_EXPR, type, fd->loop.n2, fd->loop.step);
	  n2 = fd->loop.n1;
	}
      if (TREE_CODE (n1) != INTEGER_CST
	  || TREE_CODE (n2) != INTEGER_CST
	  || ((tree_int_cst_sgn (n1) < 0) ^ (tree_int_cst_sgn (n2) < 0)))
	bias = fold_convert (fd->iter_type, TYPE_MIN_VALUE (type));
    }

  entry_bb = region->entry;
  cont_bb = region->cont;
  collapse_bb = NULL;
  gcc_assert (EDGE_COUNT (entry_bb->succs) == 2);
  gcc_assert (broken_loop
	      || BRANCH_EDGE (entry_bb)->dest == FALLTHRU_EDGE (cont_bb)->dest);
  l0_bb = split_edge (FALLTHRU_EDGE (entry_bb));
  l1_bb = single_succ (l0_bb);
  if (!broken_loop)
    {
      l2_bb = create_empty_bb (cont_bb);
      gcc_assert (BRANCH_EDGE (cont_bb)->dest == l1_bb);
      gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
    }
  l3_bb = BRANCH_EDGE (entry_bb)->dest;
  exit_bb = region->exit;

  gsi = gsi_last_bb (entry_bb);

  gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
  if (fd->collapse > 1)
    {
      /* collapsed loops need work for expansion in SSA form.  */
      gcc_assert (!gimple_in_ssa_p (cfun));
      counts = (tree *) alloca (fd->collapse * sizeof (tree));
      for (i = 0; i < fd->collapse; i++)
	{
	  tree itype = TREE_TYPE (fd->loops[i].v);

	  if (POINTER_TYPE_P (itype))
	    itype = lang_hooks.types.type_for_size (TYPE_PRECISION (itype), 0);
	  t = build_int_cst (itype, (fd->loops[i].cond_code == LT_EXPR
				     ? -1 : 1));
	  t = fold_build2 (PLUS_EXPR, itype,
			   fold_convert (itype, fd->loops[i].step), t);
	  t = fold_build2 (PLUS_EXPR, itype, t,
			   fold_convert (itype, fd->loops[i].n2));
	  t = fold_build2 (MINUS_EXPR, itype, t,
			   fold_convert (itype, fd->loops[i].n1));
	  if (TYPE_UNSIGNED (itype) && fd->loops[i].cond_code == GT_EXPR)
	    t = fold_build2 (TRUNC_DIV_EXPR, itype,
			     fold_build1 (NEGATE_EXPR, itype, t),
			     fold_build1 (NEGATE_EXPR, itype,
					  fold_convert (itype,
							fd->loops[i].step)));
	  else
	    t = fold_build2 (TRUNC_DIV_EXPR, itype, t,
			     fold_convert (itype, fd->loops[i].step));
	  t = fold_convert (type, t);
	  if (TREE_CODE (t) == INTEGER_CST)
	    counts[i] = t;
	  else
	    {
	      counts[i] = create_tmp_var (type, ".count");
	      t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
					    true, GSI_SAME_STMT);
	      stmt = gimple_build_assign (counts[i], t);
	      gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
	    }
	  if (SSA_VAR_P (fd->loop.n2))
	    {
	      if (i == 0)
		t = counts[0];
	      else
		{
		  t = fold_build2 (MULT_EXPR, type, fd->loop.n2, counts[i]);
		  t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
						true, GSI_SAME_STMT);
		}
	      stmt = gimple_build_assign (fd->loop.n2, t);
	      gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
	    }
	}
    }
  if (in_combined_parallel)
    {
      /* In a combined parallel loop, emit a call to
	 GOMP_loop_foo_next.  */
      t = build_call_expr (built_in_decls[next_fn], 2,
			   build_fold_addr_expr (istart0),
			   build_fold_addr_expr (iend0));
    }
  else
    {
      tree t0, t1, t2, t3, t4;
      /* If this is not a combined parallel loop, emit a call to
	 GOMP_loop_foo_start in ENTRY_BB.  */
      t4 = build_fold_addr_expr (iend0);
      t3 = build_fold_addr_expr (istart0);
      t2 = fold_convert (fd->iter_type, fd->loop.step);
      if (POINTER_TYPE_P (type)
	  && TYPE_PRECISION (type) != TYPE_PRECISION (fd->iter_type))
	{
	  /* Avoid casting pointers to integer of a different size.  */
	  tree itype
	    = lang_hooks.types.type_for_size (TYPE_PRECISION (type), 0);
	  t1 = fold_convert (fd->iter_type, fold_convert (itype, fd->loop.n2));
	  t0 = fold_convert (fd->iter_type, fold_convert (itype, fd->loop.n1));
	}
      else
	{
	  t1 = fold_convert (fd->iter_type, fd->loop.n2);
	  t0 = fold_convert (fd->iter_type, fd->loop.n1);
	}
      if (bias)
	{
	  t1 = fold_build2 (PLUS_EXPR, fd->iter_type, t1, bias);
	  t0 = fold_build2 (PLUS_EXPR, fd->iter_type, t0, bias);
	}
      if (fd->iter_type == long_integer_type_node)
	{
	  if (fd->chunk_size)
	    {
	      t = fold_convert (fd->iter_type, fd->chunk_size);
	      t = build_call_expr (built_in_decls[start_fn], 6,
				   t0, t1, t2, t, t3, t4);
	    }
	  else
	    t = build_call_expr (built_in_decls[start_fn], 5,
				 t0, t1, t2, t3, t4);
	}
      else
	{
	  tree t5;
	  tree c_bool_type;

	  /* The GOMP_loop_ull_*start functions have additional boolean
	     argument, true for < loops and false for > loops.
	     In Fortran, the C bool type can be different from
	     boolean_type_node.  */
	  c_bool_type = TREE_TYPE (TREE_TYPE (built_in_decls[start_fn]));
	  t5 = build_int_cst (c_bool_type,
			      fd->loop.cond_code == LT_EXPR ? 1 : 0);
	  if (fd->chunk_size)
	    {
	      t = fold_convert (fd->iter_type, fd->chunk_size);
	      t = build_call_expr (built_in_decls[start_fn], 7,
				   t5, t0, t1, t2, t, t3, t4);
	    }
	  else
	    t = build_call_expr (built_in_decls[start_fn], 6,
				 t5, t0, t1, t2, t3, t4);
	}
    }
  if (TREE_TYPE (t) != boolean_type_node)
    t = fold_build2 (NE_EXPR, boolean_type_node,
		     t, build_int_cst (TREE_TYPE (t), 0));
  t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
				true, GSI_SAME_STMT);
  gsi_insert_after (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT);

  /* Remove the GIMPLE_OMP_FOR statement.  */
  gsi_remove (&gsi, true);

  /* Iteration setup for sequential loop goes in L0_BB.  */
  gsi = gsi_start_bb (l0_bb);
  t = istart0;
  if (bias)
    t = fold_build2 (MINUS_EXPR, fd->iter_type, t, bias);
  if (POINTER_TYPE_P (type))
    t = fold_convert (lang_hooks.types.type_for_size (TYPE_PRECISION (type),
						      0), t);
  t = fold_convert (type, t);
  t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
				false, GSI_CONTINUE_LINKING);
  stmt = gimple_build_assign (fd->loop.v, t);
  gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);

  t = iend0;
  if (bias)
    t = fold_build2 (MINUS_EXPR, fd->iter_type, t, bias);
  if (POINTER_TYPE_P (type))
    t = fold_convert (lang_hooks.types.type_for_size (TYPE_PRECISION (type),
						      0), t);
  t = fold_convert (type, t);
  iend = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
				   false, GSI_CONTINUE_LINKING);
  if (fd->collapse > 1)
    {
      tree tem = create_tmp_var (type, ".tem");

      stmt = gimple_build_assign (tem, fd->loop.v);
      gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
      for (i = fd->collapse - 1; i >= 0; i--)
	{
	  tree vtype = TREE_TYPE (fd->loops[i].v), itype;
	  itype = vtype;
	  if (POINTER_TYPE_P (vtype))
	    itype = lang_hooks.types.type_for_size (TYPE_PRECISION (vtype), 0);
	  t = fold_build2 (TRUNC_MOD_EXPR, type, tem, counts[i]);
	  t = fold_convert (itype, t);
	  t = fold_build2 (MULT_EXPR, itype, t,
			   fold_convert (itype, fd->loops[i].step));
	  if (POINTER_TYPE_P (vtype))
	    t = fold_build2 (POINTER_PLUS_EXPR, vtype,
			     fd->loops[i].n1, fold_convert (sizetype, t));
	  else
	    t = fold_build2 (PLUS_EXPR, itype, fd->loops[i].n1, t);
	  t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
					false, GSI_CONTINUE_LINKING);
	  stmt = gimple_build_assign (fd->loops[i].v, t);
	  gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
	  if (i != 0)
	    {
	      t = fold_build2 (TRUNC_DIV_EXPR, type, tem, counts[i]);
	      t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
					    false, GSI_CONTINUE_LINKING);
	      stmt = gimple_build_assign (tem, t);
	      gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
	    }
	}
    }

  if (!broken_loop)
    {
      /* Code to control the increment and predicate for the sequential
	 loop goes in the CONT_BB.  */
      gsi = gsi_last_bb (cont_bb);
      stmt = gsi_stmt (gsi);
      gcc_assert (gimple_code (stmt) == GIMPLE_OMP_CONTINUE);
      vmain = gimple_omp_continue_control_use (stmt);
      vback = gimple_omp_continue_control_def (stmt);

      if (POINTER_TYPE_P (type))
	t = fold_build2 (POINTER_PLUS_EXPR, type, vmain,
			 fold_convert (sizetype, fd->loop.step));
      else
	t = fold_build2 (PLUS_EXPR, type, vmain, fd->loop.step);
      t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
				    true, GSI_SAME_STMT);
      stmt = gimple_build_assign (vback, t);
      gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);

      t = build2 (fd->loop.cond_code, boolean_type_node, vback, iend);
      stmt = gimple_build_cond_empty (t);
      gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);

      /* Remove GIMPLE_OMP_CONTINUE.  */
      gsi_remove (&gsi, true);

      if (fd->collapse > 1)
	{
	  basic_block last_bb, bb;

	  last_bb = cont_bb;
	  for (i = fd->collapse - 1; i >= 0; i--)
	    {
	      tree vtype = TREE_TYPE (fd->loops[i].v);

	      bb = create_empty_bb (last_bb);
	      gsi = gsi_start_bb (bb);

	      if (i < fd->collapse - 1)
		{
		  e = make_edge (last_bb, bb, EDGE_FALSE_VALUE);
		  e->probability = REG_BR_PROB_BASE / 8;

		  t = fd->loops[i + 1].n1;
		  t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
						false, GSI_CONTINUE_LINKING);
		  stmt = gimple_build_assign (fd->loops[i + 1].v, t);
		  gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
		}
	      else
		collapse_bb = bb;

	      set_immediate_dominator (CDI_DOMINATORS, bb, last_bb);

	      if (POINTER_TYPE_P (vtype))
		t = fold_build2 (POINTER_PLUS_EXPR, vtype,
				 fd->loops[i].v,
				 fold_convert (sizetype, fd->loops[i].step));
	      else
		t = fold_build2 (PLUS_EXPR, vtype, fd->loops[i].v,
				 fd->loops[i].step);
	      t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
					    false, GSI_CONTINUE_LINKING);
	      stmt = gimple_build_assign (fd->loops[i].v, t);
	      gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);

	      if (i > 0)
		{
		  t = fd->loops[i].n2;
		  t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
						false, GSI_CONTINUE_LINKING);
		  t = fold_build2 (fd->loops[i].cond_code, boolean_type_node,
				   fd->loops[i].v, t);
		  stmt = gimple_build_cond_empty (t);
		  gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
		  e = make_edge (bb, l1_bb, EDGE_TRUE_VALUE);
		  e->probability = REG_BR_PROB_BASE * 7 / 8;
		}
	      else
		make_edge (bb, l1_bb, EDGE_FALLTHRU);
	      last_bb = bb;
	    }
	}

      /* Emit code to get the next parallel iteration in L2_BB.  */
      gsi = gsi_start_bb (l2_bb);

      t = build_call_expr (built_in_decls[next_fn], 2,
			   build_fold_addr_expr (istart0),
			   build_fold_addr_expr (iend0));
      t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
				    false, GSI_CONTINUE_LINKING);
      if (TREE_TYPE (t) != boolean_type_node)
	t = fold_build2 (NE_EXPR, boolean_type_node,
			 t, build_int_cst (TREE_TYPE (t), 0));
      stmt = gimple_build_cond_empty (t);
      gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
    }

  /* Add the loop cleanup function.  */
  gsi = gsi_last_bb (exit_bb);
  if (gimple_omp_return_nowait_p (gsi_stmt (gsi)))
    t = built_in_decls[BUILT_IN_GOMP_LOOP_END_NOWAIT];
  else
    t = built_in_decls[BUILT_IN_GOMP_LOOP_END];
  stmt = gimple_build_call (t, 0);
  gsi_insert_after (&gsi, stmt, GSI_SAME_STMT);
  gsi_remove (&gsi, true);

  /* Connect the new blocks.  */
  find_edge (entry_bb, l0_bb)->flags = EDGE_TRUE_VALUE;
  find_edge (entry_bb, l3_bb)->flags = EDGE_FALSE_VALUE;

  if (!broken_loop)
    {
      gimple_seq phis;

      e = find_edge (cont_bb, l3_bb);
      ne = make_edge (l2_bb, l3_bb, EDGE_FALSE_VALUE);

      phis = phi_nodes (l3_bb);
      for (gsi = gsi_start (phis); !gsi_end_p (gsi); gsi_next (&gsi))
	{
	  gimple phi = gsi_stmt (gsi);
	  SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, ne),
		   PHI_ARG_DEF_FROM_EDGE (phi, e));
	}
      remove_edge (e);

      make_edge (cont_bb, l2_bb, EDGE_FALSE_VALUE);
      if (fd->collapse > 1)
	{
	  e = find_edge (cont_bb, l1_bb);
	  remove_edge (e);
	  e = make_edge (cont_bb, collapse_bb, EDGE_TRUE_VALUE);
	}
      else
	{
	  e = find_edge (cont_bb, l1_bb);
	  e->flags = EDGE_TRUE_VALUE;
	}
      e->probability = REG_BR_PROB_BASE * 7 / 8;
      find_edge (cont_bb, l2_bb)->probability = REG_BR_PROB_BASE / 8;
      make_edge (l2_bb, l0_bb, EDGE_TRUE_VALUE);

      set_immediate_dominator (CDI_DOMINATORS, l2_bb,
			       recompute_dominator (CDI_DOMINATORS, l2_bb));
      set_immediate_dominator (CDI_DOMINATORS, l3_bb,
			       recompute_dominator (CDI_DOMINATORS, l3_bb));
      set_immediate_dominator (CDI_DOMINATORS, l0_bb,
			       recompute_dominator (CDI_DOMINATORS, l0_bb));
      set_immediate_dominator (CDI_DOMINATORS, l1_bb,
			       recompute_dominator (CDI_DOMINATORS, l1_bb));
    }
}
/* A subroutine of expand_omp_for.  Generate code for a parallel
   loop with static schedule and no specified chunk size.  Given
   parameters:

	for (V = N1; V cond N2; V += STEP) BODY;

   where COND is "<" or ">", we generate pseudocode

	if (cond is <)
	  adj = STEP - 1;
	else
	  adj = STEP + 1;
	if ((__typeof (V)) -1 > 0 && cond is >)
	  n = -(adj + N2 - N1) / -STEP;
	else
	  n = (adj + N2 - N1) / STEP;
	q = n / nthreads;
	q += (q * nthreads != n);
	s0 = q * threadid;
	e0 = min(s0 + q, n);
	V = s0 * STEP + N1;
	if (s0 >= e0) goto L2; else goto L0;
    L0:
	e = e0 * STEP + N1;
    L1:
	BODY;
	V += STEP;
	if (V cond e) goto L1;
    L2:
      */
static void
expand_omp_for_static_nochunk (struct omp_region *region,
			       struct omp_for_data *fd)
{
  tree n, q, s0, e0, e, t, nthreads, threadid;
  tree type, itype, vmain, vback;
  basic_block entry_bb, exit_bb, seq_start_bb, body_bb, cont_bb;
  basic_block fin_bb;
  gimple_stmt_iterator gsi;
  gimple stmt;

  itype = type = TREE_TYPE (fd->loop.v);
  if (POINTER_TYPE_P (type))
    itype = lang_hooks.types.type_for_size (TYPE_PRECISION (type), 0);

  entry_bb = region->entry;
  cont_bb = region->cont;
  gcc_assert (EDGE_COUNT (entry_bb->succs) == 2);
  gcc_assert (BRANCH_EDGE (entry_bb)->dest == FALLTHRU_EDGE (cont_bb)->dest);
  seq_start_bb = split_edge (FALLTHRU_EDGE (entry_bb));
  body_bb = single_succ (seq_start_bb);
  gcc_assert (BRANCH_EDGE (cont_bb)->dest == body_bb);
  gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
  fin_bb = FALLTHRU_EDGE (cont_bb)->dest;
  exit_bb = region->exit;

  /* Iteration space partitioning goes in ENTRY_BB.  */
  gsi = gsi_last_bb (entry_bb);
  gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);

  t = build_call_expr (built_in_decls[BUILT_IN_OMP_GET_NUM_THREADS], 0);
  t = fold_convert (itype, t);
  nthreads = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
				       true, GSI_SAME_STMT);

  t = build_call_expr (built_in_decls[BUILT_IN_OMP_GET_THREAD_NUM], 0);
  t = fold_convert (itype, t);
  threadid = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
				       true, GSI_SAME_STMT);

  fd->loop.n1
    = force_gimple_operand_gsi (&gsi, fold_convert (type, fd->loop.n1),
				true, NULL_TREE, true, GSI_SAME_STMT);
  fd->loop.n2
    = force_gimple_operand_gsi (&gsi, fold_convert (itype, fd->loop.n2),
				true, NULL_TREE, true, GSI_SAME_STMT);
  fd->loop.step
    = force_gimple_operand_gsi (&gsi, fold_convert (itype, fd->loop.step),
				true, NULL_TREE, true, GSI_SAME_STMT);

  t = build_int_cst (itype, (fd->loop.cond_code == LT_EXPR ? -1 : 1));
  t = fold_build2 (PLUS_EXPR, itype, fd->loop.step, t);
  t = fold_build2 (PLUS_EXPR, itype, t, fd->loop.n2);
  t = fold_build2 (MINUS_EXPR, itype, t, fold_convert (itype, fd->loop.n1));
  if (TYPE_UNSIGNED (itype) && fd->loop.cond_code == GT_EXPR)
    t = fold_build2 (TRUNC_DIV_EXPR, itype,
		     fold_build1 (NEGATE_EXPR, itype, t),
		     fold_build1 (NEGATE_EXPR, itype, fd->loop.step));
  else
    t = fold_build2 (TRUNC_DIV_EXPR, itype, t, fd->loop.step);
  t = fold_convert (itype, t);
  n = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);

  t = fold_build2 (TRUNC_DIV_EXPR, itype, n, nthreads);
  q = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);

  t = fold_build2 (MULT_EXPR, itype, q, nthreads);
  t = fold_build2 (NE_EXPR, itype, t, n);
  t = fold_build2 (PLUS_EXPR, itype, q, t);
  q = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);

  t = build2 (MULT_EXPR, itype, q, threadid);
  s0 = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);

  t = fold_build2 (PLUS_EXPR, itype, s0, q);
  t = fold_build2 (MIN_EXPR, itype, t, n);
  e0 = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);

  t = build2 (GE_EXPR, boolean_type_node, s0, e0);
  gsi_insert_before (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT);

  /* Remove the GIMPLE_OMP_FOR statement.  */
  gsi_remove (&gsi, true);

  /* Setup code for sequential iteration goes in SEQ_START_BB.  */
  gsi = gsi_start_bb (seq_start_bb);

  t = fold_convert (itype, s0);
  t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
  if (POINTER_TYPE_P (type))
    t = fold_build2 (POINTER_PLUS_EXPR, type, fd->loop.n1,
		     fold_convert (sizetype, t));
  else
    t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
  t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
				false, GSI_CONTINUE_LINKING);
  stmt = gimple_build_assign (fd->loop.v, t);
  gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);

  t = fold_convert (itype, e0);
  t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
  if (POINTER_TYPE_P (type))
    t = fold_build2 (POINTER_PLUS_EXPR, type, fd->loop.n1,
		     fold_convert (sizetype, t));
  else
    t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
  e = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
				false, GSI_CONTINUE_LINKING);

  /* The code controlling the sequential loop replaces the
     GIMPLE_OMP_CONTINUE.  */
  gsi = gsi_last_bb (cont_bb);
  stmt = gsi_stmt (gsi);
  gcc_assert (gimple_code (stmt) == GIMPLE_OMP_CONTINUE);
  vmain = gimple_omp_continue_control_use (stmt);
  vback = gimple_omp_continue_control_def (stmt);

  if (POINTER_TYPE_P (type))
    t = fold_build2 (POINTER_PLUS_EXPR, type, vmain,
		     fold_convert (sizetype, fd->loop.step));
  else
    t = fold_build2 (PLUS_EXPR, type, vmain, fd->loop.step);
  t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
				true, GSI_SAME_STMT);
  stmt = gimple_build_assign (vback, t);
  gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);

  t = build2 (fd->loop.cond_code, boolean_type_node, vback, e);
  gsi_insert_before (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT);

  /* Remove the GIMPLE_OMP_CONTINUE statement.  */
  gsi_remove (&gsi, true);

  /* Replace the GIMPLE_OMP_RETURN with a barrier, or nothing.  */
  gsi = gsi_last_bb (exit_bb);
  if (!gimple_omp_return_nowait_p (gsi_stmt (gsi)))
    force_gimple_operand_gsi (&gsi, build_omp_barrier (), false, NULL_TREE,
			      false, GSI_SAME_STMT);
  gsi_remove (&gsi, true);

  /* Connect all the blocks.  */
  find_edge (entry_bb, seq_start_bb)->flags = EDGE_FALSE_VALUE;
  find_edge (entry_bb, fin_bb)->flags = EDGE_TRUE_VALUE;

  find_edge (cont_bb, body_bb)->flags = EDGE_TRUE_VALUE;
  find_edge (cont_bb, fin_bb)->flags = EDGE_FALSE_VALUE;

  set_immediate_dominator (CDI_DOMINATORS, seq_start_bb, entry_bb);
  set_immediate_dominator (CDI_DOMINATORS, body_bb,
			   recompute_dominator (CDI_DOMINATORS, body_bb));
  set_immediate_dominator (CDI_DOMINATORS, fin_bb,
			   recompute_dominator (CDI_DOMINATORS, fin_bb));
}
/* A subroutine of expand_omp_for.  Generate code for a parallel
   loop with static schedule and a specified chunk size.  Given
   parameters:

        for (V = N1; V cond N2; V += STEP) BODY;

   where COND is "<" or ">", we generate pseudocode

        if (cond is <)
          adj = STEP - 1;
        else
          adj = STEP + 1;
        if ((__typeof (V)) -1 > 0 && cond is >)
          n = -(adj + N2 - N1) / -STEP;
        else
          n = (adj + N2 - N1) / STEP;
        trip = 0;
        V = threadid * CHUNK * STEP + N1;  -- this extra definition of V is
                                              here so that V is defined
                                              if the loop is not entered
    L0:
        s0 = (trip * nthreads + threadid) * CHUNK;
        e0 = min (s0 + CHUNK, n);
        if (s0 < n) goto L1; else goto L4;
    L1:
        V = s0 * STEP + N1;
        e = e0 * STEP + N1;
    L2:
        BODY;
        V += STEP;
        if (V cond e) goto L2; else goto L3;
    L3:
        trip += 1;
        goto L0;
    L4:
        exit;
*/
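
/* A worked example of the pseudocode above (illustrative only): for 10
   iterations, nthreads = 2 and CHUNK = 2, trip 0 assigns [0, 2) to
   thread 0 and [2, 4) to thread 1; trip 1 assigns [4, 6) and [6, 8);
   trip 2 assigns [8, 10) to thread 0, while thread 1 computes
   s0 = 10 >= n and exits through L4.  */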
static void
expand_omp_for_static_chunk (struct omp_region *region, struct omp_for_data *fd)
{
  tree n, s0, e0, e, t;
  tree trip_var, trip_init, trip_main, trip_back, nthreads, threadid;
  tree type, itype, v_main, v_back, v_extra;
  basic_block entry_bb, exit_bb, body_bb, seq_start_bb, iter_part_bb;
  basic_block trip_update_bb, cont_bb, fin_bb;
  gimple_stmt_iterator si;
  gimple stmt;
  edge se;

  itype = type = TREE_TYPE (fd->loop.v);
  if (POINTER_TYPE_P (type))
    itype = lang_hooks.types.type_for_size (TYPE_PRECISION (type), 0);

  entry_bb = region->entry;
  se = split_block (entry_bb, last_stmt (entry_bb));
  entry_bb = se->src;
  iter_part_bb = se->dest;
  cont_bb = region->cont;
  gcc_assert (EDGE_COUNT (iter_part_bb->succs) == 2);
  gcc_assert (BRANCH_EDGE (iter_part_bb)->dest
              == FALLTHRU_EDGE (cont_bb)->dest);
  seq_start_bb = split_edge (FALLTHRU_EDGE (iter_part_bb));
  body_bb = single_succ (seq_start_bb);
  gcc_assert (BRANCH_EDGE (cont_bb)->dest == body_bb);
  gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
  fin_bb = FALLTHRU_EDGE (cont_bb)->dest;
  trip_update_bb = split_edge (FALLTHRU_EDGE (cont_bb));
  exit_bb = region->exit;
  /* Trip and adjustment setup goes in ENTRY_BB.  */
  si = gsi_last_bb (entry_bb);
  gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_FOR);

  t = build_call_expr (built_in_decls[BUILT_IN_OMP_GET_NUM_THREADS], 0);
  t = fold_convert (itype, t);
  nthreads = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
                                       true, GSI_SAME_STMT);

  t = build_call_expr (built_in_decls[BUILT_IN_OMP_GET_THREAD_NUM], 0);
  t = fold_convert (itype, t);
  threadid = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
                                       true, GSI_SAME_STMT);

  fd->loop.n1
    = force_gimple_operand_gsi (&si, fold_convert (type, fd->loop.n1),
                                true, NULL_TREE, true, GSI_SAME_STMT);
  fd->loop.n2
    = force_gimple_operand_gsi (&si, fold_convert (itype, fd->loop.n2),
                                true, NULL_TREE, true, GSI_SAME_STMT);
  fd->loop.step
    = force_gimple_operand_gsi (&si, fold_convert (itype, fd->loop.step),
                                true, NULL_TREE, true, GSI_SAME_STMT);
  fd->chunk_size
    = force_gimple_operand_gsi (&si, fold_convert (itype, fd->chunk_size),
                                true, NULL_TREE, true, GSI_SAME_STMT);

  t = build_int_cst (itype, (fd->loop.cond_code == LT_EXPR ? -1 : 1));
  t = fold_build2 (PLUS_EXPR, itype, fd->loop.step, t);
  t = fold_build2 (PLUS_EXPR, itype, t, fd->loop.n2);
  t = fold_build2 (MINUS_EXPR, itype, t, fold_convert (itype, fd->loop.n1));
  if (TYPE_UNSIGNED (itype) && fd->loop.cond_code == GT_EXPR)
    t = fold_build2 (TRUNC_DIV_EXPR, itype,
                     fold_build1 (NEGATE_EXPR, itype, t),
                     fold_build1 (NEGATE_EXPR, itype, fd->loop.step));
  else
    t = fold_build2 (TRUNC_DIV_EXPR, itype, t, fd->loop.step);
  t = fold_convert (itype, t);
  n = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
                                true, GSI_SAME_STMT);

  trip_var = create_tmp_var (itype, ".trip");
  if (gimple_in_ssa_p (cfun))
    {
      add_referenced_var (trip_var);
      trip_init = make_ssa_name (trip_var, NULL);
      trip_main = make_ssa_name (trip_var, NULL);
      trip_back = make_ssa_name (trip_var, NULL);
    }
  else
    {
      trip_init = trip_var;
      trip_main = trip_var;
      trip_back = trip_var;
    }

  stmt = gimple_build_assign (trip_init, build_int_cst (itype, 0));
  gsi_insert_before (&si, stmt, GSI_SAME_STMT);

  t = fold_build2 (MULT_EXPR, itype, threadid, fd->chunk_size);
  t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
  if (POINTER_TYPE_P (type))
    t = fold_build2 (POINTER_PLUS_EXPR, type, fd->loop.n1,
                     fold_convert (sizetype, t));
  else
    t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
  v_extra = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
                                      true, GSI_SAME_STMT);

  /* Remove the GIMPLE_OMP_FOR.  */
  gsi_remove (&si, true);
  /* Iteration space partitioning goes in ITER_PART_BB.  */
  si = gsi_last_bb (iter_part_bb);

  t = fold_build2 (MULT_EXPR, itype, trip_main, nthreads);
  t = fold_build2 (PLUS_EXPR, itype, t, threadid);
  t = fold_build2 (MULT_EXPR, itype, t, fd->chunk_size);
  s0 = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
                                 false, GSI_CONTINUE_LINKING);

  t = fold_build2 (PLUS_EXPR, itype, s0, fd->chunk_size);
  t = fold_build2 (MIN_EXPR, itype, t, n);
  e0 = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
                                 false, GSI_CONTINUE_LINKING);

  t = build2 (LT_EXPR, boolean_type_node, s0, n);
  gsi_insert_after (&si, gimple_build_cond_empty (t), GSI_CONTINUE_LINKING);
  /* Setup code for sequential iteration goes in SEQ_START_BB.  */
  si = gsi_start_bb (seq_start_bb);

  t = fold_convert (itype, s0);
  t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
  if (POINTER_TYPE_P (type))
    t = fold_build2 (POINTER_PLUS_EXPR, type, fd->loop.n1,
                     fold_convert (sizetype, t));
  else
    t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
  t = force_gimple_operand_gsi (&si, t, false, NULL_TREE,
                                false, GSI_CONTINUE_LINKING);
  stmt = gimple_build_assign (fd->loop.v, t);
  gsi_insert_after (&si, stmt, GSI_CONTINUE_LINKING);

  t = fold_convert (itype, e0);
  t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
  if (POINTER_TYPE_P (type))
    t = fold_build2 (POINTER_PLUS_EXPR, type, fd->loop.n1,
                     fold_convert (sizetype, t));
  else
    t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
  e = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
                                false, GSI_CONTINUE_LINKING);

  /* The code controlling the sequential loop goes in CONT_BB,
     replacing the GIMPLE_OMP_CONTINUE.  */
  si = gsi_last_bb (cont_bb);
  stmt = gsi_stmt (si);
  gcc_assert (gimple_code (stmt) == GIMPLE_OMP_CONTINUE);
  v_main = gimple_omp_continue_control_use (stmt);
  v_back = gimple_omp_continue_control_def (stmt);

  if (POINTER_TYPE_P (type))
    t = fold_build2 (POINTER_PLUS_EXPR, type, v_main,
                     fold_convert (sizetype, fd->loop.step));
  else
    t = fold_build2 (PLUS_EXPR, type, v_main, fd->loop.step);
  stmt = gimple_build_assign (v_back, t);
  gsi_insert_before (&si, stmt, GSI_SAME_STMT);

  t = build2 (fd->loop.cond_code, boolean_type_node, v_back, e);
  gsi_insert_before (&si, gimple_build_cond_empty (t), GSI_SAME_STMT);

  /* Remove GIMPLE_OMP_CONTINUE.  */
  gsi_remove (&si, true);
  /* Trip update code goes into TRIP_UPDATE_BB.  */
  si = gsi_start_bb (trip_update_bb);

  t = build_int_cst (itype, 1);
  t = build2 (PLUS_EXPR, itype, trip_main, t);
  stmt = gimple_build_assign (trip_back, t);
  gsi_insert_after (&si, stmt, GSI_CONTINUE_LINKING);

  /* Replace the GIMPLE_OMP_RETURN with a barrier, or nothing.  */
  si = gsi_last_bb (exit_bb);
  if (!gimple_omp_return_nowait_p (gsi_stmt (si)))
    force_gimple_operand_gsi (&si, build_omp_barrier (), false, NULL_TREE,
                              false, GSI_SAME_STMT);
  gsi_remove (&si, true);

  /* Connect the new blocks.  */
  find_edge (iter_part_bb, seq_start_bb)->flags = EDGE_TRUE_VALUE;
  find_edge (iter_part_bb, fin_bb)->flags = EDGE_FALSE_VALUE;

  find_edge (cont_bb, body_bb)->flags = EDGE_TRUE_VALUE;
  find_edge (cont_bb, trip_update_bb)->flags = EDGE_FALSE_VALUE;

  redirect_edge_and_branch (single_succ_edge (trip_update_bb), iter_part_bb);
  if (gimple_in_ssa_p (cfun))
    {
      gimple_stmt_iterator psi;
      gimple phi;
      edge re, ene;
      edge_var_map_vector head;
      edge_var_map *vm;
      size_t i;

      /* When we redirect the edge from trip_update_bb to iter_part_bb, we
         remove arguments of the phi nodes in fin_bb.  We need to create
         appropriate phi nodes in iter_part_bb instead.  */
      se = single_pred_edge (fin_bb);
      re = single_succ_edge (trip_update_bb);
      head = redirect_edge_var_map_vector (re);
      ene = single_succ_edge (entry_bb);

      psi = gsi_start_phis (fin_bb);
      for (i = 0; !gsi_end_p (psi) && VEC_iterate (edge_var_map, head, i, vm);
           gsi_next (&psi), ++i)
        {
          gimple nphi;
          source_location locus;

          phi = gsi_stmt (psi);
          t = gimple_phi_result (phi);
          gcc_assert (t == redirect_edge_var_map_result (vm));
          nphi = create_phi_node (t, iter_part_bb);
          SSA_NAME_DEF_STMT (t) = nphi;

          t = PHI_ARG_DEF_FROM_EDGE (phi, se);
          locus = gimple_phi_arg_location_from_edge (phi, se);

          /* A special case -- fd->loop.v is not yet computed in
             iter_part_bb, we need to use v_extra instead.  */
          if (t == fd->loop.v)
            t = v_extra;
          add_phi_arg (nphi, t, ene, locus);
          locus = redirect_edge_var_map_location (vm);
          add_phi_arg (nphi, redirect_edge_var_map_def (vm), re, locus);
        }
      gcc_assert (!gsi_end_p (psi) && i == VEC_length (edge_var_map, head));
      redirect_edge_var_map_clear (re);
      while (1)
        {
          psi = gsi_start_phis (fin_bb);
          if (gsi_end_p (psi))
            break;
          remove_phi_node (&psi, false);
        }

      /* Make phi node for trip.  */
      phi = create_phi_node (trip_main, iter_part_bb);
      SSA_NAME_DEF_STMT (trip_main) = phi;
      add_phi_arg (phi, trip_back, single_succ_edge (trip_update_bb),
                   UNKNOWN_LOCATION);
      add_phi_arg (phi, trip_init, single_succ_edge (entry_bb),
                   UNKNOWN_LOCATION);
    }

  set_immediate_dominator (CDI_DOMINATORS, trip_update_bb, cont_bb);
  set_immediate_dominator (CDI_DOMINATORS, iter_part_bb,
                           recompute_dominator (CDI_DOMINATORS, iter_part_bb));
  set_immediate_dominator (CDI_DOMINATORS, fin_bb,
                           recompute_dominator (CDI_DOMINATORS, fin_bb));
  set_immediate_dominator (CDI_DOMINATORS, seq_start_bb,
                           recompute_dominator (CDI_DOMINATORS, seq_start_bb));
  set_immediate_dominator (CDI_DOMINATORS, body_bb,
                           recompute_dominator (CDI_DOMINATORS, body_bb));
}
/* Expand the OpenMP loop defined by REGION.  */

static void
expand_omp_for (struct omp_region *region)
{
  struct omp_for_data fd;
  struct omp_for_data_loop *loops;

  loops
    = (struct omp_for_data_loop *)
      alloca (gimple_omp_for_collapse (last_stmt (region->entry))
              * sizeof (struct omp_for_data_loop));
  extract_omp_for_data (last_stmt (region->entry), &fd, loops);
  region->sched_kind = fd.sched_kind;

  gcc_assert (EDGE_COUNT (region->entry->succs) == 2);
  BRANCH_EDGE (region->entry)->flags &= ~EDGE_ABNORMAL;
  FALLTHRU_EDGE (region->entry)->flags &= ~EDGE_ABNORMAL;
  if (region->cont)
    {
      gcc_assert (EDGE_COUNT (region->cont->succs) == 2);
      BRANCH_EDGE (region->cont)->flags &= ~EDGE_ABNORMAL;
      FALLTHRU_EDGE (region->cont)->flags &= ~EDGE_ABNORMAL;
    }

  if (fd.sched_kind == OMP_CLAUSE_SCHEDULE_STATIC
      && !fd.have_ordered
      && fd.collapse == 1
      && region->cont != NULL)
    {
      if (fd.chunk_size == NULL)
        expand_omp_for_static_nochunk (region, &fd);
      else
        expand_omp_for_static_chunk (region, &fd);
    }
  else
    {
      int fn_index, start_ix, next_ix;

      gcc_assert (fd.sched_kind != OMP_CLAUSE_SCHEDULE_AUTO);
      fn_index = (fd.sched_kind == OMP_CLAUSE_SCHEDULE_RUNTIME)
                 ? 3 : fd.sched_kind;
      fn_index += fd.have_ordered * 4;
      start_ix = BUILT_IN_GOMP_LOOP_STATIC_START + fn_index;
      next_ix = BUILT_IN_GOMP_LOOP_STATIC_NEXT + fn_index;
      if (fd.iter_type == long_long_unsigned_type_node)
        {
          start_ix += BUILT_IN_GOMP_LOOP_ULL_STATIC_START
                      - BUILT_IN_GOMP_LOOP_STATIC_START;
          next_ix += BUILT_IN_GOMP_LOOP_ULL_STATIC_NEXT
                     - BUILT_IN_GOMP_LOOP_STATIC_NEXT;
        }
      expand_omp_for_generic (region, &fd, (enum built_in_function) start_ix,
                              (enum built_in_function) next_ix);
    }

  update_ssa (TODO_update_ssa_only_virtuals);
}
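
/* For reference, a sketch of the builtin selection done above, based on
   the consecutive layout of the GOMP loop builtins (the exact enum
   values live in the builtins definitions): fn_index is 0 for static,
   1 for dynamic, 2 for guided and 3 for runtime schedules, with 4 added
   when an ordered clause is present.  E.g. schedule(dynamic) selects
   GOMP_loop_dynamic_start/next, and schedule(dynamic) combined with
   ordered selects GOMP_loop_ordered_dynamic_start/next.  */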
/* Expand code for an OpenMP sections directive.  In pseudo code, we generate

        v = GOMP_sections_start (n);
    L0:
        switch (v)
          {
          case 0:
            goto L2;
          case 1:
            section 1;
            goto L1;
          case 2:
            ...
          case n:
            ...
          default:
            abort ();
          }
    L1:
        v = GOMP_sections_next ();
        goto L0;
    L2:
        reduction;

    If this is a combined parallel sections, replace the call to
    GOMP_sections_start with call to GOMP_sections_next.  */
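
/* For example (illustrative only): a directive with three sections
   becomes a switch on v with cases 1..3 holding the section bodies,
   case 0 jumping to the exit label L2, and a default case calling
   __builtin_trap (), as set up by the function below.  */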
static void
expand_omp_sections (struct omp_region *region)
{
  tree t, u, vin = NULL, vmain, vnext, l2;
  VEC (tree,heap) *label_vec;
  unsigned len;
  basic_block entry_bb, l0_bb, l1_bb, l2_bb, default_bb;
  gimple_stmt_iterator si, switch_si;
  gimple sections_stmt, stmt, cont;
  edge_iterator ei;
  edge e;
  struct omp_region *inner;
  unsigned i, casei;
  bool exit_reachable = region->cont != NULL;

  gcc_assert (exit_reachable == (region->exit != NULL));
  entry_bb = region->entry;
  l0_bb = single_succ (entry_bb);
  l1_bb = region->cont;
  l2_bb = region->exit;
  if (exit_reachable)
    {
      if (single_pred_p (l2_bb) && single_pred (l2_bb) == l0_bb)
        l2 = gimple_block_label (l2_bb);
      else
        {
          /* This can happen if there are reductions.  */
          len = EDGE_COUNT (l0_bb->succs);
          gcc_assert (len > 0);
          e = EDGE_SUCC (l0_bb, len - 1);
          si = gsi_last_bb (e->dest);
          l2 = NULL_TREE;
          if (gsi_end_p (si)
              || gimple_code (gsi_stmt (si)) != GIMPLE_OMP_SECTION)
            l2 = gimple_block_label (e->dest);
          else
            FOR_EACH_EDGE (e, ei, l0_bb->succs)
              {
                si = gsi_last_bb (e->dest);
                if (gsi_end_p (si)
                    || gimple_code (gsi_stmt (si)) != GIMPLE_OMP_SECTION)
                  {
                    l2 = gimple_block_label (e->dest);
                    break;
                  }
              }
        }
      default_bb = create_empty_bb (l1_bb->prev_bb);
    }
  else
    {
      default_bb = create_empty_bb (l0_bb);
      l2 = gimple_block_label (default_bb);
    }

  /* We will build a switch() with enough cases for all the
     GIMPLE_OMP_SECTION regions, a '0' case to handle the end of more work
     and a default case to abort if something goes wrong.  */
  len = EDGE_COUNT (l0_bb->succs);

  /* Use VEC_quick_push on label_vec throughout, since we know the size
     in advance.  */
  label_vec = VEC_alloc (tree, heap, len);

  /* The call to GOMP_sections_start goes in ENTRY_BB, replacing the
     GIMPLE_OMP_SECTIONS statement.  */
  si = gsi_last_bb (entry_bb);
  sections_stmt = gsi_stmt (si);
  gcc_assert (gimple_code (sections_stmt) == GIMPLE_OMP_SECTIONS);
  vin = gimple_omp_sections_control (sections_stmt);
  if (!is_combined_parallel (region))
    {
      /* If we are not inside a combined parallel+sections region,
         call GOMP_sections_start.  */
      t = build_int_cst (unsigned_type_node,
                         exit_reachable ? len - 1 : len);
      u = built_in_decls[BUILT_IN_GOMP_SECTIONS_START];
      stmt = gimple_build_call (u, 1, t);
    }
  else
    {
      /* Otherwise, call GOMP_sections_next.  */
      u = built_in_decls[BUILT_IN_GOMP_SECTIONS_NEXT];
      stmt = gimple_build_call (u, 0);
    }
  gimple_call_set_lhs (stmt, vin);
  gsi_insert_after (&si, stmt, GSI_SAME_STMT);
  gsi_remove (&si, true);

  /* The switch() statement replacing GIMPLE_OMP_SECTIONS_SWITCH goes in
     L0_BB.  */
  switch_si = gsi_last_bb (l0_bb);
  gcc_assert (gimple_code (gsi_stmt (switch_si)) == GIMPLE_OMP_SECTIONS_SWITCH);
  if (exit_reachable)
    {
      cont = last_stmt (l1_bb);
      gcc_assert (gimple_code (cont) == GIMPLE_OMP_CONTINUE);
      vmain = gimple_omp_continue_control_use (cont);
      vnext = gimple_omp_continue_control_def (cont);
    }
  else
    {
      vmain = vin;
      vnext = NULL_TREE;
    }

  i = 0;
  if (exit_reachable)
    {
      t = build3 (CASE_LABEL_EXPR, void_type_node,
                  build_int_cst (unsigned_type_node, 0), NULL, l2);
      VEC_quick_push (tree, label_vec, t);
      i++;
    }

  /* Convert each GIMPLE_OMP_SECTION into a CASE_LABEL_EXPR.  */
  for (inner = region->inner, casei = 1;
       inner;
       inner = inner->next, i++, casei++)
    {
      basic_block s_entry_bb, s_exit_bb;

      /* Skip optional reduction region.  */
      if (inner->type == GIMPLE_OMP_ATOMIC_LOAD)
        {
          --i;
          --casei;
          continue;
        }

      s_entry_bb = inner->entry;
      s_exit_bb = inner->exit;

      t = gimple_block_label (s_entry_bb);
      u = build_int_cst (unsigned_type_node, casei);
      u = build3 (CASE_LABEL_EXPR, void_type_node, u, NULL, t);
      VEC_quick_push (tree, label_vec, u);

      si = gsi_last_bb (s_entry_bb);
      gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SECTION);
      gcc_assert (i < len || gimple_omp_section_last_p (gsi_stmt (si)));
      gsi_remove (&si, true);
      single_succ_edge (s_entry_bb)->flags = EDGE_FALLTHRU;

      if (s_exit_bb == NULL)
        continue;

      si = gsi_last_bb (s_exit_bb);
      gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_RETURN);
      gsi_remove (&si, true);

      single_succ_edge (s_exit_bb)->flags = EDGE_FALLTHRU;
    }

  /* Error handling code goes in DEFAULT_BB.  */
  t = gimple_block_label (default_bb);
  u = build3 (CASE_LABEL_EXPR, void_type_node, NULL, NULL, t);
  make_edge (l0_bb, default_bb, 0);

  stmt = gimple_build_switch_vec (vmain, u, label_vec);
  gsi_insert_after (&switch_si, stmt, GSI_SAME_STMT);
  gsi_remove (&switch_si, true);
  VEC_free (tree, heap, label_vec);

  si = gsi_start_bb (default_bb);
  stmt = gimple_build_call (built_in_decls[BUILT_IN_TRAP], 0);
  gsi_insert_after (&si, stmt, GSI_CONTINUE_LINKING);

  if (exit_reachable)
    {
      /* Code to get the next section goes in L1_BB.  */
      si = gsi_last_bb (l1_bb);
      gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_CONTINUE);

      stmt = gimple_build_call (built_in_decls[BUILT_IN_GOMP_SECTIONS_NEXT], 0);
      gimple_call_set_lhs (stmt, vnext);
      gsi_insert_after (&si, stmt, GSI_SAME_STMT);
      gsi_remove (&si, true);

      single_succ_edge (l1_bb)->flags = EDGE_FALLTHRU;

      /* Cleanup function replaces GIMPLE_OMP_RETURN in EXIT_BB.  */
      si = gsi_last_bb (l2_bb);
      if (gimple_omp_return_nowait_p (gsi_stmt (si)))
        t = built_in_decls[BUILT_IN_GOMP_SECTIONS_END_NOWAIT];
      else
        t = built_in_decls[BUILT_IN_GOMP_SECTIONS_END];
      stmt = gimple_build_call (t, 0);
      gsi_insert_after (&si, stmt, GSI_SAME_STMT);
      gsi_remove (&si, true);
    }

  set_immediate_dominator (CDI_DOMINATORS, default_bb, l0_bb);
}
/* Expand code for an OpenMP single directive.  We've already expanded
   much of the code, here we simply place the GOMP_barrier call.  */

static void
expand_omp_single (struct omp_region *region)
{
  basic_block entry_bb, exit_bb;
  gimple_stmt_iterator si;
  bool need_barrier = false;

  entry_bb = region->entry;
  exit_bb = region->exit;

  si = gsi_last_bb (entry_bb);
  /* The terminal barrier at the end of a GOMP_single_copy sequence cannot
     be removed.  We need to ensure that the thread that entered the single
     does not exit before the data is copied out by the other threads.  */
  if (find_omp_clause (gimple_omp_single_clauses (gsi_stmt (si)),
                       OMP_CLAUSE_COPYPRIVATE))
    need_barrier = true;
  gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SINGLE);
  gsi_remove (&si, true);
  single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;

  si = gsi_last_bb (exit_bb);
  if (!gimple_omp_return_nowait_p (gsi_stmt (si)) || need_barrier)
    force_gimple_operand_gsi (&si, build_omp_barrier (), false, NULL_TREE,
                              false, GSI_SAME_STMT);
  gsi_remove (&si, true);
  single_succ_edge (exit_bb)->flags = EDGE_FALLTHRU;
}
/* Generic expansion for OpenMP synchronization directives: master,
   ordered and critical.  All we need to do here is remove the entry
   and exit markers for REGION.  */

static void
expand_omp_synch (struct omp_region *region)
{
  basic_block entry_bb, exit_bb;
  gimple_stmt_iterator si;

  entry_bb = region->entry;
  exit_bb = region->exit;

  si = gsi_last_bb (entry_bb);
  gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SINGLE
              || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_MASTER
              || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ORDERED
              || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_CRITICAL);
  gsi_remove (&si, true);
  single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;

  if (exit_bb)
    {
      si = gsi_last_bb (exit_bb);
      gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_RETURN);
      gsi_remove (&si, true);
      single_succ_edge (exit_bb)->flags = EDGE_FALLTHRU;
    }
}
/* A subroutine of expand_omp_atomic.  Attempt to implement the atomic
   operation as a __sync_fetch_and_op builtin.  INDEX is log2 of the
   size of the data type, and thus usable to find the index of the builtin
   decl.  Returns false if the expression is not of the proper form.  */

static bool
expand_omp_atomic_fetch_op (basic_block load_bb,
                            tree addr, tree loaded_val,
                            tree stored_val, int index)
{
  enum built_in_function base;
  tree decl, itype, call;
  enum insn_code *optab;
  tree rhs;
  basic_block store_bb = single_succ (load_bb);
  gimple_stmt_iterator gsi;
  gimple stmt;
  location_t loc;

  /* We expect to find the following sequences:

   load_bb:
       GIMPLE_OMP_ATOMIC_LOAD (tmp, mem)

   store_bb:
       val = tmp OP something; (or: something OP tmp)
       GIMPLE_OMP_ATOMIC_STORE (val)

  ???FIXME: Allow a more flexible sequence.
  Perhaps use data flow to pick the statements.  */
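
  /* For instance (illustrative only): for an 'int x', the directive

        #pragma omp atomic
        x += 1;

     arrives here as a GIMPLE_OMP_ATOMIC_LOAD of x, an assignment
     'val = tmp + 1;', and a GIMPLE_OMP_ATOMIC_STORE of val, which the
     code below matches to BUILT_IN_FETCH_AND_ADD_N and replaces with a
     __sync_fetch_and_add_4 call.  */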
  gsi = gsi_after_labels (store_bb);
  stmt = gsi_stmt (gsi);
  loc = gimple_location (stmt);
  if (!is_gimple_assign (stmt))
    return false;
  gsi_next (&gsi);
  if (gimple_code (gsi_stmt (gsi)) != GIMPLE_OMP_ATOMIC_STORE)
    return false;

  if (!operand_equal_p (gimple_assign_lhs (stmt), stored_val, 0))
    return false;

  /* Check for one of the supported fetch-op operations.  */
  switch (gimple_assign_rhs_code (stmt))
    {
    case PLUS_EXPR:
    case POINTER_PLUS_EXPR:
      base = BUILT_IN_FETCH_AND_ADD_N;
      optab = sync_add_optab;
      break;
    case MINUS_EXPR:
      base = BUILT_IN_FETCH_AND_SUB_N;
      optab = sync_add_optab;
      break;
    case BIT_AND_EXPR:
      base = BUILT_IN_FETCH_AND_AND_N;
      optab = sync_and_optab;
      break;
    case BIT_IOR_EXPR:
      base = BUILT_IN_FETCH_AND_OR_N;
      optab = sync_ior_optab;
      break;
    case BIT_XOR_EXPR:
      base = BUILT_IN_FETCH_AND_XOR_N;
      optab = sync_xor_optab;
      break;
    default:
      return false;
    }

  /* Make sure the expression is of the proper form.  */
  if (operand_equal_p (gimple_assign_rhs1 (stmt), loaded_val, 0))
    rhs = gimple_assign_rhs2 (stmt);
  else if (commutative_tree_code (gimple_assign_rhs_code (stmt))
           && operand_equal_p (gimple_assign_rhs2 (stmt), loaded_val, 0))
    rhs = gimple_assign_rhs1 (stmt);
  else
    return false;

  decl = built_in_decls[base + index + 1];
  itype = TREE_TYPE (TREE_TYPE (decl));

  if (optab[TYPE_MODE (itype)] == CODE_FOR_nothing)
    return false;

  gsi = gsi_last_bb (load_bb);
  gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_ATOMIC_LOAD);
  call = build_call_expr_loc (loc,
                              decl, 2, addr,
                              fold_convert_loc (loc, itype, rhs));
  call = fold_convert_loc (loc, void_type_node, call);
  force_gimple_operand_gsi (&gsi, call, true, NULL_TREE, true, GSI_SAME_STMT);
  gsi_remove (&gsi, true);

  gsi = gsi_last_bb (store_bb);
  gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_ATOMIC_STORE);
  gsi_remove (&gsi, true);
  gsi = gsi_last_bb (store_bb);
  gsi_remove (&gsi, true);

  if (gimple_in_ssa_p (cfun))
    update_ssa (TODO_update_ssa_no_phi);

  return true;
}
/* A subroutine of expand_omp_atomic.  Implement the atomic operation as:

      oldval = *addr;
      repeat:
        newval = rhs;    // with oldval replacing *addr in rhs
        oldval = __sync_val_compare_and_swap (addr, oldval, newval);
        if (oldval != newval)
          goto repeat;

   INDEX is log2 of the size of the data type, and thus usable to find the
   index of the builtin decl.  */
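
/* A minimal user-level sketch of the same compare-and-swap loop
   (illustrative only; it assumes the __sync builtins and is not used by
   this pass):

     int
     atomic_add_int (int *addr, int rhs)
     {
       int oldval, newval;
       do
         {
           oldval = *addr;
           newval = oldval + rhs;
         }
       while (__sync_val_compare_and_swap (addr, oldval, newval) != oldval);
       return newval;
     }
*/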
static bool
expand_omp_atomic_pipeline (basic_block load_bb, basic_block store_bb,
                            tree addr, tree loaded_val, tree stored_val,
                            int index)
{
  tree loadedi, storedi, initial, new_storedi, old_vali;
  tree type, itype, cmpxchg, iaddr;
  gimple_stmt_iterator si;
  basic_block loop_header = single_succ (load_bb);
  gimple phi, stmt;
  edge e;

  cmpxchg = built_in_decls[BUILT_IN_VAL_COMPARE_AND_SWAP_N + index + 1];
  type = TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (addr)));
  itype = TREE_TYPE (TREE_TYPE (cmpxchg));

  if (sync_compare_and_swap[TYPE_MODE (itype)] == CODE_FOR_nothing)
    return false;

  /* Load the initial value, replacing the GIMPLE_OMP_ATOMIC_LOAD.  */
  si = gsi_last_bb (load_bb);
  gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_LOAD);

  /* For floating-point values, we'll need to view-convert them to integers
     so that we can perform the atomic compare and swap.  Simplify the
     following code by always setting up the "i"ntegral variables.  */
  if (!INTEGRAL_TYPE_P (type) && !POINTER_TYPE_P (type))
    {
      tree iaddr_val;

      iaddr = create_tmp_var (build_pointer_type_for_mode (itype, ptr_mode,
                                                           true), NULL);
      iaddr_val
        = force_gimple_operand_gsi (&si,
                                    fold_convert (TREE_TYPE (iaddr), addr),
                                    false, NULL_TREE, true, GSI_SAME_STMT);
      stmt = gimple_build_assign (iaddr, iaddr_val);
      gsi_insert_before (&si, stmt, GSI_SAME_STMT);
      loadedi = create_tmp_var (itype, NULL);
      if (gimple_in_ssa_p (cfun))
        {
          add_referenced_var (iaddr);
          add_referenced_var (loadedi);
          loadedi = make_ssa_name (loadedi, NULL);
        }
    }
  else
    {
      iaddr = addr;
      loadedi = loaded_val;
    }

  initial = force_gimple_operand_gsi (&si, build_fold_indirect_ref (iaddr),
                                      true, NULL_TREE, true, GSI_SAME_STMT);

  /* Move the value to the LOADEDI temporary.  */
  if (gimple_in_ssa_p (cfun))
    {
      gcc_assert (gimple_seq_empty_p (phi_nodes (loop_header)));
      phi = create_phi_node (loadedi, loop_header);
      SSA_NAME_DEF_STMT (loadedi) = phi;
      SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, single_succ_edge (load_bb)),
               initial);
    }
  else
    gsi_insert_before (&si,
                       gimple_build_assign (loadedi, initial),
                       GSI_SAME_STMT);
  if (loadedi != loaded_val)
    {
      gimple_stmt_iterator gsi2;
      tree x;

      x = build1 (VIEW_CONVERT_EXPR, type, loadedi);
      gsi2 = gsi_start_bb (loop_header);
      if (gimple_in_ssa_p (cfun))
        {
          x = force_gimple_operand_gsi (&gsi2, x, true, NULL_TREE,
                                        true, GSI_SAME_STMT);
          stmt = gimple_build_assign (loaded_val, x);
          gsi_insert_before (&gsi2, stmt, GSI_SAME_STMT);
        }
      else
        {
          x = build2 (MODIFY_EXPR, TREE_TYPE (loaded_val), loaded_val, x);
          force_gimple_operand_gsi (&gsi2, x, true, NULL_TREE,
                                    true, GSI_SAME_STMT);
        }
    }
  gsi_remove (&si, true);

  si = gsi_last_bb (store_bb);
  gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_STORE);

  if (iaddr == addr)
    storedi = stored_val;
  else
    storedi
      = force_gimple_operand_gsi (&si,
                                  build1 (VIEW_CONVERT_EXPR, itype,
                                          stored_val), true, NULL_TREE, true,
                                  GSI_SAME_STMT);

  /* Build the compare&swap statement.  */
  new_storedi = build_call_expr (cmpxchg, 3, iaddr, loadedi, storedi);
  new_storedi = force_gimple_operand_gsi (&si,
                                          fold_convert (TREE_TYPE (loadedi),
                                                        new_storedi),
                                          true, NULL_TREE,
                                          true, GSI_SAME_STMT);

  if (gimple_in_ssa_p (cfun))
    old_vali = loadedi;
  else
    {
      old_vali = create_tmp_var (TREE_TYPE (loadedi), NULL);
      if (gimple_in_ssa_p (cfun))
        add_referenced_var (old_vali);
      stmt = gimple_build_assign (old_vali, loadedi);
      gsi_insert_before (&si, stmt, GSI_SAME_STMT);

      stmt = gimple_build_assign (loadedi, new_storedi);
      gsi_insert_before (&si, stmt, GSI_SAME_STMT);
    }

  /* Note that we always perform the comparison as an integer, even for
     floating point.  This allows the atomic operation to properly
     succeed even with NaNs and -0.0.  */
  stmt = gimple_build_cond_empty
           (build2 (NE_EXPR, boolean_type_node,
                    new_storedi, old_vali));
  gsi_insert_before (&si, stmt, GSI_SAME_STMT);

  /* Update cfg.  */
  e = single_succ_edge (store_bb);
  e->flags &= ~EDGE_FALLTHRU;
  e->flags |= EDGE_FALSE_VALUE;

  e = make_edge (store_bb, loop_header, EDGE_TRUE_VALUE);

  /* Copy the new value to loadedi (we already did that before the condition
     if we are not in SSA).  */
  if (gimple_in_ssa_p (cfun))
    {
      phi = gimple_seq_first_stmt (phi_nodes (loop_header));
      SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, e), new_storedi);
    }

  /* Remove GIMPLE_OMP_ATOMIC_STORE.  */
  gsi_remove (&si, true);

  if (gimple_in_ssa_p (cfun))
    update_ssa (TODO_update_ssa_no_phi);

  return true;
}
/* A subroutine of expand_omp_atomic.  Implement the atomic operation as:

        GOMP_atomic_start ();
        *addr = rhs;
        GOMP_atomic_end ();

   The result is not globally atomic, but works so long as all parallel
   references are within #pragma omp atomic directives.  According to
   responses received from omp@openmp.org, appears to be within spec.
   Which makes sense, since that's how several other compilers handle
   this situation as well.
   LOADED_VAL and ADDR are the operands of GIMPLE_OMP_ATOMIC_LOAD we're
   expanding.  STORED_VAL is the operand of the matching
   GIMPLE_OMP_ATOMIC_STORE.

   We replace
   GIMPLE_OMP_ATOMIC_LOAD (loaded_val, addr) with
   loaded_val = *addr;

   and replace
   GIMPLE_OMP_ATOMIC_STORE (stored_val) with
   *addr = stored_val;  */
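
/* For example (illustrative only): 'double x; ... #pragma omp atomic
   x *= y;' has no fetch-and-op builtin, and if the compare-and-swap
   expansion is also unavailable for the target it ends up here as:

        GOMP_atomic_start ();
        x = x * y;
        GOMP_atomic_end ();  */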
static void
expand_omp_atomic_mutex (basic_block load_bb, basic_block store_bb,
                         tree addr, tree loaded_val, tree stored_val)
{
  gimple_stmt_iterator si;
  gimple stmt;
  tree t;

  si = gsi_last_bb (load_bb);
  gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_LOAD);

  t = built_in_decls[BUILT_IN_GOMP_ATOMIC_START];
  t = build_function_call_expr (UNKNOWN_LOCATION, t, 0);
  force_gimple_operand_gsi (&si, t, true, NULL_TREE, true, GSI_SAME_STMT);

  stmt = gimple_build_assign (loaded_val, build_fold_indirect_ref (addr));
  gsi_insert_before (&si, stmt, GSI_SAME_STMT);
  gsi_remove (&si, true);

  si = gsi_last_bb (store_bb);
  gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_STORE);

  stmt = gimple_build_assign (build_fold_indirect_ref (unshare_expr (addr)),
                              stored_val);
  gsi_insert_before (&si, stmt, GSI_SAME_STMT);

  t = built_in_decls[BUILT_IN_GOMP_ATOMIC_END];
  t = build_function_call_expr (UNKNOWN_LOCATION, t, 0);
  force_gimple_operand_gsi (&si, t, true, NULL_TREE, true, GSI_SAME_STMT);
  gsi_remove (&si, true);

  if (gimple_in_ssa_p (cfun))
    update_ssa (TODO_update_ssa_no_phi);
}
/* Expand a GIMPLE_OMP_ATOMIC statement.  We try to expand it
   using expand_omp_atomic_fetch_op.  If that fails, we try to
   call expand_omp_atomic_pipeline, and if it fails too, the
   ultimate fallback is wrapping the operation in a mutex
   (expand_omp_atomic_mutex).  REGION is the atomic region built
   by build_omp_regions_1 ().  */
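
/* E.g. (illustrative only): for a 4-byte int, index = exact_log2 (4) = 2,
   so the fetch-op path below maps to the __sync_*_4 builtins; a type
   whose size is not a power of two in [1, 16], or one with weaker
   alignment, falls through to the mutex fallback.  */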
static void
expand_omp_atomic (struct omp_region *region)
{
  basic_block load_bb = region->entry, store_bb = region->exit;
  gimple load = last_stmt (load_bb), store = last_stmt (store_bb);
  tree loaded_val = gimple_omp_atomic_load_lhs (load);
  tree addr = gimple_omp_atomic_load_rhs (load);
  tree stored_val = gimple_omp_atomic_store_val (store);
  tree type = TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (addr)));
  HOST_WIDE_INT index;

  /* Make sure the type is one of the supported sizes.  */
  index = tree_low_cst (TYPE_SIZE_UNIT (type), 1);
  index = exact_log2 (index);
  if (index >= 0 && index <= 4)
    {
      unsigned int align = TYPE_ALIGN_UNIT (type);

      /* __sync builtins require strict data alignment.  */
      if (exact_log2 (align) >= index)
        {
          /* When possible, use specialized atomic update functions.  */
          if ((INTEGRAL_TYPE_P (type) || POINTER_TYPE_P (type))
              && store_bb == single_succ (load_bb))
            {
              if (expand_omp_atomic_fetch_op (load_bb, addr,
                                              loaded_val, stored_val, index))
                return;
            }

          /* If we don't have specialized __sync builtins, try and implement
             as a compare and swap loop.  */
          if (expand_omp_atomic_pipeline (load_bb, store_bb, addr,
                                          loaded_val, stored_val, index))
            return;
        }
    }

  /* The ultimate fallback is wrapping the operation in a mutex.  */
  expand_omp_atomic_mutex (load_bb, store_bb, addr, loaded_val, stored_val);
}
/* Expand the parallel region tree rooted at REGION.  Expansion
   proceeds in depth-first order.  Innermost regions are expanded
   first.  This way, parallel regions that require a new function to
   be created (e.g., GIMPLE_OMP_PARALLEL) can be expanded without having any
   internal dependencies in their body.  */

static void
expand_omp (struct omp_region *region)
{
  while (region)
    {
      location_t saved_location;

      /* First, determine whether this is a combined parallel+workshare
         region.  */
      if (region->type == GIMPLE_OMP_PARALLEL)
        determine_parallel_type (region);

      if (region->inner)
        expand_omp (region->inner);

      saved_location = input_location;
      if (gimple_has_location (last_stmt (region->entry)))
        input_location = gimple_location (last_stmt (region->entry));

      switch (region->type)
        {
        case GIMPLE_OMP_PARALLEL:
        case GIMPLE_OMP_TASK:
          expand_omp_taskreg (region);
          break;

        case GIMPLE_OMP_FOR:
          expand_omp_for (region);
          break;

        case GIMPLE_OMP_SECTIONS:
          expand_omp_sections (region);
          break;

        case GIMPLE_OMP_SECTION:
          /* Individual omp sections are handled together with their
             parent GIMPLE_OMP_SECTIONS region.  */
          break;

        case GIMPLE_OMP_SINGLE:
          expand_omp_single (region);
          break;

        case GIMPLE_OMP_MASTER:
        case GIMPLE_OMP_ORDERED:
        case GIMPLE_OMP_CRITICAL:
          expand_omp_synch (region);
          break;

        case GIMPLE_OMP_ATOMIC_LOAD:
          expand_omp_atomic (region);
          break;

        default:
          gcc_unreachable ();
        }

      input_location = saved_location;
      region = region->next;
    }
}
/* Helper for build_omp_regions.  Scan the dominator tree starting at
   block BB.  PARENT is the region that contains BB.  If SINGLE_TREE is
   true, the function ends once a single tree is built (otherwise, whole
   forest of OMP constructs may be built).  */

static void
build_omp_regions_1 (basic_block bb, struct omp_region *parent,
                     bool single_tree)
{
  gimple_stmt_iterator gsi;
  gimple stmt;
  basic_block son;

  gsi = gsi_last_bb (bb);
  if (!gsi_end_p (gsi) && is_gimple_omp (gsi_stmt (gsi)))
    {
      struct omp_region *region;
      enum gimple_code code;

      stmt = gsi_stmt (gsi);
      code = gimple_code (stmt);
      if (code == GIMPLE_OMP_RETURN)
        {
          /* STMT is the return point out of region PARENT.  Mark it
             as the exit point and make PARENT the immediately
             enclosing region.  */
          gcc_assert (parent);
          region = parent;
          region->exit = bb;
          parent = parent->outer;
        }
      else if (code == GIMPLE_OMP_ATOMIC_STORE)
        {
          /* GIMPLE_OMP_ATOMIC_STORE is analogous to
             GIMPLE_OMP_RETURN, but matches with
             GIMPLE_OMP_ATOMIC_LOAD.  */
          gcc_assert (parent);
          gcc_assert (parent->type == GIMPLE_OMP_ATOMIC_LOAD);
          region = parent;
          region->exit = bb;
          parent = parent->outer;
        }

      else if (code == GIMPLE_OMP_CONTINUE)
        {
          gcc_assert (parent);
          parent->cont = bb;
        }
      else if (code == GIMPLE_OMP_SECTIONS_SWITCH)
        {
          /* GIMPLE_OMP_SECTIONS_SWITCH is part of
             GIMPLE_OMP_SECTIONS, and we do nothing for it.  */
          ;
        }
      else
        {
          /* Otherwise, this directive becomes the parent for a new
             region.  */
          region = new_omp_region (bb, code, parent);
          parent = region;
        }
    }

  if (single_tree && !parent)
    return;

  for (son = first_dom_son (CDI_DOMINATORS, bb);
       son;
       son = next_dom_son (CDI_DOMINATORS, son))
    build_omp_regions_1 (son, parent, single_tree);
}
/* Builds the tree of OMP regions rooted at ROOT, storing it to
   root_omp_region.  */

static void
build_omp_regions_root (basic_block root)
{
  gcc_assert (root_omp_region == NULL);
  build_omp_regions_1 (root, NULL, true);
  gcc_assert (root_omp_region != NULL);
}
/* Expands omp construct (and its subconstructs) starting in HEAD.  */

void
omp_expand_local (basic_block head)
{
  build_omp_regions_root (head);
  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "\nOMP region tree\n\n");
      dump_omp_region (dump_file, root_omp_region, 0);
      fprintf (dump_file, "\n");
    }

  remove_exit_barriers (root_omp_region);
  expand_omp (root_omp_region);

  free_omp_regions ();
}
/* Scan the CFG and build a tree of OMP regions.  Return the root of
   the OMP region tree.  */

static void
build_omp_regions (void)
{
  gcc_assert (root_omp_region == NULL);
  calculate_dominance_info (CDI_DOMINATORS);
  build_omp_regions_1 (ENTRY_BLOCK_PTR, NULL, false);
}
/* Main entry point for expanding OMP-GIMPLE into runtime calls.  */

static unsigned int
execute_expand_omp (void)
{
  build_omp_regions ();

  if (!root_omp_region)
    return 0;

  if (dump_file)
    {
      fprintf (dump_file, "\nOMP region tree\n\n");
      dump_omp_region (dump_file, root_omp_region, 0);
      fprintf (dump_file, "\n");
    }

  remove_exit_barriers (root_omp_region);

  expand_omp (root_omp_region);

  cleanup_tree_cfg ();

  free_omp_regions ();

  return 0;
}
/* OMP expansion -- the default pass, run before creation of SSA form.  */

static bool
gate_expand_omp (void)
{
  return (flag_openmp != 0 && !seen_error ());
}

struct gimple_opt_pass pass_expand_omp =
{
 {
  GIMPLE_PASS,
  "ompexp",                             /* name */
  gate_expand_omp,                      /* gate */
  execute_expand_omp,                   /* execute */
  NULL,                                 /* sub */
  NULL,                                 /* next */
  0,                                    /* static_pass_number */
  TV_NONE,                              /* tv_id */
  PROP_gimple_any,                      /* properties_required */
  0,                                    /* properties_provided */
  0,                                    /* properties_destroyed */
  0,                                    /* todo_flags_start */
  TODO_dump_func                        /* todo_flags_finish */
 }
};
/* Routines to lower OpenMP directives into OMP-GIMPLE.  */

/* Lower the OpenMP sections directive in the current statement in GSI_P.
   CTX is the enclosing OMP context for the current statement.  */

static void
lower_omp_sections (gimple_stmt_iterator *gsi_p, omp_context *ctx)
{
  tree block, control;
  gimple_stmt_iterator tgsi;
  unsigned i, len;
  gimple stmt, new_stmt, bind, t;
  gimple_seq ilist, dlist, olist, new_body, body;
  struct gimplify_ctx gctx;

  stmt = gsi_stmt (*gsi_p);

  push_gimplify_context (&gctx);

  dlist = NULL;
  ilist = NULL;
  lower_rec_input_clauses (gimple_omp_sections_clauses (stmt),
                           &ilist, &dlist, ctx);

  tgsi = gsi_start (gimple_omp_body (stmt));
  for (len = 0; !gsi_end_p (tgsi); len++, gsi_next (&tgsi))
    continue;

  tgsi = gsi_start (gimple_omp_body (stmt));
  body = NULL;
  for (i = 0; i < len; i++, gsi_next (&tgsi))
    {
      omp_context *sctx;
      gimple sec_start;

      sec_start = gsi_stmt (tgsi);
      sctx = maybe_lookup_ctx (sec_start);
      gcc_assert (sctx);

      gimple_seq_add_stmt (&body, sec_start);

      lower_omp (gimple_omp_body (sec_start), sctx);
      gimple_seq_add_seq (&body, gimple_omp_body (sec_start));
      gimple_omp_set_body (sec_start, NULL);

      if (i == len - 1)
        {
          gimple_seq l = NULL;
          lower_lastprivate_clauses (gimple_omp_sections_clauses (stmt), NULL,
                                     &l, ctx);
          gimple_seq_add_seq (&body, l);
          gimple_omp_section_set_last (sec_start);
        }

      gimple_seq_add_stmt (&body, gimple_build_omp_return (false));
    }

  block = make_node (BLOCK);
  bind = gimple_build_bind (NULL, body, block);

  olist = NULL;
  lower_reduction_clauses (gimple_omp_sections_clauses (stmt), &olist, ctx);

  block = make_node (BLOCK);
  new_stmt = gimple_build_bind (NULL, NULL, block);

  pop_gimplify_context (new_stmt);
  gimple_bind_append_vars (new_stmt, ctx->block_vars);
  BLOCK_VARS (block) = gimple_bind_vars (bind);
  if (BLOCK_VARS (block))
    TREE_USED (block) = 1;

  new_body = NULL;
  gimple_seq_add_seq (&new_body, ilist);
  gimple_seq_add_stmt (&new_body, stmt);
  gimple_seq_add_stmt (&new_body, gimple_build_omp_sections_switch ());
  gimple_seq_add_stmt (&new_body, bind);

  control = create_tmp_var (unsigned_type_node, ".section");
  t = gimple_build_omp_continue (control, control);
  gimple_omp_sections_set_control (stmt, control);
  gimple_seq_add_stmt (&new_body, t);

  gimple_seq_add_seq (&new_body, olist);
  gimple_seq_add_seq (&new_body, dlist);

  new_body = maybe_catch_exception (new_body);

  t = gimple_build_omp_return
        (!!find_omp_clause (gimple_omp_sections_clauses (stmt),
                            OMP_CLAUSE_NOWAIT));
  gimple_seq_add_stmt (&new_body, t);

  gimple_bind_set_body (new_stmt, new_body);
  gimple_omp_set_body (stmt, NULL);

  gsi_replace (gsi_p, new_stmt, true);
}
/* A subroutine of lower_omp_single.  Expand the simple form of
   a GIMPLE_OMP_SINGLE, without a copyprivate clause:

        if (GOMP_single_start ())
          BODY;
        [ GOMP_barrier (); ]    -> unless 'nowait' is present.

   FIXME.  It may be better to delay expanding the logic of this until
   pass_expand_omp.  The expanded logic may make the job more difficult
   to a synchronization analysis pass.  */
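
/* For instance (illustrative only): '#pragma omp single nowait' lowers
   to the form above without the trailing GOMP_barrier () call, because
   the GIMPLE_OMP_RETURN emitted by lower_omp_single carries the nowait
   flag that expand_omp_single later checks.  */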
static void
lower_omp_single_simple (gimple single_stmt, gimple_seq *pre_p)
{
  location_t loc = gimple_location (single_stmt);
  tree tlabel = create_artificial_label (loc);
  tree flabel = create_artificial_label (loc);
  gimple call, cond;
  tree lhs, decl;

  decl = built_in_decls[BUILT_IN_GOMP_SINGLE_START];
  lhs = create_tmp_var (TREE_TYPE (TREE_TYPE (decl)), NULL);
  call = gimple_build_call (decl, 0);
  gimple_call_set_lhs (call, lhs);
  gimple_seq_add_stmt (pre_p, call);

  cond = gimple_build_cond (EQ_EXPR, lhs,
                            fold_convert_loc (loc, TREE_TYPE (lhs),
                                              boolean_true_node),
                            tlabel, flabel);
  gimple_seq_add_stmt (pre_p, cond);
  gimple_seq_add_stmt (pre_p, gimple_build_label (tlabel));
  gimple_seq_add_seq (pre_p, gimple_omp_body (single_stmt));
  gimple_seq_add_stmt (pre_p, gimple_build_label (flabel));
}
/* A subroutine of lower_omp_single.  Expand the simple form of
   a GIMPLE_OMP_SINGLE, with a copyprivate clause:

        #pragma omp single copyprivate (a, b, c)

   Create a new structure to hold copies of 'a', 'b' and 'c' and emit:

      {
        if ((copyout_p = GOMP_single_copy_start ()) == NULL)
          {
            BODY;
            copyout.a = a;
            copyout.b = b;
            copyout.c = c;
            GOMP_single_copy_end (&copyout);
          }
        else
          {
            a = copyout_p->a;
            b = copyout_p->b;
            c = copyout_p->c;
          }
        GOMP_barrier ();
      }

  FIXME.  It may be better to delay expanding the logic of this until
  pass_expand_omp.  The expanded logic may make the job more difficult
  to a synchronization analysis pass.  */
static void
lower_omp_single_copy (gimple single_stmt, gimple_seq *pre_p, omp_context *ctx)
{
  tree ptr_type, t, l0, l1, l2;
  gimple_seq copyin_seq;
  location_t loc = gimple_location (single_stmt);

  ctx->sender_decl = create_tmp_var (ctx->record_type, ".omp_copy_o");

  ptr_type = build_pointer_type (ctx->record_type);
  ctx->receiver_decl = create_tmp_var (ptr_type, ".omp_copy_i");

  l0 = create_artificial_label (loc);
  l1 = create_artificial_label (loc);
  l2 = create_artificial_label (loc);

  t = build_call_expr_loc (loc, built_in_decls[BUILT_IN_GOMP_SINGLE_COPY_START], 0);
  t = fold_convert_loc (loc, ptr_type, t);
  gimplify_assign (ctx->receiver_decl, t, pre_p);

  t = build2 (EQ_EXPR, boolean_type_node, ctx->receiver_decl,
              build_int_cst (ptr_type, 0));
  t = build3 (COND_EXPR, void_type_node, t,
              build_and_jump (&l0), build_and_jump (&l1));
  gimplify_and_add (t, pre_p);

  gimple_seq_add_stmt (pre_p, gimple_build_label (l0));

  gimple_seq_add_seq (pre_p, gimple_omp_body (single_stmt));

  copyin_seq = NULL;
  lower_copyprivate_clauses (gimple_omp_single_clauses (single_stmt), pre_p,
                             &copyin_seq, ctx);

  t = build_fold_addr_expr_loc (loc, ctx->sender_decl);
  t = build_call_expr_loc (loc, built_in_decls[BUILT_IN_GOMP_SINGLE_COPY_END],
                           1, t);
  gimplify_and_add (t, pre_p);

  t = build_and_jump (&l2);
  gimplify_and_add (t, pre_p);

  gimple_seq_add_stmt (pre_p, gimple_build_label (l1));

  gimple_seq_add_seq (pre_p, copyin_seq);

  gimple_seq_add_stmt (pre_p, gimple_build_label (l2));
}
/* Expand code for an OpenMP single directive.  */

static void
lower_omp_single (gimple_stmt_iterator *gsi_p, omp_context *ctx)
{
  tree block;
  gimple t, bind, single_stmt = gsi_stmt (*gsi_p);
  gimple_seq bind_body, dlist;
  struct gimplify_ctx gctx;

  push_gimplify_context (&gctx);

  bind_body = NULL;
  lower_rec_input_clauses (gimple_omp_single_clauses (single_stmt),
                           &bind_body, &dlist, ctx);
  lower_omp (gimple_omp_body (single_stmt), ctx);

  gimple_seq_add_stmt (&bind_body, single_stmt);

  if (ctx->record_type)
    lower_omp_single_copy (single_stmt, &bind_body, ctx);
  else
    lower_omp_single_simple (single_stmt, &bind_body);

  gimple_omp_set_body (single_stmt, NULL);

  gimple_seq_add_seq (&bind_body, dlist);

  bind_body = maybe_catch_exception (bind_body);

  t = gimple_build_omp_return
        (!!find_omp_clause (gimple_omp_single_clauses (single_stmt),
                            OMP_CLAUSE_NOWAIT));
  gimple_seq_add_stmt (&bind_body, t);

  block = make_node (BLOCK);
  bind = gimple_build_bind (NULL, bind_body, block);

  pop_gimplify_context (bind);

  gimple_bind_append_vars (bind, ctx->block_vars);
  BLOCK_VARS (block) = ctx->block_vars;
  gsi_replace (gsi_p, bind, true);
  if (BLOCK_VARS (block))
    TREE_USED (block) = 1;
}
/* Expand code for an OpenMP master directive.  */

static void
lower_omp_master (gimple_stmt_iterator *gsi_p, omp_context *ctx)
{
  tree block, lab = NULL, x;
  gimple stmt = gsi_stmt (*gsi_p), bind;
  location_t loc = gimple_location (stmt);
  gimple_seq tseq;
  struct gimplify_ctx gctx;

  push_gimplify_context (&gctx);

  block = make_node (BLOCK);
  bind = gimple_build_bind (NULL, gimple_seq_alloc_with_stmt (stmt),
                            block);

  x = build_call_expr_loc (loc, built_in_decls[BUILT_IN_OMP_GET_THREAD_NUM], 0);
  x = build2 (EQ_EXPR, boolean_type_node, x, integer_zero_node);
  x = build3 (COND_EXPR, void_type_node, x, NULL, build_and_jump (&lab));
  tseq = NULL;
  gimplify_and_add (x, &tseq);
  gimple_bind_add_seq (bind, tseq);

  lower_omp (gimple_omp_body (stmt), ctx);
  gimple_omp_set_body (stmt, maybe_catch_exception (gimple_omp_body (stmt)));
  gimple_bind_add_seq (bind, gimple_omp_body (stmt));
  gimple_omp_set_body (stmt, NULL);

  gimple_bind_add_stmt (bind, gimple_build_label (lab));

  gimple_bind_add_stmt (bind, gimple_build_omp_return (true));

  pop_gimplify_context (bind);

  gimple_bind_append_vars (bind, ctx->block_vars);
  BLOCK_VARS (block) = ctx->block_vars;
  gsi_replace (gsi_p, bind, true);
}
/* Expand code for an OpenMP ordered directive.  */

static void
lower_omp_ordered (gimple_stmt_iterator *gsi_p, omp_context *ctx)
{
  tree block;
  gimple stmt = gsi_stmt (*gsi_p), bind, x;
  struct gimplify_ctx gctx;

  push_gimplify_context (&gctx);

  block = make_node (BLOCK);
  bind = gimple_build_bind (NULL, gimple_seq_alloc_with_stmt (stmt),
                            block);

  x = gimple_build_call (built_in_decls[BUILT_IN_GOMP_ORDERED_START], 0);
  gimple_bind_add_stmt (bind, x);

  lower_omp (gimple_omp_body (stmt), ctx);
  gimple_omp_set_body (stmt, maybe_catch_exception (gimple_omp_body (stmt)));
  gimple_bind_add_seq (bind, gimple_omp_body (stmt));
  gimple_omp_set_body (stmt, NULL);

  x = gimple_build_call (built_in_decls[BUILT_IN_GOMP_ORDERED_END], 0);
  gimple_bind_add_stmt (bind, x);

  gimple_bind_add_stmt (bind, gimple_build_omp_return (true));

  pop_gimplify_context (bind);

  gimple_bind_append_vars (bind, ctx->block_vars);
  BLOCK_VARS (block) = gimple_bind_vars (bind);
  gsi_replace (gsi_p, bind, true);
}
/* Gimplify a GIMPLE_OMP_CRITICAL statement.  This is a relatively simple
   substitution of a couple of function calls.  But in the NAMED case,
   requires that languages coordinate a symbol name.  It is therefore
   best put here in common code.  */

static GTY((param1_is (tree), param2_is (tree)))
  splay_tree critical_name_mutexes;

static void
lower_omp_critical (gimple_stmt_iterator *gsi_p, omp_context *ctx)
{
  tree block;
  tree name, lock, unlock;
  gimple stmt = gsi_stmt (*gsi_p), bind;
  location_t loc = gimple_location (stmt);
  gimple_seq tbody;
  struct gimplify_ctx gctx;

  name = gimple_omp_critical_name (stmt);
  if (name)
    {
      tree decl;
      splay_tree_node n;

      if (!critical_name_mutexes)
        critical_name_mutexes
          = splay_tree_new_ggc (splay_tree_compare_pointers);

      n = splay_tree_lookup (critical_name_mutexes, (splay_tree_key) name);
      if (n == NULL)
        {
          char *new_str;

          decl = create_tmp_var_raw (ptr_type_node, NULL);

          new_str = ACONCAT ((".gomp_critical_user_",
                              IDENTIFIER_POINTER (name), NULL));
          DECL_NAME (decl) = get_identifier (new_str);
          TREE_PUBLIC (decl) = 1;
          TREE_STATIC (decl) = 1;
          DECL_COMMON (decl) = 1;
          DECL_ARTIFICIAL (decl) = 1;
          DECL_IGNORED_P (decl) = 1;
          varpool_finalize_decl (decl);

          splay_tree_insert (critical_name_mutexes, (splay_tree_key) name,
                             (splay_tree_value) decl);
        }
      else
        decl = (tree) n->value;

      lock = built_in_decls[BUILT_IN_GOMP_CRITICAL_NAME_START];
      lock = build_call_expr_loc (loc, lock, 1, build_fold_addr_expr_loc (loc, decl));

      unlock = built_in_decls[BUILT_IN_GOMP_CRITICAL_NAME_END];
      unlock = build_call_expr_loc (loc, unlock, 1,
                                    build_fold_addr_expr_loc (loc, decl));
    }
  else
    {
      lock = built_in_decls[BUILT_IN_GOMP_CRITICAL_START];
      lock = build_call_expr_loc (loc, lock, 0);

      unlock = built_in_decls[BUILT_IN_GOMP_CRITICAL_END];
      unlock = build_call_expr_loc (loc, unlock, 0);
    }

  push_gimplify_context (&gctx);

  block = make_node (BLOCK);
  bind = gimple_build_bind (NULL, gimple_seq_alloc_with_stmt (stmt), block);

  tbody = gimple_bind_body (bind);
  gimplify_and_add (lock, &tbody);
  gimple_bind_set_body (bind, tbody);

  lower_omp (gimple_omp_body (stmt), ctx);
  gimple_omp_set_body (stmt, maybe_catch_exception (gimple_omp_body (stmt)));
  gimple_bind_add_seq (bind, gimple_omp_body (stmt));
  gimple_omp_set_body (stmt, NULL);

  tbody = gimple_bind_body (bind);
  gimplify_and_add (unlock, &tbody);
  gimple_bind_set_body (bind, tbody);

  gimple_bind_add_stmt (bind, gimple_build_omp_return (true));

  pop_gimplify_context (bind);
  gimple_bind_append_vars (bind, ctx->block_vars);
  BLOCK_VARS (block) = gimple_bind_vars (bind);
  gsi_replace (gsi_p, bind, true);
}
/* A subroutine of lower_omp_for.  Generate code to emit the predicate
   for a lastprivate clause.  Given a loop control predicate of (V
   cond N2), we gate the clause on (!(V cond N2)).  The lowered form
   is appended to *DLIST, iterator initialization is appended to
   *BODY_P.  */

static void
lower_omp_for_lastprivate (struct omp_for_data *fd, gimple_seq *body_p,
                           gimple_seq *dlist, struct omp_context *ctx)
{
  tree clauses, cond, vinit;
  enum tree_code cond_code;
  gimple_seq stmts;

  cond_code = fd->loop.cond_code;
  cond_code = cond_code == LT_EXPR ? GE_EXPR : LE_EXPR;

  /* When possible, use a strict equality expression.  This can let VRP
     type optimizations deduce the value and remove a copy.  */
  if (host_integerp (fd->loop.step, 0))
    {
      HOST_WIDE_INT step = TREE_INT_CST_LOW (fd->loop.step);
      if (step == 1 || step == -1)
        cond_code = EQ_EXPR;
    }

  cond = build2 (cond_code, boolean_type_node, fd->loop.v, fd->loop.n2);

  clauses = gimple_omp_for_clauses (fd->for_stmt);
  stmts = NULL;
  lower_lastprivate_clauses (clauses, cond, &stmts, ctx);
  if (!gimple_seq_empty_p (stmts))
    {
      gimple_seq_add_seq (&stmts, *dlist);
      *dlist = stmts;

      /* Optimize: v = 0; is usually cheaper than v = some_other_constant.  */
      vinit = fd->loop.n1;
      if (cond_code == EQ_EXPR
          && host_integerp (fd->loop.n2, 0)
          && ! integer_zerop (fd->loop.n2))
        vinit = build_int_cst (TREE_TYPE (fd->loop.v), 0);

      /* Initialize the iterator variable, so that threads that don't execute
         any iterations don't execute the lastprivate clauses by accident.  */
      gimplify_assign (fd->loop.v, vinit, body_p);
    }
}
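
/* E.g. (illustrative only): for 'for (i = 0; i < n; i++)' with a
   lastprivate clause, cond_code starts as GE_EXPR (the negation of
   LT_EXPR), and because the step is 1 it is strengthened to EQ_EXPR, so
   the lastprivate copy-out is guarded by 'i == n' -- true exactly for
   the thread that ran the final iteration.  */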
/* Lower code for an OpenMP loop directive.  */

static void
lower_omp_for (gimple_stmt_iterator *gsi_p, omp_context *ctx)
{
  tree *rhs_p, block;
  struct omp_for_data fd;
  gimple stmt = gsi_stmt (*gsi_p), new_stmt;
  gimple_seq omp_for_body, body, dlist;
  size_t i;
  struct gimplify_ctx gctx;

  push_gimplify_context (&gctx);

  lower_omp (gimple_omp_for_pre_body (stmt), ctx);
  lower_omp (gimple_omp_body (stmt), ctx);

  block = make_node (BLOCK);
  new_stmt = gimple_build_bind (NULL, NULL, block);

  /* Move declaration of temporaries in the loop body before we make
     it go away.  */
  omp_for_body = gimple_omp_body (stmt);
  if (!gimple_seq_empty_p (omp_for_body)
      && gimple_code (gimple_seq_first_stmt (omp_for_body)) == GIMPLE_BIND)
    {
      tree vars = gimple_bind_vars (gimple_seq_first_stmt (omp_for_body));
      gimple_bind_append_vars (new_stmt, vars);
    }

  /* The pre-body and input clauses go before the lowered GIMPLE_OMP_FOR.  */
  body = NULL;
  dlist = NULL;
  lower_rec_input_clauses (gimple_omp_for_clauses (stmt), &body, &dlist, ctx);
  gimple_seq_add_seq (&body, gimple_omp_for_pre_body (stmt));

  /* Lower the header expressions.  At this point, we can assume that
     the header is of the form:

        #pragma omp for (V = VAL1; V {<|>|<=|>=} VAL2; V = V [+-] VAL3)

     We just need to make sure that VAL1, VAL2 and VAL3 are lowered
     using the .omp_data_s mapping, if needed.  */
  for (i = 0; i < gimple_omp_for_collapse (stmt); i++)
    {
      rhs_p = gimple_omp_for_initial_ptr (stmt, i);
      if (!is_gimple_min_invariant (*rhs_p))
        *rhs_p = get_formal_tmp_var (*rhs_p, &body);

      rhs_p = gimple_omp_for_final_ptr (stmt, i);
      if (!is_gimple_min_invariant (*rhs_p))
        *rhs_p = get_formal_tmp_var (*rhs_p, &body);

      rhs_p = &TREE_OPERAND (gimple_omp_for_incr (stmt, i), 1);
      if (!is_gimple_min_invariant (*rhs_p))
        *rhs_p = get_formal_tmp_var (*rhs_p, &body);
    }

  /* Once lowered, extract the bounds and clauses.  */
  extract_omp_for_data (stmt, &fd, NULL);

  lower_omp_for_lastprivate (&fd, &body, &dlist, ctx);

  gimple_seq_add_stmt (&body, stmt);
  gimple_seq_add_seq (&body, gimple_omp_body (stmt));

  gimple_seq_add_stmt (&body, gimple_build_omp_continue (fd.loop.v,
                                                         fd.loop.v));

  /* After the loop, add exit clauses.  */
  lower_reduction_clauses (gimple_omp_for_clauses (stmt), &body, ctx);
  gimple_seq_add_seq (&body, dlist);

  body = maybe_catch_exception (body);

  /* Region exit marker goes at the end of the loop body.  */
  gimple_seq_add_stmt (&body, gimple_build_omp_return (fd.have_nowait));

  pop_gimplify_context (new_stmt);

  gimple_bind_append_vars (new_stmt, ctx->block_vars);
  BLOCK_VARS (block) = gimple_bind_vars (new_stmt);
  if (BLOCK_VARS (block))
    TREE_USED (block) = 1;

  gimple_bind_set_body (new_stmt, body);
  gimple_omp_set_body (stmt, NULL);
  gimple_omp_for_set_pre_body (stmt, NULL);
  gsi_replace (gsi_p, new_stmt, true);
}
/* Callback for walk_stmts.  Check if the current statement only contains
   GIMPLE_OMP_FOR or GIMPLE_OMP_PARALLEL.  */

static tree
check_combined_parallel (gimple_stmt_iterator *gsi_p,
                         bool *handled_ops_p,
                         struct walk_stmt_info *wi)
{
  int *info = (int *) wi->info;
  gimple stmt = gsi_stmt (*gsi_p);

  *handled_ops_p = true;
  switch (gimple_code (stmt))
    {
    WALK_SUBSTMTS;

    case GIMPLE_OMP_FOR:
    case GIMPLE_OMP_SECTIONS:
      *info = *info == 0 ? 1 : -1;
      break;
    default:
      *info = -1;
      break;
    }

  return NULL;
}
6126 struct omp_taskcopy_context
6128 /* This field must be at the beginning, as we do "inheritance": Some
6129 callback functions for tree-inline.c (e.g., omp_copy_decl)
6130 receive a copy_body_data pointer that is up-casted to an
6131 omp_context pointer. */
static tree
task_copyfn_copy_decl (tree var, copy_body_data *cb)
{
  struct omp_taskcopy_context *tcctx = (struct omp_taskcopy_context *) cb;

  if (splay_tree_lookup (tcctx->ctx->sfield_map, (splay_tree_key) var))
    return create_tmp_var (TREE_TYPE (var), NULL);

  return var;
}
static tree
task_copyfn_remap_type (struct omp_taskcopy_context *tcctx, tree orig_type)
{
  tree name, new_fields = NULL, type, f;

  type = lang_hooks.types.make_type (RECORD_TYPE);
  name = DECL_NAME (TYPE_NAME (orig_type));
  name = build_decl (gimple_location (tcctx->ctx->stmt),
		     TYPE_DECL, name, type);
  TYPE_NAME (type) = name;

  for (f = TYPE_FIELDS (orig_type); f; f = TREE_CHAIN (f))
    {
      tree new_f = copy_node (f);
      DECL_CONTEXT (new_f) = type;
      TREE_TYPE (new_f) = remap_type (TREE_TYPE (f), &tcctx->cb);
      TREE_CHAIN (new_f) = new_fields;
      walk_tree (&DECL_SIZE (new_f), copy_tree_body_r, &tcctx->cb, NULL);
      walk_tree (&DECL_SIZE_UNIT (new_f), copy_tree_body_r, &tcctx->cb, NULL);
      walk_tree (&DECL_FIELD_OFFSET (new_f), copy_tree_body_r,
		 &tcctx->cb, NULL);
      new_fields = new_f;
      *pointer_map_insert (tcctx->cb.decl_map, f) = new_f;
    }
  TYPE_FIELDS (type) = nreverse (new_fields);
  layout_type (type);
  return type;
}
/* Create task copyfn.  */

static void
create_task_copyfn (gimple task_stmt, omp_context *ctx)
{
  struct function *child_cfun;
  tree child_fn, t, c, src, dst, f, sf, arg, sarg, decl;
  tree record_type, srecord_type, bind, list;
  bool record_needs_remap = false, srecord_needs_remap = false;
  splay_tree_node n;
  struct omp_taskcopy_context tcctx;
  struct gimplify_ctx gctx;
  location_t loc = gimple_location (task_stmt);

  child_fn = gimple_omp_task_copy_fn (task_stmt);
  child_cfun = DECL_STRUCT_FUNCTION (child_fn);
  gcc_assert (child_cfun->cfg == NULL);
  child_cfun->dont_save_pending_sizes_p = 1;
  DECL_SAVED_TREE (child_fn) = alloc_stmt_list ();

  /* Reset DECL_CONTEXT on function arguments.  */
  for (t = DECL_ARGUMENTS (child_fn); t; t = TREE_CHAIN (t))
    DECL_CONTEXT (t) = child_fn;

  /* Populate the function.  */
  push_gimplify_context (&gctx);
  current_function_decl = child_fn;

  bind = build3 (BIND_EXPR, void_type_node, NULL, NULL, NULL);
  TREE_SIDE_EFFECTS (bind) = 1;
  list = NULL;
  DECL_SAVED_TREE (child_fn) = bind;
  DECL_SOURCE_LOCATION (child_fn) = gimple_location (task_stmt);

  /* Remap src and dst argument types if needed.  */
  record_type = ctx->record_type;
  srecord_type = ctx->srecord_type;
  for (f = TYPE_FIELDS (record_type); f; f = TREE_CHAIN (f))
    if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
      {
	record_needs_remap = true;
	break;
      }
  for (f = TYPE_FIELDS (srecord_type); f; f = TREE_CHAIN (f))
    if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
      {
	srecord_needs_remap = true;
	break;
      }

  if (record_needs_remap || srecord_needs_remap)
    {
      memset (&tcctx, '\0', sizeof (tcctx));
      tcctx.cb.src_fn = ctx->cb.src_fn;
      tcctx.cb.dst_fn = child_fn;
      tcctx.cb.src_node = cgraph_node (tcctx.cb.src_fn);
      tcctx.cb.dst_node = tcctx.cb.src_node;
      tcctx.cb.src_cfun = ctx->cb.src_cfun;
      tcctx.cb.copy_decl = task_copyfn_copy_decl;
      tcctx.cb.eh_lp_nr = 0;
      tcctx.cb.transform_call_graph_edges = CB_CGE_MOVE;
      tcctx.cb.decl_map = pointer_map_create ();
      tcctx.ctx = ctx;

      if (record_needs_remap)
	record_type = task_copyfn_remap_type (&tcctx, record_type);
      if (srecord_needs_remap)
	srecord_type = task_copyfn_remap_type (&tcctx, srecord_type);
    }
  else
    tcctx.cb.decl_map = NULL;

  push_cfun (child_cfun);

  arg = DECL_ARGUMENTS (child_fn);
  TREE_TYPE (arg) = build_pointer_type (record_type);
  sarg = TREE_CHAIN (arg);
  TREE_TYPE (sarg) = build_pointer_type (srecord_type);

  /* First pass: initialize temporaries used in record_type and srecord_type
     sizes and field offsets.  */
  if (tcctx.cb.decl_map)
    for (c = gimple_omp_task_clauses (task_stmt); c; c = OMP_CLAUSE_CHAIN (c))
      if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_FIRSTPRIVATE)
	{
	  tree *p;

	  decl = OMP_CLAUSE_DECL (c);
	  p = (tree *) pointer_map_contains (tcctx.cb.decl_map, decl);
	  if (p == NULL)
	    continue;
	  n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
	  sf = (tree) n->value;
	  sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
	  src = build_fold_indirect_ref_loc (loc, sarg);
	  src = build3 (COMPONENT_REF, TREE_TYPE (sf), src, sf, NULL);
	  t = build2 (MODIFY_EXPR, TREE_TYPE (*p), *p, src);
	  append_to_statement_list (t, &list);
	}

  /* Second pass: copy shared var pointers and copy construct non-VLA
     firstprivate vars.  */
  for (c = gimple_omp_task_clauses (task_stmt); c; c = OMP_CLAUSE_CHAIN (c))
    switch (OMP_CLAUSE_CODE (c))
      {
      case OMP_CLAUSE_SHARED:
	decl = OMP_CLAUSE_DECL (c);
	n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
	if (n == NULL)
	  break;
	f = (tree) n->value;
	if (tcctx.cb.decl_map)
	  f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
	n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
	sf = (tree) n->value;
	if (tcctx.cb.decl_map)
	  sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
	src = build_fold_indirect_ref_loc (loc, sarg);
	src = build3 (COMPONENT_REF, TREE_TYPE (sf), src, sf, NULL);
	dst = build_fold_indirect_ref_loc (loc, arg);
	dst = build3 (COMPONENT_REF, TREE_TYPE (f), dst, f, NULL);
	t = build2 (MODIFY_EXPR, TREE_TYPE (dst), dst, src);
	append_to_statement_list (t, &list);
	break;
      case OMP_CLAUSE_FIRSTPRIVATE:
	decl = OMP_CLAUSE_DECL (c);
	if (is_variable_sized (decl))
	  break;
	n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
	if (n == NULL)
	  break;
	f = (tree) n->value;
	if (tcctx.cb.decl_map)
	  f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
	n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
	if (n != NULL)
	  {
	    sf = (tree) n->value;
	    if (tcctx.cb.decl_map)
	      sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
	    src = build_fold_indirect_ref_loc (loc, sarg);
	    src = build3 (COMPONENT_REF, TREE_TYPE (sf), src, sf, NULL);
	    if (use_pointer_for_field (decl, NULL) || is_reference (decl))
	      src = build_fold_indirect_ref_loc (loc, src);
	  }
	else
	  src = decl;
	dst = build_fold_indirect_ref_loc (loc, arg);
	dst = build3 (COMPONENT_REF, TREE_TYPE (f), dst, f, NULL);
	t = lang_hooks.decls.omp_clause_copy_ctor (c, dst, src);
	append_to_statement_list (t, &list);
	break;
      case OMP_CLAUSE_PRIVATE:
	if (! OMP_CLAUSE_PRIVATE_OUTER_REF (c))
	  break;
	decl = OMP_CLAUSE_DECL (c);
	n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
	f = (tree) n->value;
	if (tcctx.cb.decl_map)
	  f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
	n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
	if (n != NULL)
	  {
	    sf = (tree) n->value;
	    if (tcctx.cb.decl_map)
	      sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
	    src = build_fold_indirect_ref_loc (loc, sarg);
	    src = build3 (COMPONENT_REF, TREE_TYPE (sf), src, sf, NULL);
	    if (use_pointer_for_field (decl, NULL))
	      src = build_fold_indirect_ref_loc (loc, src);
	  }
	else
	  src = decl;
	dst = build_fold_indirect_ref_loc (loc, arg);
	dst = build3 (COMPONENT_REF, TREE_TYPE (f), dst, f, NULL);
	t = build2 (MODIFY_EXPR, TREE_TYPE (dst), dst, src);
	append_to_statement_list (t, &list);
	break;
      default:
	break;
      }

  /* Last pass: handle VLA firstprivates.  */
  if (tcctx.cb.decl_map)
    for (c = gimple_omp_task_clauses (task_stmt); c; c = OMP_CLAUSE_CHAIN (c))
      if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_FIRSTPRIVATE)
	{
	  tree ind, ptr, df;

	  decl = OMP_CLAUSE_DECL (c);
	  if (!is_variable_sized (decl))
	    continue;
	  n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
	  if (n == NULL)
	    continue;
	  f = (tree) n->value;
	  f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
	  gcc_assert (DECL_HAS_VALUE_EXPR_P (decl));
	  ind = DECL_VALUE_EXPR (decl);
	  gcc_assert (TREE_CODE (ind) == INDIRECT_REF);
	  gcc_assert (DECL_P (TREE_OPERAND (ind, 0)));
	  n = splay_tree_lookup (ctx->sfield_map,
				 (splay_tree_key) TREE_OPERAND (ind, 0));
	  sf = (tree) n->value;
	  sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
	  src = build_fold_indirect_ref_loc (loc, sarg);
	  src = build3 (COMPONENT_REF, TREE_TYPE (sf), src, sf, NULL);
	  src = build_fold_indirect_ref_loc (loc, src);
	  dst = build_fold_indirect_ref_loc (loc, arg);
	  dst = build3 (COMPONENT_REF, TREE_TYPE (f), dst, f, NULL);
	  t = lang_hooks.decls.omp_clause_copy_ctor (c, dst, src);
	  append_to_statement_list (t, &list);
	  n = splay_tree_lookup (ctx->field_map,
				 (splay_tree_key) TREE_OPERAND (ind, 0));
	  df = (tree) n->value;
	  df = *(tree *) pointer_map_contains (tcctx.cb.decl_map, df);
	  ptr = build_fold_indirect_ref_loc (loc, arg);
	  ptr = build3 (COMPONENT_REF, TREE_TYPE (df), ptr, df, NULL);
	  t = build2 (MODIFY_EXPR, TREE_TYPE (ptr), ptr,
		      build_fold_addr_expr_loc (loc, dst));
	  append_to_statement_list (t, &list);
	}

  t = build1 (RETURN_EXPR, void_type_node, NULL);
  append_to_statement_list (t, &list);

  if (tcctx.cb.decl_map)
    pointer_map_destroy (tcctx.cb.decl_map);
  pop_gimplify_context (NULL);
  BIND_EXPR_BODY (bind) = list;
  pop_cfun ();
  current_function_decl = ctx->cb.src_fn;
}
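
/* Editor's note -- an illustrative assumption based only on the code
   above (the names are hypothetical): the copyfn built here behaves
   roughly like

	void .omp_task_copyfn (struct dst_rec *arg, struct src_rec *sarg)
	{
	  arg->shared_ptr = sarg->shared_ptr;        <- OMP_CLAUSE_SHARED
	  copy_ctor (&arg->fp_var, &sarg->fp_var);   <- OMP_CLAUSE_FIRSTPRIVATE
	}

   where ARG points to the record the task body will receive and SARG
   to the record filled in by the thread that encountered the task.  */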
/* Lower the OpenMP parallel or task directive in the current statement
   in GSI_P.  CTX holds context information for the directive.  */

static void
lower_omp_taskreg (gimple_stmt_iterator *gsi_p, omp_context *ctx)
{
  tree clauses;
  tree child_fn, t;
  gimple stmt = gsi_stmt (*gsi_p);
  gimple par_bind, bind;
  gimple_seq par_body, olist, ilist, par_olist, par_ilist, new_body;
  struct gimplify_ctx gctx;
  location_t loc = gimple_location (stmt);

  clauses = gimple_omp_taskreg_clauses (stmt);
  par_bind = gimple_seq_first_stmt (gimple_omp_body (stmt));
  par_body = gimple_bind_body (par_bind);
  child_fn = ctx->cb.dst_fn;
  if (gimple_code (stmt) == GIMPLE_OMP_PARALLEL
      && !gimple_omp_parallel_combined_p (stmt))
    {
      struct walk_stmt_info wi;
      int ws_num = 0;

      memset (&wi, 0, sizeof (wi));
      wi.info = &ws_num;
      wi.val_only = true;
      walk_gimple_seq (par_body, check_combined_parallel, NULL, &wi);
      if (ws_num == 1)
	gimple_omp_parallel_set_combined_p (stmt, true);
    }
  if (ctx->srecord_type)
    create_task_copyfn (stmt, ctx);

  push_gimplify_context (&gctx);

  par_olist = NULL;
  par_ilist = NULL;
  lower_rec_input_clauses (clauses, &par_ilist, &par_olist, ctx);
  lower_omp (par_body, ctx);
  if (gimple_code (stmt) == GIMPLE_OMP_PARALLEL)
    lower_reduction_clauses (clauses, &par_olist, ctx);

  /* Declare all the variables created by mapping and the variables
     declared in the scope of the parallel body.  */
  record_vars_into (ctx->block_vars, child_fn);
  record_vars_into (gimple_bind_vars (par_bind), child_fn);

  if (ctx->record_type)
    {
      ctx->sender_decl
	= create_tmp_var (ctx->srecord_type ? ctx->srecord_type
			  : ctx->record_type, ".omp_data_o");
      TREE_ADDRESSABLE (ctx->sender_decl) = 1;
      gimple_omp_taskreg_set_data_arg (stmt, ctx->sender_decl);
    }

  olist = NULL;
  ilist = NULL;
  lower_send_clauses (clauses, &ilist, &olist, ctx);
  lower_send_shared_vars (&ilist, &olist, ctx);

  /* Once all the expansions are done, sequence all the different
     fragments inside gimple_omp_body.  */

  new_body = NULL;

  if (ctx->record_type)
    {
      t = build_fold_addr_expr_loc (loc, ctx->sender_decl);
      /* fixup_child_record_type might have changed receiver_decl's type.  */
      t = fold_convert_loc (loc, TREE_TYPE (ctx->receiver_decl), t);
      gimple_seq_add_stmt (&new_body,
			   gimple_build_assign (ctx->receiver_decl, t));
    }

  gimple_seq_add_seq (&new_body, par_ilist);
  gimple_seq_add_seq (&new_body, par_body);
  gimple_seq_add_seq (&new_body, par_olist);
  new_body = maybe_catch_exception (new_body);
  gimple_seq_add_stmt (&new_body, gimple_build_omp_return (false));
  gimple_omp_set_body (stmt, new_body);

  bind = gimple_build_bind (NULL, NULL, gimple_bind_block (par_bind));
  gimple_bind_add_stmt (bind, stmt);
  if (ilist || olist)
    {
      gimple_seq_add_stmt (&ilist, bind);
      gimple_seq_add_seq (&ilist, olist);
      bind = gimple_build_bind (NULL, ilist, NULL);
    }

  gsi_replace (gsi_p, bind, true);

  pop_gimplify_context (NULL);
}
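
/* Editor's note -- an illustrative sketch, not part of the original
   sources: for

	#pragma omp parallel shared(a)
	  body;

   the statement now has roughly the shape

	.omp_data_o.a = &a;			   <- lower_send_shared_vars
	GIMPLE_OMP_PARALLEL [child fn, .omp_data_o]
	  .omp_data_i = &.omp_data_o;		   <- receiver_decl copy
	  body;
	  GIMPLE_OMP_RETURN

   still inline in the current function; outlining the body into the
   child function and emitting the libgomp calls is left to
   pass_expand_omp.  */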
/* Callback for lower_omp_1.  Return non-NULL if *tp needs to be
   regimplified.  If DATA is non-NULL, lower_omp_1 is outside
   of OpenMP context, but with task_shared_vars set.  */

static tree
lower_omp_regimplify_p (tree *tp, int *walk_subtrees,
			void *data)
{
  tree t = *tp;

  /* Any variable with DECL_VALUE_EXPR needs to be regimplified.  */
  if (TREE_CODE (t) == VAR_DECL && data == NULL && DECL_HAS_VALUE_EXPR_P (t))
    return t;

  if (task_shared_vars
      && DECL_P (t)
      && bitmap_bit_p (task_shared_vars, DECL_UID (t)))
    return t;

  /* If a global variable has been privatized, TREE_CONSTANT on
     ADDR_EXPR might be wrong.  */
  if (data == NULL && TREE_CODE (t) == ADDR_EXPR)
    recompute_tree_invariant_for_addr_expr (t);

  *walk_subtrees = !TYPE_P (t) && !DECL_P (t);
  return NULL_TREE;
}
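
/* Editor's note -- an illustrative assumption: a typical trigger is a
   variable-sized decl whose DECL_VALUE_EXPR is an INDIRECT_REF (as
   asserted in create_task_copyfn above); once such a variable is
   privatized, statements mentioning it must be regimplified so the
   indirection is expanded.  */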
static void
lower_omp_1 (gimple_stmt_iterator *gsi_p, omp_context *ctx)
{
  gimple stmt = gsi_stmt (*gsi_p);
  struct walk_stmt_info wi;

  if (gimple_has_location (stmt))
    input_location = gimple_location (stmt);

  if (task_shared_vars)
    memset (&wi, '\0', sizeof (wi));

  /* If we have issued syntax errors, avoid doing any heavy lifting.
     Just replace the OpenMP directives with a NOP to avoid
     confusing RTL expansion.  */
  if (seen_error () && is_gimple_omp (stmt))
    {
      gsi_replace (gsi_p, gimple_build_nop (), true);
      return;
    }

  switch (gimple_code (stmt))
    {
    case GIMPLE_COND:
      if ((ctx || task_shared_vars)
	  && (walk_tree (gimple_cond_lhs_ptr (stmt), lower_omp_regimplify_p,
			 ctx ? NULL : &wi, NULL)
	      || walk_tree (gimple_cond_rhs_ptr (stmt), lower_omp_regimplify_p,
			    ctx ? NULL : &wi, NULL)))
	gimple_regimplify_operands (stmt, gsi_p);
      break;
    case GIMPLE_CATCH:
      lower_omp (gimple_catch_handler (stmt), ctx);
      break;
    case GIMPLE_EH_FILTER:
      lower_omp (gimple_eh_filter_failure (stmt), ctx);
      break;
    case GIMPLE_TRY:
      lower_omp (gimple_try_eval (stmt), ctx);
      lower_omp (gimple_try_cleanup (stmt), ctx);
      break;
    case GIMPLE_BIND:
      lower_omp (gimple_bind_body (stmt), ctx);
      break;
    case GIMPLE_OMP_PARALLEL:
    case GIMPLE_OMP_TASK:
      ctx = maybe_lookup_ctx (stmt);
      lower_omp_taskreg (gsi_p, ctx);
      break;
    case GIMPLE_OMP_FOR:
      ctx = maybe_lookup_ctx (stmt);
      gcc_assert (ctx);
      lower_omp_for (gsi_p, ctx);
      break;
    case GIMPLE_OMP_SECTIONS:
      ctx = maybe_lookup_ctx (stmt);
      gcc_assert (ctx);
      lower_omp_sections (gsi_p, ctx);
      break;
    case GIMPLE_OMP_SINGLE:
      ctx = maybe_lookup_ctx (stmt);
      gcc_assert (ctx);
      lower_omp_single (gsi_p, ctx);
      break;
    case GIMPLE_OMP_MASTER:
      ctx = maybe_lookup_ctx (stmt);
      gcc_assert (ctx);
      lower_omp_master (gsi_p, ctx);
      break;
    case GIMPLE_OMP_ORDERED:
      ctx = maybe_lookup_ctx (stmt);
      gcc_assert (ctx);
      lower_omp_ordered (gsi_p, ctx);
      break;
    case GIMPLE_OMP_CRITICAL:
      ctx = maybe_lookup_ctx (stmt);
      gcc_assert (ctx);
      lower_omp_critical (gsi_p, ctx);
      break;
    case GIMPLE_OMP_ATOMIC_LOAD:
      if ((ctx || task_shared_vars)
	  && walk_tree (gimple_omp_atomic_load_rhs_ptr (stmt),
			lower_omp_regimplify_p, ctx ? NULL : &wi, NULL))
	gimple_regimplify_operands (stmt, gsi_p);
      break;
    default:
      if ((ctx || task_shared_vars)
	  && walk_gimple_op (stmt, lower_omp_regimplify_p,
			     ctx ? NULL : &wi))
	gimple_regimplify_operands (stmt, gsi_p);
      break;
    }
}
static void
lower_omp (gimple_seq body, omp_context *ctx)
{
  location_t saved_location = input_location;
  gimple_stmt_iterator gsi;

  for (gsi = gsi_start (body); !gsi_end_p (gsi); gsi_next (&gsi))
    lower_omp_1 (&gsi, ctx);
  input_location = saved_location;
}
/* Main entry point.  */

static unsigned int
execute_lower_omp (void)
{
  gimple_seq body;

  /* This pass always runs, to provide PROP_gimple_lomp.
     But there is nothing to do unless -fopenmp is given.  */
  if (flag_openmp == 0)
    return 0;

  all_contexts = splay_tree_new (splay_tree_compare_pointers, 0,
				 delete_omp_context);
  body = gimple_body (current_function_decl);
  scan_omp (body, NULL);
  gcc_assert (taskreg_nesting_level == 0);

  if (all_contexts->root)
    {
      struct gimplify_ctx gctx;

      if (task_shared_vars)
	push_gimplify_context (&gctx);
      lower_omp (body, NULL);
      if (task_shared_vars)
	pop_gimplify_context (NULL);
    }

  if (all_contexts)
    {
      splay_tree_delete (all_contexts);
      all_contexts = NULL;
    }
  BITMAP_FREE (task_shared_vars);
  return 0;
}
struct gimple_opt_pass pass_lower_omp =
{
 {
  GIMPLE_PASS,
  "omplower",				/* name */
  NULL,					/* gate */
  execute_lower_omp,			/* execute */
  NULL,					/* sub */
  NULL,					/* next */
  0,					/* static_pass_number */
  TV_NONE,				/* tv_id */
  PROP_gimple_any,			/* properties_required */
  PROP_gimple_lomp,			/* properties_provided */
  0,					/* properties_destroyed */
  0,					/* todo_flags_start */
  TODO_dump_func			/* todo_flags_finish */
 }
};
/* The following is a utility to diagnose OpenMP structured block violations.
   It is not part of the "omplower" pass, as that's invoked too late.  It
   should be invoked by the respective front ends after gimplification.  */

static splay_tree all_labels;
/* Check for mismatched contexts and generate an error if needed.  Return
   true if an error is detected.  */

static bool
diagnose_sb_0 (gimple_stmt_iterator *gsi_p,
	       gimple branch_ctx, gimple label_ctx)
{
  if (label_ctx == branch_ctx)
    return false;

  /*
     Previously we kept track of the label's entire context in diagnose_sb_[12]
     so we could traverse it and issue a correct "exit" or "enter" error
     message upon a structured block violation.

     We built the context by building a list with tree_cons'ing, but there is
     no easy counterpart in gimple tuples.  It seems like far too much work
     for issuing exit/enter error messages.  If someone really misses the
     distinct error message... patches welcome.
   */

#if 0
  /* Try to avoid confusing the user by producing an error message
     with correct "exit" or "enter" verbiage.  We prefer "exit"
     unless we can show that LABEL_CTX is nested within BRANCH_CTX.  */
  if (branch_ctx == NULL)
    exit_p = false;
  else
    {
      while (label_ctx)
	{
	  if (TREE_VALUE (label_ctx) == branch_ctx)
	    {
	      exit_p = false;
	      break;
	    }
	  label_ctx = TREE_CHAIN (label_ctx);
	}
    }

  if (exit_p)
    error ("invalid exit from OpenMP structured block");
  else
    error ("invalid entry to OpenMP structured block");
#endif

  /* If it's obvious we have an invalid entry, be specific about the error.  */
  if (branch_ctx == NULL)
    error ("invalid entry to OpenMP structured block");
  else
    /* Otherwise, be vague and lazy, but efficient.  */
    error ("invalid branch to/from an OpenMP structured block");

  gsi_replace (gsi_p, gimple_build_nop (), false);
  return true;
}
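
/* Editor's note -- an illustrative example, not from the original
   sources, of what this utility diagnoses:

	#pragma omp parallel
	{
	  goto fail;	<- "invalid branch to/from an OpenMP structured block"
	}
      fail:;

   The offending statement is replaced with a GIMPLE_NOP so later
   passes never see a control transfer crossing the region boundary.  */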
/* Pass 1: Create a minimal tree of OpenMP structured blocks, and record
   where each label is found.  */

static tree
diagnose_sb_1 (gimple_stmt_iterator *gsi_p, bool *handled_ops_p,
	       struct walk_stmt_info *wi)
{
  gimple context = (gimple) wi->info;
  gimple inner_context;
  gimple stmt = gsi_stmt (*gsi_p);

  *handled_ops_p = true;

  switch (gimple_code (stmt))
    {
    WALK_SUBSTMTS;

    case GIMPLE_OMP_PARALLEL:
    case GIMPLE_OMP_TASK:
    case GIMPLE_OMP_SECTIONS:
    case GIMPLE_OMP_SINGLE:
    case GIMPLE_OMP_SECTION:
    case GIMPLE_OMP_MASTER:
    case GIMPLE_OMP_ORDERED:
    case GIMPLE_OMP_CRITICAL:
      /* The minimal context here is just the current OMP construct.  */
      inner_context = stmt;
      wi->info = inner_context;
      walk_gimple_seq (gimple_omp_body (stmt), diagnose_sb_1, NULL, wi);
      wi->info = context;
      break;

    case GIMPLE_OMP_FOR:
      inner_context = stmt;
      wi->info = inner_context;
      /* gimple_omp_for_{index,initial,final} are all DECLs; no need to
	 walk them.  */
      walk_gimple_seq (gimple_omp_for_pre_body (stmt),
		       diagnose_sb_1, NULL, wi);
      walk_gimple_seq (gimple_omp_body (stmt), diagnose_sb_1, NULL, wi);
      wi->info = context;
      break;

    case GIMPLE_LABEL:
      splay_tree_insert (all_labels, (splay_tree_key) gimple_label_label (stmt),
			 (splay_tree_value) context);
      break;

    default:
      break;
    }

  return NULL_TREE;
}
/* Pass 2: Check each branch and see if its context differs from that of
   the destination label's context.  */

static tree
diagnose_sb_2 (gimple_stmt_iterator *gsi_p, bool *handled_ops_p,
	       struct walk_stmt_info *wi)
{
  gimple context = (gimple) wi->info;
  splay_tree_node n;
  gimple stmt = gsi_stmt (*gsi_p);

  *handled_ops_p = true;

  switch (gimple_code (stmt))
    {
    WALK_SUBSTMTS;

    case GIMPLE_OMP_PARALLEL:
    case GIMPLE_OMP_TASK:
    case GIMPLE_OMP_SECTIONS:
    case GIMPLE_OMP_SINGLE:
    case GIMPLE_OMP_SECTION:
    case GIMPLE_OMP_MASTER:
    case GIMPLE_OMP_ORDERED:
    case GIMPLE_OMP_CRITICAL:
      wi->info = stmt;
      walk_gimple_seq (gimple_omp_body (stmt), diagnose_sb_2, NULL, wi);
      wi->info = context;
      break;

    case GIMPLE_OMP_FOR:
      wi->info = stmt;
      /* gimple_omp_for_{index,initial,final} are all DECLs; no need to
	 walk them.  */
      walk_gimple_seq (gimple_omp_for_pre_body (stmt),
		       diagnose_sb_2, NULL, wi);
      walk_gimple_seq (gimple_omp_body (stmt), diagnose_sb_2, NULL, wi);
      wi->info = context;
      break;

    case GIMPLE_COND:
      {
	tree lab = gimple_cond_true_label (stmt);
	if (lab)
	  {
	    n = splay_tree_lookup (all_labels,
				   (splay_tree_key) lab);
	    diagnose_sb_0 (gsi_p, context,
			   n ? (gimple) n->value : NULL);
	  }
	lab = gimple_cond_false_label (stmt);
	if (lab)
	  {
	    n = splay_tree_lookup (all_labels,
				   (splay_tree_key) lab);
	    diagnose_sb_0 (gsi_p, context,
			   n ? (gimple) n->value : NULL);
	  }
      }
      break;

    case GIMPLE_GOTO:
      {
	tree lab = gimple_goto_dest (stmt);
	if (TREE_CODE (lab) != LABEL_DECL)
	  break;

	n = splay_tree_lookup (all_labels, (splay_tree_key) lab);
	diagnose_sb_0 (gsi_p, context, n ? (gimple) n->value : NULL);
      }
      break;

    case GIMPLE_SWITCH:
      {
	unsigned int i;
	for (i = 0; i < gimple_switch_num_labels (stmt); ++i)
	  {
	    tree lab = CASE_LABEL (gimple_switch_label (stmt, i));
	    n = splay_tree_lookup (all_labels, (splay_tree_key) lab);
	    if (n && diagnose_sb_0 (gsi_p, context, (gimple) n->value))
	      break;
	  }
      }
      break;

    case GIMPLE_RETURN:
      diagnose_sb_0 (gsi_p, context, NULL);
      break;

    default:
      break;
    }

  return NULL_TREE;
}
static unsigned int
diagnose_omp_structured_block_errors (void)
{
  struct walk_stmt_info wi;
  gimple_seq body = gimple_body (current_function_decl);

  all_labels = splay_tree_new (splay_tree_compare_pointers, 0, 0);

  memset (&wi, 0, sizeof (wi));
  walk_gimple_seq (body, diagnose_sb_1, NULL, &wi);

  memset (&wi, 0, sizeof (wi));
  wi.want_locations = true;
  walk_gimple_seq (body, diagnose_sb_2, NULL, &wi);

  splay_tree_delete (all_labels);
  all_labels = NULL;

  return 0;
}
static bool
gate_diagnose_omp_blocks (void)
{
  return flag_openmp != 0;
}
struct gimple_opt_pass pass_diagnose_omp_blocks =
{
 {
  GIMPLE_PASS,
  "*diagnose_omp_blocks",		/* name */
  gate_diagnose_omp_blocks,		/* gate */
  diagnose_omp_structured_block_errors,	/* execute */
  NULL,					/* sub */
  NULL,					/* next */
  0,					/* static_pass_number */
  TV_NONE,				/* tv_id */
  PROP_gimple_any,			/* properties_required */
  0,					/* properties_provided */
  0,					/* properties_destroyed */
  0,					/* todo_flags_start */
  0,					/* todo_flags_finish */
 }
};

#include "gt-omp-low.h"