/* Lowering pass for OpenMP directives.  Converts OpenMP directives
   into explicit calls to the runtime library (libgomp) and data
   marshalling to implement data sharing and copying clauses.
   Contributed by Diego Novillo <dnovillo@redhat.com>

   Copyright (C) 2005-2013 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
#include "rtl.h"
#include "gimple.h"
#include "gimplify.h"
#include "gimple-iterator.h"
#include "gimplify-me.h"
#include "gimple-walk.h"
#include "tree-iterator.h"
#include "tree-inline.h"
#include "langhooks.h"
#include "diagnostic-core.h"
#include "gimple-ssa.h"
#include "cgraph.h"
#include "tree-cfg.h"
#include "tree-phinodes.h"
#include "ssa-iterators.h"
#include "tree-ssanames.h"
#include "tree-into-ssa.h"
#include "tree-dfa.h"
#include "tree-ssa.h"
#include "flags.h"
#include "function.h"
#include "expr.h"
#include "tree-pass.h"
#include "ggc.h"
#include "except.h"
#include "splay-tree.h"
#include "optabs.h"
#include "cfgloop.h"
#include "target.h"
#include "omp-low.h"
#include "gimple-low.h"
#include "tree-cfgcleanup.h"


/* Lowering of OpenMP parallel and workshare constructs proceeds in two
   phases.  The first phase scans the function looking for OMP statements
   and then for variables that must be replaced to satisfy data sharing
   clauses.  The second phase expands code for the constructs, as well as
   re-gimplifying things when variables have been replaced with complex
   expressions.

   Final code generation is done by pass_expand_omp.  The flowgraph is
   scanned for parallel regions which are then moved to a new
   function, to be invoked by the thread library.  */

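/* As an illustrative sketch (simplified; the exact runtime entry points
   and argument lists are produced by the expansion code and vary with
   the clauses used), a construct such as

       #pragma omp parallel shared (n)
	 body;

   is outlined into a child function and replaced by code along the
   lines of

       .omp_data_o.n = n;
       __builtin_GOMP_parallel (foo._omp_fn.0, &.omp_data_o, 0, 0);

   where the child function foo._omp_fn.0 receives &.omp_data_o as its
   argument and accesses n through it.  */
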
/* Parallel region information.  Every parallel and workshare
   directive is enclosed between two markers, the OMP_* directive
   and a corresponding OMP_RETURN statement.  */

struct omp_region
{
  /* The enclosing region.  */
  struct omp_region *outer;

  /* First child region.  */
  struct omp_region *inner;

  /* Next peer region.  */
  struct omp_region *next;

  /* Block containing the omp directive as its last stmt.  */
  basic_block entry;

  /* Block containing the OMP_RETURN as its last stmt.  */
  basic_block exit;

  /* Block containing the OMP_CONTINUE as its last stmt.  */
  basic_block cont;

  /* If this is a combined parallel+workshare region, this is a list
     of additional arguments needed by the combined parallel+workshare
     library call.  */
  vec<tree, va_gc> *ws_args;

  /* The code for the omp directive of this region.  */
  enum gimple_code type;

  /* Schedule kind, only used for OMP_FOR type regions.  */
  enum omp_clause_schedule_kind sched_kind;

  /* True if this is a combined parallel+workshare region.  */
  bool is_combined_parallel;
};

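/* For instance, a "#pragma omp parallel" enclosing a "#pragma omp for"
   yields a region of type GIMPLE_OMP_PARALLEL whose INNER field points
   to the region for the for; determine_parallel_type below may then
   mark both regions as combined.  */
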
/* Context structure.  Used to store information about each parallel
   directive in the code.  */

typedef struct omp_context
{
  /* This field must be at the beginning, as we do "inheritance": Some
     callback functions for tree-inline.c (e.g., omp_copy_decl)
     receive a copy_body_data pointer that is up-casted to an
     omp_context pointer.  */
  copy_body_data cb;

  /* The tree of contexts corresponding to the encountered constructs.  */
  struct omp_context *outer;
  gimple stmt;

  /* Map variables to fields in a structure that allows communication
     between sending and receiving threads.  */
  splay_tree field_map;
  tree record_type;
  tree sender_decl;
  tree receiver_decl;

  /* These are used just by task contexts, if task firstprivate fn is
     needed.  srecord_type is used to communicate from the thread
     that encountered the task construct to task firstprivate fn,
     record_type is allocated by GOMP_task, initialized by task firstprivate
     fn and passed to the task body fn.  */
  splay_tree sfield_map;
  tree srecord_type;

  /* A chain of variables to add to the top-level block surrounding the
     construct.  In the case of a parallel, this is in the child function.  */
  tree block_vars;

  /* Label to which GOMP_cancel{,lation_point} and explicit and implicit
     barriers should jump during the omplower pass.  */
  tree cancel_label;

  /* What to do with variables with implicitly determined sharing
     attributes.  */
  enum omp_clause_default_kind default_kind;

  /* Nesting depth of this context.  Used to beautify error messages re
     invalid gotos.  The outermost ctx is depth 1, with depth 0 being
     reserved for the main body of the function.  */
  int depth;

  /* True if this parallel directive is nested within another.  */
  bool is_nested;

  /* True if this construct can be cancelled.  */
  bool cancellable;
} omp_context;


struct omp_for_data_loop
{
  tree v, n1, n2, step;
  enum tree_code cond_code;
};

/* A structure describing the main elements of a parallel loop.  */

struct omp_for_data
{
  struct omp_for_data_loop loop;
  tree chunk_size;
  gimple for_stmt;
  tree pre, iter_type;
  int collapse;
  bool have_nowait, have_ordered;
  enum omp_clause_schedule_kind sched_kind;
  struct omp_for_data_loop *loops;
};

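/* As an illustration of how these fields are filled in (see
   extract_omp_for_data below), a canonical loop

       #pragma omp for
       for (i = 0; i < n; i += 4)

   is described by loop.v == i, loop.n1 == 0, loop.n2 == n,
   loop.step == 4 and loop.cond_code == LT_EXPR; LE_EXPR and GE_EXPR
   conditions are normalized to LT_EXPR and GT_EXPR respectively.  */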

static splay_tree all_contexts;
static int taskreg_nesting_level;
static int target_nesting_level;
static struct omp_region *root_omp_region;
static bitmap task_shared_vars;

static void scan_omp (gimple_seq *, omp_context *);
static tree scan_omp_1_op (tree *, int *, void *);

#define WALK_SUBSTMTS  \
    case GIMPLE_BIND: \
    case GIMPLE_TRY: \
    case GIMPLE_CATCH: \
    case GIMPLE_EH_FILTER: \
    case GIMPLE_TRANSACTION: \
      /* The sub-statements for these should be walked.  */ \
      *handled_ops_p = false; \
      break;

/* Convenience function for calling scan_omp_1_op on tree operands.  */

static inline tree
scan_omp_op (tree *tp, omp_context *ctx)
{
  struct walk_stmt_info wi;

  memset (&wi, 0, sizeof (wi));
  wi.info = ctx;
  wi.want_locations = true;

  return walk_tree (tp, scan_omp_1_op, &wi, NULL);
}

static void lower_omp (gimple_seq *, omp_context *);
static tree lookup_decl_in_outer_ctx (tree, omp_context *);
static tree maybe_lookup_decl_in_outer_ctx (tree, omp_context *);

/* Find an OpenMP clause of type KIND within CLAUSES.  */

tree
find_omp_clause (tree clauses, enum omp_clause_code kind)
{
  for (; clauses ; clauses = OMP_CLAUSE_CHAIN (clauses))
    if (OMP_CLAUSE_CODE (clauses) == kind)
      return clauses;

  return NULL_TREE;
}

/* Return true if CTX is for an omp parallel.  */

static inline bool
is_parallel_ctx (omp_context *ctx)
{
  return gimple_code (ctx->stmt) == GIMPLE_OMP_PARALLEL;
}


/* Return true if CTX is for an omp task.  */

static inline bool
is_task_ctx (omp_context *ctx)
{
  return gimple_code (ctx->stmt) == GIMPLE_OMP_TASK;
}


/* Return true if CTX is for an omp parallel or omp task.  */

static inline bool
is_taskreg_ctx (omp_context *ctx)
{
  return gimple_code (ctx->stmt) == GIMPLE_OMP_PARALLEL
	 || gimple_code (ctx->stmt) == GIMPLE_OMP_TASK;
}


/* Return true if REGION is a combined parallel+workshare region.  */

static inline bool
is_combined_parallel (struct omp_region *region)
{
  return region->is_combined_parallel;
}


/* Extract the header elements of parallel loop FOR_STMT and store
   them into *FD.  */

static void
extract_omp_for_data (gimple for_stmt, struct omp_for_data *fd,
		      struct omp_for_data_loop *loops)
{
  tree t, var, *collapse_iter, *collapse_count;
  tree count = NULL_TREE, iter_type = long_integer_type_node;
  struct omp_for_data_loop *loop;
  int i;
  struct omp_for_data_loop dummy_loop;
  location_t loc = gimple_location (for_stmt);
  bool simd = gimple_omp_for_kind (for_stmt) == GF_OMP_FOR_KIND_SIMD;
  bool distribute = gimple_omp_for_kind (for_stmt)
		    == GF_OMP_FOR_KIND_DISTRIBUTE;

  fd->for_stmt = for_stmt;
  fd->pre = NULL;
  fd->collapse = gimple_omp_for_collapse (for_stmt);
  if (fd->collapse > 1)
    fd->loops = loops;
  else
    fd->loops = &fd->loop;

  fd->have_nowait = distribute || simd;
  fd->have_ordered = false;
  fd->sched_kind = OMP_CLAUSE_SCHEDULE_STATIC;
  fd->chunk_size = NULL_TREE;
  collapse_iter = NULL;
  collapse_count = NULL;

  for (t = gimple_omp_for_clauses (for_stmt); t ; t = OMP_CLAUSE_CHAIN (t))
    switch (OMP_CLAUSE_CODE (t))
      {
      case OMP_CLAUSE_NOWAIT:
	fd->have_nowait = true;
	break;
      case OMP_CLAUSE_ORDERED:
	fd->have_ordered = true;
	break;
      case OMP_CLAUSE_SCHEDULE:
	gcc_assert (!distribute);
	fd->sched_kind = OMP_CLAUSE_SCHEDULE_KIND (t);
	fd->chunk_size = OMP_CLAUSE_SCHEDULE_CHUNK_EXPR (t);
	break;
      case OMP_CLAUSE_DIST_SCHEDULE:
	gcc_assert (distribute);
	fd->chunk_size = OMP_CLAUSE_DIST_SCHEDULE_CHUNK_EXPR (t);
	break;
      case OMP_CLAUSE_COLLAPSE:
	if (fd->collapse > 1)
	  {
	    collapse_iter = &OMP_CLAUSE_COLLAPSE_ITERVAR (t);
	    collapse_count = &OMP_CLAUSE_COLLAPSE_COUNT (t);
	  }
      default:
	break;
      }

  /* FIXME: for now map schedule(auto) to schedule(static).
     There should be analysis to determine whether all iterations
     are approximately the same amount of work (then schedule(static)
     is best) or if it varies (then schedule(dynamic,N) is better).  */
  if (fd->sched_kind == OMP_CLAUSE_SCHEDULE_AUTO)
    {
      fd->sched_kind = OMP_CLAUSE_SCHEDULE_STATIC;
      gcc_assert (fd->chunk_size == NULL);
    }
  gcc_assert (fd->collapse == 1 || collapse_iter != NULL);
  if (fd->sched_kind == OMP_CLAUSE_SCHEDULE_RUNTIME)
    gcc_assert (fd->chunk_size == NULL);
  else if (fd->chunk_size == NULL)
    {
      /* We only need to compute a default chunk size for ordered
	 static loops and dynamic loops.  */
      if (fd->sched_kind != OMP_CLAUSE_SCHEDULE_STATIC
	  || fd->have_ordered)
	fd->chunk_size = (fd->sched_kind == OMP_CLAUSE_SCHEDULE_STATIC)
			 ? integer_zero_node : integer_one_node;
    }

  for (i = 0; i < fd->collapse; i++)
    {
      if (fd->collapse == 1)
	loop = &fd->loop;
      else if (loops != NULL)
	loop = loops + i;
      else
	loop = &dummy_loop;

      loop->v = gimple_omp_for_index (for_stmt, i);
      gcc_assert (SSA_VAR_P (loop->v));
      gcc_assert (TREE_CODE (TREE_TYPE (loop->v)) == INTEGER_TYPE
		  || TREE_CODE (TREE_TYPE (loop->v)) == POINTER_TYPE);
      var = TREE_CODE (loop->v) == SSA_NAME ? SSA_NAME_VAR (loop->v) : loop->v;
      loop->n1 = gimple_omp_for_initial (for_stmt, i);

      loop->cond_code = gimple_omp_for_cond (for_stmt, i);
      loop->n2 = gimple_omp_for_final (for_stmt, i);
      switch (loop->cond_code)
	{
	case LT_EXPR:
	case GT_EXPR:
	  break;
	case LE_EXPR:
	  if (POINTER_TYPE_P (TREE_TYPE (loop->n2)))
	    loop->n2 = fold_build_pointer_plus_hwi_loc (loc, loop->n2, 1);
	  else
	    loop->n2 = fold_build2_loc (loc,
					PLUS_EXPR, TREE_TYPE (loop->n2), loop->n2,
					build_int_cst (TREE_TYPE (loop->n2), 1));
	  loop->cond_code = LT_EXPR;
	  break;
	case GE_EXPR:
	  if (POINTER_TYPE_P (TREE_TYPE (loop->n2)))
	    loop->n2 = fold_build_pointer_plus_hwi_loc (loc, loop->n2, -1);
	  else
	    loop->n2 = fold_build2_loc (loc,
					MINUS_EXPR, TREE_TYPE (loop->n2), loop->n2,
					build_int_cst (TREE_TYPE (loop->n2), 1));
	  loop->cond_code = GT_EXPR;
	  break;
	default:
	  gcc_unreachable ();
	}

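      /* After the normalization above, e.g. "i <= n" has become
	 "i < n + 1" and "i >= n" has become "i > n - 1", so only
	 LT_EXPR and GT_EXPR conditions reach the code below.  */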
      t = gimple_omp_for_incr (for_stmt, i);
      gcc_assert (TREE_OPERAND (t, 0) == var);
      switch (TREE_CODE (t))
	{
	case PLUS_EXPR:
	  loop->step = TREE_OPERAND (t, 1);
	  break;
	case POINTER_PLUS_EXPR:
	  loop->step = fold_convert (ssizetype, TREE_OPERAND (t, 1));
	  break;
	case MINUS_EXPR:
	  loop->step = TREE_OPERAND (t, 1);
	  loop->step = fold_build1_loc (loc,
					NEGATE_EXPR, TREE_TYPE (loop->step),
					loop->step);
	  break;
	default:
	  gcc_unreachable ();
	}

      if (simd
	  || (fd->sched_kind == OMP_CLAUSE_SCHEDULE_STATIC
	      && !fd->have_ordered))
	{
	  if (fd->collapse == 1)
	    iter_type = TREE_TYPE (loop->v);
	  else if (i == 0
		   || TYPE_PRECISION (iter_type)
		      < TYPE_PRECISION (TREE_TYPE (loop->v)))
	    iter_type
	      = build_nonstandard_integer_type
		  (TYPE_PRECISION (TREE_TYPE (loop->v)), 1);
	}
      else if (iter_type != long_long_unsigned_type_node)
	{
	  if (POINTER_TYPE_P (TREE_TYPE (loop->v)))
	    iter_type = long_long_unsigned_type_node;
	  else if (TYPE_UNSIGNED (TREE_TYPE (loop->v))
		   && TYPE_PRECISION (TREE_TYPE (loop->v))
		      >= TYPE_PRECISION (iter_type))
	    {
	      tree n;

	      if (loop->cond_code == LT_EXPR)
		n = fold_build2_loc (loc,
				     PLUS_EXPR, TREE_TYPE (loop->v),
				     loop->n2, loop->step);
	      else
		n = loop->n1;
	      if (TREE_CODE (n) != INTEGER_CST
		  || tree_int_cst_lt (TYPE_MAX_VALUE (iter_type), n))
		iter_type = long_long_unsigned_type_node;
	    }
	  else if (TYPE_PRECISION (TREE_TYPE (loop->v))
		   > TYPE_PRECISION (iter_type))
	    {
	      tree n1, n2;

	      if (loop->cond_code == LT_EXPR)
		{
		  n1 = loop->n1;
		  n2 = fold_build2_loc (loc,
					PLUS_EXPR, TREE_TYPE (loop->v),
					loop->n2, loop->step);
		}
	      else
		{
		  n1 = fold_build2_loc (loc,
					MINUS_EXPR, TREE_TYPE (loop->v),
					loop->n2, loop->step);
		  n2 = loop->n1;
		}
	      if (TREE_CODE (n1) != INTEGER_CST
		  || TREE_CODE (n2) != INTEGER_CST
		  || !tree_int_cst_lt (TYPE_MIN_VALUE (iter_type), n1)
		  || !tree_int_cst_lt (n2, TYPE_MAX_VALUE (iter_type)))
		iter_type = long_long_unsigned_type_node;
	    }
	}

      if (collapse_count && *collapse_count == NULL)
	{
	  t = fold_binary (loop->cond_code, boolean_type_node,
			   fold_convert (TREE_TYPE (loop->v), loop->n1),
			   fold_convert (TREE_TYPE (loop->v), loop->n2));
	  if (t && integer_zerop (t))
	    count = build_zero_cst (long_long_unsigned_type_node);
	  else if ((i == 0 || count != NULL_TREE)
		   && TREE_CODE (TREE_TYPE (loop->v)) == INTEGER_TYPE
		   && TREE_CONSTANT (loop->n1)
		   && TREE_CONSTANT (loop->n2)
		   && TREE_CODE (loop->step) == INTEGER_CST)
	    {
	      tree itype = TREE_TYPE (loop->v);

	      if (POINTER_TYPE_P (itype))
		itype = signed_type_for (itype);
	      t = build_int_cst (itype, (loop->cond_code == LT_EXPR ? -1 : 1));
	      t = fold_build2_loc (loc,
				   PLUS_EXPR, itype,
				   fold_convert_loc (loc, itype, loop->step), t);
	      t = fold_build2_loc (loc, PLUS_EXPR, itype, t,
				   fold_convert_loc (loc, itype, loop->n2));
	      t = fold_build2_loc (loc, MINUS_EXPR, itype, t,
				   fold_convert_loc (loc, itype, loop->n1));
	      if (TYPE_UNSIGNED (itype) && loop->cond_code == GT_EXPR)
		t = fold_build2_loc (loc, TRUNC_DIV_EXPR, itype,
				     fold_build1_loc (loc, NEGATE_EXPR, itype, t),
				     fold_build1_loc (loc, NEGATE_EXPR, itype,
						      fold_convert_loc (loc, itype,
									loop->step)));
	      else
		t = fold_build2_loc (loc, TRUNC_DIV_EXPR, itype, t,
				     fold_convert_loc (loc, itype, loop->step));
	      t = fold_convert_loc (loc, long_long_unsigned_type_node, t);
	      if (count != NULL_TREE)
		count = fold_build2_loc (loc,
					 MULT_EXPR, long_long_unsigned_type_node,
					 count, t);
	      else
		count = t;
	      if (TREE_CODE (count) != INTEGER_CST)
		count = NULL_TREE;
	    }
	  else if (count && !integer_zerop (count))
	    count = NULL_TREE;
	}
    }

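  /* Worked example of the constant COUNT computation above: for
     "for (i = 0; i < 10; i += 3)" with cond_code LT_EXPR the value is
     (3 + -1 + 10 - 0) / 3 = 4, a truncating division that implements
     ceil ((n2 - n1) / step) for the iterations i = 0, 3, 6, 9.  */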
  if (count
      && !simd
      && (fd->sched_kind != OMP_CLAUSE_SCHEDULE_STATIC
	  || fd->have_ordered))
    {
      if (!tree_int_cst_lt (count, TYPE_MAX_VALUE (long_integer_type_node)))
	iter_type = long_long_unsigned_type_node;
      else
	iter_type = long_integer_type_node;
    }
  else if (collapse_iter && *collapse_iter != NULL)
    iter_type = TREE_TYPE (*collapse_iter);
  fd->iter_type = iter_type;
  if (collapse_iter && *collapse_iter == NULL)
    *collapse_iter = create_tmp_var (iter_type, ".iter");
  if (collapse_count && *collapse_count == NULL)
    {
      if (count)
	*collapse_count = fold_convert_loc (loc, iter_type, count);
      else
	*collapse_count = create_tmp_var (iter_type, ".count");
    }

  if (fd->collapse > 1)
    {
      fd->loop.v = *collapse_iter;
      fd->loop.n1 = build_int_cst (TREE_TYPE (fd->loop.v), 0);
      fd->loop.n2 = *collapse_count;
      fd->loop.step = build_int_cst (TREE_TYPE (fd->loop.v), 1);
      fd->loop.cond_code = LT_EXPR;
    }
}


/* Given two blocks PAR_ENTRY_BB and WS_ENTRY_BB such that WS_ENTRY_BB
   is the immediate dominator of PAR_ENTRY_BB, return true if there
   are no data dependencies that would prevent expanding the parallel
   directive at PAR_ENTRY_BB as a combined parallel+workshare region.

   When expanding a combined parallel+workshare region, the call to
   the child function may need additional arguments in the case of
   GIMPLE_OMP_FOR regions.  In some cases, these arguments are
   computed out of variables passed in from the parent to the child
   via 'struct .omp_data_s'.  For instance:

	#pragma omp parallel for schedule (guided, i * 4)
	for (j ...)

   Is lowered into:

	# BLOCK 2 (PAR_ENTRY_BB)
	.omp_data_o.i = i;
	#pragma omp parallel [child fn: bar.omp_fn.0 ( ..., D.1598)]

	# BLOCK 3 (WS_ENTRY_BB)
	.omp_data_i = &.omp_data_o;
	D.1667 = .omp_data_i->i;
	D.1598 = D.1667 * 4;
	#pragma omp for schedule (guided, D.1598)

   When we outline the parallel region, the call to the child function
   'bar.omp_fn.0' will need the value D.1598 in its argument list, but
   that value is computed *after* the call site.  So, in principle we
   cannot do the transformation.

   To see whether the code in WS_ENTRY_BB blocks the combined
   parallel+workshare call, we collect all the variables used in the
   GIMPLE_OMP_FOR header and check whether they appear on the LHS of
   any statement in WS_ENTRY_BB.  If so, then we cannot emit the
   combined call.

   FIXME.  If we had the SSA form built at this point, we could merely
   hoist the code in block 3 into block 2 and be done with it.  But at
   this point we don't have dataflow information and though we could
   hack something up here, it is really not worth the aggravation.  */

static bool
workshare_safe_to_combine_p (basic_block ws_entry_bb)
{
  struct omp_for_data fd;
  gimple ws_stmt = last_stmt (ws_entry_bb);

  if (gimple_code (ws_stmt) == GIMPLE_OMP_SECTIONS)
    return true;

  gcc_assert (gimple_code (ws_stmt) == GIMPLE_OMP_FOR);

  extract_omp_for_data (ws_stmt, &fd, NULL);

  if (fd.collapse > 1 && TREE_CODE (fd.loop.n2) != INTEGER_CST)
    return false;
  if (fd.iter_type != long_integer_type_node)
    return false;

  /* FIXME.  We give up too easily here.  If any of these arguments
     are not constants, they will likely involve variables that have
     been mapped into fields of .omp_data_s for sharing with the child
     function.  With appropriate data flow, it would be possible to
     see through this.  */
  if (!is_gimple_min_invariant (fd.loop.n1)
      || !is_gimple_min_invariant (fd.loop.n2)
      || !is_gimple_min_invariant (fd.loop.step)
      || (fd.chunk_size && !is_gimple_min_invariant (fd.chunk_size)))
    return false;

  return true;
}


/* Collect additional arguments needed to emit a combined
   parallel+workshare call.  WS_STMT is the workshare directive being
   expanded.  */

static vec<tree, va_gc> *
get_ws_args_for (gimple par_stmt, gimple ws_stmt)
{
  tree t;
  location_t loc = gimple_location (ws_stmt);
  vec<tree, va_gc> *ws_args;

  if (gimple_code (ws_stmt) == GIMPLE_OMP_FOR)
    {
      struct omp_for_data fd;
      tree n1, n2;

      extract_omp_for_data (ws_stmt, &fd, NULL);
      n1 = fd.loop.n1;
      n2 = fd.loop.n2;

      if (gimple_omp_for_combined_into_p (ws_stmt))
	{
	  tree innerc
	    = find_omp_clause (gimple_omp_parallel_clauses (par_stmt),
			       OMP_CLAUSE__LOOPTEMP_);
	  gcc_assert (innerc);
	  n1 = OMP_CLAUSE_DECL (innerc);
	  innerc = find_omp_clause (OMP_CLAUSE_CHAIN (innerc),
				    OMP_CLAUSE__LOOPTEMP_);
	  gcc_assert (innerc);
	  n2 = OMP_CLAUSE_DECL (innerc);
	}

      vec_alloc (ws_args, 3 + (fd.chunk_size != 0));

      t = fold_convert_loc (loc, long_integer_type_node, n1);
      ws_args->quick_push (t);

      t = fold_convert_loc (loc, long_integer_type_node, n2);
      ws_args->quick_push (t);

      t = fold_convert_loc (loc, long_integer_type_node, fd.loop.step);
      ws_args->quick_push (t);

      if (fd.chunk_size)
	{
	  t = fold_convert_loc (loc, long_integer_type_node, fd.chunk_size);
	  ws_args->quick_push (t);
	}

      return ws_args;
    }
  else if (gimple_code (ws_stmt) == GIMPLE_OMP_SECTIONS)
    {
      /* Number of sections is equal to the number of edges from the
	 GIMPLE_OMP_SECTIONS_SWITCH statement, except for the one to
	 the exit of the sections region.  */
      basic_block bb = single_succ (gimple_bb (ws_stmt));
      t = build_int_cst (unsigned_type_node, EDGE_COUNT (bb->succs) - 1);
      vec_alloc (ws_args, 1);
      ws_args->quick_push (t);
      return ws_args;
    }

  gcc_unreachable ();
}

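/* For reference (a summary of how the expansion code uses WS_ARGS, not
   a definition of the libgomp ABI): for loops these values become the
   start, end, incr and optional chunk_size arguments appended to the
   combined GOMP_parallel_loop_* call, and for sections the single
   value becomes the section count passed to GOMP_parallel_sections.  */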

/* Discover whether REGION is a combined parallel+workshare region.  */

static void
determine_parallel_type (struct omp_region *region)
{
  basic_block par_entry_bb, par_exit_bb;
  basic_block ws_entry_bb, ws_exit_bb;

  if (region == NULL || region->inner == NULL
      || region->exit == NULL || region->inner->exit == NULL
      || region->inner->cont == NULL)
    return;

  /* We only support parallel+for and parallel+sections.  */
  if (region->type != GIMPLE_OMP_PARALLEL
      || (region->inner->type != GIMPLE_OMP_FOR
	  && region->inner->type != GIMPLE_OMP_SECTIONS))
    return;

  /* Check for perfect nesting PAR_ENTRY_BB -> WS_ENTRY_BB and
     WS_EXIT_BB -> PAR_EXIT_BB.  */
  par_entry_bb = region->entry;
  par_exit_bb = region->exit;
  ws_entry_bb = region->inner->entry;
  ws_exit_bb = region->inner->exit;

  if (single_succ (par_entry_bb) == ws_entry_bb
      && single_succ (ws_exit_bb) == par_exit_bb
      && workshare_safe_to_combine_p (ws_entry_bb)
      && (gimple_omp_parallel_combined_p (last_stmt (par_entry_bb))
	  || (last_and_only_stmt (ws_entry_bb)
	      && last_and_only_stmt (par_exit_bb))))
    {
      gimple par_stmt = last_stmt (par_entry_bb);
      gimple ws_stmt = last_stmt (ws_entry_bb);

      if (region->inner->type == GIMPLE_OMP_FOR)
	{
	  /* If this is a combined parallel loop, we need to determine
	     whether or not to use the combined library calls.  There
	     are two cases where we do not apply the transformation:
	     static loops and any kind of ordered loop.  In the first
	     case, we already open code the loop so there is no need
	     to do anything else.  In the latter case, the combined
	     parallel loop call would still need extra synchronization
	     to implement ordered semantics, so there would not be any
	     gain in using the combined call.  */
	  tree clauses = gimple_omp_for_clauses (ws_stmt);
	  tree c = find_omp_clause (clauses, OMP_CLAUSE_SCHEDULE);
	  if (c == NULL
	      || OMP_CLAUSE_SCHEDULE_KIND (c) == OMP_CLAUSE_SCHEDULE_STATIC
	      || find_omp_clause (clauses, OMP_CLAUSE_ORDERED))
	    {
	      region->is_combined_parallel = false;
	      region->inner->is_combined_parallel = false;
	      return;
	    }
	}

      region->is_combined_parallel = true;
      region->inner->is_combined_parallel = true;
      region->ws_args = get_ws_args_for (par_stmt, ws_stmt);
    }
}


/* Return true if EXPR is variable sized.  */

static inline bool
is_variable_sized (const_tree expr)
{
  return !TREE_CONSTANT (TYPE_SIZE_UNIT (TREE_TYPE (expr)));
}

/* Return true if DECL is a reference type.  */

static inline bool
is_reference (tree decl)
{
  return lang_hooks.decls.omp_privatize_by_reference (decl);
}

/* Look up variables in the decl or field splay trees.  The "maybe" form
   allows the variable not to have been entered; otherwise we assert
   that it must have been.  */

static inline tree
lookup_decl (tree var, omp_context *ctx)
{
  tree *n;
  n = (tree *) pointer_map_contains (ctx->cb.decl_map, var);
  return *n;
}

static inline tree
maybe_lookup_decl (const_tree var, omp_context *ctx)
{
  tree *n;
  n = (tree *) pointer_map_contains (ctx->cb.decl_map, var);
  return n ? *n : NULL_TREE;
}

static inline tree
lookup_field (tree var, omp_context *ctx)
{
  splay_tree_node n;
  n = splay_tree_lookup (ctx->field_map, (splay_tree_key) var);
  return (tree) n->value;
}

static inline tree
lookup_sfield (tree var, omp_context *ctx)
{
  splay_tree_node n;
  n = splay_tree_lookup (ctx->sfield_map
			 ? ctx->sfield_map : ctx->field_map,
			 (splay_tree_key) var);
  return (tree) n->value;
}

static inline tree
maybe_lookup_field (tree var, omp_context *ctx)
{
  splay_tree_node n;
  n = splay_tree_lookup (ctx->field_map, (splay_tree_key) var);
  return n ? (tree) n->value : NULL_TREE;
}

/* Return true if DECL should be copied by pointer.  SHARED_CTX is
   the parallel context if DECL is to be shared.  */

static bool
use_pointer_for_field (tree decl, omp_context *shared_ctx)
{
  if (AGGREGATE_TYPE_P (TREE_TYPE (decl)))
    return true;

  /* We can only use copy-in/copy-out semantics for shared variables
     when we know the value is not accessible from an outer scope.  */
  if (shared_ctx)
    {
      /* ??? Trivially accessible from anywhere.  But why would we even
	 be passing an address in this case?  Should we simply assert
	 this to be false, or should we have a cleanup pass that removes
	 these from the list of mappings?  */
      if (TREE_STATIC (decl) || DECL_EXTERNAL (decl))
	return true;

      /* For variables with DECL_HAS_VALUE_EXPR_P set, we cannot tell
	 without analyzing the expression whether or not its location
	 is accessible to anyone else.  In the case of nested parallel
	 regions it certainly may be.  */
      if (TREE_CODE (decl) != RESULT_DECL && DECL_HAS_VALUE_EXPR_P (decl))
	return true;

      /* Do not use copy-in/copy-out for variables that have their
	 address taken.  */
      if (TREE_ADDRESSABLE (decl))
	return true;

      /* lower_send_shared_vars only uses copy-in, but not copy-out
	 for these.  */
      if (TREE_READONLY (decl)
	  || ((TREE_CODE (decl) == RESULT_DECL
	       || TREE_CODE (decl) == PARM_DECL)
	      && DECL_BY_REFERENCE (decl)))
	return false;

      /* Disallow copy-in/out in nested parallel if
	 decl is shared in outer parallel, otherwise
	 each thread could store the shared variable
	 in its own copy-in location, making the
	 variable no longer really shared.  */
      if (shared_ctx->is_nested)
	{
	  omp_context *up;

	  for (up = shared_ctx->outer; up; up = up->outer)
	    if (is_taskreg_ctx (up) && maybe_lookup_decl (decl, up))
	      break;

	  if (up)
	    {
	      tree c;

	      for (c = gimple_omp_taskreg_clauses (up->stmt);
		   c; c = OMP_CLAUSE_CHAIN (c))
		if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_SHARED
		    && OMP_CLAUSE_DECL (c) == decl)
		  break;

	      if (c)
		goto maybe_mark_addressable_and_ret;
	    }
	}

      /* For tasks avoid using copy-in/out.  As tasks can be deferred
	 or executed in a different thread, when GOMP_task returns, the
	 task hasn't necessarily terminated.  */
      if (is_task_ctx (shared_ctx))
	{
	  tree outer;
	maybe_mark_addressable_and_ret:
	  outer = maybe_lookup_decl_in_outer_ctx (decl, shared_ctx);
	  if (is_gimple_reg (outer))
	    {
	      /* Taking address of OUTER in lower_send_shared_vars
		 might need regimplification of everything that uses the
		 variable.  */
	      if (!task_shared_vars)
		task_shared_vars = BITMAP_ALLOC (NULL);
	      bitmap_set_bit (task_shared_vars, DECL_UID (outer));
	      TREE_ADDRESSABLE (outer) = 1;
	    }
	  return true;
	}
    }

  return false;
}

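/* For example (a summary of the logic above, not an exhaustive rule):
   a plain shared scalar like "int n" whose address is never taken is
   communicated through a value field with copy-in/copy-out, whereas
   aggregates, addressable variables and variables shared with a task
   are communicated by passing their address.  */
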
/* Construct a new automatic decl similar to VAR.  */

static tree
omp_copy_decl_2 (tree var, tree name, tree type, omp_context *ctx)
{
  tree copy = copy_var_decl (var, name, type);

  DECL_CONTEXT (copy) = current_function_decl;
  DECL_CHAIN (copy) = ctx->block_vars;
  ctx->block_vars = copy;

  return copy;
}

static tree
omp_copy_decl_1 (tree var, omp_context *ctx)
{
  return omp_copy_decl_2 (var, DECL_NAME (var), TREE_TYPE (var), ctx);
}

/* Build COMPONENT_REF and set TREE_THIS_VOLATILE and TREE_READONLY on it
   as appropriate.  */
static tree
omp_build_component_ref (tree obj, tree field)
{
  tree ret = build3 (COMPONENT_REF, TREE_TYPE (field), obj, field, NULL);
  if (TREE_THIS_VOLATILE (field))
    TREE_THIS_VOLATILE (ret) |= 1;
  if (TREE_READONLY (field))
    TREE_READONLY (ret) |= 1;
  return ret;
}

/* Build tree nodes to access the field for VAR on the receiver side.  */

static tree
build_receiver_ref (tree var, bool by_ref, omp_context *ctx)
{
  tree x, field = lookup_field (var, ctx);

  /* If the receiver record type was remapped in the child function,
     remap the field into the new record type.  */
  x = maybe_lookup_field (field, ctx);
  if (x != NULL)
    field = x;

  x = build_simple_mem_ref (ctx->receiver_decl);
  x = omp_build_component_ref (x, field);
  if (by_ref)
    x = build_simple_mem_ref (x);

  return x;
}

/* Build tree nodes to access VAR in the scope outer to CTX.  In the case
   of a parallel, this is a component reference; for workshare constructs
   this is some variable.  */

static tree
build_outer_var_ref (tree var, omp_context *ctx)
{
  tree x;

  if (is_global_var (maybe_lookup_decl_in_outer_ctx (var, ctx)))
    x = var;
  else if (is_variable_sized (var))
    {
      x = TREE_OPERAND (DECL_VALUE_EXPR (var), 0);
      x = build_outer_var_ref (x, ctx);
      x = build_simple_mem_ref (x);
    }
  else if (is_taskreg_ctx (ctx))
    {
      bool by_ref = use_pointer_for_field (var, NULL);
      x = build_receiver_ref (var, by_ref, ctx);
    }
  else if (gimple_code (ctx->stmt) == GIMPLE_OMP_FOR
	   && gimple_omp_for_kind (ctx->stmt) == GF_OMP_FOR_KIND_SIMD)
    {
      /* #pragma omp simd isn't a worksharing construct, and can reference
	 even private vars in its linear etc. clauses.  */
      x = NULL_TREE;
      if (ctx->outer && is_taskreg_ctx (ctx))
	x = lookup_decl (var, ctx->outer);
      else if (ctx->outer)
	x = maybe_lookup_decl_in_outer_ctx (var, ctx);
      if (x == NULL_TREE)
	x = var;
    }
  else if (ctx->outer)
    x = lookup_decl (var, ctx->outer);
  else if (is_reference (var))
    /* This can happen with orphaned constructs.  If var is reference, it is
       possible it is shared and as such valid.  */
    x = var;
  else
    gcc_unreachable ();

  if (is_reference (var))
    x = build_simple_mem_ref (x);

  return x;
}

/* Build tree nodes to access the field for VAR on the sender side.  */

static tree
build_sender_ref (tree var, omp_context *ctx)
{
  tree field = lookup_sfield (var, ctx);
  return omp_build_component_ref (ctx->sender_decl, field);
}

/* Add a new field for VAR inside the structure CTX->SENDER_DECL.  */
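/* (Summary of MASK, as used by the callers below: bit 0 inserts the
   field into CTX->RECORD_TYPE and FIELD_MAP, bit 1 into
   CTX->SRECORD_TYPE and SFIELD_MAP, and bit 2 adds an extra level of
   indirection, used when mapping arrays for target constructs.)  */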

static void
install_var_field (tree var, bool by_ref, int mask, omp_context *ctx)
{
  tree field, type, sfield = NULL_TREE;

  gcc_assert ((mask & 1) == 0
	      || !splay_tree_lookup (ctx->field_map, (splay_tree_key) var));
  gcc_assert ((mask & 2) == 0 || !ctx->sfield_map
	      || !splay_tree_lookup (ctx->sfield_map, (splay_tree_key) var));

  type = TREE_TYPE (var);
  if (mask & 4)
    {
      gcc_assert (TREE_CODE (type) == ARRAY_TYPE);
      type = build_pointer_type (build_pointer_type (type));
    }
  else if (by_ref)
    type = build_pointer_type (type);
  else if ((mask & 3) == 1 && is_reference (var))
    type = TREE_TYPE (type);

  field = build_decl (DECL_SOURCE_LOCATION (var),
		      FIELD_DECL, DECL_NAME (var), type);

  /* Remember what variable this field was created for.  This does have a
     side effect of making dwarf2out ignore this member, so for helpful
     debugging we clear it later in delete_omp_context.  */
  DECL_ABSTRACT_ORIGIN (field) = var;
  if (type == TREE_TYPE (var))
    {
      DECL_ALIGN (field) = DECL_ALIGN (var);
      DECL_USER_ALIGN (field) = DECL_USER_ALIGN (var);
      TREE_THIS_VOLATILE (field) = TREE_THIS_VOLATILE (var);
    }
  else
    DECL_ALIGN (field) = TYPE_ALIGN (type);

  if ((mask & 3) == 3)
    {
      insert_field_into_struct (ctx->record_type, field);
      if (ctx->srecord_type)
	{
	  sfield = build_decl (DECL_SOURCE_LOCATION (var),
			       FIELD_DECL, DECL_NAME (var), type);
	  DECL_ABSTRACT_ORIGIN (sfield) = var;
	  DECL_ALIGN (sfield) = DECL_ALIGN (field);
	  DECL_USER_ALIGN (sfield) = DECL_USER_ALIGN (field);
	  TREE_THIS_VOLATILE (sfield) = TREE_THIS_VOLATILE (field);
	  insert_field_into_struct (ctx->srecord_type, sfield);
	}
    }
  else
    {
      if (ctx->srecord_type == NULL_TREE)
	{
	  tree t;

	  ctx->srecord_type = lang_hooks.types.make_type (RECORD_TYPE);
	  ctx->sfield_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
	  for (t = TYPE_FIELDS (ctx->record_type); t ; t = TREE_CHAIN (t))
	    {
	      sfield = build_decl (DECL_SOURCE_LOCATION (var),
				   FIELD_DECL, DECL_NAME (t), TREE_TYPE (t));
	      DECL_ABSTRACT_ORIGIN (sfield) = DECL_ABSTRACT_ORIGIN (t);
	      insert_field_into_struct (ctx->srecord_type, sfield);
	      splay_tree_insert (ctx->sfield_map,
				 (splay_tree_key) DECL_ABSTRACT_ORIGIN (t),
				 (splay_tree_value) sfield);
	    }
	}
      sfield = field;
      insert_field_into_struct ((mask & 1) ? ctx->record_type
				: ctx->srecord_type, field);
    }

  if (mask & 1)
    splay_tree_insert (ctx->field_map, (splay_tree_key) var,
		       (splay_tree_value) field);
  if ((mask & 2) && ctx->sfield_map)
    splay_tree_insert (ctx->sfield_map, (splay_tree_key) var,
		       (splay_tree_value) sfield);
}

static tree
install_var_local (tree var, omp_context *ctx)
{
  tree new_var = omp_copy_decl_1 (var, ctx);
  insert_decl_map (&ctx->cb, var, new_var);
  return new_var;
}

/* Adjust the replacement for DECL in CTX for the new context.  This means
   copying the DECL_VALUE_EXPR, and fixing up the type.  */

static void
fixup_remapped_decl (tree decl, omp_context *ctx, bool private_debug)
{
  tree new_decl, size;

  new_decl = lookup_decl (decl, ctx);

  TREE_TYPE (new_decl) = remap_type (TREE_TYPE (decl), &ctx->cb);

  if ((!TREE_CONSTANT (DECL_SIZE (new_decl)) || private_debug)
      && DECL_HAS_VALUE_EXPR_P (decl))
    {
      tree ve = DECL_VALUE_EXPR (decl);
      walk_tree (&ve, copy_tree_body_r, &ctx->cb, NULL);
      SET_DECL_VALUE_EXPR (new_decl, ve);
      DECL_HAS_VALUE_EXPR_P (new_decl) = 1;
    }

  if (!TREE_CONSTANT (DECL_SIZE (new_decl)))
    {
      size = remap_decl (DECL_SIZE (decl), &ctx->cb);
      if (size == error_mark_node)
	size = TYPE_SIZE (TREE_TYPE (new_decl));
      DECL_SIZE (new_decl) = size;

      size = remap_decl (DECL_SIZE_UNIT (decl), &ctx->cb);
      if (size == error_mark_node)
	size = TYPE_SIZE_UNIT (TREE_TYPE (new_decl));
      DECL_SIZE_UNIT (new_decl) = size;
    }
}

/* The callback for remap_decl.  Search all containing contexts for a
   mapping of the variable; this avoids having to duplicate the splay
   tree ahead of time.  We know a mapping doesn't already exist in the
   given context.  Create new mappings to implement default semantics.  */

static tree
omp_copy_decl (tree var, copy_body_data *cb)
{
  omp_context *ctx = (omp_context *) cb;
  tree new_var;

  if (TREE_CODE (var) == LABEL_DECL)
    {
      new_var = create_artificial_label (DECL_SOURCE_LOCATION (var));
      DECL_CONTEXT (new_var) = current_function_decl;
      insert_decl_map (&ctx->cb, var, new_var);
      return new_var;
    }

  while (!is_taskreg_ctx (ctx))
    {
      ctx = ctx->outer;
      if (ctx == NULL)
	return var;
      new_var = maybe_lookup_decl (var, ctx);
      if (new_var)
	return new_var;
    }

  if (is_global_var (var) || decl_function_context (var) != ctx->cb.src_fn)
    return var;

  return error_mark_node;
}


/* Debugging dumps for parallel regions.  */
void dump_omp_region (FILE *, struct omp_region *, int);
void debug_omp_region (struct omp_region *);
void debug_all_omp_regions (void);

/* Dump the parallel region tree rooted at REGION.  */

void
dump_omp_region (FILE *file, struct omp_region *region, int indent)
{
  fprintf (file, "%*sbb %d: %s\n", indent, "", region->entry->index,
	   gimple_code_name[region->type]);

  if (region->inner)
    dump_omp_region (file, region->inner, indent + 4);

  if (region->cont)
    {
      fprintf (file, "%*sbb %d: GIMPLE_OMP_CONTINUE\n", indent, "",
	       region->cont->index);
    }

  if (region->exit)
    fprintf (file, "%*sbb %d: GIMPLE_OMP_RETURN\n", indent, "",
	     region->exit->index);
  else
    fprintf (file, "%*s[no exit marker]\n", indent, "");

  if (region->next)
    dump_omp_region (file, region->next, indent);
}

DEBUG_FUNCTION void
debug_omp_region (struct omp_region *region)
{
  dump_omp_region (stderr, region, 0);
}

DEBUG_FUNCTION void
debug_all_omp_regions (void)
{
  dump_omp_region (stderr, root_omp_region, 0);
}


/* Create a new parallel region starting at STMT inside region PARENT.  */

static struct omp_region *
new_omp_region (basic_block bb, enum gimple_code type,
		struct omp_region *parent)
{
  struct omp_region *region = XCNEW (struct omp_region);

  region->outer = parent;
  region->entry = bb;
  region->type = type;

  if (parent)
    {
      /* This is a nested region.  Add it to the list of inner
	 regions in PARENT.  */
      region->next = parent->inner;
      parent->inner = region;
    }
  else
    {
      /* This is a toplevel region.  Add it to the list of toplevel
	 regions in ROOT_OMP_REGION.  */
      region->next = root_omp_region;
      root_omp_region = region;
    }

  return region;
}

/* Release the memory associated with the region tree rooted at REGION.  */

static void
free_omp_region_1 (struct omp_region *region)
{
  struct omp_region *i, *n;

  for (i = region->inner; i ; i = n)
    {
      n = i->next;
      free_omp_region_1 (i);
    }

  free (region);
}

/* Release the memory for the entire omp region tree.  */

void
free_omp_regions (void)
{
  struct omp_region *r, *n;
  for (r = root_omp_region; r ; r = n)
    {
      n = r->next;
      free_omp_region_1 (r);
    }
  root_omp_region = NULL;
}


/* Create a new context, with OUTER_CTX being the surrounding context.  */

static omp_context *
new_omp_context (gimple stmt, omp_context *outer_ctx)
{
  omp_context *ctx = XCNEW (omp_context);

  splay_tree_insert (all_contexts, (splay_tree_key) stmt,
		     (splay_tree_value) ctx);
  ctx->stmt = stmt;

  if (outer_ctx)
    {
      ctx->outer = outer_ctx;
      ctx->cb = outer_ctx->cb;
      ctx->cb.block = NULL;
      ctx->depth = outer_ctx->depth + 1;
    }
  else
    {
      ctx->cb.src_fn = current_function_decl;
      ctx->cb.dst_fn = current_function_decl;
      ctx->cb.src_node = cgraph_get_node (current_function_decl);
      gcc_checking_assert (ctx->cb.src_node);
      ctx->cb.dst_node = ctx->cb.src_node;
      ctx->cb.src_cfun = cfun;
      ctx->cb.copy_decl = omp_copy_decl;
      ctx->cb.eh_lp_nr = 0;
      ctx->cb.transform_call_graph_edges = CB_CGE_MOVE;
      ctx->depth = 1;
    }

  ctx->cb.decl_map = pointer_map_create ();

  return ctx;
}

static gimple_seq maybe_catch_exception (gimple_seq);

/* Finalize task copyfn.  */

static void
finalize_task_copyfn (gimple task_stmt)
{
  struct function *child_cfun;
  tree child_fn;
  gimple_seq seq = NULL, new_seq;
  gimple bind;

  child_fn = gimple_omp_task_copy_fn (task_stmt);
  if (child_fn == NULL_TREE)
    return;

  child_cfun = DECL_STRUCT_FUNCTION (child_fn);
  DECL_STRUCT_FUNCTION (child_fn)->curr_properties = cfun->curr_properties;

  push_cfun (child_cfun);
  bind = gimplify_body (child_fn, false);
  gimple_seq_add_stmt (&seq, bind);
  new_seq = maybe_catch_exception (seq);
  if (new_seq != seq)
    {
      bind = gimple_build_bind (NULL, new_seq, NULL);
      seq = NULL;
      gimple_seq_add_stmt (&seq, bind);
    }
  gimple_set_body (child_fn, seq);
  pop_cfun ();

  /* Inform the callgraph about the new function.  */
  cgraph_add_new_function (child_fn, false);
}

/* Destroy an omp_context data structure.  Called through the splay tree
   value delete callback.  */

static void
delete_omp_context (splay_tree_value value)
{
  omp_context *ctx = (omp_context *) value;

  pointer_map_destroy (ctx->cb.decl_map);

  if (ctx->field_map)
    splay_tree_delete (ctx->field_map);
  if (ctx->sfield_map)
    splay_tree_delete (ctx->sfield_map);

  /* We hijacked DECL_ABSTRACT_ORIGIN earlier.  We need to clear it before
     it produces corrupt debug information.  */
  if (ctx->record_type)
    {
      tree t;
      for (t = TYPE_FIELDS (ctx->record_type); t ; t = DECL_CHAIN (t))
	DECL_ABSTRACT_ORIGIN (t) = NULL;
    }
  if (ctx->srecord_type)
    {
      tree t;
      for (t = TYPE_FIELDS (ctx->srecord_type); t ; t = DECL_CHAIN (t))
	DECL_ABSTRACT_ORIGIN (t) = NULL;
    }

  if (is_task_ctx (ctx))
    finalize_task_copyfn (ctx->stmt);

  XDELETE (ctx);
}

/* Fix up RECEIVER_DECL with a type that has been remapped to the child
   context.  */

static void
fixup_child_record_type (omp_context *ctx)
{
  tree f, type = ctx->record_type;

  /* ??? It isn't sufficient to just call remap_type here, because
     variably_modified_type_p doesn't work the way we expect for
     record types.  Testing each field for whether it needs remapping
     and creating a new record by hand works, however.  */
  for (f = TYPE_FIELDS (type); f ; f = DECL_CHAIN (f))
    if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
      break;
  if (f)
    {
      tree name, new_fields = NULL;

      type = lang_hooks.types.make_type (RECORD_TYPE);
      name = DECL_NAME (TYPE_NAME (ctx->record_type));
      name = build_decl (DECL_SOURCE_LOCATION (ctx->receiver_decl),
			 TYPE_DECL, name, type);
      TYPE_NAME (type) = name;

      for (f = TYPE_FIELDS (ctx->record_type); f ; f = DECL_CHAIN (f))
	{
	  tree new_f = copy_node (f);
	  DECL_CONTEXT (new_f) = type;
	  TREE_TYPE (new_f) = remap_type (TREE_TYPE (f), &ctx->cb);
	  DECL_CHAIN (new_f) = new_fields;
	  walk_tree (&DECL_SIZE (new_f), copy_tree_body_r, &ctx->cb, NULL);
	  walk_tree (&DECL_SIZE_UNIT (new_f), copy_tree_body_r,
		     &ctx->cb, NULL);
	  walk_tree (&DECL_FIELD_OFFSET (new_f), copy_tree_body_r,
		     &ctx->cb, NULL);
	  new_fields = new_f;

	  /* Arrange to be able to look up the receiver field
	     given the sender field.  */
	  splay_tree_insert (ctx->field_map, (splay_tree_key) f,
			     (splay_tree_value) new_f);
	}
      TYPE_FIELDS (type) = nreverse (new_fields);
      layout_type (type);
    }

  TREE_TYPE (ctx->receiver_decl) = build_pointer_type (type);
}

/* Instantiate decls as necessary in CTX to satisfy the data sharing
   specified by CLAUSES.  This is done in two passes over the clause
   chain: the first creates the fields and local copies, the second
   fixes up the remapped decls once all of them are known.  */

static void
scan_sharing_clauses (tree clauses, omp_context *ctx)
{
  tree c, decl;
  bool scan_array_reductions = false;

  for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
    {
      bool by_ref;

      switch (OMP_CLAUSE_CODE (c))
	{
	case OMP_CLAUSE_PRIVATE:
	  decl = OMP_CLAUSE_DECL (c);
	  if (OMP_CLAUSE_PRIVATE_OUTER_REF (c))
	    goto do_private;
	  else if (!is_variable_sized (decl))
	    install_var_local (decl, ctx);
	  break;

	case OMP_CLAUSE_SHARED:
	  /* Ignore shared directives in teams construct.  */
	  if (gimple_code (ctx->stmt) == GIMPLE_OMP_TEAMS)
	    break;
	  gcc_assert (is_taskreg_ctx (ctx));
	  decl = OMP_CLAUSE_DECL (c);
	  gcc_assert (!COMPLETE_TYPE_P (TREE_TYPE (decl))
		      || !is_variable_sized (decl));
	  /* Global variables don't need to be copied,
	     the receiver side will use them directly.  */
	  if (is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx)))
	    break;
	  by_ref = use_pointer_for_field (decl, ctx);
	  if (! TREE_READONLY (decl)
	      || TREE_ADDRESSABLE (decl)
	      || by_ref
	      || is_reference (decl))
	    {
	      install_var_field (decl, by_ref, 3, ctx);
	      install_var_local (decl, ctx);
	      break;
	    }
	  /* We don't need to copy const scalar vars back.  */
	  OMP_CLAUSE_SET_CODE (c, OMP_CLAUSE_FIRSTPRIVATE);
	  goto do_private;

	case OMP_CLAUSE_LASTPRIVATE:
	  /* Let the corresponding firstprivate clause create
	     the variable.  */
	  if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
	    break;
	  /* FALLTHRU */

	case OMP_CLAUSE_FIRSTPRIVATE:
	case OMP_CLAUSE_REDUCTION:
	case OMP_CLAUSE_LINEAR:
	  decl = OMP_CLAUSE_DECL (c);
	do_private:
	  if (is_variable_sized (decl))
	    {
	      if (is_task_ctx (ctx))
		install_var_field (decl, false, 1, ctx);
	      break;
	    }
	  else if (is_taskreg_ctx (ctx))
	    {
	      bool global
		= is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx));
	      by_ref = use_pointer_for_field (decl, NULL);

	      if (is_task_ctx (ctx)
		  && (global || by_ref || is_reference (decl)))
		{
		  install_var_field (decl, false, 1, ctx);
		  if (!global)
		    install_var_field (decl, by_ref, 2, ctx);
		}
	      else if (!global)
		install_var_field (decl, by_ref, 3, ctx);
	    }
	  install_var_local (decl, ctx);
	  break;

	case OMP_CLAUSE__LOOPTEMP_:
	  gcc_assert (is_parallel_ctx (ctx));
	  decl = OMP_CLAUSE_DECL (c);
	  install_var_field (decl, false, 3, ctx);
	  install_var_local (decl, ctx);
	  break;

	case OMP_CLAUSE_COPYPRIVATE:
	case OMP_CLAUSE_COPYIN:
	  decl = OMP_CLAUSE_DECL (c);
	  by_ref = use_pointer_for_field (decl, NULL);
	  install_var_field (decl, by_ref, 3, ctx);
	  break;

	case OMP_CLAUSE_DEFAULT:
	  ctx->default_kind = OMP_CLAUSE_DEFAULT_KIND (c);
	  break;

	case OMP_CLAUSE_FINAL:
	case OMP_CLAUSE_IF:
	case OMP_CLAUSE_NUM_THREADS:
	case OMP_CLAUSE_NUM_TEAMS:
	case OMP_CLAUSE_THREAD_LIMIT:
	case OMP_CLAUSE_DEVICE:
	case OMP_CLAUSE_SCHEDULE:
	case OMP_CLAUSE_DIST_SCHEDULE:
	case OMP_CLAUSE_DEPEND:
	  if (ctx->outer)
	    scan_omp_op (&OMP_CLAUSE_OPERAND (c, 0), ctx->outer);
	  break;

	case OMP_CLAUSE_TO:
	case OMP_CLAUSE_FROM:
	case OMP_CLAUSE_MAP:
	  if (ctx->outer)
	    scan_omp_op (&OMP_CLAUSE_SIZE (c), ctx->outer);
	  decl = OMP_CLAUSE_DECL (c);
	  /* Global variables with "omp declare target" attribute
	     don't need to be copied, the receiver side will use them
	     directly.  */
	  if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_MAP
	      && DECL_P (decl)
	      && is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx))
	      && lookup_attribute ("omp declare target",
				   DECL_ATTRIBUTES (decl)))
	    break;
	  if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_MAP
	      && OMP_CLAUSE_MAP_KIND (c) == OMP_CLAUSE_MAP_POINTER)
	    {
	      /* Ignore OMP_CLAUSE_MAP_POINTER kind for arrays in
		 #pragma omp target data, there is nothing to map for
		 those.  */
	      if (gimple_omp_target_kind (ctx->stmt) == GF_OMP_TARGET_KIND_DATA
		  && !POINTER_TYPE_P (TREE_TYPE (decl)))
		break;
	    }
	  if (DECL_P (decl))
	    {
	      if (DECL_SIZE (decl)
		  && TREE_CODE (DECL_SIZE (decl)) != INTEGER_CST)
		{
		  tree decl2 = DECL_VALUE_EXPR (decl);
		  gcc_assert (TREE_CODE (decl2) == INDIRECT_REF);
		  decl2 = TREE_OPERAND (decl2, 0);
		  gcc_assert (DECL_P (decl2));
		  install_var_field (decl2, true, 3, ctx);
		  install_var_local (decl2, ctx);
		  install_var_local (decl, ctx);
		}
	      else
		{
		  if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_MAP
		      && OMP_CLAUSE_MAP_KIND (c) == OMP_CLAUSE_MAP_POINTER
		      && !OMP_CLAUSE_MAP_ZERO_BIAS_ARRAY_SECTION (c)
		      && TREE_CODE (TREE_TYPE (decl)) == ARRAY_TYPE)
		    install_var_field (decl, true, 7, ctx);
		  else
		    install_var_field (decl, true, 3, ctx);
		  if (gimple_omp_target_kind (ctx->stmt)
		      == GF_OMP_TARGET_KIND_REGION)
		    install_var_local (decl, ctx);
		}
	    }
	  else
	    {
	      tree base = get_base_address (decl);
	      tree nc = OMP_CLAUSE_CHAIN (c);
	      if (DECL_P (base)
		  && nc != NULL_TREE
		  && OMP_CLAUSE_CODE (nc) == OMP_CLAUSE_MAP
		  && OMP_CLAUSE_DECL (nc) == base
		  && OMP_CLAUSE_MAP_KIND (nc) == OMP_CLAUSE_MAP_POINTER
		  && integer_zerop (OMP_CLAUSE_SIZE (nc)))
		{
		  OMP_CLAUSE_MAP_ZERO_BIAS_ARRAY_SECTION (c) = 1;
		  OMP_CLAUSE_MAP_ZERO_BIAS_ARRAY_SECTION (nc) = 1;
		}
	      else
		{
		  gcc_assert (!splay_tree_lookup (ctx->field_map,
						  (splay_tree_key) decl));
		  tree field
		    = build_decl (OMP_CLAUSE_LOCATION (c),
				  FIELD_DECL, NULL_TREE, ptr_type_node);
		  DECL_ALIGN (field) = TYPE_ALIGN (ptr_type_node);
		  insert_field_into_struct (ctx->record_type, field);
		  splay_tree_insert (ctx->field_map, (splay_tree_key) decl,
				     (splay_tree_value) field);
		}
	    }
	  break;

	case OMP_CLAUSE_NOWAIT:
	case OMP_CLAUSE_ORDERED:
	case OMP_CLAUSE_COLLAPSE:
	case OMP_CLAUSE_UNTIED:
	case OMP_CLAUSE_MERGEABLE:
	case OMP_CLAUSE_PROC_BIND:
	case OMP_CLAUSE_SAFELEN:
	  break;

	case OMP_CLAUSE_ALIGNED:
	  decl = OMP_CLAUSE_DECL (c);
	  if (is_global_var (decl)
	      && TREE_CODE (TREE_TYPE (decl)) == ARRAY_TYPE)
	    install_var_local (decl, ctx);
	  break;

	default:
	  gcc_unreachable ();
	}
    }

  for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
    {
      switch (OMP_CLAUSE_CODE (c))
	{
	case OMP_CLAUSE_LASTPRIVATE:
	  /* Let the corresponding firstprivate clause create
	     the variable.  */
	  if (OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c))
	    scan_array_reductions = true;
	  if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
	    break;
	  /* FALLTHRU */

	case OMP_CLAUSE_PRIVATE:
	case OMP_CLAUSE_FIRSTPRIVATE:
	case OMP_CLAUSE_REDUCTION:
	case OMP_CLAUSE_LINEAR:
	  decl = OMP_CLAUSE_DECL (c);
	  if (is_variable_sized (decl))
	    install_var_local (decl, ctx);
	  fixup_remapped_decl (decl, ctx,
			       OMP_CLAUSE_CODE (c) == OMP_CLAUSE_PRIVATE
			       && OMP_CLAUSE_PRIVATE_DEBUG (c));
	  if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
	      && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
	    scan_array_reductions = true;
	  break;

	case OMP_CLAUSE_SHARED:
	  /* Ignore shared directives in teams construct.  */
	  if (gimple_code (ctx->stmt) == GIMPLE_OMP_TEAMS)
	    break;
	  decl = OMP_CLAUSE_DECL (c);
	  if (! is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx)))
	    fixup_remapped_decl (decl, ctx, false);
	  break;

	case OMP_CLAUSE_MAP:
	  if (gimple_omp_target_kind (ctx->stmt) == GF_OMP_TARGET_KIND_DATA)
	    break;
	  decl = OMP_CLAUSE_DECL (c);
	  if (DECL_P (decl)
	      && is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx))
	      && lookup_attribute ("omp declare target",
				   DECL_ATTRIBUTES (decl)))
	    break;
	  if (DECL_P (decl))
	    {
	      if (OMP_CLAUSE_MAP_KIND (c) == OMP_CLAUSE_MAP_POINTER
		  && TREE_CODE (TREE_TYPE (decl)) == ARRAY_TYPE
		  && !COMPLETE_TYPE_P (TREE_TYPE (decl)))
		{
		  tree new_decl = lookup_decl (decl, ctx);
		  TREE_TYPE (new_decl)
		    = remap_type (TREE_TYPE (decl), &ctx->cb);
		}
	      else if (DECL_SIZE (decl)
		       && TREE_CODE (DECL_SIZE (decl)) != INTEGER_CST)
		{
		  tree decl2 = DECL_VALUE_EXPR (decl);
		  gcc_assert (TREE_CODE (decl2) == INDIRECT_REF);
		  decl2 = TREE_OPERAND (decl2, 0);
		  gcc_assert (DECL_P (decl2));
		  fixup_remapped_decl (decl2, ctx, false);
		  fixup_remapped_decl (decl, ctx, true);
		}
	      else
		fixup_remapped_decl (decl, ctx, false);
	    }
	  break;

	case OMP_CLAUSE_COPYPRIVATE:
	case OMP_CLAUSE_COPYIN:
	case OMP_CLAUSE_DEFAULT:
	case OMP_CLAUSE_IF:
	case OMP_CLAUSE_NUM_THREADS:
	case OMP_CLAUSE_NUM_TEAMS:
	case OMP_CLAUSE_THREAD_LIMIT:
	case OMP_CLAUSE_DEVICE:
	case OMP_CLAUSE_SCHEDULE:
	case OMP_CLAUSE_DIST_SCHEDULE:
	case OMP_CLAUSE_NOWAIT:
	case OMP_CLAUSE_ORDERED:
	case OMP_CLAUSE_COLLAPSE:
	case OMP_CLAUSE_UNTIED:
	case OMP_CLAUSE_FINAL:
	case OMP_CLAUSE_MERGEABLE:
	case OMP_CLAUSE_PROC_BIND:
	case OMP_CLAUSE_SAFELEN:
	case OMP_CLAUSE_ALIGNED:
	case OMP_CLAUSE_DEPEND:
	case OMP_CLAUSE__LOOPTEMP_:
	case OMP_CLAUSE_TO:
	case OMP_CLAUSE_FROM:
	  break;

	default:
	  gcc_unreachable ();
	}
    }

  if (scan_array_reductions)
    for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
      if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
	  && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
	{
	  scan_omp (&OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c), ctx);
	  scan_omp (&OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c), ctx);
	}
      else if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE
	       && OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c))
	scan_omp (&OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c), ctx);
}
1805
1806/* Create a new name for the omp child function. Returns an identifier. */
1807
1808static GTY(()) unsigned int tmp_ompfn_id_num;
1809
1810static tree
fd6481cf 1811create_omp_child_function_name (bool task_copy)
1e8e9920 1812{
a70a5e2c 1813 return (clone_function_name (current_function_decl,
1814 task_copy ? "_omp_cpyfn" : "_omp_fn"));
1e8e9920 1815}
1816
1817/* Build a decl for the omp child function. It will not contain a body
1818 yet, just the bare decl. */
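/* For reference, the decl built below has a fixed signature; roughly

     void foo._omp_fn.N (void *.omp_data_i);		(parallel/task body)
     void foo._omp_cpyfn.N (void *.omp_data_o,
			    void *.omp_data_i);		(task copy function)

   where "foo" stands for the name of the containing function.  */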
1819
1820static void
fd6481cf 1821create_omp_child_function (omp_context *ctx, bool task_copy)
1e8e9920 1822{
1823 tree decl, type, name, t;
1824
fd6481cf 1825 name = create_omp_child_function_name (task_copy);
1826 if (task_copy)
1827 type = build_function_type_list (void_type_node, ptr_type_node,
1828 ptr_type_node, NULL_TREE);
1829 else
1830 type = build_function_type_list (void_type_node, ptr_type_node, NULL_TREE);
1e8e9920 1831
e60a6f7b 1832 decl = build_decl (gimple_location (ctx->stmt),
1833 FUNCTION_DECL, name, type);
1e8e9920 1834
fd6481cf 1835 if (!task_copy)
1836 ctx->cb.dst_fn = decl;
1837 else
75a70cf9 1838 gimple_omp_task_set_copy_fn (ctx->stmt, decl);
1e8e9920 1839
1840 TREE_STATIC (decl) = 1;
1841 TREE_USED (decl) = 1;
1842 DECL_ARTIFICIAL (decl) = 1;
84bfaaeb 1843 DECL_NAMELESS (decl) = 1;
1e8e9920 1844 DECL_IGNORED_P (decl) = 0;
1845 TREE_PUBLIC (decl) = 0;
1846 DECL_UNINLINABLE (decl) = 1;
1847 DECL_EXTERNAL (decl) = 0;
1848 DECL_CONTEXT (decl) = NULL_TREE;
773c5ba7 1849 DECL_INITIAL (decl) = make_node (BLOCK);
bc7bff74 1850 bool target_p = false;
1851 if (lookup_attribute ("omp declare target",
1852 DECL_ATTRIBUTES (current_function_decl)))
1853 target_p = true;
1854 else
1855 {
1856 omp_context *octx;
1857 for (octx = ctx; octx; octx = octx->outer)
1858 if (gimple_code (octx->stmt) == GIMPLE_OMP_TARGET
1859 && gimple_omp_target_kind (octx->stmt)
1860 == GF_OMP_TARGET_KIND_REGION)
1861 {
1862 target_p = true;
1863 break;
1864 }
1865 }
1866 if (target_p)
1867 DECL_ATTRIBUTES (decl)
1868 = tree_cons (get_identifier ("omp declare target"),
1869 NULL_TREE, DECL_ATTRIBUTES (decl));
1e8e9920 1870
e60a6f7b 1871 t = build_decl (DECL_SOURCE_LOCATION (decl),
1872 RESULT_DECL, NULL_TREE, void_type_node);
1e8e9920 1873 DECL_ARTIFICIAL (t) = 1;
1874 DECL_IGNORED_P (t) = 1;
8e5b4ed6 1875 DECL_CONTEXT (t) = decl;
1e8e9920 1876 DECL_RESULT (decl) = t;
1877
e60a6f7b 1878 t = build_decl (DECL_SOURCE_LOCATION (decl),
1879 PARM_DECL, get_identifier (".omp_data_i"), ptr_type_node);
1e8e9920 1880 DECL_ARTIFICIAL (t) = 1;
84bfaaeb 1881 DECL_NAMELESS (t) = 1;
1e8e9920 1882 DECL_ARG_TYPE (t) = ptr_type_node;
773c5ba7 1883 DECL_CONTEXT (t) = current_function_decl;
1e8e9920 1884 TREE_USED (t) = 1;
1885 DECL_ARGUMENTS (decl) = t;
fd6481cf 1886 if (!task_copy)
1887 ctx->receiver_decl = t;
1888 else
1889 {
e60a6f7b 1890 t = build_decl (DECL_SOURCE_LOCATION (decl),
1891 PARM_DECL, get_identifier (".omp_data_o"),
fd6481cf 1892 ptr_type_node);
1893 DECL_ARTIFICIAL (t) = 1;
84bfaaeb 1894 DECL_NAMELESS (t) = 1;
fd6481cf 1895 DECL_ARG_TYPE (t) = ptr_type_node;
1896 DECL_CONTEXT (t) = current_function_decl;
1897 TREE_USED (t) = 1;
86f2ad37 1898 TREE_ADDRESSABLE (t) = 1;
1767a056 1899 DECL_CHAIN (t) = DECL_ARGUMENTS (decl);
fd6481cf 1900 DECL_ARGUMENTS (decl) = t;
1901 }
1e8e9920 1902
48e1416a 1903 /* Allocate memory for the function structure. The call to
773c5ba7 1904 allocate_struct_function clobbers CFUN, so we need to restore
1e8e9920 1905 it afterward. */
87d4aa85 1906 push_struct_function (decl);
75a70cf9 1907 cfun->function_end_locus = gimple_location (ctx->stmt);
87d4aa85 1908 pop_cfun ();
1e8e9920 1909}
1910
bc7bff74 1911/* Callback for walk_gimple_seq. Check whether a combined parallel
 1912 contains a gimple_omp_for_combined_into_p OMP_FOR. */
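/* E.g. for

     #pragma omp parallel for
     for (i = 0; i < n; i++)
       ...

   the construct is represented as a GIMPLE_OMP_PARALLEL whose body
   holds a GIMPLE_OMP_FOR with gimple_omp_for_combined_into_p set;
   that inner loop is what this callback looks for.  */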
1913
1914static tree
1915find_combined_for (gimple_stmt_iterator *gsi_p,
1916 bool *handled_ops_p,
1917 struct walk_stmt_info *wi)
1918{
1919 gimple stmt = gsi_stmt (*gsi_p);
1920
1921 *handled_ops_p = true;
1922 switch (gimple_code (stmt))
1923 {
1924 WALK_SUBSTMTS;
1925
1926 case GIMPLE_OMP_FOR:
1927 if (gimple_omp_for_combined_into_p (stmt)
1928 && gimple_omp_for_kind (stmt) == GF_OMP_FOR_KIND_FOR)
1929 {
1930 wi->info = stmt;
1931 return integer_zero_node;
1932 }
1933 break;
1934 default:
1935 break;
1936 }
1937 return NULL;
1938}
1939
1e8e9920 1940/* Scan an OpenMP parallel directive. */
1941
1942static void
75a70cf9 1943scan_omp_parallel (gimple_stmt_iterator *gsi, omp_context *outer_ctx)
1e8e9920 1944{
1945 omp_context *ctx;
1946 tree name;
75a70cf9 1947 gimple stmt = gsi_stmt (*gsi);
1e8e9920 1948
1949 /* Ignore parallel directives with empty bodies, unless there
1950 are copyin clauses. */
1951 if (optimize > 0
75a70cf9 1952 && empty_body_p (gimple_omp_body (stmt))
1953 && find_omp_clause (gimple_omp_parallel_clauses (stmt),
1954 OMP_CLAUSE_COPYIN) == NULL)
1e8e9920 1955 {
75a70cf9 1956 gsi_replace (gsi, gimple_build_nop (), false);
1e8e9920 1957 return;
1958 }
1959
bc7bff74 1960 if (gimple_omp_parallel_combined_p (stmt))
1961 {
1962 gimple for_stmt;
1963 struct walk_stmt_info wi;
1964
1965 memset (&wi, 0, sizeof (wi));
1966 wi.val_only = true;
1967 walk_gimple_seq (gimple_omp_body (stmt),
1968 find_combined_for, NULL, &wi);
1969 for_stmt = (gimple) wi.info;
1970 if (for_stmt)
1971 {
1972 struct omp_for_data fd;
1973 extract_omp_for_data (for_stmt, &fd, NULL);
1974 /* We need two temporaries with fd.loop.v type (istart/iend)
1975 and then (fd.collapse - 1) temporaries with the same
1976 type for count2 ... countN-1 vars if not constant. */
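	      /* E.g. a combined parallel loop with collapse(2) and a
		 non-constant total iteration count needs
		 2 + (2 - 1) = 3 such temporaries.  */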
1977 size_t count = 2, i;
1978 tree type = fd.iter_type;
1979 if (fd.collapse > 1
1980 && TREE_CODE (fd.loop.n2) != INTEGER_CST)
1981 count += fd.collapse - 1;
1982 for (i = 0; i < count; i++)
1983 {
1984 tree temp = create_tmp_var (type, NULL);
1985 tree c = build_omp_clause (UNKNOWN_LOCATION,
1986 OMP_CLAUSE__LOOPTEMP_);
1987 OMP_CLAUSE_DECL (c) = temp;
1988 OMP_CLAUSE_CHAIN (c) = gimple_omp_parallel_clauses (stmt);
1989 gimple_omp_parallel_set_clauses (stmt, c);
1990 }
1991 }
1992 }
1993
75a70cf9 1994 ctx = new_omp_context (stmt, outer_ctx);
fd6481cf 1995 if (taskreg_nesting_level > 1)
773c5ba7 1996 ctx->is_nested = true;
1e8e9920 1997 ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
1e8e9920 1998 ctx->default_kind = OMP_CLAUSE_DEFAULT_SHARED;
1999 ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
1e8e9920 2000 name = create_tmp_var_name (".omp_data_s");
e60a6f7b 2001 name = build_decl (gimple_location (stmt),
2002 TYPE_DECL, name, ctx->record_type);
84bfaaeb 2003 DECL_ARTIFICIAL (name) = 1;
2004 DECL_NAMELESS (name) = 1;
1e8e9920 2005 TYPE_NAME (ctx->record_type) = name;
fd6481cf 2006 create_omp_child_function (ctx, false);
75a70cf9 2007 gimple_omp_parallel_set_child_fn (stmt, ctx->cb.dst_fn);
1e8e9920 2008
75a70cf9 2009 scan_sharing_clauses (gimple_omp_parallel_clauses (stmt), ctx);
ab129075 2010 scan_omp (gimple_omp_body_ptr (stmt), ctx);
1e8e9920 2011
2012 if (TYPE_FIELDS (ctx->record_type) == NULL)
2013 ctx->record_type = ctx->receiver_decl = NULL;
2014 else
2015 {
2016 layout_type (ctx->record_type);
2017 fixup_child_record_type (ctx);
2018 }
2019}
2020
fd6481cf 2021/* Scan an OpenMP task directive. */
2022
2023static void
75a70cf9 2024scan_omp_task (gimple_stmt_iterator *gsi, omp_context *outer_ctx)
fd6481cf 2025{
2026 omp_context *ctx;
75a70cf9 2027 tree name, t;
2028 gimple stmt = gsi_stmt (*gsi);
389dd41b 2029 location_t loc = gimple_location (stmt);
fd6481cf 2030
2031 /* Ignore task directives with empty bodies. */
2032 if (optimize > 0
75a70cf9 2033 && empty_body_p (gimple_omp_body (stmt)))
fd6481cf 2034 {
75a70cf9 2035 gsi_replace (gsi, gimple_build_nop (), false);
fd6481cf 2036 return;
2037 }
2038
75a70cf9 2039 ctx = new_omp_context (stmt, outer_ctx);
fd6481cf 2040 if (taskreg_nesting_level > 1)
2041 ctx->is_nested = true;
2042 ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
2043 ctx->default_kind = OMP_CLAUSE_DEFAULT_SHARED;
2044 ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
2045 name = create_tmp_var_name (".omp_data_s");
e60a6f7b 2046 name = build_decl (gimple_location (stmt),
2047 TYPE_DECL, name, ctx->record_type);
84bfaaeb 2048 DECL_ARTIFICIAL (name) = 1;
2049 DECL_NAMELESS (name) = 1;
fd6481cf 2050 TYPE_NAME (ctx->record_type) = name;
2051 create_omp_child_function (ctx, false);
75a70cf9 2052 gimple_omp_task_set_child_fn (stmt, ctx->cb.dst_fn);
fd6481cf 2053
75a70cf9 2054 scan_sharing_clauses (gimple_omp_task_clauses (stmt), ctx);
fd6481cf 2055
2056 if (ctx->srecord_type)
2057 {
2058 name = create_tmp_var_name (".omp_data_a");
e60a6f7b 2059 name = build_decl (gimple_location (stmt),
2060 TYPE_DECL, name, ctx->srecord_type);
84bfaaeb 2061 DECL_ARTIFICIAL (name) = 1;
2062 DECL_NAMELESS (name) = 1;
fd6481cf 2063 TYPE_NAME (ctx->srecord_type) = name;
2064 create_omp_child_function (ctx, true);
2065 }
2066
ab129075 2067 scan_omp (gimple_omp_body_ptr (stmt), ctx);
fd6481cf 2068
2069 if (TYPE_FIELDS (ctx->record_type) == NULL)
2070 {
2071 ctx->record_type = ctx->receiver_decl = NULL;
75a70cf9 2072 t = build_int_cst (long_integer_type_node, 0);
2073 gimple_omp_task_set_arg_size (stmt, t);
2074 t = build_int_cst (long_integer_type_node, 1);
2075 gimple_omp_task_set_arg_align (stmt, t);
fd6481cf 2076 }
2077 else
2078 {
2079 tree *p, vla_fields = NULL_TREE, *q = &vla_fields;
2080	  /* Move VLA fields to the end, so that the fixed-size fields
	     come first at constant offsets. */
2081 p = &TYPE_FIELDS (ctx->record_type);
2082 while (*p)
2083 if (!TYPE_SIZE_UNIT (TREE_TYPE (*p))
2084 || ! TREE_CONSTANT (TYPE_SIZE_UNIT (TREE_TYPE (*p))))
2085 {
2086 *q = *p;
2087 *p = TREE_CHAIN (*p);
2088 TREE_CHAIN (*q) = NULL_TREE;
2089 q = &TREE_CHAIN (*q);
2090 }
2091 else
1767a056 2092 p = &DECL_CHAIN (*p);
fd6481cf 2093 *p = vla_fields;
2094 layout_type (ctx->record_type);
2095 fixup_child_record_type (ctx);
2096 if (ctx->srecord_type)
2097 layout_type (ctx->srecord_type);
389dd41b 2098 t = fold_convert_loc (loc, long_integer_type_node,
fd6481cf 2099 TYPE_SIZE_UNIT (ctx->record_type));
75a70cf9 2100 gimple_omp_task_set_arg_size (stmt, t);
2101 t = build_int_cst (long_integer_type_node,
fd6481cf 2102 TYPE_ALIGN_UNIT (ctx->record_type));
75a70cf9 2103 gimple_omp_task_set_arg_align (stmt, t);
fd6481cf 2104 }
2105}
2106
1e8e9920 2107
773c5ba7 2108/* Scan an OpenMP loop directive. */
1e8e9920 2109
2110static void
75a70cf9 2111scan_omp_for (gimple stmt, omp_context *outer_ctx)
1e8e9920 2112{
773c5ba7 2113 omp_context *ctx;
75a70cf9 2114 size_t i;
1e8e9920 2115
773c5ba7 2116 ctx = new_omp_context (stmt, outer_ctx);
1e8e9920 2117
75a70cf9 2118 scan_sharing_clauses (gimple_omp_for_clauses (stmt), ctx);
1e8e9920 2119
ab129075 2120 scan_omp (gimple_omp_for_pre_body_ptr (stmt), ctx);
75a70cf9 2121 for (i = 0; i < gimple_omp_for_collapse (stmt); i++)
fd6481cf 2122 {
75a70cf9 2123 scan_omp_op (gimple_omp_for_index_ptr (stmt, i), ctx);
2124 scan_omp_op (gimple_omp_for_initial_ptr (stmt, i), ctx);
2125 scan_omp_op (gimple_omp_for_final_ptr (stmt, i), ctx);
2126 scan_omp_op (gimple_omp_for_incr_ptr (stmt, i), ctx);
fd6481cf 2127 }
ab129075 2128 scan_omp (gimple_omp_body_ptr (stmt), ctx);
1e8e9920 2129}
2130
2131/* Scan an OpenMP sections directive. */
2132
2133static void
75a70cf9 2134scan_omp_sections (gimple stmt, omp_context *outer_ctx)
1e8e9920 2135{
1e8e9920 2136 omp_context *ctx;
2137
2138 ctx = new_omp_context (stmt, outer_ctx);
75a70cf9 2139 scan_sharing_clauses (gimple_omp_sections_clauses (stmt), ctx);
ab129075 2140 scan_omp (gimple_omp_body_ptr (stmt), ctx);
1e8e9920 2141}
2142
2143/* Scan an OpenMP single directive. */
2144
2145static void
75a70cf9 2146scan_omp_single (gimple stmt, omp_context *outer_ctx)
1e8e9920 2147{
1e8e9920 2148 omp_context *ctx;
2149 tree name;
2150
2151 ctx = new_omp_context (stmt, outer_ctx);
2152 ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
2153 ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
2154 name = create_tmp_var_name (".omp_copy_s");
e60a6f7b 2155 name = build_decl (gimple_location (stmt),
2156 TYPE_DECL, name, ctx->record_type);
1e8e9920 2157 TYPE_NAME (ctx->record_type) = name;
2158
75a70cf9 2159 scan_sharing_clauses (gimple_omp_single_clauses (stmt), ctx);
ab129075 2160 scan_omp (gimple_omp_body_ptr (stmt), ctx);
1e8e9920 2161
2162 if (TYPE_FIELDS (ctx->record_type) == NULL)
2163 ctx->record_type = NULL;
2164 else
2165 layout_type (ctx->record_type);
2166}
2167
bc7bff74 2168/* Scan an OpenMP target{, data, update} directive. */
2169
2170static void
2171scan_omp_target (gimple stmt, omp_context *outer_ctx)
2172{
2173 omp_context *ctx;
2174 tree name;
2175 int kind = gimple_omp_target_kind (stmt);
2176
2177 ctx = new_omp_context (stmt, outer_ctx);
2178 ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
2179 ctx->default_kind = OMP_CLAUSE_DEFAULT_SHARED;
2180 ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
2181 name = create_tmp_var_name (".omp_data_t");
2182 name = build_decl (gimple_location (stmt),
2183 TYPE_DECL, name, ctx->record_type);
2184 DECL_ARTIFICIAL (name) = 1;
2185 DECL_NAMELESS (name) = 1;
2186 TYPE_NAME (ctx->record_type) = name;
2187 if (kind == GF_OMP_TARGET_KIND_REGION)
2188 {
2189 create_omp_child_function (ctx, false);
2190 gimple_omp_target_set_child_fn (stmt, ctx->cb.dst_fn);
2191 }
2192
2193 scan_sharing_clauses (gimple_omp_target_clauses (stmt), ctx);
2194 scan_omp (gimple_omp_body_ptr (stmt), ctx);
2195
2196 if (TYPE_FIELDS (ctx->record_type) == NULL)
2197 ctx->record_type = ctx->receiver_decl = NULL;
2198 else
2199 {
2200 TYPE_FIELDS (ctx->record_type)
2201 = nreverse (TYPE_FIELDS (ctx->record_type));
2202#ifdef ENABLE_CHECKING
2203 tree field;
2204 unsigned int align = DECL_ALIGN (TYPE_FIELDS (ctx->record_type));
2205 for (field = TYPE_FIELDS (ctx->record_type);
2206 field;
2207 field = DECL_CHAIN (field))
2208 gcc_assert (DECL_ALIGN (field) == align);
2209#endif
2210 layout_type (ctx->record_type);
2211 if (kind == GF_OMP_TARGET_KIND_REGION)
2212 fixup_child_record_type (ctx);
2213 }
2214}
2215
2216/* Scan an OpenMP teams directive. */
2217
2218static void
2219scan_omp_teams (gimple stmt, omp_context *outer_ctx)
2220{
2221 omp_context *ctx = new_omp_context (stmt, outer_ctx);
2222 scan_sharing_clauses (gimple_omp_teams_clauses (stmt), ctx);
2223 scan_omp (gimple_omp_body_ptr (stmt), ctx);
2224}
1e8e9920 2225
c1d127dd 2226/* Check OpenMP nesting restrictions. */
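/* E.g. the checks below reject a barrier closely nested inside a
   work-sharing region:

     #pragma omp for
     for (i = 0; i < n; i++)
       {
	 #pragma omp barrier	(rejected)
       }
   */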
ab129075 2227static bool
2228check_omp_nesting_restrictions (gimple stmt, omp_context *ctx)
c1d127dd 2229{
3d483a94 2230 if (ctx != NULL)
2231 {
2232 if (gimple_code (ctx->stmt) == GIMPLE_OMP_FOR
2233 && gimple_omp_for_kind (ctx->stmt) == GF_OMP_FOR_KIND_SIMD)
2234 {
2235 error_at (gimple_location (stmt),
2236 "OpenMP constructs may not be nested inside simd region");
2237 return false;
2238 }
bc7bff74 2239 else if (gimple_code (ctx->stmt) == GIMPLE_OMP_TEAMS)
2240 {
2241 if ((gimple_code (stmt) != GIMPLE_OMP_FOR
2242 || (gimple_omp_for_kind (stmt)
2243 != GF_OMP_FOR_KIND_DISTRIBUTE))
2244 && gimple_code (stmt) != GIMPLE_OMP_PARALLEL)
2245 {
2246 error_at (gimple_location (stmt),
2247 "only distribute or parallel constructs are allowed to "
2248 "be closely nested inside teams construct");
2249 return false;
2250 }
2251 }
3d483a94 2252 }
75a70cf9 2253 switch (gimple_code (stmt))
c1d127dd 2254 {
75a70cf9 2255 case GIMPLE_OMP_FOR:
3d483a94 2256 if (gimple_omp_for_kind (stmt) == GF_OMP_FOR_KIND_SIMD)
2257 return true;
bc7bff74 2258 if (gimple_omp_for_kind (stmt) == GF_OMP_FOR_KIND_DISTRIBUTE)
2259 {
2260 if (ctx != NULL && gimple_code (ctx->stmt) != GIMPLE_OMP_TEAMS)
2261 {
2262 error_at (gimple_location (stmt),
2263 "distribute construct must be closely nested inside "
2264 "teams construct");
2265 return false;
2266 }
2267 return true;
2268 }
2269 /* FALLTHRU */
2270 case GIMPLE_CALL:
2271 if (is_gimple_call (stmt)
2272 && (DECL_FUNCTION_CODE (gimple_call_fndecl (stmt))
2273 == BUILT_IN_GOMP_CANCEL
2274 || DECL_FUNCTION_CODE (gimple_call_fndecl (stmt))
2275 == BUILT_IN_GOMP_CANCELLATION_POINT))
2276 {
2277 const char *bad = NULL;
2278 const char *kind = NULL;
2279 if (ctx == NULL)
2280 {
2281 error_at (gimple_location (stmt), "orphaned %qs construct",
2282 DECL_FUNCTION_CODE (gimple_call_fndecl (stmt))
2283 == BUILT_IN_GOMP_CANCEL
2284 ? "#pragma omp cancel"
2285 : "#pragma omp cancellation point");
2286 return false;
2287 }
2288 switch (host_integerp (gimple_call_arg (stmt, 0), 0)
2289 ? tree_low_cst (gimple_call_arg (stmt, 0), 0)
2290 : 0)
2291 {
2292 case 1:
2293 if (gimple_code (ctx->stmt) != GIMPLE_OMP_PARALLEL)
2294 bad = "#pragma omp parallel";
2295 else if (DECL_FUNCTION_CODE (gimple_call_fndecl (stmt))
2296 == BUILT_IN_GOMP_CANCEL
2297 && !integer_zerop (gimple_call_arg (stmt, 1)))
2298 ctx->cancellable = true;
2299 kind = "parallel";
2300 break;
2301 case 2:
2302 if (gimple_code (ctx->stmt) != GIMPLE_OMP_FOR
2303 || gimple_omp_for_kind (ctx->stmt) != GF_OMP_FOR_KIND_FOR)
2304 bad = "#pragma omp for";
2305 else if (DECL_FUNCTION_CODE (gimple_call_fndecl (stmt))
2306 == BUILT_IN_GOMP_CANCEL
2307 && !integer_zerop (gimple_call_arg (stmt, 1)))
2308 {
2309 ctx->cancellable = true;
2310 if (find_omp_clause (gimple_omp_for_clauses (ctx->stmt),
2311 OMP_CLAUSE_NOWAIT))
2312 warning_at (gimple_location (stmt), 0,
2313 "%<#pragma omp cancel for%> inside "
2314 "%<nowait%> for construct");
2315 if (find_omp_clause (gimple_omp_for_clauses (ctx->stmt),
2316 OMP_CLAUSE_ORDERED))
2317 warning_at (gimple_location (stmt), 0,
2318 "%<#pragma omp cancel for%> inside "
2319 "%<ordered%> for construct");
2320 }
2321 kind = "for";
2322 break;
2323 case 4:
2324 if (gimple_code (ctx->stmt) != GIMPLE_OMP_SECTIONS
2325 && gimple_code (ctx->stmt) != GIMPLE_OMP_SECTION)
2326 bad = "#pragma omp sections";
2327 else if (DECL_FUNCTION_CODE (gimple_call_fndecl (stmt))
2328 == BUILT_IN_GOMP_CANCEL
2329 && !integer_zerop (gimple_call_arg (stmt, 1)))
2330 {
2331 if (gimple_code (ctx->stmt) == GIMPLE_OMP_SECTIONS)
2332 {
2333 ctx->cancellable = true;
2334 if (find_omp_clause (gimple_omp_sections_clauses
2335 (ctx->stmt),
2336 OMP_CLAUSE_NOWAIT))
2337 warning_at (gimple_location (stmt), 0,
2338 "%<#pragma omp cancel sections%> inside "
2339 "%<nowait%> sections construct");
2340 }
2341 else
2342 {
2343 gcc_assert (ctx->outer
2344 && gimple_code (ctx->outer->stmt)
2345 == GIMPLE_OMP_SECTIONS);
2346 ctx->outer->cancellable = true;
2347 if (find_omp_clause (gimple_omp_sections_clauses
2348 (ctx->outer->stmt),
2349 OMP_CLAUSE_NOWAIT))
2350 warning_at (gimple_location (stmt), 0,
2351 "%<#pragma omp cancel sections%> inside "
2352 "%<nowait%> sections construct");
2353 }
2354 }
2355 kind = "sections";
2356 break;
2357 case 8:
2358 if (gimple_code (ctx->stmt) != GIMPLE_OMP_TASK)
2359 bad = "#pragma omp task";
2360 else
2361 ctx->cancellable = true;
2362 kind = "taskgroup";
2363 break;
2364 default:
2365 error_at (gimple_location (stmt), "invalid arguments");
2366 return false;
2367 }
2368 if (bad)
2369 {
2370 error_at (gimple_location (stmt),
2371 "%<%s %s%> construct not closely nested inside of %qs",
2372 DECL_FUNCTION_CODE (gimple_call_fndecl (stmt))
2373 == BUILT_IN_GOMP_CANCEL
2374 ? "#pragma omp cancel"
2375 : "#pragma omp cancellation point", kind, bad);
2376 return false;
2377 }
2378 }
3d483a94 2379 /* FALLTHRU */
75a70cf9 2380 case GIMPLE_OMP_SECTIONS:
2381 case GIMPLE_OMP_SINGLE:
c1d127dd 2382 for (; ctx != NULL; ctx = ctx->outer)
75a70cf9 2383 switch (gimple_code (ctx->stmt))
c1d127dd 2384 {
75a70cf9 2385 case GIMPLE_OMP_FOR:
2386 case GIMPLE_OMP_SECTIONS:
2387 case GIMPLE_OMP_SINGLE:
2388 case GIMPLE_OMP_ORDERED:
2389 case GIMPLE_OMP_MASTER:
2390 case GIMPLE_OMP_TASK:
bc7bff74 2391 case GIMPLE_OMP_CRITICAL:
75a70cf9 2392 if (is_gimple_call (stmt))
fd6481cf 2393 {
bc7bff74 2394 if (DECL_FUNCTION_CODE (gimple_call_fndecl (stmt))
2395 != BUILT_IN_GOMP_BARRIER)
2396 return true;
ab129075 2397 error_at (gimple_location (stmt),
2398 "barrier region may not be closely nested inside "
2399 "of work-sharing, critical, ordered, master or "
2400 "explicit task region");
2401 return false;
fd6481cf 2402 }
ab129075 2403 error_at (gimple_location (stmt),
2404 "work-sharing region may not be closely nested inside "
2405 "of work-sharing, critical, ordered, master or explicit "
2406 "task region");
2407 return false;
75a70cf9 2408 case GIMPLE_OMP_PARALLEL:
ab129075 2409 return true;
c1d127dd 2410 default:
2411 break;
2412 }
2413 break;
75a70cf9 2414 case GIMPLE_OMP_MASTER:
c1d127dd 2415 for (; ctx != NULL; ctx = ctx->outer)
75a70cf9 2416 switch (gimple_code (ctx->stmt))
c1d127dd 2417 {
75a70cf9 2418 case GIMPLE_OMP_FOR:
2419 case GIMPLE_OMP_SECTIONS:
2420 case GIMPLE_OMP_SINGLE:
2421 case GIMPLE_OMP_TASK:
ab129075 2422 error_at (gimple_location (stmt),
2423 "master region may not be closely nested inside "
2424 "of work-sharing or explicit task region");
2425 return false;
75a70cf9 2426 case GIMPLE_OMP_PARALLEL:
ab129075 2427 return true;
c1d127dd 2428 default:
2429 break;
2430 }
2431 break;
75a70cf9 2432 case GIMPLE_OMP_ORDERED:
c1d127dd 2433 for (; ctx != NULL; ctx = ctx->outer)
75a70cf9 2434 switch (gimple_code (ctx->stmt))
c1d127dd 2435 {
75a70cf9 2436 case GIMPLE_OMP_CRITICAL:
2437 case GIMPLE_OMP_TASK:
ab129075 2438 error_at (gimple_location (stmt),
2439 "ordered region may not be closely nested inside "
2440 "of critical or explicit task region");
2441 return false;
75a70cf9 2442 case GIMPLE_OMP_FOR:
2443 if (find_omp_clause (gimple_omp_for_clauses (ctx->stmt),
c1d127dd 2444 OMP_CLAUSE_ORDERED) == NULL)
ab129075 2445 {
2446 error_at (gimple_location (stmt),
2447 "ordered region must be closely nested inside "
c1d127dd 2448 "a loop region with an ordered clause");
ab129075 2449 return false;
2450 }
2451 return true;
75a70cf9 2452 case GIMPLE_OMP_PARALLEL:
bc7bff74 2453 error_at (gimple_location (stmt),
2454 "ordered region must be closely nested inside "
2455 "a loop region with an ordered clause");
2456 return false;
c1d127dd 2457 default:
2458 break;
2459 }
2460 break;
75a70cf9 2461 case GIMPLE_OMP_CRITICAL:
c1d127dd 2462 for (; ctx != NULL; ctx = ctx->outer)
75a70cf9 2463 if (gimple_code (ctx->stmt) == GIMPLE_OMP_CRITICAL
2464 && (gimple_omp_critical_name (stmt)
2465 == gimple_omp_critical_name (ctx->stmt)))
c1d127dd 2466 {
ab129075 2467 error_at (gimple_location (stmt),
2468 "critical region may not be nested inside a critical "
2469 "region with the same name");
2470 return false;
c1d127dd 2471 }
2472 break;
bc7bff74 2473 case GIMPLE_OMP_TEAMS:
2474 if (ctx == NULL
2475 || gimple_code (ctx->stmt) != GIMPLE_OMP_TARGET
2476 || gimple_omp_target_kind (ctx->stmt) != GF_OMP_TARGET_KIND_REGION)
2477 {
2478 error_at (gimple_location (stmt),
2479 "teams construct not closely nested inside of target "
2480 "region");
2481 return false;
2482 }
2483 break;
c1d127dd 2484 default:
2485 break;
2486 }
ab129075 2487 return true;
c1d127dd 2488}
2489
2490
75a70cf9 2491/* Helper function for scan_omp.
2492
2493 Callback for walk_tree or operators in walk_gimple_stmt used to
2494 scan for OpenMP directives in TP. */
1e8e9920 2495
2496static tree
75a70cf9 2497scan_omp_1_op (tree *tp, int *walk_subtrees, void *data)
1e8e9920 2498{
4077bf7a 2499 struct walk_stmt_info *wi = (struct walk_stmt_info *) data;
2500 omp_context *ctx = (omp_context *) wi->info;
1e8e9920 2501 tree t = *tp;
2502
75a70cf9 2503 switch (TREE_CODE (t))
2504 {
2505 case VAR_DECL:
2506 case PARM_DECL:
2507 case LABEL_DECL:
2508 case RESULT_DECL:
2509 if (ctx)
2510 *tp = remap_decl (t, &ctx->cb);
2511 break;
2512
2513 default:
2514 if (ctx && TYPE_P (t))
2515 *tp = remap_type (t, &ctx->cb);
2516 else if (!DECL_P (t))
7cf869dd 2517 {
2518 *walk_subtrees = 1;
2519 if (ctx)
182cf5a9 2520 {
2521 tree tem = remap_type (TREE_TYPE (t), &ctx->cb);
2522 if (tem != TREE_TYPE (t))
2523 {
2524 if (TREE_CODE (t) == INTEGER_CST)
2525 *tp = build_int_cst_wide (tem,
2526 TREE_INT_CST_LOW (t),
2527 TREE_INT_CST_HIGH (t));
2528 else
2529 TREE_TYPE (t) = tem;
2530 }
2531 }
7cf869dd 2532 }
75a70cf9 2533 break;
2534 }
2535
2536 return NULL_TREE;
2537}
2538
2539
2540/* Helper function for scan_omp.
2541
2542 Callback for walk_gimple_stmt used to scan for OpenMP directives in
2543 the current statement in GSI. */
2544
2545static tree
2546scan_omp_1_stmt (gimple_stmt_iterator *gsi, bool *handled_ops_p,
2547 struct walk_stmt_info *wi)
2548{
2549 gimple stmt = gsi_stmt (*gsi);
2550 omp_context *ctx = (omp_context *) wi->info;
2551
2552 if (gimple_has_location (stmt))
2553 input_location = gimple_location (stmt);
1e8e9920 2554
c1d127dd 2555 /* Check the OpenMP nesting restrictions. */
bc7bff74 2556 bool remove = false;
2557 if (is_gimple_omp (stmt))
2558 remove = !check_omp_nesting_restrictions (stmt, ctx);
2559 else if (is_gimple_call (stmt))
2560 {
2561 tree fndecl = gimple_call_fndecl (stmt);
2562 if (fndecl
2563 && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
2564 switch (DECL_FUNCTION_CODE (fndecl))
2565 {
2566 case BUILT_IN_GOMP_BARRIER:
2567 case BUILT_IN_GOMP_CANCEL:
2568 case BUILT_IN_GOMP_CANCELLATION_POINT:
2569 case BUILT_IN_GOMP_TASKYIELD:
2570 case BUILT_IN_GOMP_TASKWAIT:
2571 case BUILT_IN_GOMP_TASKGROUP_START:
2572 case BUILT_IN_GOMP_TASKGROUP_END:
ab129075 2573 remove = !check_omp_nesting_restrictions (stmt, ctx);
bc7bff74 2574 break;
2575 default:
2576 break;
2577 }
2578 }
2579 if (remove)
2580 {
2581 stmt = gimple_build_nop ();
2582 gsi_replace (gsi, stmt, false);
fd6481cf 2583 }
c1d127dd 2584
75a70cf9 2585 *handled_ops_p = true;
2586
2587 switch (gimple_code (stmt))
1e8e9920 2588 {
75a70cf9 2589 case GIMPLE_OMP_PARALLEL:
fd6481cf 2590 taskreg_nesting_level++;
75a70cf9 2591 scan_omp_parallel (gsi, ctx);
fd6481cf 2592 taskreg_nesting_level--;
2593 break;
2594
75a70cf9 2595 case GIMPLE_OMP_TASK:
fd6481cf 2596 taskreg_nesting_level++;
75a70cf9 2597 scan_omp_task (gsi, ctx);
fd6481cf 2598 taskreg_nesting_level--;
1e8e9920 2599 break;
2600
75a70cf9 2601 case GIMPLE_OMP_FOR:
2602 scan_omp_for (stmt, ctx);
1e8e9920 2603 break;
2604
75a70cf9 2605 case GIMPLE_OMP_SECTIONS:
2606 scan_omp_sections (stmt, ctx);
1e8e9920 2607 break;
2608
75a70cf9 2609 case GIMPLE_OMP_SINGLE:
2610 scan_omp_single (stmt, ctx);
1e8e9920 2611 break;
2612
75a70cf9 2613 case GIMPLE_OMP_SECTION:
2614 case GIMPLE_OMP_MASTER:
bc7bff74 2615 case GIMPLE_OMP_TASKGROUP:
75a70cf9 2616 case GIMPLE_OMP_ORDERED:
2617 case GIMPLE_OMP_CRITICAL:
2618 ctx = new_omp_context (stmt, ctx);
ab129075 2619 scan_omp (gimple_omp_body_ptr (stmt), ctx);
1e8e9920 2620 break;
2621
bc7bff74 2622 case GIMPLE_OMP_TARGET:
2623 scan_omp_target (stmt, ctx);
2624 break;
2625
2626 case GIMPLE_OMP_TEAMS:
2627 scan_omp_teams (stmt, ctx);
2628 break;
2629
75a70cf9 2630 case GIMPLE_BIND:
1e8e9920 2631 {
2632 tree var;
1e8e9920 2633
75a70cf9 2634 *handled_ops_p = false;
2635 if (ctx)
1767a056 2636 for (var = gimple_bind_vars (stmt); var ; var = DECL_CHAIN (var))
75a70cf9 2637 insert_decl_map (&ctx->cb, var, var);
1e8e9920 2638 }
2639 break;
1e8e9920 2640 default:
75a70cf9 2641 *handled_ops_p = false;
1e8e9920 2642 break;
2643 }
2644
2645 return NULL_TREE;
2646}
2647
2648
75a70cf9 2649/* Scan all the statements starting at the current statement. CTX
2650 contains context information about the OpenMP directives and
2651 clauses found during the scan. */
1e8e9920 2652
2653static void
ab129075 2654scan_omp (gimple_seq *body_p, omp_context *ctx)
1e8e9920 2655{
2656 location_t saved_location;
2657 struct walk_stmt_info wi;
2658
2659 memset (&wi, 0, sizeof (wi));
1e8e9920 2660 wi.info = ctx;
1e8e9920 2661 wi.want_locations = true;
2662
2663 saved_location = input_location;
ab129075 2664 walk_gimple_seq_mod (body_p, scan_omp_1_stmt, scan_omp_1_op, &wi);
1e8e9920 2665 input_location = saved_location;
2666}
2667\f
2668/* Re-gimplification and code generation routines. */
2669
2670/* Build a call to GOMP_barrier, or GOMP_barrier_cancel if LHS is non-NULL. */
2671
bc7bff74 2672static gimple
2673build_omp_barrier (tree lhs)
2674{
2675 tree fndecl = builtin_decl_explicit (lhs ? BUILT_IN_GOMP_BARRIER_CANCEL
2676 : BUILT_IN_GOMP_BARRIER);
2677 gimple g = gimple_build_call (fndecl, 0);
2678 if (lhs)
2679 gimple_call_set_lhs (g, lhs);
2680 return g;
1e8e9920 2681}
2682
2683/* If a context was created for STMT when it was scanned, return it. */
2684
2685static omp_context *
75a70cf9 2686maybe_lookup_ctx (gimple stmt)
1e8e9920 2687{
2688 splay_tree_node n;
2689 n = splay_tree_lookup (all_contexts, (splay_tree_key) stmt);
2690 return n ? (omp_context *) n->value : NULL;
2691}
2692
773c5ba7 2693
2694/* Find the mapping for DECL in CTX or the immediately enclosing
2695 context that has a mapping for DECL.
2696
2697 If CTX is a nested parallel directive, we may have to use the decl
2698 mappings created in CTX's parent context. Suppose that we have the
2699 following parallel nesting (variable UIDs shown for clarity):
2700
2701 iD.1562 = 0;
2702 #omp parallel shared(iD.1562) -> outer parallel
2703 iD.1562 = iD.1562 + 1;
2704
2705 #omp parallel shared (iD.1562) -> inner parallel
2706 iD.1562 = iD.1562 - 1;
2707
2708 Each parallel structure will create a distinct .omp_data_s structure
2709 for copying iD.1562 in/out of the directive:
2710
2711 outer parallel .omp_data_s.1.i -> iD.1562
2712 inner parallel .omp_data_s.2.i -> iD.1562
2713
2714 A shared variable mapping will produce a copy-out operation before
2715 the parallel directive and a copy-in operation after it. So, in
2716 this case we would have:
2717
2718 iD.1562 = 0;
2719 .omp_data_o.1.i = iD.1562;
2720 #omp parallel shared(iD.1562) -> outer parallel
2721 .omp_data_i.1 = &.omp_data_o.1
2722 .omp_data_i.1->i = .omp_data_i.1->i + 1;
2723
2724 .omp_data_o.2.i = iD.1562; -> **
2725 #omp parallel shared(iD.1562) -> inner parallel
2726 .omp_data_i.2 = &.omp_data_o.2
2727 .omp_data_i.2->i = .omp_data_i.2->i - 1;
2728
2729
2730 ** This is a problem. The symbol iD.1562 cannot be referenced
2731 inside the body of the outer parallel region. But since we are
2732 emitting this copy operation while expanding the inner parallel
2733 directive, we need to access the CTX structure of the outer
2734 parallel directive to get the correct mapping:
2735
2736 .omp_data_o.2.i = .omp_data_i.1->i
2737
2738 Since there may be other workshare or parallel directives enclosing
2739 the parallel directive, it may be necessary to walk up the context
2740 parent chain. This is not a problem in general because nested
2741 parallelism happens only rarely. */
2742
2743static tree
2744lookup_decl_in_outer_ctx (tree decl, omp_context *ctx)
2745{
2746 tree t;
2747 omp_context *up;
2748
773c5ba7 2749 for (up = ctx->outer, t = NULL; up && t == NULL; up = up->outer)
2750 t = maybe_lookup_decl (decl, up);
2751
87b31375 2752 gcc_assert (!ctx->is_nested || t || is_global_var (decl));
773c5ba7 2753
c37594c7 2754 return t ? t : decl;
773c5ba7 2755}
2756
2757
f49d7bb5 2758/* Similar to lookup_decl_in_outer_ctx, but return DECL if not found
2759 in outer contexts. */
2760
2761static tree
2762maybe_lookup_decl_in_outer_ctx (tree decl, omp_context *ctx)
2763{
2764 tree t = NULL;
2765 omp_context *up;
2766
87b31375 2767 for (up = ctx->outer, t = NULL; up && t == NULL; up = up->outer)
2768 t = maybe_lookup_decl (decl, up);
f49d7bb5 2769
2770 return t ? t : decl;
2771}
2772
2773
1e8e9920 2774/* Construct the initialization value for reduction CLAUSE. */
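/* I.e. the identity element of the reduction operator; for instance

     reduction(+:v)  reduction(-:v)  reduction(|:v)  reduction(^:v)  ->  0
     reduction(*:v)  reduction(&&:v)				     ->  1
     reduction(&:v)						     ->  ~0
     reduction(max:v)  ->  minimum value of the type (or -Inf)
     reduction(min:v)  ->  maximum value of the type (or +Inf)  */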
2775
2776tree
2777omp_reduction_init (tree clause, tree type)
2778{
389dd41b 2779 location_t loc = OMP_CLAUSE_LOCATION (clause);
1e8e9920 2780 switch (OMP_CLAUSE_REDUCTION_CODE (clause))
2781 {
2782 case PLUS_EXPR:
2783 case MINUS_EXPR:
2784 case BIT_IOR_EXPR:
2785 case BIT_XOR_EXPR:
2786 case TRUTH_OR_EXPR:
2787 case TRUTH_ORIF_EXPR:
2788 case TRUTH_XOR_EXPR:
2789 case NE_EXPR:
385f3f36 2790 return build_zero_cst (type);
1e8e9920 2791
2792 case MULT_EXPR:
2793 case TRUTH_AND_EXPR:
2794 case TRUTH_ANDIF_EXPR:
2795 case EQ_EXPR:
389dd41b 2796 return fold_convert_loc (loc, type, integer_one_node);
1e8e9920 2797
2798 case BIT_AND_EXPR:
389dd41b 2799 return fold_convert_loc (loc, type, integer_minus_one_node);
1e8e9920 2800
2801 case MAX_EXPR:
2802 if (SCALAR_FLOAT_TYPE_P (type))
2803 {
2804 REAL_VALUE_TYPE max, min;
2805 if (HONOR_INFINITIES (TYPE_MODE (type)))
2806 {
2807 real_inf (&max);
2808 real_arithmetic (&min, NEGATE_EXPR, &max, NULL);
2809 }
2810 else
2811 real_maxval (&min, 1, TYPE_MODE (type));
2812 return build_real (type, min);
2813 }
2814 else
2815 {
2816 gcc_assert (INTEGRAL_TYPE_P (type));
2817 return TYPE_MIN_VALUE (type);
2818 }
2819
2820 case MIN_EXPR:
2821 if (SCALAR_FLOAT_TYPE_P (type))
2822 {
2823 REAL_VALUE_TYPE max;
2824 if (HONOR_INFINITIES (TYPE_MODE (type)))
2825 real_inf (&max);
2826 else
2827 real_maxval (&max, 0, TYPE_MODE (type));
2828 return build_real (type, max);
2829 }
2830 else
2831 {
2832 gcc_assert (INTEGRAL_TYPE_P (type));
2833 return TYPE_MAX_VALUE (type);
2834 }
2835
2836 default:
2837 gcc_unreachable ();
2838 }
2839}
2840
bc7bff74 2841/* Return the alignment to be assumed for the variable in CLAUSE, which
 2842 should be OMP_CLAUSE_ALIGNED. */
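/* E.g. for

     #pragma omp simd aligned(p : 32)

   this returns 32; when the clause carries no explicit alignment, the
   loop below derives one from the widest vector type the target
   supports.  */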
2843
2844static tree
2845omp_clause_aligned_alignment (tree clause)
2846{
2847 if (OMP_CLAUSE_ALIGNED_ALIGNMENT (clause))
2848 return OMP_CLAUSE_ALIGNED_ALIGNMENT (clause);
2849
2850  /* Otherwise return the implementation-defined alignment. */
2851 unsigned int al = 1;
2852 enum machine_mode mode, vmode;
2853 int vs = targetm.vectorize.autovectorize_vector_sizes ();
2854 if (vs)
2855 vs = 1 << floor_log2 (vs);
2856 static enum mode_class classes[]
2857 = { MODE_INT, MODE_VECTOR_INT, MODE_FLOAT, MODE_VECTOR_FLOAT };
2858 for (int i = 0; i < 4; i += 2)
2859 for (mode = GET_CLASS_NARROWEST_MODE (classes[i]);
2860 mode != VOIDmode;
2861 mode = GET_MODE_WIDER_MODE (mode))
2862 {
2863 vmode = targetm.vectorize.preferred_simd_mode (mode);
2864 if (GET_MODE_CLASS (vmode) != classes[i + 1])
2865 continue;
2866 while (vs
2867 && GET_MODE_SIZE (vmode) < vs
2868 && GET_MODE_2XWIDER_MODE (vmode) != VOIDmode)
2869 vmode = GET_MODE_2XWIDER_MODE (vmode);
2870
2871 tree type = lang_hooks.types.type_for_mode (mode, 1);
2872 if (type == NULL_TREE || TYPE_MODE (type) != mode)
2873 continue;
2874 type = build_vector_type (type, GET_MODE_SIZE (vmode)
2875 / GET_MODE_SIZE (mode));
2876 if (TYPE_MODE (type) != vmode)
2877 continue;
2878 if (TYPE_ALIGN_UNIT (type) > al)
2879 al = TYPE_ALIGN_UNIT (type);
2880 }
2881 return build_int_cst (integer_type_node, al);
2882}
2883
3d483a94 2884/* Return the maximum possible vectorization factor for the target. */
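/* E.g. this returns 1 when not optimizing or when loop vectorization
   has been explicitly disabled, which in turn makes
   lower_rec_simd_input_clauses fall back to scalar privatization.  */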
2885
2886static int
2887omp_max_vf (void)
2888{
2889 if (!optimize
2890 || optimize_debug
043115ec 2891 || (!flag_tree_loop_vectorize
2892 && (global_options_set.x_flag_tree_loop_vectorize
2893 || global_options_set.x_flag_tree_vectorize)))
3d483a94 2894 return 1;
2895
2896 int vs = targetm.vectorize.autovectorize_vector_sizes ();
2897 if (vs)
2898 {
2899 vs = 1 << floor_log2 (vs);
2900 return vs;
2901 }
2902 enum machine_mode vqimode = targetm.vectorize.preferred_simd_mode (QImode);
2903 if (GET_MODE_CLASS (vqimode) == MODE_VECTOR_INT)
2904 return GET_MODE_NUNITS (vqimode);
2905 return 1;
2906}
2907
2908/* Helper function of lower_rec_input_clauses, used for #pragma omp simd
2909 privatization. */
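/* The privatized variable becomes an "omp simd array" with one element
   per SIMD lane; conceptually

     int x;	->	int x[max_vf];

   LVAR (x[lane]) is installed as the DECL_VALUE_EXPR of the original
   variable, while IVAR (x[idx]) is used by the caller in the generated
   per-lane initialization and destruction loops.  */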
2910
2911static bool
2912lower_rec_simd_input_clauses (tree new_var, omp_context *ctx, int &max_vf,
2913 tree &idx, tree &lane, tree &ivar, tree &lvar)
2914{
2915 if (max_vf == 0)
2916 {
2917 max_vf = omp_max_vf ();
2918 if (max_vf > 1)
2919 {
2920 tree c = find_omp_clause (gimple_omp_for_clauses (ctx->stmt),
2921 OMP_CLAUSE_SAFELEN);
2922 if (c
2923 && compare_tree_int (OMP_CLAUSE_SAFELEN_EXPR (c), max_vf) == -1)
2924 max_vf = tree_low_cst (OMP_CLAUSE_SAFELEN_EXPR (c), 0);
2925 }
2926 if (max_vf > 1)
2927 {
2928 idx = create_tmp_var (unsigned_type_node, NULL);
2929 lane = create_tmp_var (unsigned_type_node, NULL);
2930 }
2931 }
2932 if (max_vf == 1)
2933 return false;
2934
2935 tree atype = build_array_type_nelts (TREE_TYPE (new_var), max_vf);
2936 tree avar = create_tmp_var_raw (atype, NULL);
2937 if (TREE_ADDRESSABLE (new_var))
2938 TREE_ADDRESSABLE (avar) = 1;
2939 DECL_ATTRIBUTES (avar)
2940 = tree_cons (get_identifier ("omp simd array"), NULL,
2941 DECL_ATTRIBUTES (avar));
2942 gimple_add_tmp_var (avar);
2943 ivar = build4 (ARRAY_REF, TREE_TYPE (new_var), avar, idx,
2944 NULL_TREE, NULL_TREE);
2945 lvar = build4 (ARRAY_REF, TREE_TYPE (new_var), avar, lane,
2946 NULL_TREE, NULL_TREE);
bc7bff74 2947 if (DECL_P (new_var))
2948 {
2949 SET_DECL_VALUE_EXPR (new_var, lvar);
2950 DECL_HAS_VALUE_EXPR_P (new_var) = 1;
2951 }
3d483a94 2952 return true;
2953}
2954
1e8e9920 2955/* Generate code to implement the input clauses, FIRSTPRIVATE and COPYIN,
2956 from the receiver (aka child) side and initializers for REFERENCE_TYPE
2957 private variables. Initialization statements go in ILIST, while calls
2958 to destructors go in DLIST. */
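/* E.g. for

     #pragma omp parallel firstprivate (a)

   ILIST receives the statements initializing the private copy of A from
   the value passed in through .omp_data_i, and, if A's type has a
   destructor, DLIST receives the corresponding destructor call.  */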
2959
2960static void
75a70cf9 2961lower_rec_input_clauses (tree clauses, gimple_seq *ilist, gimple_seq *dlist,
bc7bff74 2962 omp_context *ctx, struct omp_for_data *fd)
1e8e9920 2963{
c2f47e15 2964 tree c, dtor, copyin_seq, x, ptr;
1e8e9920 2965 bool copyin_by_ref = false;
f49d7bb5 2966 bool lastprivate_firstprivate = false;
bc7bff74 2967 bool reduction_omp_orig_ref = false;
1e8e9920 2968 int pass;
3d483a94 2969 bool is_simd = (gimple_code (ctx->stmt) == GIMPLE_OMP_FOR
2970 && gimple_omp_for_kind (ctx->stmt) == GF_OMP_FOR_KIND_SIMD);
2971 int max_vf = 0;
2972 tree lane = NULL_TREE, idx = NULL_TREE;
2973 tree ivar = NULL_TREE, lvar = NULL_TREE;
2974 gimple_seq llist[2] = { NULL, NULL };
1e8e9920 2975
1e8e9920 2976 copyin_seq = NULL;
2977
3d483a94 2978 /* Set max_vf=1 (which will later enforce safelen=1) in simd loops
2979 with data sharing clauses referencing variable sized vars. That
2980 is unnecessarily hard to support and very unlikely to result in
2981 vectorized code anyway. */
2982 if (is_simd)
2983 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
2984 switch (OMP_CLAUSE_CODE (c))
2985 {
2986 case OMP_CLAUSE_REDUCTION:
3d483a94 2987 case OMP_CLAUSE_PRIVATE:
2988 case OMP_CLAUSE_FIRSTPRIVATE:
2989 case OMP_CLAUSE_LASTPRIVATE:
2990 case OMP_CLAUSE_LINEAR:
2991 if (is_variable_sized (OMP_CLAUSE_DECL (c)))
2992 max_vf = 1;
2993 break;
2994 default:
2995 continue;
2996 }
2997
1e8e9920 2998 /* Do all the fixed sized types in the first pass, and the variable sized
2999 types in the second pass. This makes sure that the scalar arguments to
48e1416a 3000 the variable sized types are processed before we use them in the
1e8e9920 3001 variable sized operations. */
3002 for (pass = 0; pass < 2; ++pass)
3003 {
3004 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
3005 {
55d6e7cd 3006 enum omp_clause_code c_kind = OMP_CLAUSE_CODE (c);
1e8e9920 3007 tree var, new_var;
3008 bool by_ref;
389dd41b 3009 location_t clause_loc = OMP_CLAUSE_LOCATION (c);
1e8e9920 3010
3011 switch (c_kind)
3012 {
3013 case OMP_CLAUSE_PRIVATE:
3014 if (OMP_CLAUSE_PRIVATE_DEBUG (c))
3015 continue;
3016 break;
3017 case OMP_CLAUSE_SHARED:
bc7bff74 3018 /* Ignore shared directives in teams construct. */
3019 if (gimple_code (ctx->stmt) == GIMPLE_OMP_TEAMS)
3020 continue;
f49d7bb5 3021 if (maybe_lookup_decl (OMP_CLAUSE_DECL (c), ctx) == NULL)
3022 {
3023 gcc_assert (is_global_var (OMP_CLAUSE_DECL (c)));
3024 continue;
3025 }
1e8e9920 3026 case OMP_CLAUSE_FIRSTPRIVATE:
1e8e9920 3027 case OMP_CLAUSE_COPYIN:
bc7bff74 3028 case OMP_CLAUSE_LINEAR:
3029 break;
1e8e9920 3030 case OMP_CLAUSE_REDUCTION:
bc7bff74 3031 if (OMP_CLAUSE_REDUCTION_OMP_ORIG_REF (c))
3032 reduction_omp_orig_ref = true;
1e8e9920 3033 break;
bc7bff74 3034 case OMP_CLAUSE__LOOPTEMP_:
3035 /* Handle _looptemp_ clauses only on parallel. */
3036 if (fd)
3037 continue;
3d483a94 3038 break;
df2c34fc 3039 case OMP_CLAUSE_LASTPRIVATE:
f49d7bb5 3040 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
3041 {
3042 lastprivate_firstprivate = true;
3043 if (pass != 0)
3044 continue;
3045 }
df2c34fc 3046 break;
bc7bff74 3047 case OMP_CLAUSE_ALIGNED:
3048 if (pass == 0)
3049 continue;
3050 var = OMP_CLAUSE_DECL (c);
3051 if (TREE_CODE (TREE_TYPE (var)) == POINTER_TYPE
3052 && !is_global_var (var))
3053 {
3054 new_var = maybe_lookup_decl (var, ctx);
3055 if (new_var == NULL_TREE)
3056 new_var = maybe_lookup_decl_in_outer_ctx (var, ctx);
3057 x = builtin_decl_explicit (BUILT_IN_ASSUME_ALIGNED);
3058 x = build_call_expr_loc (clause_loc, x, 2, new_var,
3059 omp_clause_aligned_alignment (c));
3060 x = fold_convert_loc (clause_loc, TREE_TYPE (new_var), x);
3061 x = build2 (MODIFY_EXPR, TREE_TYPE (new_var), new_var, x);
3062 gimplify_and_add (x, ilist);
3063 }
3064 else if (TREE_CODE (TREE_TYPE (var)) == ARRAY_TYPE
3065 && is_global_var (var))
3066 {
3067 tree ptype = build_pointer_type (TREE_TYPE (var)), t, t2;
3068 new_var = lookup_decl (var, ctx);
3069 t = maybe_lookup_decl_in_outer_ctx (var, ctx);
3070 t = build_fold_addr_expr_loc (clause_loc, t);
3071 t2 = builtin_decl_explicit (BUILT_IN_ASSUME_ALIGNED);
3072 t = build_call_expr_loc (clause_loc, t2, 2, t,
3073 omp_clause_aligned_alignment (c));
3074 t = fold_convert_loc (clause_loc, ptype, t);
3075 x = create_tmp_var (ptype, NULL);
3076 t = build2 (MODIFY_EXPR, ptype, x, t);
3077 gimplify_and_add (t, ilist);
3078 t = build_simple_mem_ref_loc (clause_loc, x);
3079 SET_DECL_VALUE_EXPR (new_var, t);
3080 DECL_HAS_VALUE_EXPR_P (new_var) = 1;
3081 }
3082 continue;
1e8e9920 3083 default:
3084 continue;
3085 }
3086
3087 new_var = var = OMP_CLAUSE_DECL (c);
3088 if (c_kind != OMP_CLAUSE_COPYIN)
3089 new_var = lookup_decl (var, ctx);
3090
3091 if (c_kind == OMP_CLAUSE_SHARED || c_kind == OMP_CLAUSE_COPYIN)
3092 {
3093 if (pass != 0)
3094 continue;
3095 }
1e8e9920 3096 else if (is_variable_sized (var))
3097 {
773c5ba7 3098 /* For variable sized types, we need to allocate the
3099 actual storage here. Call alloca and store the
3100 result in the pointer decl that we created elsewhere. */
1e8e9920 3101 if (pass == 0)
3102 continue;
3103
fd6481cf 3104 if (c_kind != OMP_CLAUSE_FIRSTPRIVATE || !is_task_ctx (ctx))
3105 {
75a70cf9 3106 gimple stmt;
b9a16870 3107 tree tmp, atmp;
75a70cf9 3108
fd6481cf 3109 ptr = DECL_VALUE_EXPR (new_var);
3110 gcc_assert (TREE_CODE (ptr) == INDIRECT_REF);
3111 ptr = TREE_OPERAND (ptr, 0);
3112 gcc_assert (DECL_P (ptr));
3113 x = TYPE_SIZE_UNIT (TREE_TYPE (new_var));
75a70cf9 3114
3115	      /* void *tmp = __builtin_alloca (x); */
b9a16870 3116 atmp = builtin_decl_explicit (BUILT_IN_ALLOCA);
3117 stmt = gimple_build_call (atmp, 1, x);
75a70cf9 3118 tmp = create_tmp_var_raw (ptr_type_node, NULL);
3119 gimple_add_tmp_var (tmp);
3120 gimple_call_set_lhs (stmt, tmp);
3121
3122 gimple_seq_add_stmt (ilist, stmt);
3123
389dd41b 3124 x = fold_convert_loc (clause_loc, TREE_TYPE (ptr), tmp);
75a70cf9 3125 gimplify_assign (ptr, x, ilist);
fd6481cf 3126 }
1e8e9920 3127 }
1e8e9920 3128 else if (is_reference (var))
3129 {
773c5ba7 3130 /* For references that are being privatized for Fortran,
3131 allocate new backing storage for the new pointer
3132	     variable. This allows us to avoid having to change all the
3133	     code that expects a pointer into code that expects
bc7bff74 3134 a direct variable. */
1e8e9920 3135 if (pass == 0)
3136 continue;
3137
3138 x = TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (new_var)));
fd6481cf 3139 if (c_kind == OMP_CLAUSE_FIRSTPRIVATE && is_task_ctx (ctx))
3140 {
3141 x = build_receiver_ref (var, false, ctx);
389dd41b 3142 x = build_fold_addr_expr_loc (clause_loc, x);
fd6481cf 3143 }
3144 else if (TREE_CONSTANT (x))
1e8e9920 3145 {
3146 const char *name = NULL;
3147 if (DECL_NAME (var))
3148 name = IDENTIFIER_POINTER (DECL_NAME (new_var));
3149
df2c34fc 3150 x = create_tmp_var_raw (TREE_TYPE (TREE_TYPE (new_var)),
3151 name);
3152 gimple_add_tmp_var (x);
86f2ad37 3153 TREE_ADDRESSABLE (x) = 1;
389dd41b 3154 x = build_fold_addr_expr_loc (clause_loc, x);
1e8e9920 3155 }
3156 else
3157 {
b9a16870 3158 tree atmp = builtin_decl_explicit (BUILT_IN_ALLOCA);
3159 x = build_call_expr_loc (clause_loc, atmp, 1, x);
1e8e9920 3160 }
3161
389dd41b 3162 x = fold_convert_loc (clause_loc, TREE_TYPE (new_var), x);
75a70cf9 3163 gimplify_assign (new_var, x, ilist);
1e8e9920 3164
182cf5a9 3165 new_var = build_simple_mem_ref_loc (clause_loc, new_var);
1e8e9920 3166 }
3167 else if (c_kind == OMP_CLAUSE_REDUCTION
3168 && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
3169 {
3170 if (pass == 0)
3171 continue;
3172 }
3173 else if (pass != 0)
3174 continue;
3175
55d6e7cd 3176 switch (OMP_CLAUSE_CODE (c))
1e8e9920 3177 {
3178 case OMP_CLAUSE_SHARED:
bc7bff74 3179 /* Ignore shared directives in teams construct. */
3180 if (gimple_code (ctx->stmt) == GIMPLE_OMP_TEAMS)
3181 continue;
f49d7bb5 3182 /* Shared global vars are just accessed directly. */
3183 if (is_global_var (new_var))
3184 break;
1e8e9920 3185 /* Set up the DECL_VALUE_EXPR for shared variables now. This
3186 needs to be delayed until after fixup_child_record_type so
3187 that we get the correct type during the dereference. */
e8a588af 3188 by_ref = use_pointer_for_field (var, ctx);
1e8e9920 3189 x = build_receiver_ref (var, by_ref, ctx);
3190 SET_DECL_VALUE_EXPR (new_var, x);
3191 DECL_HAS_VALUE_EXPR_P (new_var) = 1;
3192
3193 /* ??? If VAR is not passed by reference, and the variable
3194 hasn't been initialized yet, then we'll get a warning for
3195 the store into the omp_data_s structure. Ideally, we'd be
48e1416a 3196 able to notice this and not store anything at all, but
1e8e9920 3197 we're generating code too early. Suppress the warning. */
3198 if (!by_ref)
3199 TREE_NO_WARNING (var) = 1;
3200 break;
3201
3202 case OMP_CLAUSE_LASTPRIVATE:
3203 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
3204 break;
3205 /* FALLTHRU */
3206
3207 case OMP_CLAUSE_PRIVATE:
fd6481cf 3208 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_PRIVATE)
3209 x = build_outer_var_ref (var, ctx);
3210 else if (OMP_CLAUSE_PRIVATE_OUTER_REF (c))
3211 {
3212 if (is_task_ctx (ctx))
3213 x = build_receiver_ref (var, false, ctx);
3214 else
3215 x = build_outer_var_ref (var, ctx);
3216 }
3217 else
3218 x = NULL;
3d483a94 3219 do_private:
bc7bff74 3220 tree nx;
3221 nx = lang_hooks.decls.omp_clause_default_ctor (c, new_var, x);
3d483a94 3222 if (is_simd)
3223 {
3224 tree y = lang_hooks.decls.omp_clause_dtor (c, new_var);
bc7bff74 3225 if ((TREE_ADDRESSABLE (new_var) || nx || y
3d483a94 3226 || OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE)
3227 && lower_rec_simd_input_clauses (new_var, ctx, max_vf,
3228 idx, lane, ivar, lvar))
3229 {
bc7bff74 3230 if (nx)
3d483a94 3231 x = lang_hooks.decls.omp_clause_default_ctor
3232 (c, unshare_expr (ivar), x);
bc7bff74 3233 if (nx && x)
3d483a94 3234 gimplify_and_add (x, &llist[0]);
3235 if (y)
3236 {
3237 y = lang_hooks.decls.omp_clause_dtor (c, ivar);
3238 if (y)
3239 {
3240 gimple_seq tseq = NULL;
3241
3242 dtor = y;
3243 gimplify_stmt (&dtor, &tseq);
3244 gimple_seq_add_seq (&llist[1], tseq);
3245 }
3246 }
3247 break;
3248 }
3249 }
bc7bff74 3250 if (nx)
3251 gimplify_and_add (nx, ilist);
1e8e9920 3252 /* FALLTHRU */
3253
3254 do_dtor:
3255 x = lang_hooks.decls.omp_clause_dtor (c, new_var);
3256 if (x)
3257 {
75a70cf9 3258 gimple_seq tseq = NULL;
3259
1e8e9920 3260 dtor = x;
75a70cf9 3261 gimplify_stmt (&dtor, &tseq);
e3a19533 3262 gimple_seq_add_seq (dlist, tseq);
1e8e9920 3263 }
3264 break;
3265
3d483a94 3266 case OMP_CLAUSE_LINEAR:
3267 if (!OMP_CLAUSE_LINEAR_NO_COPYIN (c))
3268 goto do_firstprivate;
3269 if (OMP_CLAUSE_LINEAR_NO_COPYOUT (c))
3270 x = NULL;
3271 else
3272 x = build_outer_var_ref (var, ctx);
3273 goto do_private;
3274
1e8e9920 3275 case OMP_CLAUSE_FIRSTPRIVATE:
fd6481cf 3276 if (is_task_ctx (ctx))
3277 {
3278 if (is_reference (var) || is_variable_sized (var))
3279 goto do_dtor;
3280 else if (is_global_var (maybe_lookup_decl_in_outer_ctx (var,
3281 ctx))
3282 || use_pointer_for_field (var, NULL))
3283 {
3284 x = build_receiver_ref (var, false, ctx);
3285 SET_DECL_VALUE_EXPR (new_var, x);
3286 DECL_HAS_VALUE_EXPR_P (new_var) = 1;
3287 goto do_dtor;
3288 }
3289 }
3d483a94 3290 do_firstprivate:
1e8e9920 3291 x = build_outer_var_ref (var, ctx);
3d483a94 3292 if (is_simd)
3293 {
bc7bff74 3294 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LINEAR
3295 && gimple_omp_for_combined_into_p (ctx->stmt))
3296 {
3297 tree stept = POINTER_TYPE_P (TREE_TYPE (x))
3298 ? sizetype : TREE_TYPE (x);
3299 tree t = fold_convert (stept,
3300 OMP_CLAUSE_LINEAR_STEP (c));
3301 tree c = find_omp_clause (clauses,
3302 OMP_CLAUSE__LOOPTEMP_);
3303 gcc_assert (c);
3304 tree l = OMP_CLAUSE_DECL (c);
3305 if (fd->collapse == 1)
3306 {
3307 tree n1 = fd->loop.n1;
3308 tree step = fd->loop.step;
3309 tree itype = TREE_TYPE (l);
3310 if (POINTER_TYPE_P (itype))
3311 itype = signed_type_for (itype);
3312 l = fold_build2 (MINUS_EXPR, itype, l, n1);
3313 if (TYPE_UNSIGNED (itype)
3314 && fd->loop.cond_code == GT_EXPR)
3315 l = fold_build2 (TRUNC_DIV_EXPR, itype,
3316 fold_build1 (NEGATE_EXPR,
3317 itype, l),
3318 fold_build1 (NEGATE_EXPR,
3319 itype, step));
3320 else
3321 l = fold_build2 (TRUNC_DIV_EXPR, itype, l, step);
3322 }
3323 t = fold_build2 (MULT_EXPR, stept,
3324 fold_convert (stept, l), t);
3325 if (POINTER_TYPE_P (TREE_TYPE (x)))
3326 x = fold_build2 (POINTER_PLUS_EXPR,
3327 TREE_TYPE (x), x, t);
3328 else
3329 x = fold_build2 (PLUS_EXPR, TREE_TYPE (x), x, t);
3330 }
3331
3d483a94 3332 if ((OMP_CLAUSE_CODE (c) != OMP_CLAUSE_LINEAR
3333 || TREE_ADDRESSABLE (new_var))
3334 && lower_rec_simd_input_clauses (new_var, ctx, max_vf,
3335 idx, lane, ivar, lvar))
3336 {
3337 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LINEAR)
3338 {
3339 tree iv = create_tmp_var (TREE_TYPE (new_var), NULL);
3340 x = lang_hooks.decls.omp_clause_copy_ctor (c, iv, x);
3341 gimplify_and_add (x, ilist);
3342 gimple_stmt_iterator gsi
3343 = gsi_start_1 (gimple_omp_body_ptr (ctx->stmt));
3344 gimple g
3345 = gimple_build_assign (unshare_expr (lvar), iv);
3346 gsi_insert_before_without_update (&gsi, g,
3347 GSI_SAME_STMT);
3348 tree stept = POINTER_TYPE_P (TREE_TYPE (x))
3349 ? sizetype : TREE_TYPE (x);
3350 tree t = fold_convert (stept,
3351 OMP_CLAUSE_LINEAR_STEP (c));
3352 enum tree_code code = PLUS_EXPR;
3353 if (POINTER_TYPE_P (TREE_TYPE (new_var)))
3354 code = POINTER_PLUS_EXPR;
3355 g = gimple_build_assign_with_ops (code, iv, iv, t);
3356 gsi_insert_before_without_update (&gsi, g,
3357 GSI_SAME_STMT);
3358 break;
3359 }
3360 x = lang_hooks.decls.omp_clause_copy_ctor
3361 (c, unshare_expr (ivar), x);
3362 gimplify_and_add (x, &llist[0]);
3363 x = lang_hooks.decls.omp_clause_dtor (c, ivar);
3364 if (x)
3365 {
3366 gimple_seq tseq = NULL;
3367
3368 dtor = x;
3369 gimplify_stmt (&dtor, &tseq);
3370 gimple_seq_add_seq (&llist[1], tseq);
3371 }
3372 break;
3373 }
3374 }
1e8e9920 3375 x = lang_hooks.decls.omp_clause_copy_ctor (c, new_var, x);
3376 gimplify_and_add (x, ilist);
3377 goto do_dtor;
1e8e9920 3378
bc7bff74 3379 case OMP_CLAUSE__LOOPTEMP_:
3380 gcc_assert (is_parallel_ctx (ctx));
3381 x = build_outer_var_ref (var, ctx);
3382 x = build2 (MODIFY_EXPR, TREE_TYPE (new_var), new_var, x);
3383 gimplify_and_add (x, ilist);
3384 break;
3385
1e8e9920 3386 case OMP_CLAUSE_COPYIN:
e8a588af 3387 by_ref = use_pointer_for_field (var, NULL);
1e8e9920 3388 x = build_receiver_ref (var, by_ref, ctx);
3389 x = lang_hooks.decls.omp_clause_assign_op (c, new_var, x);
3390 append_to_statement_list (x, &copyin_seq);
3391 copyin_by_ref |= by_ref;
3392 break;
3393
3394 case OMP_CLAUSE_REDUCTION:
3395 if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
3396 {
fd6481cf 3397 tree placeholder = OMP_CLAUSE_REDUCTION_PLACEHOLDER (c);
bc7bff74 3398 gimple tseq;
fd6481cf 3399 x = build_outer_var_ref (var, ctx);
3400
bc7bff74 3401 if (is_reference (var)
3402 && !useless_type_conversion_p (TREE_TYPE (placeholder),
3403 TREE_TYPE (x)))
389dd41b 3404 x = build_fold_addr_expr_loc (clause_loc, x);
fd6481cf 3405 SET_DECL_VALUE_EXPR (placeholder, x);
3406 DECL_HAS_VALUE_EXPR_P (placeholder) = 1;
bc7bff74 3407 tree new_vard = new_var;
3408 if (is_reference (var))
3409 {
3410 gcc_assert (TREE_CODE (new_var) == MEM_REF);
3411 new_vard = TREE_OPERAND (new_var, 0);
3412 gcc_assert (DECL_P (new_vard));
3413 }
3d483a94 3414 if (is_simd
3415 && lower_rec_simd_input_clauses (new_var, ctx, max_vf,
3416 idx, lane, ivar, lvar))
3417 {
bc7bff74 3418 if (new_vard == new_var)
3419 {
3420 gcc_assert (DECL_VALUE_EXPR (new_var) == lvar);
3421 SET_DECL_VALUE_EXPR (new_var, ivar);
3422 }
3423 else
3424 {
3425 SET_DECL_VALUE_EXPR (new_vard,
3426 build_fold_addr_expr (ivar));
3427 DECL_HAS_VALUE_EXPR_P (new_vard) = 1;
3428 }
3429 x = lang_hooks.decls.omp_clause_default_ctor
3430 (c, unshare_expr (ivar),
3431 build_outer_var_ref (var, ctx));
3432 if (x)
3433 gimplify_and_add (x, &llist[0]);
3434 if (OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c))
3435 {
3436 tseq = OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c);
3437 lower_omp (&tseq, ctx);
3438 gimple_seq_add_seq (&llist[0], tseq);
3439 }
3440 OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c) = NULL;
3441 tseq = OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c);
3442 lower_omp (&tseq, ctx);
3443 gimple_seq_add_seq (&llist[1], tseq);
3444 OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c) = NULL;
3445 DECL_HAS_VALUE_EXPR_P (placeholder) = 0;
3446 if (new_vard == new_var)
3447 SET_DECL_VALUE_EXPR (new_var, lvar);
3448 else
3449 SET_DECL_VALUE_EXPR (new_vard,
3450 build_fold_addr_expr (lvar));
3451 x = lang_hooks.decls.omp_clause_dtor (c, ivar);
3452 if (x)
3453 {
3454 tseq = NULL;
3455 dtor = x;
3456 gimplify_stmt (&dtor, &tseq);
3457 gimple_seq_add_seq (&llist[1], tseq);
3458 }
3459 break;
3460 }
3461 x = lang_hooks.decls.omp_clause_default_ctor
3462 (c, new_var, unshare_expr (x));
3463 if (x)
3464 gimplify_and_add (x, ilist);
3465 if (OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c))
3466 {
3467 tseq = OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c);
3468 lower_omp (&tseq, ctx);
3469 gimple_seq_add_seq (ilist, tseq);
3470 }
3471 OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c) = NULL;
3472 if (is_simd)
3473 {
3474 tseq = OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c);
3475 lower_omp (&tseq, ctx);
3476 gimple_seq_add_seq (dlist, tseq);
3477 OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c) = NULL;
3478 }
3479 DECL_HAS_VALUE_EXPR_P (placeholder) = 0;
3480 goto do_dtor;
3481 }
3482 else
3483 {
3484 x = omp_reduction_init (c, TREE_TYPE (new_var));
3485 gcc_assert (TREE_CODE (TREE_TYPE (new_var)) != ARRAY_TYPE);
3486 if (is_simd
3487 && lower_rec_simd_input_clauses (new_var, ctx, max_vf,
3488 idx, lane, ivar, lvar))
3489 {
3490 enum tree_code code = OMP_CLAUSE_REDUCTION_CODE (c);
3491 tree ref = build_outer_var_ref (var, ctx);
3492
3493 gimplify_assign (unshare_expr (ivar), x, &llist[0]);
3494
3495 /* reduction(-:var) sums up the partial results, so it
3496 acts identically to reduction(+:var). */
3497 if (code == MINUS_EXPR)
3498 code = PLUS_EXPR;
3499
3500 x = build2 (code, TREE_TYPE (ref), ref, ivar);
3d483a94 3501 ref = build_outer_var_ref (var, ctx);
3502 gimplify_assign (ref, x, &llist[1]);
3503 }
3504 else
3505 {
3506 gimplify_assign (new_var, x, ilist);
3507 if (is_simd)
3508 gimplify_assign (build_outer_var_ref (var, ctx),
3509 new_var, dlist);
3510 }
1e8e9920 3511 }
3512 break;
3513
3514 default:
3515 gcc_unreachable ();
3516 }
3517 }
3518 }
3519
3d483a94 3520 if (lane)
3521 {
3522 tree uid = create_tmp_var (ptr_type_node, "simduid");
8e1a382d 3523      /* We don't want uninit warnings on simduid; it is always uninitialized,
3524	 since we use it only for its DECL_UID, never for its value.  */
3525 TREE_NO_WARNING (uid) = 1;
3d483a94 3526 gimple g
3527 = gimple_build_call_internal (IFN_GOMP_SIMD_LANE, 1, uid);
3528 gimple_call_set_lhs (g, lane);
3529 gimple_stmt_iterator gsi = gsi_start_1 (gimple_omp_body_ptr (ctx->stmt));
3530 gsi_insert_before_without_update (&gsi, g, GSI_SAME_STMT);
3531 c = build_omp_clause (UNKNOWN_LOCATION, OMP_CLAUSE__SIMDUID_);
3532 OMP_CLAUSE__SIMDUID__DECL (c) = uid;
3533 OMP_CLAUSE_CHAIN (c) = gimple_omp_for_clauses (ctx->stmt);
3534 gimple_omp_for_set_clauses (ctx->stmt, c);
3535 g = gimple_build_assign_with_ops (INTEGER_CST, lane,
3536 build_int_cst (unsigned_type_node, 0),
3537 NULL_TREE);
3538 gimple_seq_add_stmt (ilist, g);
3539 for (int i = 0; i < 2; i++)
3540 if (llist[i])
3541 {
3542 tree vf = create_tmp_var (unsigned_type_node, NULL);
3543 g = gimple_build_call_internal (IFN_GOMP_SIMD_VF, 1, uid);
3544 gimple_call_set_lhs (g, vf);
3545 gimple_seq *seq = i == 0 ? ilist : dlist;
3546 gimple_seq_add_stmt (seq, g);
3547 tree t = build_int_cst (unsigned_type_node, 0);
3548 g = gimple_build_assign_with_ops (INTEGER_CST, idx, t, NULL_TREE);
3549 gimple_seq_add_stmt (seq, g);
3550 tree body = create_artificial_label (UNKNOWN_LOCATION);
3551 tree header = create_artificial_label (UNKNOWN_LOCATION);
3552 tree end = create_artificial_label (UNKNOWN_LOCATION);
3553 gimple_seq_add_stmt (seq, gimple_build_goto (header));
3554 gimple_seq_add_stmt (seq, gimple_build_label (body));
3555 gimple_seq_add_seq (seq, llist[i]);
3556 t = build_int_cst (unsigned_type_node, 1);
3557 g = gimple_build_assign_with_ops (PLUS_EXPR, idx, idx, t);
3558 gimple_seq_add_stmt (seq, g);
3559 gimple_seq_add_stmt (seq, gimple_build_label (header));
3560 g = gimple_build_cond (LT_EXPR, idx, vf, body, end);
3561 gimple_seq_add_stmt (seq, g);
3562 gimple_seq_add_stmt (seq, gimple_build_label (end));
3563 }
3564 }
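  /* Taken together, the statements built above amount to roughly the
     following pseudocode (a sketch, not a quote from any dump):

	lane = GOMP_SIMD_LANE (simduid);
	...
	vf = GOMP_SIMD_VF (simduid);
	for (idx = 0; idx < vf; idx++)
	  <statements from llist[0] in ILIST, resp. llist[1] in DLIST>;

     where the loop over the omp simd array lanes is emitted as the
     explicit labels, PLUS_EXPR and GIMPLE_COND seen above.  */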
3565
1e8e9920 3566  /* The copyin sequence is not to be executed by the main thread, since
3567     that would result in self-copies.  The self-copy may be invisible for
3568     scalars, but it is certainly visible to a C++ operator=.  */
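  /* In other words, the guard emitted below is morally equivalent to

	if (__builtin_omp_get_thread_num () != 0)
	  <copyin_seq: each copyin var = the master thread's copy>;

     so only the non-master threads perform the copies.  */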
3569 if (copyin_seq)
3570 {
b9a16870 3571 x = build_call_expr (builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM),
3572 0);
1e8e9920 3573 x = build2 (NE_EXPR, boolean_type_node, x,
3574 build_int_cst (TREE_TYPE (x), 0));
3575 x = build3 (COND_EXPR, void_type_node, x, copyin_seq, NULL);
3576 gimplify_and_add (x, ilist);
3577 }
3578
3579 /* If any copyin variable is passed by reference, we must ensure the
3580 master thread doesn't modify it before it is copied over in all
f49d7bb5 3581 threads. Similarly for variables in both firstprivate and
3582 lastprivate clauses we need to ensure the lastprivate copying
bc7bff74 3583     happens after firstprivate copying in all threads.  And similarly
3584     for UDRs whose initializer expression refers to omp_orig.  */
3585 if (copyin_by_ref || lastprivate_firstprivate || reduction_omp_orig_ref)
3d483a94 3586 {
3587 /* Don't add any barrier for #pragma omp simd or
3588 #pragma omp distribute. */
3589 if (gimple_code (ctx->stmt) != GIMPLE_OMP_FOR
3590 || gimple_omp_for_kind (ctx->stmt) == GF_OMP_FOR_KIND_FOR)
bc7bff74 3591 gimple_seq_add_stmt (ilist, build_omp_barrier (NULL_TREE));
3d483a94 3592 }
3593
3594 /* If max_vf is non-zero, then we can use only a vectorization factor
3595 up to the max_vf we chose. So stick it into the safelen clause. */
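  /* For instance (numbers purely illustrative), if the user wrote
     safelen(64) but only max_vf == 16 elements were allocated for the
     omp simd arrays, the annotation must be narrowed to safelen(16).  */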
3596 if (max_vf)
3597 {
3598 tree c = find_omp_clause (gimple_omp_for_clauses (ctx->stmt),
3599 OMP_CLAUSE_SAFELEN);
3600 if (c == NULL_TREE
3601 || compare_tree_int (OMP_CLAUSE_SAFELEN_EXPR (c),
3602 max_vf) == 1)
3603 {
3604 c = build_omp_clause (UNKNOWN_LOCATION, OMP_CLAUSE_SAFELEN);
3605 OMP_CLAUSE_SAFELEN_EXPR (c) = build_int_cst (integer_type_node,
3606 max_vf);
3607 OMP_CLAUSE_CHAIN (c) = gimple_omp_for_clauses (ctx->stmt);
3608 gimple_omp_for_set_clauses (ctx->stmt, c);
3609 }
3610 }
1e8e9920 3611}
3612
773c5ba7 3613
1e8e9920 3614/* Generate code to implement the LASTPRIVATE clauses. This is used for
3615 both parallel and workshare constructs. PREDICATE may be NULL if it's
3616 always true. */
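/* As an illustration (a sketch, not a dump quote; x.priv stands for the
   privatized copy), for lastprivate(x) the sequence appended to
   STMT_LIST has the shape

	if (<PREDICATE: this thread ran the sequentially last iteration>)
	  {
	    <lowered OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ, if any>
	    x = x.priv;
	  }

   with the conditional omitted when PREDICATE is NULL.  */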
3617
3618static void
75a70cf9 3619lower_lastprivate_clauses (tree clauses, tree predicate, gimple_seq *stmt_list,
bc7bff74 3620 omp_context *ctx)
1e8e9920 3621{
3d483a94 3622 tree x, c, label = NULL, orig_clauses = clauses;
fd6481cf 3623 bool par_clauses = false;
3d483a94 3624 tree simduid = NULL, lastlane = NULL;
1e8e9920 3625
3d483a94 3626 /* Early exit if there are no lastprivate or linear clauses. */
3627 for (; clauses ; clauses = OMP_CLAUSE_CHAIN (clauses))
3628 if (OMP_CLAUSE_CODE (clauses) == OMP_CLAUSE_LASTPRIVATE
3629 || (OMP_CLAUSE_CODE (clauses) == OMP_CLAUSE_LINEAR
3630 && !OMP_CLAUSE_LINEAR_NO_COPYOUT (clauses)))
3631 break;
1e8e9920 3632 if (clauses == NULL)
3633 {
3634 /* If this was a workshare clause, see if it had been combined
3635 with its parallel. In that case, look for the clauses on the
3636 parallel statement itself. */
3637 if (is_parallel_ctx (ctx))
3638 return;
3639
3640 ctx = ctx->outer;
3641 if (ctx == NULL || !is_parallel_ctx (ctx))
3642 return;
3643
75a70cf9 3644 clauses = find_omp_clause (gimple_omp_parallel_clauses (ctx->stmt),
1e8e9920 3645 OMP_CLAUSE_LASTPRIVATE);
3646 if (clauses == NULL)
3647 return;
fd6481cf 3648 par_clauses = true;
1e8e9920 3649 }
3650
75a70cf9 3651 if (predicate)
3652 {
3653 gimple stmt;
3654 tree label_true, arm1, arm2;
3655
e60a6f7b 3656 label = create_artificial_label (UNKNOWN_LOCATION);
3657 label_true = create_artificial_label (UNKNOWN_LOCATION);
75a70cf9 3658 arm1 = TREE_OPERAND (predicate, 0);
3659 arm2 = TREE_OPERAND (predicate, 1);
3660 gimplify_expr (&arm1, stmt_list, NULL, is_gimple_val, fb_rvalue);
3661 gimplify_expr (&arm2, stmt_list, NULL, is_gimple_val, fb_rvalue);
3662 stmt = gimple_build_cond (TREE_CODE (predicate), arm1, arm2,
3663 label_true, label);
3664 gimple_seq_add_stmt (stmt_list, stmt);
3665 gimple_seq_add_stmt (stmt_list, gimple_build_label (label_true));
3666 }
1e8e9920 3667
3d483a94 3668 if (gimple_code (ctx->stmt) == GIMPLE_OMP_FOR
3669 && gimple_omp_for_kind (ctx->stmt) == GF_OMP_FOR_KIND_SIMD)
3670 {
3671 simduid = find_omp_clause (orig_clauses, OMP_CLAUSE__SIMDUID_);
3672 if (simduid)
3673 simduid = OMP_CLAUSE__SIMDUID__DECL (simduid);
3674 }
3675
fd6481cf 3676 for (c = clauses; c ;)
1e8e9920 3677 {
3678 tree var, new_var;
389dd41b 3679 location_t clause_loc = OMP_CLAUSE_LOCATION (c);
1e8e9920 3680
3d483a94 3681 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE
3682 || (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LINEAR
3683 && !OMP_CLAUSE_LINEAR_NO_COPYOUT (c)))
fd6481cf 3684 {
3685 var = OMP_CLAUSE_DECL (c);
3686 new_var = lookup_decl (var, ctx);
1e8e9920 3687
3d483a94 3688 if (simduid && DECL_HAS_VALUE_EXPR_P (new_var))
3689 {
3690 tree val = DECL_VALUE_EXPR (new_var);
3691 if (TREE_CODE (val) == ARRAY_REF
3692 && VAR_P (TREE_OPERAND (val, 0))
3693 && lookup_attribute ("omp simd array",
3694 DECL_ATTRIBUTES (TREE_OPERAND (val,
3695 0))))
3696 {
3697 if (lastlane == NULL)
3698 {
3699 lastlane = create_tmp_var (unsigned_type_node, NULL);
3700 gimple g
3701 = gimple_build_call_internal (IFN_GOMP_SIMD_LAST_LANE,
3702 2, simduid,
3703 TREE_OPERAND (val, 1));
3704 gimple_call_set_lhs (g, lastlane);
3705 gimple_seq_add_stmt (stmt_list, g);
3706 }
3707 new_var = build4 (ARRAY_REF, TREE_TYPE (val),
3708 TREE_OPERAND (val, 0), lastlane,
3709 NULL_TREE, NULL_TREE);
3710 }
3711 }
3712
3713 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE
3714 && OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c))
75a70cf9 3715 {
e3a19533 3716 lower_omp (&OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c), ctx);
75a70cf9 3717 gimple_seq_add_seq (stmt_list,
3718 OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c));
3d483a94 3719 OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c) = NULL;
75a70cf9 3720 }
1e8e9920 3721
fd6481cf 3722 x = build_outer_var_ref (var, ctx);
3723 if (is_reference (var))
182cf5a9 3724 new_var = build_simple_mem_ref_loc (clause_loc, new_var);
fd6481cf 3725 x = lang_hooks.decls.omp_clause_assign_op (c, x, new_var);
75a70cf9 3726 gimplify_and_add (x, stmt_list);
fd6481cf 3727 }
3728 c = OMP_CLAUSE_CHAIN (c);
3729 if (c == NULL && !par_clauses)
3730 {
3731 /* If this was a workshare clause, see if it had been combined
3732 with its parallel. In that case, continue looking for the
3733 clauses also on the parallel statement itself. */
3734 if (is_parallel_ctx (ctx))
3735 break;
3736
3737 ctx = ctx->outer;
3738 if (ctx == NULL || !is_parallel_ctx (ctx))
3739 break;
3740
75a70cf9 3741 c = find_omp_clause (gimple_omp_parallel_clauses (ctx->stmt),
fd6481cf 3742 OMP_CLAUSE_LASTPRIVATE);
3743 par_clauses = true;
3744 }
1e8e9920 3745 }
3746
75a70cf9 3747 if (label)
3748 gimple_seq_add_stmt (stmt_list, gimple_build_label (label));
1e8e9920 3749}
3750
773c5ba7 3751
1e8e9920 3752/* Generate code to implement the REDUCTION clauses. */
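/* For example (sketch only; s.priv denotes a thread's private copy),
   a lone reduction(+:s) clause is merged back with a single atomic
   update

	#pragma omp atomic
	s = s + s.priv;

   while multiple reductions, array reductions and UDRs are instead
   serialized between GOMP_atomic_start () and GOMP_atomic_end ().  */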
3753
3754static void
75a70cf9 3755lower_reduction_clauses (tree clauses, gimple_seq *stmt_seqp, omp_context *ctx)
1e8e9920 3756{
75a70cf9 3757 gimple_seq sub_seq = NULL;
3758 gimple stmt;
3759 tree x, c;
1e8e9920 3760 int count = 0;
3761
3d483a94 3762 /* SIMD reductions are handled in lower_rec_input_clauses. */
3763 if (gimple_code (ctx->stmt) == GIMPLE_OMP_FOR
3764 && gimple_omp_for_kind (ctx->stmt) == GF_OMP_FOR_KIND_SIMD)
3765 return;
3766
1e8e9920 3767 /* First see if there is exactly one reduction clause. Use OMP_ATOMIC
3768 update in that case, otherwise use a lock. */
3769 for (c = clauses; c && count < 2; c = OMP_CLAUSE_CHAIN (c))
55d6e7cd 3770 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION)
1e8e9920 3771 {
3772 if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
3773 {
bc7bff74 3774 /* Never use OMP_ATOMIC for array reductions or UDRs. */
1e8e9920 3775 count = -1;
3776 break;
3777 }
3778 count++;
3779 }
3780
3781 if (count == 0)
3782 return;
3783
3784 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
3785 {
3786 tree var, ref, new_var;
3787 enum tree_code code;
389dd41b 3788 location_t clause_loc = OMP_CLAUSE_LOCATION (c);
1e8e9920 3789
55d6e7cd 3790 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_REDUCTION)
1e8e9920 3791 continue;
3792
3793 var = OMP_CLAUSE_DECL (c);
3794 new_var = lookup_decl (var, ctx);
3795 if (is_reference (var))
182cf5a9 3796 new_var = build_simple_mem_ref_loc (clause_loc, new_var);
1e8e9920 3797 ref = build_outer_var_ref (var, ctx);
3798 code = OMP_CLAUSE_REDUCTION_CODE (c);
773c5ba7 3799
3800 /* reduction(-:var) sums up the partial results, so it acts
3801 identically to reduction(+:var). */
1e8e9920 3802 if (code == MINUS_EXPR)
3803 code = PLUS_EXPR;
3804
3805 if (count == 1)
3806 {
389dd41b 3807 tree addr = build_fold_addr_expr_loc (clause_loc, ref);
1e8e9920 3808
3809 addr = save_expr (addr);
3810 ref = build1 (INDIRECT_REF, TREE_TYPE (TREE_TYPE (addr)), addr);
389dd41b 3811 x = fold_build2_loc (clause_loc, code, TREE_TYPE (ref), ref, new_var);
1e8e9920 3812 x = build2 (OMP_ATOMIC, void_type_node, addr, x);
75a70cf9 3813 gimplify_and_add (x, stmt_seqp);
1e8e9920 3814 return;
3815 }
3816
3817 if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
3818 {
3819 tree placeholder = OMP_CLAUSE_REDUCTION_PLACEHOLDER (c);
3820
bc7bff74 3821 if (is_reference (var)
3822 && !useless_type_conversion_p (TREE_TYPE (placeholder),
3823 TREE_TYPE (ref)))
389dd41b 3824 ref = build_fold_addr_expr_loc (clause_loc, ref);
1e8e9920 3825 SET_DECL_VALUE_EXPR (placeholder, ref);
3826 DECL_HAS_VALUE_EXPR_P (placeholder) = 1;
e3a19533 3827 lower_omp (&OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c), ctx);
75a70cf9 3828 gimple_seq_add_seq (&sub_seq, OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c));
3829 OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c) = NULL;
1e8e9920 3830 OMP_CLAUSE_REDUCTION_PLACEHOLDER (c) = NULL;
3831 }
3832 else
3833 {
3834 x = build2 (code, TREE_TYPE (ref), ref, new_var);
3835 ref = build_outer_var_ref (var, ctx);
75a70cf9 3836 gimplify_assign (ref, x, &sub_seq);
1e8e9920 3837 }
3838 }
3839
b9a16870 3840 stmt = gimple_build_call (builtin_decl_explicit (BUILT_IN_GOMP_ATOMIC_START),
3841 0);
75a70cf9 3842 gimple_seq_add_stmt (stmt_seqp, stmt);
1e8e9920 3843
75a70cf9 3844 gimple_seq_add_seq (stmt_seqp, sub_seq);
1e8e9920 3845
b9a16870 3846 stmt = gimple_build_call (builtin_decl_explicit (BUILT_IN_GOMP_ATOMIC_END),
3847 0);
75a70cf9 3848 gimple_seq_add_stmt (stmt_seqp, stmt);
1e8e9920 3849}
3850
773c5ba7 3851
1e8e9920 3852/* Generate code to implement the COPYPRIVATE clauses. */
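/* Illustratively (a sketch; the record and field names vary), for
   copyprivate(x) the thread that executed the single region broadcasts
   x via the copyout record in SLIST and every thread reads it back in
   RLIST:

	<record>.x = &x;	(or x itself, if not passed by reference)
	x = *<record>.x;  */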
3853
3854static void
75a70cf9 3855lower_copyprivate_clauses (tree clauses, gimple_seq *slist, gimple_seq *rlist,
1e8e9920 3856 omp_context *ctx)
3857{
3858 tree c;
3859
3860 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
3861 {
cb561506 3862 tree var, new_var, ref, x;
1e8e9920 3863 bool by_ref;
389dd41b 3864 location_t clause_loc = OMP_CLAUSE_LOCATION (c);
1e8e9920 3865
55d6e7cd 3866 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_COPYPRIVATE)
1e8e9920 3867 continue;
3868
3869 var = OMP_CLAUSE_DECL (c);
e8a588af 3870 by_ref = use_pointer_for_field (var, NULL);
1e8e9920 3871
3872 ref = build_sender_ref (var, ctx);
cb561506 3873 x = new_var = lookup_decl_in_outer_ctx (var, ctx);
3874 if (by_ref)
3875 {
3876 x = build_fold_addr_expr_loc (clause_loc, new_var);
3877 x = fold_convert_loc (clause_loc, TREE_TYPE (ref), x);
3878 }
75a70cf9 3879 gimplify_assign (ref, x, slist);
1e8e9920 3880
cb561506 3881 ref = build_receiver_ref (var, false, ctx);
3882 if (by_ref)
3883 {
3884 ref = fold_convert_loc (clause_loc,
3885 build_pointer_type (TREE_TYPE (new_var)),
3886 ref);
3887 ref = build_fold_indirect_ref_loc (clause_loc, ref);
3888 }
1e8e9920 3889 if (is_reference (var))
3890 {
cb561506 3891 ref = fold_convert_loc (clause_loc, TREE_TYPE (new_var), ref);
182cf5a9 3892 ref = build_simple_mem_ref_loc (clause_loc, ref);
3893 new_var = build_simple_mem_ref_loc (clause_loc, new_var);
1e8e9920 3894 }
cb561506 3895 x = lang_hooks.decls.omp_clause_assign_op (c, new_var, ref);
1e8e9920 3896 gimplify_and_add (x, rlist);
3897 }
3898}
3899
773c5ba7 3900
1e8e9920 3901 /* Generate code to implement the FIRSTPRIVATE, COPYIN, LASTPRIVATE
3902    and REDUCTION clauses from the sender (aka parent) side.  */
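/* E.g. (a sketch, using the .omp_data_o name seen in gimple dumps)
   firstprivate(a) stores the parent's value before the call, and a
   scalar lastprivate(a) reads the result back afterwards:

	.omp_data_o.a = a;	<- ILIST, do_in
	a = .omp_data_o.a;	<- OLIST, do_out  */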
3903
3904static void
75a70cf9 3905lower_send_clauses (tree clauses, gimple_seq *ilist, gimple_seq *olist,
3906 omp_context *ctx)
1e8e9920 3907{
3908 tree c;
3909
3910 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
3911 {
773c5ba7 3912 tree val, ref, x, var;
1e8e9920 3913 bool by_ref, do_in = false, do_out = false;
389dd41b 3914 location_t clause_loc = OMP_CLAUSE_LOCATION (c);
1e8e9920 3915
55d6e7cd 3916 switch (OMP_CLAUSE_CODE (c))
1e8e9920 3917 {
fd6481cf 3918 case OMP_CLAUSE_PRIVATE:
3919 if (OMP_CLAUSE_PRIVATE_OUTER_REF (c))
3920 break;
3921 continue;
1e8e9920 3922 case OMP_CLAUSE_FIRSTPRIVATE:
3923 case OMP_CLAUSE_COPYIN:
3924 case OMP_CLAUSE_LASTPRIVATE:
3925 case OMP_CLAUSE_REDUCTION:
bc7bff74 3926 case OMP_CLAUSE__LOOPTEMP_:
1e8e9920 3927 break;
3928 default:
3929 continue;
3930 }
3931
87b31375 3932 val = OMP_CLAUSE_DECL (c);
3933 var = lookup_decl_in_outer_ctx (val, ctx);
773c5ba7 3934
f49d7bb5 3935 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_COPYIN
3936 && is_global_var (var))
3937 continue;
1e8e9920 3938 if (is_variable_sized (val))
3939 continue;
e8a588af 3940 by_ref = use_pointer_for_field (val, NULL);
1e8e9920 3941
55d6e7cd 3942 switch (OMP_CLAUSE_CODE (c))
1e8e9920 3943 {
fd6481cf 3944 case OMP_CLAUSE_PRIVATE:
1e8e9920 3945 case OMP_CLAUSE_FIRSTPRIVATE:
3946 case OMP_CLAUSE_COPYIN:
bc7bff74 3947 case OMP_CLAUSE__LOOPTEMP_:
1e8e9920 3948 do_in = true;
3949 break;
3950
3951 case OMP_CLAUSE_LASTPRIVATE:
3952 if (by_ref || is_reference (val))
3953 {
3954 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
3955 continue;
3956 do_in = true;
3957 }
3958 else
fd6481cf 3959 {
3960 do_out = true;
3961 if (lang_hooks.decls.omp_private_outer_ref (val))
3962 do_in = true;
3963 }
1e8e9920 3964 break;
3965
3966 case OMP_CLAUSE_REDUCTION:
3967 do_in = true;
3968 do_out = !(by_ref || is_reference (val));
3969 break;
3970
3971 default:
3972 gcc_unreachable ();
3973 }
3974
3975 if (do_in)
3976 {
3977 ref = build_sender_ref (val, ctx);
389dd41b 3978 x = by_ref ? build_fold_addr_expr_loc (clause_loc, var) : var;
75a70cf9 3979 gimplify_assign (ref, x, ilist);
fd6481cf 3980 if (is_task_ctx (ctx))
3981 DECL_ABSTRACT_ORIGIN (TREE_OPERAND (ref, 1)) = NULL;
1e8e9920 3982 }
773c5ba7 3983
1e8e9920 3984 if (do_out)
3985 {
3986 ref = build_sender_ref (val, ctx);
75a70cf9 3987 gimplify_assign (var, ref, olist);
1e8e9920 3988 }
3989 }
3990}
3991
75a70cf9 3992/* Generate code to implement SHARED from the sender (aka parent)
3993 side. This is trickier, since GIMPLE_OMP_PARALLEL_CLAUSES doesn't
3994 list things that got automatically shared. */
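/* Sketch: a shared variable that must live in the parent is sent by
   address,

	.omp_data_o.i = &i;

   whereas one copied by value is stored into the field in ILIST and,
   unless it is read-only or a by-reference RESULT/PARM_DECL, copied
   back out in OLIST after the region.  */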
1e8e9920 3995
3996static void
75a70cf9 3997lower_send_shared_vars (gimple_seq *ilist, gimple_seq *olist, omp_context *ctx)
1e8e9920 3998{
fd6481cf 3999 tree var, ovar, nvar, f, x, record_type;
1e8e9920 4000
4001 if (ctx->record_type == NULL)
4002 return;
773c5ba7 4003
fd6481cf 4004 record_type = ctx->srecord_type ? ctx->srecord_type : ctx->record_type;
1767a056 4005 for (f = TYPE_FIELDS (record_type); f ; f = DECL_CHAIN (f))
1e8e9920 4006 {
4007 ovar = DECL_ABSTRACT_ORIGIN (f);
4008 nvar = maybe_lookup_decl (ovar, ctx);
4009 if (!nvar || !DECL_HAS_VALUE_EXPR_P (nvar))
4010 continue;
4011
773c5ba7 4012	      /* If CTX is a nested parallel directive, find the immediately
4013 enclosing parallel or workshare construct that contains a
4014 mapping for OVAR. */
87b31375 4015 var = lookup_decl_in_outer_ctx (ovar, ctx);
773c5ba7 4016
e8a588af 4017 if (use_pointer_for_field (ovar, ctx))
1e8e9920 4018 {
4019 x = build_sender_ref (ovar, ctx);
773c5ba7 4020 var = build_fold_addr_expr (var);
75a70cf9 4021 gimplify_assign (x, var, ilist);
1e8e9920 4022 }
4023 else
4024 {
4025 x = build_sender_ref (ovar, ctx);
75a70cf9 4026 gimplify_assign (x, var, ilist);
1e8e9920 4027
d2263ebb 4028 if (!TREE_READONLY (var)
4029 /* We don't need to receive a new reference to a result
4030 or parm decl. In fact we may not store to it as we will
4031 invalidate any pending RSO and generate wrong gimple
4032 during inlining. */
4033 && !((TREE_CODE (var) == RESULT_DECL
4034 || TREE_CODE (var) == PARM_DECL)
4035 && DECL_BY_REFERENCE (var)))
fd6481cf 4036 {
4037 x = build_sender_ref (ovar, ctx);
75a70cf9 4038 gimplify_assign (var, x, olist);
fd6481cf 4039 }
1e8e9920 4040 }
4041 }
4042}
4043
75a70cf9 4044
4045/* A convenience function to build an empty GIMPLE_COND with just the
4046 condition. */
4047
4048static gimple
4049gimple_build_cond_empty (tree cond)
4050{
4051 enum tree_code pred_code;
4052 tree lhs, rhs;
4053
4054 gimple_cond_get_ops_from_tree (cond, &pred_code, &lhs, &rhs);
4055 return gimple_build_cond (pred_code, lhs, rhs, NULL_TREE, NULL_TREE);
4056}
4057
4058
48e1416a 4059 /* Build the function calls to GOMP_parallel etc. to actually
773c5ba7 4060    generate the parallel operation.  REGION is the parallel region
4061    being expanded.  BB is the block into which to insert the code.
4062    WS_ARGS will be set if this is a call to a combined parallel+workshare
4063    construct; it contains the list of additional arguments needed by
4064    the workshare construct.  */
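/* For reference, a sketch of the libgomp entry points selected below:
   a plain parallel becomes

	GOMP_parallel (child_fn, &.omp_data_o, num_threads, flags);

   while a combined parallel loop or sections region calls
   GOMP_parallel_loop_{static,dynamic,guided,runtime} resp.
   GOMP_parallel_sections instead, with the extra arguments spliced in
   from WS_ARGS.  */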
1e8e9920 4065
4066static void
61e47ac8 4067expand_parallel_call (struct omp_region *region, basic_block bb,
f1f41a6c 4068 gimple entry_stmt, vec<tree, va_gc> *ws_args)
1e8e9920 4069{
bc7bff74 4070 tree t, t1, t2, val, cond, c, clauses, flags;
75a70cf9 4071 gimple_stmt_iterator gsi;
4072 gimple stmt;
b9a16870 4073 enum built_in_function start_ix;
4074 int start_ix2;
389dd41b 4075 location_t clause_loc;
f1f41a6c 4076 vec<tree, va_gc> *args;
773c5ba7 4077
75a70cf9 4078 clauses = gimple_omp_parallel_clauses (entry_stmt);
773c5ba7 4079
bc7bff74 4080 /* Determine what flavor of GOMP_parallel we will be
773c5ba7 4081 emitting. */
bc7bff74 4082 start_ix = BUILT_IN_GOMP_PARALLEL;
773c5ba7 4083 if (is_combined_parallel (region))
4084 {
61e47ac8 4085 switch (region->inner->type)
773c5ba7 4086 {
75a70cf9 4087 case GIMPLE_OMP_FOR:
fd6481cf 4088 gcc_assert (region->inner->sched_kind != OMP_CLAUSE_SCHEDULE_AUTO);
bc7bff74 4089 start_ix2 = ((int)BUILT_IN_GOMP_PARALLEL_LOOP_STATIC
b9a16870 4090 + (region->inner->sched_kind
4091 == OMP_CLAUSE_SCHEDULE_RUNTIME
4092 ? 3 : region->inner->sched_kind));
4093 start_ix = (enum built_in_function)start_ix2;
61e47ac8 4094 break;
75a70cf9 4095 case GIMPLE_OMP_SECTIONS:
bc7bff74 4096 start_ix = BUILT_IN_GOMP_PARALLEL_SECTIONS;
61e47ac8 4097 break;
4098 default:
4099 gcc_unreachable ();
773c5ba7 4100 }
773c5ba7 4101 }
1e8e9920 4102
4103 /* By default, the value of NUM_THREADS is zero (selected at run time)
4104 and there is no conditional. */
4105 cond = NULL_TREE;
4106 val = build_int_cst (unsigned_type_node, 0);
bc7bff74 4107 flags = build_int_cst (unsigned_type_node, 0);
1e8e9920 4108
4109 c = find_omp_clause (clauses, OMP_CLAUSE_IF);
4110 if (c)
4111 cond = OMP_CLAUSE_IF_EXPR (c);
4112
4113 c = find_omp_clause (clauses, OMP_CLAUSE_NUM_THREADS);
4114 if (c)
389dd41b 4115 {
4116 val = OMP_CLAUSE_NUM_THREADS_EXPR (c);
4117 clause_loc = OMP_CLAUSE_LOCATION (c);
4118 }
4119 else
4120 clause_loc = gimple_location (entry_stmt);
1e8e9920 4121
bc7bff74 4122 c = find_omp_clause (clauses, OMP_CLAUSE_PROC_BIND);
4123 if (c)
4124 flags = build_int_cst (unsigned_type_node, OMP_CLAUSE_PROC_BIND_KIND (c));
4125
1e8e9920 4126 /* Ensure 'val' is of the correct type. */
389dd41b 4127 val = fold_convert_loc (clause_loc, unsigned_type_node, val);
1e8e9920 4128
4129 /* If we found the clause 'if (cond)', build either
4130 (cond != 0) or (cond ? val : 1u). */
4131 if (cond)
4132 {
75a70cf9 4133 gimple_stmt_iterator gsi;
773c5ba7 4134
4135 cond = gimple_boolify (cond);
4136
1e8e9920 4137 if (integer_zerop (val))
389dd41b 4138 val = fold_build2_loc (clause_loc,
4139 EQ_EXPR, unsigned_type_node, cond,
79acaae1 4140 build_int_cst (TREE_TYPE (cond), 0));
1e8e9920 4141 else
773c5ba7 4142 {
4143 basic_block cond_bb, then_bb, else_bb;
79acaae1 4144 edge e, e_then, e_else;
75a70cf9 4145 tree tmp_then, tmp_else, tmp_join, tmp_var;
79acaae1 4146
4147 tmp_var = create_tmp_var (TREE_TYPE (val), NULL);
4148 if (gimple_in_ssa_p (cfun))
4149 {
75a70cf9 4150 tmp_then = make_ssa_name (tmp_var, NULL);
4151 tmp_else = make_ssa_name (tmp_var, NULL);
4152 tmp_join = make_ssa_name (tmp_var, NULL);
79acaae1 4153 }
4154 else
4155 {
4156 tmp_then = tmp_var;
4157 tmp_else = tmp_var;
4158 tmp_join = tmp_var;
4159 }
773c5ba7 4160
773c5ba7 4161 e = split_block (bb, NULL);
4162 cond_bb = e->src;
4163 bb = e->dest;
4164 remove_edge (e);
4165
4166 then_bb = create_empty_bb (cond_bb);
4167 else_bb = create_empty_bb (then_bb);
79acaae1 4168 set_immediate_dominator (CDI_DOMINATORS, then_bb, cond_bb);
4169 set_immediate_dominator (CDI_DOMINATORS, else_bb, cond_bb);
773c5ba7 4170
75a70cf9 4171 stmt = gimple_build_cond_empty (cond);
4172 gsi = gsi_start_bb (cond_bb);
4173 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
773c5ba7 4174
75a70cf9 4175 gsi = gsi_start_bb (then_bb);
4176 stmt = gimple_build_assign (tmp_then, val);
4177 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
773c5ba7 4178
75a70cf9 4179 gsi = gsi_start_bb (else_bb);
4180 stmt = gimple_build_assign
4181 (tmp_else, build_int_cst (unsigned_type_node, 1));
4182 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
773c5ba7 4183
4184 make_edge (cond_bb, then_bb, EDGE_TRUE_VALUE);
4185 make_edge (cond_bb, else_bb, EDGE_FALSE_VALUE);
f6568ea4 4186 if (current_loops)
4187 {
4188 add_bb_to_loop (then_bb, cond_bb->loop_father);
4189 add_bb_to_loop (else_bb, cond_bb->loop_father);
4190 }
79acaae1 4191 e_then = make_edge (then_bb, bb, EDGE_FALLTHRU);
4192 e_else = make_edge (else_bb, bb, EDGE_FALLTHRU);
773c5ba7 4193
79acaae1 4194 if (gimple_in_ssa_p (cfun))
4195 {
75a70cf9 4196 gimple phi = create_phi_node (tmp_join, bb);
60d535d2 4197 add_phi_arg (phi, tmp_then, e_then, UNKNOWN_LOCATION);
4198 add_phi_arg (phi, tmp_else, e_else, UNKNOWN_LOCATION);
79acaae1 4199 }
4200
4201 val = tmp_join;
773c5ba7 4202 }
4203
75a70cf9 4204 gsi = gsi_start_bb (bb);
4205 val = force_gimple_operand_gsi (&gsi, val, true, NULL_TREE,
4206 false, GSI_CONTINUE_LINKING);
1e8e9920 4207 }
4208
75a70cf9 4209 gsi = gsi_last_bb (bb);
4210 t = gimple_omp_parallel_data_arg (entry_stmt);
1e8e9920 4211 if (t == NULL)
c2f47e15 4212 t1 = null_pointer_node;
1e8e9920 4213 else
c2f47e15 4214 t1 = build_fold_addr_expr (t);
75a70cf9 4215 t2 = build_fold_addr_expr (gimple_omp_parallel_child_fn (entry_stmt));
773c5ba7 4216
bc7bff74 4217 vec_alloc (args, 4 + vec_safe_length (ws_args));
f1f41a6c 4218 args->quick_push (t2);
4219 args->quick_push (t1);
4220 args->quick_push (val);
4221 if (ws_args)
4222 args->splice (*ws_args);
bc7bff74 4223 args->quick_push (flags);
414c3a2c 4224
4225 t = build_call_expr_loc_vec (UNKNOWN_LOCATION,
b9a16870 4226 builtin_decl_explicit (start_ix), args);
773c5ba7 4227
75a70cf9 4228 force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
4229 false, GSI_CONTINUE_LINKING);
1e8e9920 4230}
4231
773c5ba7 4232
fd6481cf 4233/* Build the function call to GOMP_task to actually
4234    generate the task operation.  BB is the block into which to insert the code.  */
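/* Sketch of the call built here, matching the 8-argument form below:

	GOMP_task (child_fn, &.omp_data_o, cpyfn, arg_size, arg_align,
		   if_cond, flags, depend);

   FLAGS encodes untied (1), a final clause evaluating to true (2),
   mergeable (4) and the presence of depend clauses (8).  */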
4235
4236static void
75a70cf9 4237expand_task_call (basic_block bb, gimple entry_stmt)
fd6481cf 4238{
bc7bff74 4239 tree t, t1, t2, t3, flags, cond, c, c2, clauses, depend;
75a70cf9 4240 gimple_stmt_iterator gsi;
389dd41b 4241 location_t loc = gimple_location (entry_stmt);
fd6481cf 4242
75a70cf9 4243 clauses = gimple_omp_task_clauses (entry_stmt);
fd6481cf 4244
fd6481cf 4245 c = find_omp_clause (clauses, OMP_CLAUSE_IF);
4246 if (c)
4247 cond = gimple_boolify (OMP_CLAUSE_IF_EXPR (c));
4248 else
4249 cond = boolean_true_node;
4250
4251 c = find_omp_clause (clauses, OMP_CLAUSE_UNTIED);
2169f33b 4252 c2 = find_omp_clause (clauses, OMP_CLAUSE_MERGEABLE);
bc7bff74 4253 depend = find_omp_clause (clauses, OMP_CLAUSE_DEPEND);
2169f33b 4254 flags = build_int_cst (unsigned_type_node,
bc7bff74 4255 (c ? 1 : 0) + (c2 ? 4 : 0) + (depend ? 8 : 0));
2169f33b 4256
4257 c = find_omp_clause (clauses, OMP_CLAUSE_FINAL);
4258 if (c)
4259 {
4260 c = gimple_boolify (OMP_CLAUSE_FINAL_EXPR (c));
4261 c = fold_build3_loc (loc, COND_EXPR, unsigned_type_node, c,
4262 build_int_cst (unsigned_type_node, 2),
4263 build_int_cst (unsigned_type_node, 0));
4264 flags = fold_build2_loc (loc, PLUS_EXPR, unsigned_type_node, flags, c);
4265 }
bc7bff74 4266 if (depend)
4267 depend = OMP_CLAUSE_DECL (depend);
4268 else
4269 depend = build_int_cst (ptr_type_node, 0);
fd6481cf 4270
75a70cf9 4271 gsi = gsi_last_bb (bb);
4272 t = gimple_omp_task_data_arg (entry_stmt);
fd6481cf 4273 if (t == NULL)
4274 t2 = null_pointer_node;
4275 else
389dd41b 4276 t2 = build_fold_addr_expr_loc (loc, t);
4277 t1 = build_fold_addr_expr_loc (loc, gimple_omp_task_child_fn (entry_stmt));
75a70cf9 4278 t = gimple_omp_task_copy_fn (entry_stmt);
fd6481cf 4279 if (t == NULL)
4280 t3 = null_pointer_node;
4281 else
389dd41b 4282 t3 = build_fold_addr_expr_loc (loc, t);
fd6481cf 4283
b9a16870 4284 t = build_call_expr (builtin_decl_explicit (BUILT_IN_GOMP_TASK),
bc7bff74 4285 8, t1, t2, t3,
75a70cf9 4286 gimple_omp_task_arg_size (entry_stmt),
bc7bff74 4287 gimple_omp_task_arg_align (entry_stmt), cond, flags,
4288 depend);
fd6481cf 4289
75a70cf9 4290 force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
4291 false, GSI_CONTINUE_LINKING);
fd6481cf 4292}
4293
4294
75a70cf9 4295/* If exceptions are enabled, wrap the statements in BODY in a MUST_NOT_THROW
4296 catch handler and return it. This prevents programs from violating the
4297 structured block semantics with throws. */
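/* Conceptually the result is

	try { BODY }
	catch <MUST_NOT_THROW> { <eh_protect_cleanup_actions, e.g.
				  std::terminate for C++, or
				  __builtin_trap as the fallback> }

   expressed as a GIMPLE_TRY_CATCH around an EH_MUST_NOT_THROW.  */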
1e8e9920 4298
75a70cf9 4299static gimple_seq
4300maybe_catch_exception (gimple_seq body)
1e8e9920 4301{
e38def9c 4302 gimple g;
4303 tree decl;
1e8e9920 4304
4305 if (!flag_exceptions)
75a70cf9 4306 return body;
1e8e9920 4307
596981c8 4308 if (lang_hooks.eh_protect_cleanup_actions != NULL)
4309 decl = lang_hooks.eh_protect_cleanup_actions ();
1e8e9920 4310 else
b9a16870 4311 decl = builtin_decl_explicit (BUILT_IN_TRAP);
75a70cf9 4312
e38def9c 4313 g = gimple_build_eh_must_not_throw (decl);
4314 g = gimple_build_try (body, gimple_seq_alloc_with_stmt (g),
75a70cf9 4315 GIMPLE_TRY_CATCH);
1e8e9920 4316
e38def9c 4317 return gimple_seq_alloc_with_stmt (g);
1e8e9920 4318}
4319
773c5ba7 4320/* Chain all the DECLs in LIST by their TREE_CHAIN fields. */
1e8e9920 4321
773c5ba7 4322static tree
f1f41a6c 4323vec2chain (vec<tree, va_gc> *v)
1e8e9920 4324{
2ab2ce89 4325 tree chain = NULL_TREE, t;
4326 unsigned ix;
1e8e9920 4327
f1f41a6c 4328 FOR_EACH_VEC_SAFE_ELT_REVERSE (v, ix, t)
773c5ba7 4329 {
1767a056 4330 DECL_CHAIN (t) = chain;
2ab2ce89 4331 chain = t;
773c5ba7 4332 }
1e8e9920 4333
2ab2ce89 4334 return chain;
773c5ba7 4335}
1e8e9920 4336
1e8e9920 4337
773c5ba7 4338 /* Remove barriers in REGION->EXIT's block.  Note that this is only
75a70cf9 4339    valid for GIMPLE_OMP_PARALLEL regions.  Since the end of a parallel region
4340    is an implicit barrier, any barrier that a workshare inside the
4341    GIMPLE_OMP_PARALLEL left at the end of the region can now be
773c5ba7 4342    removed.  */
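/* E.g. in

	#pragma omp parallel
	{
	  #pragma omp for
	  ...			<- implicit barrier after the loop
	}			<- implicit barrier here as well

   the inner barrier is redundant and may be dropped, unless queued
   tasks could still reference addressable locals of the parallel (the
   check performed below).  */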
1e8e9920 4343
773c5ba7 4344static void
4345remove_exit_barrier (struct omp_region *region)
4346{
75a70cf9 4347 gimple_stmt_iterator gsi;
773c5ba7 4348 basic_block exit_bb;
61e47ac8 4349 edge_iterator ei;
4350 edge e;
75a70cf9 4351 gimple stmt;
4a04f4b4 4352 int any_addressable_vars = -1;
1e8e9920 4353
61e47ac8 4354 exit_bb = region->exit;
1e8e9920 4355
5056ba1a 4356 /* If the parallel region doesn't return, we don't have REGION->EXIT
4357 block at all. */
4358 if (! exit_bb)
4359 return;
4360
75a70cf9 4361 /* The last insn in the block will be the parallel's GIMPLE_OMP_RETURN. The
4362 workshare's GIMPLE_OMP_RETURN will be in a preceding block. The kinds of
61e47ac8 4363 statements that can appear in between are extremely limited -- no
4364 memory operations at all. Here, we allow nothing at all, so the
75a70cf9 4365 only thing we allow to precede this GIMPLE_OMP_RETURN is a label. */
4366 gsi = gsi_last_bb (exit_bb);
4367 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_RETURN);
4368 gsi_prev (&gsi);
4369 if (!gsi_end_p (gsi) && gimple_code (gsi_stmt (gsi)) != GIMPLE_LABEL)
773c5ba7 4370 return;
1e8e9920 4371
61e47ac8 4372 FOR_EACH_EDGE (e, ei, exit_bb->preds)
4373 {
75a70cf9 4374 gsi = gsi_last_bb (e->src);
4375 if (gsi_end_p (gsi))
61e47ac8 4376 continue;
75a70cf9 4377 stmt = gsi_stmt (gsi);
4a04f4b4 4378 if (gimple_code (stmt) == GIMPLE_OMP_RETURN
4379 && !gimple_omp_return_nowait_p (stmt))
4380 {
4381 /* OpenMP 3.0 tasks unfortunately prevent this optimization
4382 in many cases. If there could be tasks queued, the barrier
4383 might be needed to let the tasks run before some local
4384 variable of the parallel that the task uses as shared
4385 runs out of scope. The task can be spawned either
4386	         from within the current function (this would be easy to check)
4387 or from some function it calls and gets passed an address
4388 of such a variable. */
4389 if (any_addressable_vars < 0)
4390 {
4391 gimple parallel_stmt = last_stmt (region->entry);
4392 tree child_fun = gimple_omp_parallel_child_fn (parallel_stmt);
2ab2ce89 4393 tree local_decls, block, decl;
4394 unsigned ix;
4a04f4b4 4395
4396 any_addressable_vars = 0;
2ab2ce89 4397 FOR_EACH_LOCAL_DECL (DECL_STRUCT_FUNCTION (child_fun), ix, decl)
4398 if (TREE_ADDRESSABLE (decl))
4a04f4b4 4399 {
4400 any_addressable_vars = 1;
4401 break;
4402 }
4403 for (block = gimple_block (stmt);
4404 !any_addressable_vars
4405 && block
4406 && TREE_CODE (block) == BLOCK;
4407 block = BLOCK_SUPERCONTEXT (block))
4408 {
4409 for (local_decls = BLOCK_VARS (block);
4410 local_decls;
1767a056 4411 local_decls = DECL_CHAIN (local_decls))
4a04f4b4 4412 if (TREE_ADDRESSABLE (local_decls))
4413 {
4414 any_addressable_vars = 1;
4415 break;
4416 }
4417 if (block == gimple_block (parallel_stmt))
4418 break;
4419 }
4420 }
4421 if (!any_addressable_vars)
4422 gimple_omp_return_set_nowait (stmt);
4423 }
61e47ac8 4424 }
1e8e9920 4425}
4426
61e47ac8 4427static void
4428remove_exit_barriers (struct omp_region *region)
4429{
75a70cf9 4430 if (region->type == GIMPLE_OMP_PARALLEL)
61e47ac8 4431 remove_exit_barrier (region);
4432
4433 if (region->inner)
4434 {
4435 region = region->inner;
4436 remove_exit_barriers (region);
4437 while (region->next)
4438 {
4439 region = region->next;
4440 remove_exit_barriers (region);
4441 }
4442 }
4443}
773c5ba7 4444
658b4427 4445 /* Optimize omp_get_thread_num () and omp_get_num_threads ()
4446    calls.  These can't be declared as const functions, but
4447    within one parallel body they are constant, so they can be
4448    transformed there into __builtin_omp_get_{thread_num,num_threads} (),
fd6481cf 4449    which are declared const.  Similarly for a task body, except
4450    that in an untied task omp_get_thread_num () can change at any task
4451    scheduling point.  */
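/* Sketch of the effect: within one parallel body

	a = omp_get_num_threads ();
	...
	b = omp_get_num_threads ();

   both calls are redirected to the const builtin, so later passes can
   CSE the second call away.  */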
658b4427 4452
4453static void
75a70cf9 4454optimize_omp_library_calls (gimple entry_stmt)
658b4427 4455{
4456 basic_block bb;
75a70cf9 4457 gimple_stmt_iterator gsi;
b9a16870 4458 tree thr_num_tree = builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM);
4459 tree thr_num_id = DECL_ASSEMBLER_NAME (thr_num_tree);
4460 tree num_thr_tree = builtin_decl_explicit (BUILT_IN_OMP_GET_NUM_THREADS);
4461 tree num_thr_id = DECL_ASSEMBLER_NAME (num_thr_tree);
75a70cf9 4462 bool untied_task = (gimple_code (entry_stmt) == GIMPLE_OMP_TASK
4463 && find_omp_clause (gimple_omp_task_clauses (entry_stmt),
fd6481cf 4464 OMP_CLAUSE_UNTIED) != NULL);
658b4427 4465
4466 FOR_EACH_BB (bb)
75a70cf9 4467 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
658b4427 4468 {
75a70cf9 4469 gimple call = gsi_stmt (gsi);
658b4427 4470 tree decl;
4471
75a70cf9 4472 if (is_gimple_call (call)
4473 && (decl = gimple_call_fndecl (call))
658b4427 4474 && DECL_EXTERNAL (decl)
4475 && TREE_PUBLIC (decl)
4476 && DECL_INITIAL (decl) == NULL)
4477 {
4478 tree built_in;
4479
4480 if (DECL_NAME (decl) == thr_num_id)
fd6481cf 4481 {
4482 /* In #pragma omp task untied omp_get_thread_num () can change
4483 during the execution of the task region. */
4484 if (untied_task)
4485 continue;
b9a16870 4486 built_in = builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM);
fd6481cf 4487 }
658b4427 4488 else if (DECL_NAME (decl) == num_thr_id)
b9a16870 4489 built_in = builtin_decl_explicit (BUILT_IN_OMP_GET_NUM_THREADS);
658b4427 4490 else
4491 continue;
4492
4493 if (DECL_ASSEMBLER_NAME (decl) != DECL_ASSEMBLER_NAME (built_in)
75a70cf9 4494 || gimple_call_num_args (call) != 0)
658b4427 4495 continue;
4496
4497 if (flag_exceptions && !TREE_NOTHROW (decl))
4498 continue;
4499
4500 if (TREE_CODE (TREE_TYPE (decl)) != FUNCTION_TYPE
1ea6a73c 4501 || !types_compatible_p (TREE_TYPE (TREE_TYPE (decl)),
4502 TREE_TYPE (TREE_TYPE (built_in))))
658b4427 4503 continue;
4504
0acacf9e 4505 gimple_call_set_fndecl (call, built_in);
658b4427 4506 }
4507 }
4508}
4509
8e6b4515 4510/* Callback for expand_omp_build_assign. Return non-NULL if *tp needs to be
4511 regimplified. */
4512
4513static tree
4514expand_omp_regimplify_p (tree *tp, int *walk_subtrees, void *)
4515{
4516 tree t = *tp;
4517
4518 /* Any variable with DECL_VALUE_EXPR needs to be regimplified. */
4519 if (TREE_CODE (t) == VAR_DECL && DECL_HAS_VALUE_EXPR_P (t))
4520 return t;
4521
4522 if (TREE_CODE (t) == ADDR_EXPR)
4523 recompute_tree_invariant_for_addr_expr (t);
4524
4525 *walk_subtrees = !TYPE_P (t) && !DECL_P (t);
4526 return NULL_TREE;
4527}
4528
3d483a94 4529/* Prepend TO = FROM assignment before *GSI_P. */
4530
4531static void
4532expand_omp_build_assign (gimple_stmt_iterator *gsi_p, tree to, tree from)
4533{
4534 bool simple_p = DECL_P (to) && TREE_ADDRESSABLE (to);
4535 from = force_gimple_operand_gsi (gsi_p, from, simple_p, NULL_TREE,
4536 true, GSI_SAME_STMT);
4537 gimple stmt = gimple_build_assign (to, from);
4538 gsi_insert_before (gsi_p, stmt, GSI_SAME_STMT);
4539 if (walk_tree (&from, expand_omp_regimplify_p, NULL, NULL)
4540 || walk_tree (&to, expand_omp_regimplify_p, NULL, NULL))
4541 {
4542 gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
4543 gimple_regimplify_operands (stmt, &gsi);
4544 }
4545}
4546
fd6481cf 4547/* Expand the OpenMP parallel or task directive starting at REGION. */
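/* In outline (a sketch; decl names follow the dump convention):

	#pragma omp parallel shared(a)
	  BODY;

   is split so that BODY moves into a new function

	child_fn (struct .omp_data_s *.omp_data_i) { BODY }

   and the directive itself is replaced by the GOMP_parallel or
   GOMP_task call emitted at the end of this function.  */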
1e8e9920 4548
4549static void
fd6481cf 4550expand_omp_taskreg (struct omp_region *region)
1e8e9920 4551{
773c5ba7 4552 basic_block entry_bb, exit_bb, new_bb;
87d4aa85 4553 struct function *child_cfun;
414c3a2c 4554 tree child_fn, block, t;
75a70cf9 4555 gimple_stmt_iterator gsi;
4556 gimple entry_stmt, stmt;
773c5ba7 4557 edge e;
f1f41a6c 4558 vec<tree, va_gc> *ws_args;
773c5ba7 4559
61e47ac8 4560 entry_stmt = last_stmt (region->entry);
75a70cf9 4561 child_fn = gimple_omp_taskreg_child_fn (entry_stmt);
773c5ba7 4562 child_cfun = DECL_STRUCT_FUNCTION (child_fn);
773c5ba7 4563
61e47ac8 4564 entry_bb = region->entry;
4565 exit_bb = region->exit;
773c5ba7 4566
773c5ba7 4567 if (is_combined_parallel (region))
61e47ac8 4568 ws_args = region->ws_args;
773c5ba7 4569 else
414c3a2c 4570 ws_args = NULL;
1e8e9920 4571
61e47ac8 4572 if (child_cfun->cfg)
1e8e9920 4573 {
773c5ba7 4574 /* Due to inlining, it may happen that we have already outlined
4575 the region, in which case all we need to do is make the
4576 sub-graph unreachable and emit the parallel call. */
4577 edge entry_succ_e, exit_succ_e;
75a70cf9 4578 gimple_stmt_iterator gsi;
773c5ba7 4579
4580 entry_succ_e = single_succ_edge (entry_bb);
773c5ba7 4581
75a70cf9 4582 gsi = gsi_last_bb (entry_bb);
4583 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_PARALLEL
4584 || gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_TASK);
4585 gsi_remove (&gsi, true);
773c5ba7 4586
4587 new_bb = entry_bb;
03ed154b 4588 if (exit_bb)
4589 {
4590 exit_succ_e = single_succ_edge (exit_bb);
4591 make_edge (new_bb, exit_succ_e->dest, EDGE_FALLTHRU);
4592 }
79acaae1 4593 remove_edge_and_dominated_blocks (entry_succ_e);
1e8e9920 4594 }
773c5ba7 4595 else
4596 {
501bdd19 4597 unsigned srcidx, dstidx, num;
2ab2ce89 4598
773c5ba7 4599 /* If the parallel region needs data sent from the parent
3480139d 4600 function, then the very first statement (except possible
4601 tree profile counter updates) of the parallel body
773c5ba7 4602 is a copy assignment .OMP_DATA_I = &.OMP_DATA_O. Since
4603 &.OMP_DATA_O is passed as an argument to the child function,
4604 we need to replace it with the argument as seen by the child
4605 function.
4606
4607 In most cases, this will end up being the identity assignment
4608 .OMP_DATA_I = .OMP_DATA_I. However, if the parallel body had
4609 a function call that has been inlined, the original PARM_DECL
4610 .OMP_DATA_I may have been converted into a different local
4611	         variable, in which case we need to keep the assignment.  */
75a70cf9 4612 if (gimple_omp_taskreg_data_arg (entry_stmt))
773c5ba7 4613 {
4614 basic_block entry_succ_bb = single_succ (entry_bb);
75a70cf9 4615 gimple_stmt_iterator gsi;
4616 tree arg, narg;
4617 gimple parcopy_stmt = NULL;
1e8e9920 4618
75a70cf9 4619 for (gsi = gsi_start_bb (entry_succ_bb); ; gsi_next (&gsi))
3480139d 4620 {
75a70cf9 4621 gimple stmt;
3480139d 4622
75a70cf9 4623 gcc_assert (!gsi_end_p (gsi));
4624 stmt = gsi_stmt (gsi);
4625 if (gimple_code (stmt) != GIMPLE_ASSIGN)
cc6b725b 4626 continue;
4627
75a70cf9 4628 if (gimple_num_ops (stmt) == 2)
3480139d 4629 {
75a70cf9 4630 tree arg = gimple_assign_rhs1 (stmt);
4631
4632			  /* We're ignoring the subcode because we're
4633			     effectively doing a STRIP_NOPS.  */
4634
4635 if (TREE_CODE (arg) == ADDR_EXPR
4636 && TREE_OPERAND (arg, 0)
4637 == gimple_omp_taskreg_data_arg (entry_stmt))
4638 {
4639 parcopy_stmt = stmt;
4640 break;
4641 }
3480139d 4642 }
4643 }
79acaae1 4644
75a70cf9 4645 gcc_assert (parcopy_stmt != NULL);
79acaae1 4646 arg = DECL_ARGUMENTS (child_fn);
4647
4648 if (!gimple_in_ssa_p (cfun))
4649 {
75a70cf9 4650 if (gimple_assign_lhs (parcopy_stmt) == arg)
4651 gsi_remove (&gsi, true);
79acaae1 4652 else
75a70cf9 4653 {
4654 /* ?? Is setting the subcode really necessary ?? */
4655 gimple_omp_set_subcode (parcopy_stmt, TREE_CODE (arg));
4656 gimple_assign_set_rhs1 (parcopy_stmt, arg);
4657 }
79acaae1 4658 }
4659 else
4660 {
4661 /* If we are in ssa form, we must load the value from the default
4662	         definition of the argument.  That definition should not exist
4663	         yet, since the argument is not used uninitialized.  */
c6dfe037 4664 gcc_assert (ssa_default_def (cfun, arg) == NULL);
75a70cf9 4665 narg = make_ssa_name (arg, gimple_build_nop ());
c6dfe037 4666 set_ssa_default_def (cfun, arg, narg);
75a70cf9 4667 /* ?? Is setting the subcode really necessary ?? */
4668 gimple_omp_set_subcode (parcopy_stmt, TREE_CODE (narg));
4669 gimple_assign_set_rhs1 (parcopy_stmt, narg);
79acaae1 4670 update_stmt (parcopy_stmt);
4671 }
773c5ba7 4672 }
4673
4674 /* Declare local variables needed in CHILD_CFUN. */
4675 block = DECL_INITIAL (child_fn);
2ab2ce89 4676 BLOCK_VARS (block) = vec2chain (child_cfun->local_decls);
e1a7ccb9 4677 /* The gimplifier could record temporaries in parallel/task block
4678         rather than in the containing function's local_decls chain,
4679         which would mean cgraph would miss finalizing them.  Do it now.  */
1767a056 4680 for (t = BLOCK_VARS (block); t; t = DECL_CHAIN (t))
e1a7ccb9 4681 if (TREE_CODE (t) == VAR_DECL
4682 && TREE_STATIC (t)
4683 && !DECL_EXTERNAL (t))
4684 varpool_finalize_decl (t);
75a70cf9 4685 DECL_SAVED_TREE (child_fn) = NULL;
e3a19533 4686 /* We'll create a CFG for child_fn, so no gimple body is needed. */
4687 gimple_set_body (child_fn, NULL);
1d22f541 4688 TREE_USED (block) = 1;
773c5ba7 4689
79acaae1 4690 /* Reset DECL_CONTEXT on function arguments. */
1767a056 4691 for (t = DECL_ARGUMENTS (child_fn); t; t = DECL_CHAIN (t))
773c5ba7 4692 DECL_CONTEXT (t) = child_fn;
4693
75a70cf9 4694 /* Split ENTRY_BB at GIMPLE_OMP_PARALLEL or GIMPLE_OMP_TASK,
4695 so that it can be moved to the child function. */
4696 gsi = gsi_last_bb (entry_bb);
4697 stmt = gsi_stmt (gsi);
4698 gcc_assert (stmt && (gimple_code (stmt) == GIMPLE_OMP_PARALLEL
4699 || gimple_code (stmt) == GIMPLE_OMP_TASK));
4700 gsi_remove (&gsi, true);
4701 e = split_block (entry_bb, stmt);
773c5ba7 4702 entry_bb = e->dest;
4703 single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
4704
75a70cf9 4705 /* Convert GIMPLE_OMP_RETURN into a RETURN_EXPR. */
5056ba1a 4706 if (exit_bb)
4707 {
75a70cf9 4708 gsi = gsi_last_bb (exit_bb);
4709 gcc_assert (!gsi_end_p (gsi)
4710 && gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_RETURN);
4711 stmt = gimple_build_return (NULL);
4712 gsi_insert_after (&gsi, stmt, GSI_SAME_STMT);
4713 gsi_remove (&gsi, true);
5056ba1a 4714 }
79acaae1 4715
4716 /* Move the parallel region into CHILD_CFUN. */
48e1416a 4717
79acaae1 4718 if (gimple_in_ssa_p (cfun))
4719 {
bcaa2770 4720 init_tree_ssa (child_cfun);
5084b2e4 4721 init_ssa_operands (child_cfun);
4722 child_cfun->gimple_df->in_ssa_p = true;
1d22f541 4723 block = NULL_TREE;
79acaae1 4724 }
1d22f541 4725 else
75a70cf9 4726 block = gimple_block (entry_stmt);
1d22f541 4727
4728 new_bb = move_sese_region_to_fn (child_cfun, entry_bb, exit_bb, block);
79acaae1 4729 if (exit_bb)
4730 single_succ_edge (new_bb)->flags = EDGE_FALLTHRU;
04c2922b 4731 /* When the OMP expansion process cannot guarantee an up-to-date
4732         loop tree, arrange for the child function to fix up loops.  */
4733 if (loops_state_satisfies_p (LOOPS_NEED_FIXUP))
4734 child_cfun->x_current_loops->state |= LOOPS_NEED_FIXUP;
79acaae1 4735
1d22f541 4736 /* Remove non-local VAR_DECLs from child_cfun->local_decls list. */
f1f41a6c 4737 num = vec_safe_length (child_cfun->local_decls);
501bdd19 4738 for (srcidx = 0, dstidx = 0; srcidx < num; srcidx++)
4739 {
f1f41a6c 4740 t = (*child_cfun->local_decls)[srcidx];
501bdd19 4741 if (DECL_CONTEXT (t) == cfun->decl)
4742 continue;
4743 if (srcidx != dstidx)
f1f41a6c 4744 (*child_cfun->local_decls)[dstidx] = t;
501bdd19 4745 dstidx++;
4746 }
4747 if (dstidx != num)
f1f41a6c 4748 vec_safe_truncate (child_cfun->local_decls, dstidx);
1d22f541 4749
79acaae1 4750 /* Inform the callgraph about the new function. */
82b40354 4751 DECL_STRUCT_FUNCTION (child_fn)->curr_properties = cfun->curr_properties;
79acaae1 4752 cgraph_add_new_function (child_fn, true);
4753
4754 /* Fix the callgraph edges for child_cfun. Those for cfun will be
4755 fixed in a following pass. */
4756 push_cfun (child_cfun);
658b4427 4757 if (optimize)
fd6481cf 4758 optimize_omp_library_calls (entry_stmt);
79acaae1 4759 rebuild_cgraph_edges ();
fbe86b1b 4760
4761 /* Some EH regions might become dead, see PR34608. If
4762 pass_cleanup_cfg isn't the first pass to happen with the
4763 new child, these dead EH edges might cause problems.
4764 Clean them up now. */
4765 if (flag_exceptions)
4766 {
4767 basic_block bb;
fbe86b1b 4768 bool changed = false;
4769
fbe86b1b 4770 FOR_EACH_BB (bb)
75a70cf9 4771 changed |= gimple_purge_dead_eh_edges (bb);
fbe86b1b 4772 if (changed)
4773 cleanup_tree_cfg ();
fbe86b1b 4774 }
dd277d48 4775 if (gimple_in_ssa_p (cfun))
4776 update_ssa (TODO_update_ssa);
79acaae1 4777 pop_cfun ();
773c5ba7 4778 }
48e1416a 4779
773c5ba7 4780 /* Emit a library call to launch the children threads. */
75a70cf9 4781 if (gimple_code (entry_stmt) == GIMPLE_OMP_PARALLEL)
fd6481cf 4782 expand_parallel_call (region, new_bb, entry_stmt, ws_args);
4783 else
4784 expand_task_call (new_bb, entry_stmt);
083152fb 4785 if (gimple_in_ssa_p (cfun))
4786 update_ssa (TODO_update_ssa_only_virtuals);
1e8e9920 4787}
4788
773c5ba7 4789
3d483a94 4790/* Helper function for expand_omp_{for_*,simd}. If this is the outermost
4791 of the combined collapse > 1 loop constructs, generate code like:
4792 if (__builtin_expect (N32 cond3 N31, 0)) goto ZERO_ITER_BB;
4793 if (cond3 is <)
4794 adj = STEP3 - 1;
4795 else
4796 adj = STEP3 + 1;
4797 count3 = (adj + N32 - N31) / STEP3;
4798 if (__builtin_expect (N22 cond2 N21, 0)) goto ZERO_ITER_BB;
4799 if (cond2 is <)
4800 adj = STEP2 - 1;
4801 else
4802 adj = STEP2 + 1;
4803 count2 = (adj + N22 - N21) / STEP2;
4804 if (__builtin_expect (N12 cond1 N11, 0)) goto ZERO_ITER_BB;
4805 if (cond1 is <)
4806 adj = STEP1 - 1;
4807 else
4808 adj = STEP1 + 1;
4809 count1 = (adj + N12 - N11) / STEP1;
4810 count = count1 * count2 * count3;
4811 Furthermore, if ZERO_ITER_BB is NULL, create a BB which does:
4812 count = 0;
bc7bff74 4813 and set ZERO_ITER_BB to that bb. If this isn't the outermost
4814 of the combined loop constructs, just initialize COUNTS array
4815 from the _looptemp_ clauses. */
3d483a94 4816
4817/* NOTE: It *could* be better to moosh all of the BBs together,
4818 creating one larger BB with all the computation and the unexpected
4819 jump at the end. I.e.
4820
4821 bool zero3, zero2, zero1, zero;
4822
4823 zero3 = N32 c3 N31;
4824 count3 = (N32 - N31) /[cl] STEP3;
4825 zero2 = N22 c2 N21;
4826 count2 = (N22 - N21) /[cl] STEP2;
4827 zero1 = N12 c1 N11;
4828 count1 = (N12 - N11) /[cl] STEP1;
4829 zero = zero3 || zero2 || zero1;
4830 count = count1 * count2 * count3;
4831 if (__builtin_expect(zero, false)) goto zero_iter_bb;
4832
4833 After all, we expect the zero=false, and thus we expect to have to
4834 evaluate all of the comparison expressions, so short-circuiting
4835 oughtn't be a win. Since the condition isn't protecting a
4836 denominator, we're not concerned about divide-by-zero, so we can
4837 fully evaluate count even if a numerator turned out to be wrong.
4838
4839 It seems like putting this all together would create much better
4840 scheduling opportunities, and less pressure on the chip's branch
4841 predictor. */
4842
4843static void
4844expand_omp_for_init_counts (struct omp_for_data *fd, gimple_stmt_iterator *gsi,
4845 basic_block &entry_bb, tree *counts,
4846 basic_block &zero_iter_bb, int &first_zero_iter,
4847 basic_block &l2_dom_bb)
4848{
4849 tree t, type = TREE_TYPE (fd->loop.v);
4850 gimple stmt;
4851 edge e, ne;
4852 int i;
4853
4854 /* Collapsed loops need work for expansion into SSA form. */
4855 gcc_assert (!gimple_in_ssa_p (cfun));
4856
bc7bff74 4857 if (gimple_omp_for_combined_into_p (fd->for_stmt)
4858 && TREE_CODE (fd->loop.n2) != INTEGER_CST)
4859 {
4860 /* First two _looptemp_ clauses are for istart/iend, counts[0]
4861 isn't supposed to be handled, as the inner loop doesn't
4862 use it. */
4863 tree innerc = find_omp_clause (gimple_omp_for_clauses (fd->for_stmt),
4864 OMP_CLAUSE__LOOPTEMP_);
4865 gcc_assert (innerc);
4866 for (i = 0; i < fd->collapse; i++)
4867 {
4868 innerc = find_omp_clause (OMP_CLAUSE_CHAIN (innerc),
4869 OMP_CLAUSE__LOOPTEMP_);
4870 gcc_assert (innerc);
4871 if (i)
4872 counts[i] = OMP_CLAUSE_DECL (innerc);
4873 else
4874 counts[0] = NULL_TREE;
4875 }
4876 return;
4877 }
4878
3d483a94 4879 for (i = 0; i < fd->collapse; i++)
4880 {
4881 tree itype = TREE_TYPE (fd->loops[i].v);
4882
4883 if (SSA_VAR_P (fd->loop.n2)
4884 && ((t = fold_binary (fd->loops[i].cond_code, boolean_type_node,
4885 fold_convert (itype, fd->loops[i].n1),
4886 fold_convert (itype, fd->loops[i].n2)))
4887 == NULL_TREE || !integer_onep (t)))
4888 {
4889 tree n1, n2;
4890 n1 = fold_convert (itype, unshare_expr (fd->loops[i].n1));
4891 n1 = force_gimple_operand_gsi (gsi, n1, true, NULL_TREE,
4892 true, GSI_SAME_STMT);
4893 n2 = fold_convert (itype, unshare_expr (fd->loops[i].n2));
4894 n2 = force_gimple_operand_gsi (gsi, n2, true, NULL_TREE,
4895 true, GSI_SAME_STMT);
4896 stmt = gimple_build_cond (fd->loops[i].cond_code, n1, n2,
4897 NULL_TREE, NULL_TREE);
4898 gsi_insert_before (gsi, stmt, GSI_SAME_STMT);
4899 if (walk_tree (gimple_cond_lhs_ptr (stmt),
4900 expand_omp_regimplify_p, NULL, NULL)
4901 || walk_tree (gimple_cond_rhs_ptr (stmt),
4902 expand_omp_regimplify_p, NULL, NULL))
4903 {
4904 *gsi = gsi_for_stmt (stmt);
4905 gimple_regimplify_operands (stmt, gsi);
4906 }
4907 e = split_block (entry_bb, stmt);
4908 if (zero_iter_bb == NULL)
4909 {
4910 first_zero_iter = i;
4911 zero_iter_bb = create_empty_bb (entry_bb);
4912 if (current_loops)
4913 add_bb_to_loop (zero_iter_bb, entry_bb->loop_father);
4914 *gsi = gsi_after_labels (zero_iter_bb);
4915 stmt = gimple_build_assign (fd->loop.n2,
4916 build_zero_cst (type));
4917 gsi_insert_before (gsi, stmt, GSI_SAME_STMT);
4918 set_immediate_dominator (CDI_DOMINATORS, zero_iter_bb,
4919 entry_bb);
4920 }
4921 ne = make_edge (entry_bb, zero_iter_bb, EDGE_FALSE_VALUE);
4922 ne->probability = REG_BR_PROB_BASE / 2000 - 1;
4923 e->flags = EDGE_TRUE_VALUE;
4924 e->probability = REG_BR_PROB_BASE - ne->probability;
4925 if (l2_dom_bb == NULL)
4926 l2_dom_bb = entry_bb;
4927 entry_bb = e->dest;
4928 *gsi = gsi_last_bb (entry_bb);
4929 }
4930
4931 if (POINTER_TYPE_P (itype))
4932 itype = signed_type_for (itype);
4933 t = build_int_cst (itype, (fd->loops[i].cond_code == LT_EXPR
4934 ? -1 : 1));
4935 t = fold_build2 (PLUS_EXPR, itype,
4936 fold_convert (itype, fd->loops[i].step), t);
4937 t = fold_build2 (PLUS_EXPR, itype, t,
4938 fold_convert (itype, fd->loops[i].n2));
4939 t = fold_build2 (MINUS_EXPR, itype, t,
4940 fold_convert (itype, fd->loops[i].n1));
4941	 /* ?? We could probably use CEIL_DIV_EXPR instead of
4942	    TRUNC_DIV_EXPR and adjust by hand.  Then again, we might not
4943	    generate the same code in the end, because generically we
4944	    don't know that the values involved must be negative for
4945	    GT.  ?? */
4946 if (TYPE_UNSIGNED (itype) && fd->loops[i].cond_code == GT_EXPR)
4947 t = fold_build2 (TRUNC_DIV_EXPR, itype,
4948 fold_build1 (NEGATE_EXPR, itype, t),
4949 fold_build1 (NEGATE_EXPR, itype,
4950 fold_convert (itype,
4951 fd->loops[i].step)));
4952 else
4953 t = fold_build2 (TRUNC_DIV_EXPR, itype, t,
4954 fold_convert (itype, fd->loops[i].step));
4955 t = fold_convert (type, t);
4956 if (TREE_CODE (t) == INTEGER_CST)
4957 counts[i] = t;
4958 else
4959 {
4960 counts[i] = create_tmp_reg (type, ".count");
4961 expand_omp_build_assign (gsi, counts[i], t);
4962 }
4963 if (SSA_VAR_P (fd->loop.n2))
4964 {
4965 if (i == 0)
4966 t = counts[0];
4967 else
4968 t = fold_build2 (MULT_EXPR, type, fd->loop.n2, counts[i]);
4969 expand_omp_build_assign (gsi, fd->loop.n2, t);
4970 }
4971 }
4972}
4973
4974
4975/* Helper function for expand_omp_{for_*,simd}. Generate code like:
4976 T = V;
4977 V3 = N31 + (T % count3) * STEP3;
4978 T = T / count3;
4979 V2 = N21 + (T % count2) * STEP2;
4980 T = T / count2;
4981 V1 = N11 + T * STEP1;
bc7bff74 4982 if this loop doesn't have an inner loop construct combined with it.
4983 If it does have an inner loop construct combined with it and the
4984 iteration count isn't known constant, store values from counts array
4985 into its _looptemp_ temporaries instead. */
3d483a94 4986
4987static void
4988expand_omp_for_init_vars (struct omp_for_data *fd, gimple_stmt_iterator *gsi,
bc7bff74 4989 tree *counts, gimple inner_stmt, tree startvar)
3d483a94 4990{
4991 int i;
bc7bff74 4992 if (gimple_omp_for_combined_p (fd->for_stmt))
4993 {
4994 /* If fd->loop.n2 is constant, then no propagation of the counts
4995 is needed, they are constant. */
4996 if (TREE_CODE (fd->loop.n2) == INTEGER_CST)
4997 return;
4998
4999 tree clauses = gimple_code (inner_stmt) == GIMPLE_OMP_PARALLEL
5000 ? gimple_omp_parallel_clauses (inner_stmt)
5001 : gimple_omp_for_clauses (inner_stmt);
5002 /* First two _looptemp_ clauses are for istart/iend, counts[0]
5003 isn't supposed to be handled, as the inner loop doesn't
5004 use it. */
5005 tree innerc = find_omp_clause (clauses, OMP_CLAUSE__LOOPTEMP_);
5006 gcc_assert (innerc);
5007 for (i = 0; i < fd->collapse; i++)
5008 {
5009 innerc = find_omp_clause (OMP_CLAUSE_CHAIN (innerc),
5010 OMP_CLAUSE__LOOPTEMP_);
5011 gcc_assert (innerc);
5012 if (i)
5013 {
5014 tree tem = OMP_CLAUSE_DECL (innerc);
5015 tree t = fold_convert (TREE_TYPE (tem), counts[i]);
5016 t = force_gimple_operand_gsi (gsi, t, false, NULL_TREE,
5017 false, GSI_CONTINUE_LINKING);
5018 gimple stmt = gimple_build_assign (tem, t);
5019 gsi_insert_after (gsi, stmt, GSI_CONTINUE_LINKING);
5020 }
5021 }
5022 return;
5023 }
5024
3d483a94 5025 tree type = TREE_TYPE (fd->loop.v);
5026 tree tem = create_tmp_reg (type, ".tem");
5027 gimple stmt = gimple_build_assign (tem, startvar);
5028 gsi_insert_after (gsi, stmt, GSI_CONTINUE_LINKING);
5029
5030 for (i = fd->collapse - 1; i >= 0; i--)
5031 {
5032 tree vtype = TREE_TYPE (fd->loops[i].v), itype, t;
5033 itype = vtype;
5034 if (POINTER_TYPE_P (vtype))
5035 itype = signed_type_for (vtype);
5036 if (i != 0)
5037 t = fold_build2 (TRUNC_MOD_EXPR, type, tem, counts[i]);
5038 else
5039 t = tem;
5040 t = fold_convert (itype, t);
5041 t = fold_build2 (MULT_EXPR, itype, t,
5042 fold_convert (itype, fd->loops[i].step));
5043 if (POINTER_TYPE_P (vtype))
5044 t = fold_build_pointer_plus (fd->loops[i].n1, t);
5045 else
5046 t = fold_build2 (PLUS_EXPR, itype, fd->loops[i].n1, t);
5047 t = force_gimple_operand_gsi (gsi, t,
5048 DECL_P (fd->loops[i].v)
5049 && TREE_ADDRESSABLE (fd->loops[i].v),
5050 NULL_TREE, false,
5051 GSI_CONTINUE_LINKING);
5052 stmt = gimple_build_assign (fd->loops[i].v, t);
5053 gsi_insert_after (gsi, stmt, GSI_CONTINUE_LINKING);
5054 if (i != 0)
5055 {
5056 t = fold_build2 (TRUNC_DIV_EXPR, type, tem, counts[i]);
5057 t = force_gimple_operand_gsi (gsi, t, false, NULL_TREE,
5058 false, GSI_CONTINUE_LINKING);
5059 stmt = gimple_build_assign (tem, t);
5060 gsi_insert_after (gsi, stmt, GSI_CONTINUE_LINKING);
5061 }
5062 }
5063}
5064
5065
5066/* Helper function for expand_omp_for_*. Generate code like:
5067 L10:
5068 V3 += STEP3;
5069 if (V3 cond3 N32) goto BODY_BB; else goto L11;
5070 L11:
5071 V3 = N31;
5072 V2 += STEP2;
5073 if (V2 cond2 N22) goto BODY_BB; else goto L12;
5074 L12:
5075 V2 = N21;
5076 V1 += STEP1;
5077 goto BODY_BB; */
5078
5079static basic_block
5080extract_omp_for_update_vars (struct omp_for_data *fd, basic_block cont_bb,
5081 basic_block body_bb)
5082{
5083 basic_block last_bb, bb, collapse_bb = NULL;
5084 int i;
5085 gimple_stmt_iterator gsi;
5086 edge e;
5087 tree t;
5088 gimple stmt;
5089
5090 last_bb = cont_bb;
5091 for (i = fd->collapse - 1; i >= 0; i--)
5092 {
5093 tree vtype = TREE_TYPE (fd->loops[i].v);
5094
5095 bb = create_empty_bb (last_bb);
5096 if (current_loops)
5097 add_bb_to_loop (bb, last_bb->loop_father);
5098 gsi = gsi_start_bb (bb);
5099
5100 if (i < fd->collapse - 1)
5101 {
5102 e = make_edge (last_bb, bb, EDGE_FALSE_VALUE);
5103 e->probability = REG_BR_PROB_BASE / 8;
5104
5105 t = fd->loops[i + 1].n1;
5106 t = force_gimple_operand_gsi (&gsi, t,
5107 DECL_P (fd->loops[i + 1].v)
5108 && TREE_ADDRESSABLE (fd->loops[i
5109 + 1].v),
5110 NULL_TREE, false,
5111 GSI_CONTINUE_LINKING);
5112 stmt = gimple_build_assign (fd->loops[i + 1].v, t);
5113 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
5114 }
5115 else
5116 collapse_bb = bb;
5117
5118 set_immediate_dominator (CDI_DOMINATORS, bb, last_bb);
5119
5120 if (POINTER_TYPE_P (vtype))
5121 t = fold_build_pointer_plus (fd->loops[i].v, fd->loops[i].step);
5122 else
5123 t = fold_build2 (PLUS_EXPR, vtype, fd->loops[i].v, fd->loops[i].step);
5124 t = force_gimple_operand_gsi (&gsi, t,
5125 DECL_P (fd->loops[i].v)
5126 && TREE_ADDRESSABLE (fd->loops[i].v),
5127 NULL_TREE, false, GSI_CONTINUE_LINKING);
5128 stmt = gimple_build_assign (fd->loops[i].v, t);
5129 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
5130
5131 if (i > 0)
5132 {
5133 t = fd->loops[i].n2;
5134 t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
5135 false, GSI_CONTINUE_LINKING);
5136 tree v = fd->loops[i].v;
5137 if (DECL_P (v) && TREE_ADDRESSABLE (v))
5138 v = force_gimple_operand_gsi (&gsi, v, true, NULL_TREE,
5139 false, GSI_CONTINUE_LINKING);
5140 t = fold_build2 (fd->loops[i].cond_code, boolean_type_node, v, t);
5141 stmt = gimple_build_cond_empty (t);
5142 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
5143 e = make_edge (bb, body_bb, EDGE_TRUE_VALUE);
5144 e->probability = REG_BR_PROB_BASE * 7 / 8;
5145 }
5146 else
5147 make_edge (bb, body_bb, EDGE_FALLTHRU);
5148 last_bb = bb;
5149 }
5150
5151 return collapse_bb;
5152}
5153
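
/* For intuition (a sketch, not emitted verbatim): with collapse(2) the
   chain of blocks built below behaves like one odometer step in plain
   C, using the hypothetical names v1/v2, n21/n22 and step1/step2:

     v2 += step2;
     if (!(v2 cond2 n22))       /* innermost loop wrapped around */
       {
	 v2 = n21;              /* reset inner, carry into outer */
	 v1 += step1;
       }
     /* control transfers back to BODY_BB in either case */
*/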
5154
773c5ba7 5155/* A subroutine of expand_omp_for. Generate code for a parallel
1e8e9920 5156 loop with any schedule. Given parameters:
5157
5158 for (V = N1; V cond N2; V += STEP) BODY;
5159
5160 where COND is "<" or ">", we generate pseudocode
5161
5162 more = GOMP_loop_foo_start (N1, N2, STEP, CHUNK, &istart0, &iend0);
773c5ba7 5163 if (more) goto L0; else goto L3;
1e8e9920 5164 L0:
5165 V = istart0;
5166 iend = iend0;
5167 L1:
5168 BODY;
5169 V += STEP;
773c5ba7 5170 if (V cond iend) goto L1; else goto L2;
1e8e9920 5171 L2:
773c5ba7 5172 if (GOMP_loop_foo_next (&istart0, &iend0)) goto L0; else goto L3;
5173 L3:
1e8e9920 5174
773c5ba7 5175 If this is a combined omp parallel loop, instead of the call to
fd6481cf 5176 GOMP_loop_foo_start, we call GOMP_loop_foo_next.
bc7bff74 5177    If this is a gimple_omp_for_combined_p loop, then instead of assigning
 5178    V and iend in L0 we assign the first two _looptemp_ clause decls of the
 5179    inner GIMPLE_OMP_FOR, and the statements V += STEP; and
 5180    if (V cond iend) goto L1; else goto L2; are removed.
fd6481cf 5181
5182 For collapsed loops, given parameters:
5183 collapse(3)
5184 for (V1 = N11; V1 cond1 N12; V1 += STEP1)
5185 for (V2 = N21; V2 cond2 N22; V2 += STEP2)
5186 for (V3 = N31; V3 cond3 N32; V3 += STEP3)
5187 BODY;
5188
5189 we generate pseudocode
5190
8e6b4515 5191 if (__builtin_expect (N32 cond3 N31, 0)) goto Z0;
fd6481cf 5192 if (cond3 is <)
5193 adj = STEP3 - 1;
5194 else
5195 adj = STEP3 + 1;
5196 count3 = (adj + N32 - N31) / STEP3;
8e6b4515 5197 if (__builtin_expect (N22 cond2 N21, 0)) goto Z0;
fd6481cf 5198 if (cond2 is <)
5199 adj = STEP2 - 1;
5200 else
5201 adj = STEP2 + 1;
5202 count2 = (adj + N22 - N21) / STEP2;
8e6b4515 5203 if (__builtin_expect (N12 cond1 N11, 0)) goto Z0;
fd6481cf 5204 if (cond1 is <)
5205 adj = STEP1 - 1;
5206 else
5207 adj = STEP1 + 1;
5208 count1 = (adj + N12 - N11) / STEP1;
5209 count = count1 * count2 * count3;
8e6b4515 5210 goto Z1;
5211 Z0:
5212 count = 0;
5213 Z1:
fd6481cf 5214 more = GOMP_loop_foo_start (0, count, 1, CHUNK, &istart0, &iend0);
5215 if (more) goto L0; else goto L3;
5216 L0:
5217 V = istart0;
5218 T = V;
5219 V3 = N31 + (T % count3) * STEP3;
5220 T = T / count3;
5221 V2 = N21 + (T % count2) * STEP2;
5222 T = T / count2;
5223 V1 = N11 + T * STEP1;
5224 iend = iend0;
5225 L1:
5226 BODY;
5227 V += 1;
5228 if (V < iend) goto L10; else goto L2;
5229 L10:
5230 V3 += STEP3;
5231 if (V3 cond3 N32) goto L1; else goto L11;
5232 L11:
5233 V3 = N31;
5234 V2 += STEP2;
5235 if (V2 cond2 N22) goto L1; else goto L12;
5236 L12:
5237 V2 = N21;
5238 V1 += STEP1;
5239 goto L1;
5240 L2:
5241 if (GOMP_loop_foo_next (&istart0, &iend0)) goto L0; else goto L3;
5242 L3:
5243
5244 */
1e8e9920 5245
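
/* As a concrete reference (a sketch of what the pseudocode above
   amounts to, not code emitted by this function), a non-combined loop

     #pragma omp for schedule(dynamic, CHUNK)
     for (V = N1; V < N2; V += STEP) BODY;

   behaves like the following hand-written calls into libgomp, with
   start_fn/next_fn being GOMP_loop_dynamic_start/_next:

     long istart0, iend0;
     if (GOMP_loop_dynamic_start (N1, N2, STEP, CHUNK,
				  &istart0, &iend0))
       do
	 {
	   long V;
	   for (V = istart0; V < iend0; V += STEP)
	     BODY;
	 }
       while (GOMP_loop_dynamic_next (&istart0, &iend0));
     GOMP_loop_end ();
*/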
61e47ac8 5246static void
773c5ba7 5247expand_omp_for_generic (struct omp_region *region,
5248 struct omp_for_data *fd,
1e8e9920 5249 enum built_in_function start_fn,
bc7bff74 5250 enum built_in_function next_fn,
5251 gimple inner_stmt)
1e8e9920 5252{
75a70cf9 5253 tree type, istart0, iend0, iend;
fd6481cf 5254 tree t, vmain, vback, bias = NULL_TREE;
5255 basic_block entry_bb, cont_bb, exit_bb, l0_bb, l1_bb, collapse_bb;
03ed154b 5256 basic_block l2_bb = NULL, l3_bb = NULL;
75a70cf9 5257 gimple_stmt_iterator gsi;
5258 gimple stmt;
773c5ba7 5259 bool in_combined_parallel = is_combined_parallel (region);
ac6e3339 5260 bool broken_loop = region->cont == NULL;
79acaae1 5261 edge e, ne;
fd6481cf 5262 tree *counts = NULL;
5263 int i;
ac6e3339 5264
5265 gcc_assert (!broken_loop || !in_combined_parallel);
fd6481cf 5266 gcc_assert (fd->iter_type == long_integer_type_node
5267 || !in_combined_parallel);
1e8e9920 5268
fd6481cf 5269 type = TREE_TYPE (fd->loop.v);
5270 istart0 = create_tmp_var (fd->iter_type, ".istart0");
5271 iend0 = create_tmp_var (fd->iter_type, ".iend0");
6d63fc03 5272 TREE_ADDRESSABLE (istart0) = 1;
5273 TREE_ADDRESSABLE (iend0) = 1;
1e8e9920 5274
fd6481cf 5275 /* See if we need to bias by LLONG_MIN. */
5276 if (fd->iter_type == long_long_unsigned_type_node
5277 && TREE_CODE (type) == INTEGER_TYPE
5278 && !TYPE_UNSIGNED (type))
5279 {
5280 tree n1, n2;
5281
5282 if (fd->loop.cond_code == LT_EXPR)
5283 {
5284 n1 = fd->loop.n1;
5285 n2 = fold_build2 (PLUS_EXPR, type, fd->loop.n2, fd->loop.step);
5286 }
5287 else
5288 {
5289 n1 = fold_build2 (MINUS_EXPR, type, fd->loop.n2, fd->loop.step);
5290 n2 = fd->loop.n1;
5291 }
5292 if (TREE_CODE (n1) != INTEGER_CST
5293 || TREE_CODE (n2) != INTEGER_CST
5294 || ((tree_int_cst_sgn (n1) < 0) ^ (tree_int_cst_sgn (n2) < 0)))
5295 bias = fold_convert (fd->iter_type, TYPE_MIN_VALUE (type));
5296 }
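  /* Explanatory note: the bias maps the signed iteration space
     order-preservingly into the unsigned iter_type.  E.g. for an 8-bit
     signed type, adding bias (TYPE_MIN_VALUE, i.e. 128 when viewed as
     unsigned) sends -128 to 0 and 127 to 255, so the unsigned
     comparisons done by the GOMP_loop_ull_* runtime functions stay
     correct; the bias is subtracted again below when istart0/iend0 are
     converted back to the loop variable's type.  */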
5297
61e47ac8 5298 entry_bb = region->entry;
03ed154b 5299 cont_bb = region->cont;
fd6481cf 5300 collapse_bb = NULL;
ac6e3339 5301 gcc_assert (EDGE_COUNT (entry_bb->succs) == 2);
5302 gcc_assert (broken_loop
5303 || BRANCH_EDGE (entry_bb)->dest == FALLTHRU_EDGE (cont_bb)->dest);
5304 l0_bb = split_edge (FALLTHRU_EDGE (entry_bb));
5305 l1_bb = single_succ (l0_bb);
5306 if (!broken_loop)
03ed154b 5307 {
5308 l2_bb = create_empty_bb (cont_bb);
ac6e3339 5309 gcc_assert (BRANCH_EDGE (cont_bb)->dest == l1_bb);
5310 gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
03ed154b 5311 }
ac6e3339 5312 else
5313 l2_bb = NULL;
5314 l3_bb = BRANCH_EDGE (entry_bb)->dest;
5315 exit_bb = region->exit;
773c5ba7 5316
75a70cf9 5317 gsi = gsi_last_bb (entry_bb);
fd6481cf 5318
75a70cf9 5319 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
fd6481cf 5320 if (fd->collapse > 1)
5321 {
8e6b4515 5322 int first_zero_iter = -1;
3d483a94 5323 basic_block zero_iter_bb = NULL, l2_dom_bb = NULL;
8e6b4515 5324
3d483a94 5325 counts = XALLOCAVEC (tree, fd->collapse);
5326 expand_omp_for_init_counts (fd, &gsi, entry_bb, counts,
5327 zero_iter_bb, first_zero_iter,
5328 l2_dom_bb);
fd6481cf 5329
8e6b4515 5330 if (zero_iter_bb)
5331 {
5332 /* Some counts[i] vars might be uninitialized if
5333 some loop has zero iterations. But the body shouldn't
5334 be executed in that case, so just avoid uninit warnings. */
5335 for (i = first_zero_iter; i < fd->collapse; i++)
5336 if (SSA_VAR_P (counts[i]))
5337 TREE_NO_WARNING (counts[i]) = 1;
5338 gsi_prev (&gsi);
5339 e = split_block (entry_bb, gsi_stmt (gsi));
5340 entry_bb = e->dest;
5341 make_edge (zero_iter_bb, entry_bb, EDGE_FALLTHRU);
5342 gsi = gsi_last_bb (entry_bb);
5343 set_immediate_dominator (CDI_DOMINATORS, entry_bb,
5344 get_immediate_dominator (CDI_DOMINATORS,
5345 zero_iter_bb));
5346 }
fd6481cf 5347 }
79acaae1 5348 if (in_combined_parallel)
5349 {
5350 /* In a combined parallel loop, emit a call to
5351 GOMP_loop_foo_next. */
b9a16870 5352 t = build_call_expr (builtin_decl_explicit (next_fn), 2,
79acaae1 5353 build_fold_addr_expr (istart0),
5354 build_fold_addr_expr (iend0));
5355 }
5356 else
1e8e9920 5357 {
c2f47e15 5358 tree t0, t1, t2, t3, t4;
773c5ba7 5359 /* If this is not a combined parallel loop, emit a call to
5360 GOMP_loop_foo_start in ENTRY_BB. */
c2f47e15 5361 t4 = build_fold_addr_expr (iend0);
5362 t3 = build_fold_addr_expr (istart0);
fd6481cf 5363 t2 = fold_convert (fd->iter_type, fd->loop.step);
3d483a94 5364 t1 = fd->loop.n2;
5365 t0 = fd->loop.n1;
bc7bff74 5366 if (gimple_omp_for_combined_into_p (fd->for_stmt))
5367 {
5368 tree innerc = find_omp_clause (gimple_omp_for_clauses (fd->for_stmt),
5369 OMP_CLAUSE__LOOPTEMP_);
5370 gcc_assert (innerc);
5371 t0 = OMP_CLAUSE_DECL (innerc);
5372 innerc = find_omp_clause (OMP_CLAUSE_CHAIN (innerc),
5373 OMP_CLAUSE__LOOPTEMP_);
5374 gcc_assert (innerc);
5375 t1 = OMP_CLAUSE_DECL (innerc);
5376 }
3d483a94 5377 if (POINTER_TYPE_P (TREE_TYPE (t0))
5378 && TYPE_PRECISION (TREE_TYPE (t0))
5379 != TYPE_PRECISION (fd->iter_type))
c799f233 5380 {
5381 /* Avoid casting pointers to integer of a different size. */
3cea8318 5382 tree itype = signed_type_for (type);
3d483a94 5383 t1 = fold_convert (fd->iter_type, fold_convert (itype, t1));
5384 t0 = fold_convert (fd->iter_type, fold_convert (itype, t0));
c799f233 5385 }
5386 else
5387 {
3d483a94 5388 t1 = fold_convert (fd->iter_type, t1);
5389 t0 = fold_convert (fd->iter_type, t0);
c799f233 5390 }
fd6481cf 5391 if (bias)
1e8e9920 5392 {
fd6481cf 5393 t1 = fold_build2 (PLUS_EXPR, fd->iter_type, t1, bias);
5394 t0 = fold_build2 (PLUS_EXPR, fd->iter_type, t0, bias);
5395 }
5396 if (fd->iter_type == long_integer_type_node)
5397 {
5398 if (fd->chunk_size)
5399 {
5400 t = fold_convert (fd->iter_type, fd->chunk_size);
b9a16870 5401 t = build_call_expr (builtin_decl_explicit (start_fn),
5402 6, t0, t1, t2, t, t3, t4);
fd6481cf 5403 }
5404 else
b9a16870 5405 t = build_call_expr (builtin_decl_explicit (start_fn),
5406 5, t0, t1, t2, t3, t4);
1e8e9920 5407 }
c2f47e15 5408 else
fd6481cf 5409 {
5410 tree t5;
5411 tree c_bool_type;
b9a16870 5412 tree bfn_decl;
fd6481cf 5413
 5414 	  /* The GOMP_loop_ull_*start functions have an additional boolean
5415 argument, true for < loops and false for > loops.
5416 In Fortran, the C bool type can be different from
5417 boolean_type_node. */
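	  /* For reference, the assumed libgomp prototype (abridged) is:
	     bool GOMP_loop_ull_dynamic_start (bool up,
					       unsigned long long start,
					       unsigned long long end,
					       unsigned long long incr,
					       unsigned long long chunk,
					       unsigned long long *istart,
					       unsigned long long *iend);  */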
b9a16870 5418 bfn_decl = builtin_decl_explicit (start_fn);
5419 c_bool_type = TREE_TYPE (TREE_TYPE (bfn_decl));
fd6481cf 5420 t5 = build_int_cst (c_bool_type,
5421 fd->loop.cond_code == LT_EXPR ? 1 : 0);
5422 if (fd->chunk_size)
5423 {
b9a16870 5424 tree bfn_decl = builtin_decl_explicit (start_fn);
fd6481cf 5425 t = fold_convert (fd->iter_type, fd->chunk_size);
b9a16870 5426 t = build_call_expr (bfn_decl, 7, t5, t0, t1, t2, t, t3, t4);
fd6481cf 5427 }
5428 else
b9a16870 5429 t = build_call_expr (builtin_decl_explicit (start_fn),
5430 6, t5, t0, t1, t2, t3, t4);
fd6481cf 5431 }
1e8e9920 5432 }
fd6481cf 5433 if (TREE_TYPE (t) != boolean_type_node)
5434 t = fold_build2 (NE_EXPR, boolean_type_node,
5435 t, build_int_cst (TREE_TYPE (t), 0));
75a70cf9 5436 t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
5437 true, GSI_SAME_STMT);
5438 gsi_insert_after (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT);
79acaae1 5439
75a70cf9 5440 /* Remove the GIMPLE_OMP_FOR statement. */
5441 gsi_remove (&gsi, true);
1e8e9920 5442
773c5ba7 5443 /* Iteration setup for sequential loop goes in L0_BB. */
3d483a94 5444 tree startvar = fd->loop.v;
5445 tree endvar = NULL_TREE;
5446
bc7bff74 5447 if (gimple_omp_for_combined_p (fd->for_stmt))
5448 {
5449 gcc_assert (gimple_code (inner_stmt) == GIMPLE_OMP_FOR
5450 && gimple_omp_for_kind (inner_stmt)
5451 == GF_OMP_FOR_KIND_SIMD);
5452 tree innerc = find_omp_clause (gimple_omp_for_clauses (inner_stmt),
5453 OMP_CLAUSE__LOOPTEMP_);
5454 gcc_assert (innerc);
5455 startvar = OMP_CLAUSE_DECL (innerc);
5456 innerc = find_omp_clause (OMP_CLAUSE_CHAIN (innerc),
5457 OMP_CLAUSE__LOOPTEMP_);
5458 gcc_assert (innerc);
5459 endvar = OMP_CLAUSE_DECL (innerc);
5460 }
5461
75a70cf9 5462 gsi = gsi_start_bb (l0_bb);
1efcacec 5463 t = istart0;
fd6481cf 5464 if (bias)
1efcacec 5465 t = fold_build2 (MINUS_EXPR, fd->iter_type, t, bias);
3d483a94 5466 if (POINTER_TYPE_P (TREE_TYPE (startvar)))
5467 t = fold_convert (signed_type_for (TREE_TYPE (startvar)), t);
5468 t = fold_convert (TREE_TYPE (startvar), t);
4abecb72 5469 t = force_gimple_operand_gsi (&gsi, t,
3d483a94 5470 DECL_P (startvar)
5471 && TREE_ADDRESSABLE (startvar),
4abecb72 5472 NULL_TREE, false, GSI_CONTINUE_LINKING);
3d483a94 5473 stmt = gimple_build_assign (startvar, t);
75a70cf9 5474 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
1e8e9920 5475
1efcacec 5476 t = iend0;
fd6481cf 5477 if (bias)
1efcacec 5478 t = fold_build2 (MINUS_EXPR, fd->iter_type, t, bias);
3d483a94 5479 if (POINTER_TYPE_P (TREE_TYPE (startvar)))
5480 t = fold_convert (signed_type_for (TREE_TYPE (startvar)), t);
5481 t = fold_convert (TREE_TYPE (startvar), t);
75a70cf9 5482 iend = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
5483 false, GSI_CONTINUE_LINKING);
3d483a94 5484 if (endvar)
fd6481cf 5485 {
3d483a94 5486 stmt = gimple_build_assign (endvar, iend);
75a70cf9 5487 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
fd6481cf 5488 }
3d483a94 5489 if (fd->collapse > 1)
bc7bff74 5490 expand_omp_for_init_vars (fd, &gsi, counts, inner_stmt, startvar);
773c5ba7 5491
ac6e3339 5492 if (!broken_loop)
03ed154b 5493 {
ac6e3339 5494 /* Code to control the increment and predicate for the sequential
5495 loop goes in the CONT_BB. */
75a70cf9 5496 gsi = gsi_last_bb (cont_bb);
5497 stmt = gsi_stmt (gsi);
5498 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_CONTINUE);
5499 vmain = gimple_omp_continue_control_use (stmt);
5500 vback = gimple_omp_continue_control_def (stmt);
79acaae1 5501
bc7bff74 5502 if (!gimple_omp_for_combined_p (fd->for_stmt))
3d483a94 5503 {
5504 if (POINTER_TYPE_P (type))
5505 t = fold_build_pointer_plus (vmain, fd->loop.step);
5506 else
5507 t = fold_build2 (PLUS_EXPR, type, vmain, fd->loop.step);
5508 t = force_gimple_operand_gsi (&gsi, t,
5509 DECL_P (vback)
5510 && TREE_ADDRESSABLE (vback),
5511 NULL_TREE, true, GSI_SAME_STMT);
5512 stmt = gimple_build_assign (vback, t);
5513 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
5514
5515 t = build2 (fd->loop.cond_code, boolean_type_node,
5516 DECL_P (vback) && TREE_ADDRESSABLE (vback) ? t : vback,
5517 iend);
5518 stmt = gimple_build_cond_empty (t);
5519 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
5520 }
773c5ba7 5521
75a70cf9 5522 /* Remove GIMPLE_OMP_CONTINUE. */
5523 gsi_remove (&gsi, true);
773c5ba7 5524
bc7bff74 5525 if (fd->collapse > 1 && !gimple_omp_for_combined_p (fd->for_stmt))
3d483a94 5526 collapse_bb = extract_omp_for_update_vars (fd, cont_bb, l1_bb);
fd6481cf 5527
ac6e3339 5528 /* Emit code to get the next parallel iteration in L2_BB. */
75a70cf9 5529 gsi = gsi_start_bb (l2_bb);
773c5ba7 5530
b9a16870 5531 t = build_call_expr (builtin_decl_explicit (next_fn), 2,
ac6e3339 5532 build_fold_addr_expr (istart0),
5533 build_fold_addr_expr (iend0));
75a70cf9 5534 t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
5535 false, GSI_CONTINUE_LINKING);
fd6481cf 5536 if (TREE_TYPE (t) != boolean_type_node)
5537 t = fold_build2 (NE_EXPR, boolean_type_node,
5538 t, build_int_cst (TREE_TYPE (t), 0));
75a70cf9 5539 stmt = gimple_build_cond_empty (t);
5540 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
ac6e3339 5541 }
1e8e9920 5542
61e47ac8 5543 /* Add the loop cleanup function. */
75a70cf9 5544 gsi = gsi_last_bb (exit_bb);
5545 if (gimple_omp_return_nowait_p (gsi_stmt (gsi)))
b9a16870 5546 t = builtin_decl_explicit (BUILT_IN_GOMP_LOOP_END_NOWAIT);
bc7bff74 5547 else if (gimple_omp_return_lhs (gsi_stmt (gsi)))
5548 t = builtin_decl_explicit (BUILT_IN_GOMP_LOOP_END_CANCEL);
61e47ac8 5549 else
b9a16870 5550 t = builtin_decl_explicit (BUILT_IN_GOMP_LOOP_END);
75a70cf9 5551 stmt = gimple_build_call (t, 0);
bc7bff74 5552 if (gimple_omp_return_lhs (gsi_stmt (gsi)))
5553 gimple_call_set_lhs (stmt, gimple_omp_return_lhs (gsi_stmt (gsi)));
75a70cf9 5554 gsi_insert_after (&gsi, stmt, GSI_SAME_STMT);
5555 gsi_remove (&gsi, true);
773c5ba7 5556
5557 /* Connect the new blocks. */
79acaae1 5558 find_edge (entry_bb, l0_bb)->flags = EDGE_TRUE_VALUE;
5559 find_edge (entry_bb, l3_bb)->flags = EDGE_FALSE_VALUE;
1e8e9920 5560
ac6e3339 5561 if (!broken_loop)
5562 {
75a70cf9 5563 gimple_seq phis;
5564
79acaae1 5565 e = find_edge (cont_bb, l3_bb);
5566 ne = make_edge (l2_bb, l3_bb, EDGE_FALSE_VALUE);
5567
75a70cf9 5568 phis = phi_nodes (l3_bb);
5569 for (gsi = gsi_start (phis); !gsi_end_p (gsi); gsi_next (&gsi))
5570 {
5571 gimple phi = gsi_stmt (gsi);
5572 SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, ne),
5573 PHI_ARG_DEF_FROM_EDGE (phi, e));
5574 }
79acaae1 5575 remove_edge (e);
5576
ac6e3339 5577 make_edge (cont_bb, l2_bb, EDGE_FALSE_VALUE);
f6568ea4 5578 if (current_loops)
5579 add_bb_to_loop (l2_bb, cont_bb->loop_father);
3d483a94 5580 e = find_edge (cont_bb, l1_bb);
bc7bff74 5581 if (gimple_omp_for_combined_p (fd->for_stmt))
5582 {
5583 remove_edge (e);
5584 e = NULL;
5585 }
3d483a94 5586 else if (fd->collapse > 1)
fd6481cf 5587 {
fd6481cf 5588 remove_edge (e);
5589 e = make_edge (cont_bb, collapse_bb, EDGE_TRUE_VALUE);
5590 }
5591 else
3d483a94 5592 e->flags = EDGE_TRUE_VALUE;
5593 if (e)
fd6481cf 5594 {
3d483a94 5595 e->probability = REG_BR_PROB_BASE * 7 / 8;
5596 find_edge (cont_bb, l2_bb)->probability = REG_BR_PROB_BASE / 8;
5597 }
5598 else
5599 {
5600 e = find_edge (cont_bb, l2_bb);
5601 e->flags = EDGE_FALLTHRU;
fd6481cf 5602 }
ac6e3339 5603 make_edge (l2_bb, l0_bb, EDGE_TRUE_VALUE);
79acaae1 5604
5605 set_immediate_dominator (CDI_DOMINATORS, l2_bb,
5606 recompute_dominator (CDI_DOMINATORS, l2_bb));
5607 set_immediate_dominator (CDI_DOMINATORS, l3_bb,
5608 recompute_dominator (CDI_DOMINATORS, l3_bb));
5609 set_immediate_dominator (CDI_DOMINATORS, l0_bb,
5610 recompute_dominator (CDI_DOMINATORS, l0_bb));
5611 set_immediate_dominator (CDI_DOMINATORS, l1_bb,
5612 recompute_dominator (CDI_DOMINATORS, l1_bb));
04c2922b 5613
5614 struct loop *outer_loop = alloc_loop ();
5615 outer_loop->header = l0_bb;
5616 outer_loop->latch = l2_bb;
5617 add_loop (outer_loop, l0_bb->loop_father);
5618
bc7bff74 5619 if (!gimple_omp_for_combined_p (fd->for_stmt))
3d483a94 5620 {
5621 struct loop *loop = alloc_loop ();
5622 loop->header = l1_bb;
5623 /* The loop may have multiple latches. */
5624 add_loop (loop, outer_loop);
5625 }
ac6e3339 5626 }
1e8e9920 5627}
5628
5629
773c5ba7 5630/* A subroutine of expand_omp_for. Generate code for a parallel
5631 loop with static schedule and no specified chunk size. Given
5632 parameters:
1e8e9920 5633
5634 for (V = N1; V cond N2; V += STEP) BODY;
5635
5636 where COND is "<" or ">", we generate pseudocode
5637
8e6b4515 5638 if ((__typeof (V)) -1 > 0 && N2 cond N1) goto L2;
1e8e9920 5639 if (cond is <)
5640 adj = STEP - 1;
5641 else
5642 adj = STEP + 1;
fd6481cf 5643 if ((__typeof (V)) -1 > 0 && cond is >)
5644 n = -(adj + N2 - N1) / -STEP;
5645 else
5646 n = (adj + N2 - N1) / STEP;
1e8e9920 5647 q = n / nthreads;
31712e83 5648 tt = n % nthreads;
5649 if (threadid < tt) goto L3; else goto L4;
5650 L3:
5651 tt = 0;
5652 q = q + 1;
5653 L4:
5654 s0 = q * threadid + tt;
5655 e0 = s0 + q;
79acaae1 5656 V = s0 * STEP + N1;
1e8e9920 5657 if (s0 >= e0) goto L2; else goto L0;
5658 L0:
1e8e9920 5659 e = e0 * STEP + N1;
5660 L1:
5661 BODY;
5662 V += STEP;
5663 if (V cond e) goto L1;
1e8e9920 5664 L2:
5665*/
5666
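
/* Worked example of the partitioning above (a sketch, not emitted
   code): for n = 10 iterations and nthreads = 4, q = 10 / 4 = 2 and
   tt = 10 % 4 = 2, so threads 0 and 1 execute q + 1 = 3 iterations
   each and threads 2 and 3 execute 2 each:

     thread 0: s0 = 0, e0 = 3
     thread 1: s0 = 3, e0 = 6
     thread 2: s0 = 2 * 2 + 2 = 6, e0 = 8
     thread 3: s0 = 2 * 3 + 2 = 8, e0 = 10
*/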
61e47ac8 5667static void
773c5ba7 5668expand_omp_for_static_nochunk (struct omp_region *region,
bc7bff74 5669 struct omp_for_data *fd,
5670 gimple inner_stmt)
1e8e9920 5671{
31712e83 5672 tree n, q, s0, e0, e, t, tt, nthreads, threadid;
fd6481cf 5673 tree type, itype, vmain, vback;
31712e83 5674 basic_block entry_bb, second_bb, third_bb, exit_bb, seq_start_bb;
bc7bff74 5675 basic_block body_bb, cont_bb, collapse_bb = NULL;
61e47ac8 5676 basic_block fin_bb;
75a70cf9 5677 gimple_stmt_iterator gsi;
5678 gimple stmt;
31712e83 5679 edge ep;
bc7bff74 5680 enum built_in_function get_num_threads = BUILT_IN_OMP_GET_NUM_THREADS;
5681 enum built_in_function get_thread_num = BUILT_IN_OMP_GET_THREAD_NUM;
5682 bool broken_loop = region->cont == NULL;
5683 tree *counts = NULL;
5684 tree n1, n2, step;
1e8e9920 5685
fd6481cf 5686 itype = type = TREE_TYPE (fd->loop.v);
5687 if (POINTER_TYPE_P (type))
3cea8318 5688 itype = signed_type_for (type);
1e8e9920 5689
61e47ac8 5690 entry_bb = region->entry;
61e47ac8 5691 cont_bb = region->cont;
ac6e3339 5692 gcc_assert (EDGE_COUNT (entry_bb->succs) == 2);
bc7bff74 5693 fin_bb = BRANCH_EDGE (entry_bb)->dest;
5694 gcc_assert (broken_loop
5695 || (fin_bb == FALLTHRU_EDGE (cont_bb)->dest));
ac6e3339 5696 seq_start_bb = split_edge (FALLTHRU_EDGE (entry_bb));
5697 body_bb = single_succ (seq_start_bb);
bc7bff74 5698 if (!broken_loop)
5699 {
5700 gcc_assert (BRANCH_EDGE (cont_bb)->dest == body_bb);
5701 gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
5702 }
61e47ac8 5703 exit_bb = region->exit;
5704
773c5ba7 5705 /* Iteration space partitioning goes in ENTRY_BB. */
75a70cf9 5706 gsi = gsi_last_bb (entry_bb);
5707 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
61e47ac8 5708
bc7bff74 5709 if (gimple_omp_for_kind (fd->for_stmt) == GF_OMP_FOR_KIND_DISTRIBUTE)
5710 {
5711 get_num_threads = BUILT_IN_OMP_GET_NUM_TEAMS;
5712 get_thread_num = BUILT_IN_OMP_GET_TEAM_NUM;
5713 }
5714
5715 if (fd->collapse > 1)
5716 {
5717 int first_zero_iter = -1;
5718 basic_block l2_dom_bb = NULL;
5719
5720 counts = XALLOCAVEC (tree, fd->collapse);
5721 expand_omp_for_init_counts (fd, &gsi, entry_bb, counts,
5722 fin_bb, first_zero_iter,
5723 l2_dom_bb);
5724 t = NULL_TREE;
5725 }
5726 else if (gimple_omp_for_combined_into_p (fd->for_stmt))
5727 t = integer_one_node;
5728 else
5729 t = fold_binary (fd->loop.cond_code, boolean_type_node,
5730 fold_convert (type, fd->loop.n1),
5731 fold_convert (type, fd->loop.n2));
5732 if (fd->collapse == 1
5733 && TYPE_UNSIGNED (type)
8e6b4515 5734 && (t == NULL_TREE || !integer_onep (t)))
5735 {
8e6b4515 5736 n1 = fold_convert (type, unshare_expr (fd->loop.n1));
5737 n1 = force_gimple_operand_gsi (&gsi, n1, true, NULL_TREE,
5738 true, GSI_SAME_STMT);
5739 n2 = fold_convert (type, unshare_expr (fd->loop.n2));
5740 n2 = force_gimple_operand_gsi (&gsi, n2, true, NULL_TREE,
5741 true, GSI_SAME_STMT);
5742 stmt = gimple_build_cond (fd->loop.cond_code, n1, n2,
5743 NULL_TREE, NULL_TREE);
5744 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
5745 if (walk_tree (gimple_cond_lhs_ptr (stmt),
5746 expand_omp_regimplify_p, NULL, NULL)
5747 || walk_tree (gimple_cond_rhs_ptr (stmt),
5748 expand_omp_regimplify_p, NULL, NULL))
5749 {
5750 gsi = gsi_for_stmt (stmt);
5751 gimple_regimplify_operands (stmt, &gsi);
5752 }
5753 ep = split_block (entry_bb, stmt);
5754 ep->flags = EDGE_TRUE_VALUE;
5755 entry_bb = ep->dest;
5756 ep->probability = REG_BR_PROB_BASE - (REG_BR_PROB_BASE / 2000 - 1);
5757 ep = make_edge (ep->src, fin_bb, EDGE_FALSE_VALUE);
5758 ep->probability = REG_BR_PROB_BASE / 2000 - 1;
5759 if (gimple_in_ssa_p (cfun))
5760 {
5761 int dest_idx = find_edge (entry_bb, fin_bb)->dest_idx;
5762 for (gsi = gsi_start_phis (fin_bb);
5763 !gsi_end_p (gsi); gsi_next (&gsi))
5764 {
5765 gimple phi = gsi_stmt (gsi);
5766 add_phi_arg (phi, gimple_phi_arg_def (phi, dest_idx),
5767 ep, UNKNOWN_LOCATION);
5768 }
5769 }
5770 gsi = gsi_last_bb (entry_bb);
5771 }
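
  /* Note: when TYPE is unsigned and the zero-trip check above was
     emitted, it protects the trip count computation below:
     (adj + N2 - N1) / STEP would otherwise wrap around to a huge
     unsigned value when the loop runs zero times, so that case
     branches straight to FIN_BB instead.  */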
5772
bc7bff74 5773 t = build_call_expr (builtin_decl_explicit (get_num_threads), 0);
fd6481cf 5774 t = fold_convert (itype, t);
75a70cf9 5775 nthreads = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
5776 true, GSI_SAME_STMT);
48e1416a 5777
bc7bff74 5778 t = build_call_expr (builtin_decl_explicit (get_thread_num), 0);
fd6481cf 5779 t = fold_convert (itype, t);
75a70cf9 5780 threadid = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
5781 true, GSI_SAME_STMT);
1e8e9920 5782
bc7bff74 5783 n1 = fd->loop.n1;
5784 n2 = fd->loop.n2;
5785 step = fd->loop.step;
5786 if (gimple_omp_for_combined_into_p (fd->for_stmt))
5787 {
5788 tree innerc = find_omp_clause (gimple_omp_for_clauses (fd->for_stmt),
5789 OMP_CLAUSE__LOOPTEMP_);
5790 gcc_assert (innerc);
5791 n1 = OMP_CLAUSE_DECL (innerc);
5792 innerc = find_omp_clause (OMP_CLAUSE_CHAIN (innerc),
5793 OMP_CLAUSE__LOOPTEMP_);
5794 gcc_assert (innerc);
5795 n2 = OMP_CLAUSE_DECL (innerc);
5796 }
5797 n1 = force_gimple_operand_gsi (&gsi, fold_convert (type, n1),
5798 true, NULL_TREE, true, GSI_SAME_STMT);
5799 n2 = force_gimple_operand_gsi (&gsi, fold_convert (itype, n2),
5800 true, NULL_TREE, true, GSI_SAME_STMT);
5801 step = force_gimple_operand_gsi (&gsi, fold_convert (itype, step),
5802 true, NULL_TREE, true, GSI_SAME_STMT);
fd6481cf 5803
5804 t = build_int_cst (itype, (fd->loop.cond_code == LT_EXPR ? -1 : 1));
bc7bff74 5805 t = fold_build2 (PLUS_EXPR, itype, step, t);
5806 t = fold_build2 (PLUS_EXPR, itype, t, n2);
5807 t = fold_build2 (MINUS_EXPR, itype, t, fold_convert (itype, n1));
fd6481cf 5808 if (TYPE_UNSIGNED (itype) && fd->loop.cond_code == GT_EXPR)
5809 t = fold_build2 (TRUNC_DIV_EXPR, itype,
5810 fold_build1 (NEGATE_EXPR, itype, t),
bc7bff74 5811 fold_build1 (NEGATE_EXPR, itype, step));
fd6481cf 5812 else
bc7bff74 5813 t = fold_build2 (TRUNC_DIV_EXPR, itype, t, step);
fd6481cf 5814 t = fold_convert (itype, t);
75a70cf9 5815 n = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);
1e8e9920 5816
072f7ab1 5817 q = create_tmp_reg (itype, "q");
fd6481cf 5818 t = fold_build2 (TRUNC_DIV_EXPR, itype, n, nthreads);
31712e83 5819 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE, true, GSI_SAME_STMT);
5820 gsi_insert_before (&gsi, gimple_build_assign (q, t), GSI_SAME_STMT);
5821
072f7ab1 5822 tt = create_tmp_reg (itype, "tt");
31712e83 5823 t = fold_build2 (TRUNC_MOD_EXPR, itype, n, nthreads);
5824 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE, true, GSI_SAME_STMT);
5825 gsi_insert_before (&gsi, gimple_build_assign (tt, t), GSI_SAME_STMT);
1e8e9920 5826
31712e83 5827 t = build2 (LT_EXPR, boolean_type_node, threadid, tt);
5828 stmt = gimple_build_cond_empty (t);
5829 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
5830
5831 second_bb = split_block (entry_bb, stmt)->dest;
5832 gsi = gsi_last_bb (second_bb);
5833 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
5834
5835 gsi_insert_before (&gsi, gimple_build_assign (tt, build_int_cst (itype, 0)),
5836 GSI_SAME_STMT);
5837 stmt = gimple_build_assign_with_ops (PLUS_EXPR, q, q,
5838 build_int_cst (itype, 1));
5839 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
5840
5841 third_bb = split_block (second_bb, stmt)->dest;
5842 gsi = gsi_last_bb (third_bb);
5843 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
1e8e9920 5844
fd6481cf 5845 t = build2 (MULT_EXPR, itype, q, threadid);
31712e83 5846 t = build2 (PLUS_EXPR, itype, t, tt);
75a70cf9 5847 s0 = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);
1e8e9920 5848
fd6481cf 5849 t = fold_build2 (PLUS_EXPR, itype, s0, q);
75a70cf9 5850 e0 = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);
79acaae1 5851
1e8e9920 5852 t = build2 (GE_EXPR, boolean_type_node, s0, e0);
75a70cf9 5853 gsi_insert_before (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT);
773c5ba7 5854
75a70cf9 5855 /* Remove the GIMPLE_OMP_FOR statement. */
5856 gsi_remove (&gsi, true);
773c5ba7 5857
5858 /* Setup code for sequential iteration goes in SEQ_START_BB. */
75a70cf9 5859 gsi = gsi_start_bb (seq_start_bb);
1e8e9920 5860
bc7bff74 5861 tree startvar = fd->loop.v;
5862 tree endvar = NULL_TREE;
5863
5864 if (gimple_omp_for_combined_p (fd->for_stmt))
5865 {
5866 tree clauses = gimple_code (inner_stmt) == GIMPLE_OMP_PARALLEL
5867 ? gimple_omp_parallel_clauses (inner_stmt)
5868 : gimple_omp_for_clauses (inner_stmt);
5869 tree innerc = find_omp_clause (clauses, OMP_CLAUSE__LOOPTEMP_);
5870 gcc_assert (innerc);
5871 startvar = OMP_CLAUSE_DECL (innerc);
5872 innerc = find_omp_clause (OMP_CLAUSE_CHAIN (innerc),
5873 OMP_CLAUSE__LOOPTEMP_);
5874 gcc_assert (innerc);
5875 endvar = OMP_CLAUSE_DECL (innerc);
5876 }
fd6481cf 5877 t = fold_convert (itype, s0);
bc7bff74 5878 t = fold_build2 (MULT_EXPR, itype, t, step);
fd6481cf 5879 if (POINTER_TYPE_P (type))
bc7bff74 5880 t = fold_build_pointer_plus (n1, t);
fd6481cf 5881 else
bc7bff74 5882 t = fold_build2 (PLUS_EXPR, type, t, n1);
5883 t = fold_convert (TREE_TYPE (startvar), t);
4abecb72 5884 t = force_gimple_operand_gsi (&gsi, t,
bc7bff74 5885 DECL_P (startvar)
5886 && TREE_ADDRESSABLE (startvar),
4abecb72 5887 NULL_TREE, false, GSI_CONTINUE_LINKING);
bc7bff74 5888 stmt = gimple_build_assign (startvar, t);
75a70cf9 5889 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
48e1416a 5890
fd6481cf 5891 t = fold_convert (itype, e0);
bc7bff74 5892 t = fold_build2 (MULT_EXPR, itype, t, step);
fd6481cf 5893 if (POINTER_TYPE_P (type))
bc7bff74 5894 t = fold_build_pointer_plus (n1, t);
fd6481cf 5895 else
bc7bff74 5896 t = fold_build2 (PLUS_EXPR, type, t, n1);
5897 t = fold_convert (TREE_TYPE (startvar), t);
75a70cf9 5898 e = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
5899 false, GSI_CONTINUE_LINKING);
bc7bff74 5900 if (endvar)
5901 {
5902 stmt = gimple_build_assign (endvar, e);
5903 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
5904 }
5905 if (fd->collapse > 1)
5906 expand_omp_for_init_vars (fd, &gsi, counts, inner_stmt, startvar);
1e8e9920 5907
bc7bff74 5908 if (!broken_loop)
5909 {
5910 /* The code controlling the sequential loop replaces the
5911 GIMPLE_OMP_CONTINUE. */
5912 gsi = gsi_last_bb (cont_bb);
5913 stmt = gsi_stmt (gsi);
5914 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_CONTINUE);
5915 vmain = gimple_omp_continue_control_use (stmt);
5916 vback = gimple_omp_continue_control_def (stmt);
79acaae1 5917
bc7bff74 5918 if (!gimple_omp_for_combined_p (fd->for_stmt))
5919 {
5920 if (POINTER_TYPE_P (type))
5921 t = fold_build_pointer_plus (vmain, step);
5922 else
5923 t = fold_build2 (PLUS_EXPR, type, vmain, step);
5924 t = force_gimple_operand_gsi (&gsi, t,
5925 DECL_P (vback)
5926 && TREE_ADDRESSABLE (vback),
5927 NULL_TREE, true, GSI_SAME_STMT);
5928 stmt = gimple_build_assign (vback, t);
5929 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
79acaae1 5930
bc7bff74 5931 t = build2 (fd->loop.cond_code, boolean_type_node,
5932 DECL_P (vback) && TREE_ADDRESSABLE (vback)
5933 ? t : vback, e);
5934 gsi_insert_before (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT);
5935 }
1e8e9920 5936
bc7bff74 5937 /* Remove the GIMPLE_OMP_CONTINUE statement. */
5938 gsi_remove (&gsi, true);
5939
5940 if (fd->collapse > 1 && !gimple_omp_for_combined_p (fd->for_stmt))
5941 collapse_bb = extract_omp_for_update_vars (fd, cont_bb, body_bb);
5942 }
773c5ba7 5943
75a70cf9 5944 /* Replace the GIMPLE_OMP_RETURN with a barrier, or nothing. */
5945 gsi = gsi_last_bb (exit_bb);
5946 if (!gimple_omp_return_nowait_p (gsi_stmt (gsi)))
bc7bff74 5947 {
5948 t = gimple_omp_return_lhs (gsi_stmt (gsi));
5949 gsi_insert_after (&gsi, build_omp_barrier (t), GSI_SAME_STMT);
5950 }
75a70cf9 5951 gsi_remove (&gsi, true);
773c5ba7 5952
5953 /* Connect all the blocks. */
31712e83 5954 ep = make_edge (entry_bb, third_bb, EDGE_FALSE_VALUE);
5955 ep->probability = REG_BR_PROB_BASE / 4 * 3;
5956 ep = find_edge (entry_bb, second_bb);
5957 ep->flags = EDGE_TRUE_VALUE;
5958 ep->probability = REG_BR_PROB_BASE / 4;
5959 find_edge (third_bb, seq_start_bb)->flags = EDGE_FALSE_VALUE;
5960 find_edge (third_bb, fin_bb)->flags = EDGE_TRUE_VALUE;
79acaae1 5961
bc7bff74 5962 if (!broken_loop)
5963 {
5964 ep = find_edge (cont_bb, body_bb);
5965 if (gimple_omp_for_combined_p (fd->for_stmt))
5966 {
5967 remove_edge (ep);
5968 ep = NULL;
5969 }
5970 else if (fd->collapse > 1)
5971 {
5972 remove_edge (ep);
5973 ep = make_edge (cont_bb, collapse_bb, EDGE_TRUE_VALUE);
5974 }
5975 else
5976 ep->flags = EDGE_TRUE_VALUE;
5977 find_edge (cont_bb, fin_bb)->flags
5978 = ep ? EDGE_FALSE_VALUE : EDGE_FALLTHRU;
5979 }
48e1416a 5980
31712e83 5981 set_immediate_dominator (CDI_DOMINATORS, second_bb, entry_bb);
5982 set_immediate_dominator (CDI_DOMINATORS, third_bb, entry_bb);
5983 set_immediate_dominator (CDI_DOMINATORS, seq_start_bb, third_bb);
bc7bff74 5984
79acaae1 5985 set_immediate_dominator (CDI_DOMINATORS, body_bb,
5986 recompute_dominator (CDI_DOMINATORS, body_bb));
5987 set_immediate_dominator (CDI_DOMINATORS, fin_bb,
5988 recompute_dominator (CDI_DOMINATORS, fin_bb));
04c2922b 5989
bc7bff74 5990 if (!broken_loop && !gimple_omp_for_combined_p (fd->for_stmt))
5991 {
5992 struct loop *loop = alloc_loop ();
5993 loop->header = body_bb;
5994 if (collapse_bb == NULL)
5995 loop->latch = cont_bb;
5996 add_loop (loop, body_bb->loop_father);
5997 }
1e8e9920 5998}
5999
773c5ba7 6000
6001/* A subroutine of expand_omp_for. Generate code for a parallel
6002 loop with static schedule and a specified chunk size. Given
6003 parameters:
1e8e9920 6004
6005 for (V = N1; V cond N2; V += STEP) BODY;
6006
6007 where COND is "<" or ">", we generate pseudocode
6008
8e6b4515 6009 if ((__typeof (V)) -1 > 0 && N2 cond N1) goto L2;
1e8e9920 6010 if (cond is <)
6011 adj = STEP - 1;
6012 else
6013 adj = STEP + 1;
fd6481cf 6014 if ((__typeof (V)) -1 > 0 && cond is >)
6015 n = -(adj + N2 - N1) / -STEP;
6016 else
6017 n = (adj + N2 - N1) / STEP;
1e8e9920 6018 trip = 0;
79acaae1 6019 V = threadid * CHUNK * STEP + N1; -- this extra definition of V is
6020 here so that V is defined
6021 if the loop is not entered
1e8e9920 6022 L0:
6023 s0 = (trip * nthreads + threadid) * CHUNK;
6024 e0 = min(s0 + CHUNK, n);
6025 if (s0 < n) goto L1; else goto L4;
6026 L1:
6027 V = s0 * STEP + N1;
6028 e = e0 * STEP + N1;
6029 L2:
6030 BODY;
6031 V += STEP;
6032 if (V cond e) goto L2; else goto L3;
6033 L3:
6034 trip += 1;
6035 goto L0;
6036 L4:
1e8e9920 6037*/
6038
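
/* Worked example of the chunked schedule above (a sketch): with
   n = 10, nthreads = 2 and CHUNK = 3, every trip hands each thread one
   chunk of 3 iterations, clamped to n:

     thread 0: trip 0 -> [0, 3), trip 1 -> [6, 9),  trip 2 -> s0 = 12 >= n, done
     thread 1: trip 0 -> [3, 6), trip 1 -> [9, 10), trip 2 -> s0 = 15 >= n, done
*/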
61e47ac8 6039static void
bc7bff74 6040expand_omp_for_static_chunk (struct omp_region *region,
6041 struct omp_for_data *fd, gimple inner_stmt)
1e8e9920 6042{
75a70cf9 6043 tree n, s0, e0, e, t;
79acaae1 6044 tree trip_var, trip_init, trip_main, trip_back, nthreads, threadid;
75a70cf9 6045 tree type, itype, v_main, v_back, v_extra;
773c5ba7 6046 basic_block entry_bb, exit_bb, body_bb, seq_start_bb, iter_part_bb;
bc7bff74 6047 basic_block trip_update_bb = NULL, cont_bb, collapse_bb = NULL, fin_bb;
75a70cf9 6048 gimple_stmt_iterator si;
6049 gimple stmt;
6050 edge se;
bc7bff74 6051 enum built_in_function get_num_threads = BUILT_IN_OMP_GET_NUM_THREADS;
6052 enum built_in_function get_thread_num = BUILT_IN_OMP_GET_THREAD_NUM;
6053 bool broken_loop = region->cont == NULL;
6054 tree *counts = NULL;
6055 tree n1, n2, step;
1e8e9920 6056
fd6481cf 6057 itype = type = TREE_TYPE (fd->loop.v);
6058 if (POINTER_TYPE_P (type))
3cea8318 6059 itype = signed_type_for (type);
1e8e9920 6060
61e47ac8 6061 entry_bb = region->entry;
ac6e3339 6062 se = split_block (entry_bb, last_stmt (entry_bb));
6063 entry_bb = se->src;
6064 iter_part_bb = se->dest;
61e47ac8 6065 cont_bb = region->cont;
ac6e3339 6066 gcc_assert (EDGE_COUNT (iter_part_bb->succs) == 2);
bc7bff74 6067 fin_bb = BRANCH_EDGE (iter_part_bb)->dest;
6068 gcc_assert (broken_loop
6069 || fin_bb == FALLTHRU_EDGE (cont_bb)->dest);
ac6e3339 6070 seq_start_bb = split_edge (FALLTHRU_EDGE (iter_part_bb));
6071 body_bb = single_succ (seq_start_bb);
bc7bff74 6072 if (!broken_loop)
6073 {
6074 gcc_assert (BRANCH_EDGE (cont_bb)->dest == body_bb);
6075 gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
6076 trip_update_bb = split_edge (FALLTHRU_EDGE (cont_bb));
6077 }
61e47ac8 6078 exit_bb = region->exit;
773c5ba7 6079
773c5ba7 6080 /* Trip and adjustment setup goes in ENTRY_BB. */
75a70cf9 6081 si = gsi_last_bb (entry_bb);
6082 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_FOR);
773c5ba7 6083
bc7bff74 6084 if (gimple_omp_for_kind (fd->for_stmt) == GF_OMP_FOR_KIND_DISTRIBUTE)
6085 {
6086 get_num_threads = BUILT_IN_OMP_GET_NUM_TEAMS;
6087 get_thread_num = BUILT_IN_OMP_GET_TEAM_NUM;
6088 }
6089
6090 if (fd->collapse > 1)
6091 {
6092 int first_zero_iter = -1;
6093 basic_block l2_dom_bb = NULL;
6094
6095 counts = XALLOCAVEC (tree, fd->collapse);
6096 expand_omp_for_init_counts (fd, &si, entry_bb, counts,
6097 fin_bb, first_zero_iter,
6098 l2_dom_bb);
6099 t = NULL_TREE;
6100 }
6101 else if (gimple_omp_for_combined_into_p (fd->for_stmt))
6102 t = integer_one_node;
6103 else
6104 t = fold_binary (fd->loop.cond_code, boolean_type_node,
6105 fold_convert (type, fd->loop.n1),
6106 fold_convert (type, fd->loop.n2));
6107 if (fd->collapse == 1
6108 && TYPE_UNSIGNED (type)
8e6b4515 6109 && (t == NULL_TREE || !integer_onep (t)))
6110 {
8e6b4515 6111 n1 = fold_convert (type, unshare_expr (fd->loop.n1));
6112 n1 = force_gimple_operand_gsi (&si, n1, true, NULL_TREE,
6113 true, GSI_SAME_STMT);
6114 n2 = fold_convert (type, unshare_expr (fd->loop.n2));
6115 n2 = force_gimple_operand_gsi (&si, n2, true, NULL_TREE,
6116 true, GSI_SAME_STMT);
6117 stmt = gimple_build_cond (fd->loop.cond_code, n1, n2,
6118 NULL_TREE, NULL_TREE);
6119 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
6120 if (walk_tree (gimple_cond_lhs_ptr (stmt),
6121 expand_omp_regimplify_p, NULL, NULL)
6122 || walk_tree (gimple_cond_rhs_ptr (stmt),
6123 expand_omp_regimplify_p, NULL, NULL))
6124 {
6125 si = gsi_for_stmt (stmt);
6126 gimple_regimplify_operands (stmt, &si);
6127 }
6128 se = split_block (entry_bb, stmt);
6129 se->flags = EDGE_TRUE_VALUE;
6130 entry_bb = se->dest;
6131 se->probability = REG_BR_PROB_BASE - (REG_BR_PROB_BASE / 2000 - 1);
6132 se = make_edge (se->src, fin_bb, EDGE_FALSE_VALUE);
6133 se->probability = REG_BR_PROB_BASE / 2000 - 1;
6134 if (gimple_in_ssa_p (cfun))
6135 {
6136 int dest_idx = find_edge (entry_bb, fin_bb)->dest_idx;
6137 for (si = gsi_start_phis (fin_bb);
6138 !gsi_end_p (si); gsi_next (&si))
6139 {
6140 gimple phi = gsi_stmt (si);
6141 add_phi_arg (phi, gimple_phi_arg_def (phi, dest_idx),
6142 se, UNKNOWN_LOCATION);
6143 }
6144 }
6145 si = gsi_last_bb (entry_bb);
6146 }
6147
bc7bff74 6148 t = build_call_expr (builtin_decl_explicit (get_num_threads), 0);
fd6481cf 6149 t = fold_convert (itype, t);
75a70cf9 6150 nthreads = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
6151 true, GSI_SAME_STMT);
48e1416a 6152
bc7bff74 6153 t = build_call_expr (builtin_decl_explicit (get_thread_num), 0);
fd6481cf 6154 t = fold_convert (itype, t);
75a70cf9 6155 threadid = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
6156 true, GSI_SAME_STMT);
79acaae1 6157
bc7bff74 6158 n1 = fd->loop.n1;
6159 n2 = fd->loop.n2;
6160 step = fd->loop.step;
6161 if (gimple_omp_for_combined_into_p (fd->for_stmt))
6162 {
6163 tree innerc = find_omp_clause (gimple_omp_for_clauses (fd->for_stmt),
6164 OMP_CLAUSE__LOOPTEMP_);
6165 gcc_assert (innerc);
6166 n1 = OMP_CLAUSE_DECL (innerc);
6167 innerc = find_omp_clause (OMP_CLAUSE_CHAIN (innerc),
6168 OMP_CLAUSE__LOOPTEMP_);
6169 gcc_assert (innerc);
6170 n2 = OMP_CLAUSE_DECL (innerc);
6171 }
6172 n1 = force_gimple_operand_gsi (&si, fold_convert (type, n1),
6173 true, NULL_TREE, true, GSI_SAME_STMT);
6174 n2 = force_gimple_operand_gsi (&si, fold_convert (itype, n2),
6175 true, NULL_TREE, true, GSI_SAME_STMT);
6176 step = force_gimple_operand_gsi (&si, fold_convert (itype, step),
6177 true, NULL_TREE, true, GSI_SAME_STMT);
79acaae1 6178 fd->chunk_size
75a70cf9 6179 = force_gimple_operand_gsi (&si, fold_convert (itype, fd->chunk_size),
6180 true, NULL_TREE, true, GSI_SAME_STMT);
fd6481cf 6181
6182 t = build_int_cst (itype, (fd->loop.cond_code == LT_EXPR ? -1 : 1));
bc7bff74 6183 t = fold_build2 (PLUS_EXPR, itype, step, t);
6184 t = fold_build2 (PLUS_EXPR, itype, t, n2);
6185 t = fold_build2 (MINUS_EXPR, itype, t, fold_convert (itype, n1));
fd6481cf 6186 if (TYPE_UNSIGNED (itype) && fd->loop.cond_code == GT_EXPR)
6187 t = fold_build2 (TRUNC_DIV_EXPR, itype,
6188 fold_build1 (NEGATE_EXPR, itype, t),
bc7bff74 6189 fold_build1 (NEGATE_EXPR, itype, step));
fd6481cf 6190 else
bc7bff74 6191 t = fold_build2 (TRUNC_DIV_EXPR, itype, t, step);
fd6481cf 6192 t = fold_convert (itype, t);
75a70cf9 6193 n = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
6194 true, GSI_SAME_STMT);
79acaae1 6195
083152fb 6196 trip_var = create_tmp_reg (itype, ".trip");
79acaae1 6197 if (gimple_in_ssa_p (cfun))
6198 {
75a70cf9 6199 trip_init = make_ssa_name (trip_var, NULL);
6200 trip_main = make_ssa_name (trip_var, NULL);
6201 trip_back = make_ssa_name (trip_var, NULL);
79acaae1 6202 }
1e8e9920 6203 else
79acaae1 6204 {
6205 trip_init = trip_var;
6206 trip_main = trip_var;
6207 trip_back = trip_var;
6208 }
1e8e9920 6209
75a70cf9 6210 stmt = gimple_build_assign (trip_init, build_int_cst (itype, 0));
6211 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
773c5ba7 6212
fd6481cf 6213 t = fold_build2 (MULT_EXPR, itype, threadid, fd->chunk_size);
bc7bff74 6214 t = fold_build2 (MULT_EXPR, itype, t, step);
fd6481cf 6215 if (POINTER_TYPE_P (type))
bc7bff74 6216 t = fold_build_pointer_plus (n1, t);
fd6481cf 6217 else
bc7bff74 6218 t = fold_build2 (PLUS_EXPR, type, t, n1);
75a70cf9 6219 v_extra = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
6220 true, GSI_SAME_STMT);
79acaae1 6221
75a70cf9 6222 /* Remove the GIMPLE_OMP_FOR. */
6223 gsi_remove (&si, true);
773c5ba7 6224
6225 /* Iteration space partitioning goes in ITER_PART_BB. */
75a70cf9 6226 si = gsi_last_bb (iter_part_bb);
1e8e9920 6227
fd6481cf 6228 t = fold_build2 (MULT_EXPR, itype, trip_main, nthreads);
6229 t = fold_build2 (PLUS_EXPR, itype, t, threadid);
6230 t = fold_build2 (MULT_EXPR, itype, t, fd->chunk_size);
75a70cf9 6231 s0 = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
6232 false, GSI_CONTINUE_LINKING);
1e8e9920 6233
fd6481cf 6234 t = fold_build2 (PLUS_EXPR, itype, s0, fd->chunk_size);
6235 t = fold_build2 (MIN_EXPR, itype, t, n);
75a70cf9 6236 e0 = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
6237 false, GSI_CONTINUE_LINKING);
1e8e9920 6238
6239 t = build2 (LT_EXPR, boolean_type_node, s0, n);
75a70cf9 6240 gsi_insert_after (&si, gimple_build_cond_empty (t), GSI_CONTINUE_LINKING);
773c5ba7 6241
6242 /* Setup code for sequential iteration goes in SEQ_START_BB. */
75a70cf9 6243 si = gsi_start_bb (seq_start_bb);
1e8e9920 6244
bc7bff74 6245 tree startvar = fd->loop.v;
6246 tree endvar = NULL_TREE;
6247
6248 if (gimple_omp_for_combined_p (fd->for_stmt))
6249 {
6250 tree clauses = gimple_code (inner_stmt) == GIMPLE_OMP_PARALLEL
6251 ? gimple_omp_parallel_clauses (inner_stmt)
6252 : gimple_omp_for_clauses (inner_stmt);
6253 tree innerc = find_omp_clause (clauses, OMP_CLAUSE__LOOPTEMP_);
6254 gcc_assert (innerc);
6255 startvar = OMP_CLAUSE_DECL (innerc);
6256 innerc = find_omp_clause (OMP_CLAUSE_CHAIN (innerc),
6257 OMP_CLAUSE__LOOPTEMP_);
6258 gcc_assert (innerc);
6259 endvar = OMP_CLAUSE_DECL (innerc);
6260 }
6261
fd6481cf 6262 t = fold_convert (itype, s0);
bc7bff74 6263 t = fold_build2 (MULT_EXPR, itype, t, step);
fd6481cf 6264 if (POINTER_TYPE_P (type))
bc7bff74 6265 t = fold_build_pointer_plus (n1, t);
fd6481cf 6266 else
bc7bff74 6267 t = fold_build2 (PLUS_EXPR, type, t, n1);
6268 t = fold_convert (TREE_TYPE (startvar), t);
4abecb72 6269 t = force_gimple_operand_gsi (&si, t,
bc7bff74 6270 DECL_P (startvar)
6271 && TREE_ADDRESSABLE (startvar),
4abecb72 6272 NULL_TREE, false, GSI_CONTINUE_LINKING);
bc7bff74 6273 stmt = gimple_build_assign (startvar, t);
75a70cf9 6274 gsi_insert_after (&si, stmt, GSI_CONTINUE_LINKING);
1e8e9920 6275
fd6481cf 6276 t = fold_convert (itype, e0);
bc7bff74 6277 t = fold_build2 (MULT_EXPR, itype, t, step);
fd6481cf 6278 if (POINTER_TYPE_P (type))
bc7bff74 6279 t = fold_build_pointer_plus (n1, t);
fd6481cf 6280 else
bc7bff74 6281 t = fold_build2 (PLUS_EXPR, type, t, n1);
6282 t = fold_convert (TREE_TYPE (startvar), t);
75a70cf9 6283 e = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
6284 false, GSI_CONTINUE_LINKING);
bc7bff74 6285 if (endvar)
6286 {
6287 stmt = gimple_build_assign (endvar, e);
6288 gsi_insert_after (&si, stmt, GSI_CONTINUE_LINKING);
6289 }
6290 if (fd->collapse > 1)
6291 expand_omp_for_init_vars (fd, &si, counts, inner_stmt, startvar);
6292
6293 if (!broken_loop)
6294 {
6295 /* The code controlling the sequential loop goes in CONT_BB,
6296 replacing the GIMPLE_OMP_CONTINUE. */
6297 si = gsi_last_bb (cont_bb);
6298 stmt = gsi_stmt (si);
6299 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_CONTINUE);
6300 v_main = gimple_omp_continue_control_use (stmt);
6301 v_back = gimple_omp_continue_control_def (stmt);
1e8e9920 6302
bc7bff74 6303 if (!gimple_omp_for_combined_p (fd->for_stmt))
6304 {
6305 if (POINTER_TYPE_P (type))
6306 t = fold_build_pointer_plus (v_main, step);
6307 else
6308 t = fold_build2 (PLUS_EXPR, type, v_main, step);
6309 if (DECL_P (v_back) && TREE_ADDRESSABLE (v_back))
6310 t = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
6311 true, GSI_SAME_STMT);
6312 stmt = gimple_build_assign (v_back, t);
6313 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
79acaae1 6314
bc7bff74 6315 t = build2 (fd->loop.cond_code, boolean_type_node,
6316 DECL_P (v_back) && TREE_ADDRESSABLE (v_back)
6317 ? t : v_back, e);
6318 gsi_insert_before (&si, gimple_build_cond_empty (t), GSI_SAME_STMT);
6319 }
79acaae1 6320
bc7bff74 6321 /* Remove GIMPLE_OMP_CONTINUE. */
6322 gsi_remove (&si, true);
48e1416a 6323
bc7bff74 6324 if (fd->collapse > 1 && !gimple_omp_for_combined_p (fd->for_stmt))
6325 collapse_bb = extract_omp_for_update_vars (fd, cont_bb, body_bb);
773c5ba7 6326
bc7bff74 6327 /* Trip update code goes into TRIP_UPDATE_BB. */
6328 si = gsi_start_bb (trip_update_bb);
1e8e9920 6329
bc7bff74 6330 t = build_int_cst (itype, 1);
6331 t = build2 (PLUS_EXPR, itype, trip_main, t);
6332 stmt = gimple_build_assign (trip_back, t);
6333 gsi_insert_after (&si, stmt, GSI_CONTINUE_LINKING);
6334 }
1e8e9920 6335
75a70cf9 6336 /* Replace the GIMPLE_OMP_RETURN with a barrier, or nothing. */
6337 si = gsi_last_bb (exit_bb);
6338 if (!gimple_omp_return_nowait_p (gsi_stmt (si)))
bc7bff74 6339 {
6340 t = gimple_omp_return_lhs (gsi_stmt (si));
6341 gsi_insert_after (&si, build_omp_barrier (t), GSI_SAME_STMT);
6342 }
75a70cf9 6343 gsi_remove (&si, true);
1e8e9920 6344
773c5ba7 6345 /* Connect the new blocks. */
ac6e3339 6346 find_edge (iter_part_bb, seq_start_bb)->flags = EDGE_TRUE_VALUE;
6347 find_edge (iter_part_bb, fin_bb)->flags = EDGE_FALSE_VALUE;
79acaae1 6348
bc7bff74 6349 if (!broken_loop)
6350 {
6351 se = find_edge (cont_bb, body_bb);
6352 if (gimple_omp_for_combined_p (fd->for_stmt))
6353 {
6354 remove_edge (se);
6355 se = NULL;
6356 }
6357 else if (fd->collapse > 1)
6358 {
6359 remove_edge (se);
6360 se = make_edge (cont_bb, collapse_bb, EDGE_TRUE_VALUE);
6361 }
6362 else
6363 se->flags = EDGE_TRUE_VALUE;
6364 find_edge (cont_bb, trip_update_bb)->flags
6365 = se ? EDGE_FALSE_VALUE : EDGE_FALLTHRU;
79acaae1 6366
bc7bff74 6367 redirect_edge_and_branch (single_succ_edge (trip_update_bb), iter_part_bb);
6368 }
79acaae1 6369
6370 if (gimple_in_ssa_p (cfun))
6371 {
75a70cf9 6372 gimple_stmt_iterator psi;
6373 gimple phi;
6374 edge re, ene;
f1f41a6c 6375 edge_var_map_vector *head;
75a70cf9 6376 edge_var_map *vm;
6377 size_t i;
6378
bc7bff74 6379 gcc_assert (fd->collapse == 1 && !broken_loop);
6380
79acaae1 6381 /* When we redirect the edge from trip_update_bb to iter_part_bb, we
6382 remove arguments of the phi nodes in fin_bb. We need to create
6383 appropriate phi nodes in iter_part_bb instead. */
6384 se = single_pred_edge (fin_bb);
6385 re = single_succ_edge (trip_update_bb);
75a70cf9 6386 head = redirect_edge_var_map_vector (re);
79acaae1 6387 ene = single_succ_edge (entry_bb);
6388
75a70cf9 6389 psi = gsi_start_phis (fin_bb);
f1f41a6c 6390 for (i = 0; !gsi_end_p (psi) && head->iterate (i, &vm);
75a70cf9 6391 gsi_next (&psi), ++i)
79acaae1 6392 {
75a70cf9 6393 gimple nphi;
efbcb6de 6394 source_location locus;
75a70cf9 6395
6396 phi = gsi_stmt (psi);
6397 t = gimple_phi_result (phi);
6398 gcc_assert (t == redirect_edge_var_map_result (vm));
79acaae1 6399 nphi = create_phi_node (t, iter_part_bb);
79acaae1 6400
6401 t = PHI_ARG_DEF_FROM_EDGE (phi, se);
efbcb6de 6402 locus = gimple_phi_arg_location_from_edge (phi, se);
6403
fd6481cf 6404 /* A special case -- fd->loop.v is not yet computed in
 6405 	     iter_part_bb; we need to use v_extra instead.  */
6406 if (t == fd->loop.v)
79acaae1 6407 t = v_extra;
60d535d2 6408 add_phi_arg (nphi, t, ene, locus);
efbcb6de 6409 locus = redirect_edge_var_map_location (vm);
60d535d2 6410 add_phi_arg (nphi, redirect_edge_var_map_def (vm), re, locus);
75a70cf9 6411 }
f1f41a6c 6412 gcc_assert (!gsi_end_p (psi) && i == head->length ());
75a70cf9 6413 redirect_edge_var_map_clear (re);
6414 while (1)
6415 {
6416 psi = gsi_start_phis (fin_bb);
6417 if (gsi_end_p (psi))
6418 break;
6419 remove_phi_node (&psi, false);
79acaae1 6420 }
79acaae1 6421
6422 /* Make phi node for trip. */
6423 phi = create_phi_node (trip_main, iter_part_bb);
efbcb6de 6424 add_phi_arg (phi, trip_back, single_succ_edge (trip_update_bb),
60d535d2 6425 UNKNOWN_LOCATION);
efbcb6de 6426 add_phi_arg (phi, trip_init, single_succ_edge (entry_bb),
60d535d2 6427 UNKNOWN_LOCATION);
79acaae1 6428 }
6429
bc7bff74 6430 if (!broken_loop)
6431 set_immediate_dominator (CDI_DOMINATORS, trip_update_bb, cont_bb);
79acaae1 6432 set_immediate_dominator (CDI_DOMINATORS, iter_part_bb,
6433 recompute_dominator (CDI_DOMINATORS, iter_part_bb));
6434 set_immediate_dominator (CDI_DOMINATORS, fin_bb,
6435 recompute_dominator (CDI_DOMINATORS, fin_bb));
6436 set_immediate_dominator (CDI_DOMINATORS, seq_start_bb,
6437 recompute_dominator (CDI_DOMINATORS, seq_start_bb));
6438 set_immediate_dominator (CDI_DOMINATORS, body_bb,
6439 recompute_dominator (CDI_DOMINATORS, body_bb));
04c2922b 6440
bc7bff74 6441 if (!broken_loop)
6442 {
6443 struct loop *trip_loop = alloc_loop ();
6444 trip_loop->header = iter_part_bb;
6445 trip_loop->latch = trip_update_bb;
6446 add_loop (trip_loop, iter_part_bb->loop_father);
04c2922b 6447
bc7bff74 6448 if (!gimple_omp_for_combined_p (fd->for_stmt))
6449 {
6450 struct loop *loop = alloc_loop ();
6451 loop->header = body_bb;
6452 loop->latch = cont_bb;
6453 add_loop (loop, trip_loop);
6454 }
6455 }
1e8e9920 6456}
6457
bc7bff74 6458
3d483a94 6459/* A subroutine of expand_omp_for. Generate code for a simd non-worksharing
6460 loop. Given parameters:
6461
6462 for (V = N1; V cond N2; V += STEP) BODY;
6463
6464 where COND is "<" or ">", we generate pseudocode
6465
6466 V = N1;
6467 goto L1;
6468 L0:
6469 BODY;
6470 V += STEP;
6471 L1:
6472 if (V cond N2) goto L0; else goto L2;
6473 L2:
6474
6475 For collapsed loops, given parameters:
6476 collapse(3)
6477 for (V1 = N11; V1 cond1 N12; V1 += STEP1)
6478 for (V2 = N21; V2 cond2 N22; V2 += STEP2)
6479 for (V3 = N31; V3 cond3 N32; V3 += STEP3)
6480 BODY;
6481
6482 we generate pseudocode
6483
6484 if (cond3 is <)
6485 adj = STEP3 - 1;
6486 else
6487 adj = STEP3 + 1;
6488 count3 = (adj + N32 - N31) / STEP3;
6489 if (cond2 is <)
6490 adj = STEP2 - 1;
6491 else
6492 adj = STEP2 + 1;
6493 count2 = (adj + N22 - N21) / STEP2;
6494 if (cond1 is <)
6495 adj = STEP1 - 1;
6496 else
6497 adj = STEP1 + 1;
6498 count1 = (adj + N12 - N11) / STEP1;
6499 count = count1 * count2 * count3;
6500 V = 0;
6501 V1 = N11;
6502 V2 = N21;
6503 V3 = N31;
6504 goto L1;
6505 L0:
6506 BODY;
6507 V += 1;
6508 V3 += STEP3;
6509 V2 += (V3 cond3 N32) ? 0 : STEP2;
6510 V3 = (V3 cond3 N32) ? V3 : N31;
6511 V1 += (V2 cond2 N22) ? 0 : STEP1;
6512 V2 = (V2 cond2 N22) ? V2 : N21;
6513 L1:
6514 if (V < count) goto L0; else goto L2;
6515 L2:
6516
6517 */
6518
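
/* Source-level view (an illustration, not code from this pass):

     #pragma omp simd safelen(8)
     for (V = N1; V < N2; V += STEP) BODY;

   is expanded with the scheme above; no libgomp calls are emitted, and
   the safelen(8) clause is recorded further down as loop->safelen = 8,
   telling the vectorizer it may assume a vectorization factor of up to
   8 without violating dependences.  */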
6519static void
6520expand_omp_simd (struct omp_region *region, struct omp_for_data *fd)
6521{
6522 tree type, t;
6523 basic_block entry_bb, cont_bb, exit_bb, l0_bb, l1_bb, l2_bb, l2_dom_bb;
6524 gimple_stmt_iterator gsi;
6525 gimple stmt;
6526 bool broken_loop = region->cont == NULL;
6527 edge e, ne;
6528 tree *counts = NULL;
6529 int i;
6530 tree safelen = find_omp_clause (gimple_omp_for_clauses (fd->for_stmt),
6531 OMP_CLAUSE_SAFELEN);
6532 tree simduid = find_omp_clause (gimple_omp_for_clauses (fd->for_stmt),
6533 OMP_CLAUSE__SIMDUID_);
bc7bff74 6534 tree n1, n2;
3d483a94 6535
6536 type = TREE_TYPE (fd->loop.v);
6537 entry_bb = region->entry;
6538 cont_bb = region->cont;
6539 gcc_assert (EDGE_COUNT (entry_bb->succs) == 2);
6540 gcc_assert (broken_loop
6541 || BRANCH_EDGE (entry_bb)->dest == FALLTHRU_EDGE (cont_bb)->dest);
6542 l0_bb = FALLTHRU_EDGE (entry_bb)->dest;
6543 if (!broken_loop)
6544 {
6545 gcc_assert (BRANCH_EDGE (cont_bb)->dest == l0_bb);
6546 gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
6547 l1_bb = split_block (cont_bb, last_stmt (cont_bb))->dest;
6548 l2_bb = BRANCH_EDGE (entry_bb)->dest;
6549 }
6550 else
6551 {
6552 BRANCH_EDGE (entry_bb)->flags &= ~EDGE_ABNORMAL;
6553 l1_bb = split_edge (BRANCH_EDGE (entry_bb));
6554 l2_bb = single_succ (l1_bb);
6555 }
6556 exit_bb = region->exit;
6557 l2_dom_bb = NULL;
6558
6559 gsi = gsi_last_bb (entry_bb);
6560
6561 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
6562 /* Not needed in SSA form right now. */
6563 gcc_assert (!gimple_in_ssa_p (cfun));
6564 if (fd->collapse > 1)
6565 {
6566 int first_zero_iter = -1;
6567 basic_block zero_iter_bb = l2_bb;
6568
6569 counts = XALLOCAVEC (tree, fd->collapse);
6570 expand_omp_for_init_counts (fd, &gsi, entry_bb, counts,
6571 zero_iter_bb, first_zero_iter,
6572 l2_dom_bb);
6573 }
6574 if (l2_dom_bb == NULL)
6575 l2_dom_bb = l1_bb;
6576
bc7bff74 6577 n1 = fd->loop.n1;
3d483a94 6578 n2 = fd->loop.n2;
bc7bff74 6579 if (gimple_omp_for_combined_into_p (fd->for_stmt))
6580 {
6581 tree innerc = find_omp_clause (gimple_omp_for_clauses (fd->for_stmt),
6582 OMP_CLAUSE__LOOPTEMP_);
6583 gcc_assert (innerc);
6584 n1 = OMP_CLAUSE_DECL (innerc);
6585 innerc = find_omp_clause (OMP_CLAUSE_CHAIN (innerc),
6586 OMP_CLAUSE__LOOPTEMP_);
6587 gcc_assert (innerc);
6588 n2 = OMP_CLAUSE_DECL (innerc);
6589 expand_omp_build_assign (&gsi, fd->loop.v,
6590 fold_convert (type, n1));
6591 if (fd->collapse > 1)
6592 {
6593 gsi_prev (&gsi);
6594 expand_omp_for_init_vars (fd, &gsi, counts, NULL, n1);
6595 gsi_next (&gsi);
6596 }
6597 }
3d483a94 6598 else
6599 {
6600 expand_omp_build_assign (&gsi, fd->loop.v,
6601 fold_convert (type, fd->loop.n1));
6602 if (fd->collapse > 1)
6603 for (i = 0; i < fd->collapse; i++)
6604 {
6605 tree itype = TREE_TYPE (fd->loops[i].v);
6606 if (POINTER_TYPE_P (itype))
6607 itype = signed_type_for (itype);
6608 t = fold_convert (TREE_TYPE (fd->loops[i].v), fd->loops[i].n1);
6609 expand_omp_build_assign (&gsi, fd->loops[i].v, t);
6610 }
6611 }
6612
6613 /* Remove the GIMPLE_OMP_FOR statement. */
6614 gsi_remove (&gsi, true);
6615
6616 if (!broken_loop)
6617 {
6618 /* Code to control the increment goes in the CONT_BB. */
6619 gsi = gsi_last_bb (cont_bb);
6620 stmt = gsi_stmt (gsi);
6621 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_CONTINUE);
6622
6623 if (POINTER_TYPE_P (type))
6624 t = fold_build_pointer_plus (fd->loop.v, fd->loop.step);
6625 else
6626 t = fold_build2 (PLUS_EXPR, type, fd->loop.v, fd->loop.step);
6627 expand_omp_build_assign (&gsi, fd->loop.v, t);
6628
6629 if (fd->collapse > 1)
6630 {
6631 i = fd->collapse - 1;
6632 if (POINTER_TYPE_P (TREE_TYPE (fd->loops[i].v)))
6633 {
6634 t = fold_convert (sizetype, fd->loops[i].step);
6635 t = fold_build_pointer_plus (fd->loops[i].v, t);
6636 }
6637 else
6638 {
6639 t = fold_convert (TREE_TYPE (fd->loops[i].v),
6640 fd->loops[i].step);
6641 t = fold_build2 (PLUS_EXPR, TREE_TYPE (fd->loops[i].v),
6642 fd->loops[i].v, t);
6643 }
6644 expand_omp_build_assign (&gsi, fd->loops[i].v, t);
6645
6646 for (i = fd->collapse - 1; i > 0; i--)
6647 {
6648 tree itype = TREE_TYPE (fd->loops[i].v);
6649 tree itype2 = TREE_TYPE (fd->loops[i - 1].v);
6650 if (POINTER_TYPE_P (itype2))
6651 itype2 = signed_type_for (itype2);
6652 t = build3 (COND_EXPR, itype2,
6653 build2 (fd->loops[i].cond_code, boolean_type_node,
6654 fd->loops[i].v,
6655 fold_convert (itype, fd->loops[i].n2)),
6656 build_int_cst (itype2, 0),
6657 fold_convert (itype2, fd->loops[i - 1].step));
6658 if (POINTER_TYPE_P (TREE_TYPE (fd->loops[i - 1].v)))
6659 t = fold_build_pointer_plus (fd->loops[i - 1].v, t);
6660 else
6661 t = fold_build2 (PLUS_EXPR, itype2, fd->loops[i - 1].v, t);
6662 expand_omp_build_assign (&gsi, fd->loops[i - 1].v, t);
6663
6664 t = build3 (COND_EXPR, itype,
6665 build2 (fd->loops[i].cond_code, boolean_type_node,
6666 fd->loops[i].v,
6667 fold_convert (itype, fd->loops[i].n2)),
6668 fd->loops[i].v,
6669 fold_convert (itype, fd->loops[i].n1));
6670 expand_omp_build_assign (&gsi, fd->loops[i].v, t);
6671 }
6672 }
6673
6674 /* Remove GIMPLE_OMP_CONTINUE. */
6675 gsi_remove (&gsi, true);
6676 }
6677
6678 /* Emit the condition in L1_BB. */
6679 gsi = gsi_start_bb (l1_bb);
6680
6681 t = fold_convert (type, n2);
6682 t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
6683 false, GSI_CONTINUE_LINKING);
6684 t = build2 (fd->loop.cond_code, boolean_type_node, fd->loop.v, t);
6685 stmt = gimple_build_cond_empty (t);
6686 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
6687 if (walk_tree (gimple_cond_lhs_ptr (stmt), expand_omp_regimplify_p,
6688 NULL, NULL)
6689 || walk_tree (gimple_cond_rhs_ptr (stmt), expand_omp_regimplify_p,
6690 NULL, NULL))
6691 {
6692 gsi = gsi_for_stmt (stmt);
6693 gimple_regimplify_operands (stmt, &gsi);
6694 }
6695
6696 /* Remove GIMPLE_OMP_RETURN. */
6697 gsi = gsi_last_bb (exit_bb);
6698 gsi_remove (&gsi, true);
6699
6700 /* Connect the new blocks. */
6701 remove_edge (FALLTHRU_EDGE (entry_bb));
6702
6703 if (!broken_loop)
6704 {
6705 remove_edge (BRANCH_EDGE (entry_bb));
6706 make_edge (entry_bb, l1_bb, EDGE_FALLTHRU);
6707
6708 e = BRANCH_EDGE (l1_bb);
6709 ne = FALLTHRU_EDGE (l1_bb);
6710 e->flags = EDGE_TRUE_VALUE;
6711 }
6712 else
6713 {
6714 single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
6715
6716 ne = single_succ_edge (l1_bb);
6717 e = make_edge (l1_bb, l0_bb, EDGE_TRUE_VALUE);
6719 }
6720 ne->flags = EDGE_FALSE_VALUE;
6721 e->probability = REG_BR_PROB_BASE * 7 / 8;
6722 ne->probability = REG_BR_PROB_BASE / 8;
6723
6724 set_immediate_dominator (CDI_DOMINATORS, l1_bb, entry_bb);
6725 set_immediate_dominator (CDI_DOMINATORS, l2_bb, l2_dom_bb);
6726 set_immediate_dominator (CDI_DOMINATORS, l0_bb, l1_bb);
6727
6728 if (!broken_loop)
6729 {
6730 struct loop *loop = alloc_loop ();
6731 loop->header = l1_bb;
6732 loop->latch = e->dest;
6733 add_loop (loop, l1_bb->loop_father);
6734 if (safelen == NULL_TREE)
6735 loop->safelen = INT_MAX;
6736 else
6737 {
6738 safelen = OMP_CLAUSE_SAFELEN_EXPR (safelen);
6739 if (!host_integerp (safelen, 1)
6740 || (unsigned HOST_WIDE_INT) tree_low_cst (safelen, 1)
6741 > INT_MAX)
6742 loop->safelen = INT_MAX;
6743 else
6744 loop->safelen = tree_low_cst (safelen, 1);
6745 if (loop->safelen == 1)
6746 loop->safelen = 0;
6747 }
6748 if (simduid)
6749 {
6750 loop->simduid = OMP_CLAUSE__SIMDUID__DECL (simduid);
6751 cfun->has_simduid_loops = true;
6752 }
043115ec 6753	  /* If -ftree-loop-vectorize is enabled, or neither vectorizer flag
3d483a94 6754	     was given explicitly, hint that we want to vectorize the loop.  */
043115ec 6755 if ((flag_tree_loop_vectorize
6756 || (!global_options_set.x_flag_tree_loop_vectorize
6757 && !global_options_set.x_flag_tree_vectorize))
3d483a94 6758 && loop->safelen > 1)
6759 {
6760 loop->force_vect = true;
6761 cfun->has_force_vect_loops = true;
6762 }
6763 }
6764}
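
/* Illustrative sketch (not part of the pass): for a loop such as

     #pragma omp simd safelen(8)
     for (i = 0; i < n; i++)
       a[i] += b[i];

   the struct loop built above records loop->safelen == 8 (INT_MAX when
   the clause is absent, 0 when safelen(1) leaves nothing to vectorize),
   and, when a _simduid_ clause is present, loop->simduid lets the
   vectorizer find the loop's per-SIMD-lane privatized variables.  */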
6765
1e8e9920 6766
773c5ba7 6767/* Expand the OpenMP loop defined by REGION. */
1e8e9920 6768
773c5ba7 6769static void
bc7bff74 6770expand_omp_for (struct omp_region *region, gimple inner_stmt)
773c5ba7 6771{
6772 struct omp_for_data fd;
fd6481cf 6773 struct omp_for_data_loop *loops;
1e8e9920 6774
fd6481cf 6775 loops
6776 = (struct omp_for_data_loop *)
75a70cf9 6777 alloca (gimple_omp_for_collapse (last_stmt (region->entry))
fd6481cf 6778 * sizeof (struct omp_for_data_loop));
fd6481cf 6779 extract_omp_for_data (last_stmt (region->entry), &fd, loops);
f77459c5 6780 region->sched_kind = fd.sched_kind;
1e8e9920 6781
b3a3ddec 6782 gcc_assert (EDGE_COUNT (region->entry->succs) == 2);
6783 BRANCH_EDGE (region->entry)->flags &= ~EDGE_ABNORMAL;
6784 FALLTHRU_EDGE (region->entry)->flags &= ~EDGE_ABNORMAL;
6785 if (region->cont)
6786 {
6787 gcc_assert (EDGE_COUNT (region->cont->succs) == 2);
6788 BRANCH_EDGE (region->cont)->flags &= ~EDGE_ABNORMAL;
6789 FALLTHRU_EDGE (region->cont)->flags &= ~EDGE_ABNORMAL;
6790 }
04c2922b 6791 else
75de4aa2 6792	    /* If there isn't a continue then this is a degenerate case where
04c2922b 6793 the introduction of abnormal edges during lowering will prevent
6794 original loops from being detected. Fix that up. */
6795 loops_state_set (LOOPS_NEED_FIXUP);
b3a3ddec 6796
3d483a94 6797 if (gimple_omp_for_kind (fd.for_stmt) == GF_OMP_FOR_KIND_SIMD)
6798 expand_omp_simd (region, &fd);
6799 else if (fd.sched_kind == OMP_CLAUSE_SCHEDULE_STATIC
bc7bff74 6800 && !fd.have_ordered)
1e8e9920 6801 {
6802 if (fd.chunk_size == NULL)
bc7bff74 6803 expand_omp_for_static_nochunk (region, &fd, inner_stmt);
1e8e9920 6804 else
bc7bff74 6805 expand_omp_for_static_chunk (region, &fd, inner_stmt);
1e8e9920 6806 }
6807 else
6808 {
fd6481cf 6809 int fn_index, start_ix, next_ix;
6810
3d483a94 6811 gcc_assert (gimple_omp_for_kind (fd.for_stmt)
6812 == GF_OMP_FOR_KIND_FOR);
0416ca72 6813 if (fd.chunk_size == NULL
6814 && fd.sched_kind == OMP_CLAUSE_SCHEDULE_STATIC)
6815 fd.chunk_size = integer_zero_node;
fd6481cf 6816 gcc_assert (fd.sched_kind != OMP_CLAUSE_SCHEDULE_AUTO);
6817 fn_index = (fd.sched_kind == OMP_CLAUSE_SCHEDULE_RUNTIME)
75a70cf9 6818 ? 3 : fd.sched_kind;
fd6481cf 6819 fn_index += fd.have_ordered * 4;
b9a16870 6820 start_ix = ((int)BUILT_IN_GOMP_LOOP_STATIC_START) + fn_index;
6821 next_ix = ((int)BUILT_IN_GOMP_LOOP_STATIC_NEXT) + fn_index;
fd6481cf 6822 if (fd.iter_type == long_long_unsigned_type_node)
6823 {
b9a16870 6824 start_ix += ((int)BUILT_IN_GOMP_LOOP_ULL_STATIC_START
6825 - (int)BUILT_IN_GOMP_LOOP_STATIC_START);
6826 next_ix += ((int)BUILT_IN_GOMP_LOOP_ULL_STATIC_NEXT
6827 - (int)BUILT_IN_GOMP_LOOP_STATIC_NEXT);
fd6481cf 6828 }
b9c74b4d 6829 expand_omp_for_generic (region, &fd, (enum built_in_function) start_ix,
bc7bff74 6830 (enum built_in_function) next_ix, inner_stmt);
1e8e9920 6831 }
28c92cbb 6832
083152fb 6833 if (gimple_in_ssa_p (cfun))
6834 update_ssa (TODO_update_ssa_only_virtuals);
1e8e9920 6835}
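
/* Illustrative sketch (not part of the pass): for

     #pragma omp for schedule(dynamic, 4)

   fd.sched_kind is OMP_CLAUSE_SCHEDULE_DYNAMIC, so fn_index is 1 and
   expand_omp_for_generic is handed BUILT_IN_GOMP_LOOP_DYNAMIC_START and
   BUILT_IN_GOMP_LOOP_DYNAMIC_NEXT.  An ordered clause shifts both
   indices by 4 to the *_ORDERED_* entry points, and a long long
   unsigned iteration type relocates them into the *_ULL_* block.  */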
6836
1e8e9920 6837
6838/* Expand code for an OpenMP sections directive. In pseudo code, we generate
6839
1e8e9920 6840 v = GOMP_sections_start (n);
6841 L0:
6842 switch (v)
6843 {
6844 case 0:
6845 goto L2;
6846 case 1:
6847 section 1;
6848 goto L1;
6849 case 2:
6850 ...
6851 case n:
6852 ...
1e8e9920 6853 default:
6854 abort ();
6855 }
6856 L1:
6857 v = GOMP_sections_next ();
6858 goto L0;
6859 L2:
6860 reduction;
6861
773c5ba7 6862 If this is a combined parallel sections, replace the call to
79acaae1 6863 GOMP_sections_start with call to GOMP_sections_next. */
1e8e9920 6864
6865static void
773c5ba7 6866expand_omp_sections (struct omp_region *region)
1e8e9920 6867{
f018d957 6868 tree t, u, vin = NULL, vmain, vnext, l2;
f1f41a6c 6869 vec<tree> label_vec;
75a70cf9 6870 unsigned len;
ac6e3339 6871 basic_block entry_bb, l0_bb, l1_bb, l2_bb, default_bb;
75a70cf9 6872 gimple_stmt_iterator si, switch_si;
6873 gimple sections_stmt, stmt, cont;
9884aaf8 6874 edge_iterator ei;
6875 edge e;
61e47ac8 6876 struct omp_region *inner;
75a70cf9 6877 unsigned i, casei;
ac6e3339 6878 bool exit_reachable = region->cont != NULL;
1e8e9920 6879
d244d9de 6880 gcc_assert (region->exit != NULL);
61e47ac8 6881 entry_bb = region->entry;
ac6e3339 6882 l0_bb = single_succ (entry_bb);
61e47ac8 6883 l1_bb = region->cont;
ac6e3339 6884 l2_bb = region->exit;
d244d9de 6885 if (single_pred_p (l2_bb) && single_pred (l2_bb) == l0_bb)
6886 l2 = gimple_block_label (l2_bb);
6887 else
03ed154b 6888 {
d244d9de 6889 /* This can happen if there are reductions. */
6890 len = EDGE_COUNT (l0_bb->succs);
6891 gcc_assert (len > 0);
6892 e = EDGE_SUCC (l0_bb, len - 1);
6893 si = gsi_last_bb (e->dest);
6894 l2 = NULL_TREE;
6895 if (gsi_end_p (si)
6896 || gimple_code (gsi_stmt (si)) != GIMPLE_OMP_SECTION)
6897 l2 = gimple_block_label (e->dest);
9884aaf8 6898 else
d244d9de 6899 FOR_EACH_EDGE (e, ei, l0_bb->succs)
6900 {
6901 si = gsi_last_bb (e->dest);
6902 if (gsi_end_p (si)
6903 || gimple_code (gsi_stmt (si)) != GIMPLE_OMP_SECTION)
9884aaf8 6904 {
d244d9de 6905 l2 = gimple_block_label (e->dest);
6906 break;
9884aaf8 6907 }
d244d9de 6908 }
03ed154b 6909 }
d244d9de 6910 if (exit_reachable)
6911 default_bb = create_empty_bb (l1_bb->prev_bb);
03ed154b 6912 else
d244d9de 6913 default_bb = create_empty_bb (l0_bb);
773c5ba7 6914
6915 /* We will build a switch() with enough cases for all the
75a70cf9 6916	     GIMPLE_OMP_SECTION regions, a '0' case to handle when no more work remains,
773c5ba7 6917 and a default case to abort if something goes wrong. */
ac6e3339 6918 len = EDGE_COUNT (l0_bb->succs);
75a70cf9 6919
f1f41a6c 6920 /* Use vec::quick_push on label_vec throughout, since we know the size
75a70cf9 6921 in advance. */
f1f41a6c 6922 label_vec.create (len);
1e8e9920 6923
61e47ac8 6924 /* The call to GOMP_sections_start goes in ENTRY_BB, replacing the
75a70cf9 6925 GIMPLE_OMP_SECTIONS statement. */
6926 si = gsi_last_bb (entry_bb);
6927 sections_stmt = gsi_stmt (si);
6928 gcc_assert (gimple_code (sections_stmt) == GIMPLE_OMP_SECTIONS);
6929 vin = gimple_omp_sections_control (sections_stmt);
773c5ba7 6930 if (!is_combined_parallel (region))
1e8e9920 6931 {
773c5ba7 6932 /* If we are not inside a combined parallel+sections region,
6933 call GOMP_sections_start. */
39cb6d68 6934 t = build_int_cst (unsigned_type_node, len - 1);
b9a16870 6935 u = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_START);
75a70cf9 6936 stmt = gimple_build_call (u, 1, t);
1e8e9920 6937 }
79acaae1 6938 else
6939 {
6940 /* Otherwise, call GOMP_sections_next. */
b9a16870 6941 u = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_NEXT);
75a70cf9 6942 stmt = gimple_build_call (u, 0);
79acaae1 6943 }
75a70cf9 6944 gimple_call_set_lhs (stmt, vin);
6945 gsi_insert_after (&si, stmt, GSI_SAME_STMT);
6946 gsi_remove (&si, true);
6947
6948 /* The switch() statement replacing GIMPLE_OMP_SECTIONS_SWITCH goes in
6949 L0_BB. */
6950 switch_si = gsi_last_bb (l0_bb);
6951 gcc_assert (gimple_code (gsi_stmt (switch_si)) == GIMPLE_OMP_SECTIONS_SWITCH);
79acaae1 6952 if (exit_reachable)
6953 {
6954 cont = last_stmt (l1_bb);
75a70cf9 6955 gcc_assert (gimple_code (cont) == GIMPLE_OMP_CONTINUE);
6956 vmain = gimple_omp_continue_control_use (cont);
6957 vnext = gimple_omp_continue_control_def (cont);
79acaae1 6958 }
6959 else
6960 {
6961 vmain = vin;
6962 vnext = NULL_TREE;
6963 }
1e8e9920 6964
d244d9de 6965 t = build_case_label (build_int_cst (unsigned_type_node, 0), NULL, l2);
f1f41a6c 6966 label_vec.quick_push (t);
d244d9de 6967 i = 1;
03ed154b 6968
75a70cf9 6969 /* Convert each GIMPLE_OMP_SECTION into a CASE_LABEL_EXPR. */
ac6e3339 6970 for (inner = region->inner, casei = 1;
6971 inner;
6972 inner = inner->next, i++, casei++)
1e8e9920 6973 {
773c5ba7 6974 basic_block s_entry_bb, s_exit_bb;
6975
9884aaf8 6976 /* Skip optional reduction region. */
75a70cf9 6977 if (inner->type == GIMPLE_OMP_ATOMIC_LOAD)
9884aaf8 6978 {
6979 --i;
6980 --casei;
6981 continue;
6982 }
6983
61e47ac8 6984 s_entry_bb = inner->entry;
6985 s_exit_bb = inner->exit;
1e8e9920 6986
75a70cf9 6987 t = gimple_block_label (s_entry_bb);
ac6e3339 6988 u = build_int_cst (unsigned_type_node, casei);
b6e3dd65 6989 u = build_case_label (u, NULL, t);
f1f41a6c 6990 label_vec.quick_push (u);
61e47ac8 6991
75a70cf9 6992 si = gsi_last_bb (s_entry_bb);
6993 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SECTION);
6994 gcc_assert (i < len || gimple_omp_section_last_p (gsi_stmt (si)));
6995 gsi_remove (&si, true);
61e47ac8 6996 single_succ_edge (s_entry_bb)->flags = EDGE_FALLTHRU;
03ed154b 6997
6998 if (s_exit_bb == NULL)
6999 continue;
7000
75a70cf9 7001 si = gsi_last_bb (s_exit_bb);
7002 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_RETURN);
7003 gsi_remove (&si, true);
03ed154b 7004
773c5ba7 7005 single_succ_edge (s_exit_bb)->flags = EDGE_FALLTHRU;
1e8e9920 7006 }
7007
773c5ba7 7008 /* Error handling code goes in DEFAULT_BB. */
75a70cf9 7009 t = gimple_block_label (default_bb);
b6e3dd65 7010 u = build_case_label (NULL, NULL, t);
61e47ac8 7011 make_edge (l0_bb, default_bb, 0);
f6568ea4 7012 if (current_loops)
04c2922b 7013 add_bb_to_loop (default_bb, current_loops->tree_root);
1e8e9920 7014
49a70175 7015 stmt = gimple_build_switch (vmain, u, label_vec);
75a70cf9 7016 gsi_insert_after (&switch_si, stmt, GSI_SAME_STMT);
7017 gsi_remove (&switch_si, true);
f1f41a6c 7018 label_vec.release ();
75a70cf9 7019
7020 si = gsi_start_bb (default_bb);
b9a16870 7021 stmt = gimple_build_call (builtin_decl_explicit (BUILT_IN_TRAP), 0);
75a70cf9 7022 gsi_insert_after (&si, stmt, GSI_CONTINUE_LINKING);
773c5ba7 7023
ac6e3339 7024 if (exit_reachable)
03ed154b 7025 {
b9a16870 7026 tree bfn_decl;
7027
ac6e3339 7028 /* Code to get the next section goes in L1_BB. */
75a70cf9 7029 si = gsi_last_bb (l1_bb);
7030 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_CONTINUE);
1e8e9920 7031
b9a16870 7032 bfn_decl = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_NEXT);
7033 stmt = gimple_build_call (bfn_decl, 0);
75a70cf9 7034 gimple_call_set_lhs (stmt, vnext);
7035 gsi_insert_after (&si, stmt, GSI_SAME_STMT);
7036 gsi_remove (&si, true);
773c5ba7 7037
ac6e3339 7038 single_succ_edge (l1_bb)->flags = EDGE_FALLTHRU;
03ed154b 7039 }
773c5ba7 7040
d244d9de 7041 /* Cleanup function replaces GIMPLE_OMP_RETURN in EXIT_BB. */
7042 si = gsi_last_bb (l2_bb);
7043 if (gimple_omp_return_nowait_p (gsi_stmt (si)))
7044 t = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_END_NOWAIT);
bc7bff74 7045 else if (gimple_omp_return_lhs (gsi_stmt (si)))
7046 t = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_END_CANCEL);
d244d9de 7047 else
7048 t = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_END);
7049 stmt = gimple_build_call (t, 0);
bc7bff74 7050 if (gimple_omp_return_lhs (gsi_stmt (si)))
7051 gimple_call_set_lhs (stmt, gimple_omp_return_lhs (gsi_stmt (si)));
d244d9de 7052 gsi_insert_after (&si, stmt, GSI_SAME_STMT);
7053 gsi_remove (&si, true);
7054
79acaae1 7055 set_immediate_dominator (CDI_DOMINATORS, default_bb, l0_bb);
773c5ba7 7056}
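
/* Illustrative sketch (not part of the pass): a source-level

     #pragma omp sections
     {
       #pragma omp section
       foo ();
       #pragma omp section
       bar ();
     }

   is expanded per the scheme above into, roughly,

     for (v = GOMP_sections_start (2); v != 0; v = GOMP_sections_next ())
       switch (v) { case 1: foo (); break; case 2: bar (); break; }
     GOMP_sections_end ();

   modulo the explicit control flow and the default case calling
   __builtin_trap that this function builds.  */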
1e8e9920 7057
1e8e9920 7058
61e47ac8 7059/* Expand code for an OpenMP single directive. We've already expanded
7060 much of the code, here we simply place the GOMP_barrier call. */
7061
7062static void
7063expand_omp_single (struct omp_region *region)
7064{
7065 basic_block entry_bb, exit_bb;
75a70cf9 7066 gimple_stmt_iterator si;
61e47ac8 7067
7068 entry_bb = region->entry;
7069 exit_bb = region->exit;
7070
75a70cf9 7071 si = gsi_last_bb (entry_bb);
75a70cf9 7072 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SINGLE);
7073 gsi_remove (&si, true);
61e47ac8 7074 single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
7075
75a70cf9 7076 si = gsi_last_bb (exit_bb);
bc7bff74 7077 if (!gimple_omp_return_nowait_p (gsi_stmt (si)))
7078 {
7079 tree t = gimple_omp_return_lhs (gsi_stmt (si));
7080 gsi_insert_after (&si, build_omp_barrier (t), GSI_SAME_STMT);
7081 }
75a70cf9 7082 gsi_remove (&si, true);
61e47ac8 7083 single_succ_edge (exit_bb)->flags = EDGE_FALLTHRU;
7084}
7085
7086
7087/* Generic expansion for OpenMP synchronization directives: master,
7088 ordered and critical. All we need to do here is remove the entry
7089 and exit markers for REGION. */
773c5ba7 7090
7091static void
7092expand_omp_synch (struct omp_region *region)
7093{
7094 basic_block entry_bb, exit_bb;
75a70cf9 7095 gimple_stmt_iterator si;
773c5ba7 7096
61e47ac8 7097 entry_bb = region->entry;
7098 exit_bb = region->exit;
773c5ba7 7099
75a70cf9 7100 si = gsi_last_bb (entry_bb);
7101 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SINGLE
7102 || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_MASTER
bc7bff74 7103 || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_TASKGROUP
75a70cf9 7104 || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ORDERED
bc7bff74 7105 || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_CRITICAL
7106 || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_TEAMS);
75a70cf9 7107 gsi_remove (&si, true);
773c5ba7 7108 single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
7109
03ed154b 7110 if (exit_bb)
7111 {
75a70cf9 7112 si = gsi_last_bb (exit_bb);
7113 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_RETURN);
7114 gsi_remove (&si, true);
03ed154b 7115 single_succ_edge (exit_bb)->flags = EDGE_FALLTHRU;
7116 }
773c5ba7 7117}
1e8e9920 7118
2169f33b 7119/* A subroutine of expand_omp_atomic. Attempt to implement the atomic
7120 operation as a normal volatile load. */
7121
7122static bool
3ec11c49 7123expand_omp_atomic_load (basic_block load_bb, tree addr,
7124 tree loaded_val, int index)
2169f33b 7125{
3ec11c49 7126 enum built_in_function tmpbase;
7127 gimple_stmt_iterator gsi;
7128 basic_block store_bb;
7129 location_t loc;
7130 gimple stmt;
7131 tree decl, call, type, itype;
7132
7133 gsi = gsi_last_bb (load_bb);
7134 stmt = gsi_stmt (gsi);
7135 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_ATOMIC_LOAD);
7136 loc = gimple_location (stmt);
7137
7138 /* ??? If the target does not implement atomic_load_optab[mode], and mode
7139 is smaller than word size, then expand_atomic_load assumes that the load
7140 is atomic. We could avoid the builtin entirely in this case. */
7141
7142 tmpbase = (enum built_in_function) (BUILT_IN_ATOMIC_LOAD_N + index + 1);
7143 decl = builtin_decl_explicit (tmpbase);
7144 if (decl == NULL_TREE)
7145 return false;
7146
7147 type = TREE_TYPE (loaded_val);
7148 itype = TREE_TYPE (TREE_TYPE (decl));
7149
7150 call = build_call_expr_loc (loc, decl, 2, addr,
bc7bff74 7151 build_int_cst (NULL,
7152 gimple_omp_atomic_seq_cst_p (stmt)
7153 ? MEMMODEL_SEQ_CST
7154 : MEMMODEL_RELAXED));
3ec11c49 7155 if (!useless_type_conversion_p (type, itype))
7156 call = fold_build1_loc (loc, VIEW_CONVERT_EXPR, type, call);
7157 call = build2_loc (loc, MODIFY_EXPR, void_type_node, loaded_val, call);
7158
7159 force_gimple_operand_gsi (&gsi, call, true, NULL_TREE, true, GSI_SAME_STMT);
7160 gsi_remove (&gsi, true);
7161
7162 store_bb = single_succ (load_bb);
7163 gsi = gsi_last_bb (store_bb);
7164 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_ATOMIC_STORE);
7165 gsi_remove (&gsi, true);
7166
7167 if (gimple_in_ssa_p (cfun))
7168 update_ssa (TODO_update_ssa_no_phi);
7169
7170 return true;
2169f33b 7171}
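
/* Illustrative sketch (not part of the pass): for

     #pragma omp atomic read
     v = x;

   with int x, index is 2, tmpbase resolves to __atomic_load_4, and the
   load/store marker pair collapses into roughly

     v = __atomic_load_4 (&x, MEMMODEL_RELAXED);

   with MEMMODEL_SEQ_CST instead when the seq_cst clause is present.  */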
7172
7173/* A subroutine of expand_omp_atomic. Attempt to implement the atomic
7174 operation as a normal volatile store. */
7175
7176static bool
3ec11c49 7177expand_omp_atomic_store (basic_block load_bb, tree addr,
7178 tree loaded_val, tree stored_val, int index)
2169f33b 7179{
3ec11c49 7180 enum built_in_function tmpbase;
7181 gimple_stmt_iterator gsi;
7182 basic_block store_bb = single_succ (load_bb);
7183 location_t loc;
7184 gimple stmt;
7185 tree decl, call, type, itype;
7186 enum machine_mode imode;
7187 bool exchange;
7188
7189 gsi = gsi_last_bb (load_bb);
7190 stmt = gsi_stmt (gsi);
7191 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_ATOMIC_LOAD);
7192
7193 /* If the load value is needed, then this isn't a store but an exchange. */
7194 exchange = gimple_omp_atomic_need_value_p (stmt);
7195
7196 gsi = gsi_last_bb (store_bb);
7197 stmt = gsi_stmt (gsi);
7198 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_ATOMIC_STORE);
7199 loc = gimple_location (stmt);
7200
7201 /* ??? If the target does not implement atomic_store_optab[mode], and mode
7202 is smaller than word size, then expand_atomic_store assumes that the store
7203 is atomic. We could avoid the builtin entirely in this case. */
7204
7205 tmpbase = (exchange ? BUILT_IN_ATOMIC_EXCHANGE_N : BUILT_IN_ATOMIC_STORE_N);
7206 tmpbase = (enum built_in_function) ((int) tmpbase + index + 1);
7207 decl = builtin_decl_explicit (tmpbase);
7208 if (decl == NULL_TREE)
7209 return false;
7210
7211 type = TREE_TYPE (stored_val);
7212
7213 /* Dig out the type of the function's second argument. */
7214 itype = TREE_TYPE (decl);
7215 itype = TYPE_ARG_TYPES (itype);
7216 itype = TREE_CHAIN (itype);
7217 itype = TREE_VALUE (itype);
7218 imode = TYPE_MODE (itype);
7219
7220 if (exchange && !can_atomic_exchange_p (imode, true))
7221 return false;
7222
7223 if (!useless_type_conversion_p (itype, type))
7224 stored_val = fold_build1_loc (loc, VIEW_CONVERT_EXPR, itype, stored_val);
7225 call = build_call_expr_loc (loc, decl, 3, addr, stored_val,
bc7bff74 7226 build_int_cst (NULL,
7227 gimple_omp_atomic_seq_cst_p (stmt)
7228 ? MEMMODEL_SEQ_CST
7229 : MEMMODEL_RELAXED));
3ec11c49 7230 if (exchange)
7231 {
7232 if (!useless_type_conversion_p (type, itype))
7233 call = build1_loc (loc, VIEW_CONVERT_EXPR, type, call);
7234 call = build2_loc (loc, MODIFY_EXPR, void_type_node, loaded_val, call);
7235 }
7236
7237 force_gimple_operand_gsi (&gsi, call, true, NULL_TREE, true, GSI_SAME_STMT);
7238 gsi_remove (&gsi, true);
7239
7240 /* Remove the GIMPLE_OMP_ATOMIC_LOAD that we verified above. */
7241 gsi = gsi_last_bb (load_bb);
7242 gsi_remove (&gsi, true);
7243
7244 if (gimple_in_ssa_p (cfun))
7245 update_ssa (TODO_update_ssa_no_phi);
7246
7247 return true;
2169f33b 7248}
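
/* Illustrative sketch (not part of the pass): for

     #pragma omp atomic write
     x = expr;

   with int x this emits roughly

     __atomic_store_4 (&x, expr, MEMMODEL_RELAXED);

   while a capture form that still needs the previous value becomes an
   __atomic_exchange_4 call instead, guarded by can_atomic_exchange_p
   above.  */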
7249
cb7f680b 7250/* A subroutine of expand_omp_atomic. Attempt to implement the atomic
1cd6e20d 7251 operation as a __atomic_fetch_op builtin. INDEX is log2 of the
cb7f680b 7252 size of the data type, and thus usable to find the index of the builtin
7253 decl. Returns false if the expression is not of the proper form. */
7254
7255static bool
7256expand_omp_atomic_fetch_op (basic_block load_bb,
7257 tree addr, tree loaded_val,
7258 tree stored_val, int index)
7259{
b9a16870 7260 enum built_in_function oldbase, newbase, tmpbase;
cb7f680b 7261 tree decl, itype, call;
2169f33b 7262 tree lhs, rhs;
cb7f680b 7263 basic_block store_bb = single_succ (load_bb);
75a70cf9 7264 gimple_stmt_iterator gsi;
7265 gimple stmt;
389dd41b 7266 location_t loc;
1cd6e20d 7267 enum tree_code code;
2169f33b 7268 bool need_old, need_new;
1cd6e20d 7269 enum machine_mode imode;
bc7bff74 7270 bool seq_cst;
cb7f680b 7271
7272 /* We expect to find the following sequences:
48e1416a 7273
cb7f680b 7274 load_bb:
75a70cf9 7275 GIMPLE_OMP_ATOMIC_LOAD (tmp, mem)
cb7f680b 7276
7277 store_bb:
7278 val = tmp OP something; (or: something OP tmp)
48e1416a 7279	 GIMPLE_OMP_ATOMIC_STORE (val)
cb7f680b 7280
48e1416a 7281	 ??? FIXME: Allow a more flexible sequence.
cb7f680b 7282 Perhaps use data flow to pick the statements.
48e1416a 7283
cb7f680b 7284 */
7285
75a70cf9 7286 gsi = gsi_after_labels (store_bb);
7287 stmt = gsi_stmt (gsi);
389dd41b 7288 loc = gimple_location (stmt);
75a70cf9 7289 if (!is_gimple_assign (stmt))
cb7f680b 7290 return false;
75a70cf9 7291 gsi_next (&gsi);
7292 if (gimple_code (gsi_stmt (gsi)) != GIMPLE_OMP_ATOMIC_STORE)
cb7f680b 7293 return false;
2169f33b 7294 need_new = gimple_omp_atomic_need_value_p (gsi_stmt (gsi));
7295 need_old = gimple_omp_atomic_need_value_p (last_stmt (load_bb));
bc7bff74 7296 seq_cst = gimple_omp_atomic_seq_cst_p (last_stmt (load_bb));
2169f33b 7297 gcc_checking_assert (!need_old || !need_new);
cb7f680b 7298
75a70cf9 7299 if (!operand_equal_p (gimple_assign_lhs (stmt), stored_val, 0))
cb7f680b 7300 return false;
7301
cb7f680b 7302 /* Check for one of the supported fetch-op operations. */
1cd6e20d 7303 code = gimple_assign_rhs_code (stmt);
7304 switch (code)
cb7f680b 7305 {
7306 case PLUS_EXPR:
7307 case POINTER_PLUS_EXPR:
1cd6e20d 7308 oldbase = BUILT_IN_ATOMIC_FETCH_ADD_N;
7309 newbase = BUILT_IN_ATOMIC_ADD_FETCH_N;
cb7f680b 7310 break;
7311 case MINUS_EXPR:
1cd6e20d 7312 oldbase = BUILT_IN_ATOMIC_FETCH_SUB_N;
7313 newbase = BUILT_IN_ATOMIC_SUB_FETCH_N;
cb7f680b 7314 break;
7315 case BIT_AND_EXPR:
1cd6e20d 7316 oldbase = BUILT_IN_ATOMIC_FETCH_AND_N;
7317 newbase = BUILT_IN_ATOMIC_AND_FETCH_N;
cb7f680b 7318 break;
7319 case BIT_IOR_EXPR:
1cd6e20d 7320 oldbase = BUILT_IN_ATOMIC_FETCH_OR_N;
7321 newbase = BUILT_IN_ATOMIC_OR_FETCH_N;
cb7f680b 7322 break;
7323 case BIT_XOR_EXPR:
1cd6e20d 7324 oldbase = BUILT_IN_ATOMIC_FETCH_XOR_N;
7325 newbase = BUILT_IN_ATOMIC_XOR_FETCH_N;
cb7f680b 7326 break;
7327 default:
7328 return false;
7329 }
1cd6e20d 7330
cb7f680b 7331 /* Make sure the expression is of the proper form. */
75a70cf9 7332 if (operand_equal_p (gimple_assign_rhs1 (stmt), loaded_val, 0))
7333 rhs = gimple_assign_rhs2 (stmt);
7334 else if (commutative_tree_code (gimple_assign_rhs_code (stmt))
7335 && operand_equal_p (gimple_assign_rhs2 (stmt), loaded_val, 0))
7336 rhs = gimple_assign_rhs1 (stmt);
cb7f680b 7337 else
7338 return false;
7339
b9a16870 7340 tmpbase = ((enum built_in_function)
7341 ((need_new ? newbase : oldbase) + index + 1));
7342 decl = builtin_decl_explicit (tmpbase);
0f94f46b 7343 if (decl == NULL_TREE)
7344 return false;
cb7f680b 7345 itype = TREE_TYPE (TREE_TYPE (decl));
1cd6e20d 7346 imode = TYPE_MODE (itype);
cb7f680b 7347
1cd6e20d 7348 /* We could test all of the various optabs involved, but the fact of the
7349 matter is that (with the exception of i486 vs i586 and xadd) all targets
7350	     that support any atomic operation optab also implement compare-and-swap.
7351 Let optabs.c take care of expanding any compare-and-swap loop. */
29139cdc 7352 if (!can_compare_and_swap_p (imode, true))
cb7f680b 7353 return false;
7354
75a70cf9 7355 gsi = gsi_last_bb (load_bb);
7356 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_ATOMIC_LOAD);
1cd6e20d 7357
7358 /* OpenMP does not imply any barrier-like semantics on its atomic ops.
7359 It only requires that the operation happen atomically. Thus we can
7360 use the RELAXED memory model. */
7361 call = build_call_expr_loc (loc, decl, 3, addr,
7362 fold_convert_loc (loc, itype, rhs),
bc7bff74 7363 build_int_cst (NULL,
7364 seq_cst ? MEMMODEL_SEQ_CST
7365 : MEMMODEL_RELAXED));
1cd6e20d 7366
2169f33b 7367 if (need_old || need_new)
7368 {
7369 lhs = need_old ? loaded_val : stored_val;
7370 call = fold_convert_loc (loc, TREE_TYPE (lhs), call);
7371 call = build2_loc (loc, MODIFY_EXPR, void_type_node, lhs, call);
7372 }
7373 else
7374 call = fold_convert_loc (loc, void_type_node, call);
75a70cf9 7375 force_gimple_operand_gsi (&gsi, call, true, NULL_TREE, true, GSI_SAME_STMT);
7376 gsi_remove (&gsi, true);
cb7f680b 7377
75a70cf9 7378 gsi = gsi_last_bb (store_bb);
7379 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_ATOMIC_STORE);
7380 gsi_remove (&gsi, true);
7381 gsi = gsi_last_bb (store_bb);
7382 gsi_remove (&gsi, true);
cb7f680b 7383
7384 if (gimple_in_ssa_p (cfun))
7385 update_ssa (TODO_update_ssa_no_phi);
7386
7387 return true;
7388}
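
/* Illustrative sketch (not part of the pass): for

     #pragma omp atomic
     x += 42;

   with int x, the matched load/store pair is replaced by roughly

     __atomic_fetch_add_4 (&x, 42, MEMMODEL_RELAXED);

   capturing the new value selects the ADD_FETCH variant (need_new),
   while capturing the old value keeps FETCH_ADD (need_old), with the
   call's result assigned to the captured variable.  */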
7389
7390/* A subroutine of expand_omp_atomic. Implement the atomic operation as:
7391
7392 oldval = *addr;
7393 repeat:
7394 newval = rhs; // with oldval replacing *addr in rhs
7395 oldval = __sync_val_compare_and_swap (addr, oldval, newval);
7396 if (oldval != newval)
7397 goto repeat;
7398
7399 INDEX is log2 of the size of the data type, and thus usable to find the
7400 index of the builtin decl. */
7401
7402static bool
7403expand_omp_atomic_pipeline (basic_block load_bb, basic_block store_bb,
7404 tree addr, tree loaded_val, tree stored_val,
7405 int index)
7406{
790368c5 7407 tree loadedi, storedi, initial, new_storedi, old_vali;
cb7f680b 7408 tree type, itype, cmpxchg, iaddr;
75a70cf9 7409 gimple_stmt_iterator si;
cb7f680b 7410 basic_block loop_header = single_succ (load_bb);
75a70cf9 7411 gimple phi, stmt;
cb7f680b 7412 edge e;
b9a16870 7413 enum built_in_function fncode;
cb7f680b 7414
1cd6e20d 7415 /* ??? We need a non-pointer interface to __atomic_compare_exchange in
7416 order to use the RELAXED memory model effectively. */
b9a16870 7417 fncode = (enum built_in_function)((int)BUILT_IN_SYNC_VAL_COMPARE_AND_SWAP_N
7418 + index + 1);
7419 cmpxchg = builtin_decl_explicit (fncode);
0f94f46b 7420 if (cmpxchg == NULL_TREE)
7421 return false;
cb7f680b 7422 type = TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (addr)));
7423 itype = TREE_TYPE (TREE_TYPE (cmpxchg));
7424
29139cdc 7425 if (!can_compare_and_swap_p (TYPE_MODE (itype), true))
cb7f680b 7426 return false;
7427
75a70cf9 7428 /* Load the initial value, replacing the GIMPLE_OMP_ATOMIC_LOAD. */
7429 si = gsi_last_bb (load_bb);
7430 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_LOAD);
7431
790368c5 7432 /* For floating-point values, we'll need to view-convert them to integers
7433 so that we can perform the atomic compare and swap. Simplify the
7434 following code by always setting up the "i"ntegral variables. */
7435 if (!INTEGRAL_TYPE_P (type) && !POINTER_TYPE_P (type))
7436 {
75a70cf9 7437 tree iaddr_val;
7438
072f7ab1 7439 iaddr = create_tmp_reg (build_pointer_type_for_mode (itype, ptr_mode,
7440 true), NULL);
75a70cf9 7441 iaddr_val
7442 = force_gimple_operand_gsi (&si,
7443 fold_convert (TREE_TYPE (iaddr), addr),
7444 false, NULL_TREE, true, GSI_SAME_STMT);
7445 stmt = gimple_build_assign (iaddr, iaddr_val);
7446 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
790368c5 7447 loadedi = create_tmp_var (itype, NULL);
7448 if (gimple_in_ssa_p (cfun))
b03e5397 7449 loadedi = make_ssa_name (loadedi, NULL);
790368c5 7450 }
7451 else
7452 {
7453 iaddr = addr;
7454 loadedi = loaded_val;
7455 }
75a70cf9 7456
182cf5a9 7457 initial
7458 = force_gimple_operand_gsi (&si,
7459 build2 (MEM_REF, TREE_TYPE (TREE_TYPE (iaddr)),
7460 iaddr,
7461 build_int_cst (TREE_TYPE (iaddr), 0)),
7462 true, NULL_TREE, true, GSI_SAME_STMT);
790368c5 7463
7464 /* Move the value to the LOADEDI temporary. */
cb7f680b 7465 if (gimple_in_ssa_p (cfun))
7466 {
75a70cf9 7467 gcc_assert (gimple_seq_empty_p (phi_nodes (loop_header)));
790368c5 7468 phi = create_phi_node (loadedi, loop_header);
cb7f680b 7469 SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, single_succ_edge (load_bb)),
7470 initial);
7471 }
7472 else
75a70cf9 7473 gsi_insert_before (&si,
7474 gimple_build_assign (loadedi, initial),
7475 GSI_SAME_STMT);
790368c5 7476 if (loadedi != loaded_val)
7477 {
75a70cf9 7478 gimple_stmt_iterator gsi2;
7479 tree x;
790368c5 7480
7481 x = build1 (VIEW_CONVERT_EXPR, type, loadedi);
75a70cf9 7482 gsi2 = gsi_start_bb (loop_header);
790368c5 7483 if (gimple_in_ssa_p (cfun))
7484 {
75a70cf9 7485 gimple stmt;
7486 x = force_gimple_operand_gsi (&gsi2, x, true, NULL_TREE,
7487 true, GSI_SAME_STMT);
7488 stmt = gimple_build_assign (loaded_val, x);
7489 gsi_insert_before (&gsi2, stmt, GSI_SAME_STMT);
790368c5 7490 }
7491 else
7492 {
75a70cf9 7493 x = build2 (MODIFY_EXPR, TREE_TYPE (loaded_val), loaded_val, x);
7494 force_gimple_operand_gsi (&gsi2, x, true, NULL_TREE,
7495 true, GSI_SAME_STMT);
790368c5 7496 }
7497 }
75a70cf9 7498 gsi_remove (&si, true);
cb7f680b 7499
75a70cf9 7500 si = gsi_last_bb (store_bb);
7501 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_STORE);
cb7f680b 7502
790368c5 7503 if (iaddr == addr)
7504 storedi = stored_val;
cb7f680b 7505 else
790368c5 7506 storedi =
75a70cf9 7507 force_gimple_operand_gsi (&si,
790368c5 7508 build1 (VIEW_CONVERT_EXPR, itype,
7509 stored_val), true, NULL_TREE, true,
75a70cf9 7510 GSI_SAME_STMT);
cb7f680b 7511
7512 /* Build the compare&swap statement. */
7513 new_storedi = build_call_expr (cmpxchg, 3, iaddr, loadedi, storedi);
75a70cf9 7514 new_storedi = force_gimple_operand_gsi (&si,
87f9ffa4 7515 fold_convert (TREE_TYPE (loadedi),
7516 new_storedi),
cb7f680b 7517 true, NULL_TREE,
75a70cf9 7518 true, GSI_SAME_STMT);
cb7f680b 7519
7520 if (gimple_in_ssa_p (cfun))
7521 old_vali = loadedi;
7522 else
7523 {
87f9ffa4 7524 old_vali = create_tmp_var (TREE_TYPE (loadedi), NULL);
75a70cf9 7525 stmt = gimple_build_assign (old_vali, loadedi);
7526 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
cb7f680b 7527
75a70cf9 7528 stmt = gimple_build_assign (loadedi, new_storedi);
7529 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
cb7f680b 7530 }
7531
7532 /* Note that we always perform the comparison as an integer, even for
48e1416a 7533 floating point. This allows the atomic operation to properly
cb7f680b 7534 succeed even with NaNs and -0.0. */
75a70cf9 7535 stmt = gimple_build_cond_empty
7536 (build2 (NE_EXPR, boolean_type_node,
7537 new_storedi, old_vali));
7538 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
cb7f680b 7539
7540 /* Update cfg. */
7541 e = single_succ_edge (store_bb);
7542 e->flags &= ~EDGE_FALLTHRU;
7543 e->flags |= EDGE_FALSE_VALUE;
7544
7545 e = make_edge (store_bb, loop_header, EDGE_TRUE_VALUE);
7546
790368c5 7547 /* Copy the new value to loadedi (we already did that before the condition
cb7f680b 7548 if we are not in SSA). */
7549 if (gimple_in_ssa_p (cfun))
7550 {
75a70cf9 7551 phi = gimple_seq_first_stmt (phi_nodes (loop_header));
790368c5 7552 SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, e), new_storedi);
cb7f680b 7553 }
7554
75a70cf9 7555 /* Remove GIMPLE_OMP_ATOMIC_STORE. */
7556 gsi_remove (&si, true);
cb7f680b 7557
04c2922b 7558 struct loop *loop = alloc_loop ();
7559 loop->header = loop_header;
5f037457 7560 loop->latch = store_bb;
04c2922b 7561 add_loop (loop, loop_header->loop_father);
7562
cb7f680b 7563 if (gimple_in_ssa_p (cfun))
7564 update_ssa (TODO_update_ssa_no_phi);
7565
7566 return true;
7567}
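
/* Illustrative sketch (not part of the pass), in rough C pseudocode for
   a 4-byte float, where VIEW_CONVERT stands for the view-conversions
   built above:

     unsigned int old = *(unsigned int *) addr;
     for (;;)
       {
         float f = VIEW_CONVERT (float, old);
         unsigned int desired = VIEW_CONVERT (unsigned int, f + rhs);
         unsigned int seen
           = __sync_val_compare_and_swap ((unsigned int *) addr,
                                          old, desired);
         if (seen == old)
           break;
         old = seen;
       }

   Comparing the integer images means NaNs and -0.0 cannot make the
   loop spin forever.  */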
7568
7569/* A subroutine of expand_omp_atomic. Implement the atomic operation as:
7570
7571 GOMP_atomic_start ();
7572 *addr = rhs;
7573 GOMP_atomic_end ();
7574
7575 The result is not globally atomic, but works so long as all parallel
7576 references are within #pragma omp atomic directives. According to
7577	   responses received from omp@openmp.org, this appears to be within spec.
7578 Which makes sense, since that's how several other compilers handle
48e1416a 7579 this situation as well.
75a70cf9 7580 LOADED_VAL and ADDR are the operands of GIMPLE_OMP_ATOMIC_LOAD we're
7581 expanding. STORED_VAL is the operand of the matching
7582 GIMPLE_OMP_ATOMIC_STORE.
cb7f680b 7583
48e1416a 7584 We replace
7585 GIMPLE_OMP_ATOMIC_LOAD (loaded_val, addr) with
cb7f680b 7586 loaded_val = *addr;
7587
7588 and replace
3ec11c49 7589 GIMPLE_OMP_ATOMIC_STORE (stored_val) with
48e1416a 7590 *addr = stored_val;
cb7f680b 7591*/
7592
7593static bool
7594expand_omp_atomic_mutex (basic_block load_bb, basic_block store_bb,
7595 tree addr, tree loaded_val, tree stored_val)
7596{
75a70cf9 7597 gimple_stmt_iterator si;
7598 gimple stmt;
cb7f680b 7599 tree t;
7600
75a70cf9 7601 si = gsi_last_bb (load_bb);
7602 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_LOAD);
cb7f680b 7603
b9a16870 7604 t = builtin_decl_explicit (BUILT_IN_GOMP_ATOMIC_START);
414c3a2c 7605 t = build_call_expr (t, 0);
75a70cf9 7606 force_gimple_operand_gsi (&si, t, true, NULL_TREE, true, GSI_SAME_STMT);
cb7f680b 7607
182cf5a9 7608 stmt = gimple_build_assign (loaded_val, build_simple_mem_ref (addr));
75a70cf9 7609 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
7610 gsi_remove (&si, true);
cb7f680b 7611
75a70cf9 7612 si = gsi_last_bb (store_bb);
7613 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_STORE);
cb7f680b 7614
182cf5a9 7615 stmt = gimple_build_assign (build_simple_mem_ref (unshare_expr (addr)),
7616 stored_val);
75a70cf9 7617 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
cb7f680b 7618
b9a16870 7619 t = builtin_decl_explicit (BUILT_IN_GOMP_ATOMIC_END);
414c3a2c 7620 t = build_call_expr (t, 0);
75a70cf9 7621 force_gimple_operand_gsi (&si, t, true, NULL_TREE, true, GSI_SAME_STMT);
7622 gsi_remove (&si, true);
cb7f680b 7623
7624 if (gimple_in_ssa_p (cfun))
7625 update_ssa (TODO_update_ssa_no_phi);
7626 return true;
7627}
7628
48e1416a 7629/* Expand a GIMPLE_OMP_ATOMIC statement.  We try to expand
7630 using expand_omp_atomic_fetch_op. If it failed, we try to
cb7f680b 7631 call expand_omp_atomic_pipeline, and if it fails too, the
7632 ultimate fallback is wrapping the operation in a mutex
48e1416a 7633 (expand_omp_atomic_mutex). REGION is the atomic region built
7634 by build_omp_regions_1(). */
cb7f680b 7635
7636static void
7637expand_omp_atomic (struct omp_region *region)
7638{
7639 basic_block load_bb = region->entry, store_bb = region->exit;
75a70cf9 7640 gimple load = last_stmt (load_bb), store = last_stmt (store_bb);
7641 tree loaded_val = gimple_omp_atomic_load_lhs (load);
7642 tree addr = gimple_omp_atomic_load_rhs (load);
7643 tree stored_val = gimple_omp_atomic_store_val (store);
cb7f680b 7644 tree type = TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (addr)));
7645 HOST_WIDE_INT index;
7646
7647 /* Make sure the type is one of the supported sizes. */
7648 index = tree_low_cst (TYPE_SIZE_UNIT (type), 1);
7649 index = exact_log2 (index);
7650 if (index >= 0 && index <= 4)
7651 {
7652 unsigned int align = TYPE_ALIGN_UNIT (type);
7653
7654 /* __sync builtins require strict data alignment. */
dcf7024c 7655 if (exact_log2 (align) >= index)
cb7f680b 7656 {
3ec11c49 7657 /* Atomic load. */
2169f33b 7658 if (loaded_val == stored_val
7659 && (GET_MODE_CLASS (TYPE_MODE (type)) == MODE_INT
7660 || GET_MODE_CLASS (TYPE_MODE (type)) == MODE_FLOAT)
7661 && GET_MODE_BITSIZE (TYPE_MODE (type)) <= BITS_PER_WORD
3ec11c49 7662 && expand_omp_atomic_load (load_bb, addr, loaded_val, index))
2169f33b 7663 return;
7664
3ec11c49 7665 /* Atomic store. */
2169f33b 7666 if ((GET_MODE_CLASS (TYPE_MODE (type)) == MODE_INT
7667 || GET_MODE_CLASS (TYPE_MODE (type)) == MODE_FLOAT)
7668 && GET_MODE_BITSIZE (TYPE_MODE (type)) <= BITS_PER_WORD
7669 && store_bb == single_succ (load_bb)
7670 && first_stmt (store_bb) == store
3ec11c49 7671 && expand_omp_atomic_store (load_bb, addr, loaded_val,
7672 stored_val, index))
2169f33b 7673 return;
7674
cb7f680b 7675 /* When possible, use specialized atomic update functions. */
7676 if ((INTEGRAL_TYPE_P (type) || POINTER_TYPE_P (type))
3ec11c49 7677 && store_bb == single_succ (load_bb)
7678 && expand_omp_atomic_fetch_op (load_bb, addr,
7679 loaded_val, stored_val, index))
7680 return;
cb7f680b 7681
7682 /* If we don't have specialized __sync builtins, try and implement
7683 as a compare and swap loop. */
7684 if (expand_omp_atomic_pipeline (load_bb, store_bb, addr,
7685 loaded_val, stored_val, index))
7686 return;
7687 }
7688 }
7689
7690 /* The ultimate fallback is wrapping the operation in a mutex. */
7691 expand_omp_atomic_mutex (load_bb, store_bb, addr, loaded_val, stored_val);
7692}
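
/* Illustrative sketch (not part of the pass): for a naturally aligned
   4-byte type, index = exact_log2 (4) = 2 and exact_log2 (align) >= 2,
   so the expanders above are tried in order -- plain load, plain
   store/exchange, fetch-op, compare-and-swap loop -- before giving up.
   Types larger than 16 bytes, or too weakly aligned for the __sync
   builtins, go straight to the GOMP_atomic_start/GOMP_atomic_end
   mutex fallback.  */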
7693
1e8e9920 7694
bc7bff74 7695/* Expand the OpenMP target{, data, update} directive starting at REGION. */
7696
7697static void
7698expand_omp_target (struct omp_region *region)
7699{
7700 basic_block entry_bb, exit_bb, new_bb;
7701 struct function *child_cfun = NULL;
7702 tree child_fn = NULL_TREE, block, t;
7703 gimple_stmt_iterator gsi;
7704 gimple entry_stmt, stmt;
7705 edge e;
7706
7707 entry_stmt = last_stmt (region->entry);
7708 new_bb = region->entry;
7709 int kind = gimple_omp_target_kind (entry_stmt);
7710 if (kind == GF_OMP_TARGET_KIND_REGION)
7711 {
7712 child_fn = gimple_omp_target_child_fn (entry_stmt);
7713 child_cfun = DECL_STRUCT_FUNCTION (child_fn);
7714 }
7715
7716 entry_bb = region->entry;
7717 exit_bb = region->exit;
7718
7719 if (kind == GF_OMP_TARGET_KIND_REGION)
7720 {
7721 unsigned srcidx, dstidx, num;
7722
7723 /* If the target region needs data sent from the parent
7724 function, then the very first statement (except possible
7725	 tree profile counter updates) of the target body
7726 is a copy assignment .OMP_DATA_I = &.OMP_DATA_O. Since
7727 &.OMP_DATA_O is passed as an argument to the child function,
7728 we need to replace it with the argument as seen by the child
7729 function.
7730
7731 In most cases, this will end up being the identity assignment
7732	 .OMP_DATA_I = .OMP_DATA_I.  However, if the target body had
7733	 a function call that has been inlined, the original PARM_DECL
7734	 .OMP_DATA_I may have been converted into a different local
7735	 variable, in which case we need to keep the assignment.  */
7736 if (gimple_omp_target_data_arg (entry_stmt))
7737 {
7738 basic_block entry_succ_bb = single_succ (entry_bb);
7739 gimple_stmt_iterator gsi;
7740 tree arg;
7741 gimple tgtcopy_stmt = NULL;
7742 tree sender
7743 = TREE_VEC_ELT (gimple_omp_target_data_arg (entry_stmt), 0);
7744
7745 for (gsi = gsi_start_bb (entry_succ_bb); ; gsi_next (&gsi))
7746 {
7747 gcc_assert (!gsi_end_p (gsi));
7748 stmt = gsi_stmt (gsi);
7749 if (gimple_code (stmt) != GIMPLE_ASSIGN)
7750 continue;
7751
7752 if (gimple_num_ops (stmt) == 2)
7753 {
7754 tree arg = gimple_assign_rhs1 (stmt);
7755
7756 /* We're ignoring the subcode because we're
7757 effectively doing a STRIP_NOPS. */
7758
7759 if (TREE_CODE (arg) == ADDR_EXPR
7760 && TREE_OPERAND (arg, 0) == sender)
7761 {
7762 tgtcopy_stmt = stmt;
7763 break;
7764 }
7765 }
7766 }
7767
7768 gcc_assert (tgtcopy_stmt != NULL);
7769 arg = DECL_ARGUMENTS (child_fn);
7770
7771 gcc_assert (gimple_assign_lhs (tgtcopy_stmt) == arg);
7772 gsi_remove (&gsi, true);
7773 }
7774
7775 /* Declare local variables needed in CHILD_CFUN. */
7776 block = DECL_INITIAL (child_fn);
7777 BLOCK_VARS (block) = vec2chain (child_cfun->local_decls);
7778 /* The gimplifier could record temporaries in target block
7779 rather than in containing function's local_decls chain,
7780 which would mean cgraph missed finalizing them. Do it now. */
7781 for (t = BLOCK_VARS (block); t; t = DECL_CHAIN (t))
7782 if (TREE_CODE (t) == VAR_DECL
7783 && TREE_STATIC (t)
7784 && !DECL_EXTERNAL (t))
7785 varpool_finalize_decl (t);
7786 DECL_SAVED_TREE (child_fn) = NULL;
7787 /* We'll create a CFG for child_fn, so no gimple body is needed. */
7788 gimple_set_body (child_fn, NULL);
7789 TREE_USED (block) = 1;
7790
7791 /* Reset DECL_CONTEXT on function arguments. */
7792 for (t = DECL_ARGUMENTS (child_fn); t; t = DECL_CHAIN (t))
7793 DECL_CONTEXT (t) = child_fn;
7794
7795 /* Split ENTRY_BB at GIMPLE_OMP_TARGET,
7796 so that it can be moved to the child function. */
7797 gsi = gsi_last_bb (entry_bb);
7798 stmt = gsi_stmt (gsi);
7799 gcc_assert (stmt && gimple_code (stmt) == GIMPLE_OMP_TARGET
7800 && gimple_omp_target_kind (stmt)
7801 == GF_OMP_TARGET_KIND_REGION);
7802 gsi_remove (&gsi, true);
7803 e = split_block (entry_bb, stmt);
7804 entry_bb = e->dest;
7805 single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
7806
7807 /* Convert GIMPLE_OMP_RETURN into a RETURN_EXPR. */
7808 if (exit_bb)
7809 {
7810 gsi = gsi_last_bb (exit_bb);
7811 gcc_assert (!gsi_end_p (gsi)
7812 && gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_RETURN);
7813 stmt = gimple_build_return (NULL);
7814 gsi_insert_after (&gsi, stmt, GSI_SAME_STMT);
7815 gsi_remove (&gsi, true);
7816 }
7817
7818 /* Move the target region into CHILD_CFUN. */
7819
7820 block = gimple_block (entry_stmt);
7821
7822 new_bb = move_sese_region_to_fn (child_cfun, entry_bb, exit_bb, block);
7823 if (exit_bb)
7824 single_succ_edge (new_bb)->flags = EDGE_FALLTHRU;
7825 /* When the OMP expansion process cannot guarantee an up-to-date
7826	 loop tree, arrange for the child function to fix up loops.  */
7827 if (loops_state_satisfies_p (LOOPS_NEED_FIXUP))
7828 child_cfun->x_current_loops->state |= LOOPS_NEED_FIXUP;
7829
7830 /* Remove non-local VAR_DECLs from child_cfun->local_decls list. */
7831 num = vec_safe_length (child_cfun->local_decls);
7832 for (srcidx = 0, dstidx = 0; srcidx < num; srcidx++)
7833 {
7834 t = (*child_cfun->local_decls)[srcidx];
7835 if (DECL_CONTEXT (t) == cfun->decl)
7836 continue;
7837 if (srcidx != dstidx)
7838 (*child_cfun->local_decls)[dstidx] = t;
7839 dstidx++;
7840 }
7841 if (dstidx != num)
7842 vec_safe_truncate (child_cfun->local_decls, dstidx);
7843
7844 /* Inform the callgraph about the new function. */
7845 DECL_STRUCT_FUNCTION (child_fn)->curr_properties = cfun->curr_properties;
7846 cgraph_add_new_function (child_fn, true);
7847
7848 /* Fix the callgraph edges for child_cfun. Those for cfun will be
7849 fixed in a following pass. */
7850 push_cfun (child_cfun);
7851 rebuild_cgraph_edges ();
7852
7853 /* Some EH regions might become dead, see PR34608. If
7854 pass_cleanup_cfg isn't the first pass to happen with the
7855 new child, these dead EH edges might cause problems.
7856 Clean them up now. */
7857 if (flag_exceptions)
7858 {
7859 basic_block bb;
7860 bool changed = false;
7861
7862 FOR_EACH_BB (bb)
7863 changed |= gimple_purge_dead_eh_edges (bb);
7864 if (changed)
7865 cleanup_tree_cfg ();
7866 }
7867 pop_cfun ();
7868 }
7869
7870 /* Emit a library call to launch the target region, or do data
7871 transfers. */
7872 tree t1, t2, t3, t4, device, cond, c, clauses;
7873 enum built_in_function start_ix;
7874 location_t clause_loc;
7875
7876 clauses = gimple_omp_target_clauses (entry_stmt);
7877
7878 if (kind == GF_OMP_TARGET_KIND_REGION)
7879 start_ix = BUILT_IN_GOMP_TARGET;
7880 else if (kind == GF_OMP_TARGET_KIND_DATA)
7881 start_ix = BUILT_IN_GOMP_TARGET_DATA;
7882 else
7883 start_ix = BUILT_IN_GOMP_TARGET_UPDATE;
7884
7885 /* By default, the value of DEVICE is -1 (let runtime library choose)
7886 and there is no conditional. */
7887 cond = NULL_TREE;
7888 device = build_int_cst (integer_type_node, -1);
7889
7890 c = find_omp_clause (clauses, OMP_CLAUSE_IF);
7891 if (c)
7892 cond = OMP_CLAUSE_IF_EXPR (c);
7893
7894 c = find_omp_clause (clauses, OMP_CLAUSE_DEVICE);
7895 if (c)
7896 {
7897 device = OMP_CLAUSE_DEVICE_ID (c);
7898 clause_loc = OMP_CLAUSE_LOCATION (c);
7899 }
7900 else
7901 clause_loc = gimple_location (entry_stmt);
7902
7903 /* Ensure 'device' is of the correct type. */
7904 device = fold_convert_loc (clause_loc, integer_type_node, device);
7905
7906 /* If we found the clause 'if (cond)', build
7907 (cond ? device : -2). */
7908 if (cond)
7909 {
7910 cond = gimple_boolify (cond);
7911
7912 basic_block cond_bb, then_bb, else_bb;
7913 edge e;
7914 tree tmp_var;
7915
7916 tmp_var = create_tmp_var (TREE_TYPE (device), NULL);
7917 if (kind != GF_OMP_TARGET_KIND_REGION)
7918 {
7919 gsi = gsi_last_bb (new_bb);
7920 gsi_prev (&gsi);
7921 e = split_block (new_bb, gsi_stmt (gsi));
7922 }
7923 else
7924 e = split_block (new_bb, NULL);
7925 cond_bb = e->src;
7926 new_bb = e->dest;
7927 remove_edge (e);
7928
7929 then_bb = create_empty_bb (cond_bb);
7930 else_bb = create_empty_bb (then_bb);
7931 set_immediate_dominator (CDI_DOMINATORS, then_bb, cond_bb);
7932 set_immediate_dominator (CDI_DOMINATORS, else_bb, cond_bb);
7933
7934 stmt = gimple_build_cond_empty (cond);
7935 gsi = gsi_last_bb (cond_bb);
7936 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
7937
7938 gsi = gsi_start_bb (then_bb);
7939 stmt = gimple_build_assign (tmp_var, device);
7940 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
7941
7942 gsi = gsi_start_bb (else_bb);
7943 stmt = gimple_build_assign (tmp_var,
7944 build_int_cst (integer_type_node, -2));
7945 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
7946
7947 make_edge (cond_bb, then_bb, EDGE_TRUE_VALUE);
7948 make_edge (cond_bb, else_bb, EDGE_FALSE_VALUE);
7949 if (current_loops)
7950 {
7951 add_bb_to_loop (then_bb, cond_bb->loop_father);
7952 add_bb_to_loop (else_bb, cond_bb->loop_father);
7953 }
7954 make_edge (then_bb, new_bb, EDGE_FALLTHRU);
7955 make_edge (else_bb, new_bb, EDGE_FALLTHRU);
7956
7957 device = tmp_var;
7958 }
7959
7960 gsi = gsi_last_bb (new_bb);
7961 t = gimple_omp_target_data_arg (entry_stmt);
7962 if (t == NULL)
7963 {
7964 t1 = size_zero_node;
7965 t2 = build_zero_cst (ptr_type_node);
7966 t3 = t2;
7967 t4 = t2;
7968 }
7969 else
7970 {
7971 t1 = TYPE_MAX_VALUE (TYPE_DOMAIN (TREE_TYPE (TREE_VEC_ELT (t, 1))));
7972 t1 = size_binop (PLUS_EXPR, t1, size_int (1));
7973 t2 = build_fold_addr_expr (TREE_VEC_ELT (t, 0));
7974 t3 = build_fold_addr_expr (TREE_VEC_ELT (t, 1));
7975 t4 = build_fold_addr_expr (TREE_VEC_ELT (t, 2));
7976 }
7977
7978 gimple g;
7979 /* FIXME: This will be address of
7980 extern char __OPENMP_TARGET__[] __attribute__((visibility ("hidden")))
7981 symbol, as soon as the linker plugin is able to create it for us. */
7982 tree openmp_target = build_zero_cst (ptr_type_node);
7983 if (kind == GF_OMP_TARGET_KIND_REGION)
7984 {
7985 tree fnaddr = build_fold_addr_expr (child_fn);
7986 g = gimple_build_call (builtin_decl_explicit (start_ix), 7,
7987 device, fnaddr, openmp_target, t1, t2, t3, t4);
7988 }
7989 else
7990 g = gimple_build_call (builtin_decl_explicit (start_ix), 6,
7991 device, openmp_target, t1, t2, t3, t4);
7992 gimple_set_location (g, gimple_location (entry_stmt));
7993 gsi_insert_before (&gsi, g, GSI_SAME_STMT);
7994 if (kind != GF_OMP_TARGET_KIND_REGION)
7995 {
7996 g = gsi_stmt (gsi);
7997 gcc_assert (g && gimple_code (g) == GIMPLE_OMP_TARGET);
7998 gsi_remove (&gsi, true);
7999 }
8000 if (kind == GF_OMP_TARGET_KIND_DATA && region->exit)
8001 {
8002 gsi = gsi_last_bb (region->exit);
8003 g = gsi_stmt (gsi);
8004 gcc_assert (g && gimple_code (g) == GIMPLE_OMP_RETURN);
8005 gsi_remove (&gsi, true);
8006 }
8007}
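
/* Illustrative sketch (not part of the pass): for a target region the
   call emitted above has the rough shape

     GOMP_target (device, child_fn, openmp_target,
                  num_maps, &.omp_data_arr, &.omp_data_sizes,
                  &.omp_data_kinds);

   where openmp_target is still the null placeholder from the FIXME
   above, and device defaults to -1 (runtime chooses) or folds to -2
   when an if clause is false.  The data and update kinds instead call
   GOMP_target_data / GOMP_target_update without child_fn.  */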
8008
8009
8010/* Expand the parallel region tree rooted at REGION. Expansion
8011 proceeds in depth-first order. Innermost regions are expanded
8012 first. This way, parallel regions that require a new function to
75a70cf9 8013 be created (e.g., GIMPLE_OMP_PARALLEL) can be expanded without having any
773c5ba7 8014 internal dependencies in their body. */
8015
8016static void
8017expand_omp (struct omp_region *region)
8018{
8019 while (region)
8020 {
1d22f541 8021 location_t saved_location;
bc7bff74 8022 gimple inner_stmt = NULL;
1d22f541 8023
d1d5b012 8024 /* First, determine whether this is a combined parallel+workshare
8025 region. */
75a70cf9 8026 if (region->type == GIMPLE_OMP_PARALLEL)
d1d5b012 8027 determine_parallel_type (region);
8028
bc7bff74 8029 if (region->type == GIMPLE_OMP_FOR
8030 && gimple_omp_for_combined_p (last_stmt (region->entry)))
8031 inner_stmt = last_stmt (region->inner->entry);
8032
773c5ba7 8033 if (region->inner)
8034 expand_omp (region->inner);
8035
1d22f541 8036 saved_location = input_location;
75a70cf9 8037 if (gimple_has_location (last_stmt (region->entry)))
8038 input_location = gimple_location (last_stmt (region->entry));
1d22f541 8039
61e47ac8 8040 switch (region->type)
773c5ba7 8041 {
75a70cf9 8042 case GIMPLE_OMP_PARALLEL:
8043 case GIMPLE_OMP_TASK:
fd6481cf 8044 expand_omp_taskreg (region);
8045 break;
8046
75a70cf9 8047 case GIMPLE_OMP_FOR:
bc7bff74 8048 expand_omp_for (region, inner_stmt);
61e47ac8 8049 break;
773c5ba7 8050
75a70cf9 8051 case GIMPLE_OMP_SECTIONS:
61e47ac8 8052 expand_omp_sections (region);
8053 break;
773c5ba7 8054
75a70cf9 8055 case GIMPLE_OMP_SECTION:
61e47ac8 8056 /* Individual omp sections are handled together with their
75a70cf9 8057 parent GIMPLE_OMP_SECTIONS region. */
61e47ac8 8058 break;
773c5ba7 8059
75a70cf9 8060 case GIMPLE_OMP_SINGLE:
61e47ac8 8061 expand_omp_single (region);
8062 break;
773c5ba7 8063
75a70cf9 8064 case GIMPLE_OMP_MASTER:
bc7bff74 8065 case GIMPLE_OMP_TASKGROUP:
75a70cf9 8066 case GIMPLE_OMP_ORDERED:
8067 case GIMPLE_OMP_CRITICAL:
bc7bff74 8068 case GIMPLE_OMP_TEAMS:
61e47ac8 8069 expand_omp_synch (region);
8070 break;
773c5ba7 8071
75a70cf9 8072 case GIMPLE_OMP_ATOMIC_LOAD:
cb7f680b 8073 expand_omp_atomic (region);
8074 break;
8075
bc7bff74 8076 case GIMPLE_OMP_TARGET:
8077 expand_omp_target (region);
8078 break;
8079
61e47ac8 8080 default:
8081 gcc_unreachable ();
8082 }
cc5982dc 8083
1d22f541 8084 input_location = saved_location;
773c5ba7 8085 region = region->next;
8086 }
8087}
8088
8089
8090/* Helper for build_omp_regions. Scan the dominator tree starting at
28c92cbb 8091 block BB. PARENT is the region that contains BB. If SINGLE_TREE is
8092 true, the function ends once a single tree is built (otherwise, whole
8093 forest of OMP constructs may be built). */
773c5ba7 8094
8095static void
28c92cbb 8096build_omp_regions_1 (basic_block bb, struct omp_region *parent,
8097 bool single_tree)
773c5ba7 8098{
75a70cf9 8099 gimple_stmt_iterator gsi;
8100 gimple stmt;
773c5ba7 8101 basic_block son;
8102
75a70cf9 8103 gsi = gsi_last_bb (bb);
8104 if (!gsi_end_p (gsi) && is_gimple_omp (gsi_stmt (gsi)))
773c5ba7 8105 {
8106 struct omp_region *region;
75a70cf9 8107 enum gimple_code code;
773c5ba7 8108
75a70cf9 8109 stmt = gsi_stmt (gsi);
8110 code = gimple_code (stmt);
8111 if (code == GIMPLE_OMP_RETURN)
773c5ba7 8112 {
8113 /* STMT is the return point out of region PARENT. Mark it
8114 as the exit point and make PARENT the immediately
8115 enclosing region. */
8116 gcc_assert (parent);
8117 region = parent;
61e47ac8 8118 region->exit = bb;
773c5ba7 8119 parent = parent->outer;
773c5ba7 8120 }
75a70cf9 8121 else if (code == GIMPLE_OMP_ATOMIC_STORE)
cb7f680b 8122 {
75a70cf9 8123	  /* GIMPLE_OMP_ATOMIC_STORE is analogous to
8124 GIMPLE_OMP_RETURN, but matches with
8125 GIMPLE_OMP_ATOMIC_LOAD. */
cb7f680b 8126 gcc_assert (parent);
75a70cf9 8127 gcc_assert (parent->type == GIMPLE_OMP_ATOMIC_LOAD);
cb7f680b 8128 region = parent;
8129 region->exit = bb;
8130 parent = parent->outer;
8131 }
8132
75a70cf9 8133 else if (code == GIMPLE_OMP_CONTINUE)
61e47ac8 8134 {
8135 gcc_assert (parent);
8136 parent->cont = bb;
8137 }
75a70cf9 8138 else if (code == GIMPLE_OMP_SECTIONS_SWITCH)
ac6e3339 8139 {
75a70cf9 8140 /* GIMPLE_OMP_SECTIONS_SWITCH is part of
8141 GIMPLE_OMP_SECTIONS, and we do nothing for it. */
8142 ;
ac6e3339 8143 }
bc7bff74 8144 else if (code == GIMPLE_OMP_TARGET
8145 && gimple_omp_target_kind (stmt) == GF_OMP_TARGET_KIND_UPDATE)
8146 new_omp_region (bb, code, parent);
773c5ba7 8147 else
8148 {
8149 /* Otherwise, this directive becomes the parent for a new
8150 region. */
61e47ac8 8151 region = new_omp_region (bb, code, parent);
773c5ba7 8152 parent = region;
8153 }
773c5ba7 8154 }
8155
28c92cbb 8156 if (single_tree && !parent)
8157 return;
8158
773c5ba7 8159 for (son = first_dom_son (CDI_DOMINATORS, bb);
8160 son;
8161 son = next_dom_son (CDI_DOMINATORS, son))
28c92cbb 8162 build_omp_regions_1 (son, parent, single_tree);
8163}
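
/* Illustrative sketch (not part of the pass): for

     #pragma omp parallel
     #pragma omp for
     for (i = 0; i < n; i++) ...

   the dominator walk above creates a GIMPLE_OMP_PARALLEL region whose
   inner child is the GIMPLE_OMP_FOR region; the GIMPLE_OMP_CONTINUE
   block lands in the for-region's cont field, and each
   GIMPLE_OMP_RETURN closes the innermost open region and pops back to
   its parent via parent->outer.  */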
8164
8165/* Builds the tree of OMP regions rooted at ROOT, storing it to
8166 root_omp_region. */
8167
8168static void
8169build_omp_regions_root (basic_block root)
8170{
8171 gcc_assert (root_omp_region == NULL);
8172 build_omp_regions_1 (root, NULL, true);
8173 gcc_assert (root_omp_region != NULL);
773c5ba7 8174}
8175
28c92cbb 8176/* Expand the OMP construct (and its subconstructs) starting in HEAD.  */
8177
8178void
8179omp_expand_local (basic_block head)
8180{
8181 build_omp_regions_root (head);
8182 if (dump_file && (dump_flags & TDF_DETAILS))
8183 {
8184 fprintf (dump_file, "\nOMP region tree\n\n");
8185 dump_omp_region (dump_file, root_omp_region, 0);
8186 fprintf (dump_file, "\n");
8187 }
8188
8189 remove_exit_barriers (root_omp_region);
8190 expand_omp (root_omp_region);
8191
8192 free_omp_regions ();
8193}
773c5ba7 8194
8195/* Scan the CFG and build a tree of OMP regions, storing the root in
8196	 root_omp_region.  */
8197
8198static void
8199build_omp_regions (void)
8200{
61e47ac8 8201 gcc_assert (root_omp_region == NULL);
773c5ba7 8202 calculate_dominance_info (CDI_DOMINATORS);
28c92cbb 8203 build_omp_regions_1 (ENTRY_BLOCK_PTR, NULL, false);
773c5ba7 8204}
8205
773c5ba7 8206/* Main entry point for expanding OMP-GIMPLE into runtime calls. */
8207
2a1990e9 8208static unsigned int
773c5ba7 8209execute_expand_omp (void)
8210{
8211 build_omp_regions ();
8212
61e47ac8 8213 if (!root_omp_region)
8214 return 0;
773c5ba7 8215
61e47ac8 8216 if (dump_file)
8217 {
8218 fprintf (dump_file, "\nOMP region tree\n\n");
8219 dump_omp_region (dump_file, root_omp_region, 0);
8220 fprintf (dump_file, "\n");
773c5ba7 8221 }
61e47ac8 8222
8223 remove_exit_barriers (root_omp_region);
8224
8225 expand_omp (root_omp_region);
8226
61e47ac8 8227 cleanup_tree_cfg ();
8228
8229 free_omp_regions ();
8230
2a1990e9 8231 return 0;
773c5ba7 8232}
8233
79acaae1 8234/* OMP expansion -- the default pass, run before creation of SSA form. */
8235
773c5ba7 8236static bool
8237gate_expand_omp (void)
8238{
c630ef93 8239 return ((flag_openmp != 0 || flag_openmp_simd != 0) && !seen_error ());
773c5ba7 8240}
8241
cbe8bda8 8242namespace {
8243
8244const pass_data pass_data_expand_omp =
8245{
8246 GIMPLE_PASS, /* type */
8247 "ompexp", /* name */
8248 OPTGROUP_NONE, /* optinfo_flags */
8249 true, /* has_gate */
8250 true, /* has_execute */
8251 TV_NONE, /* tv_id */
8252 PROP_gimple_any, /* properties_required */
8253 0, /* properties_provided */
8254 0, /* properties_destroyed */
8255 0, /* todo_flags_start */
8256 0, /* todo_flags_finish */
773c5ba7 8257};
cbe8bda8 8258
8259class pass_expand_omp : public gimple_opt_pass
8260{
8261public:
9af5ce0c 8262 pass_expand_omp (gcc::context *ctxt)
8263 : gimple_opt_pass (pass_data_expand_omp, ctxt)
cbe8bda8 8264 {}
8265
8266 /* opt_pass methods: */
8267 bool gate () { return gate_expand_omp (); }
8268 unsigned int execute () { return execute_expand_omp (); }
8269
8270}; // class pass_expand_omp
8271
8272} // anon namespace
8273
8274gimple_opt_pass *
8275make_pass_expand_omp (gcc::context *ctxt)
8276{
8277 return new pass_expand_omp (ctxt);
8278}
773c5ba7 8279\f
8280/* Routines to lower OpenMP directives into OMP-GIMPLE. */
8281
bc7bff74 8282/* If ctx is a worksharing context inside a cancellable parallel
8283	 region and it isn't nowait, add lhs to its GIMPLE_OMP_RETURN
8284	 and a conditional branch to the parallel's cancel_label to handle
8285 cancellation in the implicit barrier. */
8286
8287static void
8288maybe_add_implicit_barrier_cancel (omp_context *ctx, gimple_seq *body)
8289{
8290 gimple omp_return = gimple_seq_last_stmt (*body);
8291 gcc_assert (gimple_code (omp_return) == GIMPLE_OMP_RETURN);
8292 if (gimple_omp_return_nowait_p (omp_return))
8293 return;
8294 if (ctx->outer
8295 && gimple_code (ctx->outer->stmt) == GIMPLE_OMP_PARALLEL
8296 && ctx->outer->cancellable)
8297 {
8298 tree lhs = create_tmp_var (boolean_type_node, NULL);
8299 gimple_omp_return_set_lhs (omp_return, lhs);
8300 tree fallthru_label = create_artificial_label (UNKNOWN_LOCATION);
8301 gimple g = gimple_build_cond (NE_EXPR, lhs, boolean_false_node,
8302 ctx->outer->cancel_label, fallthru_label);
8303 gimple_seq_add_stmt (body, g);
8304 gimple_seq_add_stmt (body, gimple_build_label (fallthru_label));
8305 }
8306}
8307
75a70cf9 8308/* Lower the OpenMP sections directive in the current statement in GSI_P.
8309 CTX is the enclosing OMP context for the current statement. */
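/* The lowered form built by this function is roughly (a sketch, not
   verbatim GIMPLE; .section is the control variable created below):

       <input-clause setup>
       GIMPLE_OMP_SECTIONS <clauses>
       GIMPLE_OMP_SECTIONS_SWITCH
       {
         GIMPLE_OMP_SECTION  BODY1 ...                GIMPLE_OMP_RETURN
         ...
         GIMPLE_OMP_SECTION  BODYn <lastprivate code> GIMPLE_OMP_RETURN
       }
       GIMPLE_OMP_CONTINUE (.section, .section)
       <reduction code>
       <destructor code>
       GIMPLE_OMP_RETURN [nowait if the clause is present]
*/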
773c5ba7 8310
8311static void
75a70cf9 8312lower_omp_sections (gimple_stmt_iterator *gsi_p, omp_context *ctx)
773c5ba7 8313{
75a70cf9 8314 tree block, control;
8315 gimple_stmt_iterator tgsi;
75a70cf9 8316 gimple stmt, new_stmt, bind, t;
e3a19533 8317 gimple_seq ilist, dlist, olist, new_body;
dac18d1a 8318 struct gimplify_ctx gctx;
773c5ba7 8319
75a70cf9 8320 stmt = gsi_stmt (*gsi_p);
773c5ba7 8321
dac18d1a 8322 push_gimplify_context (&gctx);
773c5ba7 8323
8324 dlist = NULL;
8325 ilist = NULL;
75a70cf9 8326 lower_rec_input_clauses (gimple_omp_sections_clauses (stmt),
bc7bff74 8327 &ilist, &dlist, ctx, NULL);
773c5ba7 8328
e3a19533 8329 new_body = gimple_omp_body (stmt);
8330 gimple_omp_set_body (stmt, NULL);
8331 tgsi = gsi_start (new_body);
8332 for (; !gsi_end_p (tgsi); gsi_next (&tgsi))
773c5ba7 8333 {
8334 omp_context *sctx;
75a70cf9 8335 gimple sec_start;
773c5ba7 8336
75a70cf9 8337 sec_start = gsi_stmt (tgsi);
773c5ba7 8338 sctx = maybe_lookup_ctx (sec_start);
8339 gcc_assert (sctx);
8340
e3a19533 8341 lower_omp (gimple_omp_body_ptr (sec_start), sctx);
8342 gsi_insert_seq_after (&tgsi, gimple_omp_body (sec_start),
8343 GSI_CONTINUE_LINKING);
75a70cf9 8344 gimple_omp_set_body (sec_start, NULL);
773c5ba7 8345
e3a19533 8346 if (gsi_one_before_end_p (tgsi))
773c5ba7 8347 {
75a70cf9 8348 gimple_seq l = NULL;
8349 lower_lastprivate_clauses (gimple_omp_sections_clauses (stmt), NULL,
773c5ba7 8350 &l, ctx);
e3a19533 8351 gsi_insert_seq_after (&tgsi, l, GSI_CONTINUE_LINKING);
75a70cf9 8352 gimple_omp_section_set_last (sec_start);
773c5ba7 8353 }
48e1416a 8354
e3a19533 8355 gsi_insert_after (&tgsi, gimple_build_omp_return (false),
8356 GSI_CONTINUE_LINKING);
773c5ba7 8357 }
1e8e9920 8358
8359 block = make_node (BLOCK);
e3a19533 8360 bind = gimple_build_bind (NULL, new_body, block);
1e8e9920 8361
75a70cf9 8362 olist = NULL;
8363 lower_reduction_clauses (gimple_omp_sections_clauses (stmt), &olist, ctx);
773c5ba7 8364
1d22f541 8365 block = make_node (BLOCK);
75a70cf9 8366 new_stmt = gimple_build_bind (NULL, NULL, block);
e3a19533 8367 gsi_replace (gsi_p, new_stmt, true);
773c5ba7 8368
1d22f541 8369 pop_gimplify_context (new_stmt);
75a70cf9 8370 gimple_bind_append_vars (new_stmt, ctx->block_vars);
8371 BLOCK_VARS (block) = gimple_bind_vars (bind);
1d22f541 8372 if (BLOCK_VARS (block))
8373 TREE_USED (block) = 1;
8374
75a70cf9 8375 new_body = NULL;
8376 gimple_seq_add_seq (&new_body, ilist);
8377 gimple_seq_add_stmt (&new_body, stmt);
8378 gimple_seq_add_stmt (&new_body, gimple_build_omp_sections_switch ());
8379 gimple_seq_add_stmt (&new_body, bind);
61e47ac8 8380
ac6e3339 8381 control = create_tmp_var (unsigned_type_node, ".section");
75a70cf9 8382 t = gimple_build_omp_continue (control, control);
8383 gimple_omp_sections_set_control (stmt, control);
8384 gimple_seq_add_stmt (&new_body, t);
61e47ac8 8385
75a70cf9 8386 gimple_seq_add_seq (&new_body, olist);
bc7bff74 8387 if (ctx->cancellable)
8388 gimple_seq_add_stmt (&new_body, gimple_build_label (ctx->cancel_label));
75a70cf9 8389 gimple_seq_add_seq (&new_body, dlist);
773c5ba7 8390
75a70cf9 8391 new_body = maybe_catch_exception (new_body);
aade31a0 8392
75a70cf9 8393 t = gimple_build_omp_return
8394 (!!find_omp_clause (gimple_omp_sections_clauses (stmt),
8395 OMP_CLAUSE_NOWAIT));
8396 gimple_seq_add_stmt (&new_body, t);
bc7bff74 8397 maybe_add_implicit_barrier_cancel (ctx, &new_body);
61e47ac8 8398
75a70cf9 8399 gimple_bind_set_body (new_stmt, new_body);
1e8e9920 8400}
8401
8402
773c5ba7 8403/* A subroutine of lower_omp_single. Expand the simple form of
75a70cf9 8404 a GIMPLE_OMP_SINGLE, without a copyprivate clause:
1e8e9920 8405
8406 if (GOMP_single_start ())
8407 BODY;
8408 [ GOMP_barrier (); ] -> unless 'nowait' is present.
773c5ba7 8409
8410 FIXME. It may be better to delay expanding the logic of this until
8411 pass_expand_omp. The expanded logic may make the job of a
 8412 synchronization analysis pass more difficult. */
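/* Concretely, the GIMPLE built below looks like (a sketch; the label
   names are artificial):

       lhs = GOMP_single_start ();
       if (lhs == true) goto <tlabel>; else goto <flabel>;
     <tlabel>:
       BODY;
     <flabel>:
*/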
1e8e9920 8413
8414static void
75a70cf9 8415lower_omp_single_simple (gimple single_stmt, gimple_seq *pre_p)
1e8e9920 8416{
e60a6f7b 8417 location_t loc = gimple_location (single_stmt);
8418 tree tlabel = create_artificial_label (loc);
8419 tree flabel = create_artificial_label (loc);
75a70cf9 8420 gimple call, cond;
8421 tree lhs, decl;
8422
b9a16870 8423 decl = builtin_decl_explicit (BUILT_IN_GOMP_SINGLE_START);
75a70cf9 8424 lhs = create_tmp_var (TREE_TYPE (TREE_TYPE (decl)), NULL);
8425 call = gimple_build_call (decl, 0);
8426 gimple_call_set_lhs (call, lhs);
8427 gimple_seq_add_stmt (pre_p, call);
8428
8429 cond = gimple_build_cond (EQ_EXPR, lhs,
389dd41b 8430 fold_convert_loc (loc, TREE_TYPE (lhs),
8431 boolean_true_node),
75a70cf9 8432 tlabel, flabel);
8433 gimple_seq_add_stmt (pre_p, cond);
8434 gimple_seq_add_stmt (pre_p, gimple_build_label (tlabel));
8435 gimple_seq_add_seq (pre_p, gimple_omp_body (single_stmt));
8436 gimple_seq_add_stmt (pre_p, gimple_build_label (flabel));
1e8e9920 8437}
8438
773c5ba7 8439
8440/* A subroutine of lower_omp_single. Expand the simple form of
75a70cf9 8441 a GIMPLE_OMP_SINGLE, with a copyprivate clause:
1e8e9920 8442
8443 #pragma omp single copyprivate (a, b, c)
8444
8445 Create a new structure to hold copies of 'a', 'b' and 'c' and emit:
8446
8447 {
8448 if ((copyout_p = GOMP_single_copy_start ()) == NULL)
8449 {
8450 BODY;
8451 copyout.a = a;
8452 copyout.b = b;
8453 copyout.c = c;
8454 GOMP_single_copy_end (&copyout);
8455 }
8456 else
8457 {
8458 a = copyout_p->a;
8459 b = copyout_p->b;
8460 c = copyout_p->c;
8461 }
8462 GOMP_barrier ();
8463 }
773c5ba7 8464
8465 FIXME. It may be better to delay expanding the logic of this until
8466 pass_expand_omp. The expanded logic may make the job of a
 8467 synchronization analysis pass more difficult. */
1e8e9920 8468
8469static void
75a70cf9 8470lower_omp_single_copy (gimple single_stmt, gimple_seq *pre_p, omp_context *ctx)
1e8e9920 8471{
b9a16870 8472 tree ptr_type, t, l0, l1, l2, bfn_decl;
75a70cf9 8473 gimple_seq copyin_seq;
e60a6f7b 8474 location_t loc = gimple_location (single_stmt);
1e8e9920 8475
8476 ctx->sender_decl = create_tmp_var (ctx->record_type, ".omp_copy_o");
8477
8478 ptr_type = build_pointer_type (ctx->record_type);
8479 ctx->receiver_decl = create_tmp_var (ptr_type, ".omp_copy_i");
8480
e60a6f7b 8481 l0 = create_artificial_label (loc);
8482 l1 = create_artificial_label (loc);
8483 l2 = create_artificial_label (loc);
1e8e9920 8484
b9a16870 8485 bfn_decl = builtin_decl_explicit (BUILT_IN_GOMP_SINGLE_COPY_START);
8486 t = build_call_expr_loc (loc, bfn_decl, 0);
389dd41b 8487 t = fold_convert_loc (loc, ptr_type, t);
75a70cf9 8488 gimplify_assign (ctx->receiver_decl, t, pre_p);
1e8e9920 8489
8490 t = build2 (EQ_EXPR, boolean_type_node, ctx->receiver_decl,
8491 build_int_cst (ptr_type, 0));
8492 t = build3 (COND_EXPR, void_type_node, t,
8493 build_and_jump (&l0), build_and_jump (&l1));
8494 gimplify_and_add (t, pre_p);
8495
75a70cf9 8496 gimple_seq_add_stmt (pre_p, gimple_build_label (l0));
1e8e9920 8497
75a70cf9 8498 gimple_seq_add_seq (pre_p, gimple_omp_body (single_stmt));
1e8e9920 8499
8500 copyin_seq = NULL;
75a70cf9 8501 lower_copyprivate_clauses (gimple_omp_single_clauses (single_stmt), pre_p,
1e8e9920 8502 &copyin_seq, ctx);
8503
389dd41b 8504 t = build_fold_addr_expr_loc (loc, ctx->sender_decl);
b9a16870 8505 bfn_decl = builtin_decl_explicit (BUILT_IN_GOMP_SINGLE_COPY_END);
8506 t = build_call_expr_loc (loc, bfn_decl, 1, t);
1e8e9920 8507 gimplify_and_add (t, pre_p);
8508
8509 t = build_and_jump (&l2);
8510 gimplify_and_add (t, pre_p);
8511
75a70cf9 8512 gimple_seq_add_stmt (pre_p, gimple_build_label (l1));
1e8e9920 8513
75a70cf9 8514 gimple_seq_add_seq (pre_p, copyin_seq);
1e8e9920 8515
75a70cf9 8516 gimple_seq_add_stmt (pre_p, gimple_build_label (l2));
1e8e9920 8517}
8518
773c5ba7 8519
1e8e9920 8520/* Lower code for an OpenMP single directive. */
8521
8522static void
75a70cf9 8523lower_omp_single (gimple_stmt_iterator *gsi_p, omp_context *ctx)
1e8e9920 8524{
75a70cf9 8525 tree block;
8526 gimple t, bind, single_stmt = gsi_stmt (*gsi_p);
bc7bff74 8527 gimple_seq bind_body, bind_body_tail = NULL, dlist;
dac18d1a 8528 struct gimplify_ctx gctx;
1e8e9920 8529
dac18d1a 8530 push_gimplify_context (&gctx);
1e8e9920 8531
e3a19533 8532 block = make_node (BLOCK);
8533 bind = gimple_build_bind (NULL, NULL, block);
8534 gsi_replace (gsi_p, bind, true);
75a70cf9 8535 bind_body = NULL;
e3a19533 8536 dlist = NULL;
75a70cf9 8537 lower_rec_input_clauses (gimple_omp_single_clauses (single_stmt),
bc7bff74 8538 &bind_body, &dlist, ctx, NULL);
e3a19533 8539 lower_omp (gimple_omp_body_ptr (single_stmt), ctx);
1e8e9920 8540
75a70cf9 8541 gimple_seq_add_stmt (&bind_body, single_stmt);
1e8e9920 8542
8543 if (ctx->record_type)
75a70cf9 8544 lower_omp_single_copy (single_stmt, &bind_body, ctx);
1e8e9920 8545 else
75a70cf9 8546 lower_omp_single_simple (single_stmt, &bind_body);
8547
8548 gimple_omp_set_body (single_stmt, NULL);
1e8e9920 8549
75a70cf9 8550 gimple_seq_add_seq (&bind_body, dlist);
61e47ac8 8551
75a70cf9 8552 bind_body = maybe_catch_exception (bind_body);
61e47ac8 8553
48e1416a 8554 t = gimple_build_omp_return
75a70cf9 8555 (!!find_omp_clause (gimple_omp_single_clauses (single_stmt),
8556 OMP_CLAUSE_NOWAIT));
bc7bff74 8557 gimple_seq_add_stmt (&bind_body_tail, t);
8558 maybe_add_implicit_barrier_cancel (ctx, &bind_body_tail);
8559 if (ctx->record_type)
8560 {
8561 gimple_stmt_iterator gsi = gsi_start (bind_body_tail);
8562 tree clobber = build_constructor (ctx->record_type, NULL);
8563 TREE_THIS_VOLATILE (clobber) = 1;
8564 gsi_insert_after (&gsi, gimple_build_assign (ctx->sender_decl,
8565 clobber), GSI_SAME_STMT);
8566 }
8567 gimple_seq_add_seq (&bind_body, bind_body_tail);
e3a19533 8568 gimple_bind_set_body (bind, bind_body);
61e47ac8 8569
1e8e9920 8570 pop_gimplify_context (bind);
773c5ba7 8571
75a70cf9 8572 gimple_bind_append_vars (bind, ctx->block_vars);
8573 BLOCK_VARS (block) = ctx->block_vars;
1d22f541 8574 if (BLOCK_VARS (block))
8575 TREE_USED (block) = 1;
1e8e9920 8576}
8577
773c5ba7 8578
1e8e9920 8579/* Lower code for an OpenMP master directive. */
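/* A sketch of the lowering done here (not verbatim GIMPLE):

       if (omp_get_thread_num () != 0) goto <lab>;
       BODY;
     <lab>:
       GIMPLE_OMP_RETURN (nowait)

   Only the master thread executes BODY; the construct implies no
   barrier.  */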
8580
8581static void
75a70cf9 8582lower_omp_master (gimple_stmt_iterator *gsi_p, omp_context *ctx)
1e8e9920 8583{
b9a16870 8584 tree block, lab = NULL, x, bfn_decl;
75a70cf9 8585 gimple stmt = gsi_stmt (*gsi_p), bind;
389dd41b 8586 location_t loc = gimple_location (stmt);
75a70cf9 8587 gimple_seq tseq;
dac18d1a 8588 struct gimplify_ctx gctx;
1e8e9920 8589
dac18d1a 8590 push_gimplify_context (&gctx);
1e8e9920 8591
8592 block = make_node (BLOCK);
e3a19533 8593 bind = gimple_build_bind (NULL, NULL, block);
8594 gsi_replace (gsi_p, bind, true);
8595 gimple_bind_add_stmt (bind, stmt);
61e47ac8 8596
b9a16870 8597 bfn_decl = builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM);
8598 x = build_call_expr_loc (loc, bfn_decl, 0);
1e8e9920 8599 x = build2 (EQ_EXPR, boolean_type_node, x, integer_zero_node);
8600 x = build3 (COND_EXPR, void_type_node, x, NULL, build_and_jump (&lab));
75a70cf9 8601 tseq = NULL;
8602 gimplify_and_add (x, &tseq);
8603 gimple_bind_add_seq (bind, tseq);
1e8e9920 8604
e3a19533 8605 lower_omp (gimple_omp_body_ptr (stmt), ctx);
75a70cf9 8606 gimple_omp_set_body (stmt, maybe_catch_exception (gimple_omp_body (stmt)));
8607 gimple_bind_add_seq (bind, gimple_omp_body (stmt));
8608 gimple_omp_set_body (stmt, NULL);
1e8e9920 8609
75a70cf9 8610 gimple_bind_add_stmt (bind, gimple_build_label (lab));
61e47ac8 8611
75a70cf9 8612 gimple_bind_add_stmt (bind, gimple_build_omp_return (true));
61e47ac8 8613
1e8e9920 8614 pop_gimplify_context (bind);
773c5ba7 8615
75a70cf9 8616 gimple_bind_append_vars (bind, ctx->block_vars);
8617 BLOCK_VARS (block) = ctx->block_vars;
1e8e9920 8618}
8619
773c5ba7 8620
bc7bff74 8621/* Lower code for an OpenMP taskgroup directive. */
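/* A sketch of the lowering done here (not verbatim GIMPLE):

       GOMP_taskgroup_start ();
       BODY;
       GIMPLE_OMP_RETURN (nowait)

   Note that only the start call is emitted at this point; the end of
   the region is still represented by the GIMPLE_OMP_RETURN marker.  */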
8622
8623static void
8624lower_omp_taskgroup (gimple_stmt_iterator *gsi_p, omp_context *ctx)
8625{
8626 gimple stmt = gsi_stmt (*gsi_p), bind, x;
8627 tree block = make_node (BLOCK);
8628
8629 bind = gimple_build_bind (NULL, NULL, block);
8630 gsi_replace (gsi_p, bind, true);
8631 gimple_bind_add_stmt (bind, stmt);
8632
8633 x = gimple_build_call (builtin_decl_explicit (BUILT_IN_GOMP_TASKGROUP_START),
8634 0);
8635 gimple_bind_add_stmt (bind, x);
8636
8637 lower_omp (gimple_omp_body_ptr (stmt), ctx);
8638 gimple_bind_add_seq (bind, gimple_omp_body (stmt));
8639 gimple_omp_set_body (stmt, NULL);
8640
8641 gimple_bind_add_stmt (bind, gimple_build_omp_return (true));
8642
8643 gimple_bind_append_vars (bind, ctx->block_vars);
8644 BLOCK_VARS (block) = ctx->block_vars;
8645}
8646
8647
1e8e9920 8648/* Lower code for an OpenMP ordered directive. */
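/* A sketch of the lowering done here (not verbatim GIMPLE):

       GOMP_ordered_start ();
       BODY;
       GOMP_ordered_end ();
       GIMPLE_OMP_RETURN (nowait)
*/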
8649
8650static void
75a70cf9 8651lower_omp_ordered (gimple_stmt_iterator *gsi_p, omp_context *ctx)
1e8e9920 8652{
75a70cf9 8653 tree block;
8654 gimple stmt = gsi_stmt (*gsi_p), bind, x;
dac18d1a 8655 struct gimplify_ctx gctx;
1e8e9920 8656
dac18d1a 8657 push_gimplify_context (&gctx);
1e8e9920 8658
8659 block = make_node (BLOCK);
e3a19533 8660 bind = gimple_build_bind (NULL, NULL, block);
8661 gsi_replace (gsi_p, bind, true);
8662 gimple_bind_add_stmt (bind, stmt);
61e47ac8 8663
b9a16870 8664 x = gimple_build_call (builtin_decl_explicit (BUILT_IN_GOMP_ORDERED_START),
8665 0);
75a70cf9 8666 gimple_bind_add_stmt (bind, x);
1e8e9920 8667
e3a19533 8668 lower_omp (gimple_omp_body_ptr (stmt), ctx);
75a70cf9 8669 gimple_omp_set_body (stmt, maybe_catch_exception (gimple_omp_body (stmt)));
8670 gimple_bind_add_seq (bind, gimple_omp_body (stmt));
8671 gimple_omp_set_body (stmt, NULL);
1e8e9920 8672
b9a16870 8673 x = gimple_build_call (builtin_decl_explicit (BUILT_IN_GOMP_ORDERED_END), 0);
75a70cf9 8674 gimple_bind_add_stmt (bind, x);
61e47ac8 8675
75a70cf9 8676 gimple_bind_add_stmt (bind, gimple_build_omp_return (true));
61e47ac8 8677
1e8e9920 8678 pop_gimplify_context (bind);
773c5ba7 8679
75a70cf9 8680 gimple_bind_append_vars (bind, ctx->block_vars);
8681 BLOCK_VARS (block) = gimple_bind_vars (bind);
1e8e9920 8682}
8683
1e8e9920 8684
75a70cf9 8685/* Gimplify a GIMPLE_OMP_CRITICAL statement. This is a relatively simple
1e8e9920 8686 substitution of a couple of function calls. But in the NAMED case,
 8687 it requires that the languages coordinate on a symbol name. It is
 8688 therefore best put here in common code. */
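/* For example (a sketch), for

       #pragma omp critical (foo)
       BODY;

   a common symbol ".gomp_critical_user_foo" is created below, so that
   every translation unit using the name agrees on one lock, and the
   construct lowers to

       GOMP_critical_name_start (&.gomp_critical_user_foo);
       BODY;
       GOMP_critical_name_end (&.gomp_critical_user_foo);

   The unnamed form uses GOMP_critical_start/end instead.  */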
8689
8690static GTY((param1_is (tree), param2_is (tree)))
8691 splay_tree critical_name_mutexes;
8692
8693static void
75a70cf9 8694lower_omp_critical (gimple_stmt_iterator *gsi_p, omp_context *ctx)
1e8e9920 8695{
75a70cf9 8696 tree block;
8697 tree name, lock, unlock;
8698 gimple stmt = gsi_stmt (*gsi_p), bind;
389dd41b 8699 location_t loc = gimple_location (stmt);
75a70cf9 8700 gimple_seq tbody;
dac18d1a 8701 struct gimplify_ctx gctx;
1e8e9920 8702
75a70cf9 8703 name = gimple_omp_critical_name (stmt);
1e8e9920 8704 if (name)
8705 {
c2f47e15 8706 tree decl;
1e8e9920 8707 splay_tree_node n;
8708
8709 if (!critical_name_mutexes)
8710 critical_name_mutexes
ba72912a 8711 = splay_tree_new_ggc (splay_tree_compare_pointers,
8712 ggc_alloc_splay_tree_tree_node_tree_node_splay_tree_s,
8713 ggc_alloc_splay_tree_tree_node_tree_node_splay_tree_node_s);
1e8e9920 8714
8715 n = splay_tree_lookup (critical_name_mutexes, (splay_tree_key) name);
8716 if (n == NULL)
8717 {
8718 char *new_str;
8719
8720 decl = create_tmp_var_raw (ptr_type_node, NULL);
8721
8722 new_str = ACONCAT ((".gomp_critical_user_",
8723 IDENTIFIER_POINTER (name), NULL));
8724 DECL_NAME (decl) = get_identifier (new_str);
8725 TREE_PUBLIC (decl) = 1;
8726 TREE_STATIC (decl) = 1;
8727 DECL_COMMON (decl) = 1;
8728 DECL_ARTIFICIAL (decl) = 1;
8729 DECL_IGNORED_P (decl) = 1;
1d416bd7 8730 varpool_finalize_decl (decl);
1e8e9920 8731
8732 splay_tree_insert (critical_name_mutexes, (splay_tree_key) name,
8733 (splay_tree_value) decl);
8734 }
8735 else
8736 decl = (tree) n->value;
8737
b9a16870 8738 lock = builtin_decl_explicit (BUILT_IN_GOMP_CRITICAL_NAME_START);
389dd41b 8739 lock = build_call_expr_loc (loc, lock, 1, build_fold_addr_expr_loc (loc, decl));
1e8e9920 8740
b9a16870 8741 unlock = builtin_decl_explicit (BUILT_IN_GOMP_CRITICAL_NAME_END);
389dd41b 8742 unlock = build_call_expr_loc (loc, unlock, 1,
8743 build_fold_addr_expr_loc (loc, decl));
1e8e9920 8744 }
8745 else
8746 {
b9a16870 8747 lock = builtin_decl_explicit (BUILT_IN_GOMP_CRITICAL_START);
389dd41b 8748 lock = build_call_expr_loc (loc, lock, 0);
1e8e9920 8749
b9a16870 8750 unlock = builtin_decl_explicit (BUILT_IN_GOMP_CRITICAL_END);
389dd41b 8751 unlock = build_call_expr_loc (loc, unlock, 0);
1e8e9920 8752 }
8753
dac18d1a 8754 push_gimplify_context (&gctx);
1e8e9920 8755
8756 block = make_node (BLOCK);
e3a19533 8757 bind = gimple_build_bind (NULL, NULL, block);
8758 gsi_replace (gsi_p, bind, true);
8759 gimple_bind_add_stmt (bind, stmt);
61e47ac8 8760
75a70cf9 8761 tbody = gimple_bind_body (bind);
8762 gimplify_and_add (lock, &tbody);
8763 gimple_bind_set_body (bind, tbody);
1e8e9920 8764
e3a19533 8765 lower_omp (gimple_omp_body_ptr (stmt), ctx);
75a70cf9 8766 gimple_omp_set_body (stmt, maybe_catch_exception (gimple_omp_body (stmt)));
8767 gimple_bind_add_seq (bind, gimple_omp_body (stmt));
8768 gimple_omp_set_body (stmt, NULL);
1e8e9920 8769
75a70cf9 8770 tbody = gimple_bind_body (bind);
8771 gimplify_and_add (unlock, &tbody);
8772 gimple_bind_set_body (bind, tbody);
61e47ac8 8773
75a70cf9 8774 gimple_bind_add_stmt (bind, gimple_build_omp_return (true));
1e8e9920 8775
8776 pop_gimplify_context (bind);
75a70cf9 8777 gimple_bind_append_vars (bind, ctx->block_vars);
8778 BLOCK_VARS (block) = gimple_bind_vars (bind);
773c5ba7 8779}
8780
8781
8782/* A subroutine of lower_omp_for. Generate code to emit the predicate
8783 for a lastprivate clause. Given a loop control predicate of (V
8784 cond N2), we gate the clause on (!(V cond N2)). The lowered form
1e4afe3c 8785 is appended to *DLIST, and the iterator initialization is appended to
8786 *BODY_P. */
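/* For example (a sketch), given

       #pragma omp for lastprivate (x)
       for (i = 0; i < n; i++) ...

   the lastprivate copy-out is gated on (i >= n); for a step of +/-1
   this is strengthened below to (i == n), and V is pre-initialized so
   that threads executing no iterations cannot take the gate by
   accident.  */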
773c5ba7 8787
8788static void
75a70cf9 8789lower_omp_for_lastprivate (struct omp_for_data *fd, gimple_seq *body_p,
8790 gimple_seq *dlist, struct omp_context *ctx)
773c5ba7 8791{
75a70cf9 8792 tree clauses, cond, vinit;
773c5ba7 8793 enum tree_code cond_code;
75a70cf9 8794 gimple_seq stmts;
48e1416a 8795
fd6481cf 8796 cond_code = fd->loop.cond_code;
773c5ba7 8797 cond_code = cond_code == LT_EXPR ? GE_EXPR : LE_EXPR;
8798
8799 /* When possible, use a strict equality expression. This can let
 8800 VRP-style optimizations deduce the value and remove a copy. */
fd6481cf 8801 if (host_integerp (fd->loop.step, 0))
773c5ba7 8802 {
fd6481cf 8803 HOST_WIDE_INT step = TREE_INT_CST_LOW (fd->loop.step);
773c5ba7 8804 if (step == 1 || step == -1)
8805 cond_code = EQ_EXPR;
8806 }
8807
fd6481cf 8808 cond = build2 (cond_code, boolean_type_node, fd->loop.v, fd->loop.n2);
773c5ba7 8809
75a70cf9 8810 clauses = gimple_omp_for_clauses (fd->for_stmt);
1e4afe3c 8811 stmts = NULL;
8812 lower_lastprivate_clauses (clauses, cond, &stmts, ctx);
75a70cf9 8813 if (!gimple_seq_empty_p (stmts))
1e4afe3c 8814 {
75a70cf9 8815 gimple_seq_add_seq (&stmts, *dlist);
fd6481cf 8816 *dlist = stmts;
1e4afe3c 8817
8818 /* Optimize: v = 0; is usually cheaper than v = some_other_constant. */
fd6481cf 8819 vinit = fd->loop.n1;
1e4afe3c 8820 if (cond_code == EQ_EXPR
fd6481cf 8821 && host_integerp (fd->loop.n2, 0)
8822 && ! integer_zerop (fd->loop.n2))
8823 vinit = build_int_cst (TREE_TYPE (fd->loop.v), 0);
3d483a94 8824 else
8825 vinit = unshare_expr (vinit);
1e4afe3c 8826
8827 /* Initialize the iterator variable, so that threads that don't execute
8828 any iterations don't execute the lastprivate clauses by accident. */
75a70cf9 8829 gimplify_assign (fd->loop.v, vinit, body_p);
1e4afe3c 8830 }
773c5ba7 8831}
8832
8833
8834/* Lower code for an OpenMP loop directive. */
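/* The overall shape of the result is roughly (a sketch, not verbatim
   GIMPLE):

       <pre-body, input clauses, lowered header temporaries>
       [V pre-initialized for lastprivate, see above]
       GIMPLE_OMP_FOR <lowered header>
       BODY;
       GIMPLE_OMP_CONTINUE (V, V)
       <reduction code>
       <lastprivate / destructor code>
       GIMPLE_OMP_RETURN [nowait if the clause is present]
*/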
8835
8836static void
75a70cf9 8837lower_omp_for (gimple_stmt_iterator *gsi_p, omp_context *ctx)
773c5ba7 8838{
75a70cf9 8839 tree *rhs_p, block;
bc7bff74 8840 struct omp_for_data fd, *fdp = NULL;
75a70cf9 8841 gimple stmt = gsi_stmt (*gsi_p), new_stmt;
f018d957 8842 gimple_seq omp_for_body, body, dlist;
75a70cf9 8843 size_t i;
dac18d1a 8844 struct gimplify_ctx gctx;
773c5ba7 8845
dac18d1a 8846 push_gimplify_context (&gctx);
773c5ba7 8847
e3a19533 8848 lower_omp (gimple_omp_for_pre_body_ptr (stmt), ctx);
773c5ba7 8849
1d22f541 8850 block = make_node (BLOCK);
75a70cf9 8851 new_stmt = gimple_build_bind (NULL, NULL, block);
e3a19533 8852 /* Replace at gsi right away, so that 'stmt' is no longer a member
 8853 of a sequence, as we're going to add it to a different
 8854 one below. */
8855 gsi_replace (gsi_p, new_stmt, true);
1d22f541 8856
773c5ba7 8857 /* Move declaration of temporaries in the loop body before we make
8858 it go away. */
75a70cf9 8859 omp_for_body = gimple_omp_body (stmt);
8860 if (!gimple_seq_empty_p (omp_for_body)
8861 && gimple_code (gimple_seq_first_stmt (omp_for_body)) == GIMPLE_BIND)
8862 {
8863 tree vars = gimple_bind_vars (gimple_seq_first_stmt (omp_for_body));
8864 gimple_bind_append_vars (new_stmt, vars);
8865 }
773c5ba7 8866
bc7bff74 8867 if (gimple_omp_for_combined_into_p (stmt))
8868 {
8869 extract_omp_for_data (stmt, &fd, NULL);
8870 fdp = &fd;
8871
8872 /* We need two temporaries with fd.loop.v type (istart/iend)
8873 and then (fd.collapse - 1) temporaries with the same
8874 type for count2 ... countN-1 vars if not constant. */
8875 size_t count = 2;
8876 tree type = fd.iter_type;
8877 if (fd.collapse > 1
8878 && TREE_CODE (fd.loop.n2) != INTEGER_CST)
8879 count += fd.collapse - 1;
8880 bool parallel_for = gimple_omp_for_kind (stmt) == GF_OMP_FOR_KIND_FOR;
8881 tree outerc = NULL, *pc = gimple_omp_for_clauses_ptr (stmt);
8882 tree clauses = *pc;
8883 if (parallel_for)
8884 outerc
8885 = find_omp_clause (gimple_omp_parallel_clauses (ctx->outer->stmt),
8886 OMP_CLAUSE__LOOPTEMP_);
8887 for (i = 0; i < count; i++)
8888 {
8889 tree temp;
8890 if (parallel_for)
8891 {
8892 gcc_assert (outerc);
8893 temp = lookup_decl (OMP_CLAUSE_DECL (outerc), ctx->outer);
8894 outerc = find_omp_clause (OMP_CLAUSE_CHAIN (outerc),
8895 OMP_CLAUSE__LOOPTEMP_);
8896 }
8897 else
8898 temp = create_tmp_var (type, NULL);
8899 *pc = build_omp_clause (UNKNOWN_LOCATION, OMP_CLAUSE__LOOPTEMP_);
8900 OMP_CLAUSE_DECL (*pc) = temp;
8901 pc = &OMP_CLAUSE_CHAIN (*pc);
8902 }
8903 *pc = clauses;
8904 }
8905
75a70cf9 8906 /* The pre-body and input clauses go before the lowered GIMPLE_OMP_FOR. */
773c5ba7 8907 dlist = NULL;
75a70cf9 8908 body = NULL;
bc7bff74 8909 lower_rec_input_clauses (gimple_omp_for_clauses (stmt), &body, &dlist, ctx,
8910 fdp);
75a70cf9 8911 gimple_seq_add_seq (&body, gimple_omp_for_pre_body (stmt));
773c5ba7 8912
3d483a94 8913 lower_omp (gimple_omp_body_ptr (stmt), ctx);
8914
773c5ba7 8915 /* Lower the header expressions. At this point, we can assume that
8916 the header is of the form:
8917
8918 #pragma omp for (V = VAL1; V {<|>|<=|>=} VAL2; V = V [+-] VAL3)
8919
8920 We just need to make sure that VAL1, VAL2 and VAL3 are lowered
8921 using the .omp_data_s mapping, if needed. */
75a70cf9 8922 for (i = 0; i < gimple_omp_for_collapse (stmt); i++)
fd6481cf 8923 {
75a70cf9 8924 rhs_p = gimple_omp_for_initial_ptr (stmt, i);
fd6481cf 8925 if (!is_gimple_min_invariant (*rhs_p))
75a70cf9 8926 *rhs_p = get_formal_tmp_var (*rhs_p, &body);
fd6481cf 8927
75a70cf9 8928 rhs_p = gimple_omp_for_final_ptr (stmt, i);
fd6481cf 8929 if (!is_gimple_min_invariant (*rhs_p))
75a70cf9 8930 *rhs_p = get_formal_tmp_var (*rhs_p, &body);
fd6481cf 8931
75a70cf9 8932 rhs_p = &TREE_OPERAND (gimple_omp_for_incr (stmt, i), 1);
fd6481cf 8933 if (!is_gimple_min_invariant (*rhs_p))
75a70cf9 8934 *rhs_p = get_formal_tmp_var (*rhs_p, &body);
fd6481cf 8935 }
773c5ba7 8936
8937 /* Once lowered, extract the bounds and clauses. */
fd6481cf 8938 extract_omp_for_data (stmt, &fd, NULL);
773c5ba7 8939
75a70cf9 8940 lower_omp_for_lastprivate (&fd, &body, &dlist, ctx);
773c5ba7 8941
75a70cf9 8942 gimple_seq_add_stmt (&body, stmt);
8943 gimple_seq_add_seq (&body, gimple_omp_body (stmt));
61e47ac8 8944
75a70cf9 8945 gimple_seq_add_stmt (&body, gimple_build_omp_continue (fd.loop.v,
8946 fd.loop.v));
61e47ac8 8947
773c5ba7 8948 /* After the loop, add exit clauses. */
75a70cf9 8949 lower_reduction_clauses (gimple_omp_for_clauses (stmt), &body, ctx);
bc7bff74 8950
8951 if (ctx->cancellable)
8952 gimple_seq_add_stmt (&body, gimple_build_label (ctx->cancel_label));
8953
75a70cf9 8954 gimple_seq_add_seq (&body, dlist);
773c5ba7 8955
75a70cf9 8956 body = maybe_catch_exception (body);
aade31a0 8957
61e47ac8 8958 /* Region exit marker goes at the end of the loop body. */
75a70cf9 8959 gimple_seq_add_stmt (&body, gimple_build_omp_return (fd.have_nowait));
bc7bff74 8960 maybe_add_implicit_barrier_cancel (ctx, &body);
1d22f541 8961 pop_gimplify_context (new_stmt);
75a70cf9 8962
8963 gimple_bind_append_vars (new_stmt, ctx->block_vars);
8964 BLOCK_VARS (block) = gimple_bind_vars (new_stmt);
1d22f541 8965 if (BLOCK_VARS (block))
8966 TREE_USED (block) = 1;
773c5ba7 8967
75a70cf9 8968 gimple_bind_set_body (new_stmt, body);
8969 gimple_omp_set_body (stmt, NULL);
8970 gimple_omp_for_set_pre_body (stmt, NULL);
1e8e9920 8971}
8972
48e1416a 8973/* Callback for walk_stmts. Check if the current statement only contains
75a70cf9 8974 GIMPLE_OMP_FOR or GIMPLE_OMP_SECTIONS. */
de7ef844 8975
8976static tree
75a70cf9 8977check_combined_parallel (gimple_stmt_iterator *gsi_p,
8978 bool *handled_ops_p,
8979 struct walk_stmt_info *wi)
de7ef844 8980{
4077bf7a 8981 int *info = (int *) wi->info;
75a70cf9 8982 gimple stmt = gsi_stmt (*gsi_p);
de7ef844 8983
75a70cf9 8984 *handled_ops_p = true;
8985 switch (gimple_code (stmt))
de7ef844 8986 {
75a70cf9 8987 WALK_SUBSTMTS;
8988
8989 case GIMPLE_OMP_FOR:
8990 case GIMPLE_OMP_SECTIONS:
de7ef844 8991 *info = *info == 0 ? 1 : -1;
8992 break;
8993 default:
8994 *info = -1;
8995 break;
8996 }
8997 return NULL;
8998}
773c5ba7 8999
fd6481cf 9000struct omp_taskcopy_context
9001{
9002 /* This field must be at the beginning, as we do "inheritance": Some
9003 callback functions for tree-inline.c (e.g., omp_copy_decl)
9004 receive a copy_body_data pointer that is up-casted to an
9005 omp_context pointer. */
9006 copy_body_data cb;
9007 omp_context *ctx;
9008};
9009
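/* Callback for the copy_body machinery used by create_task_copyfn:
   variables that have a field in the context's sender record
   (sfield_map) get a fresh temporary in the copy function; all other
   decls are returned unchanged.  */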
9010static tree
9011task_copyfn_copy_decl (tree var, copy_body_data *cb)
9012{
9013 struct omp_taskcopy_context *tcctx = (struct omp_taskcopy_context *) cb;
9014
9015 if (splay_tree_lookup (tcctx->ctx->sfield_map, (splay_tree_key) var))
9016 return create_tmp_var (TREE_TYPE (var), NULL);
9017
9018 return var;
9019}
9020
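/* Build a copy of the record type ORIG_TYPE for the task copy
   function, remapping each field's type, size and offset through
   TCCTX so that variably modified types refer to the copy function's
   own temporaries.  */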
9021static tree
9022task_copyfn_remap_type (struct omp_taskcopy_context *tcctx, tree orig_type)
9023{
9024 tree name, new_fields = NULL, type, f;
9025
9026 type = lang_hooks.types.make_type (RECORD_TYPE);
9027 name = DECL_NAME (TYPE_NAME (orig_type));
e60a6f7b 9028 name = build_decl (gimple_location (tcctx->ctx->stmt),
9029 TYPE_DECL, name, type);
fd6481cf 9030 TYPE_NAME (type) = name;
9031
9032 for (f = TYPE_FIELDS (orig_type); f ; f = TREE_CHAIN (f))
9033 {
9034 tree new_f = copy_node (f);
9035 DECL_CONTEXT (new_f) = type;
9036 TREE_TYPE (new_f) = remap_type (TREE_TYPE (f), &tcctx->cb);
9037 TREE_CHAIN (new_f) = new_fields;
75a70cf9 9038 walk_tree (&DECL_SIZE (new_f), copy_tree_body_r, &tcctx->cb, NULL);
9039 walk_tree (&DECL_SIZE_UNIT (new_f), copy_tree_body_r, &tcctx->cb, NULL);
9040 walk_tree (&DECL_FIELD_OFFSET (new_f), copy_tree_body_r,
9041 &tcctx->cb, NULL);
fd6481cf 9042 new_fields = new_f;
9043 *pointer_map_insert (tcctx->cb.decl_map, f) = new_f;
9044 }
9045 TYPE_FIELDS (type) = nreverse (new_fields);
9046 layout_type (type);
9047 return type;
9048}
9049
9050/* Create task copyfn: copy shared variable pointers and copy-construct firstprivate variables from the parent's record into the spawned task's record. */
9051
9052static void
75a70cf9 9053create_task_copyfn (gimple task_stmt, omp_context *ctx)
fd6481cf 9054{
9055 struct function *child_cfun;
9056 tree child_fn, t, c, src, dst, f, sf, arg, sarg, decl;
9057 tree record_type, srecord_type, bind, list;
9058 bool record_needs_remap = false, srecord_needs_remap = false;
9059 splay_tree_node n;
9060 struct omp_taskcopy_context tcctx;
dac18d1a 9061 struct gimplify_ctx gctx;
389dd41b 9062 location_t loc = gimple_location (task_stmt);
fd6481cf 9063
75a70cf9 9064 child_fn = gimple_omp_task_copy_fn (task_stmt);
fd6481cf 9065 child_cfun = DECL_STRUCT_FUNCTION (child_fn);
9066 gcc_assert (child_cfun->cfg == NULL);
fd6481cf 9067 DECL_SAVED_TREE (child_fn) = alloc_stmt_list ();
9068
9069 /* Reset DECL_CONTEXT on function arguments. */
1767a056 9070 for (t = DECL_ARGUMENTS (child_fn); t; t = DECL_CHAIN (t))
fd6481cf 9071 DECL_CONTEXT (t) = child_fn;
9072
9073 /* Populate the function. */
dac18d1a 9074 push_gimplify_context (&gctx);
9078126c 9075 push_cfun (child_cfun);
fd6481cf 9076
9077 bind = build3 (BIND_EXPR, void_type_node, NULL, NULL, NULL);
9078 TREE_SIDE_EFFECTS (bind) = 1;
9079 list = NULL;
9080 DECL_SAVED_TREE (child_fn) = bind;
75a70cf9 9081 DECL_SOURCE_LOCATION (child_fn) = gimple_location (task_stmt);
fd6481cf 9082
9083 /* Remap src and dst argument types if needed. */
9084 record_type = ctx->record_type;
9085 srecord_type = ctx->srecord_type;
1767a056 9086 for (f = TYPE_FIELDS (record_type); f ; f = DECL_CHAIN (f))
fd6481cf 9087 if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
9088 {
9089 record_needs_remap = true;
9090 break;
9091 }
1767a056 9092 for (f = TYPE_FIELDS (srecord_type); f ; f = DECL_CHAIN (f))
fd6481cf 9093 if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
9094 {
9095 srecord_needs_remap = true;
9096 break;
9097 }
9098
9099 if (record_needs_remap || srecord_needs_remap)
9100 {
9101 memset (&tcctx, '\0', sizeof (tcctx));
9102 tcctx.cb.src_fn = ctx->cb.src_fn;
9103 tcctx.cb.dst_fn = child_fn;
53f79206 9104 tcctx.cb.src_node = cgraph_get_node (tcctx.cb.src_fn);
9105 gcc_checking_assert (tcctx.cb.src_node);
fd6481cf 9106 tcctx.cb.dst_node = tcctx.cb.src_node;
9107 tcctx.cb.src_cfun = ctx->cb.src_cfun;
9108 tcctx.cb.copy_decl = task_copyfn_copy_decl;
e38def9c 9109 tcctx.cb.eh_lp_nr = 0;
fd6481cf 9110 tcctx.cb.transform_call_graph_edges = CB_CGE_MOVE;
9111 tcctx.cb.decl_map = pointer_map_create ();
9112 tcctx.ctx = ctx;
9113
9114 if (record_needs_remap)
9115 record_type = task_copyfn_remap_type (&tcctx, record_type);
9116 if (srecord_needs_remap)
9117 srecord_type = task_copyfn_remap_type (&tcctx, srecord_type);
9118 }
9119 else
9120 tcctx.cb.decl_map = NULL;
9121
fd6481cf 9122 arg = DECL_ARGUMENTS (child_fn);
9123 TREE_TYPE (arg) = build_pointer_type (record_type);
1767a056 9124 sarg = DECL_CHAIN (arg);
fd6481cf 9125 TREE_TYPE (sarg) = build_pointer_type (srecord_type);
9126
9127 /* First pass: initialize temporaries used in record_type and srecord_type
9128 sizes and field offsets. */
9129 if (tcctx.cb.decl_map)
75a70cf9 9130 for (c = gimple_omp_task_clauses (task_stmt); c; c = OMP_CLAUSE_CHAIN (c))
fd6481cf 9131 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_FIRSTPRIVATE)
9132 {
9133 tree *p;
9134
9135 decl = OMP_CLAUSE_DECL (c);
9136 p = (tree *) pointer_map_contains (tcctx.cb.decl_map, decl);
9137 if (p == NULL)
9138 continue;
9139 n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
9140 sf = (tree) n->value;
9141 sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
182cf5a9 9142 src = build_simple_mem_ref_loc (loc, sarg);
445d06b6 9143 src = omp_build_component_ref (src, sf);
75a70cf9 9144 t = build2 (MODIFY_EXPR, TREE_TYPE (*p), *p, src);
fd6481cf 9145 append_to_statement_list (t, &list);
9146 }
9147
9148 /* Second pass: copy shared var pointers and copy construct non-VLA
9149 firstprivate vars. */
75a70cf9 9150 for (c = gimple_omp_task_clauses (task_stmt); c; c = OMP_CLAUSE_CHAIN (c))
fd6481cf 9151 switch (OMP_CLAUSE_CODE (c))
9152 {
9153 case OMP_CLAUSE_SHARED:
9154 decl = OMP_CLAUSE_DECL (c);
9155 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
9156 if (n == NULL)
9157 break;
9158 f = (tree) n->value;
9159 if (tcctx.cb.decl_map)
9160 f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
9161 n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
9162 sf = (tree) n->value;
9163 if (tcctx.cb.decl_map)
9164 sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
182cf5a9 9165 src = build_simple_mem_ref_loc (loc, sarg);
445d06b6 9166 src = omp_build_component_ref (src, sf);
182cf5a9 9167 dst = build_simple_mem_ref_loc (loc, arg);
445d06b6 9168 dst = omp_build_component_ref (dst, f);
75a70cf9 9169 t = build2 (MODIFY_EXPR, TREE_TYPE (dst), dst, src);
fd6481cf 9170 append_to_statement_list (t, &list);
9171 break;
9172 case OMP_CLAUSE_FIRSTPRIVATE:
9173 decl = OMP_CLAUSE_DECL (c);
9174 if (is_variable_sized (decl))
9175 break;
9176 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
9177 if (n == NULL)
9178 break;
9179 f = (tree) n->value;
9180 if (tcctx.cb.decl_map)
9181 f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
9182 n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
9183 if (n != NULL)
9184 {
9185 sf = (tree) n->value;
9186 if (tcctx.cb.decl_map)
9187 sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
182cf5a9 9188 src = build_simple_mem_ref_loc (loc, sarg);
445d06b6 9189 src = omp_build_component_ref (src, sf);
fd6481cf 9190 if (use_pointer_for_field (decl, NULL) || is_reference (decl))
182cf5a9 9191 src = build_simple_mem_ref_loc (loc, src);
fd6481cf 9192 }
9193 else
9194 src = decl;
182cf5a9 9195 dst = build_simple_mem_ref_loc (loc, arg);
445d06b6 9196 dst = omp_build_component_ref (dst, f);
fd6481cf 9197 t = lang_hooks.decls.omp_clause_copy_ctor (c, dst, src);
9198 append_to_statement_list (t, &list);
9199 break;
9200 case OMP_CLAUSE_PRIVATE:
9201 if (! OMP_CLAUSE_PRIVATE_OUTER_REF (c))
9202 break;
9203 decl = OMP_CLAUSE_DECL (c);
9204 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
9205 f = (tree) n->value;
9206 if (tcctx.cb.decl_map)
9207 f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
9208 n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
9209 if (n != NULL)
9210 {
9211 sf = (tree) n->value;
9212 if (tcctx.cb.decl_map)
9213 sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
182cf5a9 9214 src = build_simple_mem_ref_loc (loc, sarg);
445d06b6 9215 src = omp_build_component_ref (src, sf);
fd6481cf 9216 if (use_pointer_for_field (decl, NULL))
182cf5a9 9217 src = build_simple_mem_ref_loc (loc, src);
fd6481cf 9218 }
9219 else
9220 src = decl;
182cf5a9 9221 dst = build_simple_mem_ref_loc (loc, arg);
445d06b6 9222 dst = omp_build_component_ref (dst, f);
75a70cf9 9223 t = build2 (MODIFY_EXPR, TREE_TYPE (dst), dst, src);
fd6481cf 9224 append_to_statement_list (t, &list);
9225 break;
9226 default:
9227 break;
9228 }
9229
9230 /* Last pass: handle VLA firstprivates. */
9231 if (tcctx.cb.decl_map)
75a70cf9 9232 for (c = gimple_omp_task_clauses (task_stmt); c; c = OMP_CLAUSE_CHAIN (c))
fd6481cf 9233 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_FIRSTPRIVATE)
9234 {
9235 tree ind, ptr, df;
9236
9237 decl = OMP_CLAUSE_DECL (c);
9238 if (!is_variable_sized (decl))
9239 continue;
9240 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
9241 if (n == NULL)
9242 continue;
9243 f = (tree) n->value;
9244 f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
9245 gcc_assert (DECL_HAS_VALUE_EXPR_P (decl));
9246 ind = DECL_VALUE_EXPR (decl);
9247 gcc_assert (TREE_CODE (ind) == INDIRECT_REF);
9248 gcc_assert (DECL_P (TREE_OPERAND (ind, 0)));
9249 n = splay_tree_lookup (ctx->sfield_map,
9250 (splay_tree_key) TREE_OPERAND (ind, 0));
9251 sf = (tree) n->value;
9252 sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
182cf5a9 9253 src = build_simple_mem_ref_loc (loc, sarg);
445d06b6 9254 src = omp_build_component_ref (src, sf);
182cf5a9 9255 src = build_simple_mem_ref_loc (loc, src);
9256 dst = build_simple_mem_ref_loc (loc, arg);
445d06b6 9257 dst = omp_build_component_ref (dst, f);
fd6481cf 9258 t = lang_hooks.decls.omp_clause_copy_ctor (c, dst, src);
9259 append_to_statement_list (t, &list);
9260 n = splay_tree_lookup (ctx->field_map,
9261 (splay_tree_key) TREE_OPERAND (ind, 0));
9262 df = (tree) n->value;
9263 df = *(tree *) pointer_map_contains (tcctx.cb.decl_map, df);
182cf5a9 9264 ptr = build_simple_mem_ref_loc (loc, arg);
445d06b6 9265 ptr = omp_build_component_ref (ptr, df);
75a70cf9 9266 t = build2 (MODIFY_EXPR, TREE_TYPE (ptr), ptr,
389dd41b 9267 build_fold_addr_expr_loc (loc, dst));
fd6481cf 9268 append_to_statement_list (t, &list);
9269 }
9270
9271 t = build1 (RETURN_EXPR, void_type_node, NULL);
9272 append_to_statement_list (t, &list);
9273
9274 if (tcctx.cb.decl_map)
9275 pointer_map_destroy (tcctx.cb.decl_map);
9276 pop_gimplify_context (NULL);
9277 BIND_EXPR_BODY (bind) = list;
9278 pop_cfun ();
fd6481cf 9279}
9280
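/* Lower the depend clauses of the task STMT into an array whose
   layout is (a sketch of what the code below builds):

       array[0]    = total number of depend addresses
       array[1]    = number of out/inout addresses
       array[2 ..] = the out/inout addresses, followed by
                     the in addresses

   Initialization of the array is emitted to *ISEQ, its address is
   recorded in a new OMP_CLAUSE_DEPEND prepended to STMT's clauses,
   and a clobber of the array is appended to *OSEQ.  */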
bc7bff74 9281static void
9282lower_depend_clauses (gimple stmt, gimple_seq *iseq, gimple_seq *oseq)
9283{
9284 tree c, clauses;
9285 gimple g;
9286 size_t n_in = 0, n_out = 0, idx = 2, i;
9287
9288 clauses = find_omp_clause (gimple_omp_task_clauses (stmt),
9289 OMP_CLAUSE_DEPEND);
9290 gcc_assert (clauses);
9291 for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
9292 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_DEPEND)
9293 switch (OMP_CLAUSE_DEPEND_KIND (c))
9294 {
9295 case OMP_CLAUSE_DEPEND_IN:
9296 n_in++;
9297 break;
9298 case OMP_CLAUSE_DEPEND_OUT:
9299 case OMP_CLAUSE_DEPEND_INOUT:
9300 n_out++;
9301 break;
9302 default:
9303 gcc_unreachable ();
9304 }
9305 tree type = build_array_type_nelts (ptr_type_node, n_in + n_out + 2);
9306 tree array = create_tmp_var (type, NULL);
9307 tree r = build4 (ARRAY_REF, ptr_type_node, array, size_int (0), NULL_TREE,
9308 NULL_TREE);
9309 g = gimple_build_assign (r, build_int_cst (ptr_type_node, n_in + n_out));
9310 gimple_seq_add_stmt (iseq, g);
9311 r = build4 (ARRAY_REF, ptr_type_node, array, size_int (1), NULL_TREE,
9312 NULL_TREE);
9313 g = gimple_build_assign (r, build_int_cst (ptr_type_node, n_out));
9314 gimple_seq_add_stmt (iseq, g);
9315 for (i = 0; i < 2; i++)
9316 {
9317 if ((i ? n_in : n_out) == 0)
9318 continue;
9319 for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
9320 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_DEPEND
9321 && ((OMP_CLAUSE_DEPEND_KIND (c) != OMP_CLAUSE_DEPEND_IN) ^ i))
9322 {
9323 tree t = OMP_CLAUSE_DECL (c);
9324 t = fold_convert (ptr_type_node, t);
9325 gimplify_expr (&t, iseq, NULL, is_gimple_val, fb_rvalue);
9326 r = build4 (ARRAY_REF, ptr_type_node, array, size_int (idx++),
9327 NULL_TREE, NULL_TREE);
9328 g = gimple_build_assign (r, t);
9329 gimple_seq_add_stmt (iseq, g);
9330 }
9331 }
9332 tree *p = gimple_omp_task_clauses_ptr (stmt);
9333 c = build_omp_clause (UNKNOWN_LOCATION, OMP_CLAUSE_DEPEND);
9334 OMP_CLAUSE_DECL (c) = build_fold_addr_expr (array);
9335 OMP_CLAUSE_CHAIN (c) = *p;
9336 *p = c;
9337 tree clobber = build_constructor (type, NULL);
9338 TREE_THIS_VOLATILE (clobber) = 1;
9339 g = gimple_build_assign (array, clobber);
9340 gimple_seq_add_stmt (oseq, g);
9341}
9342
75a70cf9 9343/* Lower the OpenMP parallel or task directive in the current statement
9344 in GSI_P. CTX holds context information for the directive. */
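/* The overall shape of the result is roughly (a sketch; .omp_data_o
   and .omp_data_i are the sender and receiver records created during
   scanning):

       <send clauses: .omp_data_o.x = x; ...>
       GIMPLE_OMP_PARALLEL/TASK [child fn, &.omp_data_o]
       {
         .omp_data_i = &.omp_data_o;   <- remapped when the child
                                          function is split off
         <input clauses / firstprivate setup>
         BODY;
         <reductions (parallel only)>
         <lastprivate / destructor code>
         GIMPLE_OMP_RETURN
       }
       <copy-back code and sender-record clobber>
*/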
773c5ba7 9345
9346static void
75a70cf9 9347lower_omp_taskreg (gimple_stmt_iterator *gsi_p, omp_context *ctx)
773c5ba7 9348{
75a70cf9 9349 tree clauses;
9350 tree child_fn, t;
9351 gimple stmt = gsi_stmt (*gsi_p);
bc7bff74 9352 gimple par_bind, bind, dep_bind = NULL;
9353 gimple_seq par_body, olist, ilist, par_olist, par_rlist, par_ilist, new_body;
9354 struct gimplify_ctx gctx, dep_gctx;
389dd41b 9355 location_t loc = gimple_location (stmt);
773c5ba7 9356
75a70cf9 9357 clauses = gimple_omp_taskreg_clauses (stmt);
9358 par_bind = gimple_seq_first_stmt (gimple_omp_body (stmt));
9359 par_body = gimple_bind_body (par_bind);
773c5ba7 9360 child_fn = ctx->cb.dst_fn;
75a70cf9 9361 if (gimple_code (stmt) == GIMPLE_OMP_PARALLEL
9362 && !gimple_omp_parallel_combined_p (stmt))
de7ef844 9363 {
9364 struct walk_stmt_info wi;
9365 int ws_num = 0;
9366
9367 memset (&wi, 0, sizeof (wi));
de7ef844 9368 wi.info = &ws_num;
9369 wi.val_only = true;
75a70cf9 9370 walk_gimple_seq (par_body, check_combined_parallel, NULL, &wi);
de7ef844 9371 if (ws_num == 1)
75a70cf9 9372 gimple_omp_parallel_set_combined_p (stmt, true);
de7ef844 9373 }
bc7bff74 9374 gimple_seq dep_ilist = NULL;
9375 gimple_seq dep_olist = NULL;
9376 if (gimple_code (stmt) == GIMPLE_OMP_TASK
9377 && find_omp_clause (clauses, OMP_CLAUSE_DEPEND))
9378 {
9379 push_gimplify_context (&dep_gctx);
9380 dep_bind = gimple_build_bind (NULL, NULL, make_node (BLOCK));
9381 lower_depend_clauses (stmt, &dep_ilist, &dep_olist);
9382 }
9383
fd6481cf 9384 if (ctx->srecord_type)
9385 create_task_copyfn (stmt, ctx);
773c5ba7 9386
dac18d1a 9387 push_gimplify_context (&gctx);
773c5ba7 9388
75a70cf9 9389 par_olist = NULL;
9390 par_ilist = NULL;
bc7bff74 9391 par_rlist = NULL;
9392 lower_rec_input_clauses (clauses, &par_ilist, &par_olist, ctx, NULL);
e3a19533 9393 lower_omp (&par_body, ctx);
75a70cf9 9394 if (gimple_code (stmt) == GIMPLE_OMP_PARALLEL)
bc7bff74 9395 lower_reduction_clauses (clauses, &par_rlist, ctx);
773c5ba7 9396
9397 /* Declare all the variables created by mapping and the variables
9398 declared in the scope of the parallel body. */
9399 record_vars_into (ctx->block_vars, child_fn);
75a70cf9 9400 record_vars_into (gimple_bind_vars (par_bind), child_fn);
773c5ba7 9401
9402 if (ctx->record_type)
9403 {
fd6481cf 9404 ctx->sender_decl
9405 = create_tmp_var (ctx->srecord_type ? ctx->srecord_type
9406 : ctx->record_type, ".omp_data_o");
84bfaaeb 9407 DECL_NAMELESS (ctx->sender_decl) = 1;
86f2ad37 9408 TREE_ADDRESSABLE (ctx->sender_decl) = 1;
75a70cf9 9409 gimple_omp_taskreg_set_data_arg (stmt, ctx->sender_decl);
773c5ba7 9410 }
9411
75a70cf9 9412 olist = NULL;
9413 ilist = NULL;
773c5ba7 9414 lower_send_clauses (clauses, &ilist, &olist, ctx);
9415 lower_send_shared_vars (&ilist, &olist, ctx);
9416
bc7bff74 9417 if (ctx->record_type)
9418 {
9419 tree clobber = build_constructor (TREE_TYPE (ctx->sender_decl), NULL);
9420 TREE_THIS_VOLATILE (clobber) = 1;
9421 gimple_seq_add_stmt (&olist, gimple_build_assign (ctx->sender_decl,
9422 clobber));
9423 }
9424
773c5ba7 9425 /* Once all the expansions are done, sequence all the different
75a70cf9 9426 fragments inside gimple_omp_body. */
773c5ba7 9427
75a70cf9 9428 new_body = NULL;
773c5ba7 9429
9430 if (ctx->record_type)
9431 {
389dd41b 9432 t = build_fold_addr_expr_loc (loc, ctx->sender_decl);
cc6b725b 9433 /* fixup_child_record_type might have changed receiver_decl's type. */
389dd41b 9434 t = fold_convert_loc (loc, TREE_TYPE (ctx->receiver_decl), t);
75a70cf9 9435 gimple_seq_add_stmt (&new_body,
9436 gimple_build_assign (ctx->receiver_decl, t));
773c5ba7 9437 }
9438
75a70cf9 9439 gimple_seq_add_seq (&new_body, par_ilist);
9440 gimple_seq_add_seq (&new_body, par_body);
bc7bff74 9441 gimple_seq_add_seq (&new_body, par_rlist);
9442 if (ctx->cancellable)
9443 gimple_seq_add_stmt (&new_body, gimple_build_label (ctx->cancel_label));
75a70cf9 9444 gimple_seq_add_seq (&new_body, par_olist);
9445 new_body = maybe_catch_exception (new_body);
9446 gimple_seq_add_stmt (&new_body, gimple_build_omp_return (false));
9447 gimple_omp_set_body (stmt, new_body);
773c5ba7 9448
75a70cf9 9449 bind = gimple_build_bind (NULL, NULL, gimple_bind_block (par_bind));
bc7bff74 9450 gsi_replace (gsi_p, dep_bind ? dep_bind : bind, true);
9451 gimple_bind_add_seq (bind, ilist);
9452 gimple_bind_add_stmt (bind, stmt);
9453 gimple_bind_add_seq (bind, olist);
9454
9455 pop_gimplify_context (NULL);
9456
9457 if (dep_bind)
9458 {
9459 gimple_bind_add_seq (dep_bind, dep_ilist);
9460 gimple_bind_add_stmt (dep_bind, bind);
9461 gimple_bind_add_seq (dep_bind, dep_olist);
9462 pop_gimplify_context (dep_bind);
9463 }
9464}
9465
9466/* Lower the OpenMP target directive in the current statement
9467 in GSI_P. CTX holds context information for the directive. */
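/* For a target region, the mapping clauses are described to the
   runtime through three objects built below (a sketch):

       .omp_data_arr    record holding one address per mapped object
       .omp_data_sizes  array of the objects' sizes in bytes
       .omp_data_kinds  array of map kinds (alloc/to/from/tofrom/
                        pointer), with ceil_log2 of the alignment
                        stored above the low 3 bits of each byte
*/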
9468
9469static void
9470lower_omp_target (gimple_stmt_iterator *gsi_p, omp_context *ctx)
9471{
9472 tree clauses;
9473 tree child_fn, t, c;
9474 gimple stmt = gsi_stmt (*gsi_p);
9475 gimple tgt_bind = NULL, bind;
9476 gimple_seq tgt_body = NULL, olist, ilist, new_body;
9477 struct gimplify_ctx gctx;
9478 location_t loc = gimple_location (stmt);
9479 int kind = gimple_omp_target_kind (stmt);
9480 unsigned int map_cnt = 0;
9481
9482 clauses = gimple_omp_target_clauses (stmt);
9483 if (kind == GF_OMP_TARGET_KIND_REGION)
9484 {
9485 tgt_bind = gimple_seq_first_stmt (gimple_omp_body (stmt));
9486 tgt_body = gimple_bind_body (tgt_bind);
9487 }
9488 else if (kind == GF_OMP_TARGET_KIND_DATA)
9489 tgt_body = gimple_omp_body (stmt);
9490 child_fn = ctx->cb.dst_fn;
9491
9492 push_gimplify_context (&gctx);
9493
9494 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
9495 switch (OMP_CLAUSE_CODE (c))
9496 {
9497 tree var, x;
9498
9499 default:
9500 break;
9501 case OMP_CLAUSE_MAP:
9502 case OMP_CLAUSE_TO:
9503 case OMP_CLAUSE_FROM:
9504 var = OMP_CLAUSE_DECL (c);
9505 if (!DECL_P (var))
9506 {
9507 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_MAP
9508 || !OMP_CLAUSE_MAP_ZERO_BIAS_ARRAY_SECTION (c))
9509 map_cnt++;
9510 continue;
9511 }
9512
9513 if (DECL_SIZE (var)
9514 && TREE_CODE (DECL_SIZE (var)) != INTEGER_CST)
9515 {
9516 tree var2 = DECL_VALUE_EXPR (var);
9517 gcc_assert (TREE_CODE (var2) == INDIRECT_REF);
9518 var2 = TREE_OPERAND (var2, 0);
9519 gcc_assert (DECL_P (var2));
9520 var = var2;
9521 }
9522
9523 if (!maybe_lookup_field (var, ctx))
9524 continue;
9525
9526 if (kind == GF_OMP_TARGET_KIND_REGION)
9527 {
9528 x = build_receiver_ref (var, true, ctx);
9529 tree new_var = lookup_decl (var, ctx);
9530 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_MAP
9531 && OMP_CLAUSE_MAP_KIND (c) == OMP_CLAUSE_MAP_POINTER
9532 && !OMP_CLAUSE_MAP_ZERO_BIAS_ARRAY_SECTION (c)
9533 && TREE_CODE (TREE_TYPE (var)) == ARRAY_TYPE)
9534 x = build_simple_mem_ref (x);
9535 SET_DECL_VALUE_EXPR (new_var, x);
9536 DECL_HAS_VALUE_EXPR_P (new_var) = 1;
9537 }
9538 map_cnt++;
9539 }
9540
9541 if (kind == GF_OMP_TARGET_KIND_REGION)
9542 {
9543 target_nesting_level++;
9544 lower_omp (&tgt_body, ctx);
9545 target_nesting_level--;
9546 }
9547 else if (kind == GF_OMP_TARGET_KIND_DATA)
9548 lower_omp (&tgt_body, ctx);
9549
9550 if (kind == GF_OMP_TARGET_KIND_REGION)
9551 {
9552 /* Declare all the variables created by mapping and the variables
9553 declared in the scope of the target body. */
9554 record_vars_into (ctx->block_vars, child_fn);
9555 record_vars_into (gimple_bind_vars (tgt_bind), child_fn);
9556 }
9557
9558 olist = NULL;
9559 ilist = NULL;
9560 if (ctx->record_type)
9561 {
9562 ctx->sender_decl
9563 = create_tmp_var (ctx->record_type, ".omp_data_arr");
9564 DECL_NAMELESS (ctx->sender_decl) = 1;
9565 TREE_ADDRESSABLE (ctx->sender_decl) = 1;
9566 t = make_tree_vec (3);
9567 TREE_VEC_ELT (t, 0) = ctx->sender_decl;
9568 TREE_VEC_ELT (t, 1)
9569 = create_tmp_var (build_array_type_nelts (size_type_node, map_cnt),
9570 ".omp_data_sizes");
9571 DECL_NAMELESS (TREE_VEC_ELT (t, 1)) = 1;
9572 TREE_ADDRESSABLE (TREE_VEC_ELT (t, 1)) = 1;
9573 TREE_STATIC (TREE_VEC_ELT (t, 1)) = 1;
9574 TREE_VEC_ELT (t, 2)
9575 = create_tmp_var (build_array_type_nelts (unsigned_char_type_node,
9576 map_cnt),
9577 ".omp_data_kinds");
9578 DECL_NAMELESS (TREE_VEC_ELT (t, 2)) = 1;
9579 TREE_ADDRESSABLE (TREE_VEC_ELT (t, 2)) = 1;
9580 TREE_STATIC (TREE_VEC_ELT (t, 2)) = 1;
9581 gimple_omp_target_set_data_arg (stmt, t);
9582
9583 vec<constructor_elt, va_gc> *vsize;
9584 vec<constructor_elt, va_gc> *vkind;
9585 vec_alloc (vsize, map_cnt);
9586 vec_alloc (vkind, map_cnt);
9587 unsigned int map_idx = 0;
9588
9589 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
9590 switch (OMP_CLAUSE_CODE (c))
9591 {
9592 tree ovar, nc;
9593
9594 default:
9595 break;
9596 case OMP_CLAUSE_MAP:
9597 case OMP_CLAUSE_TO:
9598 case OMP_CLAUSE_FROM:
9599 nc = c;
9600 ovar = OMP_CLAUSE_DECL (c);
9601 if (!DECL_P (ovar))
9602 {
9603 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_MAP
9604 && OMP_CLAUSE_MAP_ZERO_BIAS_ARRAY_SECTION (c))
9605 {
9606 gcc_checking_assert (OMP_CLAUSE_DECL (OMP_CLAUSE_CHAIN (c))
9607 == get_base_address (ovar));
9608 nc = OMP_CLAUSE_CHAIN (c);
9609 ovar = OMP_CLAUSE_DECL (nc);
9610 }
9611 else
9612 {
9613 tree x = build_sender_ref (ovar, ctx);
9614 tree v
9615 = build_fold_addr_expr_with_type (ovar, ptr_type_node);
9616 gimplify_assign (x, v, &ilist);
9617 nc = NULL_TREE;
9618 }
9619 }
9620 else
9621 {
9622 if (DECL_SIZE (ovar)
9623 && TREE_CODE (DECL_SIZE (ovar)) != INTEGER_CST)
9624 {
9625 tree ovar2 = DECL_VALUE_EXPR (ovar);
9626 gcc_assert (TREE_CODE (ovar2) == INDIRECT_REF);
9627 ovar2 = TREE_OPERAND (ovar2, 0);
9628 gcc_assert (DECL_P (ovar2));
9629 ovar = ovar2;
9630 }
9631 if (!maybe_lookup_field (ovar, ctx))
9632 continue;
9633 }
9634
9635 if (nc)
9636 {
9637 tree var = lookup_decl_in_outer_ctx (ovar, ctx);
9638 tree x = build_sender_ref (ovar, ctx);
9639 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_MAP
9640 && OMP_CLAUSE_MAP_KIND (c) == OMP_CLAUSE_MAP_POINTER
9641 && !OMP_CLAUSE_MAP_ZERO_BIAS_ARRAY_SECTION (c)
9642 && TREE_CODE (TREE_TYPE (ovar)) == ARRAY_TYPE)
9643 {
9644 gcc_assert (kind == GF_OMP_TARGET_KIND_REGION);
9645 tree avar
9646 = create_tmp_var (TREE_TYPE (TREE_TYPE (x)), NULL);
9647 mark_addressable (avar);
9648 gimplify_assign (avar, build_fold_addr_expr (var), &ilist);
9649 avar = build_fold_addr_expr (avar);
9650 gimplify_assign (x, avar, &ilist);
9651 }
9652 else if (is_gimple_reg (var))
9653 {
9654 gcc_assert (kind == GF_OMP_TARGET_KIND_REGION);
9655 tree avar = create_tmp_var (TREE_TYPE (var), NULL);
9656 mark_addressable (avar);
9657 if (OMP_CLAUSE_MAP_KIND (c) != OMP_CLAUSE_MAP_ALLOC
9658 && OMP_CLAUSE_MAP_KIND (c) != OMP_CLAUSE_MAP_FROM)
9659 gimplify_assign (avar, var, &ilist);
9660 avar = build_fold_addr_expr (avar);
9661 gimplify_assign (x, avar, &ilist);
9662 if ((OMP_CLAUSE_MAP_KIND (c) == OMP_CLAUSE_MAP_FROM
9663 || OMP_CLAUSE_MAP_KIND (c) == OMP_CLAUSE_MAP_TOFROM)
9664 && !TYPE_READONLY (TREE_TYPE (var)))
9665 {
9666 x = build_sender_ref (ovar, ctx);
9667 x = build_simple_mem_ref (x);
9668 gimplify_assign (var, x, &olist);
9669 }
9670 }
9671 else
9672 {
9673 var = build_fold_addr_expr (var);
9674 gimplify_assign (x, var, &ilist);
9675 }
9676 }
9677 tree s = OMP_CLAUSE_SIZE (c);
9678 if (s == NULL_TREE)
9679 s = TYPE_SIZE_UNIT (TREE_TYPE (ovar));
9680 s = fold_convert (size_type_node, s);
9681 tree purpose = size_int (map_idx++);
9682 CONSTRUCTOR_APPEND_ELT (vsize, purpose, s);
9683 if (TREE_CODE (s) != INTEGER_CST)
9684 TREE_STATIC (TREE_VEC_ELT (t, 1)) = 0;
9685
9686 unsigned char tkind = 0;
9687 switch (OMP_CLAUSE_CODE (c))
9688 {
9689 case OMP_CLAUSE_MAP:
9690 tkind = OMP_CLAUSE_MAP_KIND (c);
9691 break;
9692 case OMP_CLAUSE_TO:
9693 tkind = OMP_CLAUSE_MAP_TO;
9694 break;
9695 case OMP_CLAUSE_FROM:
9696 tkind = OMP_CLAUSE_MAP_FROM;
9697 break;
9698 default:
9699 gcc_unreachable ();
9700 }
9701 unsigned int talign = TYPE_ALIGN_UNIT (TREE_TYPE (ovar));
9702 if (DECL_P (ovar) && DECL_ALIGN_UNIT (ovar) > talign)
9703 talign = DECL_ALIGN_UNIT (ovar);
9704 talign = ceil_log2 (talign);
9705 tkind |= talign << 3;
9706 CONSTRUCTOR_APPEND_ELT (vkind, purpose,
9707 build_int_cst (unsigned_char_type_node,
9708 tkind));
9709 if (nc && nc != c)
9710 c = nc;
9711 }
9712
9713 gcc_assert (map_idx == map_cnt);
9714
9715 DECL_INITIAL (TREE_VEC_ELT (t, 1))
9716 = build_constructor (TREE_TYPE (TREE_VEC_ELT (t, 1)), vsize);
9717 DECL_INITIAL (TREE_VEC_ELT (t, 2))
9718 = build_constructor (TREE_TYPE (TREE_VEC_ELT (t, 2)), vkind);
9719 if (!TREE_STATIC (TREE_VEC_ELT (t, 1)))
9720 {
9721 gimple_seq initlist = NULL;
9722 force_gimple_operand (build1 (DECL_EXPR, void_type_node,
9723 TREE_VEC_ELT (t, 1)),
9724 &initlist, true, NULL_TREE);
9725 gimple_seq_add_seq (&ilist, initlist);
9726 }
9727
9728 tree clobber = build_constructor (ctx->record_type, NULL);
9729 TREE_THIS_VOLATILE (clobber) = 1;
9730 gimple_seq_add_stmt (&olist, gimple_build_assign (ctx->sender_decl,
9731 clobber));
9732 }
9733
9734 /* Once all the expansions are done, sequence all the different
9735 fragments inside gimple_omp_body. */
9736
9737 new_body = NULL;
9738
9739 if (ctx->record_type && kind == GF_OMP_TARGET_KIND_REGION)
9740 {
9741 t = build_fold_addr_expr_loc (loc, ctx->sender_decl);
9742 /* fixup_child_record_type might have changed receiver_decl's type. */
9743 t = fold_convert_loc (loc, TREE_TYPE (ctx->receiver_decl), t);
9744 gimple_seq_add_stmt (&new_body,
9745 gimple_build_assign (ctx->receiver_decl, t));
9746 }
9747
9748 if (kind == GF_OMP_TARGET_KIND_REGION)
9749 {
9750 gimple_seq_add_seq (&new_body, tgt_body);
9751 new_body = maybe_catch_exception (new_body);
9752 }
9753 else if (kind == GF_OMP_TARGET_KIND_DATA)
9754 new_body = tgt_body;
9755 if (kind != GF_OMP_TARGET_KIND_UPDATE)
9756 {
9757 gimple_seq_add_stmt (&new_body, gimple_build_omp_return (false));
9758 gimple_omp_set_body (stmt, new_body);
9759 }
9760
9761 bind = gimple_build_bind (NULL, NULL,
9762 tgt_bind ? gimple_bind_block (tgt_bind)
9763 : NULL_TREE);
75a70cf9 9764 gsi_replace (gsi_p, bind, true);
e3a19533 9765 gimple_bind_add_seq (bind, ilist);
9766 gimple_bind_add_stmt (bind, stmt);
9767 gimple_bind_add_seq (bind, olist);
773c5ba7 9768
75a70cf9 9769 pop_gimplify_context (NULL);
773c5ba7 9770}
9771
bc7bff74 9772/* Lower code for an OpenMP teams directive. */
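/* A sketch of the lowering done here: for e.g.

       #pragma omp teams num_teams (4) thread_limit (8)

   the body is preceded by

       GOMP_teams (4, 8);

   with 0 passed for either argument whose clause is absent.  */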
9773
9774static void
9775lower_omp_teams (gimple_stmt_iterator *gsi_p, omp_context *ctx)
9776{
9777 gimple teams_stmt = gsi_stmt (*gsi_p);
9778 struct gimplify_ctx gctx;
9779 push_gimplify_context (&gctx);
9780
9781 tree block = make_node (BLOCK);
9782 gimple bind = gimple_build_bind (NULL, NULL, block);
9783 gsi_replace (gsi_p, bind, true);
9784 gimple_seq bind_body = NULL;
9785 gimple_seq dlist = NULL;
9786 gimple_seq olist = NULL;
9787
9788 tree num_teams = find_omp_clause (gimple_omp_teams_clauses (teams_stmt),
9789 OMP_CLAUSE_NUM_TEAMS);
9790 if (num_teams == NULL_TREE)
9791 num_teams = build_int_cst (unsigned_type_node, 0);
9792 else
9793 {
9794 num_teams = OMP_CLAUSE_NUM_TEAMS_EXPR (num_teams);
9795 num_teams = fold_convert (unsigned_type_node, num_teams);
9796 gimplify_expr (&num_teams, &bind_body, NULL, is_gimple_val, fb_rvalue);
9797 }
9798 tree thread_limit = find_omp_clause (gimple_omp_teams_clauses (teams_stmt),
9799 OMP_CLAUSE_THREAD_LIMIT);
9800 if (thread_limit == NULL_TREE)
9801 thread_limit = build_int_cst (unsigned_type_node, 0);
9802 else
9803 {
9804 thread_limit = OMP_CLAUSE_THREAD_LIMIT_EXPR (thread_limit);
9805 thread_limit = fold_convert (unsigned_type_node, thread_limit);
9806 gimplify_expr (&thread_limit, &bind_body, NULL, is_gimple_val,
9807 fb_rvalue);
9808 }
9809
9810 lower_rec_input_clauses (gimple_omp_teams_clauses (teams_stmt),
9811 &bind_body, &dlist, ctx, NULL);
9812 lower_omp (gimple_omp_body_ptr (teams_stmt), ctx);
9813 lower_reduction_clauses (gimple_omp_teams_clauses (teams_stmt), &olist, ctx);
9814 gimple_seq_add_stmt (&bind_body, teams_stmt);
9815
9816 location_t loc = gimple_location (teams_stmt);
9817 tree decl = builtin_decl_explicit (BUILT_IN_GOMP_TEAMS);
9818 gimple call = gimple_build_call (decl, 2, num_teams, thread_limit);
9819 gimple_set_location (call, loc);
9820 gimple_seq_add_stmt (&bind_body, call);
9821
9822 gimple_seq_add_seq (&bind_body, gimple_omp_body (teams_stmt));
9823 gimple_omp_set_body (teams_stmt, NULL);
9824 gimple_seq_add_seq (&bind_body, olist);
9825 gimple_seq_add_seq (&bind_body, dlist);
9826 gimple_seq_add_stmt (&bind_body, gimple_build_omp_return (true));
9827 gimple_bind_set_body (bind, bind_body);
9828
9829 pop_gimplify_context (bind);
9830
9831 gimple_bind_append_vars (bind, ctx->block_vars);
9832 BLOCK_VARS (block) = ctx->block_vars;
9833 if (BLOCK_VARS (block))
9834 TREE_USED (block) = 1;
9835}
9836
9837
a4890dc9 9838/* Callback for lower_omp_1. Return non-NULL if *tp needs to be
75a70cf9 9839 regimplified. If DATA is non-NULL, lower_omp_1 is being called
 9840 outside of an OpenMP context, but with task_shared_vars set. */
46515c92 9841
9842static tree
75a70cf9 9843lower_omp_regimplify_p (tree *tp, int *walk_subtrees,
9844 void *data)
46515c92 9845{
a4890dc9 9846 tree t = *tp;
46515c92 9847
a4890dc9 9848 /* Any variable with DECL_VALUE_EXPR needs to be regimplified. */
75a70cf9 9849 if (TREE_CODE (t) == VAR_DECL && data == NULL && DECL_HAS_VALUE_EXPR_P (t))
9f49e155 9850 return t;
9851
9852 if (task_shared_vars
9853 && DECL_P (t)
9854 && bitmap_bit_p (task_shared_vars, DECL_UID (t)))
a4890dc9 9855 return t;
46515c92 9856
a4890dc9 9857 /* If a global variable has been privatized, TREE_CONSTANT on
9858 ADDR_EXPR might be wrong. */
75a70cf9 9859 if (data == NULL && TREE_CODE (t) == ADDR_EXPR)
a4890dc9 9860 recompute_tree_invariant_for_addr_expr (t);
46515c92 9861
a4890dc9 9862 *walk_subtrees = !TYPE_P (t) && !DECL_P (t);
9863 return NULL_TREE;
46515c92 9864}
773c5ba7 9865
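/* An example (illustrative only) of what lower_omp_regimplify_p above
   catches: if a shared global G has been privatized, a statement

     p = &G;

   may still be marked TREE_CONSTANT from the original gimplification,
   but once G carries a DECL_VALUE_EXPR (e.g. it was remapped to a field
   of the *.omp_data_i record) the address is no longer invariant and
   the statement must be regimplified.  */
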
a4890dc9 9866static void
75a70cf9 9867lower_omp_1 (gimple_stmt_iterator *gsi_p, omp_context *ctx)
1e8e9920 9868{
75a70cf9 9869 gimple stmt = gsi_stmt (*gsi_p);
9870 struct walk_stmt_info wi;
1e8e9920 9871
75a70cf9 9872 if (gimple_has_location (stmt))
9873 input_location = gimple_location (stmt);
a4890dc9 9874
75a70cf9 9875 if (task_shared_vars)
9876 memset (&wi, '\0', sizeof (wi));
a4890dc9 9877
773c5ba7 9878 /* If we have issued syntax errors, avoid doing any heavy lifting.
9879 Just replace the OpenMP directives with a NOP to avoid
9880 confusing RTL expansion. */
852f689e 9881 if (seen_error () && is_gimple_omp (stmt))
773c5ba7 9882 {
75a70cf9 9883 gsi_replace (gsi_p, gimple_build_nop (), true);
a4890dc9 9884 return;
773c5ba7 9885 }
9886
75a70cf9 9887 switch (gimple_code (stmt))
1e8e9920 9888 {
75a70cf9 9889 case GIMPLE_COND:
fd6481cf 9890 if ((ctx || task_shared_vars)
75a70cf9 9891 && (walk_tree (gimple_cond_lhs_ptr (stmt), lower_omp_regimplify_p,
9892 ctx ? NULL : &wi, NULL)
9893 || walk_tree (gimple_cond_rhs_ptr (stmt), lower_omp_regimplify_p,
9894 ctx ? NULL : &wi, NULL)))
9895 gimple_regimplify_operands (stmt, gsi_p);
a4890dc9 9896 break;
75a70cf9 9897 case GIMPLE_CATCH:
e3a19533 9898 lower_omp (gimple_catch_handler_ptr (stmt), ctx);
a4890dc9 9899 break;
75a70cf9 9900 case GIMPLE_EH_FILTER:
e3a19533 9901 lower_omp (gimple_eh_filter_failure_ptr (stmt), ctx);
a4890dc9 9902 break;
75a70cf9 9903 case GIMPLE_TRY:
e3a19533 9904 lower_omp (gimple_try_eval_ptr (stmt), ctx);
9905 lower_omp (gimple_try_cleanup_ptr (stmt), ctx);
a4890dc9 9906 break;
35215227 9907 case GIMPLE_TRANSACTION:
9908 lower_omp (gimple_transaction_body_ptr (stmt), ctx);
9909 break;
75a70cf9 9910 case GIMPLE_BIND:
e3a19533 9911 lower_omp (gimple_bind_body_ptr (stmt), ctx);
a4890dc9 9912 break;
75a70cf9 9913 case GIMPLE_OMP_PARALLEL:
9914 case GIMPLE_OMP_TASK:
9915 ctx = maybe_lookup_ctx (stmt);
bc7bff74 9916 gcc_assert (ctx);
9917 if (ctx->cancellable)
9918 ctx->cancel_label = create_artificial_label (UNKNOWN_LOCATION);
75a70cf9 9919 lower_omp_taskreg (gsi_p, ctx);
a4890dc9 9920 break;
75a70cf9 9921 case GIMPLE_OMP_FOR:
9922 ctx = maybe_lookup_ctx (stmt);
1e8e9920 9923 gcc_assert (ctx);
bc7bff74 9924 if (ctx->cancellable)
9925 ctx->cancel_label = create_artificial_label (UNKNOWN_LOCATION);
75a70cf9 9926 lower_omp_for (gsi_p, ctx);
1e8e9920 9927 break;
75a70cf9 9928 case GIMPLE_OMP_SECTIONS:
9929 ctx = maybe_lookup_ctx (stmt);
1e8e9920 9930 gcc_assert (ctx);
bc7bff74 9931 if (ctx->cancellable)
9932 ctx->cancel_label = create_artificial_label (UNKNOWN_LOCATION);
75a70cf9 9933 lower_omp_sections (gsi_p, ctx);
1e8e9920 9934 break;
75a70cf9 9935 case GIMPLE_OMP_SINGLE:
9936 ctx = maybe_lookup_ctx (stmt);
1e8e9920 9937 gcc_assert (ctx);
75a70cf9 9938 lower_omp_single (gsi_p, ctx);
1e8e9920 9939 break;
75a70cf9 9940 case GIMPLE_OMP_MASTER:
9941 ctx = maybe_lookup_ctx (stmt);
1e8e9920 9942 gcc_assert (ctx);
75a70cf9 9943 lower_omp_master (gsi_p, ctx);
1e8e9920 9944 break;
bc7bff74 9945 case GIMPLE_OMP_TASKGROUP:
9946 ctx = maybe_lookup_ctx (stmt);
9947 gcc_assert (ctx);
9948 lower_omp_taskgroup (gsi_p, ctx);
9949 break;
75a70cf9 9950 case GIMPLE_OMP_ORDERED:
9951 ctx = maybe_lookup_ctx (stmt);
1e8e9920 9952 gcc_assert (ctx);
75a70cf9 9953 lower_omp_ordered (gsi_p, ctx);
1e8e9920 9954 break;
75a70cf9 9955 case GIMPLE_OMP_CRITICAL:
9956 ctx = maybe_lookup_ctx (stmt);
1e8e9920 9957 gcc_assert (ctx);
75a70cf9 9958 lower_omp_critical (gsi_p, ctx);
9959 break;
9960 case GIMPLE_OMP_ATOMIC_LOAD:
9961 if ((ctx || task_shared_vars)
9962 && walk_tree (gimple_omp_atomic_load_rhs_ptr (stmt),
9963 lower_omp_regimplify_p, ctx ? NULL : &wi, NULL))
9964 gimple_regimplify_operands (stmt, gsi_p);
1e8e9920 9965 break;
bc7bff74 9966 case GIMPLE_OMP_TARGET:
9967 ctx = maybe_lookup_ctx (stmt);
9968 gcc_assert (ctx);
9969 lower_omp_target (gsi_p, ctx);
9970 break;
9971 case GIMPLE_OMP_TEAMS:
9972 ctx = maybe_lookup_ctx (stmt);
9973 gcc_assert (ctx);
9974 lower_omp_teams (gsi_p, ctx);
9975 break;
9976 case GIMPLE_CALL:
9977 tree fndecl;
9978 fndecl = gimple_call_fndecl (stmt);
9979 if (fndecl
9980 && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
9981 switch (DECL_FUNCTION_CODE (fndecl))
9982 {
9983 case BUILT_IN_GOMP_BARRIER:
9984 if (ctx == NULL)
9985 break;
9986 /* FALLTHRU */
9987 case BUILT_IN_GOMP_CANCEL:
9988 case BUILT_IN_GOMP_CANCELLATION_POINT:
9989 omp_context *cctx;
9990 cctx = ctx;
9991 if (gimple_code (cctx->stmt) == GIMPLE_OMP_SECTION)
9992 cctx = cctx->outer;
9993 gcc_assert (gimple_call_lhs (stmt) == NULL_TREE);
9994 if (!cctx->cancellable)
9995 {
9996 if (DECL_FUNCTION_CODE (fndecl)
9997 == BUILT_IN_GOMP_CANCELLATION_POINT)
9998 {
9999 stmt = gimple_build_nop ();
10000 gsi_replace (gsi_p, stmt, false);
10001 }
10002 break;
10003 }
10004 tree lhs;
10005 lhs = create_tmp_var (boolean_type_node, NULL);
10006 if (DECL_FUNCTION_CODE (fndecl) == BUILT_IN_GOMP_BARRIER)
10007 {
10008 fndecl = builtin_decl_explicit (BUILT_IN_GOMP_BARRIER_CANCEL);
10009 gimple_call_set_fndecl (stmt, fndecl);
10010 gimple_call_set_fntype (stmt, TREE_TYPE (fndecl));
10011 }
10012 gimple_call_set_lhs (stmt, lhs);
10013 tree fallthru_label;
10014 fallthru_label = create_artificial_label (UNKNOWN_LOCATION);
10015 gimple g;
10016 g = gimple_build_label (fallthru_label);
10017 gsi_insert_after (gsi_p, g, GSI_SAME_STMT);
10018 g = gimple_build_cond (NE_EXPR, lhs, boolean_false_node,
10019 cctx->cancel_label, fallthru_label);
10020 gsi_insert_after (gsi_p, g, GSI_SAME_STMT);
10021 break;
10022 default:
10023 break;
10024 }
10025 /* FALLTHRU */
a4890dc9 10026 default:
fd6481cf 10027 if ((ctx || task_shared_vars)
75a70cf9 10028 && walk_gimple_op (stmt, lower_omp_regimplify_p,
10029 ctx ? NULL : &wi))
10030 gimple_regimplify_operands (stmt, gsi_p);
1e8e9920 10031 break;
1e8e9920 10032 }
1e8e9920 10033}
10034
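/* A sketch (hedged, not exact output) of the cancellation rewrite done
   in the GIMPLE_CALL case of lower_omp_1 above: within a cancellable
   region, a call

     __builtin_GOMP_barrier ();

   becomes roughly

     D.1 = __builtin_GOMP_barrier_cancel ();
     if (D.1 != 0) goto cancel_label; else goto fallthru;
     fallthru:

   so a cancellation request observed at the barrier transfers control
   to the region's cancellation label.  */
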
10035static void
e3a19533 10036lower_omp (gimple_seq *body, omp_context *ctx)
1e8e9920 10037{
1d22f541 10038 location_t saved_location = input_location;
e3a19533 10039 gimple_stmt_iterator gsi;
10040 for (gsi = gsi_start (*body); !gsi_end_p (gsi); gsi_next (&gsi))
75a70cf9 10041 lower_omp_1 (&gsi, ctx);
bc7bff74 10042 /* Inside a target region we haven't called fold_stmt during gimplification,
10043 because it can break code by adding decl references that weren't in the
10044 source. Call fold_stmt now. */
10045 if (target_nesting_level)
10046 for (gsi = gsi_start (*body); !gsi_end_p (gsi); gsi_next (&gsi))
10047 fold_stmt (&gsi);
1d22f541 10048 input_location = saved_location;
1e8e9920 10049}
10050\f
10051/* Main entry point. */
10052
2a1990e9 10053static unsigned int
1e8e9920 10054execute_lower_omp (void)
10055{
75a70cf9 10056 gimple_seq body;
10057
41709826 10058 /* This pass always runs, to provide PROP_gimple_lomp.
10059 But there is nothing to do unless -fopenmp or -fopenmp-simd is given. */
c630ef93 10060 if (flag_openmp == 0 && flag_openmp_simd == 0)
41709826 10061 return 0;
10062
1e8e9920 10063 all_contexts = splay_tree_new (splay_tree_compare_pointers, 0,
10064 delete_omp_context);
10065
75a70cf9 10066 body = gimple_body (current_function_decl);
ab129075 10067 scan_omp (&body, NULL);
fd6481cf 10068 gcc_assert (taskreg_nesting_level == 0);
1e8e9920 10069
10070 if (all_contexts->root)
fd6481cf 10071 {
dac18d1a 10072 struct gimplify_ctx gctx;
10073
fd6481cf 10074 if (task_shared_vars)
dac18d1a 10075 push_gimplify_context (&gctx);
e3a19533 10076 lower_omp (&body, NULL);
fd6481cf 10077 if (task_shared_vars)
10078 pop_gimplify_context (NULL);
10079 }
1e8e9920 10080
773c5ba7 10081 if (all_contexts)
10082 {
10083 splay_tree_delete (all_contexts);
10084 all_contexts = NULL;
10085 }
fd6481cf 10086 BITMAP_FREE (task_shared_vars);
2a1990e9 10087 return 0;
1e8e9920 10088}
10089
cbe8bda8 10090namespace {
10091
10092const pass_data pass_data_lower_omp =
10093{
10094 GIMPLE_PASS, /* type */
10095 "omplower", /* name */
10096 OPTGROUP_NONE, /* optinfo_flags */
10097 false, /* has_gate */
10098 true, /* has_execute */
10099 TV_NONE, /* tv_id */
10100 PROP_gimple_any, /* properties_required */
10101 PROP_gimple_lomp, /* properties_provided */
10102 0, /* properties_destroyed */
10103 0, /* todo_flags_start */
10104 0, /* todo_flags_finish */
1e8e9920 10105};
cbe8bda8 10106
10107class pass_lower_omp : public gimple_opt_pass
10108{
10109public:
9af5ce0c 10110 pass_lower_omp (gcc::context *ctxt)
10111 : gimple_opt_pass (pass_data_lower_omp, ctxt)
cbe8bda8 10112 {}
10113
10114 /* opt_pass methods: */
10115 unsigned int execute () { return execute_lower_omp (); }
10116
10117}; // class pass_lower_omp
10118
10119} // anon namespace
10120
10121gimple_opt_pass *
10122make_pass_lower_omp (gcc::context *ctxt)
10123{
10124 return new pass_lower_omp (ctxt);
10125}
1e8e9920 10126\f
10127/* The following is a utility to diagnose OpenMP structured block violations.
61e47ac8 10128 It is not part of the "omplower" pass, as that's invoked too late. It
10129 should be invoked by the respective front ends after gimplification. */
1e8e9920 10130
10131static splay_tree all_labels;
10132
10133/* Check for mismatched contexts and generate an error if needed. Return
10134 true if an error is detected. */
10135
10136static bool
75a70cf9 10137diagnose_sb_0 (gimple_stmt_iterator *gsi_p,
10138 gimple branch_ctx, gimple label_ctx)
1e8e9920 10139{
75a70cf9 10140 if (label_ctx == branch_ctx)
1e8e9920 10141 return false;
10142
48e1416a 10143
75a70cf9 10144 /*
10145 Previously we kept track of the label's entire context in diagnose_sb_[12]
10146 so we could traverse it and issue a correct "exit" or "enter" error
10147 message upon a structured block violation.
10148
10149 We built the context by chaining a list together with tree_cons, but there is
10150 no easy counterpart in gimple tuples. It seems like far too much work
10151 for issuing exit/enter error messages. If someone really misses the
10152 distinct error message... patches welcome.
10153 */
48e1416a 10154
75a70cf9 10155#if 0
1e8e9920 10156 /* Try to avoid confusing the user by producing an error message
f0b5f617 10157 with correct "exit" or "enter" verbiage. We prefer "exit"
1e8e9920 10158 unless we can show that LABEL_CTX is nested within BRANCH_CTX. */
10159 if (branch_ctx == NULL)
10160 exit_p = false;
10161 else
10162 {
10163 while (label_ctx)
10164 {
10165 if (TREE_VALUE (label_ctx) == branch_ctx)
10166 {
10167 exit_p = false;
10168 break;
10169 }
10170 label_ctx = TREE_CHAIN (label_ctx);
10171 }
10172 }
10173
10174 if (exit_p)
10175 error ("invalid exit from OpenMP structured block");
10176 else
10177 error ("invalid entry to OpenMP structured block");
75a70cf9 10178#endif
1e8e9920 10179
75a70cf9 10180 /* If it's obvious we have an invalid entry, be specific about the error. */
10181 if (branch_ctx == NULL)
10182 error ("invalid entry to OpenMP structured block");
10183 else
10184 /* Otherwise, be vague and lazy, but efficient. */
10185 error ("invalid branch to/from an OpenMP structured block");
10186
10187 gsi_replace (gsi_p, gimple_build_nop (), false);
1e8e9920 10188 return true;
10189}
10190
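/* For example (illustrative only), diagnose_sb_0 above rejects code
   such as

     #pragma omp parallel
     {
       goto bad;          error: invalid branch to/from an OpenMP
     }                           structured block
     bad:;

   because the goto and its target label belong to different OpenMP
   contexts.  */
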
10191/* Pass 1: Create a minimal tree of OpenMP structured blocks, and record
75a70cf9 10192 where each label is found. */
1e8e9920 10193
10194static tree
75a70cf9 10195diagnose_sb_1 (gimple_stmt_iterator *gsi_p, bool *handled_ops_p,
10196 struct walk_stmt_info *wi)
1e8e9920 10197{
75a70cf9 10198 gimple context = (gimple) wi->info;
10199 gimple inner_context;
10200 gimple stmt = gsi_stmt (*gsi_p);
1e8e9920 10201
75a70cf9 10202 *handled_ops_p = true;
10203
10204 switch (gimple_code (stmt))
1e8e9920 10205 {
75a70cf9 10206 WALK_SUBSTMTS;
48e1416a 10207
75a70cf9 10208 case GIMPLE_OMP_PARALLEL:
10209 case GIMPLE_OMP_TASK:
10210 case GIMPLE_OMP_SECTIONS:
10211 case GIMPLE_OMP_SINGLE:
10212 case GIMPLE_OMP_SECTION:
10213 case GIMPLE_OMP_MASTER:
10214 case GIMPLE_OMP_ORDERED:
10215 case GIMPLE_OMP_CRITICAL:
bc7bff74 10216 case GIMPLE_OMP_TARGET:
10217 case GIMPLE_OMP_TEAMS:
10218 case GIMPLE_OMP_TASKGROUP:
75a70cf9 10219 /* The minimal context here is just the current OMP construct. */
10220 inner_context = stmt;
1e8e9920 10221 wi->info = inner_context;
75a70cf9 10222 walk_gimple_seq (gimple_omp_body (stmt), diagnose_sb_1, NULL, wi);
1e8e9920 10223 wi->info = context;
10224 break;
10225
75a70cf9 10226 case GIMPLE_OMP_FOR:
10227 inner_context = stmt;
1e8e9920 10228 wi->info = inner_context;
75a70cf9 10229 /* gimple_omp_for_{index,initial,final} are all DECLs; no need to
10230 walk them. */
10231 walk_gimple_seq (gimple_omp_for_pre_body (stmt),
10232 diagnose_sb_1, NULL, wi);
10233 walk_gimple_seq (gimple_omp_body (stmt), diagnose_sb_1, NULL, wi);
1e8e9920 10234 wi->info = context;
10235 break;
10236
75a70cf9 10237 case GIMPLE_LABEL:
10238 splay_tree_insert (all_labels, (splay_tree_key) gimple_label_label (stmt),
1e8e9920 10239 (splay_tree_value) context);
10240 break;
10241
10242 default:
10243 break;
10244 }
10245
10246 return NULL_TREE;
10247}
10248
10249/* Pass 2: Check each branch and see if its context differs from the
10250 context of the destination label. */
10251
10252static tree
75a70cf9 10253diagnose_sb_2 (gimple_stmt_iterator *gsi_p, bool *handled_ops_p,
10254 struct walk_stmt_info *wi)
1e8e9920 10255{
75a70cf9 10256 gimple context = (gimple) wi->info;
1e8e9920 10257 splay_tree_node n;
75a70cf9 10258 gimple stmt = gsi_stmt (*gsi_p);
1e8e9920 10259
75a70cf9 10260 *handled_ops_p = true;
10261
10262 switch (gimple_code (stmt))
1e8e9920 10263 {
75a70cf9 10264 WALK_SUBSTMTS;
10265
10266 case GIMPLE_OMP_PARALLEL:
10267 case GIMPLE_OMP_TASK:
10268 case GIMPLE_OMP_SECTIONS:
10269 case GIMPLE_OMP_SINGLE:
10270 case GIMPLE_OMP_SECTION:
10271 case GIMPLE_OMP_MASTER:
10272 case GIMPLE_OMP_ORDERED:
10273 case GIMPLE_OMP_CRITICAL:
bc7bff74 10274 case GIMPLE_OMP_TARGET:
10275 case GIMPLE_OMP_TEAMS:
10276 case GIMPLE_OMP_TASKGROUP:
75a70cf9 10277 wi->info = stmt;
e3a19533 10278 walk_gimple_seq_mod (gimple_omp_body_ptr (stmt), diagnose_sb_2, NULL, wi);
1e8e9920 10279 wi->info = context;
10280 break;
10281
75a70cf9 10282 case GIMPLE_OMP_FOR:
10283 wi->info = stmt;
10284 /* gimple_omp_for_{index,initial,final} are all DECLs; no need to
10285 walk them. */
e3a19533 10286 walk_gimple_seq_mod (gimple_omp_for_pre_body_ptr (stmt),
10287 diagnose_sb_2, NULL, wi);
10288 walk_gimple_seq_mod (gimple_omp_body_ptr (stmt), diagnose_sb_2, NULL, wi);
1e8e9920 10289 wi->info = context;
10290 break;
10291
0e1818e7 10292 case GIMPLE_COND:
10293 {
10294 tree lab = gimple_cond_true_label (stmt);
10295 if (lab)
10296 {
10297 n = splay_tree_lookup (all_labels,
10298 (splay_tree_key) lab);
10299 diagnose_sb_0 (gsi_p, context,
10300 n ? (gimple) n->value : NULL);
10301 }
10302 lab = gimple_cond_false_label (stmt);
10303 if (lab)
10304 {
10305 n = splay_tree_lookup (all_labels,
10306 (splay_tree_key) lab);
10307 diagnose_sb_0 (gsi_p, context,
10308 n ? (gimple) n->value : NULL);
10309 }
10310 }
10311 break;
10312
75a70cf9 10313 case GIMPLE_GOTO:
1e8e9920 10314 {
75a70cf9 10315 tree lab = gimple_goto_dest (stmt);
1e8e9920 10316 if (TREE_CODE (lab) != LABEL_DECL)
10317 break;
10318
10319 n = splay_tree_lookup (all_labels, (splay_tree_key) lab);
75a70cf9 10320 diagnose_sb_0 (gsi_p, context, n ? (gimple) n->value : NULL);
1e8e9920 10321 }
10322 break;
10323
75a70cf9 10324 case GIMPLE_SWITCH:
1e8e9920 10325 {
75a70cf9 10326 unsigned int i;
10327 for (i = 0; i < gimple_switch_num_labels (stmt); ++i)
1e8e9920 10328 {
75a70cf9 10329 tree lab = CASE_LABEL (gimple_switch_label (stmt, i));
1e8e9920 10330 n = splay_tree_lookup (all_labels, (splay_tree_key) lab);
75a70cf9 10331 if (n && diagnose_sb_0 (gsi_p, context, (gimple) n->value))
1e8e9920 10332 break;
10333 }
10334 }
10335 break;
10336
75a70cf9 10337 case GIMPLE_RETURN:
10338 diagnose_sb_0 (gsi_p, context, NULL);
1e8e9920 10339 break;
10340
10341 default:
10342 break;
10343 }
10344
10345 return NULL_TREE;
10346}
10347
7740abd8 10348/* Called from tree-cfg.c::make_edges to create CFG edges for all GIMPLE_OMP
10349 codes. */
10350bool
10351make_gimple_omp_edges (basic_block bb, struct omp_region **region)
10352{
10353 gimple last = last_stmt (bb);
10354 enum gimple_code code = gimple_code (last);
10355 struct omp_region *cur_region = *region;
10356 bool fallthru = false;
10357
10358 switch (code)
10359 {
10360 case GIMPLE_OMP_PARALLEL:
10361 case GIMPLE_OMP_TASK:
10362 case GIMPLE_OMP_FOR:
10363 case GIMPLE_OMP_SINGLE:
10364 case GIMPLE_OMP_TEAMS:
10365 case GIMPLE_OMP_MASTER:
10366 case GIMPLE_OMP_TASKGROUP:
10367 case GIMPLE_OMP_ORDERED:
10368 case GIMPLE_OMP_CRITICAL:
10369 case GIMPLE_OMP_SECTION:
10370 cur_region = new_omp_region (bb, code, cur_region);
10371 fallthru = true;
10372 break;
10373
10374 case GIMPLE_OMP_TARGET:
10375 cur_region = new_omp_region (bb, code, cur_region);
10376 fallthru = true;
10377 if (gimple_omp_target_kind (last) == GF_OMP_TARGET_KIND_UPDATE)
10378 cur_region = cur_region->outer;
10379 break;
10380
10381 case GIMPLE_OMP_SECTIONS:
10382 cur_region = new_omp_region (bb, code, cur_region);
10383 fallthru = true;
10384 break;
10385
10386 case GIMPLE_OMP_SECTIONS_SWITCH:
10387 fallthru = false;
10388 break;
10389
10390 case GIMPLE_OMP_ATOMIC_LOAD:
10391 case GIMPLE_OMP_ATOMIC_STORE:
10392 fallthru = true;
10393 break;
10394
10395 case GIMPLE_OMP_RETURN:
10396 /* In the case of a GIMPLE_OMP_SECTION, the edge will go
10397 somewhere other than the next block. That edge will be
10398 created later. */
10399 cur_region->exit = bb;
10400 fallthru = cur_region->type != GIMPLE_OMP_SECTION;
10401 cur_region = cur_region->outer;
10402 break;
10403
10404 case GIMPLE_OMP_CONTINUE:
10405 cur_region->cont = bb;
10406 switch (cur_region->type)
10407 {
10408 case GIMPLE_OMP_FOR:
10409 /* Mark all GIMPLE_OMP_FOR and GIMPLE_OMP_CONTINUE
10410 successor edges as abnormal to prevent splitting
10411 them. */
10412 single_succ_edge (cur_region->entry)->flags |= EDGE_ABNORMAL;
10413 /* Make the loopback edge. */
10414 make_edge (bb, single_succ (cur_region->entry),
10415 EDGE_ABNORMAL);
10416
10417 /* Create an edge from GIMPLE_OMP_FOR to exit, which
10418 corresponds to the case that the body of the loop
10419 is not executed at all. */
10420 make_edge (cur_region->entry, bb->next_bb, EDGE_ABNORMAL);
10421 make_edge (bb, bb->next_bb, EDGE_FALLTHRU | EDGE_ABNORMAL);
10422 fallthru = false;
10423 break;
10424
10425 case GIMPLE_OMP_SECTIONS:
10426 /* Wire up the edges into and out of the nested sections. */
10427 {
10428 basic_block switch_bb = single_succ (cur_region->entry);
10429
10430 struct omp_region *i;
10431 for (i = cur_region->inner; i ; i = i->next)
10432 {
10433 gcc_assert (i->type == GIMPLE_OMP_SECTION);
10434 make_edge (switch_bb, i->entry, 0);
10435 make_edge (i->exit, bb, EDGE_FALLTHRU);
10436 }
10437
10438 /* Make the loopback edge to the block with
10439 GIMPLE_OMP_SECTIONS_SWITCH. */
10440 make_edge (bb, switch_bb, 0);
10441
10442 /* Make the edge from the switch to exit. */
10443 make_edge (switch_bb, bb->next_bb, 0);
10444 fallthru = false;
10445 }
10446 break;
10447
10448 default:
10449 gcc_unreachable ();
10450 }
10451 break;
10452
10453 default:
10454 gcc_unreachable ();
10455 }
10456
10457 if (*region != cur_region)
10458 *region = cur_region;
10459
10460 return fallthru;
10461}
10462
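/* For orientation (an informal summary, not exhaustive): for a
   GIMPLE_OMP_FOR region, make_gimple_omp_edges above

     - marks the edge from the GIMPLE_OMP_FOR block into the body as
       abnormal,
     - adds an abnormal loopback edge from the GIMPLE_OMP_CONTINUE
       block back to the body,
     - adds an abnormal edge from the GIMPLE_OMP_FOR block past the
       loop for the zero-iteration case, and
     - adds a fallthru edge from the GIMPLE_OMP_CONTINUE block to the
       following block.

   The EDGE_ABNORMAL flags keep these edges from being split before
   pass_expand_omp outlines the region.  */
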
bfec3452 10463static unsigned int
10464diagnose_omp_structured_block_errors (void)
1e8e9920 10465{
1e8e9920 10466 struct walk_stmt_info wi;
bfec3452 10467 gimple_seq body = gimple_body (current_function_decl);
1e8e9920 10468
10469 all_labels = splay_tree_new (splay_tree_compare_pointers, 0, 0);
10470
10471 memset (&wi, 0, sizeof (wi));
75a70cf9 10472 walk_gimple_seq (body, diagnose_sb_1, NULL, &wi);
1e8e9920 10473
10474 memset (&wi, 0, sizeof (wi));
1e8e9920 10475 wi.want_locations = true;
e3a19533 10476 walk_gimple_seq_mod (&body, diagnose_sb_2, NULL, &wi);
10477
10478 gimple_set_body (current_function_decl, body);
1e8e9920 10479
10480 splay_tree_delete (all_labels);
10481 all_labels = NULL;
10482
bfec3452 10483 return 0;
1e8e9920 10484}
10485
bfec3452 10486static bool
10487gate_diagnose_omp_blocks (void)
10488{
10489 return flag_openmp != 0;
10490}
10491
cbe8bda8 10492namespace {
10493
10494const pass_data pass_data_diagnose_omp_blocks =
10495{
10496 GIMPLE_PASS, /* type */
10497 "*diagnose_omp_blocks", /* name */
10498 OPTGROUP_NONE, /* optinfo_flags */
10499 true, /* has_gate */
10500 true, /* has_execute */
10501 TV_NONE, /* tv_id */
10502 PROP_gimple_any, /* properties_required */
10503 0, /* properties_provided */
10504 0, /* properties_destroyed */
10505 0, /* todo_flags_start */
10506 0, /* todo_flags_finish */
bfec3452 10507};
10508
cbe8bda8 10509class pass_diagnose_omp_blocks : public gimple_opt_pass
10510{
10511public:
9af5ce0c 10512 pass_diagnose_omp_blocks (gcc::context *ctxt)
10513 : gimple_opt_pass (pass_data_diagnose_omp_blocks, ctxt)
cbe8bda8 10514 {}
10515
10516 /* opt_pass methods: */
10517 bool gate () { return gate_diagnose_omp_blocks (); }
10518 unsigned int execute () {
10519 return diagnose_omp_structured_block_errors ();
10520 }
10521
10522}; // class pass_diagnose_omp_blocks
10523
10524} // anon namespace
10525
10526gimple_opt_pass *
10527make_pass_diagnose_omp_blocks (gcc::context *ctxt)
10528{
10529 return new pass_diagnose_omp_blocks (ctxt);
10530}
10531
1e8e9920 10532#include "gt-omp-low.h"