1e8e9920 1/* Lowering pass for OpenMP directives. Converts OpenMP directives
2 into explicit calls to the runtime library (libgomp) and data
3 marshalling to implement data sharing and copying clauses.
4 Contributed by Diego Novillo <dnovillo@redhat.com>
5
711789cc 6 Copyright (C) 2005-2013 Free Software Foundation, Inc.
1e8e9920 7
8This file is part of GCC.
9
10GCC is free software; you can redistribute it and/or modify it under
11the terms of the GNU General Public License as published by the Free
8c4c00c1 12Software Foundation; either version 3, or (at your option) any later
1e8e9920 13version.
14
15GCC is distributed in the hope that it will be useful, but WITHOUT ANY
16WARRANTY; without even the implied warranty of MERCHANTABILITY or
17FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
18for more details.
19
20You should have received a copy of the GNU General Public License
8c4c00c1 21along with GCC; see the file COPYING3. If not see
22<http://www.gnu.org/licenses/>. */
1e8e9920 23
24#include "config.h"
25#include "system.h"
26#include "coretypes.h"
27#include "tm.h"
28#include "tree.h"
29#include "rtl.h"
75a70cf9 30#include "gimple.h"
31#include "tree-iterator.h"
1e8e9920 32#include "tree-inline.h"
33#include "langhooks.h"
852f689e 34#include "diagnostic-core.h"
69ee5dbb 35#include "tree-ssa.h"
1e8e9920 36#include "flags.h"
37#include "function.h"
38#include "expr.h"
1e8e9920 39#include "tree-pass.h"
40#include "ggc.h"
41#include "except.h"
e3022db7 42#include "splay-tree.h"
cb7f680b 43#include "optabs.h"
44#include "cfgloop.h"
3d483a94 45#include "target.h"
1e8e9920 46
75a70cf9 47
48e1416a 48/* Lowering of OpenMP parallel and workshare constructs proceeds in two
1e8e9920 49 phases. The first phase scans the function looking for OMP statements
50 and then for variables that must be replaced to satisfy data sharing
51 clauses. The second phase expands code for the constructs, as well as
334ec2d8 52 re-gimplifying things when variables have been replaced with complex
1e8e9920 53 expressions.
54
d134bccc 55 Final code generation is done by pass_expand_omp. The flowgraph is
56 scanned for parallel regions which are then moved to a new
57 function, to be invoked by the thread library. */
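
/* As an illustrative, hand-written sketch (not dump output from this
   pass), a directive such as

	#pragma omp parallel shared(x)
	  body;

   is lowered into something like

	.omp_data_o.x = x;
	__builtin_GOMP_parallel_start (foo._omp_fn.0, &.omp_data_o, 0);
	foo._omp_fn.0 (&.omp_data_o);
	__builtin_GOMP_parallel_end ();
	x = .omp_data_o.x;

   where BODY has been outlined into the child function foo._omp_fn.0,
   which receives the address of .omp_data_o as its .omp_data_i
   argument.  Copy-in/copy-out as shown applies only when
   use_pointer_for_field below allows it; otherwise the field holds
   x's address.  */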
1e8e9920 58
59/* Context structure. Used to store information about each parallel
60 directive in the code. */
61
62typedef struct omp_context
63{
64 /* This field must be at the beginning, as we do "inheritance": Some
65 callback functions for tree-inline.c (e.g., omp_copy_decl)
66 receive a copy_body_data pointer that is up-casted to an
67 omp_context pointer. */
68 copy_body_data cb;
69
70 /* The tree of contexts corresponding to the encountered constructs. */
71 struct omp_context *outer;
75a70cf9 72 gimple stmt;
1e8e9920 73
48e1416a 74 /* Map variables to fields in a structure that allows communication
1e8e9920 75 between sending and receiving threads. */
76 splay_tree field_map;
77 tree record_type;
78 tree sender_decl;
79 tree receiver_decl;
80
fd6481cf 81 /* These are used just by task contexts, if task firstprivate fn is
82 needed. srecord_type is used to communicate from the thread
83 that encountered the task construct to task firstprivate fn,
84 record_type is allocated by GOMP_task, initialized by task firstprivate
85 fn and passed to the task body fn. */
86 splay_tree sfield_map;
87 tree srecord_type;
88
1e8e9920 89 /* A chain of variables to add to the top-level block surrounding the
90 construct. In the case of a parallel, this is in the child function. */
91 tree block_vars;
92
93 /* What to do with variables with implicitly determined sharing
94 attributes. */
95 enum omp_clause_default_kind default_kind;
96
 97 /* Nesting depth of this context. Used to beautify error messages regarding
98 invalid gotos. The outermost ctx is depth 1, with depth 0 being
99 reserved for the main body of the function. */
100 int depth;
101
1e8e9920 102 /* True if this parallel directive is nested within another. */
103 bool is_nested;
1e8e9920 104} omp_context;
105
106
fd6481cf 107struct omp_for_data_loop
108{
109 tree v, n1, n2, step;
110 enum tree_code cond_code;
111};
112
773c5ba7 113/* A structure describing the main elements of a parallel loop. */
1e8e9920 114
773c5ba7 115struct omp_for_data
1e8e9920 116{
fd6481cf 117 struct omp_for_data_loop loop;
75a70cf9 118 tree chunk_size;
119 gimple for_stmt;
fd6481cf 120 tree pre, iter_type;
121 int collapse;
1e8e9920 122 bool have_nowait, have_ordered;
123 enum omp_clause_schedule_kind sched_kind;
fd6481cf 124 struct omp_for_data_loop *loops;
1e8e9920 125};
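
/* For example (a sketch of how extract_omp_for_data below fills this
   in), given

	#pragma omp for schedule (dynamic, 4)
	for (i = 0; i < n; i += 2)

   we would get loop.v = i, loop.n1 = 0, loop.n2 = n, loop.cond_code
   = LT_EXPR, loop.step = 2, collapse = 1, sched_kind
   = OMP_CLAUSE_SCHEDULE_DYNAMIC and chunk_size = 4.  */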
126
773c5ba7 127
1e8e9920 128static splay_tree all_contexts;
fd6481cf 129static int taskreg_nesting_level;
61e47ac8 130struct omp_region *root_omp_region;
fd6481cf 131static bitmap task_shared_vars;
1e8e9920 132
ab129075 133static void scan_omp (gimple_seq *, omp_context *);
75a70cf9 134static tree scan_omp_1_op (tree *, int *, void *);
135
136#define WALK_SUBSTMTS \
137 case GIMPLE_BIND: \
138 case GIMPLE_TRY: \
139 case GIMPLE_CATCH: \
140 case GIMPLE_EH_FILTER: \
4c0315d0 141 case GIMPLE_TRANSACTION: \
75a70cf9 142 /* The sub-statements for these should be walked. */ \
143 *handled_ops_p = false; \
144 break;
145
146/* Convenience function for calling scan_omp_1_op on tree operands. */
147
148static inline tree
149scan_omp_op (tree *tp, omp_context *ctx)
150{
151 struct walk_stmt_info wi;
152
153 memset (&wi, 0, sizeof (wi));
154 wi.info = ctx;
155 wi.want_locations = true;
156
157 return walk_tree (tp, scan_omp_1_op, &wi, NULL);
158}
159
e3a19533 160static void lower_omp (gimple_seq *, omp_context *);
f49d7bb5 161static tree lookup_decl_in_outer_ctx (tree, omp_context *);
162static tree maybe_lookup_decl_in_outer_ctx (tree, omp_context *);
1e8e9920 163
164/* Find an OpenMP clause of type KIND within CLAUSES. */
165
79acaae1 166tree
590c3166 167find_omp_clause (tree clauses, enum omp_clause_code kind)
1e8e9920 168{
169 for (; clauses ; clauses = OMP_CLAUSE_CHAIN (clauses))
55d6e7cd 170 if (OMP_CLAUSE_CODE (clauses) == kind)
1e8e9920 171 return clauses;
172
173 return NULL_TREE;
174}
175
176/* Return true if CTX is for an omp parallel. */
177
178static inline bool
179is_parallel_ctx (omp_context *ctx)
180{
75a70cf9 181 return gimple_code (ctx->stmt) == GIMPLE_OMP_PARALLEL;
1e8e9920 182}
183
773c5ba7 184
fd6481cf 185/* Return true if CTX is for an omp task. */
186
187static inline bool
188is_task_ctx (omp_context *ctx)
189{
75a70cf9 190 return gimple_code (ctx->stmt) == GIMPLE_OMP_TASK;
fd6481cf 191}
192
193
194/* Return true if CTX is for an omp parallel or omp task. */
195
196static inline bool
197is_taskreg_ctx (omp_context *ctx)
198{
75a70cf9 199 return gimple_code (ctx->stmt) == GIMPLE_OMP_PARALLEL
200 || gimple_code (ctx->stmt) == GIMPLE_OMP_TASK;
fd6481cf 201}
202
203
773c5ba7 204/* Return true if REGION is a combined parallel+workshare region. */
1e8e9920 205
206static inline bool
773c5ba7 207is_combined_parallel (struct omp_region *region)
208{
209 return region->is_combined_parallel;
210}
211
212
213/* Extract the header elements of parallel loop FOR_STMT and store
214 them into *FD. */
215
216static void
75a70cf9 217extract_omp_for_data (gimple for_stmt, struct omp_for_data *fd,
fd6481cf 218 struct omp_for_data_loop *loops)
773c5ba7 219{
fd6481cf 220 tree t, var, *collapse_iter, *collapse_count;
221 tree count = NULL_TREE, iter_type = long_integer_type_node;
222 struct omp_for_data_loop *loop;
223 int i;
224 struct omp_for_data_loop dummy_loop;
389dd41b 225 location_t loc = gimple_location (for_stmt);
3d483a94 226 bool simd = gimple_omp_for_kind (for_stmt) == GF_OMP_FOR_KIND_SIMD;
773c5ba7 227
228 fd->for_stmt = for_stmt;
229 fd->pre = NULL;
75a70cf9 230 fd->collapse = gimple_omp_for_collapse (for_stmt);
fd6481cf 231 if (fd->collapse > 1)
232 fd->loops = loops;
233 else
234 fd->loops = &fd->loop;
773c5ba7 235
236 fd->have_nowait = fd->have_ordered = false;
237 fd->sched_kind = OMP_CLAUSE_SCHEDULE_STATIC;
238 fd->chunk_size = NULL_TREE;
fd6481cf 239 collapse_iter = NULL;
240 collapse_count = NULL;
773c5ba7 241
75a70cf9 242 for (t = gimple_omp_for_clauses (for_stmt); t ; t = OMP_CLAUSE_CHAIN (t))
55d6e7cd 243 switch (OMP_CLAUSE_CODE (t))
773c5ba7 244 {
245 case OMP_CLAUSE_NOWAIT:
246 fd->have_nowait = true;
247 break;
248 case OMP_CLAUSE_ORDERED:
249 fd->have_ordered = true;
250 break;
251 case OMP_CLAUSE_SCHEDULE:
252 fd->sched_kind = OMP_CLAUSE_SCHEDULE_KIND (t);
253 fd->chunk_size = OMP_CLAUSE_SCHEDULE_CHUNK_EXPR (t);
254 break;
fd6481cf 255 case OMP_CLAUSE_COLLAPSE:
256 if (fd->collapse > 1)
257 {
258 collapse_iter = &OMP_CLAUSE_COLLAPSE_ITERVAR (t);
259 collapse_count = &OMP_CLAUSE_COLLAPSE_COUNT (t);
260 }
773c5ba7 261 default:
262 break;
263 }
264
fd6481cf 265 /* FIXME: for now map schedule(auto) to schedule(static).
266 There should be analysis to determine whether all iterations
267 are approximately the same amount of work (then schedule(static)
bde357c8 268 is best) or if it varies (then schedule(dynamic,N) is better). */
fd6481cf 269 if (fd->sched_kind == OMP_CLAUSE_SCHEDULE_AUTO)
270 {
271 fd->sched_kind = OMP_CLAUSE_SCHEDULE_STATIC;
272 gcc_assert (fd->chunk_size == NULL);
273 }
274 gcc_assert (fd->collapse == 1 || collapse_iter != NULL);
773c5ba7 275 if (fd->sched_kind == OMP_CLAUSE_SCHEDULE_RUNTIME)
276 gcc_assert (fd->chunk_size == NULL);
277 else if (fd->chunk_size == NULL)
278 {
279 /* We only need to compute a default chunk size for ordered
280 static loops and dynamic loops. */
fd6481cf 281 if (fd->sched_kind != OMP_CLAUSE_SCHEDULE_STATIC
282 || fd->have_ordered
283 || fd->collapse > 1)
773c5ba7 284 fd->chunk_size = (fd->sched_kind == OMP_CLAUSE_SCHEDULE_STATIC)
285 ? integer_zero_node : integer_one_node;
286 }
fd6481cf 287
288 for (i = 0; i < fd->collapse; i++)
289 {
290 if (fd->collapse == 1)
291 loop = &fd->loop;
292 else if (loops != NULL)
293 loop = loops + i;
294 else
295 loop = &dummy_loop;
296
48e1416a 297
75a70cf9 298 loop->v = gimple_omp_for_index (for_stmt, i);
fd6481cf 299 gcc_assert (SSA_VAR_P (loop->v));
300 gcc_assert (TREE_CODE (TREE_TYPE (loop->v)) == INTEGER_TYPE
301 || TREE_CODE (TREE_TYPE (loop->v)) == POINTER_TYPE);
302 var = TREE_CODE (loop->v) == SSA_NAME ? SSA_NAME_VAR (loop->v) : loop->v;
75a70cf9 303 loop->n1 = gimple_omp_for_initial (for_stmt, i);
fd6481cf 304
75a70cf9 305 loop->cond_code = gimple_omp_for_cond (for_stmt, i);
306 loop->n2 = gimple_omp_for_final (for_stmt, i);
fd6481cf 307 switch (loop->cond_code)
308 {
309 case LT_EXPR:
310 case GT_EXPR:
311 break;
312 case LE_EXPR:
313 if (POINTER_TYPE_P (TREE_TYPE (loop->n2)))
2cc66f2a 314 loop->n2 = fold_build_pointer_plus_hwi_loc (loc, loop->n2, 1);
fd6481cf 315 else
389dd41b 316 loop->n2 = fold_build2_loc (loc,
317 PLUS_EXPR, TREE_TYPE (loop->n2), loop->n2,
fd6481cf 318 build_int_cst (TREE_TYPE (loop->n2), 1));
319 loop->cond_code = LT_EXPR;
320 break;
321 case GE_EXPR:
322 if (POINTER_TYPE_P (TREE_TYPE (loop->n2)))
2cc66f2a 323 loop->n2 = fold_build_pointer_plus_hwi_loc (loc, loop->n2, -1);
fd6481cf 324 else
389dd41b 325 loop->n2 = fold_build2_loc (loc,
326 MINUS_EXPR, TREE_TYPE (loop->n2), loop->n2,
fd6481cf 327 build_int_cst (TREE_TYPE (loop->n2), 1));
328 loop->cond_code = GT_EXPR;
329 break;
330 default:
331 gcc_unreachable ();
332 }
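
      /* After the canonicalization above, only LT_EXPR and GT_EXPR
	 remain: e.g. "i <= n2" has become "i < n2 + 1" and "i >= n2"
	 has become "i > n2 - 1".  */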
333
75a70cf9 334 t = gimple_omp_for_incr (for_stmt, i);
fd6481cf 335 gcc_assert (TREE_OPERAND (t, 0) == var);
336 switch (TREE_CODE (t))
337 {
338 case PLUS_EXPR:
fd6481cf 339 loop->step = TREE_OPERAND (t, 1);
340 break;
85d86b55 341 case POINTER_PLUS_EXPR:
342 loop->step = fold_convert (ssizetype, TREE_OPERAND (t, 1));
343 break;
fd6481cf 344 case MINUS_EXPR:
345 loop->step = TREE_OPERAND (t, 1);
389dd41b 346 loop->step = fold_build1_loc (loc,
347 NEGATE_EXPR, TREE_TYPE (loop->step),
fd6481cf 348 loop->step);
349 break;
350 default:
351 gcc_unreachable ();
352 }
353
3d483a94 354 if (simd)
355 {
356 if (fd->collapse == 1)
357 iter_type = TREE_TYPE (loop->v);
358 else if (i == 0
359 || TYPE_PRECISION (iter_type)
360 < TYPE_PRECISION (TREE_TYPE (loop->v)))
361 iter_type
362 = build_nonstandard_integer_type
363 (TYPE_PRECISION (TREE_TYPE (loop->v)), 1);
364 }
365 else if (iter_type != long_long_unsigned_type_node)
fd6481cf 366 {
367 if (POINTER_TYPE_P (TREE_TYPE (loop->v)))
368 iter_type = long_long_unsigned_type_node;
369 else if (TYPE_UNSIGNED (TREE_TYPE (loop->v))
370 && TYPE_PRECISION (TREE_TYPE (loop->v))
371 >= TYPE_PRECISION (iter_type))
372 {
373 tree n;
374
375 if (loop->cond_code == LT_EXPR)
389dd41b 376 n = fold_build2_loc (loc,
377 PLUS_EXPR, TREE_TYPE (loop->v),
fd6481cf 378 loop->n2, loop->step);
379 else
380 n = loop->n1;
381 if (TREE_CODE (n) != INTEGER_CST
382 || tree_int_cst_lt (TYPE_MAX_VALUE (iter_type), n))
383 iter_type = long_long_unsigned_type_node;
384 }
385 else if (TYPE_PRECISION (TREE_TYPE (loop->v))
386 > TYPE_PRECISION (iter_type))
387 {
388 tree n1, n2;
389
390 if (loop->cond_code == LT_EXPR)
391 {
392 n1 = loop->n1;
389dd41b 393 n2 = fold_build2_loc (loc,
394 PLUS_EXPR, TREE_TYPE (loop->v),
fd6481cf 395 loop->n2, loop->step);
396 }
397 else
398 {
389dd41b 399 n1 = fold_build2_loc (loc,
400 MINUS_EXPR, TREE_TYPE (loop->v),
fd6481cf 401 loop->n2, loop->step);
402 n2 = loop->n1;
403 }
404 if (TREE_CODE (n1) != INTEGER_CST
405 || TREE_CODE (n2) != INTEGER_CST
406 || !tree_int_cst_lt (TYPE_MIN_VALUE (iter_type), n1)
407 || !tree_int_cst_lt (n2, TYPE_MAX_VALUE (iter_type)))
408 iter_type = long_long_unsigned_type_node;
409 }
410 }
411
412 if (collapse_count && *collapse_count == NULL)
413 {
8e6b4515 414 t = fold_binary (loop->cond_code, boolean_type_node,
415 fold_convert (TREE_TYPE (loop->v), loop->n1),
416 fold_convert (TREE_TYPE (loop->v), loop->n2));
417 if (t && integer_zerop (t))
418 count = build_zero_cst (long_long_unsigned_type_node);
419 else if ((i == 0 || count != NULL_TREE)
420 && TREE_CODE (TREE_TYPE (loop->v)) == INTEGER_TYPE
421 && TREE_CONSTANT (loop->n1)
422 && TREE_CONSTANT (loop->n2)
423 && TREE_CODE (loop->step) == INTEGER_CST)
fd6481cf 424 {
425 tree itype = TREE_TYPE (loop->v);
426
427 if (POINTER_TYPE_P (itype))
3cea8318 428 itype = signed_type_for (itype);
fd6481cf 429 t = build_int_cst (itype, (loop->cond_code == LT_EXPR ? -1 : 1));
389dd41b 430 t = fold_build2_loc (loc,
431 PLUS_EXPR, itype,
432 fold_convert_loc (loc, itype, loop->step), t);
433 t = fold_build2_loc (loc, PLUS_EXPR, itype, t,
434 fold_convert_loc (loc, itype, loop->n2));
435 t = fold_build2_loc (loc, MINUS_EXPR, itype, t,
436 fold_convert_loc (loc, itype, loop->n1));
fd6481cf 437 if (TYPE_UNSIGNED (itype) && loop->cond_code == GT_EXPR)
389dd41b 438 t = fold_build2_loc (loc, TRUNC_DIV_EXPR, itype,
439 fold_build1_loc (loc, NEGATE_EXPR, itype, t),
440 fold_build1_loc (loc, NEGATE_EXPR, itype,
441 fold_convert_loc (loc, itype,
442 loop->step)));
fd6481cf 443 else
389dd41b 444 t = fold_build2_loc (loc, TRUNC_DIV_EXPR, itype, t,
445 fold_convert_loc (loc, itype, loop->step));
446 t = fold_convert_loc (loc, long_long_unsigned_type_node, t);
fd6481cf 447 if (count != NULL_TREE)
389dd41b 448 count = fold_build2_loc (loc,
449 MULT_EXPR, long_long_unsigned_type_node,
fd6481cf 450 count, t);
451 else
452 count = t;
453 if (TREE_CODE (count) != INTEGER_CST)
454 count = NULL_TREE;
455 }
8e6b4515 456 else if (count && !integer_zerop (count))
fd6481cf 457 count = NULL_TREE;
458 }
459 }
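
  /* A worked instance of the constant count computation above
     (illustrative only): for "for (i = 0; i < 10; i += 3)" with
     cond_code LT_EXPR the folded expression is
     (step - 1 + n2 - n1) / step = (3 - 1 + 10 - 0) / 3 = 4,
     matching the iterations i = 0, 3, 6, 9.  */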
460
3d483a94 461 if (count
462 && !simd)
fd6481cf 463 {
464 if (!tree_int_cst_lt (count, TYPE_MAX_VALUE (long_integer_type_node)))
465 iter_type = long_long_unsigned_type_node;
466 else
467 iter_type = long_integer_type_node;
468 }
469 else if (collapse_iter && *collapse_iter != NULL)
470 iter_type = TREE_TYPE (*collapse_iter);
471 fd->iter_type = iter_type;
472 if (collapse_iter && *collapse_iter == NULL)
473 *collapse_iter = create_tmp_var (iter_type, ".iter");
474 if (collapse_count && *collapse_count == NULL)
475 {
476 if (count)
389dd41b 477 *collapse_count = fold_convert_loc (loc, iter_type, count);
fd6481cf 478 else
479 *collapse_count = create_tmp_var (iter_type, ".count");
480 }
481
482 if (fd->collapse > 1)
483 {
484 fd->loop.v = *collapse_iter;
485 fd->loop.n1 = build_int_cst (TREE_TYPE (fd->loop.v), 0);
486 fd->loop.n2 = *collapse_count;
487 fd->loop.step = build_int_cst (TREE_TYPE (fd->loop.v), 1);
488 fd->loop.cond_code = LT_EXPR;
489 }
773c5ba7 490}
491
492
493/* Given two blocks PAR_ENTRY_BB and WS_ENTRY_BB such that WS_ENTRY_BB
494 is the immediate dominator of PAR_ENTRY_BB, return true if there
495 are no data dependencies that would prevent expanding the parallel
496 directive at PAR_ENTRY_BB as a combined parallel+workshare region.
497
498 When expanding a combined parallel+workshare region, the call to
499 the child function may need additional arguments in the case of
75a70cf9 500 GIMPLE_OMP_FOR regions. In some cases, these arguments are
501 computed out of variables passed in from the parent to the child
502 via 'struct .omp_data_s'. For instance:
773c5ba7 503
504 #pragma omp parallel for schedule (guided, i * 4)
505 for (j ...)
506
507 Is lowered into:
508
509 # BLOCK 2 (PAR_ENTRY_BB)
510 .omp_data_o.i = i;
511 #pragma omp parallel [child fn: bar.omp_fn.0 ( ..., D.1598)
48e1416a 512
773c5ba7 513 # BLOCK 3 (WS_ENTRY_BB)
514 .omp_data_i = &.omp_data_o;
515 D.1667 = .omp_data_i->i;
516 D.1598 = D.1667 * 4;
517 #pragma omp for schedule (guided, D.1598)
518
519 When we outline the parallel region, the call to the child function
520 'bar.omp_fn.0' will need the value D.1598 in its argument list, but
521 that value is computed *after* the call site. So, in principle we
522 cannot do the transformation.
523
524 To see whether the code in WS_ENTRY_BB blocks the combined
525 parallel+workshare call, we collect all the variables used in the
75a70cf9 526 GIMPLE_OMP_FOR header and check whether they appear on the LHS of any
773c5ba7 527 statement in WS_ENTRY_BB. If so, then we cannot emit the combined
528 call.
529
530 FIXME. If we had the SSA form built at this point, we could merely
531 hoist the code in block 3 into block 2 and be done with it. But at
532 this point we don't have dataflow information and though we could
533 hack something up here, it is really not worth the aggravation. */
534
535static bool
f018d957 536workshare_safe_to_combine_p (basic_block ws_entry_bb)
773c5ba7 537{
538 struct omp_for_data fd;
f018d957 539 gimple ws_stmt = last_stmt (ws_entry_bb);
773c5ba7 540
75a70cf9 541 if (gimple_code (ws_stmt) == GIMPLE_OMP_SECTIONS)
773c5ba7 542 return true;
543
75a70cf9 544 gcc_assert (gimple_code (ws_stmt) == GIMPLE_OMP_FOR);
773c5ba7 545
fd6481cf 546 extract_omp_for_data (ws_stmt, &fd, NULL);
547
548 if (fd.collapse > 1 && TREE_CODE (fd.loop.n2) != INTEGER_CST)
549 return false;
550 if (fd.iter_type != long_integer_type_node)
551 return false;
773c5ba7 552
553 /* FIXME. We give up too easily here. If any of these arguments
554 are not constants, they will likely involve variables that have
555 been mapped into fields of .omp_data_s for sharing with the child
556 function. With appropriate data flow, it would be possible to
557 see through this. */
fd6481cf 558 if (!is_gimple_min_invariant (fd.loop.n1)
559 || !is_gimple_min_invariant (fd.loop.n2)
560 || !is_gimple_min_invariant (fd.loop.step)
773c5ba7 561 || (fd.chunk_size && !is_gimple_min_invariant (fd.chunk_size)))
562 return false;
563
564 return true;
565}
566
567
568/* Collect additional arguments needed to emit a combined
569 parallel+workshare call. WS_STMT is the workshare directive being
570 expanded. */
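
/* For a GIMPLE_OMP_FOR these are the loop bounds, step and optional
   chunk size that end up being passed to one of the
   GOMP_parallel_loop_*_start entry points (e.g.
   GOMP_parallel_loop_dynamic_start); for GIMPLE_OMP_SECTIONS it is
   the section count passed to GOMP_parallel_sections_start.  */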
571
f1f41a6c 572static vec<tree, va_gc> *
75a70cf9 573get_ws_args_for (gimple ws_stmt)
773c5ba7 574{
575 tree t;
389dd41b 576 location_t loc = gimple_location (ws_stmt);
f1f41a6c 577 vec<tree, va_gc> *ws_args;
773c5ba7 578
75a70cf9 579 if (gimple_code (ws_stmt) == GIMPLE_OMP_FOR)
773c5ba7 580 {
581 struct omp_for_data fd;
773c5ba7 582
fd6481cf 583 extract_omp_for_data (ws_stmt, &fd, NULL);
773c5ba7 584
f1f41a6c 585 vec_alloc (ws_args, 3 + (fd.chunk_size != 0));
773c5ba7 586
414c3a2c 587 t = fold_convert_loc (loc, long_integer_type_node, fd.loop.n1);
f1f41a6c 588 ws_args->quick_push (t);
773c5ba7 589
389dd41b 590 t = fold_convert_loc (loc, long_integer_type_node, fd.loop.n2);
f1f41a6c 591 ws_args->quick_push (t);
773c5ba7 592
414c3a2c 593 t = fold_convert_loc (loc, long_integer_type_node, fd.loop.step);
f1f41a6c 594 ws_args->quick_push (t);
414c3a2c 595
596 if (fd.chunk_size)
597 {
598 t = fold_convert_loc (loc, long_integer_type_node, fd.chunk_size);
f1f41a6c 599 ws_args->quick_push (t);
414c3a2c 600 }
773c5ba7 601
602 return ws_args;
603 }
75a70cf9 604 else if (gimple_code (ws_stmt) == GIMPLE_OMP_SECTIONS)
773c5ba7 605 {
ac6e3339 606 /* Number of sections is equal to the number of edges from the
75a70cf9 607 GIMPLE_OMP_SECTIONS_SWITCH statement, except for the one to
608 the exit of the sections region. */
609 basic_block bb = single_succ (gimple_bb (ws_stmt));
ac6e3339 610 t = build_int_cst (unsigned_type_node, EDGE_COUNT (bb->succs) - 1);
f1f41a6c 611 vec_alloc (ws_args, 1);
612 ws_args->quick_push (t);
414c3a2c 613 return ws_args;
773c5ba7 614 }
615
616 gcc_unreachable ();
617}
618
619
620/* Discover whether REGION is a combined parallel+workshare region. */
621
622static void
623determine_parallel_type (struct omp_region *region)
1e8e9920 624{
773c5ba7 625 basic_block par_entry_bb, par_exit_bb;
626 basic_block ws_entry_bb, ws_exit_bb;
627
03ed154b 628 if (region == NULL || region->inner == NULL
ac6e3339 629 || region->exit == NULL || region->inner->exit == NULL
630 || region->inner->cont == NULL)
773c5ba7 631 return;
632
633 /* We only support parallel+for and parallel+sections. */
75a70cf9 634 if (region->type != GIMPLE_OMP_PARALLEL
635 || (region->inner->type != GIMPLE_OMP_FOR
636 && region->inner->type != GIMPLE_OMP_SECTIONS))
773c5ba7 637 return;
638
639 /* Check for perfect nesting PAR_ENTRY_BB -> WS_ENTRY_BB and
640 WS_EXIT_BB -> PAR_EXIT_BB. */
61e47ac8 641 par_entry_bb = region->entry;
642 par_exit_bb = region->exit;
643 ws_entry_bb = region->inner->entry;
644 ws_exit_bb = region->inner->exit;
773c5ba7 645
646 if (single_succ (par_entry_bb) == ws_entry_bb
647 && single_succ (ws_exit_bb) == par_exit_bb
f018d957 648 && workshare_safe_to_combine_p (ws_entry_bb)
75a70cf9 649 && (gimple_omp_parallel_combined_p (last_stmt (par_entry_bb))
de7ef844 650 || (last_and_only_stmt (ws_entry_bb)
651 && last_and_only_stmt (par_exit_bb))))
773c5ba7 652 {
75a70cf9 653 gimple ws_stmt = last_stmt (ws_entry_bb);
61e47ac8 654
75a70cf9 655 if (region->inner->type == GIMPLE_OMP_FOR)
773c5ba7 656 {
657 /* If this is a combined parallel loop, we need to determine
658 whether or not to use the combined library calls. There
659 are two cases where we do not apply the transformation:
660 static loops and any kind of ordered loop. In the first
661 case, we already open code the loop so there is no need
662 to do anything else. In the latter case, the combined
663 parallel loop call would still need extra synchronization
664 to implement ordered semantics, so there would not be any
665 gain in using the combined call. */
75a70cf9 666 tree clauses = gimple_omp_for_clauses (ws_stmt);
773c5ba7 667 tree c = find_omp_clause (clauses, OMP_CLAUSE_SCHEDULE);
668 if (c == NULL
669 || OMP_CLAUSE_SCHEDULE_KIND (c) == OMP_CLAUSE_SCHEDULE_STATIC
670 || find_omp_clause (clauses, OMP_CLAUSE_ORDERED))
671 {
672 region->is_combined_parallel = false;
673 region->inner->is_combined_parallel = false;
674 return;
675 }
676 }
677
678 region->is_combined_parallel = true;
679 region->inner->is_combined_parallel = true;
61e47ac8 680 region->ws_args = get_ws_args_for (ws_stmt);
773c5ba7 681 }
1e8e9920 682}
683
773c5ba7 684
1e8e9920 685/* Return true if EXPR is variable sized. */
686
687static inline bool
1f1872fd 688is_variable_sized (const_tree expr)
1e8e9920 689{
690 return !TREE_CONSTANT (TYPE_SIZE_UNIT (TREE_TYPE (expr)));
691}
692
693/* Return true if DECL is a reference type. */
694
695static inline bool
696is_reference (tree decl)
697{
698 return lang_hooks.decls.omp_privatize_by_reference (decl);
699}
700
701/* Lookup variables in the decl or field splay trees. The "maybe" form
 702 allows the variable not to have been entered; otherwise we
 703 assert that it must have been entered. */
704
705static inline tree
706lookup_decl (tree var, omp_context *ctx)
707{
e3022db7 708 tree *n;
709 n = (tree *) pointer_map_contains (ctx->cb.decl_map, var);
710 return *n;
1e8e9920 711}
712
713static inline tree
e8a588af 714maybe_lookup_decl (const_tree var, omp_context *ctx)
1e8e9920 715{
e3022db7 716 tree *n;
717 n = (tree *) pointer_map_contains (ctx->cb.decl_map, var);
718 return n ? *n : NULL_TREE;
1e8e9920 719}
720
721static inline tree
722lookup_field (tree var, omp_context *ctx)
723{
724 splay_tree_node n;
725 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) var);
726 return (tree) n->value;
727}
728
fd6481cf 729static inline tree
730lookup_sfield (tree var, omp_context *ctx)
731{
732 splay_tree_node n;
733 n = splay_tree_lookup (ctx->sfield_map
734 ? ctx->sfield_map : ctx->field_map,
735 (splay_tree_key) var);
736 return (tree) n->value;
737}
738
1e8e9920 739static inline tree
740maybe_lookup_field (tree var, omp_context *ctx)
741{
742 splay_tree_node n;
743 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) var);
744 return n ? (tree) n->value : NULL_TREE;
745}
746
e8a588af 747/* Return true if DECL should be copied by pointer. SHARED_CTX is
748 the parallel context if DECL is to be shared. */
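
/* For instance: a non-addressable shared int can use copy-in/copy-out
   (.omp_data_o.i = i on entry, i = .omp_data_o.i on exit), whereas an
   aggregate or any TREE_ADDRESSABLE variable must be passed by
   pointer so that every thread operates on the one shared object.  */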
1e8e9920 749
750static bool
fd6481cf 751use_pointer_for_field (tree decl, omp_context *shared_ctx)
1e8e9920 752{
753 if (AGGREGATE_TYPE_P (TREE_TYPE (decl)))
754 return true;
755
554f2707 756 /* We can only use copy-in/copy-out semantics for shared variables
1e8e9920 757 when we know the value is not accessible from an outer scope. */
e8a588af 758 if (shared_ctx)
1e8e9920 759 {
760 /* ??? Trivially accessible from anywhere. But why would we even
761 be passing an address in this case? Should we simply assert
762 this to be false, or should we have a cleanup pass that removes
763 these from the list of mappings? */
764 if (TREE_STATIC (decl) || DECL_EXTERNAL (decl))
765 return true;
766
767 /* For variables with DECL_HAS_VALUE_EXPR_P set, we cannot tell
768 without analyzing the expression whether or not its location
769 is accessible to anyone else. In the case of nested parallel
770 regions it certainly may be. */
df2c34fc 771 if (TREE_CODE (decl) != RESULT_DECL && DECL_HAS_VALUE_EXPR_P (decl))
1e8e9920 772 return true;
773
774 /* Do not use copy-in/copy-out for variables that have their
775 address taken. */
776 if (TREE_ADDRESSABLE (decl))
777 return true;
e8a588af 778
b8214689 779 /* lower_send_shared_vars only uses copy-in, but not copy-out
780 for these. */
781 if (TREE_READONLY (decl)
782 || ((TREE_CODE (decl) == RESULT_DECL
783 || TREE_CODE (decl) == PARM_DECL)
784 && DECL_BY_REFERENCE (decl)))
785 return false;
786
e8a588af 787 /* Disallow copy-in/out in nested parallel if
788 decl is shared in outer parallel, otherwise
789 each thread could store the shared variable
790 in its own copy-in location, making the
791 variable no longer really shared. */
b8214689 792 if (shared_ctx->is_nested)
e8a588af 793 {
794 omp_context *up;
795
796 for (up = shared_ctx->outer; up; up = up->outer)
0cb159ec 797 if (is_taskreg_ctx (up) && maybe_lookup_decl (decl, up))
e8a588af 798 break;
799
0cb159ec 800 if (up)
e8a588af 801 {
802 tree c;
803
75a70cf9 804 for (c = gimple_omp_taskreg_clauses (up->stmt);
e8a588af 805 c; c = OMP_CLAUSE_CHAIN (c))
806 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_SHARED
807 && OMP_CLAUSE_DECL (c) == decl)
808 break;
809
810 if (c)
784ad964 811 goto maybe_mark_addressable_and_ret;
e8a588af 812 }
813 }
fd6481cf 814
b8214689 815 /* For tasks avoid using copy-in/out. As tasks can be
fd6481cf 816 deferred or executed in a different thread, when GOMP_task
817 returns, the task hasn't necessarily terminated. */
b8214689 818 if (is_task_ctx (shared_ctx))
fd6481cf 819 {
784ad964 820 tree outer;
821 maybe_mark_addressable_and_ret:
822 outer = maybe_lookup_decl_in_outer_ctx (decl, shared_ctx);
fd6481cf 823 if (is_gimple_reg (outer))
824 {
825 /* Taking address of OUTER in lower_send_shared_vars
826 might need regimplification of everything that uses the
827 variable. */
828 if (!task_shared_vars)
829 task_shared_vars = BITMAP_ALLOC (NULL);
830 bitmap_set_bit (task_shared_vars, DECL_UID (outer));
831 TREE_ADDRESSABLE (outer) = 1;
832 }
833 return true;
834 }
1e8e9920 835 }
836
837 return false;
838}
839
79acaae1 840/* Create a new VAR_DECL and copy information from VAR to it. */
1e8e9920 841
79acaae1 842tree
843copy_var_decl (tree var, tree name, tree type)
1e8e9920 844{
e60a6f7b 845 tree copy = build_decl (DECL_SOURCE_LOCATION (var), VAR_DECL, name, type);
1e8e9920 846
847 TREE_ADDRESSABLE (copy) = TREE_ADDRESSABLE (var);
79acaae1 848 TREE_THIS_VOLATILE (copy) = TREE_THIS_VOLATILE (var);
8ea8de24 849 DECL_GIMPLE_REG_P (copy) = DECL_GIMPLE_REG_P (var);
1e8e9920 850 DECL_ARTIFICIAL (copy) = DECL_ARTIFICIAL (var);
851 DECL_IGNORED_P (copy) = DECL_IGNORED_P (var);
79acaae1 852 DECL_CONTEXT (copy) = DECL_CONTEXT (var);
34d8484d 853 TREE_NO_WARNING (copy) = TREE_NO_WARNING (var);
1e8e9920 854 TREE_USED (copy) = 1;
1e8e9920 855 DECL_SEEN_IN_BIND_EXPR_P (copy) = 1;
3b36c9f7 856 DECL_ATTRIBUTES (copy) = DECL_ATTRIBUTES (var);
1e8e9920 857
79acaae1 858 return copy;
859}
860
861/* Construct a new automatic decl similar to VAR. */
862
863static tree
864omp_copy_decl_2 (tree var, tree name, tree type, omp_context *ctx)
865{
866 tree copy = copy_var_decl (var, name, type);
867
868 DECL_CONTEXT (copy) = current_function_decl;
1767a056 869 DECL_CHAIN (copy) = ctx->block_vars;
1e8e9920 870 ctx->block_vars = copy;
871
872 return copy;
873}
874
875static tree
876omp_copy_decl_1 (tree var, omp_context *ctx)
877{
878 return omp_copy_decl_2 (var, DECL_NAME (var), TREE_TYPE (var), ctx);
879}
880
445d06b6 881/* Build COMPONENT_REF and set TREE_THIS_VOLATILE and TREE_READONLY on it
882 as appropriate. */
883static tree
884omp_build_component_ref (tree obj, tree field)
885{
886 tree ret = build3 (COMPONENT_REF, TREE_TYPE (field), obj, field, NULL);
887 if (TREE_THIS_VOLATILE (field))
888 TREE_THIS_VOLATILE (ret) |= 1;
889 if (TREE_READONLY (field))
890 TREE_READONLY (ret) |= 1;
891 return ret;
892}
893
1e8e9920 894/* Build tree nodes to access the field for VAR on the receiver side. */
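
/* The result has the shape .omp_data_i->var, with an extra
   dereference (*.omp_data_i->var) when the field holds a pointer to
   the shared object.  */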
895
896static tree
897build_receiver_ref (tree var, bool by_ref, omp_context *ctx)
898{
899 tree x, field = lookup_field (var, ctx);
900
901 /* If the receiver record type was remapped in the child function,
902 remap the field into the new record type. */
903 x = maybe_lookup_field (field, ctx);
904 if (x != NULL)
905 field = x;
906
182cf5a9 907 x = build_simple_mem_ref (ctx->receiver_decl);
445d06b6 908 x = omp_build_component_ref (x, field);
1e8e9920 909 if (by_ref)
182cf5a9 910 x = build_simple_mem_ref (x);
1e8e9920 911
912 return x;
913}
914
915/* Build tree nodes to access VAR in the scope outer to CTX. In the case
916 of a parallel, this is a component reference; for workshare constructs
917 this is some variable. */
918
919static tree
920build_outer_var_ref (tree var, omp_context *ctx)
921{
922 tree x;
923
f49d7bb5 924 if (is_global_var (maybe_lookup_decl_in_outer_ctx (var, ctx)))
1e8e9920 925 x = var;
926 else if (is_variable_sized (var))
927 {
928 x = TREE_OPERAND (DECL_VALUE_EXPR (var), 0);
929 x = build_outer_var_ref (x, ctx);
182cf5a9 930 x = build_simple_mem_ref (x);
1e8e9920 931 }
fd6481cf 932 else if (is_taskreg_ctx (ctx))
1e8e9920 933 {
e8a588af 934 bool by_ref = use_pointer_for_field (var, NULL);
1e8e9920 935 x = build_receiver_ref (var, by_ref, ctx);
936 }
3d483a94 937 else if (gimple_code (ctx->stmt) == GIMPLE_OMP_FOR
938 && gimple_omp_for_kind (ctx->stmt) == GF_OMP_FOR_KIND_SIMD)
939 {
940 /* #pragma omp simd isn't a worksharing construct, and can reference even
941 private vars in its linear etc. clauses. */
942 x = NULL_TREE;
943 if (ctx->outer && is_taskreg_ctx (ctx))
944 x = lookup_decl (var, ctx->outer);
945 else if (ctx->outer)
84cb1020 946 x = maybe_lookup_decl_in_outer_ctx (var, ctx);
3d483a94 947 if (x == NULL_TREE)
948 x = var;
949 }
1e8e9920 950 else if (ctx->outer)
951 x = lookup_decl (var, ctx->outer);
9438af57 952 else if (is_reference (var))
953 /* This can happen with orphaned constructs. If var is reference, it is
954 possible it is shared and as such valid. */
955 x = var;
1e8e9920 956 else
957 gcc_unreachable ();
958
959 if (is_reference (var))
182cf5a9 960 x = build_simple_mem_ref (x);
1e8e9920 961
962 return x;
963}
964
965/* Build tree nodes to access the field for VAR on the sender side. */
966
967static tree
968build_sender_ref (tree var, omp_context *ctx)
969{
fd6481cf 970 tree field = lookup_sfield (var, ctx);
445d06b6 971 return omp_build_component_ref (ctx->sender_decl, field);
1e8e9920 972}
973
974/* Add a new field for VAR inside the structure CTX->SENDER_DECL. */
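
/* MASK semantics, as inferred from the callers below: bit 0 requests
   a field in CTX->RECORD_TYPE (recorded in FIELD_MAP), bit 1 a field
   in CTX->SRECORD_TYPE (recorded in SFIELD_MAP); MASK == 3 installs
   the field in both records.  */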
975
976static void
fd6481cf 977install_var_field (tree var, bool by_ref, int mask, omp_context *ctx)
1e8e9920 978{
fd6481cf 979 tree field, type, sfield = NULL_TREE;
1e8e9920 980
fd6481cf 981 gcc_assert ((mask & 1) == 0
982 || !splay_tree_lookup (ctx->field_map, (splay_tree_key) var));
983 gcc_assert ((mask & 2) == 0 || !ctx->sfield_map
984 || !splay_tree_lookup (ctx->sfield_map, (splay_tree_key) var));
1e8e9920 985
986 type = TREE_TYPE (var);
987 if (by_ref)
988 type = build_pointer_type (type);
fd6481cf 989 else if ((mask & 3) == 1 && is_reference (var))
990 type = TREE_TYPE (type);
1e8e9920 991
e60a6f7b 992 field = build_decl (DECL_SOURCE_LOCATION (var),
993 FIELD_DECL, DECL_NAME (var), type);
1e8e9920 994
995 /* Remember what variable this field was created for. This does have a
996 side effect of making dwarf2out ignore this member, so for helpful
997 debugging we clear it later in delete_omp_context. */
998 DECL_ABSTRACT_ORIGIN (field) = var;
fd6481cf 999 if (type == TREE_TYPE (var))
1000 {
1001 DECL_ALIGN (field) = DECL_ALIGN (var);
1002 DECL_USER_ALIGN (field) = DECL_USER_ALIGN (var);
1003 TREE_THIS_VOLATILE (field) = TREE_THIS_VOLATILE (var);
1004 }
1005 else
1006 DECL_ALIGN (field) = TYPE_ALIGN (type);
1e8e9920 1007
fd6481cf 1008 if ((mask & 3) == 3)
1009 {
1010 insert_field_into_struct (ctx->record_type, field);
1011 if (ctx->srecord_type)
1012 {
e60a6f7b 1013 sfield = build_decl (DECL_SOURCE_LOCATION (var),
1014 FIELD_DECL, DECL_NAME (var), type);
fd6481cf 1015 DECL_ABSTRACT_ORIGIN (sfield) = var;
1016 DECL_ALIGN (sfield) = DECL_ALIGN (field);
1017 DECL_USER_ALIGN (sfield) = DECL_USER_ALIGN (field);
1018 TREE_THIS_VOLATILE (sfield) = TREE_THIS_VOLATILE (field);
1019 insert_field_into_struct (ctx->srecord_type, sfield);
1020 }
1021 }
1022 else
1023 {
1024 if (ctx->srecord_type == NULL_TREE)
1025 {
1026 tree t;
1027
1028 ctx->srecord_type = lang_hooks.types.make_type (RECORD_TYPE);
1029 ctx->sfield_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
1030 for (t = TYPE_FIELDS (ctx->record_type); t ; t = TREE_CHAIN (t))
1031 {
e60a6f7b 1032 sfield = build_decl (DECL_SOURCE_LOCATION (var),
1033 FIELD_DECL, DECL_NAME (t), TREE_TYPE (t));
fd6481cf 1034 DECL_ABSTRACT_ORIGIN (sfield) = DECL_ABSTRACT_ORIGIN (t);
1035 insert_field_into_struct (ctx->srecord_type, sfield);
1036 splay_tree_insert (ctx->sfield_map,
1037 (splay_tree_key) DECL_ABSTRACT_ORIGIN (t),
1038 (splay_tree_value) sfield);
1039 }
1040 }
1041 sfield = field;
1042 insert_field_into_struct ((mask & 1) ? ctx->record_type
1043 : ctx->srecord_type, field);
1044 }
1e8e9920 1045
fd6481cf 1046 if (mask & 1)
1047 splay_tree_insert (ctx->field_map, (splay_tree_key) var,
1048 (splay_tree_value) field);
1049 if ((mask & 2) && ctx->sfield_map)
1050 splay_tree_insert (ctx->sfield_map, (splay_tree_key) var,
1051 (splay_tree_value) sfield);
1e8e9920 1052}
1053
1054static tree
1055install_var_local (tree var, omp_context *ctx)
1056{
1057 tree new_var = omp_copy_decl_1 (var, ctx);
1058 insert_decl_map (&ctx->cb, var, new_var);
1059 return new_var;
1060}
1061
1062/* Adjust the replacement for DECL in CTX for the new context. This means
1063 copying the DECL_VALUE_EXPR, and fixing up the type. */
1064
1065static void
1066fixup_remapped_decl (tree decl, omp_context *ctx, bool private_debug)
1067{
1068 tree new_decl, size;
1069
1070 new_decl = lookup_decl (decl, ctx);
1071
1072 TREE_TYPE (new_decl) = remap_type (TREE_TYPE (decl), &ctx->cb);
1073
1074 if ((!TREE_CONSTANT (DECL_SIZE (new_decl)) || private_debug)
1075 && DECL_HAS_VALUE_EXPR_P (decl))
1076 {
1077 tree ve = DECL_VALUE_EXPR (decl);
75a70cf9 1078 walk_tree (&ve, copy_tree_body_r, &ctx->cb, NULL);
1e8e9920 1079 SET_DECL_VALUE_EXPR (new_decl, ve);
1080 DECL_HAS_VALUE_EXPR_P (new_decl) = 1;
1081 }
1082
1083 if (!TREE_CONSTANT (DECL_SIZE (new_decl)))
1084 {
1085 size = remap_decl (DECL_SIZE (decl), &ctx->cb);
1086 if (size == error_mark_node)
1087 size = TYPE_SIZE (TREE_TYPE (new_decl));
1088 DECL_SIZE (new_decl) = size;
1089
1090 size = remap_decl (DECL_SIZE_UNIT (decl), &ctx->cb);
1091 if (size == error_mark_node)
1092 size = TYPE_SIZE_UNIT (TREE_TYPE (new_decl));
1093 DECL_SIZE_UNIT (new_decl) = size;
1094 }
1095}
1096
1097/* The callback for remap_decl. Search all containing contexts for a
1098 mapping of the variable; this avoids having to duplicate the splay
1099 tree ahead of time. We know a mapping doesn't already exist in the
1100 given context. Create new mappings to implement default semantics. */
1101
1102static tree
1103omp_copy_decl (tree var, copy_body_data *cb)
1104{
1105 omp_context *ctx = (omp_context *) cb;
1106 tree new_var;
1107
1e8e9920 1108 if (TREE_CODE (var) == LABEL_DECL)
1109 {
e60a6f7b 1110 new_var = create_artificial_label (DECL_SOURCE_LOCATION (var));
773c5ba7 1111 DECL_CONTEXT (new_var) = current_function_decl;
1e8e9920 1112 insert_decl_map (&ctx->cb, var, new_var);
1113 return new_var;
1114 }
1115
fd6481cf 1116 while (!is_taskreg_ctx (ctx))
1e8e9920 1117 {
1118 ctx = ctx->outer;
1119 if (ctx == NULL)
1120 return var;
1121 new_var = maybe_lookup_decl (var, ctx);
1122 if (new_var)
1123 return new_var;
1124 }
1125
f49d7bb5 1126 if (is_global_var (var) || decl_function_context (var) != ctx->cb.src_fn)
1127 return var;
1128
1e8e9920 1129 return error_mark_node;
1130}
1131
773c5ba7 1132
773c5ba7 1135/* Debugging dumps for parallel regions. */
1136void dump_omp_region (FILE *, struct omp_region *, int);
1137void debug_omp_region (struct omp_region *);
1138void debug_all_omp_regions (void);
1139
1140/* Dump the parallel region tree rooted at REGION. */
1141
1142void
1143dump_omp_region (FILE *file, struct omp_region *region, int indent)
1144{
61e47ac8 1145 fprintf (file, "%*sbb %d: %s\n", indent, "", region->entry->index,
75a70cf9 1146 gimple_code_name[region->type]);
773c5ba7 1147
1148 if (region->inner)
1149 dump_omp_region (file, region->inner, indent + 4);
1150
61e47ac8 1151 if (region->cont)
1152 {
75a70cf9 1153 fprintf (file, "%*sbb %d: GIMPLE_OMP_CONTINUE\n", indent, "",
61e47ac8 1154 region->cont->index);
1155 }
48e1416a 1156
773c5ba7 1157 if (region->exit)
75a70cf9 1158 fprintf (file, "%*sbb %d: GIMPLE_OMP_RETURN\n", indent, "",
61e47ac8 1159 region->exit->index);
773c5ba7 1160 else
61e47ac8 1161 fprintf (file, "%*s[no exit marker]\n", indent, "");
773c5ba7 1162
1163 if (region->next)
61e47ac8 1164 dump_omp_region (file, region->next, indent);
773c5ba7 1165}
1166
4b987fac 1167DEBUG_FUNCTION void
773c5ba7 1168debug_omp_region (struct omp_region *region)
1169{
1170 dump_omp_region (stderr, region, 0);
1171}
1172
4b987fac 1173DEBUG_FUNCTION void
773c5ba7 1174debug_all_omp_regions (void)
1175{
1176 dump_omp_region (stderr, root_omp_region, 0);
1177}
1178
1179
1180/* Create a new parallel region starting at STMT inside region PARENT. */
1181
61e47ac8 1182struct omp_region *
75a70cf9 1183new_omp_region (basic_block bb, enum gimple_code type,
1184 struct omp_region *parent)
773c5ba7 1185{
4077bf7a 1186 struct omp_region *region = XCNEW (struct omp_region);
773c5ba7 1187
1188 region->outer = parent;
61e47ac8 1189 region->entry = bb;
1190 region->type = type;
773c5ba7 1191
1192 if (parent)
1193 {
1194 /* This is a nested region. Add it to the list of inner
1195 regions in PARENT. */
1196 region->next = parent->inner;
1197 parent->inner = region;
1198 }
61e47ac8 1199 else
773c5ba7 1200 {
1201 /* This is a toplevel region. Add it to the list of toplevel
1202 regions in ROOT_OMP_REGION. */
1203 region->next = root_omp_region;
1204 root_omp_region = region;
1205 }
61e47ac8 1206
1207 return region;
1208}
1209
1210/* Release the memory associated with the region tree rooted at REGION. */
1211
1212static void
1213free_omp_region_1 (struct omp_region *region)
1214{
1215 struct omp_region *i, *n;
1216
1217 for (i = region->inner; i ; i = n)
773c5ba7 1218 {
61e47ac8 1219 n = i->next;
1220 free_omp_region_1 (i);
773c5ba7 1221 }
1222
61e47ac8 1223 free (region);
1224}
773c5ba7 1225
61e47ac8 1226/* Release the memory for the entire omp region tree. */
1227
1228void
1229free_omp_regions (void)
1230{
1231 struct omp_region *r, *n;
1232 for (r = root_omp_region; r ; r = n)
1233 {
1234 n = r->next;
1235 free_omp_region_1 (r);
1236 }
1237 root_omp_region = NULL;
773c5ba7 1238}
1239
1240
1e8e9920 1241/* Create a new context, with OUTER_CTX being the surrounding context. */
1242
1243static omp_context *
75a70cf9 1244new_omp_context (gimple stmt, omp_context *outer_ctx)
1e8e9920 1245{
1246 omp_context *ctx = XCNEW (omp_context);
1247
1248 splay_tree_insert (all_contexts, (splay_tree_key) stmt,
1249 (splay_tree_value) ctx);
1250 ctx->stmt = stmt;
1251
1252 if (outer_ctx)
1253 {
1254 ctx->outer = outer_ctx;
1255 ctx->cb = outer_ctx->cb;
1256 ctx->cb.block = NULL;
1257 ctx->depth = outer_ctx->depth + 1;
1258 }
1259 else
1260 {
1261 ctx->cb.src_fn = current_function_decl;
1262 ctx->cb.dst_fn = current_function_decl;
53f79206 1263 ctx->cb.src_node = cgraph_get_node (current_function_decl);
1264 gcc_checking_assert (ctx->cb.src_node);
1e8e9920 1265 ctx->cb.dst_node = ctx->cb.src_node;
1266 ctx->cb.src_cfun = cfun;
1267 ctx->cb.copy_decl = omp_copy_decl;
e38def9c 1268 ctx->cb.eh_lp_nr = 0;
1e8e9920 1269 ctx->cb.transform_call_graph_edges = CB_CGE_MOVE;
1270 ctx->depth = 1;
1271 }
1272
e3022db7 1273 ctx->cb.decl_map = pointer_map_create ();
1e8e9920 1274
1275 return ctx;
1276}
1277
75a70cf9 1278static gimple_seq maybe_catch_exception (gimple_seq);
f6430caa 1279
1280/* Finalize task copyfn. */
1281
1282static void
75a70cf9 1283finalize_task_copyfn (gimple task_stmt)
f6430caa 1284{
1285 struct function *child_cfun;
9078126c 1286 tree child_fn;
e3a19533 1287 gimple_seq seq = NULL, new_seq;
75a70cf9 1288 gimple bind;
f6430caa 1289
75a70cf9 1290 child_fn = gimple_omp_task_copy_fn (task_stmt);
f6430caa 1291 if (child_fn == NULL_TREE)
1292 return;
1293
1294 child_cfun = DECL_STRUCT_FUNCTION (child_fn);
82b40354 1295 DECL_STRUCT_FUNCTION (child_fn)->curr_properties = cfun->curr_properties;
f6430caa 1296
f6430caa 1297 push_cfun (child_cfun);
7e3aae05 1298 bind = gimplify_body (child_fn, false);
75a70cf9 1299 gimple_seq_add_stmt (&seq, bind);
1300 new_seq = maybe_catch_exception (seq);
1301 if (new_seq != seq)
1302 {
1303 bind = gimple_build_bind (NULL, new_seq, NULL);
e3a19533 1304 seq = NULL;
75a70cf9 1305 gimple_seq_add_stmt (&seq, bind);
1306 }
1307 gimple_set_body (child_fn, seq);
f6430caa 1308 pop_cfun ();
f6430caa 1309
82b40354 1310 /* Inform the callgraph about the new function. */
f6430caa 1311 cgraph_add_new_function (child_fn, false);
1312}
1313
1e8e9920 1314/* Destroy an omp_context data structure. Called through the splay tree
1315 value delete callback. */
1316
1317static void
1318delete_omp_context (splay_tree_value value)
1319{
1320 omp_context *ctx = (omp_context *) value;
1321
e3022db7 1322 pointer_map_destroy (ctx->cb.decl_map);
1e8e9920 1323
1324 if (ctx->field_map)
1325 splay_tree_delete (ctx->field_map);
fd6481cf 1326 if (ctx->sfield_map)
1327 splay_tree_delete (ctx->sfield_map);
1e8e9920 1328
1329 /* We hijacked DECL_ABSTRACT_ORIGIN earlier. We need to clear it before
1330 it produces corrupt debug information. */
1331 if (ctx->record_type)
1332 {
1333 tree t;
1767a056 1334 for (t = TYPE_FIELDS (ctx->record_type); t ; t = DECL_CHAIN (t))
1e8e9920 1335 DECL_ABSTRACT_ORIGIN (t) = NULL;
1336 }
fd6481cf 1337 if (ctx->srecord_type)
1338 {
1339 tree t;
1767a056 1340 for (t = TYPE_FIELDS (ctx->srecord_type); t ; t = DECL_CHAIN (t))
fd6481cf 1341 DECL_ABSTRACT_ORIGIN (t) = NULL;
1342 }
1e8e9920 1343
f6430caa 1344 if (is_task_ctx (ctx))
1345 finalize_task_copyfn (ctx->stmt);
1346
1e8e9920 1347 XDELETE (ctx);
1348}
1349
1350/* Fix up RECEIVER_DECL with a type that has been remapped to the child
1351 context. */
1352
1353static void
1354fixup_child_record_type (omp_context *ctx)
1355{
1356 tree f, type = ctx->record_type;
1357
1358 /* ??? It isn't sufficient to just call remap_type here, because
1359 variably_modified_type_p doesn't work the way we expect for
1360 record types. Testing each field for whether it needs remapping
1361 and creating a new record by hand works, however. */
1767a056 1362 for (f = TYPE_FIELDS (type); f ; f = DECL_CHAIN (f))
1e8e9920 1363 if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
1364 break;
1365 if (f)
1366 {
1367 tree name, new_fields = NULL;
1368
1369 type = lang_hooks.types.make_type (RECORD_TYPE);
1370 name = DECL_NAME (TYPE_NAME (ctx->record_type));
e60a6f7b 1371 name = build_decl (DECL_SOURCE_LOCATION (ctx->receiver_decl),
1372 TYPE_DECL, name, type);
1e8e9920 1373 TYPE_NAME (type) = name;
1374
1767a056 1375 for (f = TYPE_FIELDS (ctx->record_type); f ; f = DECL_CHAIN (f))
1e8e9920 1376 {
1377 tree new_f = copy_node (f);
1378 DECL_CONTEXT (new_f) = type;
1379 TREE_TYPE (new_f) = remap_type (TREE_TYPE (f), &ctx->cb);
1767a056 1380 DECL_CHAIN (new_f) = new_fields;
75a70cf9 1381 walk_tree (&DECL_SIZE (new_f), copy_tree_body_r, &ctx->cb, NULL);
1382 walk_tree (&DECL_SIZE_UNIT (new_f), copy_tree_body_r,
1383 &ctx->cb, NULL);
1384 walk_tree (&DECL_FIELD_OFFSET (new_f), copy_tree_body_r,
1385 &ctx->cb, NULL);
1e8e9920 1386 new_fields = new_f;
1387
1388 /* Arrange to be able to look up the receiver field
1389 given the sender field. */
1390 splay_tree_insert (ctx->field_map, (splay_tree_key) f,
1391 (splay_tree_value) new_f);
1392 }
1393 TYPE_FIELDS (type) = nreverse (new_fields);
1394 layout_type (type);
1395 }
1396
1397 TREE_TYPE (ctx->receiver_decl) = build_pointer_type (type);
1398}
1399
1400/* Instantiate decls as necessary in CTX to satisfy the data sharing
1401 specified by CLAUSES. */
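
/* For example (a sketch): for "#pragma omp parallel shared(a)
   firstprivate(b)", the first pass below installs
   communication-record fields and local copies for both A and B, the
   second pass fixes up the remapped decls, and array reductions
   trigger a final walk of their init/merge sequences.  */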
1402
1403static void
1404scan_sharing_clauses (tree clauses, omp_context *ctx)
1405{
1406 tree c, decl;
1407 bool scan_array_reductions = false;
1408
1409 for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
1410 {
1411 bool by_ref;
1412
55d6e7cd 1413 switch (OMP_CLAUSE_CODE (c))
1e8e9920 1414 {
1415 case OMP_CLAUSE_PRIVATE:
1416 decl = OMP_CLAUSE_DECL (c);
fd6481cf 1417 if (OMP_CLAUSE_PRIVATE_OUTER_REF (c))
1418 goto do_private;
1419 else if (!is_variable_sized (decl))
1e8e9920 1420 install_var_local (decl, ctx);
1421 break;
1422
1423 case OMP_CLAUSE_SHARED:
fd6481cf 1424 gcc_assert (is_taskreg_ctx (ctx));
1e8e9920 1425 decl = OMP_CLAUSE_DECL (c);
e7327393 1426 gcc_assert (!COMPLETE_TYPE_P (TREE_TYPE (decl))
1427 || !is_variable_sized (decl));
f49d7bb5 1428 /* Global variables don't need to be copied,
1429 the receiver side will use them directly. */
1430 if (is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx)))
1431 break;
fd6481cf 1432 by_ref = use_pointer_for_field (decl, ctx);
1e8e9920 1433 if (! TREE_READONLY (decl)
1434 || TREE_ADDRESSABLE (decl)
1435 || by_ref
1436 || is_reference (decl))
1437 {
fd6481cf 1438 install_var_field (decl, by_ref, 3, ctx);
1e8e9920 1439 install_var_local (decl, ctx);
1440 break;
1441 }
1442 /* We don't need to copy const scalar vars back. */
55d6e7cd 1443 OMP_CLAUSE_SET_CODE (c, OMP_CLAUSE_FIRSTPRIVATE);
1e8e9920 1444 goto do_private;
1445
1446 case OMP_CLAUSE_LASTPRIVATE:
1447 /* Let the corresponding firstprivate clause create
1448 the variable. */
1449 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
1450 break;
1451 /* FALLTHRU */
1452
1453 case OMP_CLAUSE_FIRSTPRIVATE:
1454 case OMP_CLAUSE_REDUCTION:
3d483a94 1455 case OMP_CLAUSE_LINEAR:
1e8e9920 1456 decl = OMP_CLAUSE_DECL (c);
1457 do_private:
1458 if (is_variable_sized (decl))
1e8e9920 1459 {
fd6481cf 1460 if (is_task_ctx (ctx))
1461 install_var_field (decl, false, 1, ctx);
1462 break;
1463 }
1464 else if (is_taskreg_ctx (ctx))
1465 {
1466 bool global
1467 = is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx));
e8a588af 1468 by_ref = use_pointer_for_field (decl, NULL);
fd6481cf 1469
1470 if (is_task_ctx (ctx)
1471 && (global || by_ref || is_reference (decl)))
1472 {
1473 install_var_field (decl, false, 1, ctx);
1474 if (!global)
1475 install_var_field (decl, by_ref, 2, ctx);
1476 }
1477 else if (!global)
1478 install_var_field (decl, by_ref, 3, ctx);
1e8e9920 1479 }
1480 install_var_local (decl, ctx);
1481 break;
1482
1483 case OMP_CLAUSE_COPYPRIVATE:
1e8e9920 1484 case OMP_CLAUSE_COPYIN:
1485 decl = OMP_CLAUSE_DECL (c);
e8a588af 1486 by_ref = use_pointer_for_field (decl, NULL);
fd6481cf 1487 install_var_field (decl, by_ref, 3, ctx);
1e8e9920 1488 break;
1489
1490 case OMP_CLAUSE_DEFAULT:
1491 ctx->default_kind = OMP_CLAUSE_DEFAULT_KIND (c);
1492 break;
1493
2169f33b 1494 case OMP_CLAUSE_FINAL:
1e8e9920 1495 case OMP_CLAUSE_IF:
1496 case OMP_CLAUSE_NUM_THREADS:
1497 case OMP_CLAUSE_SCHEDULE:
1498 if (ctx->outer)
75a70cf9 1499 scan_omp_op (&OMP_CLAUSE_OPERAND (c, 0), ctx->outer);
1e8e9920 1500 break;
1501
1502 case OMP_CLAUSE_NOWAIT:
1503 case OMP_CLAUSE_ORDERED:
fd6481cf 1504 case OMP_CLAUSE_COLLAPSE:
1505 case OMP_CLAUSE_UNTIED:
2169f33b 1506 case OMP_CLAUSE_MERGEABLE:
3d483a94 1507 case OMP_CLAUSE_SAFELEN:
1e8e9920 1508 break;
1509
1510 default:
1511 gcc_unreachable ();
1512 }
1513 }
1514
1515 for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
1516 {
55d6e7cd 1517 switch (OMP_CLAUSE_CODE (c))
1e8e9920 1518 {
1519 case OMP_CLAUSE_LASTPRIVATE:
1520 /* Let the corresponding firstprivate clause create
1521 the variable. */
75a70cf9 1522 if (OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c))
fd6481cf 1523 scan_array_reductions = true;
1e8e9920 1524 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
1525 break;
1526 /* FALLTHRU */
1527
1528 case OMP_CLAUSE_PRIVATE:
1529 case OMP_CLAUSE_FIRSTPRIVATE:
1530 case OMP_CLAUSE_REDUCTION:
3d483a94 1531 case OMP_CLAUSE_LINEAR:
1e8e9920 1532 decl = OMP_CLAUSE_DECL (c);
1533 if (is_variable_sized (decl))
1534 install_var_local (decl, ctx);
1535 fixup_remapped_decl (decl, ctx,
55d6e7cd 1536 OMP_CLAUSE_CODE (c) == OMP_CLAUSE_PRIVATE
1e8e9920 1537 && OMP_CLAUSE_PRIVATE_DEBUG (c));
55d6e7cd 1538 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
1e8e9920 1539 && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
1540 scan_array_reductions = true;
1541 break;
1542
1543 case OMP_CLAUSE_SHARED:
1544 decl = OMP_CLAUSE_DECL (c);
f49d7bb5 1545 if (! is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx)))
1546 fixup_remapped_decl (decl, ctx, false);
1e8e9920 1547 break;
1548
1549 case OMP_CLAUSE_COPYPRIVATE:
1550 case OMP_CLAUSE_COPYIN:
1551 case OMP_CLAUSE_DEFAULT:
1552 case OMP_CLAUSE_IF:
1553 case OMP_CLAUSE_NUM_THREADS:
1554 case OMP_CLAUSE_SCHEDULE:
1555 case OMP_CLAUSE_NOWAIT:
1556 case OMP_CLAUSE_ORDERED:
fd6481cf 1557 case OMP_CLAUSE_COLLAPSE:
1558 case OMP_CLAUSE_UNTIED:
2169f33b 1559 case OMP_CLAUSE_FINAL:
1560 case OMP_CLAUSE_MERGEABLE:
3d483a94 1561 case OMP_CLAUSE_SAFELEN:
1e8e9920 1562 break;
1563
1564 default:
1565 gcc_unreachable ();
1566 }
1567 }
1568
1569 if (scan_array_reductions)
1570 for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
55d6e7cd 1571 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
1e8e9920 1572 && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
1573 {
ab129075 1574 scan_omp (&OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c), ctx);
1575 scan_omp (&OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c), ctx);
1e8e9920 1576 }
fd6481cf 1577 else if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE
75a70cf9 1578 && OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c))
ab129075 1579 scan_omp (&OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c), ctx);
1e8e9920 1580}
1581
1582/* Create a new name for omp child function. Returns an identifier. */
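
/* E.g. for a containing function foo this produces identifiers of
   the form foo._omp_fn.N, or foo._omp_cpyfn.N for task copy
   functions (clone_function_name appends the sequence number).  */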
1583
1584static GTY(()) unsigned int tmp_ompfn_id_num;
1585
1586static tree
fd6481cf 1587create_omp_child_function_name (bool task_copy)
1e8e9920 1588{
a70a5e2c 1589 return (clone_function_name (current_function_decl,
1590 task_copy ? "_omp_cpyfn" : "_omp_fn"));
1e8e9920 1591}
1592
1593/* Build a decl for the omp child function. It'll not contain a body
1594 yet, just the bare decl. */
1595
1596static void
fd6481cf 1597create_omp_child_function (omp_context *ctx, bool task_copy)
1e8e9920 1598{
1599 tree decl, type, name, t;
1600
fd6481cf 1601 name = create_omp_child_function_name (task_copy);
1602 if (task_copy)
1603 type = build_function_type_list (void_type_node, ptr_type_node,
1604 ptr_type_node, NULL_TREE);
1605 else
1606 type = build_function_type_list (void_type_node, ptr_type_node, NULL_TREE);
1e8e9920 1607
e60a6f7b 1608 decl = build_decl (gimple_location (ctx->stmt),
1609 FUNCTION_DECL, name, type);
1e8e9920 1610
fd6481cf 1611 if (!task_copy)
1612 ctx->cb.dst_fn = decl;
1613 else
75a70cf9 1614 gimple_omp_task_set_copy_fn (ctx->stmt, decl);
1e8e9920 1615
1616 TREE_STATIC (decl) = 1;
1617 TREE_USED (decl) = 1;
1618 DECL_ARTIFICIAL (decl) = 1;
84bfaaeb 1619 DECL_NAMELESS (decl) = 1;
1e8e9920 1620 DECL_IGNORED_P (decl) = 0;
1621 TREE_PUBLIC (decl) = 0;
1622 DECL_UNINLINABLE (decl) = 1;
1623 DECL_EXTERNAL (decl) = 0;
1624 DECL_CONTEXT (decl) = NULL_TREE;
773c5ba7 1625 DECL_INITIAL (decl) = make_node (BLOCK);
1e8e9920 1626
e60a6f7b 1627 t = build_decl (DECL_SOURCE_LOCATION (decl),
1628 RESULT_DECL, NULL_TREE, void_type_node);
1e8e9920 1629 DECL_ARTIFICIAL (t) = 1;
1630 DECL_IGNORED_P (t) = 1;
8e5b4ed6 1631 DECL_CONTEXT (t) = decl;
1e8e9920 1632 DECL_RESULT (decl) = t;
1633
e60a6f7b 1634 t = build_decl (DECL_SOURCE_LOCATION (decl),
1635 PARM_DECL, get_identifier (".omp_data_i"), ptr_type_node);
1e8e9920 1636 DECL_ARTIFICIAL (t) = 1;
84bfaaeb 1637 DECL_NAMELESS (t) = 1;
1e8e9920 1638 DECL_ARG_TYPE (t) = ptr_type_node;
773c5ba7 1639 DECL_CONTEXT (t) = current_function_decl;
1e8e9920 1640 TREE_USED (t) = 1;
1641 DECL_ARGUMENTS (decl) = t;
fd6481cf 1642 if (!task_copy)
1643 ctx->receiver_decl = t;
1644 else
1645 {
e60a6f7b 1646 t = build_decl (DECL_SOURCE_LOCATION (decl),
1647 PARM_DECL, get_identifier (".omp_data_o"),
fd6481cf 1648 ptr_type_node);
1649 DECL_ARTIFICIAL (t) = 1;
84bfaaeb 1650 DECL_NAMELESS (t) = 1;
fd6481cf 1651 DECL_ARG_TYPE (t) = ptr_type_node;
1652 DECL_CONTEXT (t) = current_function_decl;
1653 TREE_USED (t) = 1;
86f2ad37 1654 TREE_ADDRESSABLE (t) = 1;
1767a056 1655 DECL_CHAIN (t) = DECL_ARGUMENTS (decl);
fd6481cf 1656 DECL_ARGUMENTS (decl) = t;
1657 }
1e8e9920 1658
48e1416a 1659 /* Allocate memory for the function structure. The call to
773c5ba7 1660 allocate_struct_function clobbers CFUN, so we need to restore
1e8e9920 1661 it afterward. */
87d4aa85 1662 push_struct_function (decl);
75a70cf9 1663 cfun->function_end_locus = gimple_location (ctx->stmt);
87d4aa85 1664 pop_cfun ();
1e8e9920 1665}
1666
1e8e9920 1667/* Scan an OpenMP parallel directive. */
1668
1669static void
75a70cf9 1670scan_omp_parallel (gimple_stmt_iterator *gsi, omp_context *outer_ctx)
1e8e9920 1671{
1672 omp_context *ctx;
1673 tree name;
75a70cf9 1674 gimple stmt = gsi_stmt (*gsi);
1e8e9920 1675
1676 /* Ignore parallel directives with empty bodies, unless there
1677 are copyin clauses. */
1678 if (optimize > 0
75a70cf9 1679 && empty_body_p (gimple_omp_body (stmt))
1680 && find_omp_clause (gimple_omp_parallel_clauses (stmt),
1681 OMP_CLAUSE_COPYIN) == NULL)
1e8e9920 1682 {
75a70cf9 1683 gsi_replace (gsi, gimple_build_nop (), false);
1e8e9920 1684 return;
1685 }
1686
75a70cf9 1687 ctx = new_omp_context (stmt, outer_ctx);
fd6481cf 1688 if (taskreg_nesting_level > 1)
773c5ba7 1689 ctx->is_nested = true;
1e8e9920 1690 ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
1e8e9920 1691 ctx->default_kind = OMP_CLAUSE_DEFAULT_SHARED;
1692 ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
1e8e9920 1693 name = create_tmp_var_name (".omp_data_s");
e60a6f7b 1694 name = build_decl (gimple_location (stmt),
1695 TYPE_DECL, name, ctx->record_type);
84bfaaeb 1696 DECL_ARTIFICIAL (name) = 1;
1697 DECL_NAMELESS (name) = 1;
1e8e9920 1698 TYPE_NAME (ctx->record_type) = name;
fd6481cf 1699 create_omp_child_function (ctx, false);
75a70cf9 1700 gimple_omp_parallel_set_child_fn (stmt, ctx->cb.dst_fn);
1e8e9920 1701
75a70cf9 1702 scan_sharing_clauses (gimple_omp_parallel_clauses (stmt), ctx);
ab129075 1703 scan_omp (gimple_omp_body_ptr (stmt), ctx);
1e8e9920 1704
1705 if (TYPE_FIELDS (ctx->record_type) == NULL)
1706 ctx->record_type = ctx->receiver_decl = NULL;
1707 else
1708 {
1709 layout_type (ctx->record_type);
1710 fixup_child_record_type (ctx);
1711 }
1712}
1713
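/* For example (editor's illustration): given

     #pragma omp parallel shared (a) firstprivate (b)
       a = a + b;

   the scan above builds a record type along the lines of

     struct .omp_data_s { int *a; int b; };

   whose fields are filled in by scan_sharing_clauses; 'a' is communicated
   by reference here because use_pointer_for_field decided so, thus the
   exact layout may differ.  */
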
fd6481cf 1714/* Scan an OpenMP task directive. */
1715
1716static void
75a70cf9 1717scan_omp_task (gimple_stmt_iterator *gsi, omp_context *outer_ctx)
fd6481cf 1718{
1719 omp_context *ctx;
75a70cf9 1720 tree name, t;
1721 gimple stmt = gsi_stmt (*gsi);
389dd41b 1722 location_t loc = gimple_location (stmt);
fd6481cf 1723
1724 /* Ignore task directives with empty bodies. */
1725 if (optimize > 0
75a70cf9 1726 && empty_body_p (gimple_omp_body (stmt)))
fd6481cf 1727 {
75a70cf9 1728 gsi_replace (gsi, gimple_build_nop (), false);
fd6481cf 1729 return;
1730 }
1731
75a70cf9 1732 ctx = new_omp_context (stmt, outer_ctx);
fd6481cf 1733 if (taskreg_nesting_level > 1)
1734 ctx->is_nested = true;
1735 ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
1736 ctx->default_kind = OMP_CLAUSE_DEFAULT_SHARED;
1737 ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
1738 name = create_tmp_var_name (".omp_data_s");
e60a6f7b 1739 name = build_decl (gimple_location (stmt),
1740 TYPE_DECL, name, ctx->record_type);
84bfaaeb 1741 DECL_ARTIFICIAL (name) = 1;
1742 DECL_NAMELESS (name) = 1;
fd6481cf 1743 TYPE_NAME (ctx->record_type) = name;
1744 create_omp_child_function (ctx, false);
75a70cf9 1745 gimple_omp_task_set_child_fn (stmt, ctx->cb.dst_fn);
fd6481cf 1746
75a70cf9 1747 scan_sharing_clauses (gimple_omp_task_clauses (stmt), ctx);
fd6481cf 1748
1749 if (ctx->srecord_type)
1750 {
1751 name = create_tmp_var_name (".omp_data_a");
e60a6f7b 1752 name = build_decl (gimple_location (stmt),
1753 TYPE_DECL, name, ctx->srecord_type);
84bfaaeb 1754 DECL_ARTIFICIAL (name) = 1;
1755 DECL_NAMELESS (name) = 1;
fd6481cf 1756 TYPE_NAME (ctx->srecord_type) = name;
1757 create_omp_child_function (ctx, true);
1758 }
1759
ab129075 1760 scan_omp (gimple_omp_body_ptr (stmt), ctx);
fd6481cf 1761
1762 if (TYPE_FIELDS (ctx->record_type) == NULL)
1763 {
1764 ctx->record_type = ctx->receiver_decl = NULL;
75a70cf9 1765 t = build_int_cst (long_integer_type_node, 0);
1766 gimple_omp_task_set_arg_size (stmt, t);
1767 t = build_int_cst (long_integer_type_node, 1);
1768 gimple_omp_task_set_arg_align (stmt, t);
fd6481cf 1769 }
1770 else
1771 {
1772 tree *p, vla_fields = NULL_TREE, *q = &vla_fields;
1773 /* Move VLA fields to the end. */
1774 p = &TYPE_FIELDS (ctx->record_type);
1775 while (*p)
1776 if (!TYPE_SIZE_UNIT (TREE_TYPE (*p))
1777 || ! TREE_CONSTANT (TYPE_SIZE_UNIT (TREE_TYPE (*p))))
1778 {
1779 *q = *p;
1780 *p = TREE_CHAIN (*p);
1781 TREE_CHAIN (*q) = NULL_TREE;
1782 q = &TREE_CHAIN (*q);
1783 }
1784 else
1767a056 1785 p = &DECL_CHAIN (*p);
fd6481cf 1786 *p = vla_fields;
1787 layout_type (ctx->record_type);
1788 fixup_child_record_type (ctx);
1789 if (ctx->srecord_type)
1790 layout_type (ctx->srecord_type);
389dd41b 1791 t = fold_convert_loc (loc, long_integer_type_node,
fd6481cf 1792 TYPE_SIZE_UNIT (ctx->record_type));
75a70cf9 1793 gimple_omp_task_set_arg_size (stmt, t);
1794 t = build_int_cst (long_integer_type_node,
fd6481cf 1795 TYPE_ALIGN_UNIT (ctx->record_type));
75a70cf9 1796 gimple_omp_task_set_arg_align (stmt, t);
fd6481cf 1797 }
1798}
1799
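/* Illustrative note (editor's sketch): the ARG_SIZE and ARG_ALIGN set
   above tell the runtime how big a block to allocate for the task data
   and how to align it; the eventual call, emitted later in
   expand_task_call, is roughly

     GOMP_task (child_fn, &.omp_data_o, copy_fn,
		sizeof (struct .omp_data_s),
		__alignof__ (struct .omp_data_s), ...);

   and degenerates to size 0 / alignment 1 when the record is empty.  */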
1e8e9920 1800
773c5ba7 1801/* Scan an OpenMP loop directive. */
1e8e9920 1802
1803static void
75a70cf9 1804scan_omp_for (gimple stmt, omp_context *outer_ctx)
1e8e9920 1805{
773c5ba7 1806 omp_context *ctx;
75a70cf9 1807 size_t i;
1e8e9920 1808
773c5ba7 1809 ctx = new_omp_context (stmt, outer_ctx);
1e8e9920 1810
75a70cf9 1811 scan_sharing_clauses (gimple_omp_for_clauses (stmt), ctx);
1e8e9920 1812
ab129075 1813 scan_omp (gimple_omp_for_pre_body_ptr (stmt), ctx);
75a70cf9 1814 for (i = 0; i < gimple_omp_for_collapse (stmt); i++)
fd6481cf 1815 {
75a70cf9 1816 scan_omp_op (gimple_omp_for_index_ptr (stmt, i), ctx);
1817 scan_omp_op (gimple_omp_for_initial_ptr (stmt, i), ctx);
1818 scan_omp_op (gimple_omp_for_final_ptr (stmt, i), ctx);
1819 scan_omp_op (gimple_omp_for_incr_ptr (stmt, i), ctx);
fd6481cf 1820 }
ab129075 1821 scan_omp (gimple_omp_body_ptr (stmt), ctx);
1e8e9920 1822}
1823
1824/* Scan an OpenMP sections directive. */
1825
1826static void
75a70cf9 1827scan_omp_sections (gimple stmt, omp_context *outer_ctx)
1e8e9920 1828{
1e8e9920 1829 omp_context *ctx;
1830
1831 ctx = new_omp_context (stmt, outer_ctx);
75a70cf9 1832 scan_sharing_clauses (gimple_omp_sections_clauses (stmt), ctx);
ab129075 1833 scan_omp (gimple_omp_body_ptr (stmt), ctx);
1e8e9920 1834}
1835
1836/* Scan an OpenMP single directive. */
1837
1838static void
75a70cf9 1839scan_omp_single (gimple stmt, omp_context *outer_ctx)
1e8e9920 1840{
1e8e9920 1841 omp_context *ctx;
1842 tree name;
1843
1844 ctx = new_omp_context (stmt, outer_ctx);
1845 ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
1846 ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
1847 name = create_tmp_var_name (".omp_copy_s");
e60a6f7b 1848 name = build_decl (gimple_location (stmt),
1849 TYPE_DECL, name, ctx->record_type);
1e8e9920 1850 TYPE_NAME (ctx->record_type) = name;
1851
75a70cf9 1852 scan_sharing_clauses (gimple_omp_single_clauses (stmt), ctx);
ab129075 1853 scan_omp (gimple_omp_body_ptr (stmt), ctx);
1e8e9920 1854
1855 if (TYPE_FIELDS (ctx->record_type) == NULL)
1856 ctx->record_type = NULL;
1857 else
1858 layout_type (ctx->record_type);
1859}
1860
1e8e9920 1861
c1d127dd 1862/* Check OpenMP nesting restrictions. */
ab129075 1863static bool
1864check_omp_nesting_restrictions (gimple stmt, omp_context *ctx)
c1d127dd 1865{
3d483a94 1866 if (ctx != NULL)
1867 {
1868 if (gimple_code (ctx->stmt) == GIMPLE_OMP_FOR
1869 && gimple_omp_for_kind (ctx->stmt) == GF_OMP_FOR_KIND_SIMD)
1870 {
1871 error_at (gimple_location (stmt),
 1872 		      "OpenMP constructs may not be nested inside a simd region");
1873 return false;
1874 }
1875 }
75a70cf9 1876 switch (gimple_code (stmt))
c1d127dd 1877 {
75a70cf9 1878 case GIMPLE_OMP_FOR:
3d483a94 1879 if (gimple_omp_for_kind (stmt) == GF_OMP_FOR_KIND_SIMD)
1880 return true;
1881 /* FALLTHRU */
75a70cf9 1882 case GIMPLE_OMP_SECTIONS:
1883 case GIMPLE_OMP_SINGLE:
1884 case GIMPLE_CALL:
c1d127dd 1885 for (; ctx != NULL; ctx = ctx->outer)
75a70cf9 1886 switch (gimple_code (ctx->stmt))
c1d127dd 1887 {
75a70cf9 1888 case GIMPLE_OMP_FOR:
1889 case GIMPLE_OMP_SECTIONS:
1890 case GIMPLE_OMP_SINGLE:
1891 case GIMPLE_OMP_ORDERED:
1892 case GIMPLE_OMP_MASTER:
1893 case GIMPLE_OMP_TASK:
1894 if (is_gimple_call (stmt))
fd6481cf 1895 {
ab129075 1896 error_at (gimple_location (stmt),
1897 "barrier region may not be closely nested inside "
 1898 			    "a work-sharing, critical, ordered, master or "
1899 "explicit task region");
1900 return false;
fd6481cf 1901 }
ab129075 1902 error_at (gimple_location (stmt),
1903 "work-sharing region may not be closely nested inside "
 1904 		    "a work-sharing, critical, ordered, master or explicit "
1905 "task region");
1906 return false;
75a70cf9 1907 case GIMPLE_OMP_PARALLEL:
ab129075 1908 return true;
c1d127dd 1909 default:
1910 break;
1911 }
1912 break;
75a70cf9 1913 case GIMPLE_OMP_MASTER:
c1d127dd 1914 for (; ctx != NULL; ctx = ctx->outer)
75a70cf9 1915 switch (gimple_code (ctx->stmt))
c1d127dd 1916 {
75a70cf9 1917 case GIMPLE_OMP_FOR:
1918 case GIMPLE_OMP_SECTIONS:
1919 case GIMPLE_OMP_SINGLE:
1920 case GIMPLE_OMP_TASK:
ab129075 1921 error_at (gimple_location (stmt),
1922 "master region may not be closely nested inside "
 1923 			  "a work-sharing or explicit task region");
1924 return false;
75a70cf9 1925 case GIMPLE_OMP_PARALLEL:
ab129075 1926 return true;
c1d127dd 1927 default:
1928 break;
1929 }
1930 break;
75a70cf9 1931 case GIMPLE_OMP_ORDERED:
c1d127dd 1932 for (; ctx != NULL; ctx = ctx->outer)
75a70cf9 1933 switch (gimple_code (ctx->stmt))
c1d127dd 1934 {
75a70cf9 1935 case GIMPLE_OMP_CRITICAL:
1936 case GIMPLE_OMP_TASK:
ab129075 1937 error_at (gimple_location (stmt),
1938 "ordered region may not be closely nested inside "
 1939 			  "a critical or explicit task region");
1940 return false;
75a70cf9 1941 case GIMPLE_OMP_FOR:
1942 if (find_omp_clause (gimple_omp_for_clauses (ctx->stmt),
c1d127dd 1943 OMP_CLAUSE_ORDERED) == NULL)
ab129075 1944 {
1945 error_at (gimple_location (stmt),
1946 "ordered region must be closely nested inside "
c1d127dd 1947 "a loop region with an ordered clause");
ab129075 1948 return false;
1949 }
1950 return true;
75a70cf9 1951 case GIMPLE_OMP_PARALLEL:
ab129075 1952 return true;
c1d127dd 1953 default:
1954 break;
1955 }
1956 break;
75a70cf9 1957 case GIMPLE_OMP_CRITICAL:
c1d127dd 1958 for (; ctx != NULL; ctx = ctx->outer)
75a70cf9 1959 if (gimple_code (ctx->stmt) == GIMPLE_OMP_CRITICAL
1960 && (gimple_omp_critical_name (stmt)
1961 == gimple_omp_critical_name (ctx->stmt)))
c1d127dd 1962 {
ab129075 1963 error_at (gimple_location (stmt),
1964 "critical region may not be nested inside a critical "
1965 "region with the same name");
1966 return false;
c1d127dd 1967 }
1968 break;
1969 default:
1970 break;
1971 }
ab129075 1972 return true;
c1d127dd 1973}
1974
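/* For instance (editor's example), the checks above reject

     #pragma omp for
     for (i = 0; i < n; i++)
       {
       #pragma omp single
	 f (i);
       }

   because a work-sharing region (the single) may not be closely nested
   inside another work-sharing region (the for); an intervening parallel
   region makes the nesting valid again.  */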
1975
75a70cf9 1976/* Helper function for scan_omp.
1977
 1978    Callback for walk_tree, and for operand scanning via walk_gimple_stmt,
 1979    used to scan for OpenMP directives in TP.  */
1e8e9920 1980
1981static tree
75a70cf9 1982scan_omp_1_op (tree *tp, int *walk_subtrees, void *data)
1e8e9920 1983{
4077bf7a 1984 struct walk_stmt_info *wi = (struct walk_stmt_info *) data;
1985 omp_context *ctx = (omp_context *) wi->info;
1e8e9920 1986 tree t = *tp;
1987
75a70cf9 1988 switch (TREE_CODE (t))
1989 {
1990 case VAR_DECL:
1991 case PARM_DECL:
1992 case LABEL_DECL:
1993 case RESULT_DECL:
1994 if (ctx)
1995 *tp = remap_decl (t, &ctx->cb);
1996 break;
1997
1998 default:
1999 if (ctx && TYPE_P (t))
2000 *tp = remap_type (t, &ctx->cb);
2001 else if (!DECL_P (t))
7cf869dd 2002 {
2003 *walk_subtrees = 1;
2004 if (ctx)
182cf5a9 2005 {
2006 tree tem = remap_type (TREE_TYPE (t), &ctx->cb);
2007 if (tem != TREE_TYPE (t))
2008 {
2009 if (TREE_CODE (t) == INTEGER_CST)
2010 *tp = build_int_cst_wide (tem,
2011 TREE_INT_CST_LOW (t),
2012 TREE_INT_CST_HIGH (t));
2013 else
2014 TREE_TYPE (t) = tem;
2015 }
2016 }
7cf869dd 2017 }
75a70cf9 2018 break;
2019 }
2020
2021 return NULL_TREE;
2022}
2023
2024
2025/* Helper function for scan_omp.
2026
2027 Callback for walk_gimple_stmt used to scan for OpenMP directives in
2028 the current statement in GSI. */
2029
2030static tree
2031scan_omp_1_stmt (gimple_stmt_iterator *gsi, bool *handled_ops_p,
2032 struct walk_stmt_info *wi)
2033{
2034 gimple stmt = gsi_stmt (*gsi);
2035 omp_context *ctx = (omp_context *) wi->info;
2036
2037 if (gimple_has_location (stmt))
2038 input_location = gimple_location (stmt);
1e8e9920 2039
c1d127dd 2040 /* Check the OpenMP nesting restrictions. */
fd6481cf 2041 if (ctx != NULL)
2042 {
ab129075 2043 bool remove = false;
75a70cf9 2044 if (is_gimple_omp (stmt))
ab129075 2045 remove = !check_omp_nesting_restrictions (stmt, ctx);
75a70cf9 2046 else if (is_gimple_call (stmt))
fd6481cf 2047 {
75a70cf9 2048 tree fndecl = gimple_call_fndecl (stmt);
fd6481cf 2049 if (fndecl && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL
2050 && DECL_FUNCTION_CODE (fndecl) == BUILT_IN_GOMP_BARRIER)
ab129075 2051 remove = !check_omp_nesting_restrictions (stmt, ctx);
2052 }
2053 if (remove)
2054 {
2055 stmt = gimple_build_nop ();
2056 gsi_replace (gsi, stmt, false);
fd6481cf 2057 }
2058 }
c1d127dd 2059
75a70cf9 2060 *handled_ops_p = true;
2061
2062 switch (gimple_code (stmt))
1e8e9920 2063 {
75a70cf9 2064 case GIMPLE_OMP_PARALLEL:
fd6481cf 2065 taskreg_nesting_level++;
75a70cf9 2066 scan_omp_parallel (gsi, ctx);
fd6481cf 2067 taskreg_nesting_level--;
2068 break;
2069
75a70cf9 2070 case GIMPLE_OMP_TASK:
fd6481cf 2071 taskreg_nesting_level++;
75a70cf9 2072 scan_omp_task (gsi, ctx);
fd6481cf 2073 taskreg_nesting_level--;
1e8e9920 2074 break;
2075
75a70cf9 2076 case GIMPLE_OMP_FOR:
2077 scan_omp_for (stmt, ctx);
1e8e9920 2078 break;
2079
75a70cf9 2080 case GIMPLE_OMP_SECTIONS:
2081 scan_omp_sections (stmt, ctx);
1e8e9920 2082 break;
2083
75a70cf9 2084 case GIMPLE_OMP_SINGLE:
2085 scan_omp_single (stmt, ctx);
1e8e9920 2086 break;
2087
75a70cf9 2088 case GIMPLE_OMP_SECTION:
2089 case GIMPLE_OMP_MASTER:
2090 case GIMPLE_OMP_ORDERED:
2091 case GIMPLE_OMP_CRITICAL:
2092 ctx = new_omp_context (stmt, ctx);
ab129075 2093 scan_omp (gimple_omp_body_ptr (stmt), ctx);
1e8e9920 2094 break;
2095
75a70cf9 2096 case GIMPLE_BIND:
1e8e9920 2097 {
2098 tree var;
1e8e9920 2099
75a70cf9 2100 *handled_ops_p = false;
2101 if (ctx)
1767a056 2102 for (var = gimple_bind_vars (stmt); var ; var = DECL_CHAIN (var))
75a70cf9 2103 insert_decl_map (&ctx->cb, var, var);
1e8e9920 2104 }
2105 break;
1e8e9920 2106 default:
75a70cf9 2107 *handled_ops_p = false;
1e8e9920 2108 break;
2109 }
2110
2111 return NULL_TREE;
2112}
2113
2114
75a70cf9 2115/* Scan all the statements starting at the current statement. CTX
2116 contains context information about the OpenMP directives and
2117 clauses found during the scan. */
1e8e9920 2118
2119static void
ab129075 2120scan_omp (gimple_seq *body_p, omp_context *ctx)
1e8e9920 2121{
2122 location_t saved_location;
2123 struct walk_stmt_info wi;
2124
2125 memset (&wi, 0, sizeof (wi));
1e8e9920 2126 wi.info = ctx;
1e8e9920 2127 wi.want_locations = true;
2128
2129 saved_location = input_location;
ab129075 2130 walk_gimple_seq_mod (body_p, scan_omp_1_stmt, scan_omp_1_op, &wi);
1e8e9920 2131 input_location = saved_location;
2132}
2133\f
2134/* Re-gimplification and code generation routines. */
2135
2136/* Build a call to GOMP_barrier. */
2137
79acaae1 2138static tree
2139build_omp_barrier (void)
1e8e9920 2140{
b9a16870 2141 return build_call_expr (builtin_decl_explicit (BUILT_IN_GOMP_BARRIER), 0);
1e8e9920 2142}
2143
2144/* If a context was created for STMT when it was scanned, return it. */
2145
2146static omp_context *
75a70cf9 2147maybe_lookup_ctx (gimple stmt)
1e8e9920 2148{
2149 splay_tree_node n;
2150 n = splay_tree_lookup (all_contexts, (splay_tree_key) stmt);
2151 return n ? (omp_context *) n->value : NULL;
2152}
2153
773c5ba7 2154
2155/* Find the mapping for DECL in CTX or the immediately enclosing
2156 context that has a mapping for DECL.
2157
2158 If CTX is a nested parallel directive, we may have to use the decl
2159 mappings created in CTX's parent context. Suppose that we have the
 2160   following parallel nesting (variable UIDs shown for clarity):
2161
2162 iD.1562 = 0;
2163 #omp parallel shared(iD.1562) -> outer parallel
2164 iD.1562 = iD.1562 + 1;
2165
2166 #omp parallel shared (iD.1562) -> inner parallel
2167 iD.1562 = iD.1562 - 1;
2168
2169 Each parallel structure will create a distinct .omp_data_s structure
2170 for copying iD.1562 in/out of the directive:
2171
2172 outer parallel .omp_data_s.1.i -> iD.1562
2173 inner parallel .omp_data_s.2.i -> iD.1562
2174
2175 A shared variable mapping will produce a copy-out operation before
2176 the parallel directive and a copy-in operation after it. So, in
2177 this case we would have:
2178
2179 iD.1562 = 0;
2180 .omp_data_o.1.i = iD.1562;
2181 #omp parallel shared(iD.1562) -> outer parallel
2182 .omp_data_i.1 = &.omp_data_o.1
2183 .omp_data_i.1->i = .omp_data_i.1->i + 1;
2184
2185 .omp_data_o.2.i = iD.1562; -> **
2186 #omp parallel shared(iD.1562) -> inner parallel
2187 .omp_data_i.2 = &.omp_data_o.2
2188 .omp_data_i.2->i = .omp_data_i.2->i - 1;
2189
2190
2191 ** This is a problem. The symbol iD.1562 cannot be referenced
2192 inside the body of the outer parallel region. But since we are
2193 emitting this copy operation while expanding the inner parallel
2194 directive, we need to access the CTX structure of the outer
2195 parallel directive to get the correct mapping:
2196
2197 .omp_data_o.2.i = .omp_data_i.1->i
2198
2199 Since there may be other workshare or parallel directives enclosing
2200 the parallel directive, it may be necessary to walk up the context
2201 parent chain. This is not a problem in general because nested
2202 parallelism happens only rarely. */
2203
2204static tree
2205lookup_decl_in_outer_ctx (tree decl, omp_context *ctx)
2206{
2207 tree t;
2208 omp_context *up;
2209
773c5ba7 2210 for (up = ctx->outer, t = NULL; up && t == NULL; up = up->outer)
2211 t = maybe_lookup_decl (decl, up);
2212
87b31375 2213 gcc_assert (!ctx->is_nested || t || is_global_var (decl));
773c5ba7 2214
c37594c7 2215 return t ? t : decl;
773c5ba7 2216}
2217
2218
f49d7bb5 2219/* Similar to lookup_decl_in_outer_ctx, but return DECL if not found
 2220   in outer contexts, without asserting that a mapping exists.  */
2221
2222static tree
2223maybe_lookup_decl_in_outer_ctx (tree decl, omp_context *ctx)
2224{
2225 tree t = NULL;
2226 omp_context *up;
2227
87b31375 2228 for (up = ctx->outer, t = NULL; up && t == NULL; up = up->outer)
2229 t = maybe_lookup_decl (decl, up);
f49d7bb5 2230
2231 return t ? t : decl;
2232}
2233
2234
1e8e9920 2235/* Construct the initialization value for reduction CLAUSE. */
2236
2237tree
2238omp_reduction_init (tree clause, tree type)
2239{
389dd41b 2240 location_t loc = OMP_CLAUSE_LOCATION (clause);
1e8e9920 2241 switch (OMP_CLAUSE_REDUCTION_CODE (clause))
2242 {
2243 case PLUS_EXPR:
2244 case MINUS_EXPR:
2245 case BIT_IOR_EXPR:
2246 case BIT_XOR_EXPR:
2247 case TRUTH_OR_EXPR:
2248 case TRUTH_ORIF_EXPR:
2249 case TRUTH_XOR_EXPR:
2250 case NE_EXPR:
385f3f36 2251 return build_zero_cst (type);
1e8e9920 2252
2253 case MULT_EXPR:
2254 case TRUTH_AND_EXPR:
2255 case TRUTH_ANDIF_EXPR:
2256 case EQ_EXPR:
389dd41b 2257 return fold_convert_loc (loc, type, integer_one_node);
1e8e9920 2258
2259 case BIT_AND_EXPR:
389dd41b 2260 return fold_convert_loc (loc, type, integer_minus_one_node);
1e8e9920 2261
2262 case MAX_EXPR:
2263 if (SCALAR_FLOAT_TYPE_P (type))
2264 {
2265 REAL_VALUE_TYPE max, min;
2266 if (HONOR_INFINITIES (TYPE_MODE (type)))
2267 {
2268 real_inf (&max);
2269 real_arithmetic (&min, NEGATE_EXPR, &max, NULL);
2270 }
2271 else
2272 real_maxval (&min, 1, TYPE_MODE (type));
2273 return build_real (type, min);
2274 }
2275 else
2276 {
2277 gcc_assert (INTEGRAL_TYPE_P (type));
2278 return TYPE_MIN_VALUE (type);
2279 }
2280
2281 case MIN_EXPR:
2282 if (SCALAR_FLOAT_TYPE_P (type))
2283 {
2284 REAL_VALUE_TYPE max;
2285 if (HONOR_INFINITIES (TYPE_MODE (type)))
2286 real_inf (&max);
2287 else
2288 real_maxval (&max, 0, TYPE_MODE (type));
2289 return build_real (type, max);
2290 }
2291 else
2292 {
2293 gcc_assert (INTEGRAL_TYPE_P (type));
2294 return TYPE_MAX_VALUE (type);
2295 }
2296
2297 default:
2298 gcc_unreachable ();
2299 }
2300}
2301
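/* Summarizing the initial values chosen above (editor's note):

     reduction (+:x)    x_priv = 0	(likewise -, |, ^, ||, !=)
     reduction (*:x)    x_priv = 1	(likewise &&, ==)
     reduction (&:x)    x_priv = ~0
     reduction (max:x)  x_priv = minimum of the type
				(-inf for floats honoring infinities)
     reduction (min:x)  x_priv = maximum of the type
				(+inf for floats honoring infinities)  */
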
3d483a94 2302/* Return maximum possible vectorization factor for the target. */
2303
2304static int
2305omp_max_vf (void)
2306{
2307 if (!optimize
2308 || optimize_debug
043115ec 2309 || (!flag_tree_loop_vectorize
2310 && (global_options_set.x_flag_tree_loop_vectorize
2311 || global_options_set.x_flag_tree_vectorize)))
3d483a94 2312 return 1;
2313
2314 int vs = targetm.vectorize.autovectorize_vector_sizes ();
2315 if (vs)
2316 {
2317 vs = 1 << floor_log2 (vs);
2318 return vs;
2319 }
2320 enum machine_mode vqimode = targetm.vectorize.preferred_simd_mode (QImode);
2321 if (GET_MODE_CLASS (vqimode) == MODE_VECTOR_INT)
2322 return GET_MODE_NUNITS (vqimode);
2323 return 1;
2324}
2325
2326/* Helper function of lower_rec_input_clauses, used for #pragma omp simd
2327 privatization. */
2328
2329static bool
2330lower_rec_simd_input_clauses (tree new_var, omp_context *ctx, int &max_vf,
2331 tree &idx, tree &lane, tree &ivar, tree &lvar)
2332{
2333 if (max_vf == 0)
2334 {
2335 max_vf = omp_max_vf ();
2336 if (max_vf > 1)
2337 {
2338 tree c = find_omp_clause (gimple_omp_for_clauses (ctx->stmt),
2339 OMP_CLAUSE_SAFELEN);
2340 if (c
2341 && compare_tree_int (OMP_CLAUSE_SAFELEN_EXPR (c), max_vf) == -1)
2342 max_vf = tree_low_cst (OMP_CLAUSE_SAFELEN_EXPR (c), 0);
2343 }
2344 if (max_vf > 1)
2345 {
2346 idx = create_tmp_var (unsigned_type_node, NULL);
2347 lane = create_tmp_var (unsigned_type_node, NULL);
2348 }
2349 }
2350 if (max_vf == 1)
2351 return false;
2352
2353 tree atype = build_array_type_nelts (TREE_TYPE (new_var), max_vf);
2354 tree avar = create_tmp_var_raw (atype, NULL);
2355 if (TREE_ADDRESSABLE (new_var))
2356 TREE_ADDRESSABLE (avar) = 1;
2357 DECL_ATTRIBUTES (avar)
2358 = tree_cons (get_identifier ("omp simd array"), NULL,
2359 DECL_ATTRIBUTES (avar));
2360 gimple_add_tmp_var (avar);
2361 ivar = build4 (ARRAY_REF, TREE_TYPE (new_var), avar, idx,
2362 NULL_TREE, NULL_TREE);
2363 lvar = build4 (ARRAY_REF, TREE_TYPE (new_var), avar, lane,
2364 NULL_TREE, NULL_TREE);
2365 SET_DECL_VALUE_EXPR (new_var, lvar);
2366 DECL_HAS_VALUE_EXPR_P (new_var) = 1;
2367 return true;
2368}
2369
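/* Schematically (editor's sketch): for a privatized scalar 'x' in a simd
   loop, the function above creates a per-lane array tagged with the
   "omp simd array" attribute, roughly

     type x.arr[max_vf];
     x		->  x.arr[lane]	  in the loop body, via DECL_VALUE_EXPR
     x.arr[idx]			  in the init/destructor loops the caller
				  builds around the body

   so the vectorizer can later map the array elements onto vector
   lanes.  */
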
1e8e9920 2370/* Generate code to implement the input clauses, FIRSTPRIVATE and COPYIN,
2371 from the receiver (aka child) side and initializers for REFERENCE_TYPE
2372 private variables. Initialization statements go in ILIST, while calls
2373 to destructors go in DLIST. */
2374
2375static void
75a70cf9 2376lower_rec_input_clauses (tree clauses, gimple_seq *ilist, gimple_seq *dlist,
1e4afe3c 2377 omp_context *ctx)
1e8e9920 2378{
c2f47e15 2379 tree c, dtor, copyin_seq, x, ptr;
1e8e9920 2380 bool copyin_by_ref = false;
f49d7bb5 2381 bool lastprivate_firstprivate = false;
1e8e9920 2382 int pass;
3d483a94 2383 bool is_simd = (gimple_code (ctx->stmt) == GIMPLE_OMP_FOR
2384 && gimple_omp_for_kind (ctx->stmt) == GF_OMP_FOR_KIND_SIMD);
2385 int max_vf = 0;
2386 tree lane = NULL_TREE, idx = NULL_TREE;
2387 tree ivar = NULL_TREE, lvar = NULL_TREE;
2388 gimple_seq llist[2] = { NULL, NULL };
1e8e9920 2389
1e8e9920 2390 copyin_seq = NULL;
2391
3d483a94 2392 /* Set max_vf=1 (which will later enforce safelen=1) in simd loops
2393 with data sharing clauses referencing variable sized vars. That
2394 is unnecessarily hard to support and very unlikely to result in
2395 vectorized code anyway. */
2396 if (is_simd)
2397 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
2398 switch (OMP_CLAUSE_CODE (c))
2399 {
2400 case OMP_CLAUSE_REDUCTION:
2401 if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
2402 max_vf = 1;
2403 /* FALLTHRU */
2404 case OMP_CLAUSE_PRIVATE:
2405 case OMP_CLAUSE_FIRSTPRIVATE:
2406 case OMP_CLAUSE_LASTPRIVATE:
2407 case OMP_CLAUSE_LINEAR:
2408 if (is_variable_sized (OMP_CLAUSE_DECL (c)))
2409 max_vf = 1;
2410 break;
2411 default:
2412 continue;
2413 }
2414
1e8e9920 2415 /* Do all the fixed sized types in the first pass, and the variable sized
2416 types in the second pass. This makes sure that the scalar arguments to
48e1416a 2417 the variable sized types are processed before we use them in the
1e8e9920 2418 variable sized operations. */
2419 for (pass = 0; pass < 2; ++pass)
2420 {
2421 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
2422 {
55d6e7cd 2423 enum omp_clause_code c_kind = OMP_CLAUSE_CODE (c);
1e8e9920 2424 tree var, new_var;
2425 bool by_ref;
389dd41b 2426 location_t clause_loc = OMP_CLAUSE_LOCATION (c);
1e8e9920 2427
2428 switch (c_kind)
2429 {
2430 case OMP_CLAUSE_PRIVATE:
2431 if (OMP_CLAUSE_PRIVATE_DEBUG (c))
2432 continue;
2433 break;
2434 case OMP_CLAUSE_SHARED:
f49d7bb5 2435 if (maybe_lookup_decl (OMP_CLAUSE_DECL (c), ctx) == NULL)
2436 {
2437 gcc_assert (is_global_var (OMP_CLAUSE_DECL (c)));
2438 continue;
2439 }
1e8e9920 2440 case OMP_CLAUSE_FIRSTPRIVATE:
1e8e9920 2441 case OMP_CLAUSE_COPYIN:
2442 case OMP_CLAUSE_REDUCTION:
2443 break;
3d483a94 2444 case OMP_CLAUSE_LINEAR:
2445 break;
df2c34fc 2446 case OMP_CLAUSE_LASTPRIVATE:
f49d7bb5 2447 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
2448 {
2449 lastprivate_firstprivate = true;
2450 if (pass != 0)
2451 continue;
2452 }
df2c34fc 2453 break;
1e8e9920 2454 default:
2455 continue;
2456 }
2457
2458 new_var = var = OMP_CLAUSE_DECL (c);
2459 if (c_kind != OMP_CLAUSE_COPYIN)
2460 new_var = lookup_decl (var, ctx);
2461
2462 if (c_kind == OMP_CLAUSE_SHARED || c_kind == OMP_CLAUSE_COPYIN)
2463 {
2464 if (pass != 0)
2465 continue;
2466 }
1e8e9920 2467 else if (is_variable_sized (var))
2468 {
773c5ba7 2469 /* For variable sized types, we need to allocate the
2470 actual storage here. Call alloca and store the
2471 result in the pointer decl that we created elsewhere. */
1e8e9920 2472 if (pass == 0)
2473 continue;
2474
fd6481cf 2475 if (c_kind != OMP_CLAUSE_FIRSTPRIVATE || !is_task_ctx (ctx))
2476 {
75a70cf9 2477 gimple stmt;
b9a16870 2478 tree tmp, atmp;
75a70cf9 2479
fd6481cf 2480 ptr = DECL_VALUE_EXPR (new_var);
2481 gcc_assert (TREE_CODE (ptr) == INDIRECT_REF);
2482 ptr = TREE_OPERAND (ptr, 0);
2483 gcc_assert (DECL_P (ptr));
2484 x = TYPE_SIZE_UNIT (TREE_TYPE (new_var));
75a70cf9 2485
2486 /* void *tmp = __builtin_alloca */
b9a16870 2487 atmp = builtin_decl_explicit (BUILT_IN_ALLOCA);
2488 stmt = gimple_build_call (atmp, 1, x);
75a70cf9 2489 tmp = create_tmp_var_raw (ptr_type_node, NULL);
2490 gimple_add_tmp_var (tmp);
2491 gimple_call_set_lhs (stmt, tmp);
2492
2493 gimple_seq_add_stmt (ilist, stmt);
2494
389dd41b 2495 x = fold_convert_loc (clause_loc, TREE_TYPE (ptr), tmp);
75a70cf9 2496 gimplify_assign (ptr, x, ilist);
fd6481cf 2497 }
1e8e9920 2498 }
1e8e9920 2499 else if (is_reference (var))
2500 {
773c5ba7 2501 /* For references that are being privatized for Fortran,
2502 allocate new backing storage for the new pointer
2503 variable. This allows us to avoid changing all the
2504 code that expects a pointer to something that expects
2505 a direct variable. Note that this doesn't apply to
2506 C++, since reference types are disallowed in data
df2c34fc 2507 sharing clauses there, except for NRV optimized
2508 return values. */
1e8e9920 2509 if (pass == 0)
2510 continue;
2511
2512 x = TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (new_var)));
fd6481cf 2513 if (c_kind == OMP_CLAUSE_FIRSTPRIVATE && is_task_ctx (ctx))
2514 {
2515 x = build_receiver_ref (var, false, ctx);
389dd41b 2516 x = build_fold_addr_expr_loc (clause_loc, x);
fd6481cf 2517 }
2518 else if (TREE_CONSTANT (x))
1e8e9920 2519 {
2520 const char *name = NULL;
2521 if (DECL_NAME (var))
2522 name = IDENTIFIER_POINTER (DECL_NAME (new_var));
2523
df2c34fc 2524 x = create_tmp_var_raw (TREE_TYPE (TREE_TYPE (new_var)),
2525 name);
2526 gimple_add_tmp_var (x);
86f2ad37 2527 TREE_ADDRESSABLE (x) = 1;
389dd41b 2528 x = build_fold_addr_expr_loc (clause_loc, x);
1e8e9920 2529 }
2530 else
2531 {
b9a16870 2532 tree atmp = builtin_decl_explicit (BUILT_IN_ALLOCA);
2533 x = build_call_expr_loc (clause_loc, atmp, 1, x);
1e8e9920 2534 }
2535
389dd41b 2536 x = fold_convert_loc (clause_loc, TREE_TYPE (new_var), x);
75a70cf9 2537 gimplify_assign (new_var, x, ilist);
1e8e9920 2538
182cf5a9 2539 new_var = build_simple_mem_ref_loc (clause_loc, new_var);
1e8e9920 2540 }
2541 else if (c_kind == OMP_CLAUSE_REDUCTION
2542 && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
2543 {
2544 if (pass == 0)
2545 continue;
2546 }
2547 else if (pass != 0)
2548 continue;
2549
55d6e7cd 2550 switch (OMP_CLAUSE_CODE (c))
1e8e9920 2551 {
2552 case OMP_CLAUSE_SHARED:
f49d7bb5 2553 /* Shared global vars are just accessed directly. */
2554 if (is_global_var (new_var))
2555 break;
1e8e9920 2556 /* Set up the DECL_VALUE_EXPR for shared variables now. This
2557 needs to be delayed until after fixup_child_record_type so
2558 that we get the correct type during the dereference. */
e8a588af 2559 by_ref = use_pointer_for_field (var, ctx);
1e8e9920 2560 x = build_receiver_ref (var, by_ref, ctx);
2561 SET_DECL_VALUE_EXPR (new_var, x);
2562 DECL_HAS_VALUE_EXPR_P (new_var) = 1;
2563
2564 /* ??? If VAR is not passed by reference, and the variable
2565 hasn't been initialized yet, then we'll get a warning for
2566 the store into the omp_data_s structure. Ideally, we'd be
48e1416a 2567 able to notice this and not store anything at all, but
1e8e9920 2568 we're generating code too early. Suppress the warning. */
2569 if (!by_ref)
2570 TREE_NO_WARNING (var) = 1;
2571 break;
2572
2573 case OMP_CLAUSE_LASTPRIVATE:
2574 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
2575 break;
2576 /* FALLTHRU */
2577
2578 case OMP_CLAUSE_PRIVATE:
fd6481cf 2579 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_PRIVATE)
2580 x = build_outer_var_ref (var, ctx);
2581 else if (OMP_CLAUSE_PRIVATE_OUTER_REF (c))
2582 {
2583 if (is_task_ctx (ctx))
2584 x = build_receiver_ref (var, false, ctx);
2585 else
2586 x = build_outer_var_ref (var, ctx);
2587 }
2588 else
2589 x = NULL;
3d483a94 2590 do_private:
fd6481cf 2591 x = lang_hooks.decls.omp_clause_default_ctor (c, new_var, x);
3d483a94 2592 if (is_simd)
2593 {
2594 tree y = lang_hooks.decls.omp_clause_dtor (c, new_var);
2595 if ((TREE_ADDRESSABLE (new_var) || x || y
2596 || OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE)
2597 && lower_rec_simd_input_clauses (new_var, ctx, max_vf,
2598 idx, lane, ivar, lvar))
2599 {
2600 if (x)
2601 x = lang_hooks.decls.omp_clause_default_ctor
2602 (c, unshare_expr (ivar), x);
2603 if (x)
2604 gimplify_and_add (x, &llist[0]);
2605 if (y)
2606 {
2607 y = lang_hooks.decls.omp_clause_dtor (c, ivar);
2608 if (y)
2609 {
2610 gimple_seq tseq = NULL;
2611
2612 dtor = y;
2613 gimplify_stmt (&dtor, &tseq);
2614 gimple_seq_add_seq (&llist[1], tseq);
2615 }
2616 }
2617 break;
2618 }
2619 }
1e8e9920 2620 if (x)
2621 gimplify_and_add (x, ilist);
2622 /* FALLTHRU */
2623
2624 do_dtor:
2625 x = lang_hooks.decls.omp_clause_dtor (c, new_var);
2626 if (x)
2627 {
75a70cf9 2628 gimple_seq tseq = NULL;
2629
1e8e9920 2630 dtor = x;
75a70cf9 2631 gimplify_stmt (&dtor, &tseq);
e3a19533 2632 gimple_seq_add_seq (dlist, tseq);
1e8e9920 2633 }
2634 break;
2635
3d483a94 2636 case OMP_CLAUSE_LINEAR:
2637 if (!OMP_CLAUSE_LINEAR_NO_COPYIN (c))
2638 goto do_firstprivate;
2639 if (OMP_CLAUSE_LINEAR_NO_COPYOUT (c))
2640 x = NULL;
2641 else
2642 x = build_outer_var_ref (var, ctx);
2643 goto do_private;
2644
1e8e9920 2645 case OMP_CLAUSE_FIRSTPRIVATE:
fd6481cf 2646 if (is_task_ctx (ctx))
2647 {
2648 if (is_reference (var) || is_variable_sized (var))
2649 goto do_dtor;
2650 else if (is_global_var (maybe_lookup_decl_in_outer_ctx (var,
2651 ctx))
2652 || use_pointer_for_field (var, NULL))
2653 {
2654 x = build_receiver_ref (var, false, ctx);
2655 SET_DECL_VALUE_EXPR (new_var, x);
2656 DECL_HAS_VALUE_EXPR_P (new_var) = 1;
2657 goto do_dtor;
2658 }
2659 }
3d483a94 2660 do_firstprivate:
1e8e9920 2661 x = build_outer_var_ref (var, ctx);
3d483a94 2662 if (is_simd)
2663 {
2664 if ((OMP_CLAUSE_CODE (c) != OMP_CLAUSE_LINEAR
2665 || TREE_ADDRESSABLE (new_var))
2666 && lower_rec_simd_input_clauses (new_var, ctx, max_vf,
2667 idx, lane, ivar, lvar))
2668 {
2669 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LINEAR)
2670 {
2671 tree iv = create_tmp_var (TREE_TYPE (new_var), NULL);
2672 x = lang_hooks.decls.omp_clause_copy_ctor (c, iv, x);
2673 gimplify_and_add (x, ilist);
2674 gimple_stmt_iterator gsi
2675 = gsi_start_1 (gimple_omp_body_ptr (ctx->stmt));
2676 gimple g
2677 = gimple_build_assign (unshare_expr (lvar), iv);
2678 gsi_insert_before_without_update (&gsi, g,
2679 GSI_SAME_STMT);
2680 tree stept = POINTER_TYPE_P (TREE_TYPE (x))
2681 ? sizetype : TREE_TYPE (x);
2682 tree t = fold_convert (stept,
2683 OMP_CLAUSE_LINEAR_STEP (c));
2684 enum tree_code code = PLUS_EXPR;
2685 if (POINTER_TYPE_P (TREE_TYPE (new_var)))
2686 code = POINTER_PLUS_EXPR;
2687 g = gimple_build_assign_with_ops (code, iv, iv, t);
2688 gsi_insert_before_without_update (&gsi, g,
2689 GSI_SAME_STMT);
2690 break;
2691 }
2692 x = lang_hooks.decls.omp_clause_copy_ctor
2693 (c, unshare_expr (ivar), x);
2694 gimplify_and_add (x, &llist[0]);
2695 x = lang_hooks.decls.omp_clause_dtor (c, ivar);
2696 if (x)
2697 {
2698 gimple_seq tseq = NULL;
2699
2700 dtor = x;
2701 gimplify_stmt (&dtor, &tseq);
2702 gimple_seq_add_seq (&llist[1], tseq);
2703 }
2704 break;
2705 }
2706 }
1e8e9920 2707 x = lang_hooks.decls.omp_clause_copy_ctor (c, new_var, x);
2708 gimplify_and_add (x, ilist);
2709 goto do_dtor;
1e8e9920 2710
2711 case OMP_CLAUSE_COPYIN:
e8a588af 2712 by_ref = use_pointer_for_field (var, NULL);
1e8e9920 2713 x = build_receiver_ref (var, by_ref, ctx);
2714 x = lang_hooks.decls.omp_clause_assign_op (c, new_var, x);
2715 append_to_statement_list (x, &copyin_seq);
2716 copyin_by_ref |= by_ref;
2717 break;
2718
2719 case OMP_CLAUSE_REDUCTION:
2720 if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
2721 {
fd6481cf 2722 tree placeholder = OMP_CLAUSE_REDUCTION_PLACEHOLDER (c);
2723 x = build_outer_var_ref (var, ctx);
2724
3d483a94 2725 /* FIXME: Not handled yet. */
2726 gcc_assert (!is_simd);
fd6481cf 2727 if (is_reference (var))
389dd41b 2728 x = build_fold_addr_expr_loc (clause_loc, x);
fd6481cf 2729 SET_DECL_VALUE_EXPR (placeholder, x);
2730 DECL_HAS_VALUE_EXPR_P (placeholder) = 1;
e3a19533 2731 lower_omp (&OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c), ctx);
75a70cf9 2732 gimple_seq_add_seq (ilist,
2733 OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c));
2734 OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c) = NULL;
fd6481cf 2735 DECL_HAS_VALUE_EXPR_P (placeholder) = 0;
1e8e9920 2736 }
2737 else
2738 {
2739 x = omp_reduction_init (c, TREE_TYPE (new_var));
2740 gcc_assert (TREE_CODE (TREE_TYPE (new_var)) != ARRAY_TYPE);
3d483a94 2741 if (is_simd
2742 && lower_rec_simd_input_clauses (new_var, ctx, max_vf,
2743 idx, lane, ivar, lvar))
2744 {
2745 enum tree_code code = OMP_CLAUSE_REDUCTION_CODE (c);
2746 tree ref = build_outer_var_ref (var, ctx);
2747
2748 gimplify_assign (unshare_expr (ivar), x, &llist[0]);
2749
2750 /* reduction(-:var) sums up the partial results, so it
2751 acts identically to reduction(+:var). */
2752 if (code == MINUS_EXPR)
2753 code = PLUS_EXPR;
2754
2755 x = build2 (code, TREE_TYPE (ref), ref, ivar);
2756 ref = build_outer_var_ref (var, ctx);
2757 gimplify_assign (ref, x, &llist[1]);
2758 }
2759 else
2760 {
2761 gimplify_assign (new_var, x, ilist);
2762 if (is_simd)
2763 gimplify_assign (build_outer_var_ref (var, ctx),
2764 new_var, dlist);
2765 }
1e8e9920 2766 }
2767 break;
2768
2769 default:
2770 gcc_unreachable ();
2771 }
2772 }
2773 }
2774
3d483a94 2775 if (lane)
2776 {
2777 tree uid = create_tmp_var (ptr_type_node, "simduid");
2778 gimple g
2779 = gimple_build_call_internal (IFN_GOMP_SIMD_LANE, 1, uid);
2780 gimple_call_set_lhs (g, lane);
2781 gimple_stmt_iterator gsi = gsi_start_1 (gimple_omp_body_ptr (ctx->stmt));
2782 gsi_insert_before_without_update (&gsi, g, GSI_SAME_STMT);
2783 c = build_omp_clause (UNKNOWN_LOCATION, OMP_CLAUSE__SIMDUID_);
2784 OMP_CLAUSE__SIMDUID__DECL (c) = uid;
2785 OMP_CLAUSE_CHAIN (c) = gimple_omp_for_clauses (ctx->stmt);
2786 gimple_omp_for_set_clauses (ctx->stmt, c);
2787 g = gimple_build_assign_with_ops (INTEGER_CST, lane,
2788 build_int_cst (unsigned_type_node, 0),
2789 NULL_TREE);
2790 gimple_seq_add_stmt (ilist, g);
2791 for (int i = 0; i < 2; i++)
2792 if (llist[i])
2793 {
2794 tree vf = create_tmp_var (unsigned_type_node, NULL);
2795 g = gimple_build_call_internal (IFN_GOMP_SIMD_VF, 1, uid);
2796 gimple_call_set_lhs (g, vf);
2797 gimple_seq *seq = i == 0 ? ilist : dlist;
2798 gimple_seq_add_stmt (seq, g);
2799 tree t = build_int_cst (unsigned_type_node, 0);
2800 g = gimple_build_assign_with_ops (INTEGER_CST, idx, t, NULL_TREE);
2801 gimple_seq_add_stmt (seq, g);
2802 tree body = create_artificial_label (UNKNOWN_LOCATION);
2803 tree header = create_artificial_label (UNKNOWN_LOCATION);
2804 tree end = create_artificial_label (UNKNOWN_LOCATION);
2805 gimple_seq_add_stmt (seq, gimple_build_goto (header));
2806 gimple_seq_add_stmt (seq, gimple_build_label (body));
2807 gimple_seq_add_seq (seq, llist[i]);
2808 t = build_int_cst (unsigned_type_node, 1);
2809 g = gimple_build_assign_with_ops (PLUS_EXPR, idx, idx, t);
2810 gimple_seq_add_stmt (seq, g);
2811 gimple_seq_add_stmt (seq, gimple_build_label (header));
2812 g = gimple_build_cond (LT_EXPR, idx, vf, body, end);
2813 gimple_seq_add_stmt (seq, g);
2814 gimple_seq_add_stmt (seq, gimple_build_label (end));
2815 }
2816 }
2817
1e8e9920 2818 /* The copyin sequence is not to be executed by the main thread, since
 2819     that would result in self-copies.  A self-copy might go unnoticed
 2820     for scalars, but it certainly is visible to a C++ operator=.  */
2821 if (copyin_seq)
2822 {
b9a16870 2823 x = build_call_expr (builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM),
2824 0);
1e8e9920 2825 x = build2 (NE_EXPR, boolean_type_node, x,
2826 build_int_cst (TREE_TYPE (x), 0));
2827 x = build3 (COND_EXPR, void_type_node, x, copyin_seq, NULL);
2828 gimplify_and_add (x, ilist);
2829 }
2830
2831 /* If any copyin variable is passed by reference, we must ensure the
2832 master thread doesn't modify it before it is copied over in all
f49d7bb5 2833 threads. Similarly for variables in both firstprivate and
2834 lastprivate clauses we need to ensure the lastprivate copying
2835 happens after firstprivate copying in all threads. */
2836 if (copyin_by_ref || lastprivate_firstprivate)
3d483a94 2837 {
2838 /* Don't add any barrier for #pragma omp simd or
2839 #pragma omp distribute. */
2840 if (gimple_code (ctx->stmt) != GIMPLE_OMP_FOR
2841 || gimple_omp_for_kind (ctx->stmt) == GF_OMP_FOR_KIND_FOR)
2842 gimplify_and_add (build_omp_barrier (), ilist);
2843 }
2844
2845 /* If max_vf is non-zero, then we can use only a vectorization factor
2846 up to the max_vf we chose. So stick it into the safelen clause. */
2847 if (max_vf)
2848 {
2849 tree c = find_omp_clause (gimple_omp_for_clauses (ctx->stmt),
2850 OMP_CLAUSE_SAFELEN);
2851 if (c == NULL_TREE
2852 || compare_tree_int (OMP_CLAUSE_SAFELEN_EXPR (c),
2853 max_vf) == 1)
2854 {
2855 c = build_omp_clause (UNKNOWN_LOCATION, OMP_CLAUSE_SAFELEN);
2856 OMP_CLAUSE_SAFELEN_EXPR (c) = build_int_cst (integer_type_node,
2857 max_vf);
2858 OMP_CLAUSE_CHAIN (c) = gimple_omp_for_clauses (ctx->stmt);
2859 gimple_omp_for_set_clauses (ctx->stmt, c);
2860 }
2861 }
1e8e9920 2862}
2863
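/* Putting the simd pieces together (editor's illustration): when LANE was
   needed, the sequences built above amount to

     lane = GOMP_SIMD_LANE (simduid);	(internal fn, resolved later)
     for (idx = 0; idx < GOMP_SIMD_VF (simduid); idx++)
       ...per-lane constructors from llist[0]...     (appended to ILIST)
     for (idx = 0; idx < GOMP_SIMD_VF (simduid); idx++)
       ...per-lane destructors from llist[1]...	     (appended to DLIST)

   with the loops emitted as explicit labels and gotos, as coded
   above.  */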
773c5ba7 2864
1e8e9920 2865/* Generate code to implement the LASTPRIVATE clauses. This is used for
2866 both parallel and workshare constructs. PREDICATE may be NULL if it's
2867 always true. */
2868
2869static void
75a70cf9 2870lower_lastprivate_clauses (tree clauses, tree predicate, gimple_seq *stmt_list,
2871 omp_context *ctx)
1e8e9920 2872{
3d483a94 2873 tree x, c, label = NULL, orig_clauses = clauses;
fd6481cf 2874 bool par_clauses = false;
3d483a94 2875 tree simduid = NULL, lastlane = NULL;
1e8e9920 2876
3d483a94 2877 /* Early exit if there are no lastprivate or linear clauses. */
2878 for (; clauses ; clauses = OMP_CLAUSE_CHAIN (clauses))
2879 if (OMP_CLAUSE_CODE (clauses) == OMP_CLAUSE_LASTPRIVATE
2880 || (OMP_CLAUSE_CODE (clauses) == OMP_CLAUSE_LINEAR
2881 && !OMP_CLAUSE_LINEAR_NO_COPYOUT (clauses)))
2882 break;
1e8e9920 2883 if (clauses == NULL)
2884 {
 2885       /* If this was a workshare construct, see if it had been combined
2886 with its parallel. In that case, look for the clauses on the
2887 parallel statement itself. */
2888 if (is_parallel_ctx (ctx))
2889 return;
2890
2891 ctx = ctx->outer;
2892 if (ctx == NULL || !is_parallel_ctx (ctx))
2893 return;
2894
75a70cf9 2895 clauses = find_omp_clause (gimple_omp_parallel_clauses (ctx->stmt),
1e8e9920 2896 OMP_CLAUSE_LASTPRIVATE);
2897 if (clauses == NULL)
2898 return;
fd6481cf 2899 par_clauses = true;
1e8e9920 2900 }
2901
75a70cf9 2902 if (predicate)
2903 {
2904 gimple stmt;
2905 tree label_true, arm1, arm2;
2906
e60a6f7b 2907 label = create_artificial_label (UNKNOWN_LOCATION);
2908 label_true = create_artificial_label (UNKNOWN_LOCATION);
75a70cf9 2909 arm1 = TREE_OPERAND (predicate, 0);
2910 arm2 = TREE_OPERAND (predicate, 1);
2911 gimplify_expr (&arm1, stmt_list, NULL, is_gimple_val, fb_rvalue);
2912 gimplify_expr (&arm2, stmt_list, NULL, is_gimple_val, fb_rvalue);
2913 stmt = gimple_build_cond (TREE_CODE (predicate), arm1, arm2,
2914 label_true, label);
2915 gimple_seq_add_stmt (stmt_list, stmt);
2916 gimple_seq_add_stmt (stmt_list, gimple_build_label (label_true));
2917 }
1e8e9920 2918
3d483a94 2919 if (gimple_code (ctx->stmt) == GIMPLE_OMP_FOR
2920 && gimple_omp_for_kind (ctx->stmt) == GF_OMP_FOR_KIND_SIMD)
2921 {
2922 simduid = find_omp_clause (orig_clauses, OMP_CLAUSE__SIMDUID_);
2923 if (simduid)
2924 simduid = OMP_CLAUSE__SIMDUID__DECL (simduid);
2925 }
2926
fd6481cf 2927 for (c = clauses; c ;)
1e8e9920 2928 {
2929 tree var, new_var;
389dd41b 2930 location_t clause_loc = OMP_CLAUSE_LOCATION (c);
1e8e9920 2931
3d483a94 2932 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE
2933 || (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LINEAR
2934 && !OMP_CLAUSE_LINEAR_NO_COPYOUT (c)))
fd6481cf 2935 {
2936 var = OMP_CLAUSE_DECL (c);
2937 new_var = lookup_decl (var, ctx);
1e8e9920 2938
3d483a94 2939 if (simduid && DECL_HAS_VALUE_EXPR_P (new_var))
2940 {
2941 tree val = DECL_VALUE_EXPR (new_var);
2942 if (TREE_CODE (val) == ARRAY_REF
2943 && VAR_P (TREE_OPERAND (val, 0))
2944 && lookup_attribute ("omp simd array",
2945 DECL_ATTRIBUTES (TREE_OPERAND (val,
2946 0))))
2947 {
2948 if (lastlane == NULL)
2949 {
2950 lastlane = create_tmp_var (unsigned_type_node, NULL);
2951 gimple g
2952 = gimple_build_call_internal (IFN_GOMP_SIMD_LAST_LANE,
2953 2, simduid,
2954 TREE_OPERAND (val, 1));
2955 gimple_call_set_lhs (g, lastlane);
2956 gimple_seq_add_stmt (stmt_list, g);
2957 }
2958 new_var = build4 (ARRAY_REF, TREE_TYPE (val),
2959 TREE_OPERAND (val, 0), lastlane,
2960 NULL_TREE, NULL_TREE);
2961 }
2962 }
2963
2964 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE
2965 && OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c))
75a70cf9 2966 {
e3a19533 2967 lower_omp (&OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c), ctx);
75a70cf9 2968 gimple_seq_add_seq (stmt_list,
2969 OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c));
3d483a94 2970 OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c) = NULL;
75a70cf9 2971 }
1e8e9920 2972
fd6481cf 2973 x = build_outer_var_ref (var, ctx);
2974 if (is_reference (var))
182cf5a9 2975 new_var = build_simple_mem_ref_loc (clause_loc, new_var);
fd6481cf 2976 x = lang_hooks.decls.omp_clause_assign_op (c, x, new_var);
75a70cf9 2977 gimplify_and_add (x, stmt_list);
fd6481cf 2978 }
2979 c = OMP_CLAUSE_CHAIN (c);
2980 if (c == NULL && !par_clauses)
2981 {
 2982 	  /* If this was a workshare construct, see if it had been combined
2983 with its parallel. In that case, continue looking for the
2984 clauses also on the parallel statement itself. */
2985 if (is_parallel_ctx (ctx))
2986 break;
2987
2988 ctx = ctx->outer;
2989 if (ctx == NULL || !is_parallel_ctx (ctx))
2990 break;
2991
75a70cf9 2992 c = find_omp_clause (gimple_omp_parallel_clauses (ctx->stmt),
fd6481cf 2993 OMP_CLAUSE_LASTPRIVATE);
2994 par_clauses = true;
2995 }
1e8e9920 2996 }
2997
75a70cf9 2998 if (label)
2999 gimple_seq_add_stmt (stmt_list, gimple_build_label (label));
1e8e9920 3000}
3001
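/* For example (editor's sketch): with a non-null PREDICATE and clause
   lastprivate (x), the sequence built above is roughly

     if (PREDICATE) goto label_true; else goto label;
     label_true:
       x_orig = x_priv;		(via omp_clause_assign_op)
     label:

   so the copy-out is performed only by the thread that ran the
   sequentially last iteration or section.  */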
773c5ba7 3002
1e8e9920 3003/* Generate code to implement the REDUCTION clauses. */
3004
3005static void
75a70cf9 3006lower_reduction_clauses (tree clauses, gimple_seq *stmt_seqp, omp_context *ctx)
1e8e9920 3007{
75a70cf9 3008 gimple_seq sub_seq = NULL;
3009 gimple stmt;
3010 tree x, c;
1e8e9920 3011 int count = 0;
3012
3d483a94 3013 /* SIMD reductions are handled in lower_rec_input_clauses. */
3014 if (gimple_code (ctx->stmt) == GIMPLE_OMP_FOR
3015 && gimple_omp_for_kind (ctx->stmt) == GF_OMP_FOR_KIND_SIMD)
3016 return;
3017
1e8e9920 3018 /* First see if there is exactly one reduction clause. Use OMP_ATOMIC
3019 update in that case, otherwise use a lock. */
3020 for (c = clauses; c && count < 2; c = OMP_CLAUSE_CHAIN (c))
55d6e7cd 3021 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION)
1e8e9920 3022 {
3023 if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
3024 {
3025 /* Never use OMP_ATOMIC for array reductions. */
3026 count = -1;
3027 break;
3028 }
3029 count++;
3030 }
3031
3032 if (count == 0)
3033 return;
3034
3035 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
3036 {
3037 tree var, ref, new_var;
3038 enum tree_code code;
389dd41b 3039 location_t clause_loc = OMP_CLAUSE_LOCATION (c);
1e8e9920 3040
55d6e7cd 3041 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_REDUCTION)
1e8e9920 3042 continue;
3043
3044 var = OMP_CLAUSE_DECL (c);
3045 new_var = lookup_decl (var, ctx);
3046 if (is_reference (var))
182cf5a9 3047 new_var = build_simple_mem_ref_loc (clause_loc, new_var);
1e8e9920 3048 ref = build_outer_var_ref (var, ctx);
3049 code = OMP_CLAUSE_REDUCTION_CODE (c);
773c5ba7 3050
3051 /* reduction(-:var) sums up the partial results, so it acts
3052 identically to reduction(+:var). */
1e8e9920 3053 if (code == MINUS_EXPR)
3054 code = PLUS_EXPR;
3055
3056 if (count == 1)
3057 {
389dd41b 3058 tree addr = build_fold_addr_expr_loc (clause_loc, ref);
1e8e9920 3059
3060 addr = save_expr (addr);
3061 ref = build1 (INDIRECT_REF, TREE_TYPE (TREE_TYPE (addr)), addr);
389dd41b 3062 x = fold_build2_loc (clause_loc, code, TREE_TYPE (ref), ref, new_var);
1e8e9920 3063 x = build2 (OMP_ATOMIC, void_type_node, addr, x);
75a70cf9 3064 gimplify_and_add (x, stmt_seqp);
1e8e9920 3065 return;
3066 }
3067
3068 if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
3069 {
3070 tree placeholder = OMP_CLAUSE_REDUCTION_PLACEHOLDER (c);
3071
3072 if (is_reference (var))
389dd41b 3073 ref = build_fold_addr_expr_loc (clause_loc, ref);
1e8e9920 3074 SET_DECL_VALUE_EXPR (placeholder, ref);
3075 DECL_HAS_VALUE_EXPR_P (placeholder) = 1;
e3a19533 3076 lower_omp (&OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c), ctx);
75a70cf9 3077 gimple_seq_add_seq (&sub_seq, OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c));
3078 OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c) = NULL;
1e8e9920 3079 OMP_CLAUSE_REDUCTION_PLACEHOLDER (c) = NULL;
3080 }
3081 else
3082 {
3083 x = build2 (code, TREE_TYPE (ref), ref, new_var);
3084 ref = build_outer_var_ref (var, ctx);
75a70cf9 3085 gimplify_assign (ref, x, &sub_seq);
1e8e9920 3086 }
3087 }
3088
b9a16870 3089 stmt = gimple_build_call (builtin_decl_explicit (BUILT_IN_GOMP_ATOMIC_START),
3090 0);
75a70cf9 3091 gimple_seq_add_stmt (stmt_seqp, stmt);
1e8e9920 3092
75a70cf9 3093 gimple_seq_add_seq (stmt_seqp, sub_seq);
1e8e9920 3094
b9a16870 3095 stmt = gimple_build_call (builtin_decl_explicit (BUILT_IN_GOMP_ATOMIC_END),
3096 0);
75a70cf9 3097 gimple_seq_add_stmt (stmt_seqp, stmt);
1e8e9920 3098}
3099
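/* Net effect (editor's illustration): a single scalar reduction clause
   is flushed with one atomic update,

     #pragma omp atomic
       *addr = *addr op x_priv;		(the OMP_ATOMIC built above)

   while two or more clauses (or an array reduction) are grouped under
   the global lock,

     GOMP_atomic_start ();
     x_orig = x_orig op x_priv;		(one statement per clause)
     GOMP_atomic_end ();  */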
773c5ba7 3100
1e8e9920 3101/* Generate code to implement the COPYPRIVATE clauses. */
3102
3103static void
75a70cf9 3104lower_copyprivate_clauses (tree clauses, gimple_seq *slist, gimple_seq *rlist,
1e8e9920 3105 omp_context *ctx)
3106{
3107 tree c;
3108
3109 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
3110 {
cb561506 3111 tree var, new_var, ref, x;
1e8e9920 3112 bool by_ref;
389dd41b 3113 location_t clause_loc = OMP_CLAUSE_LOCATION (c);
1e8e9920 3114
55d6e7cd 3115 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_COPYPRIVATE)
1e8e9920 3116 continue;
3117
3118 var = OMP_CLAUSE_DECL (c);
e8a588af 3119 by_ref = use_pointer_for_field (var, NULL);
1e8e9920 3120
3121 ref = build_sender_ref (var, ctx);
cb561506 3122 x = new_var = lookup_decl_in_outer_ctx (var, ctx);
3123 if (by_ref)
3124 {
3125 x = build_fold_addr_expr_loc (clause_loc, new_var);
3126 x = fold_convert_loc (clause_loc, TREE_TYPE (ref), x);
3127 }
75a70cf9 3128 gimplify_assign (ref, x, slist);
1e8e9920 3129
cb561506 3130 ref = build_receiver_ref (var, false, ctx);
3131 if (by_ref)
3132 {
3133 ref = fold_convert_loc (clause_loc,
3134 build_pointer_type (TREE_TYPE (new_var)),
3135 ref);
3136 ref = build_fold_indirect_ref_loc (clause_loc, ref);
3137 }
1e8e9920 3138 if (is_reference (var))
3139 {
cb561506 3140 ref = fold_convert_loc (clause_loc, TREE_TYPE (new_var), ref);
182cf5a9 3141 ref = build_simple_mem_ref_loc (clause_loc, ref);
3142 new_var = build_simple_mem_ref_loc (clause_loc, new_var);
1e8e9920 3143 }
cb561506 3144 x = lang_hooks.decls.omp_clause_assign_op (c, new_var, ref);
1e8e9920 3145 gimplify_and_add (x, rlist);
3146 }
3147}
3148
773c5ba7 3149
1e8e9920 3150/* Generate code to implement the clauses, FIRSTPRIVATE, COPYIN, LASTPRIVATE,
3151 and REDUCTION from the sender (aka parent) side. */
3152
3153static void
75a70cf9 3154lower_send_clauses (tree clauses, gimple_seq *ilist, gimple_seq *olist,
3155 omp_context *ctx)
1e8e9920 3156{
3157 tree c;
3158
3159 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
3160 {
773c5ba7 3161 tree val, ref, x, var;
1e8e9920 3162 bool by_ref, do_in = false, do_out = false;
389dd41b 3163 location_t clause_loc = OMP_CLAUSE_LOCATION (c);
1e8e9920 3164
55d6e7cd 3165 switch (OMP_CLAUSE_CODE (c))
1e8e9920 3166 {
fd6481cf 3167 case OMP_CLAUSE_PRIVATE:
3168 if (OMP_CLAUSE_PRIVATE_OUTER_REF (c))
3169 break;
3170 continue;
1e8e9920 3171 case OMP_CLAUSE_FIRSTPRIVATE:
3172 case OMP_CLAUSE_COPYIN:
3173 case OMP_CLAUSE_LASTPRIVATE:
3174 case OMP_CLAUSE_REDUCTION:
3175 break;
3176 default:
3177 continue;
3178 }
3179
87b31375 3180 val = OMP_CLAUSE_DECL (c);
3181 var = lookup_decl_in_outer_ctx (val, ctx);
773c5ba7 3182
f49d7bb5 3183 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_COPYIN
3184 && is_global_var (var))
3185 continue;
1e8e9920 3186 if (is_variable_sized (val))
3187 continue;
e8a588af 3188 by_ref = use_pointer_for_field (val, NULL);
1e8e9920 3189
55d6e7cd 3190 switch (OMP_CLAUSE_CODE (c))
1e8e9920 3191 {
fd6481cf 3192 case OMP_CLAUSE_PRIVATE:
1e8e9920 3193 case OMP_CLAUSE_FIRSTPRIVATE:
3194 case OMP_CLAUSE_COPYIN:
3195 do_in = true;
3196 break;
3197
3198 case OMP_CLAUSE_LASTPRIVATE:
3199 if (by_ref || is_reference (val))
3200 {
3201 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
3202 continue;
3203 do_in = true;
3204 }
3205 else
fd6481cf 3206 {
3207 do_out = true;
3208 if (lang_hooks.decls.omp_private_outer_ref (val))
3209 do_in = true;
3210 }
1e8e9920 3211 break;
3212
3213 case OMP_CLAUSE_REDUCTION:
3214 do_in = true;
3215 do_out = !(by_ref || is_reference (val));
3216 break;
3217
3218 default:
3219 gcc_unreachable ();
3220 }
3221
3222 if (do_in)
3223 {
3224 ref = build_sender_ref (val, ctx);
389dd41b 3225 x = by_ref ? build_fold_addr_expr_loc (clause_loc, var) : var;
75a70cf9 3226 gimplify_assign (ref, x, ilist);
fd6481cf 3227 if (is_task_ctx (ctx))
3228 DECL_ABSTRACT_ORIGIN (TREE_OPERAND (ref, 1)) = NULL;
1e8e9920 3229 }
773c5ba7 3230
1e8e9920 3231 if (do_out)
3232 {
3233 ref = build_sender_ref (val, ctx);
75a70cf9 3234 gimplify_assign (var, ref, olist);
1e8e9920 3235 }
3236 }
3237}
3238
75a70cf9 3239/* Generate code to implement SHARED from the sender (aka parent)
3240 side. This is trickier, since GIMPLE_OMP_PARALLEL_CLAUSES doesn't
3241 list things that got automatically shared. */
1e8e9920 3242
3243static void
75a70cf9 3244lower_send_shared_vars (gimple_seq *ilist, gimple_seq *olist, omp_context *ctx)
1e8e9920 3245{
fd6481cf 3246 tree var, ovar, nvar, f, x, record_type;
1e8e9920 3247
3248 if (ctx->record_type == NULL)
3249 return;
773c5ba7 3250
fd6481cf 3251 record_type = ctx->srecord_type ? ctx->srecord_type : ctx->record_type;
1767a056 3252 for (f = TYPE_FIELDS (record_type); f ; f = DECL_CHAIN (f))
1e8e9920 3253 {
3254 ovar = DECL_ABSTRACT_ORIGIN (f);
3255 nvar = maybe_lookup_decl (ovar, ctx);
3256 if (!nvar || !DECL_HAS_VALUE_EXPR_P (nvar))
3257 continue;
3258
773c5ba7 3259       /* If CTX is a nested parallel directive, find the immediately
3260 enclosing parallel or workshare construct that contains a
3261 mapping for OVAR. */
87b31375 3262 var = lookup_decl_in_outer_ctx (ovar, ctx);
773c5ba7 3263
e8a588af 3264 if (use_pointer_for_field (ovar, ctx))
1e8e9920 3265 {
3266 x = build_sender_ref (ovar, ctx);
773c5ba7 3267 var = build_fold_addr_expr (var);
75a70cf9 3268 gimplify_assign (x, var, ilist);
1e8e9920 3269 }
3270 else
3271 {
3272 x = build_sender_ref (ovar, ctx);
75a70cf9 3273 gimplify_assign (x, var, ilist);
1e8e9920 3274
d2263ebb 3275 if (!TREE_READONLY (var)
3276 /* We don't need to receive a new reference to a result
3277 or parm decl. In fact we may not store to it as we will
3278 invalidate any pending RSO and generate wrong gimple
3279 during inlining. */
3280 && !((TREE_CODE (var) == RESULT_DECL
3281 || TREE_CODE (var) == PARM_DECL)
3282 && DECL_BY_REFERENCE (var)))
fd6481cf 3283 {
3284 x = build_sender_ref (ovar, ctx);
75a70cf9 3285 gimplify_assign (var, x, olist);
fd6481cf 3286 }
1e8e9920 3287 }
3288 }
3289}
3290
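/* By way of example (editor's sketch): for a shared variable 'i' passed
   by reference, the sender side built above emits

     .omp_data_o.1.i = &i;		(before the region, ILIST)

   and for one passed by value

     .omp_data_o.1.i = i;		(before the region, ILIST)
     i = .omp_data_o.1.i;		(after the region, OLIST)

   mirroring the receiver-side DECL_VALUE_EXPRs installed during
   scanning.  */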
75a70cf9 3291
3292/* A convenience function to build an empty GIMPLE_COND with just the
3293 condition. */
3294
3295static gimple
3296gimple_build_cond_empty (tree cond)
3297{
3298 enum tree_code pred_code;
3299 tree lhs, rhs;
3300
3301 gimple_cond_get_ops_from_tree (cond, &pred_code, &lhs, &rhs);
3302 return gimple_build_cond (pred_code, lhs, rhs, NULL_TREE, NULL_TREE);
3303}
3304
3305
48e1416a 3306/* Build the function calls to GOMP_parallel_start etc. to actually
773c5ba7 3307   generate the parallel operation.  REGION is the parallel region
 3308   being expanded.  BB is the block where the code is to be inserted.
 3309   WS_ARGS is set if this is a call to a combined parallel+workshare
 3310   construct; it contains the list of additional arguments needed by
 3311   the workshare construct.  */
1e8e9920 3312
3313static void
61e47ac8 3314expand_parallel_call (struct omp_region *region, basic_block bb,
f1f41a6c 3315 gimple entry_stmt, vec<tree, va_gc> *ws_args)
1e8e9920 3316{
79acaae1 3317 tree t, t1, t2, val, cond, c, clauses;
75a70cf9 3318 gimple_stmt_iterator gsi;
3319 gimple stmt;
b9a16870 3320 enum built_in_function start_ix;
3321 int start_ix2;
389dd41b 3322 location_t clause_loc;
f1f41a6c 3323 vec<tree, va_gc> *args;
773c5ba7 3324
75a70cf9 3325 clauses = gimple_omp_parallel_clauses (entry_stmt);
773c5ba7 3326
334ec2d8 3327 /* Determine what flavor of GOMP_parallel_start we will be
773c5ba7 3328 emitting. */
3329 start_ix = BUILT_IN_GOMP_PARALLEL_START;
3330 if (is_combined_parallel (region))
3331 {
61e47ac8 3332 switch (region->inner->type)
773c5ba7 3333 {
75a70cf9 3334 case GIMPLE_OMP_FOR:
fd6481cf 3335 gcc_assert (region->inner->sched_kind != OMP_CLAUSE_SCHEDULE_AUTO);
b9a16870 3336 start_ix2 = ((int)BUILT_IN_GOMP_PARALLEL_LOOP_STATIC_START
3337 + (region->inner->sched_kind
3338 == OMP_CLAUSE_SCHEDULE_RUNTIME
3339 ? 3 : region->inner->sched_kind));
3340 start_ix = (enum built_in_function)start_ix2;
61e47ac8 3341 break;
75a70cf9 3342 case GIMPLE_OMP_SECTIONS:
61e47ac8 3343 start_ix = BUILT_IN_GOMP_PARALLEL_SECTIONS_START;
3344 break;
3345 default:
3346 gcc_unreachable ();
773c5ba7 3347 }
773c5ba7 3348 }
1e8e9920 3349
3350 /* By default, the value of NUM_THREADS is zero (selected at run time)
3351 and there is no conditional. */
3352 cond = NULL_TREE;
3353 val = build_int_cst (unsigned_type_node, 0);
3354
3355 c = find_omp_clause (clauses, OMP_CLAUSE_IF);
3356 if (c)
3357 cond = OMP_CLAUSE_IF_EXPR (c);
3358
3359 c = find_omp_clause (clauses, OMP_CLAUSE_NUM_THREADS);
3360 if (c)
389dd41b 3361 {
3362 val = OMP_CLAUSE_NUM_THREADS_EXPR (c);
3363 clause_loc = OMP_CLAUSE_LOCATION (c);
3364 }
3365 else
3366 clause_loc = gimple_location (entry_stmt);
1e8e9920 3367
3368 /* Ensure 'val' is of the correct type. */
389dd41b 3369 val = fold_convert_loc (clause_loc, unsigned_type_node, val);
1e8e9920 3370
3371 /* If we found the clause 'if (cond)', build either
3372 (cond != 0) or (cond ? val : 1u). */
3373 if (cond)
3374 {
75a70cf9 3375 gimple_stmt_iterator gsi;
773c5ba7 3376
3377 cond = gimple_boolify (cond);
3378
1e8e9920 3379 if (integer_zerop (val))
389dd41b 3380 val = fold_build2_loc (clause_loc,
3381 EQ_EXPR, unsigned_type_node, cond,
79acaae1 3382 build_int_cst (TREE_TYPE (cond), 0));
1e8e9920 3383 else
773c5ba7 3384 {
3385 basic_block cond_bb, then_bb, else_bb;
79acaae1 3386 edge e, e_then, e_else;
75a70cf9 3387 tree tmp_then, tmp_else, tmp_join, tmp_var;
79acaae1 3388
3389 tmp_var = create_tmp_var (TREE_TYPE (val), NULL);
3390 if (gimple_in_ssa_p (cfun))
3391 {
75a70cf9 3392 tmp_then = make_ssa_name (tmp_var, NULL);
3393 tmp_else = make_ssa_name (tmp_var, NULL);
3394 tmp_join = make_ssa_name (tmp_var, NULL);
79acaae1 3395 }
3396 else
3397 {
3398 tmp_then = tmp_var;
3399 tmp_else = tmp_var;
3400 tmp_join = tmp_var;
3401 }
773c5ba7 3402
773c5ba7 3403 e = split_block (bb, NULL);
3404 cond_bb = e->src;
3405 bb = e->dest;
3406 remove_edge (e);
3407
3408 then_bb = create_empty_bb (cond_bb);
3409 else_bb = create_empty_bb (then_bb);
79acaae1 3410 set_immediate_dominator (CDI_DOMINATORS, then_bb, cond_bb);
3411 set_immediate_dominator (CDI_DOMINATORS, else_bb, cond_bb);
773c5ba7 3412
75a70cf9 3413 stmt = gimple_build_cond_empty (cond);
3414 gsi = gsi_start_bb (cond_bb);
3415 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
773c5ba7 3416
75a70cf9 3417 gsi = gsi_start_bb (then_bb);
3418 stmt = gimple_build_assign (tmp_then, val);
3419 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
773c5ba7 3420
75a70cf9 3421 gsi = gsi_start_bb (else_bb);
3422 stmt = gimple_build_assign
3423 (tmp_else, build_int_cst (unsigned_type_node, 1));
3424 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
773c5ba7 3425
3426 make_edge (cond_bb, then_bb, EDGE_TRUE_VALUE);
3427 make_edge (cond_bb, else_bb, EDGE_FALSE_VALUE);
f6568ea4 3428 if (current_loops)
3429 {
3430 add_bb_to_loop (then_bb, cond_bb->loop_father);
3431 add_bb_to_loop (else_bb, cond_bb->loop_father);
3432 }
79acaae1 3433 e_then = make_edge (then_bb, bb, EDGE_FALLTHRU);
3434 e_else = make_edge (else_bb, bb, EDGE_FALLTHRU);
773c5ba7 3435
79acaae1 3436 if (gimple_in_ssa_p (cfun))
3437 {
75a70cf9 3438 gimple phi = create_phi_node (tmp_join, bb);
60d535d2 3439 add_phi_arg (phi, tmp_then, e_then, UNKNOWN_LOCATION);
3440 add_phi_arg (phi, tmp_else, e_else, UNKNOWN_LOCATION);
79acaae1 3441 }
3442
3443 val = tmp_join;
773c5ba7 3444 }
3445
75a70cf9 3446 gsi = gsi_start_bb (bb);
3447 val = force_gimple_operand_gsi (&gsi, val, true, NULL_TREE,
3448 false, GSI_CONTINUE_LINKING);
1e8e9920 3449 }
3450
75a70cf9 3451 gsi = gsi_last_bb (bb);
3452 t = gimple_omp_parallel_data_arg (entry_stmt);
1e8e9920 3453 if (t == NULL)
c2f47e15 3454 t1 = null_pointer_node;
1e8e9920 3455 else
c2f47e15 3456 t1 = build_fold_addr_expr (t);
75a70cf9 3457 t2 = build_fold_addr_expr (gimple_omp_parallel_child_fn (entry_stmt));
773c5ba7 3458
f1f41a6c 3459 vec_alloc (args, 3 + vec_safe_length (ws_args));
3460 args->quick_push (t2);
3461 args->quick_push (t1);
3462 args->quick_push (val);
3463 if (ws_args)
3464 args->splice (*ws_args);
414c3a2c 3465
3466 t = build_call_expr_loc_vec (UNKNOWN_LOCATION,
b9a16870 3467 builtin_decl_explicit (start_ix), args);
773c5ba7 3468
75a70cf9 3469 force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3470 false, GSI_CONTINUE_LINKING);
1e8e9920 3471
75a70cf9 3472 t = gimple_omp_parallel_data_arg (entry_stmt);
1e8e9920 3473 if (t == NULL)
3474 t = null_pointer_node;
3475 else
3476 t = build_fold_addr_expr (t);
389dd41b 3477 t = build_call_expr_loc (gimple_location (entry_stmt),
3478 gimple_omp_parallel_child_fn (entry_stmt), 1, t);
75a70cf9 3479 force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3480 false, GSI_CONTINUE_LINKING);
1e8e9920 3481
389dd41b 3482 t = build_call_expr_loc (gimple_location (entry_stmt),
b9a16870 3483 builtin_decl_explicit (BUILT_IN_GOMP_PARALLEL_END),
3484 0);
75a70cf9 3485 force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3486 false, GSI_CONTINUE_LINKING);
1e8e9920 3487}
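/* For illustration only -- a hedged sketch, not part of this pass -- of
   the runtime sequence the code above emits for a plain (non-combined)
   "#pragma omp parallel num_threads (4)".  The names __omp_fn0 and
   parallel_launch_sketch are hypothetical; GOMP_parallel_start/_end are
   the libgomp entry points used above.  */
extern void GOMP_parallel_start (void (*) (void *), void *, unsigned);
extern void GOMP_parallel_end (void);
static void __omp_fn0 (void *);	/* outlined parallel body (hypothetical) */

static void
parallel_launch_sketch (void *omp_data)
{
  GOMP_parallel_start (__omp_fn0, omp_data, 4);	/* spawn the team */
  __omp_fn0 (omp_data);				/* the encountering thread works too */
  GOMP_parallel_end ();				/* join; implicit barrier */
}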
3488
773c5ba7 3489
fd6481cf 3490/* Build the function call to GOMP_task to actually
 3491   generate the task operation.  BB is the block where the code is inserted.  */
3492
3493static void
75a70cf9 3494expand_task_call (basic_block bb, gimple entry_stmt)
fd6481cf 3495{
2169f33b 3496 tree t, t1, t2, t3, flags, cond, c, c2, clauses;
75a70cf9 3497 gimple_stmt_iterator gsi;
389dd41b 3498 location_t loc = gimple_location (entry_stmt);
fd6481cf 3499
75a70cf9 3500 clauses = gimple_omp_task_clauses (entry_stmt);
fd6481cf 3501
fd6481cf 3502 c = find_omp_clause (clauses, OMP_CLAUSE_IF);
3503 if (c)
3504 cond = gimple_boolify (OMP_CLAUSE_IF_EXPR (c));
3505 else
3506 cond = boolean_true_node;
3507
3508 c = find_omp_clause (clauses, OMP_CLAUSE_UNTIED);
2169f33b 3509 c2 = find_omp_clause (clauses, OMP_CLAUSE_MERGEABLE);
3510 flags = build_int_cst (unsigned_type_node,
3511 (c ? 1 : 0) + (c2 ? 4 : 0));
3512
3513 c = find_omp_clause (clauses, OMP_CLAUSE_FINAL);
3514 if (c)
3515 {
3516 c = gimple_boolify (OMP_CLAUSE_FINAL_EXPR (c));
3517 c = fold_build3_loc (loc, COND_EXPR, unsigned_type_node, c,
3518 build_int_cst (unsigned_type_node, 2),
3519 build_int_cst (unsigned_type_node, 0));
3520 flags = fold_build2_loc (loc, PLUS_EXPR, unsigned_type_node, flags, c);
3521 }
fd6481cf 3522
75a70cf9 3523 gsi = gsi_last_bb (bb);
3524 t = gimple_omp_task_data_arg (entry_stmt);
fd6481cf 3525 if (t == NULL)
3526 t2 = null_pointer_node;
3527 else
389dd41b 3528 t2 = build_fold_addr_expr_loc (loc, t);
3529 t1 = build_fold_addr_expr_loc (loc, gimple_omp_task_child_fn (entry_stmt));
75a70cf9 3530 t = gimple_omp_task_copy_fn (entry_stmt);
fd6481cf 3531 if (t == NULL)
3532 t3 = null_pointer_node;
3533 else
389dd41b 3534 t3 = build_fold_addr_expr_loc (loc, t);
fd6481cf 3535
b9a16870 3536 t = build_call_expr (builtin_decl_explicit (BUILT_IN_GOMP_TASK),
3537 7, t1, t2, t3,
75a70cf9 3538 gimple_omp_task_arg_size (entry_stmt),
3539 gimple_omp_task_arg_align (entry_stmt), cond, flags);
fd6481cf 3540
75a70cf9 3541 force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3542 false, GSI_CONTINUE_LINKING);
fd6481cf 3543}
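/* A hedged sketch (helper name hypothetical) of the resulting libgomp
   call for "#pragma omp task untied mergeable": per the arithmetic
   above, flags bit 0 encodes UNTIED, bit 2 MERGEABLE, and bit 1 is
   folded in at run time from the FINAL clause expression.  */
extern void GOMP_task (void (*) (void *), void *, void (*) (void *, void *),
		       long, long, _Bool, unsigned);

static void
task_launch_sketch (void (*child_fn) (void *), void *data,
		    void (*copy_fn) (void *, void *),
		    long arg_size, long arg_align, _Bool final_p)
{
  unsigned flags = 1 /* untied */ + 4 /* mergeable */ + (final_p ? 2 : 0);
  GOMP_task (child_fn, data, copy_fn, arg_size, arg_align,
	     1 /* IF clause evaluated true */, flags);
}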
3544
3545
75a70cf9 3546/* If exceptions are enabled, wrap the statements in BODY in a MUST_NOT_THROW
3547 catch handler and return it. This prevents programs from violating the
3548 structured block semantics with throws. */
1e8e9920 3549
75a70cf9 3550static gimple_seq
3551maybe_catch_exception (gimple_seq body)
1e8e9920 3552{
e38def9c 3553 gimple g;
3554 tree decl;
1e8e9920 3555
3556 if (!flag_exceptions)
75a70cf9 3557 return body;
1e8e9920 3558
596981c8 3559 if (lang_hooks.eh_protect_cleanup_actions != NULL)
3560 decl = lang_hooks.eh_protect_cleanup_actions ();
1e8e9920 3561 else
b9a16870 3562 decl = builtin_decl_explicit (BUILT_IN_TRAP);
75a70cf9 3563
e38def9c 3564 g = gimple_build_eh_must_not_throw (decl);
3565 g = gimple_build_try (body, gimple_seq_alloc_with_stmt (g),
75a70cf9 3566 GIMPLE_TRY_CATCH);
1e8e9920 3567
e38def9c 3568 return gimple_seq_alloc_with_stmt (g);
1e8e9920 3569}
3570
773c5ba7 3571/* Chain all the DECLs in V by their DECL_CHAIN fields.  */
1e8e9920 3572
773c5ba7 3573static tree
f1f41a6c 3574vec2chain (vec<tree, va_gc> *v)
1e8e9920 3575{
2ab2ce89 3576 tree chain = NULL_TREE, t;
3577 unsigned ix;
1e8e9920 3578
f1f41a6c 3579 FOR_EACH_VEC_SAFE_ELT_REVERSE (v, ix, t)
773c5ba7 3580 {
1767a056 3581 DECL_CHAIN (t) = chain;
2ab2ce89 3582 chain = t;
773c5ba7 3583 }
1e8e9920 3584
2ab2ce89 3585 return chain;
773c5ba7 3586}
1e8e9920 3587
1e8e9920 3588
773c5ba7 3589/* Remove barriers in REGION->EXIT's block. Note that this is only
75a70cf9 3590 valid for GIMPLE_OMP_PARALLEL regions. Since the end of a parallel region
 3591   is an implicit barrier, any barrier that a workshare inside the
 3592   GIMPLE_OMP_PARALLEL left at the end of the GIMPLE_OMP_PARALLEL region can
773c5ba7 3593   now be removed.  */
1e8e9920 3594
773c5ba7 3595static void
3596remove_exit_barrier (struct omp_region *region)
3597{
75a70cf9 3598 gimple_stmt_iterator gsi;
773c5ba7 3599 basic_block exit_bb;
61e47ac8 3600 edge_iterator ei;
3601 edge e;
75a70cf9 3602 gimple stmt;
4a04f4b4 3603 int any_addressable_vars = -1;
1e8e9920 3604
61e47ac8 3605 exit_bb = region->exit;
1e8e9920 3606
5056ba1a 3607  /* If the parallel region doesn't return, we don't have a REGION->EXIT
 3608     block at all.  */
3609 if (! exit_bb)
3610 return;
3611
75a70cf9 3612 /* The last insn in the block will be the parallel's GIMPLE_OMP_RETURN. The
3613 workshare's GIMPLE_OMP_RETURN will be in a preceding block. The kinds of
61e47ac8 3614 statements that can appear in between are extremely limited -- no
3615 memory operations at all. Here, we allow nothing at all, so the
75a70cf9 3616 only thing we allow to precede this GIMPLE_OMP_RETURN is a label. */
3617 gsi = gsi_last_bb (exit_bb);
3618 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_RETURN);
3619 gsi_prev (&gsi);
3620 if (!gsi_end_p (gsi) && gimple_code (gsi_stmt (gsi)) != GIMPLE_LABEL)
773c5ba7 3621 return;
1e8e9920 3622
61e47ac8 3623 FOR_EACH_EDGE (e, ei, exit_bb->preds)
3624 {
75a70cf9 3625 gsi = gsi_last_bb (e->src);
3626 if (gsi_end_p (gsi))
61e47ac8 3627 continue;
75a70cf9 3628 stmt = gsi_stmt (gsi);
4a04f4b4 3629 if (gimple_code (stmt) == GIMPLE_OMP_RETURN
3630 && !gimple_omp_return_nowait_p (stmt))
3631 {
3632 /* OpenMP 3.0 tasks unfortunately prevent this optimization
3633 in many cases. If there could be tasks queued, the barrier
3634 might be needed to let the tasks run before some local
3635 variable of the parallel that the task uses as shared
3636 runs out of scope. The task can be spawned either
 3637	     from within the current function (this would be easy to check)
3638 or from some function it calls and gets passed an address
3639 of such a variable. */
3640 if (any_addressable_vars < 0)
3641 {
3642 gimple parallel_stmt = last_stmt (region->entry);
3643 tree child_fun = gimple_omp_parallel_child_fn (parallel_stmt);
2ab2ce89 3644 tree local_decls, block, decl;
3645 unsigned ix;
4a04f4b4 3646
3647 any_addressable_vars = 0;
2ab2ce89 3648 FOR_EACH_LOCAL_DECL (DECL_STRUCT_FUNCTION (child_fun), ix, decl)
3649 if (TREE_ADDRESSABLE (decl))
4a04f4b4 3650 {
3651 any_addressable_vars = 1;
3652 break;
3653 }
3654 for (block = gimple_block (stmt);
3655 !any_addressable_vars
3656 && block
3657 && TREE_CODE (block) == BLOCK;
3658 block = BLOCK_SUPERCONTEXT (block))
3659 {
3660 for (local_decls = BLOCK_VARS (block);
3661 local_decls;
1767a056 3662 local_decls = DECL_CHAIN (local_decls))
4a04f4b4 3663 if (TREE_ADDRESSABLE (local_decls))
3664 {
3665 any_addressable_vars = 1;
3666 break;
3667 }
3668 if (block == gimple_block (parallel_stmt))
3669 break;
3670 }
3671 }
3672 if (!any_addressable_vars)
3673 gimple_omp_return_set_nowait (stmt);
3674 }
61e47ac8 3675 }
1e8e9920 3676}
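/* Source-level illustration (a sketch; names hypothetical) of the
   pattern the function above optimizes: the barrier implied by the
   inner worksharing loop is immediately followed by the parallel
   region's own closing barrier, so the inner one can be downgraded to
   "nowait" when no addressable local can escape into a queued task.  */
void
exit_barrier_sketch (double *a, int n)
{
#pragma omp parallel
  {
    int i;
#pragma omp for
    for (i = 0; i < n; i++)
      a[i] = 0.0;
    /* implicit barrier here is redundant: the region ends right after */
  }
}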
3677
61e47ac8 3678static void
3679remove_exit_barriers (struct omp_region *region)
3680{
75a70cf9 3681 if (region->type == GIMPLE_OMP_PARALLEL)
61e47ac8 3682 remove_exit_barrier (region);
3683
3684 if (region->inner)
3685 {
3686 region = region->inner;
3687 remove_exit_barriers (region);
3688 while (region->next)
3689 {
3690 region = region->next;
3691 remove_exit_barriers (region);
3692 }
3693 }
3694}
773c5ba7 3695
658b4427 3696/* Optimize omp_get_thread_num () and omp_get_num_threads ()
3697 calls. These can't be declared as const functions, but
3698 within one parallel body they are constant, so they can be
3699 transformed there into __builtin_omp_get_{thread_num,num_threads} ()
fd6481cf 3700   which are declared const.  Similarly for a task body, except
 3701   that in an untied task omp_get_thread_num () can change at any task
 3702   scheduling point.  */
658b4427 3703
3704static void
75a70cf9 3705optimize_omp_library_calls (gimple entry_stmt)
658b4427 3706{
3707 basic_block bb;
75a70cf9 3708 gimple_stmt_iterator gsi;
b9a16870 3709 tree thr_num_tree = builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM);
3710 tree thr_num_id = DECL_ASSEMBLER_NAME (thr_num_tree);
3711 tree num_thr_tree = builtin_decl_explicit (BUILT_IN_OMP_GET_NUM_THREADS);
3712 tree num_thr_id = DECL_ASSEMBLER_NAME (num_thr_tree);
75a70cf9 3713 bool untied_task = (gimple_code (entry_stmt) == GIMPLE_OMP_TASK
3714 && find_omp_clause (gimple_omp_task_clauses (entry_stmt),
fd6481cf 3715 OMP_CLAUSE_UNTIED) != NULL);
658b4427 3716
3717 FOR_EACH_BB (bb)
75a70cf9 3718 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
658b4427 3719 {
75a70cf9 3720 gimple call = gsi_stmt (gsi);
658b4427 3721 tree decl;
3722
75a70cf9 3723 if (is_gimple_call (call)
3724 && (decl = gimple_call_fndecl (call))
658b4427 3725 && DECL_EXTERNAL (decl)
3726 && TREE_PUBLIC (decl)
3727 && DECL_INITIAL (decl) == NULL)
3728 {
3729 tree built_in;
3730
3731 if (DECL_NAME (decl) == thr_num_id)
fd6481cf 3732 {
 3733		    /* In #pragma omp task untied, omp_get_thread_num () can change
3734 during the execution of the task region. */
3735 if (untied_task)
3736 continue;
b9a16870 3737 built_in = builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM);
fd6481cf 3738 }
658b4427 3739 else if (DECL_NAME (decl) == num_thr_id)
b9a16870 3740 built_in = builtin_decl_explicit (BUILT_IN_OMP_GET_NUM_THREADS);
658b4427 3741 else
3742 continue;
3743
3744 if (DECL_ASSEMBLER_NAME (decl) != DECL_ASSEMBLER_NAME (built_in)
75a70cf9 3745 || gimple_call_num_args (call) != 0)
658b4427 3746 continue;
3747
3748 if (flag_exceptions && !TREE_NOTHROW (decl))
3749 continue;
3750
3751 if (TREE_CODE (TREE_TYPE (decl)) != FUNCTION_TYPE
1ea6a73c 3752 || !types_compatible_p (TREE_TYPE (TREE_TYPE (decl)),
3753 TREE_TYPE (TREE_TYPE (built_in))))
658b4427 3754 continue;
3755
0acacf9e 3756 gimple_call_set_fndecl (call, built_in);
658b4427 3757 }
3758 }
3759}
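/* Illustration (hypothetical helper; requires -fopenmp and <omp.h>):
   within one parallel body both calls below return the same value, so
   once redirected to the const builtin they can be CSEd into one.  */
#include <omp.h>

static int
library_call_sketch (void)
{
  return omp_get_num_threads () + omp_get_num_threads ();
}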
3760
8e6b4515 3761/* Callback for expand_omp_build_assign. Return non-NULL if *tp needs to be
3762 regimplified. */
3763
3764static tree
3765expand_omp_regimplify_p (tree *tp, int *walk_subtrees, void *)
3766{
3767 tree t = *tp;
3768
3769 /* Any variable with DECL_VALUE_EXPR needs to be regimplified. */
3770 if (TREE_CODE (t) == VAR_DECL && DECL_HAS_VALUE_EXPR_P (t))
3771 return t;
3772
3773 if (TREE_CODE (t) == ADDR_EXPR)
3774 recompute_tree_invariant_for_addr_expr (t);
3775
3776 *walk_subtrees = !TYPE_P (t) && !DECL_P (t);
3777 return NULL_TREE;
3778}
3779
3d483a94 3780/* Insert a TO = FROM assignment before *GSI_P.  */
3781
3782static void
3783expand_omp_build_assign (gimple_stmt_iterator *gsi_p, tree to, tree from)
3784{
3785 bool simple_p = DECL_P (to) && TREE_ADDRESSABLE (to);
3786 from = force_gimple_operand_gsi (gsi_p, from, simple_p, NULL_TREE,
3787 true, GSI_SAME_STMT);
3788 gimple stmt = gimple_build_assign (to, from);
3789 gsi_insert_before (gsi_p, stmt, GSI_SAME_STMT);
3790 if (walk_tree (&from, expand_omp_regimplify_p, NULL, NULL)
3791 || walk_tree (&to, expand_omp_regimplify_p, NULL, NULL))
3792 {
3793 gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
3794 gimple_regimplify_operands (stmt, &gsi);
3795 }
3796}
3797
fd6481cf 3798/* Expand the OpenMP parallel or task directive starting at REGION. */
1e8e9920 3799
3800static void
fd6481cf 3801expand_omp_taskreg (struct omp_region *region)
1e8e9920 3802{
773c5ba7 3803 basic_block entry_bb, exit_bb, new_bb;
87d4aa85 3804 struct function *child_cfun;
414c3a2c 3805 tree child_fn, block, t;
75a70cf9 3806 gimple_stmt_iterator gsi;
3807 gimple entry_stmt, stmt;
773c5ba7 3808 edge e;
f1f41a6c 3809 vec<tree, va_gc> *ws_args;
773c5ba7 3810
61e47ac8 3811 entry_stmt = last_stmt (region->entry);
75a70cf9 3812 child_fn = gimple_omp_taskreg_child_fn (entry_stmt);
773c5ba7 3813 child_cfun = DECL_STRUCT_FUNCTION (child_fn);
773c5ba7 3814
61e47ac8 3815 entry_bb = region->entry;
3816 exit_bb = region->exit;
773c5ba7 3817
773c5ba7 3818 if (is_combined_parallel (region))
61e47ac8 3819 ws_args = region->ws_args;
773c5ba7 3820 else
414c3a2c 3821 ws_args = NULL;
1e8e9920 3822
61e47ac8 3823 if (child_cfun->cfg)
1e8e9920 3824 {
773c5ba7 3825 /* Due to inlining, it may happen that we have already outlined
3826 the region, in which case all we need to do is make the
3827 sub-graph unreachable and emit the parallel call. */
3828 edge entry_succ_e, exit_succ_e;
75a70cf9 3829 gimple_stmt_iterator gsi;
773c5ba7 3830
3831 entry_succ_e = single_succ_edge (entry_bb);
773c5ba7 3832
75a70cf9 3833 gsi = gsi_last_bb (entry_bb);
3834 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_PARALLEL
3835 || gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_TASK);
3836 gsi_remove (&gsi, true);
773c5ba7 3837
3838 new_bb = entry_bb;
03ed154b 3839 if (exit_bb)
3840 {
3841 exit_succ_e = single_succ_edge (exit_bb);
3842 make_edge (new_bb, exit_succ_e->dest, EDGE_FALLTHRU);
3843 }
79acaae1 3844 remove_edge_and_dominated_blocks (entry_succ_e);
1e8e9920 3845 }
773c5ba7 3846 else
3847 {
501bdd19 3848 unsigned srcidx, dstidx, num;
2ab2ce89 3849
773c5ba7 3850 /* If the parallel region needs data sent from the parent
3480139d 3851 function, then the very first statement (except possible
3852 tree profile counter updates) of the parallel body
773c5ba7 3853 is a copy assignment .OMP_DATA_I = &.OMP_DATA_O. Since
3854 &.OMP_DATA_O is passed as an argument to the child function,
3855 we need to replace it with the argument as seen by the child
3856 function.
3857
3858 In most cases, this will end up being the identity assignment
3859 .OMP_DATA_I = .OMP_DATA_I. However, if the parallel body had
3860 a function call that has been inlined, the original PARM_DECL
3861 .OMP_DATA_I may have been converted into a different local
 3862	 variable, in which case we need to keep the assignment.  */
75a70cf9 3863 if (gimple_omp_taskreg_data_arg (entry_stmt))
773c5ba7 3864 {
3865 basic_block entry_succ_bb = single_succ (entry_bb);
75a70cf9 3866 gimple_stmt_iterator gsi;
3867 tree arg, narg;
3868 gimple parcopy_stmt = NULL;
1e8e9920 3869
75a70cf9 3870 for (gsi = gsi_start_bb (entry_succ_bb); ; gsi_next (&gsi))
3480139d 3871 {
75a70cf9 3872 gimple stmt;
3480139d 3873
75a70cf9 3874 gcc_assert (!gsi_end_p (gsi));
3875 stmt = gsi_stmt (gsi);
3876 if (gimple_code (stmt) != GIMPLE_ASSIGN)
cc6b725b 3877 continue;
3878
75a70cf9 3879 if (gimple_num_ops (stmt) == 2)
3480139d 3880 {
75a70cf9 3881 tree arg = gimple_assign_rhs1 (stmt);
3882
 3883		  /* We ignore the subcode because we're
3884 effectively doing a STRIP_NOPS. */
3885
3886 if (TREE_CODE (arg) == ADDR_EXPR
3887 && TREE_OPERAND (arg, 0)
3888 == gimple_omp_taskreg_data_arg (entry_stmt))
3889 {
3890 parcopy_stmt = stmt;
3891 break;
3892 }
3480139d 3893 }
3894 }
79acaae1 3895
75a70cf9 3896 gcc_assert (parcopy_stmt != NULL);
79acaae1 3897 arg = DECL_ARGUMENTS (child_fn);
3898
3899 if (!gimple_in_ssa_p (cfun))
3900 {
75a70cf9 3901 if (gimple_assign_lhs (parcopy_stmt) == arg)
3902 gsi_remove (&gsi, true);
79acaae1 3903 else
75a70cf9 3904 {
3905 /* ?? Is setting the subcode really necessary ?? */
3906 gimple_omp_set_subcode (parcopy_stmt, TREE_CODE (arg));
3907 gimple_assign_set_rhs1 (parcopy_stmt, arg);
3908 }
79acaae1 3909 }
3910 else
3911 {
3912 /* If we are in ssa form, we must load the value from the default
3913 definition of the argument. That should not be defined now,
3914 since the argument is not used uninitialized. */
c6dfe037 3915 gcc_assert (ssa_default_def (cfun, arg) == NULL);
75a70cf9 3916 narg = make_ssa_name (arg, gimple_build_nop ());
c6dfe037 3917 set_ssa_default_def (cfun, arg, narg);
75a70cf9 3918 /* ?? Is setting the subcode really necessary ?? */
3919 gimple_omp_set_subcode (parcopy_stmt, TREE_CODE (narg));
3920 gimple_assign_set_rhs1 (parcopy_stmt, narg);
79acaae1 3921 update_stmt (parcopy_stmt);
3922 }
773c5ba7 3923 }
3924
3925 /* Declare local variables needed in CHILD_CFUN. */
3926 block = DECL_INITIAL (child_fn);
2ab2ce89 3927 BLOCK_VARS (block) = vec2chain (child_cfun->local_decls);
e1a7ccb9 3928      /* The gimplifier could record temporaries in the parallel/task block
 3929	 rather than in the containing function's local_decls chain,
3930 which would mean cgraph missed finalizing them. Do it now. */
1767a056 3931 for (t = BLOCK_VARS (block); t; t = DECL_CHAIN (t))
e1a7ccb9 3932 if (TREE_CODE (t) == VAR_DECL
3933 && TREE_STATIC (t)
3934 && !DECL_EXTERNAL (t))
3935 varpool_finalize_decl (t);
75a70cf9 3936 DECL_SAVED_TREE (child_fn) = NULL;
e3a19533 3937 /* We'll create a CFG for child_fn, so no gimple body is needed. */
3938 gimple_set_body (child_fn, NULL);
1d22f541 3939 TREE_USED (block) = 1;
773c5ba7 3940
79acaae1 3941 /* Reset DECL_CONTEXT on function arguments. */
1767a056 3942 for (t = DECL_ARGUMENTS (child_fn); t; t = DECL_CHAIN (t))
773c5ba7 3943 DECL_CONTEXT (t) = child_fn;
3944
75a70cf9 3945 /* Split ENTRY_BB at GIMPLE_OMP_PARALLEL or GIMPLE_OMP_TASK,
3946 so that it can be moved to the child function. */
3947 gsi = gsi_last_bb (entry_bb);
3948 stmt = gsi_stmt (gsi);
3949 gcc_assert (stmt && (gimple_code (stmt) == GIMPLE_OMP_PARALLEL
3950 || gimple_code (stmt) == GIMPLE_OMP_TASK));
3951 gsi_remove (&gsi, true);
3952 e = split_block (entry_bb, stmt);
773c5ba7 3953 entry_bb = e->dest;
3954 single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
3955
75a70cf9 3956 /* Convert GIMPLE_OMP_RETURN into a RETURN_EXPR. */
5056ba1a 3957 if (exit_bb)
3958 {
75a70cf9 3959 gsi = gsi_last_bb (exit_bb);
3960 gcc_assert (!gsi_end_p (gsi)
3961 && gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_RETURN);
3962 stmt = gimple_build_return (NULL);
3963 gsi_insert_after (&gsi, stmt, GSI_SAME_STMT);
3964 gsi_remove (&gsi, true);
5056ba1a 3965 }
79acaae1 3966
3967 /* Move the parallel region into CHILD_CFUN. */
48e1416a 3968
79acaae1 3969 if (gimple_in_ssa_p (cfun))
3970 {
bcaa2770 3971 init_tree_ssa (child_cfun);
5084b2e4 3972 init_ssa_operands (child_cfun);
3973 child_cfun->gimple_df->in_ssa_p = true;
1d22f541 3974 block = NULL_TREE;
79acaae1 3975 }
1d22f541 3976 else
75a70cf9 3977 block = gimple_block (entry_stmt);
1d22f541 3978
3979 new_bb = move_sese_region_to_fn (child_cfun, entry_bb, exit_bb, block);
79acaae1 3980 if (exit_bb)
3981 single_succ_edge (new_bb)->flags = EDGE_FALLTHRU;
04c2922b 3982 /* When the OMP expansion process cannot guarantee an up-to-date
 3983	 loop tree, arrange for the child function to fix up loops.  */
3984 if (loops_state_satisfies_p (LOOPS_NEED_FIXUP))
3985 child_cfun->x_current_loops->state |= LOOPS_NEED_FIXUP;
79acaae1 3986
1d22f541 3987 /* Remove non-local VAR_DECLs from child_cfun->local_decls list. */
f1f41a6c 3988 num = vec_safe_length (child_cfun->local_decls);
501bdd19 3989 for (srcidx = 0, dstidx = 0; srcidx < num; srcidx++)
3990 {
f1f41a6c 3991 t = (*child_cfun->local_decls)[srcidx];
501bdd19 3992 if (DECL_CONTEXT (t) == cfun->decl)
3993 continue;
3994 if (srcidx != dstidx)
f1f41a6c 3995 (*child_cfun->local_decls)[dstidx] = t;
501bdd19 3996 dstidx++;
3997 }
3998 if (dstidx != num)
f1f41a6c 3999 vec_safe_truncate (child_cfun->local_decls, dstidx);
1d22f541 4000
79acaae1 4001 /* Inform the callgraph about the new function. */
82b40354 4002 DECL_STRUCT_FUNCTION (child_fn)->curr_properties = cfun->curr_properties;
79acaae1 4003 cgraph_add_new_function (child_fn, true);
4004
4005 /* Fix the callgraph edges for child_cfun. Those for cfun will be
4006 fixed in a following pass. */
4007 push_cfun (child_cfun);
658b4427 4008 if (optimize)
fd6481cf 4009 optimize_omp_library_calls (entry_stmt);
79acaae1 4010 rebuild_cgraph_edges ();
fbe86b1b 4011
4012 /* Some EH regions might become dead, see PR34608. If
4013 pass_cleanup_cfg isn't the first pass to happen with the
4014 new child, these dead EH edges might cause problems.
4015 Clean them up now. */
4016 if (flag_exceptions)
4017 {
4018 basic_block bb;
fbe86b1b 4019 bool changed = false;
4020
fbe86b1b 4021 FOR_EACH_BB (bb)
75a70cf9 4022 changed |= gimple_purge_dead_eh_edges (bb);
fbe86b1b 4023 if (changed)
4024 cleanup_tree_cfg ();
fbe86b1b 4025 }
dd277d48 4026 if (gimple_in_ssa_p (cfun))
4027 update_ssa (TODO_update_ssa);
79acaae1 4028 pop_cfun ();
773c5ba7 4029 }
48e1416a 4030
773c5ba7 4031 /* Emit a library call to launch the children threads. */
75a70cf9 4032 if (gimple_code (entry_stmt) == GIMPLE_OMP_PARALLEL)
fd6481cf 4033 expand_parallel_call (region, new_bb, entry_stmt, ws_args);
4034 else
4035 expand_task_call (new_bb, entry_stmt);
083152fb 4036 if (gimple_in_ssa_p (cfun))
4037 update_ssa (TODO_update_ssa_only_virtuals);
1e8e9920 4038}
4039
773c5ba7 4040
3d483a94 4041/* Helper function for expand_omp_{for_*,simd}. If this is the outermost
4042 of the combined collapse > 1 loop constructs, generate code like:
4043 if (__builtin_expect (N32 cond3 N31, 0)) goto ZERO_ITER_BB;
4044 if (cond3 is <)
4045 adj = STEP3 - 1;
4046 else
4047 adj = STEP3 + 1;
4048 count3 = (adj + N32 - N31) / STEP3;
4049 if (__builtin_expect (N22 cond2 N21, 0)) goto ZERO_ITER_BB;
4050 if (cond2 is <)
4051 adj = STEP2 - 1;
4052 else
4053 adj = STEP2 + 1;
4054 count2 = (adj + N22 - N21) / STEP2;
4055 if (__builtin_expect (N12 cond1 N11, 0)) goto ZERO_ITER_BB;
4056 if (cond1 is <)
4057 adj = STEP1 - 1;
4058 else
4059 adj = STEP1 + 1;
4060 count1 = (adj + N12 - N11) / STEP1;
4061 count = count1 * count2 * count3;
4062 Furthermore, if ZERO_ITER_BB is NULL, create a BB which does:
4063 count = 0;
4064 and set ZERO_ITER_BB to that bb. */
4065
4066/* NOTE: It *could* be better to moosh all of the BBs together,
4067 creating one larger BB with all the computation and the unexpected
4068 jump at the end. I.e.
4069
4070 bool zero3, zero2, zero1, zero;
4071
4072 zero3 = N32 c3 N31;
4073 count3 = (N32 - N31) /[cl] STEP3;
4074 zero2 = N22 c2 N21;
4075 count2 = (N22 - N21) /[cl] STEP2;
4076 zero1 = N12 c1 N11;
4077 count1 = (N12 - N11) /[cl] STEP1;
4078 zero = zero3 || zero2 || zero1;
4079 count = count1 * count2 * count3;
4080 if (__builtin_expect(zero, false)) goto zero_iter_bb;
4081
 4082   After all, we expect zero to be false, and thus we expect to have to
4083 evaluate all of the comparison expressions, so short-circuiting
4084 oughtn't be a win. Since the condition isn't protecting a
4085 denominator, we're not concerned about divide-by-zero, so we can
4086 fully evaluate count even if a numerator turned out to be wrong.
4087
4088 It seems like putting this all together would create much better
4089 scheduling opportunities, and less pressure on the chip's branch
4090 predictor. */
4091
4092static void
4093expand_omp_for_init_counts (struct omp_for_data *fd, gimple_stmt_iterator *gsi,
4094 basic_block &entry_bb, tree *counts,
4095 basic_block &zero_iter_bb, int &first_zero_iter,
4096 basic_block &l2_dom_bb)
4097{
4098 tree t, type = TREE_TYPE (fd->loop.v);
4099 gimple stmt;
4100 edge e, ne;
4101 int i;
4102
4103 /* Collapsed loops need work for expansion into SSA form. */
4104 gcc_assert (!gimple_in_ssa_p (cfun));
4105
4106 for (i = 0; i < fd->collapse; i++)
4107 {
4108 tree itype = TREE_TYPE (fd->loops[i].v);
4109
4110 if (SSA_VAR_P (fd->loop.n2)
4111 && ((t = fold_binary (fd->loops[i].cond_code, boolean_type_node,
4112 fold_convert (itype, fd->loops[i].n1),
4113 fold_convert (itype, fd->loops[i].n2)))
4114 == NULL_TREE || !integer_onep (t)))
4115 {
4116 tree n1, n2;
4117 n1 = fold_convert (itype, unshare_expr (fd->loops[i].n1));
4118 n1 = force_gimple_operand_gsi (gsi, n1, true, NULL_TREE,
4119 true, GSI_SAME_STMT);
4120 n2 = fold_convert (itype, unshare_expr (fd->loops[i].n2));
4121 n2 = force_gimple_operand_gsi (gsi, n2, true, NULL_TREE,
4122 true, GSI_SAME_STMT);
4123 stmt = gimple_build_cond (fd->loops[i].cond_code, n1, n2,
4124 NULL_TREE, NULL_TREE);
4125 gsi_insert_before (gsi, stmt, GSI_SAME_STMT);
4126 if (walk_tree (gimple_cond_lhs_ptr (stmt),
4127 expand_omp_regimplify_p, NULL, NULL)
4128 || walk_tree (gimple_cond_rhs_ptr (stmt),
4129 expand_omp_regimplify_p, NULL, NULL))
4130 {
4131 *gsi = gsi_for_stmt (stmt);
4132 gimple_regimplify_operands (stmt, gsi);
4133 }
4134 e = split_block (entry_bb, stmt);
4135 if (zero_iter_bb == NULL)
4136 {
4137 first_zero_iter = i;
4138 zero_iter_bb = create_empty_bb (entry_bb);
4139 if (current_loops)
4140 add_bb_to_loop (zero_iter_bb, entry_bb->loop_father);
4141 *gsi = gsi_after_labels (zero_iter_bb);
4142 stmt = gimple_build_assign (fd->loop.n2,
4143 build_zero_cst (type));
4144 gsi_insert_before (gsi, stmt, GSI_SAME_STMT);
4145 set_immediate_dominator (CDI_DOMINATORS, zero_iter_bb,
4146 entry_bb);
4147 }
4148 ne = make_edge (entry_bb, zero_iter_bb, EDGE_FALSE_VALUE);
4149 ne->probability = REG_BR_PROB_BASE / 2000 - 1;
4150 e->flags = EDGE_TRUE_VALUE;
4151 e->probability = REG_BR_PROB_BASE - ne->probability;
4152 if (l2_dom_bb == NULL)
4153 l2_dom_bb = entry_bb;
4154 entry_bb = e->dest;
4155 *gsi = gsi_last_bb (entry_bb);
4156 }
4157
4158 if (POINTER_TYPE_P (itype))
4159 itype = signed_type_for (itype);
4160 t = build_int_cst (itype, (fd->loops[i].cond_code == LT_EXPR
4161 ? -1 : 1));
4162 t = fold_build2 (PLUS_EXPR, itype,
4163 fold_convert (itype, fd->loops[i].step), t);
4164 t = fold_build2 (PLUS_EXPR, itype, t,
4165 fold_convert (itype, fd->loops[i].n2));
4166 t = fold_build2 (MINUS_EXPR, itype, t,
4167 fold_convert (itype, fd->loops[i].n1));
4168 /* ?? We could probably use CEIL_DIV_EXPR instead of
 4169	   TRUNC_DIV_EXPR and adjust by hand.  Unless we can't
4170 generate the same code in the end because generically we
4171 don't know that the values involved must be negative for
4172 GT?? */
4173 if (TYPE_UNSIGNED (itype) && fd->loops[i].cond_code == GT_EXPR)
4174 t = fold_build2 (TRUNC_DIV_EXPR, itype,
4175 fold_build1 (NEGATE_EXPR, itype, t),
4176 fold_build1 (NEGATE_EXPR, itype,
4177 fold_convert (itype,
4178 fd->loops[i].step)));
4179 else
4180 t = fold_build2 (TRUNC_DIV_EXPR, itype, t,
4181 fold_convert (itype, fd->loops[i].step));
4182 t = fold_convert (type, t);
4183 if (TREE_CODE (t) == INTEGER_CST)
4184 counts[i] = t;
4185 else
4186 {
4187 counts[i] = create_tmp_reg (type, ".count");
4188 expand_omp_build_assign (gsi, counts[i], t);
4189 }
4190 if (SSA_VAR_P (fd->loop.n2))
4191 {
4192 if (i == 0)
4193 t = counts[0];
4194 else
4195 t = fold_build2 (MULT_EXPR, type, fd->loop.n2, counts[i]);
4196 expand_omp_build_assign (gsi, fd->loop.n2, t);
4197 }
4198 }
4199}
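/* Worked sketch of the per-loop trip-count formula built above
   (standalone helper, hypothetical; signed arithmetic, so the unsigned
   NEGATE_EXPR special case is omitted).  For "<" loops adj = STEP - 1:
   N1 = 0, N2 = 10, STEP = 3 gives (2 + 10 - 0) / 3 = 4 iterations
   (0, 3, 6, 9).  For ">" loops adj = STEP + 1: N1 = 10, N2 = 0,
   STEP = -3 gives (-2 + 0 - 10) / -3 = 4 iterations (10, 7, 4, 1).  */
static long
trip_count_sketch (long n1, long n2, long step, int cond_is_lt)
{
  long adj = cond_is_lt ? step - 1 : step + 1;
  return (adj + n2 - n1) / step;	/* TRUNC_DIV_EXPR, as above */
}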
4200
4201
4202/* Helper function for expand_omp_{for_*,simd}. Generate code like:
4203 T = V;
4204 V3 = N31 + (T % count3) * STEP3;
4205 T = T / count3;
4206 V2 = N21 + (T % count2) * STEP2;
4207 T = T / count2;
4208 V1 = N11 + T * STEP1;
4209 if this loop doesn't have an inner loop construct combined with it. */
4210
4211static void
4212expand_omp_for_init_vars (struct omp_for_data *fd, gimple_stmt_iterator *gsi,
4213 tree *counts, tree startvar)
4214{
4215 int i;
4216 tree type = TREE_TYPE (fd->loop.v);
4217 tree tem = create_tmp_reg (type, ".tem");
4218 gimple stmt = gimple_build_assign (tem, startvar);
4219 gsi_insert_after (gsi, stmt, GSI_CONTINUE_LINKING);
4220
4221 for (i = fd->collapse - 1; i >= 0; i--)
4222 {
4223 tree vtype = TREE_TYPE (fd->loops[i].v), itype, t;
4224 itype = vtype;
4225 if (POINTER_TYPE_P (vtype))
4226 itype = signed_type_for (vtype);
4227 if (i != 0)
4228 t = fold_build2 (TRUNC_MOD_EXPR, type, tem, counts[i]);
4229 else
4230 t = tem;
4231 t = fold_convert (itype, t);
4232 t = fold_build2 (MULT_EXPR, itype, t,
4233 fold_convert (itype, fd->loops[i].step));
4234 if (POINTER_TYPE_P (vtype))
4235 t = fold_build_pointer_plus (fd->loops[i].n1, t);
4236 else
4237 t = fold_build2 (PLUS_EXPR, itype, fd->loops[i].n1, t);
4238 t = force_gimple_operand_gsi (gsi, t,
4239 DECL_P (fd->loops[i].v)
4240 && TREE_ADDRESSABLE (fd->loops[i].v),
4241 NULL_TREE, false,
4242 GSI_CONTINUE_LINKING);
4243 stmt = gimple_build_assign (fd->loops[i].v, t);
4244 gsi_insert_after (gsi, stmt, GSI_CONTINUE_LINKING);
4245 if (i != 0)
4246 {
4247 t = fold_build2 (TRUNC_DIV_EXPR, type, tem, counts[i]);
4248 t = force_gimple_operand_gsi (gsi, t, false, NULL_TREE,
4249 false, GSI_CONTINUE_LINKING);
4250 stmt = gimple_build_assign (tem, t);
4251 gsi_insert_after (gsi, stmt, GSI_CONTINUE_LINKING);
4252 }
4253 }
4254}
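/* Sketch of the de-linearization above for collapse(2) (hypothetical
   standalone helper): logical iteration T is split back into (V1, V2).
   E.g. count2 = 4, T = 9 yields V2 = N21 + 1 * STEP2 and
   V1 = N11 + 2 * STEP1.  */
static void
init_vars_sketch (unsigned long t, long n11, long step1,
		  long n21, long step2, unsigned long count2,
		  long *v1, long *v2)
{
  *v2 = n21 + (long) (t % count2) * step2;
  t = t / count2;
  *v1 = n11 + (long) t * step1;
}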
4255
4256
4257/* Helper function for expand_omp_for_*. Generate code like:
4258 L10:
4259 V3 += STEP3;
4260 if (V3 cond3 N32) goto BODY_BB; else goto L11;
4261 L11:
4262 V3 = N31;
4263 V2 += STEP2;
4264 if (V2 cond2 N22) goto BODY_BB; else goto L12;
4265 L12:
4266 V2 = N21;
4267 V1 += STEP1;
4268 goto BODY_BB; */
4269
4270static basic_block
4271extract_omp_for_update_vars (struct omp_for_data *fd, basic_block cont_bb,
4272 basic_block body_bb)
4273{
4274 basic_block last_bb, bb, collapse_bb = NULL;
4275 int i;
4276 gimple_stmt_iterator gsi;
4277 edge e;
4278 tree t;
4279 gimple stmt;
4280
4281 last_bb = cont_bb;
4282 for (i = fd->collapse - 1; i >= 0; i--)
4283 {
4284 tree vtype = TREE_TYPE (fd->loops[i].v);
4285
4286 bb = create_empty_bb (last_bb);
4287 if (current_loops)
4288 add_bb_to_loop (bb, last_bb->loop_father);
4289 gsi = gsi_start_bb (bb);
4290
4291 if (i < fd->collapse - 1)
4292 {
4293 e = make_edge (last_bb, bb, EDGE_FALSE_VALUE);
4294 e->probability = REG_BR_PROB_BASE / 8;
4295
4296 t = fd->loops[i + 1].n1;
4297 t = force_gimple_operand_gsi (&gsi, t,
4298 DECL_P (fd->loops[i + 1].v)
4299 && TREE_ADDRESSABLE (fd->loops[i
4300 + 1].v),
4301 NULL_TREE, false,
4302 GSI_CONTINUE_LINKING);
4303 stmt = gimple_build_assign (fd->loops[i + 1].v, t);
4304 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
4305 }
4306 else
4307 collapse_bb = bb;
4308
4309 set_immediate_dominator (CDI_DOMINATORS, bb, last_bb);
4310
4311 if (POINTER_TYPE_P (vtype))
4312 t = fold_build_pointer_plus (fd->loops[i].v, fd->loops[i].step);
4313 else
4314 t = fold_build2 (PLUS_EXPR, vtype, fd->loops[i].v, fd->loops[i].step);
4315 t = force_gimple_operand_gsi (&gsi, t,
4316 DECL_P (fd->loops[i].v)
4317 && TREE_ADDRESSABLE (fd->loops[i].v),
4318 NULL_TREE, false, GSI_CONTINUE_LINKING);
4319 stmt = gimple_build_assign (fd->loops[i].v, t);
4320 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
4321
4322 if (i > 0)
4323 {
4324 t = fd->loops[i].n2;
4325 t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
4326 false, GSI_CONTINUE_LINKING);
4327 tree v = fd->loops[i].v;
4328 if (DECL_P (v) && TREE_ADDRESSABLE (v))
4329 v = force_gimple_operand_gsi (&gsi, v, true, NULL_TREE,
4330 false, GSI_CONTINUE_LINKING);
4331 t = fold_build2 (fd->loops[i].cond_code, boolean_type_node, v, t);
4332 stmt = gimple_build_cond_empty (t);
4333 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
4334 e = make_edge (bb, body_bb, EDGE_TRUE_VALUE);
4335 e->probability = REG_BR_PROB_BASE * 7 / 8;
4336 }
4337 else
4338 make_edge (bb, body_bb, EDGE_FALLTHRU);
4339 last_bb = bb;
4340 }
4341
4342 return collapse_bb;
4343}
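/* The control flow generated above behaves like this odometer sketch
   for collapse(2) (hypothetical helper; "<" conditions assumed): bump
   the innermost variable, and on exhaustion reset it and carry into the
   next-outer one before falling back into the loop body.  */
static void
update_vars_sketch (long *v1, long step1,
		    long *v2, long n21, long n22, long step2)
{
  *v2 += step2;
  if (!(*v2 < n22))
    {
      *v2 = n21;	/* wrap inner variable */
      *v1 += step1;	/* carry into outer variable */
    }
}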
4344
4345
773c5ba7 4346/* A subroutine of expand_omp_for. Generate code for a parallel
1e8e9920 4347 loop with any schedule. Given parameters:
4348
4349 for (V = N1; V cond N2; V += STEP) BODY;
4350
4351 where COND is "<" or ">", we generate pseudocode
4352
4353 more = GOMP_loop_foo_start (N1, N2, STEP, CHUNK, &istart0, &iend0);
773c5ba7 4354 if (more) goto L0; else goto L3;
1e8e9920 4355 L0:
4356 V = istart0;
4357 iend = iend0;
4358 L1:
4359 BODY;
4360 V += STEP;
773c5ba7 4361 if (V cond iend) goto L1; else goto L2;
1e8e9920 4362 L2:
773c5ba7 4363 if (GOMP_loop_foo_next (&istart0, &iend0)) goto L0; else goto L3;
4364 L3:
1e8e9920 4365
773c5ba7 4366 If this is a combined omp parallel loop, instead of the call to
fd6481cf 4367 GOMP_loop_foo_start, we call GOMP_loop_foo_next.
4368
4369 For collapsed loops, given parameters:
4370 collapse(3)
4371 for (V1 = N11; V1 cond1 N12; V1 += STEP1)
4372 for (V2 = N21; V2 cond2 N22; V2 += STEP2)
4373 for (V3 = N31; V3 cond3 N32; V3 += STEP3)
4374 BODY;
4375
4376 we generate pseudocode
4377
8e6b4515 4378 if (__builtin_expect (N32 cond3 N31, 0)) goto Z0;
fd6481cf 4379 if (cond3 is <)
4380 adj = STEP3 - 1;
4381 else
4382 adj = STEP3 + 1;
4383 count3 = (adj + N32 - N31) / STEP3;
8e6b4515 4384 if (__builtin_expect (N22 cond2 N21, 0)) goto Z0;
fd6481cf 4385 if (cond2 is <)
4386 adj = STEP2 - 1;
4387 else
4388 adj = STEP2 + 1;
4389 count2 = (adj + N22 - N21) / STEP2;
8e6b4515 4390 if (__builtin_expect (N12 cond1 N11, 0)) goto Z0;
fd6481cf 4391 if (cond1 is <)
4392 adj = STEP1 - 1;
4393 else
4394 adj = STEP1 + 1;
4395 count1 = (adj + N12 - N11) / STEP1;
4396 count = count1 * count2 * count3;
8e6b4515 4397 goto Z1;
4398 Z0:
4399 count = 0;
4400 Z1:
fd6481cf 4401 more = GOMP_loop_foo_start (0, count, 1, CHUNK, &istart0, &iend0);
4402 if (more) goto L0; else goto L3;
4403 L0:
4404 V = istart0;
4405 T = V;
4406 V3 = N31 + (T % count3) * STEP3;
4407 T = T / count3;
4408 V2 = N21 + (T % count2) * STEP2;
4409 T = T / count2;
4410 V1 = N11 + T * STEP1;
4411 iend = iend0;
4412 L1:
4413 BODY;
4414 V += 1;
4415 if (V < iend) goto L10; else goto L2;
4416 L10:
4417 V3 += STEP3;
4418 if (V3 cond3 N32) goto L1; else goto L11;
4419 L11:
4420 V3 = N31;
4421 V2 += STEP2;
4422 if (V2 cond2 N22) goto L1; else goto L12;
4423 L12:
4424 V2 = N21;
4425 V1 += STEP1;
4426 goto L1;
4427 L2:
4428 if (GOMP_loop_foo_next (&istart0, &iend0)) goto L0; else goto L3;
4429 L3:
4430
4431 */
1e8e9920 4432
61e47ac8 4433static void
773c5ba7 4434expand_omp_for_generic (struct omp_region *region,
4435 struct omp_for_data *fd,
1e8e9920 4436 enum built_in_function start_fn,
4437 enum built_in_function next_fn)
4438{
75a70cf9 4439 tree type, istart0, iend0, iend;
fd6481cf 4440 tree t, vmain, vback, bias = NULL_TREE;
4441 basic_block entry_bb, cont_bb, exit_bb, l0_bb, l1_bb, collapse_bb;
03ed154b 4442 basic_block l2_bb = NULL, l3_bb = NULL;
75a70cf9 4443 gimple_stmt_iterator gsi;
4444 gimple stmt;
773c5ba7 4445 bool in_combined_parallel = is_combined_parallel (region);
ac6e3339 4446 bool broken_loop = region->cont == NULL;
79acaae1 4447 edge e, ne;
fd6481cf 4448 tree *counts = NULL;
4449 int i;
ac6e3339 4450
4451 gcc_assert (!broken_loop || !in_combined_parallel);
fd6481cf 4452 gcc_assert (fd->iter_type == long_integer_type_node
4453 || !in_combined_parallel);
1e8e9920 4454
fd6481cf 4455 type = TREE_TYPE (fd->loop.v);
4456 istart0 = create_tmp_var (fd->iter_type, ".istart0");
4457 iend0 = create_tmp_var (fd->iter_type, ".iend0");
6d63fc03 4458 TREE_ADDRESSABLE (istart0) = 1;
4459 TREE_ADDRESSABLE (iend0) = 1;
1e8e9920 4460
fd6481cf 4461 /* See if we need to bias by LLONG_MIN. */
4462 if (fd->iter_type == long_long_unsigned_type_node
4463 && TREE_CODE (type) == INTEGER_TYPE
4464 && !TYPE_UNSIGNED (type))
4465 {
4466 tree n1, n2;
4467
4468 if (fd->loop.cond_code == LT_EXPR)
4469 {
4470 n1 = fd->loop.n1;
4471 n2 = fold_build2 (PLUS_EXPR, type, fd->loop.n2, fd->loop.step);
4472 }
4473 else
4474 {
4475 n1 = fold_build2 (MINUS_EXPR, type, fd->loop.n2, fd->loop.step);
4476 n2 = fd->loop.n1;
4477 }
4478 if (TREE_CODE (n1) != INTEGER_CST
4479 || TREE_CODE (n2) != INTEGER_CST
4480 || ((tree_int_cst_sgn (n1) < 0) ^ (tree_int_cst_sgn (n2) < 0)))
4481 bias = fold_convert (fd->iter_type, TYPE_MIN_VALUE (type));
4482 }
4483
61e47ac8 4484 entry_bb = region->entry;
03ed154b 4485 cont_bb = region->cont;
fd6481cf 4486 collapse_bb = NULL;
ac6e3339 4487 gcc_assert (EDGE_COUNT (entry_bb->succs) == 2);
4488 gcc_assert (broken_loop
4489 || BRANCH_EDGE (entry_bb)->dest == FALLTHRU_EDGE (cont_bb)->dest);
4490 l0_bb = split_edge (FALLTHRU_EDGE (entry_bb));
4491 l1_bb = single_succ (l0_bb);
4492 if (!broken_loop)
03ed154b 4493 {
4494 l2_bb = create_empty_bb (cont_bb);
ac6e3339 4495 gcc_assert (BRANCH_EDGE (cont_bb)->dest == l1_bb);
4496 gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
03ed154b 4497 }
ac6e3339 4498 else
4499 l2_bb = NULL;
4500 l3_bb = BRANCH_EDGE (entry_bb)->dest;
4501 exit_bb = region->exit;
773c5ba7 4502
75a70cf9 4503 gsi = gsi_last_bb (entry_bb);
fd6481cf 4504
75a70cf9 4505 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
fd6481cf 4506 if (fd->collapse > 1)
4507 {
8e6b4515 4508 int first_zero_iter = -1;
3d483a94 4509 basic_block zero_iter_bb = NULL, l2_dom_bb = NULL;
8e6b4515 4510
3d483a94 4511 counts = XALLOCAVEC (tree, fd->collapse);
4512 expand_omp_for_init_counts (fd, &gsi, entry_bb, counts,
4513 zero_iter_bb, first_zero_iter,
4514 l2_dom_bb);
fd6481cf 4515
8e6b4515 4516 if (zero_iter_bb)
4517 {
4518 /* Some counts[i] vars might be uninitialized if
4519 some loop has zero iterations. But the body shouldn't
4520 be executed in that case, so just avoid uninit warnings. */
4521 for (i = first_zero_iter; i < fd->collapse; i++)
4522 if (SSA_VAR_P (counts[i]))
4523 TREE_NO_WARNING (counts[i]) = 1;
4524 gsi_prev (&gsi);
4525 e = split_block (entry_bb, gsi_stmt (gsi));
4526 entry_bb = e->dest;
4527 make_edge (zero_iter_bb, entry_bb, EDGE_FALLTHRU);
4528 gsi = gsi_last_bb (entry_bb);
4529 set_immediate_dominator (CDI_DOMINATORS, entry_bb,
4530 get_immediate_dominator (CDI_DOMINATORS,
4531 zero_iter_bb));
4532 }
fd6481cf 4533 }
79acaae1 4534 if (in_combined_parallel)
4535 {
4536 /* In a combined parallel loop, emit a call to
4537 GOMP_loop_foo_next. */
b9a16870 4538 t = build_call_expr (builtin_decl_explicit (next_fn), 2,
79acaae1 4539 build_fold_addr_expr (istart0),
4540 build_fold_addr_expr (iend0));
4541 }
4542 else
1e8e9920 4543 {
c2f47e15 4544 tree t0, t1, t2, t3, t4;
773c5ba7 4545 /* If this is not a combined parallel loop, emit a call to
4546 GOMP_loop_foo_start in ENTRY_BB. */
c2f47e15 4547 t4 = build_fold_addr_expr (iend0);
4548 t3 = build_fold_addr_expr (istart0);
fd6481cf 4549 t2 = fold_convert (fd->iter_type, fd->loop.step);
3d483a94 4550 t1 = fd->loop.n2;
4551 t0 = fd->loop.n1;
4552 if (POINTER_TYPE_P (TREE_TYPE (t0))
4553 && TYPE_PRECISION (TREE_TYPE (t0))
4554 != TYPE_PRECISION (fd->iter_type))
c799f233 4555 {
 4556	  /* Avoid casting pointers to an integer of a different size.  */
3cea8318 4557 tree itype = signed_type_for (type);
3d483a94 4558 t1 = fold_convert (fd->iter_type, fold_convert (itype, t1));
4559 t0 = fold_convert (fd->iter_type, fold_convert (itype, t0));
c799f233 4560 }
4561 else
4562 {
3d483a94 4563 t1 = fold_convert (fd->iter_type, t1);
4564 t0 = fold_convert (fd->iter_type, t0);
c799f233 4565 }
fd6481cf 4566 if (bias)
1e8e9920 4567 {
fd6481cf 4568 t1 = fold_build2 (PLUS_EXPR, fd->iter_type, t1, bias);
4569 t0 = fold_build2 (PLUS_EXPR, fd->iter_type, t0, bias);
4570 }
4571 if (fd->iter_type == long_integer_type_node)
4572 {
4573 if (fd->chunk_size)
4574 {
4575 t = fold_convert (fd->iter_type, fd->chunk_size);
b9a16870 4576 t = build_call_expr (builtin_decl_explicit (start_fn),
4577 6, t0, t1, t2, t, t3, t4);
fd6481cf 4578 }
4579 else
b9a16870 4580 t = build_call_expr (builtin_decl_explicit (start_fn),
4581 5, t0, t1, t2, t3, t4);
1e8e9920 4582 }
c2f47e15 4583 else
fd6481cf 4584 {
4585 tree t5;
4586 tree c_bool_type;
b9a16870 4587 tree bfn_decl;
fd6481cf 4588
 4589	  /* The GOMP_loop_ull_*start functions have an additional boolean
4590 argument, true for < loops and false for > loops.
4591 In Fortran, the C bool type can be different from
4592 boolean_type_node. */
b9a16870 4593 bfn_decl = builtin_decl_explicit (start_fn);
4594 c_bool_type = TREE_TYPE (TREE_TYPE (bfn_decl));
fd6481cf 4595 t5 = build_int_cst (c_bool_type,
4596 fd->loop.cond_code == LT_EXPR ? 1 : 0);
4597 if (fd->chunk_size)
4598 {
fd6481cf 4600 t = fold_convert (fd->iter_type, fd->chunk_size);
b9a16870 4601 t = build_call_expr (bfn_decl, 7, t5, t0, t1, t2, t, t3, t4);
fd6481cf 4602 }
4603 else
b9a16870 4604 t = build_call_expr (builtin_decl_explicit (start_fn),
4605 6, t5, t0, t1, t2, t3, t4);
fd6481cf 4606 }
1e8e9920 4607 }
fd6481cf 4608 if (TREE_TYPE (t) != boolean_type_node)
4609 t = fold_build2 (NE_EXPR, boolean_type_node,
4610 t, build_int_cst (TREE_TYPE (t), 0));
75a70cf9 4611 t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
4612 true, GSI_SAME_STMT);
4613 gsi_insert_after (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT);
79acaae1 4614
75a70cf9 4615 /* Remove the GIMPLE_OMP_FOR statement. */
4616 gsi_remove (&gsi, true);
1e8e9920 4617
773c5ba7 4618 /* Iteration setup for sequential loop goes in L0_BB. */
3d483a94 4619 tree startvar = fd->loop.v;
4620 tree endvar = NULL_TREE;
4621
75a70cf9 4622 gsi = gsi_start_bb (l0_bb);
1efcacec 4623 t = istart0;
fd6481cf 4624 if (bias)
1efcacec 4625 t = fold_build2 (MINUS_EXPR, fd->iter_type, t, bias);
3d483a94 4626 if (POINTER_TYPE_P (TREE_TYPE (startvar)))
4627 t = fold_convert (signed_type_for (TREE_TYPE (startvar)), t);
4628 t = fold_convert (TREE_TYPE (startvar), t);
4abecb72 4629 t = force_gimple_operand_gsi (&gsi, t,
3d483a94 4630 DECL_P (startvar)
4631 && TREE_ADDRESSABLE (startvar),
4abecb72 4632 NULL_TREE, false, GSI_CONTINUE_LINKING);
3d483a94 4633 stmt = gimple_build_assign (startvar, t);
75a70cf9 4634 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
1e8e9920 4635
1efcacec 4636 t = iend0;
fd6481cf 4637 if (bias)
1efcacec 4638 t = fold_build2 (MINUS_EXPR, fd->iter_type, t, bias);
3d483a94 4639 if (POINTER_TYPE_P (TREE_TYPE (startvar)))
4640 t = fold_convert (signed_type_for (TREE_TYPE (startvar)), t);
4641 t = fold_convert (TREE_TYPE (startvar), t);
75a70cf9 4642 iend = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
4643 false, GSI_CONTINUE_LINKING);
3d483a94 4644 if (endvar)
fd6481cf 4645 {
3d483a94 4646 stmt = gimple_build_assign (endvar, iend);
75a70cf9 4647 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
fd6481cf 4648 }
3d483a94 4649 if (fd->collapse > 1)
4650 expand_omp_for_init_vars (fd, &gsi, counts, startvar);
773c5ba7 4651
ac6e3339 4652 if (!broken_loop)
03ed154b 4653 {
ac6e3339 4654 /* Code to control the increment and predicate for the sequential
4655 loop goes in the CONT_BB. */
75a70cf9 4656 gsi = gsi_last_bb (cont_bb);
4657 stmt = gsi_stmt (gsi);
4658 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_CONTINUE);
4659 vmain = gimple_omp_continue_control_use (stmt);
4660 vback = gimple_omp_continue_control_def (stmt);
79acaae1 4661
3d483a94 4662 /* OMP4 placeholder: if (!gimple_omp_for_combined_p (fd->for_stmt)). */
4663 if (1)
4664 {
4665 if (POINTER_TYPE_P (type))
4666 t = fold_build_pointer_plus (vmain, fd->loop.step);
4667 else
4668 t = fold_build2 (PLUS_EXPR, type, vmain, fd->loop.step);
4669 t = force_gimple_operand_gsi (&gsi, t,
4670 DECL_P (vback)
4671 && TREE_ADDRESSABLE (vback),
4672 NULL_TREE, true, GSI_SAME_STMT);
4673 stmt = gimple_build_assign (vback, t);
4674 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
4675
4676 t = build2 (fd->loop.cond_code, boolean_type_node,
4677 DECL_P (vback) && TREE_ADDRESSABLE (vback) ? t : vback,
4678 iend);
4679 stmt = gimple_build_cond_empty (t);
4680 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
4681 }
773c5ba7 4682
75a70cf9 4683 /* Remove GIMPLE_OMP_CONTINUE. */
4684 gsi_remove (&gsi, true);
773c5ba7 4685
fd6481cf 4686 if (fd->collapse > 1)
3d483a94 4687 collapse_bb = extract_omp_for_update_vars (fd, cont_bb, l1_bb);
fd6481cf 4688
ac6e3339 4689 /* Emit code to get the next parallel iteration in L2_BB. */
75a70cf9 4690 gsi = gsi_start_bb (l2_bb);
773c5ba7 4691
b9a16870 4692 t = build_call_expr (builtin_decl_explicit (next_fn), 2,
ac6e3339 4693 build_fold_addr_expr (istart0),
4694 build_fold_addr_expr (iend0));
75a70cf9 4695 t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
4696 false, GSI_CONTINUE_LINKING);
fd6481cf 4697 if (TREE_TYPE (t) != boolean_type_node)
4698 t = fold_build2 (NE_EXPR, boolean_type_node,
4699 t, build_int_cst (TREE_TYPE (t), 0));
75a70cf9 4700 stmt = gimple_build_cond_empty (t);
4701 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
ac6e3339 4702 }
1e8e9920 4703
61e47ac8 4704 /* Add the loop cleanup function. */
75a70cf9 4705 gsi = gsi_last_bb (exit_bb);
4706 if (gimple_omp_return_nowait_p (gsi_stmt (gsi)))
b9a16870 4707 t = builtin_decl_explicit (BUILT_IN_GOMP_LOOP_END_NOWAIT);
61e47ac8 4708 else
b9a16870 4709 t = builtin_decl_explicit (BUILT_IN_GOMP_LOOP_END);
75a70cf9 4710 stmt = gimple_build_call (t, 0);
4711 gsi_insert_after (&gsi, stmt, GSI_SAME_STMT);
4712 gsi_remove (&gsi, true);
773c5ba7 4713
4714 /* Connect the new blocks. */
79acaae1 4715 find_edge (entry_bb, l0_bb)->flags = EDGE_TRUE_VALUE;
4716 find_edge (entry_bb, l3_bb)->flags = EDGE_FALSE_VALUE;
1e8e9920 4717
ac6e3339 4718 if (!broken_loop)
4719 {
75a70cf9 4720 gimple_seq phis;
4721
79acaae1 4722 e = find_edge (cont_bb, l3_bb);
4723 ne = make_edge (l2_bb, l3_bb, EDGE_FALSE_VALUE);
4724
75a70cf9 4725 phis = phi_nodes (l3_bb);
4726 for (gsi = gsi_start (phis); !gsi_end_p (gsi); gsi_next (&gsi))
4727 {
4728 gimple phi = gsi_stmt (gsi);
4729 SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, ne),
4730 PHI_ARG_DEF_FROM_EDGE (phi, e));
4731 }
79acaae1 4732 remove_edge (e);
4733
ac6e3339 4734 make_edge (cont_bb, l2_bb, EDGE_FALSE_VALUE);
f6568ea4 4735 if (current_loops)
4736 add_bb_to_loop (l2_bb, cont_bb->loop_father);
3d483a94 4737 e = find_edge (cont_bb, l1_bb);
4738 /* OMP4 placeholder for gimple_omp_for_combined_p (fd->for_stmt). */
4739 if (0)
4740 ;
4741 else if (fd->collapse > 1)
fd6481cf 4742 {
fd6481cf 4743 remove_edge (e);
4744 e = make_edge (cont_bb, collapse_bb, EDGE_TRUE_VALUE);
4745 }
4746 else
3d483a94 4747 e->flags = EDGE_TRUE_VALUE;
4748 if (e)
fd6481cf 4749 {
3d483a94 4750 e->probability = REG_BR_PROB_BASE * 7 / 8;
4751 find_edge (cont_bb, l2_bb)->probability = REG_BR_PROB_BASE / 8;
4752 }
4753 else
4754 {
4755 e = find_edge (cont_bb, l2_bb);
4756 e->flags = EDGE_FALLTHRU;
fd6481cf 4757 }
ac6e3339 4758 make_edge (l2_bb, l0_bb, EDGE_TRUE_VALUE);
79acaae1 4759
4760 set_immediate_dominator (CDI_DOMINATORS, l2_bb,
4761 recompute_dominator (CDI_DOMINATORS, l2_bb));
4762 set_immediate_dominator (CDI_DOMINATORS, l3_bb,
4763 recompute_dominator (CDI_DOMINATORS, l3_bb));
4764 set_immediate_dominator (CDI_DOMINATORS, l0_bb,
4765 recompute_dominator (CDI_DOMINATORS, l0_bb));
4766 set_immediate_dominator (CDI_DOMINATORS, l1_bb,
4767 recompute_dominator (CDI_DOMINATORS, l1_bb));
04c2922b 4768
4769 struct loop *outer_loop = alloc_loop ();
4770 outer_loop->header = l0_bb;
4771 outer_loop->latch = l2_bb;
4772 add_loop (outer_loop, l0_bb->loop_father);
4773
3d483a94 4774 /* OMP4 placeholder: if (!gimple_omp_for_combined_p (fd->for_stmt)). */
4775 if (1)
4776 {
4777 struct loop *loop = alloc_loop ();
4778 loop->header = l1_bb;
4779 /* The loop may have multiple latches. */
4780 add_loop (loop, outer_loop);
4781 }
ac6e3339 4782 }
1e8e9920 4783}
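/* A hedged C rendering of the schedule driver expanded above, using the
   dynamic-schedule libgomp entry points as the example "foo" (the
   driver function and empty body are hypothetical; "<" loop assumed).
   GOMP_loop_*_start claims the first chunk [istart0, iend0) and
   GOMP_loop_*_next fetches further chunks until the space is drained.  */
extern _Bool GOMP_loop_dynamic_start (long, long, long, long, long *, long *);
extern _Bool GOMP_loop_dynamic_next (long *, long *);
extern void GOMP_loop_end (void);

static void
generic_driver_sketch (long n1, long n2, long step, long chunk)
{
  long istart0, iend0;
  if (GOMP_loop_dynamic_start (n1, n2, step, chunk, &istart0, &iend0))
    do
      {
	long v;
	for (v = istart0; v < iend0; v += step)
	  ;	/* BODY */
      }
    while (GOMP_loop_dynamic_next (&istart0, &iend0));
  GOMP_loop_end ();	/* loop cleanup; implicit barrier */
}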
4784
4785
773c5ba7 4786/* A subroutine of expand_omp_for. Generate code for a parallel
4787 loop with static schedule and no specified chunk size. Given
4788 parameters:
1e8e9920 4789
4790 for (V = N1; V cond N2; V += STEP) BODY;
4791
4792 where COND is "<" or ">", we generate pseudocode
4793
8e6b4515 4794 if ((__typeof (V)) -1 > 0 && N2 cond N1) goto L2;
1e8e9920 4795 if (cond is <)
4796 adj = STEP - 1;
4797 else
4798 adj = STEP + 1;
fd6481cf 4799 if ((__typeof (V)) -1 > 0 && cond is >)
4800 n = -(adj + N2 - N1) / -STEP;
4801 else
4802 n = (adj + N2 - N1) / STEP;
1e8e9920 4803 q = n / nthreads;
31712e83 4804 tt = n % nthreads;
4805 if (threadid < tt) goto L3; else goto L4;
4806 L3:
4807 tt = 0;
4808 q = q + 1;
4809 L4:
4810 s0 = q * threadid + tt;
4811 e0 = s0 + q;
79acaae1 4812 V = s0 * STEP + N1;
1e8e9920 4813 if (s0 >= e0) goto L2; else goto L0;
4814 L0:
1e8e9920 4815 e = e0 * STEP + N1;
4816 L1:
4817 BODY;
4818 V += STEP;
4819 if (V cond e) goto L1;
1e8e9920 4820 L2:
4821*/
4822
61e47ac8 4823static void
773c5ba7 4824expand_omp_for_static_nochunk (struct omp_region *region,
4825 struct omp_for_data *fd)
1e8e9920 4826{
31712e83 4827 tree n, q, s0, e0, e, t, tt, nthreads, threadid;
fd6481cf 4828 tree type, itype, vmain, vback;
31712e83 4829 basic_block entry_bb, second_bb, third_bb, exit_bb, seq_start_bb;
4830 basic_block body_bb, cont_bb;
61e47ac8 4831 basic_block fin_bb;
75a70cf9 4832 gimple_stmt_iterator gsi;
4833 gimple stmt;
31712e83 4834 edge ep;
1e8e9920 4835
fd6481cf 4836 itype = type = TREE_TYPE (fd->loop.v);
4837 if (POINTER_TYPE_P (type))
3cea8318 4838 itype = signed_type_for (type);
1e8e9920 4839
61e47ac8 4840 entry_bb = region->entry;
61e47ac8 4841 cont_bb = region->cont;
ac6e3339 4842 gcc_assert (EDGE_COUNT (entry_bb->succs) == 2);
4843 gcc_assert (BRANCH_EDGE (entry_bb)->dest == FALLTHRU_EDGE (cont_bb)->dest);
4844 seq_start_bb = split_edge (FALLTHRU_EDGE (entry_bb));
4845 body_bb = single_succ (seq_start_bb);
4846 gcc_assert (BRANCH_EDGE (cont_bb)->dest == body_bb);
4847 gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
4848 fin_bb = FALLTHRU_EDGE (cont_bb)->dest;
61e47ac8 4849 exit_bb = region->exit;
4850
773c5ba7 4851 /* Iteration space partitioning goes in ENTRY_BB. */
75a70cf9 4852 gsi = gsi_last_bb (entry_bb);
4853 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
61e47ac8 4854
8e6b4515 4855 t = fold_binary (fd->loop.cond_code, boolean_type_node,
4856 fold_convert (type, fd->loop.n1),
4857 fold_convert (type, fd->loop.n2));
4858 if (TYPE_UNSIGNED (type)
4859 && (t == NULL_TREE || !integer_onep (t)))
4860 {
4861 tree n1, n2;
4862 n1 = fold_convert (type, unshare_expr (fd->loop.n1));
4863 n1 = force_gimple_operand_gsi (&gsi, n1, true, NULL_TREE,
4864 true, GSI_SAME_STMT);
4865 n2 = fold_convert (type, unshare_expr (fd->loop.n2));
4866 n2 = force_gimple_operand_gsi (&gsi, n2, true, NULL_TREE,
4867 true, GSI_SAME_STMT);
4868 stmt = gimple_build_cond (fd->loop.cond_code, n1, n2,
4869 NULL_TREE, NULL_TREE);
4870 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
4871 if (walk_tree (gimple_cond_lhs_ptr (stmt),
4872 expand_omp_regimplify_p, NULL, NULL)
4873 || walk_tree (gimple_cond_rhs_ptr (stmt),
4874 expand_omp_regimplify_p, NULL, NULL))
4875 {
4876 gsi = gsi_for_stmt (stmt);
4877 gimple_regimplify_operands (stmt, &gsi);
4878 }
4879 ep = split_block (entry_bb, stmt);
4880 ep->flags = EDGE_TRUE_VALUE;
4881 entry_bb = ep->dest;
4882 ep->probability = REG_BR_PROB_BASE - (REG_BR_PROB_BASE / 2000 - 1);
4883 ep = make_edge (ep->src, fin_bb, EDGE_FALSE_VALUE);
4884 ep->probability = REG_BR_PROB_BASE / 2000 - 1;
4885 if (gimple_in_ssa_p (cfun))
4886 {
4887 int dest_idx = find_edge (entry_bb, fin_bb)->dest_idx;
4888 for (gsi = gsi_start_phis (fin_bb);
4889 !gsi_end_p (gsi); gsi_next (&gsi))
4890 {
4891 gimple phi = gsi_stmt (gsi);
4892 add_phi_arg (phi, gimple_phi_arg_def (phi, dest_idx),
4893 ep, UNKNOWN_LOCATION);
4894 }
4895 }
4896 gsi = gsi_last_bb (entry_bb);
4897 }
4898
b9a16870 4899 t = build_call_expr (builtin_decl_explicit (BUILT_IN_OMP_GET_NUM_THREADS), 0);
fd6481cf 4900 t = fold_convert (itype, t);
75a70cf9 4901 nthreads = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
4902 true, GSI_SAME_STMT);
48e1416a 4903
b9a16870 4904 t = build_call_expr (builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM), 0);
fd6481cf 4905 t = fold_convert (itype, t);
75a70cf9 4906 threadid = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
4907 true, GSI_SAME_STMT);
1e8e9920 4908
fd6481cf 4909 fd->loop.n1
75a70cf9 4910 = force_gimple_operand_gsi (&gsi, fold_convert (type, fd->loop.n1),
4911 true, NULL_TREE, true, GSI_SAME_STMT);
fd6481cf 4912 fd->loop.n2
75a70cf9 4913 = force_gimple_operand_gsi (&gsi, fold_convert (itype, fd->loop.n2),
4914 true, NULL_TREE, true, GSI_SAME_STMT);
fd6481cf 4915 fd->loop.step
75a70cf9 4916 = force_gimple_operand_gsi (&gsi, fold_convert (itype, fd->loop.step),
4917 true, NULL_TREE, true, GSI_SAME_STMT);
fd6481cf 4918
4919 t = build_int_cst (itype, (fd->loop.cond_code == LT_EXPR ? -1 : 1));
4920 t = fold_build2 (PLUS_EXPR, itype, fd->loop.step, t);
4921 t = fold_build2 (PLUS_EXPR, itype, t, fd->loop.n2);
4922 t = fold_build2 (MINUS_EXPR, itype, t, fold_convert (itype, fd->loop.n1));
4923 if (TYPE_UNSIGNED (itype) && fd->loop.cond_code == GT_EXPR)
4924 t = fold_build2 (TRUNC_DIV_EXPR, itype,
4925 fold_build1 (NEGATE_EXPR, itype, t),
4926 fold_build1 (NEGATE_EXPR, itype, fd->loop.step));
4927 else
4928 t = fold_build2 (TRUNC_DIV_EXPR, itype, t, fd->loop.step);
4929 t = fold_convert (itype, t);
75a70cf9 4930 n = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);
1e8e9920 4931
072f7ab1 4932 q = create_tmp_reg (itype, "q");
fd6481cf 4933 t = fold_build2 (TRUNC_DIV_EXPR, itype, n, nthreads);
31712e83 4934 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE, true, GSI_SAME_STMT);
4935 gsi_insert_before (&gsi, gimple_build_assign (q, t), GSI_SAME_STMT);
4936
072f7ab1 4937 tt = create_tmp_reg (itype, "tt");
31712e83 4938 t = fold_build2 (TRUNC_MOD_EXPR, itype, n, nthreads);
4939 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE, true, GSI_SAME_STMT);
4940 gsi_insert_before (&gsi, gimple_build_assign (tt, t), GSI_SAME_STMT);
1e8e9920 4941
31712e83 4942 t = build2 (LT_EXPR, boolean_type_node, threadid, tt);
4943 stmt = gimple_build_cond_empty (t);
4944 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
4945
4946 second_bb = split_block (entry_bb, stmt)->dest;
4947 gsi = gsi_last_bb (second_bb);
4948 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
4949
4950 gsi_insert_before (&gsi, gimple_build_assign (tt, build_int_cst (itype, 0)),
4951 GSI_SAME_STMT);
4952 stmt = gimple_build_assign_with_ops (PLUS_EXPR, q, q,
4953 build_int_cst (itype, 1));
4954 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
4955
4956 third_bb = split_block (second_bb, stmt)->dest;
4957 gsi = gsi_last_bb (third_bb);
4958 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
1e8e9920 4959
fd6481cf 4960 t = build2 (MULT_EXPR, itype, q, threadid);
31712e83 4961 t = build2 (PLUS_EXPR, itype, t, tt);
75a70cf9 4962 s0 = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);
1e8e9920 4963
fd6481cf 4964 t = fold_build2 (PLUS_EXPR, itype, s0, q);
75a70cf9 4965 e0 = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);
79acaae1 4966
1e8e9920 4967 t = build2 (GE_EXPR, boolean_type_node, s0, e0);
75a70cf9 4968 gsi_insert_before (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT);
773c5ba7 4969
75a70cf9 4970 /* Remove the GIMPLE_OMP_FOR statement. */
4971 gsi_remove (&gsi, true);
773c5ba7 4972
4973 /* Setup code for sequential iteration goes in SEQ_START_BB. */
75a70cf9 4974 gsi = gsi_start_bb (seq_start_bb);
1e8e9920 4975
fd6481cf 4976 t = fold_convert (itype, s0);
4977 t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
4978 if (POINTER_TYPE_P (type))
2cc66f2a 4979 t = fold_build_pointer_plus (fd->loop.n1, t);
fd6481cf 4980 else
4981 t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
4abecb72 4982 t = force_gimple_operand_gsi (&gsi, t,
4983 DECL_P (fd->loop.v)
4984 && TREE_ADDRESSABLE (fd->loop.v),
4985 NULL_TREE, false, GSI_CONTINUE_LINKING);
75a70cf9 4986 stmt = gimple_build_assign (fd->loop.v, t);
4987 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
48e1416a 4988
fd6481cf 4989 t = fold_convert (itype, e0);
4990 t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
4991 if (POINTER_TYPE_P (type))
2cc66f2a 4992 t = fold_build_pointer_plus (fd->loop.n1, t);
fd6481cf 4993 else
4994 t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
75a70cf9 4995 e = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
4996 false, GSI_CONTINUE_LINKING);
1e8e9920 4997
75a70cf9 4998 /* The code controlling the sequential loop replaces the
4999 GIMPLE_OMP_CONTINUE. */
5000 gsi = gsi_last_bb (cont_bb);
5001 stmt = gsi_stmt (gsi);
5002 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_CONTINUE);
5003 vmain = gimple_omp_continue_control_use (stmt);
5004 vback = gimple_omp_continue_control_def (stmt);
79acaae1 5005
fd6481cf 5006 if (POINTER_TYPE_P (type))
2cc66f2a 5007 t = fold_build_pointer_plus (vmain, fd->loop.step);
fd6481cf 5008 else
5009 t = fold_build2 (PLUS_EXPR, type, vmain, fd->loop.step);
4abecb72 5010 t = force_gimple_operand_gsi (&gsi, t,
5011 DECL_P (vback) && TREE_ADDRESSABLE (vback),
5012 NULL_TREE, true, GSI_SAME_STMT);
75a70cf9 5013 stmt = gimple_build_assign (vback, t);
5014 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
79acaae1 5015
4abecb72 5016 t = build2 (fd->loop.cond_code, boolean_type_node,
5017 DECL_P (vback) && TREE_ADDRESSABLE (vback) ? t : vback, e);
75a70cf9 5018 gsi_insert_before (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT);
1e8e9920 5019
75a70cf9 5020 /* Remove the GIMPLE_OMP_CONTINUE statement. */
5021 gsi_remove (&gsi, true);
773c5ba7 5022
75a70cf9 5023 /* Replace the GIMPLE_OMP_RETURN with a barrier, or nothing. */
5024 gsi = gsi_last_bb (exit_bb);
5025 if (!gimple_omp_return_nowait_p (gsi_stmt (gsi)))
5026 force_gimple_operand_gsi (&gsi, build_omp_barrier (), false, NULL_TREE,
5027 false, GSI_SAME_STMT);
5028 gsi_remove (&gsi, true);
773c5ba7 5029
5030 /* Connect all the blocks. */
31712e83 5031 ep = make_edge (entry_bb, third_bb, EDGE_FALSE_VALUE);
5032 ep->probability = REG_BR_PROB_BASE / 4 * 3;
5033 ep = find_edge (entry_bb, second_bb);
5034 ep->flags = EDGE_TRUE_VALUE;
5035 ep->probability = REG_BR_PROB_BASE / 4;
5036 find_edge (third_bb, seq_start_bb)->flags = EDGE_FALSE_VALUE;
5037 find_edge (third_bb, fin_bb)->flags = EDGE_TRUE_VALUE;
79acaae1 5038
ac6e3339 5039 find_edge (cont_bb, body_bb)->flags = EDGE_TRUE_VALUE;
61e47ac8 5040 find_edge (cont_bb, fin_bb)->flags = EDGE_FALSE_VALUE;
48e1416a 5041
31712e83 5042 set_immediate_dominator (CDI_DOMINATORS, second_bb, entry_bb);
5043 set_immediate_dominator (CDI_DOMINATORS, third_bb, entry_bb);
5044 set_immediate_dominator (CDI_DOMINATORS, seq_start_bb, third_bb);
79acaae1 5045 set_immediate_dominator (CDI_DOMINATORS, body_bb,
5046 recompute_dominator (CDI_DOMINATORS, body_bb));
5047 set_immediate_dominator (CDI_DOMINATORS, fin_bb,
5048 recompute_dominator (CDI_DOMINATORS, fin_bb));
04c2922b 5049
5050 struct loop *loop = alloc_loop ();
5051 loop->header = body_bb;
5052 loop->latch = cont_bb;
5053 add_loop (loop, body_bb->loop_father);
1e8e9920 5054}
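
/* Worked example (illustrative values, not from the sources above):
   for n = 10 iterations and nthreads = 4, the code above computes
   q = 10 / 4 = 2 and tt = 10 % 4 = 2.  Threads with threadid < tt
   take the true edge into SECOND_BB, where tt becomes 0 and q becomes
   3; the remaining threads keep q = 2 and tt = 2.  With
   s0 = q * threadid + tt and e0 = s0 + q this yields

	thread 0: [0, 3)   thread 1: [3, 6)
	thread 2: [6, 8)   thread 3: [8, 10)

   so the first n % nthreads threads run one extra iteration and the
   iteration space is covered exactly once.  */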
5055
773c5ba7 5056
5057/* A subroutine of expand_omp_for. Generate code for a parallel
5058 loop with static schedule and a specified chunk size. Given
5059 parameters:
1e8e9920 5060
5061 for (V = N1; V cond N2; V += STEP) BODY;
5062
5063 where COND is "<" or ">", we generate pseudocode
5064
8e6b4515 5065 if ((__typeof (V)) -1 > 0 && N2 cond N1) goto L2;
1e8e9920 5066 if (cond is <)
5067 adj = STEP - 1;
5068 else
5069 adj = STEP + 1;
fd6481cf 5070 if ((__typeof (V)) -1 > 0 && cond is >)
5071 n = -(adj + N2 - N1) / -STEP;
5072 else
5073 n = (adj + N2 - N1) / STEP;
1e8e9920 5074 trip = 0;
79acaae1 5075 V = threadid * CHUNK * STEP + N1; -- this extra definition of V is
5076 here so that V is defined
5077 if the loop is not entered
1e8e9920 5078 L0:
5079 s0 = (trip * nthreads + threadid) * CHUNK;
5080 e0 = min(s0 + CHUNK, n);
5081 if (s0 < n) goto L1; else goto L4;
5082 L1:
5083 V = s0 * STEP + N1;
5084 e = e0 * STEP + N1;
5085 L2:
5086 BODY;
5087 V += STEP;
5088 if (V cond e) goto L2; else goto L3;
5089 L3:
5090 trip += 1;
5091 goto L0;
5092 L4:
1e8e9920 5093*/
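
/* Worked example (illustrative values): with N1 = 0, N2 = 10,
   STEP = 1, CHUNK = 3 and nthreads = 2, the pseudocode above computes
   n = 10, and s0 = (trip * nthreads + threadid) * CHUNK deals the
   chunks out round-robin:

	trip 0: thread 0 -> [0, 3)   thread 1 -> [3, 6)
	trip 1: thread 0 -> [6, 9)   thread 1 -> [9, 10)
	trip 2: s0 >= n for both threads, so both exit through L4.  */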
5094
61e47ac8 5095static void
75a70cf9 5096expand_omp_for_static_chunk (struct omp_region *region, struct omp_for_data *fd)
1e8e9920 5097{
75a70cf9 5098 tree n, s0, e0, e, t;
79acaae1 5099 tree trip_var, trip_init, trip_main, trip_back, nthreads, threadid;
75a70cf9 5100 tree type, itype, v_main, v_back, v_extra;
773c5ba7 5101 basic_block entry_bb, exit_bb, body_bb, seq_start_bb, iter_part_bb;
61e47ac8 5102 basic_block trip_update_bb, cont_bb, fin_bb;
75a70cf9 5103 gimple_stmt_iterator si;
5104 gimple stmt;
5105 edge se;
1e8e9920 5106
fd6481cf 5107 itype = type = TREE_TYPE (fd->loop.v);
5108 if (POINTER_TYPE_P (type))
3cea8318 5109 itype = signed_type_for (type);
1e8e9920 5110
61e47ac8 5111 entry_bb = region->entry;
ac6e3339 5112 se = split_block (entry_bb, last_stmt (entry_bb));
5113 entry_bb = se->src;
5114 iter_part_bb = se->dest;
61e47ac8 5115 cont_bb = region->cont;
ac6e3339 5116 gcc_assert (EDGE_COUNT (iter_part_bb->succs) == 2);
5117 gcc_assert (BRANCH_EDGE (iter_part_bb)->dest
5118 == FALLTHRU_EDGE (cont_bb)->dest);
5119 seq_start_bb = split_edge (FALLTHRU_EDGE (iter_part_bb));
5120 body_bb = single_succ (seq_start_bb);
5121 gcc_assert (BRANCH_EDGE (cont_bb)->dest == body_bb);
5122 gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
5123 fin_bb = FALLTHRU_EDGE (cont_bb)->dest;
5124 trip_update_bb = split_edge (FALLTHRU_EDGE (cont_bb));
61e47ac8 5125 exit_bb = region->exit;
773c5ba7 5126
773c5ba7 5127 /* Trip and adjustment setup goes in ENTRY_BB. */
75a70cf9 5128 si = gsi_last_bb (entry_bb);
5129 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_FOR);
773c5ba7 5130
8e6b4515 5131 t = fold_binary (fd->loop.cond_code, boolean_type_node,
5132 fold_convert (type, fd->loop.n1),
5133 fold_convert (type, fd->loop.n2));
5134 if (TYPE_UNSIGNED (type)
5135 && (t == NULL_TREE || !integer_onep (t)))
5136 {
5137 tree n1, n2;
5138 n1 = fold_convert (type, unshare_expr (fd->loop.n1));
5139 n1 = force_gimple_operand_gsi (&si, n1, true, NULL_TREE,
5140 true, GSI_SAME_STMT);
5141 n2 = fold_convert (type, unshare_expr (fd->loop.n2));
5142 n2 = force_gimple_operand_gsi (&si, n2, true, NULL_TREE,
5143 true, GSI_SAME_STMT);
5144 stmt = gimple_build_cond (fd->loop.cond_code, n1, n2,
5145 NULL_TREE, NULL_TREE);
5146 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
5147 if (walk_tree (gimple_cond_lhs_ptr (stmt),
5148 expand_omp_regimplify_p, NULL, NULL)
5149 || walk_tree (gimple_cond_rhs_ptr (stmt),
5150 expand_omp_regimplify_p, NULL, NULL))
5151 {
5152 si = gsi_for_stmt (stmt);
5153 gimple_regimplify_operands (stmt, &si);
5154 }
5155 se = split_block (entry_bb, stmt);
5156 se->flags = EDGE_TRUE_VALUE;
5157 entry_bb = se->dest;
5158 se->probability = REG_BR_PROB_BASE - (REG_BR_PROB_BASE / 2000 - 1);
5159 se = make_edge (se->src, fin_bb, EDGE_FALSE_VALUE);
5160 se->probability = REG_BR_PROB_BASE / 2000 - 1;
5161 if (gimple_in_ssa_p (cfun))
5162 {
5163 int dest_idx = find_edge (entry_bb, fin_bb)->dest_idx;
5164 for (si = gsi_start_phis (fin_bb);
5165 !gsi_end_p (si); gsi_next (&si))
5166 {
5167 gimple phi = gsi_stmt (si);
5168 add_phi_arg (phi, gimple_phi_arg_def (phi, dest_idx),
5169 se, UNKNOWN_LOCATION);
5170 }
5171 }
5172 si = gsi_last_bb (entry_bb);
5173 }
5174
b9a16870 5175 t = build_call_expr (builtin_decl_explicit (BUILT_IN_OMP_GET_NUM_THREADS), 0);
fd6481cf 5176 t = fold_convert (itype, t);
75a70cf9 5177 nthreads = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
5178 true, GSI_SAME_STMT);
48e1416a 5179
b9a16870 5180 t = build_call_expr (builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM), 0);
fd6481cf 5181 t = fold_convert (itype, t);
75a70cf9 5182 threadid = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
5183 true, GSI_SAME_STMT);
79acaae1 5184
fd6481cf 5185 fd->loop.n1
75a70cf9 5186 = force_gimple_operand_gsi (&si, fold_convert (type, fd->loop.n1),
5187 true, NULL_TREE, true, GSI_SAME_STMT);
fd6481cf 5188 fd->loop.n2
75a70cf9 5189 = force_gimple_operand_gsi (&si, fold_convert (itype, fd->loop.n2),
5190 true, NULL_TREE, true, GSI_SAME_STMT);
fd6481cf 5191 fd->loop.step
75a70cf9 5192 = force_gimple_operand_gsi (&si, fold_convert (itype, fd->loop.step),
5193 true, NULL_TREE, true, GSI_SAME_STMT);
79acaae1 5194 fd->chunk_size
75a70cf9 5195 = force_gimple_operand_gsi (&si, fold_convert (itype, fd->chunk_size),
5196 true, NULL_TREE, true, GSI_SAME_STMT);
fd6481cf 5197
5198 t = build_int_cst (itype, (fd->loop.cond_code == LT_EXPR ? -1 : 1));
5199 t = fold_build2 (PLUS_EXPR, itype, fd->loop.step, t);
5200 t = fold_build2 (PLUS_EXPR, itype, t, fd->loop.n2);
5201 t = fold_build2 (MINUS_EXPR, itype, t, fold_convert (itype, fd->loop.n1));
5202 if (TYPE_UNSIGNED (itype) && fd->loop.cond_code == GT_EXPR)
5203 t = fold_build2 (TRUNC_DIV_EXPR, itype,
5204 fold_build1 (NEGATE_EXPR, itype, t),
5205 fold_build1 (NEGATE_EXPR, itype, fd->loop.step));
5206 else
5207 t = fold_build2 (TRUNC_DIV_EXPR, itype, t, fd->loop.step);
5208 t = fold_convert (itype, t);
75a70cf9 5209 n = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
5210 true, GSI_SAME_STMT);
79acaae1 5211
083152fb 5212 trip_var = create_tmp_reg (itype, ".trip");
79acaae1 5213 if (gimple_in_ssa_p (cfun))
5214 {
75a70cf9 5215 trip_init = make_ssa_name (trip_var, NULL);
5216 trip_main = make_ssa_name (trip_var, NULL);
5217 trip_back = make_ssa_name (trip_var, NULL);
79acaae1 5218 }
1e8e9920 5219 else
79acaae1 5220 {
5221 trip_init = trip_var;
5222 trip_main = trip_var;
5223 trip_back = trip_var;
5224 }
1e8e9920 5225
75a70cf9 5226 stmt = gimple_build_assign (trip_init, build_int_cst (itype, 0));
5227 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
773c5ba7 5228
fd6481cf 5229 t = fold_build2 (MULT_EXPR, itype, threadid, fd->chunk_size);
5230 t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
5231 if (POINTER_TYPE_P (type))
2cc66f2a 5232 t = fold_build_pointer_plus (fd->loop.n1, t);
fd6481cf 5233 else
5234 t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
75a70cf9 5235 v_extra = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
5236 true, GSI_SAME_STMT);
79acaae1 5237
75a70cf9 5238 /* Remove the GIMPLE_OMP_FOR. */
5239 gsi_remove (&si, true);
773c5ba7 5240
5241 /* Iteration space partitioning goes in ITER_PART_BB. */
75a70cf9 5242 si = gsi_last_bb (iter_part_bb);
1e8e9920 5243
fd6481cf 5244 t = fold_build2 (MULT_EXPR, itype, trip_main, nthreads);
5245 t = fold_build2 (PLUS_EXPR, itype, t, threadid);
5246 t = fold_build2 (MULT_EXPR, itype, t, fd->chunk_size);
75a70cf9 5247 s0 = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
5248 false, GSI_CONTINUE_LINKING);
1e8e9920 5249
fd6481cf 5250 t = fold_build2 (PLUS_EXPR, itype, s0, fd->chunk_size);
5251 t = fold_build2 (MIN_EXPR, itype, t, n);
75a70cf9 5252 e0 = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
5253 false, GSI_CONTINUE_LINKING);
1e8e9920 5254
5255 t = build2 (LT_EXPR, boolean_type_node, s0, n);
75a70cf9 5256 gsi_insert_after (&si, gimple_build_cond_empty (t), GSI_CONTINUE_LINKING);
773c5ba7 5257
5258 /* Setup code for sequential iteration goes in SEQ_START_BB. */
75a70cf9 5259 si = gsi_start_bb (seq_start_bb);
1e8e9920 5260
fd6481cf 5261 t = fold_convert (itype, s0);
5262 t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
5263 if (POINTER_TYPE_P (type))
2cc66f2a 5264 t = fold_build_pointer_plus (fd->loop.n1, t);
fd6481cf 5265 else
5266 t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
4abecb72 5267 t = force_gimple_operand_gsi (&si, t,
5268 DECL_P (fd->loop.v)
5269 && TREE_ADDRESSABLE (fd->loop.v),
5270 NULL_TREE, false, GSI_CONTINUE_LINKING);
75a70cf9 5271 stmt = gimple_build_assign (fd->loop.v, t);
5272 gsi_insert_after (&si, stmt, GSI_CONTINUE_LINKING);
1e8e9920 5273
fd6481cf 5274 t = fold_convert (itype, e0);
5275 t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
5276 if (POINTER_TYPE_P (type))
2cc66f2a 5277 t = fold_build_pointer_plus (fd->loop.n1, t);
fd6481cf 5278 else
5279 t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
75a70cf9 5280 e = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
5281 false, GSI_CONTINUE_LINKING);
1e8e9920 5282
61e47ac8 5283 /* The code controlling the sequential loop goes in CONT_BB,
75a70cf9 5284 replacing the GIMPLE_OMP_CONTINUE. */
5285 si = gsi_last_bb (cont_bb);
5286 stmt = gsi_stmt (si);
5287 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_CONTINUE);
5288 v_main = gimple_omp_continue_control_use (stmt);
5289 v_back = gimple_omp_continue_control_def (stmt);
79acaae1 5290
fd6481cf 5291 if (POINTER_TYPE_P (type))
2cc66f2a 5292 t = fold_build_pointer_plus (v_main, fd->loop.step);
fd6481cf 5293 else
75a70cf9 5294 t = fold_build2 (PLUS_EXPR, type, v_main, fd->loop.step);
4abecb72 5295 if (DECL_P (v_back) && TREE_ADDRESSABLE (v_back))
5296 t = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
5297 true, GSI_SAME_STMT);
75a70cf9 5298 stmt = gimple_build_assign (v_back, t);
5299 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
79acaae1 5300
4abecb72 5301 t = build2 (fd->loop.cond_code, boolean_type_node,
5302 DECL_P (v_back) && TREE_ADDRESSABLE (v_back)
5303 ? t : v_back, e);
75a70cf9 5304 gsi_insert_before (&si, gimple_build_cond_empty (t), GSI_SAME_STMT);
48e1416a 5305
75a70cf9 5306 /* Remove GIMPLE_OMP_CONTINUE. */
5307 gsi_remove (&si, true);
773c5ba7 5308
5309 /* Trip update code goes into TRIP_UPDATE_BB. */
75a70cf9 5310 si = gsi_start_bb (trip_update_bb);
1e8e9920 5311
fd6481cf 5312 t = build_int_cst (itype, 1);
5313 t = build2 (PLUS_EXPR, itype, trip_main, t);
75a70cf9 5314 stmt = gimple_build_assign (trip_back, t);
5315 gsi_insert_after (&si, stmt, GSI_CONTINUE_LINKING);
1e8e9920 5316
75a70cf9 5317 /* Replace the GIMPLE_OMP_RETURN with a barrier, or nothing. */
5318 si = gsi_last_bb (exit_bb);
5319 if (!gimple_omp_return_nowait_p (gsi_stmt (si)))
5320 force_gimple_operand_gsi (&si, build_omp_barrier (), false, NULL_TREE,
5321 false, GSI_SAME_STMT);
5322 gsi_remove (&si, true);
1e8e9920 5323
773c5ba7 5324 /* Connect the new blocks. */
ac6e3339 5325 find_edge (iter_part_bb, seq_start_bb)->flags = EDGE_TRUE_VALUE;
5326 find_edge (iter_part_bb, fin_bb)->flags = EDGE_FALSE_VALUE;
79acaae1 5327
ac6e3339 5328 find_edge (cont_bb, body_bb)->flags = EDGE_TRUE_VALUE;
5329 find_edge (cont_bb, trip_update_bb)->flags = EDGE_FALSE_VALUE;
79acaae1 5330
ac6e3339 5331 redirect_edge_and_branch (single_succ_edge (trip_update_bb), iter_part_bb);
79acaae1 5332
5333 if (gimple_in_ssa_p (cfun))
5334 {
75a70cf9 5335 gimple_stmt_iterator psi;
5336 gimple phi;
5337 edge re, ene;
f1f41a6c 5338 edge_var_map_vector *head;
75a70cf9 5339 edge_var_map *vm;
5340 size_t i;
5341
79acaae1 5342 /* When we redirect the edge from trip_update_bb to iter_part_bb, we
5343 remove arguments of the phi nodes in fin_bb. We need to create
5344 appropriate phi nodes in iter_part_bb instead. */
5345 se = single_pred_edge (fin_bb);
5346 re = single_succ_edge (trip_update_bb);
75a70cf9 5347 head = redirect_edge_var_map_vector (re);
79acaae1 5348 ene = single_succ_edge (entry_bb);
5349
75a70cf9 5350 psi = gsi_start_phis (fin_bb);
f1f41a6c 5351 for (i = 0; !gsi_end_p (psi) && head->iterate (i, &vm);
75a70cf9 5352 gsi_next (&psi), ++i)
79acaae1 5353 {
75a70cf9 5354 gimple nphi;
efbcb6de 5355 source_location locus;
75a70cf9 5356
5357 phi = gsi_stmt (psi);
5358 t = gimple_phi_result (phi);
5359 gcc_assert (t == redirect_edge_var_map_result (vm));
79acaae1 5360 nphi = create_phi_node (t, iter_part_bb);
79acaae1 5361
5362 t = PHI_ARG_DEF_FROM_EDGE (phi, se);
efbcb6de 5363 locus = gimple_phi_arg_location_from_edge (phi, se);
5364
fd6481cf 5365 /* A special case -- fd->loop.v is not yet computed in
5366 iter_part_bb, we need to use v_extra instead. */
5367 if (t == fd->loop.v)
79acaae1 5368 t = v_extra;
60d535d2 5369 add_phi_arg (nphi, t, ene, locus);
efbcb6de 5370 locus = redirect_edge_var_map_location (vm);
60d535d2 5371 add_phi_arg (nphi, redirect_edge_var_map_def (vm), re, locus);
75a70cf9 5372 }
f1f41a6c 5373 gcc_assert (!gsi_end_p (psi) && i == head->length ());
75a70cf9 5374 redirect_edge_var_map_clear (re);
5375 while (1)
5376 {
5377 psi = gsi_start_phis (fin_bb);
5378 if (gsi_end_p (psi))
5379 break;
5380 remove_phi_node (&psi, false);
79acaae1 5381 }
79acaae1 5382
5383 /* Make phi node for trip. */
5384 phi = create_phi_node (trip_main, iter_part_bb);
efbcb6de 5385 add_phi_arg (phi, trip_back, single_succ_edge (trip_update_bb),
60d535d2 5386 UNKNOWN_LOCATION);
efbcb6de 5387 add_phi_arg (phi, trip_init, single_succ_edge (entry_bb),
60d535d2 5388 UNKNOWN_LOCATION);
79acaae1 5389 }
5390
5391 set_immediate_dominator (CDI_DOMINATORS, trip_update_bb, cont_bb);
5392 set_immediate_dominator (CDI_DOMINATORS, iter_part_bb,
5393 recompute_dominator (CDI_DOMINATORS, iter_part_bb));
5394 set_immediate_dominator (CDI_DOMINATORS, fin_bb,
5395 recompute_dominator (CDI_DOMINATORS, fin_bb));
5396 set_immediate_dominator (CDI_DOMINATORS, seq_start_bb,
5397 recompute_dominator (CDI_DOMINATORS, seq_start_bb));
5398 set_immediate_dominator (CDI_DOMINATORS, body_bb,
5399 recompute_dominator (CDI_DOMINATORS, body_bb));
04c2922b 5400
5401 struct loop *trip_loop = alloc_loop ();
5402 trip_loop->header = iter_part_bb;
5403 trip_loop->latch = trip_update_bb;
5404 add_loop (trip_loop, iter_part_bb->loop_father);
5405
5406 struct loop *loop = alloc_loop ();
5407 loop->header = body_bb;
5408 loop->latch = cont_bb;
5409 add_loop (loop, trip_loop);
1e8e9920 5410}
5411
3d483a94 5412/* A subroutine of expand_omp_for. Generate code for a simd non-worksharing
5413 loop. Given parameters:
5414
5415 for (V = N1; V cond N2; V += STEP) BODY;
5416
5417 where COND is "<" or ">", we generate pseudocode
5418
5419 V = N1;
5420 goto L1;
5421 L0:
5422 BODY;
5423 V += STEP;
5424 L1:
5425 if (V cond N2) goto L0; else goto L2;
5426 L2:
5427
5428 For collapsed loops, given parameters:
5429 collapse(3)
5430 for (V1 = N11; V1 cond1 N12; V1 += STEP1)
5431 for (V2 = N21; V2 cond2 N22; V2 += STEP2)
5432 for (V3 = N31; V3 cond3 N32; V3 += STEP3)
5433 BODY;
5434
5435 we generate pseudocode
5436
5437 if (cond3 is <)
5438 adj = STEP3 - 1;
5439 else
5440 adj = STEP3 + 1;
5441 count3 = (adj + N32 - N31) / STEP3;
5442 if (cond2 is <)
5443 adj = STEP2 - 1;
5444 else
5445 adj = STEP2 + 1;
5446 count2 = (adj + N22 - N21) / STEP2;
5447 if (cond1 is <)
5448 adj = STEP1 - 1;
5449 else
5450 adj = STEP1 + 1;
5451 count1 = (adj + N12 - N11) / STEP1;
5452 count = count1 * count2 * count3;
5453 V = 0;
5454 V1 = N11;
5455 V2 = N21;
5456 V3 = N31;
5457 goto L1;
5458 L0:
5459 BODY;
5460 V += 1;
5461 V3 += STEP3;
5462 V2 += (V3 cond3 N32) ? 0 : STEP2;
5463 V3 = (V3 cond3 N32) ? V3 : N31;
5464 V1 += (V2 cond2 N22) ? 0 : STEP1;
5465 V2 = (V2 cond2 N22) ? V2 : N21;
5466 L1:
5467 if (V < count) goto L0; else goto L2;
5468 L2:
5469
5470 */
5471
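/* Worked example of the collapsed-loop linearization (illustrative
   values): for collapse(2) with N11 = 0, N12 = 2, STEP1 = 1 and
   N21 = 0, N22 = 3, STEP2 = 1 we get count1 = 2, count2 = 3,
   count = 6, and the single logical induction variable V enumerates

	V  = 0 1 2 3 4 5
	V1 = 0 0 0 1 1 1
	V2 = 0 1 2 0 1 2

   where the conditional updates above reset V2 to N21 and advance V1
   by STEP1 exactly when V2 steps out of its range.  */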
5472static void
5473expand_omp_simd (struct omp_region *region, struct omp_for_data *fd)
5474{
5475 tree type, t;
5476 basic_block entry_bb, cont_bb, exit_bb, l0_bb, l1_bb, l2_bb, l2_dom_bb;
5477 gimple_stmt_iterator gsi;
5478 gimple stmt;
5479 bool broken_loop = region->cont == NULL;
5480 edge e, ne;
5481 tree *counts = NULL;
5482 int i;
5483 tree safelen = find_omp_clause (gimple_omp_for_clauses (fd->for_stmt),
5484 OMP_CLAUSE_SAFELEN);
5485 tree simduid = find_omp_clause (gimple_omp_for_clauses (fd->for_stmt),
5486 OMP_CLAUSE__SIMDUID_);
5487 tree n2;
5488
5489 type = TREE_TYPE (fd->loop.v);
5490 entry_bb = region->entry;
5491 cont_bb = region->cont;
5492 gcc_assert (EDGE_COUNT (entry_bb->succs) == 2);
5493 gcc_assert (broken_loop
5494 || BRANCH_EDGE (entry_bb)->dest == FALLTHRU_EDGE (cont_bb)->dest);
5495 l0_bb = FALLTHRU_EDGE (entry_bb)->dest;
5496 if (!broken_loop)
5497 {
5498 gcc_assert (BRANCH_EDGE (cont_bb)->dest == l0_bb);
5499 gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
5500 l1_bb = split_block (cont_bb, last_stmt (cont_bb))->dest;
5501 l2_bb = BRANCH_EDGE (entry_bb)->dest;
5502 }
5503 else
5504 {
5505 BRANCH_EDGE (entry_bb)->flags &= ~EDGE_ABNORMAL;
5506 l1_bb = split_edge (BRANCH_EDGE (entry_bb));
5507 l2_bb = single_succ (l1_bb);
5508 }
5509 exit_bb = region->exit;
5510 l2_dom_bb = NULL;
5511
5512 gsi = gsi_last_bb (entry_bb);
5513
5514 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
5515 /* Not needed in SSA form right now. */
5516 gcc_assert (!gimple_in_ssa_p (cfun));
5517 if (fd->collapse > 1)
5518 {
5519 int first_zero_iter = -1;
5520 basic_block zero_iter_bb = l2_bb;
5521
5522 counts = XALLOCAVEC (tree, fd->collapse);
5523 expand_omp_for_init_counts (fd, &gsi, entry_bb, counts,
5524 zero_iter_bb, first_zero_iter,
5525 l2_dom_bb);
5526 }
5527 if (l2_dom_bb == NULL)
5528 l2_dom_bb = l1_bb;
5529
5530 n2 = fd->loop.n2;
5531 if (0)
 5532	    /* Placeholder for gimple_omp_for_combined_into_p() in
5533 the upcoming gomp-4_0-branch merge. */;
5534 else
5535 {
5536 expand_omp_build_assign (&gsi, fd->loop.v,
5537 fold_convert (type, fd->loop.n1));
5538 if (fd->collapse > 1)
5539 for (i = 0; i < fd->collapse; i++)
5540 {
5541 tree itype = TREE_TYPE (fd->loops[i].v);
5542 if (POINTER_TYPE_P (itype))
5543 itype = signed_type_for (itype);
5544 t = fold_convert (TREE_TYPE (fd->loops[i].v), fd->loops[i].n1);
5545 expand_omp_build_assign (&gsi, fd->loops[i].v, t);
5546 }
5547 }
5548
5549 /* Remove the GIMPLE_OMP_FOR statement. */
5550 gsi_remove (&gsi, true);
5551
5552 if (!broken_loop)
5553 {
5554 /* Code to control the increment goes in the CONT_BB. */
5555 gsi = gsi_last_bb (cont_bb);
5556 stmt = gsi_stmt (gsi);
5557 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_CONTINUE);
5558
5559 if (POINTER_TYPE_P (type))
5560 t = fold_build_pointer_plus (fd->loop.v, fd->loop.step);
5561 else
5562 t = fold_build2 (PLUS_EXPR, type, fd->loop.v, fd->loop.step);
5563 expand_omp_build_assign (&gsi, fd->loop.v, t);
5564
5565 if (fd->collapse > 1)
5566 {
5567 i = fd->collapse - 1;
5568 if (POINTER_TYPE_P (TREE_TYPE (fd->loops[i].v)))
5569 {
5570 t = fold_convert (sizetype, fd->loops[i].step);
5571 t = fold_build_pointer_plus (fd->loops[i].v, t);
5572 }
5573 else
5574 {
5575 t = fold_convert (TREE_TYPE (fd->loops[i].v),
5576 fd->loops[i].step);
5577 t = fold_build2 (PLUS_EXPR, TREE_TYPE (fd->loops[i].v),
5578 fd->loops[i].v, t);
5579 }
5580 expand_omp_build_assign (&gsi, fd->loops[i].v, t);
5581
5582 for (i = fd->collapse - 1; i > 0; i--)
5583 {
5584 tree itype = TREE_TYPE (fd->loops[i].v);
5585 tree itype2 = TREE_TYPE (fd->loops[i - 1].v);
5586 if (POINTER_TYPE_P (itype2))
5587 itype2 = signed_type_for (itype2);
5588 t = build3 (COND_EXPR, itype2,
5589 build2 (fd->loops[i].cond_code, boolean_type_node,
5590 fd->loops[i].v,
5591 fold_convert (itype, fd->loops[i].n2)),
5592 build_int_cst (itype2, 0),
5593 fold_convert (itype2, fd->loops[i - 1].step));
5594 if (POINTER_TYPE_P (TREE_TYPE (fd->loops[i - 1].v)))
5595 t = fold_build_pointer_plus (fd->loops[i - 1].v, t);
5596 else
5597 t = fold_build2 (PLUS_EXPR, itype2, fd->loops[i - 1].v, t);
5598 expand_omp_build_assign (&gsi, fd->loops[i - 1].v, t);
5599
5600 t = build3 (COND_EXPR, itype,
5601 build2 (fd->loops[i].cond_code, boolean_type_node,
5602 fd->loops[i].v,
5603 fold_convert (itype, fd->loops[i].n2)),
5604 fd->loops[i].v,
5605 fold_convert (itype, fd->loops[i].n1));
5606 expand_omp_build_assign (&gsi, fd->loops[i].v, t);
5607 }
5608 }
5609
5610 /* Remove GIMPLE_OMP_CONTINUE. */
5611 gsi_remove (&gsi, true);
5612 }
5613
5614 /* Emit the condition in L1_BB. */
5615 gsi = gsi_start_bb (l1_bb);
5616
5617 t = fold_convert (type, n2);
5618 t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
5619 false, GSI_CONTINUE_LINKING);
5620 t = build2 (fd->loop.cond_code, boolean_type_node, fd->loop.v, t);
5621 stmt = gimple_build_cond_empty (t);
5622 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
5623 if (walk_tree (gimple_cond_lhs_ptr (stmt), expand_omp_regimplify_p,
5624 NULL, NULL)
5625 || walk_tree (gimple_cond_rhs_ptr (stmt), expand_omp_regimplify_p,
5626 NULL, NULL))
5627 {
5628 gsi = gsi_for_stmt (stmt);
5629 gimple_regimplify_operands (stmt, &gsi);
5630 }
5631
5632 /* Remove GIMPLE_OMP_RETURN. */
5633 gsi = gsi_last_bb (exit_bb);
5634 gsi_remove (&gsi, true);
5635
5636 /* Connect the new blocks. */
5637 remove_edge (FALLTHRU_EDGE (entry_bb));
5638
5639 if (!broken_loop)
5640 {
5641 remove_edge (BRANCH_EDGE (entry_bb));
5642 make_edge (entry_bb, l1_bb, EDGE_FALLTHRU);
5643
5644 e = BRANCH_EDGE (l1_bb);
5645 ne = FALLTHRU_EDGE (l1_bb);
5646 e->flags = EDGE_TRUE_VALUE;
5647 }
5648 else
5649 {
5650 single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
5651
5652 ne = single_succ_edge (l1_bb);
5653 e = make_edge (l1_bb, l0_bb, EDGE_TRUE_VALUE);
5654
5655 }
5656 ne->flags = EDGE_FALSE_VALUE;
5657 e->probability = REG_BR_PROB_BASE * 7 / 8;
5658 ne->probability = REG_BR_PROB_BASE / 8;
5659
5660 set_immediate_dominator (CDI_DOMINATORS, l1_bb, entry_bb);
5661 set_immediate_dominator (CDI_DOMINATORS, l2_bb, l2_dom_bb);
5662 set_immediate_dominator (CDI_DOMINATORS, l0_bb, l1_bb);
5663
5664 if (!broken_loop)
5665 {
5666 struct loop *loop = alloc_loop ();
5667 loop->header = l1_bb;
5668 loop->latch = e->dest;
5669 add_loop (loop, l1_bb->loop_father);
5670 if (safelen == NULL_TREE)
5671 loop->safelen = INT_MAX;
5672 else
5673 {
5674 safelen = OMP_CLAUSE_SAFELEN_EXPR (safelen);
5675 if (!host_integerp (safelen, 1)
5676 || (unsigned HOST_WIDE_INT) tree_low_cst (safelen, 1)
5677 > INT_MAX)
5678 loop->safelen = INT_MAX;
5679 else
5680 loop->safelen = tree_low_cst (safelen, 1);
5681 if (loop->safelen == 1)
5682 loop->safelen = 0;
5683 }
5684 if (simduid)
5685 {
5686 loop->simduid = OMP_CLAUSE__SIMDUID__DECL (simduid);
5687 cfun->has_simduid_loops = true;
5688 }
043115ec 5689 /* If not -fno-tree-loop-vectorize, hint that we want to vectorize
3d483a94 5690 the loop. */
043115ec 5691 if ((flag_tree_loop_vectorize
5692 || (!global_options_set.x_flag_tree_loop_vectorize
5693 && !global_options_set.x_flag_tree_vectorize))
3d483a94 5694 && loop->safelen > 1)
5695 {
5696 loop->force_vect = true;
5697 cfun->has_force_vect_loops = true;
5698 }
5699 }
5700}
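
/* Illustrative effect of the clauses handled above: "#pragma omp simd
   safelen(8)" yields loop->safelen = 8, an absent safelen clause
   yields INT_MAX (no limit on the distance between iterations that
   may run concurrently), and safelen(1) is canonicalized to 0 since a
   vectorization factor of 1 buys nothing.  A _simduid_ clause stores
   its decl in loop->simduid so later passes can match the loop with
   the SIMD bookkeeping created during lowering.  */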
5701
1e8e9920 5702
773c5ba7 5703/* Expand the OpenMP loop defined by REGION. */
1e8e9920 5704
773c5ba7 5705static void
5706expand_omp_for (struct omp_region *region)
5707{
5708 struct omp_for_data fd;
fd6481cf 5709 struct omp_for_data_loop *loops;
1e8e9920 5710
fd6481cf 5711 loops
5712 = (struct omp_for_data_loop *)
75a70cf9 5713 alloca (gimple_omp_for_collapse (last_stmt (region->entry))
fd6481cf 5714 * sizeof (struct omp_for_data_loop));
fd6481cf 5715 extract_omp_for_data (last_stmt (region->entry), &fd, loops);
f77459c5 5716 region->sched_kind = fd.sched_kind;
1e8e9920 5717
b3a3ddec 5718 gcc_assert (EDGE_COUNT (region->entry->succs) == 2);
5719 BRANCH_EDGE (region->entry)->flags &= ~EDGE_ABNORMAL;
5720 FALLTHRU_EDGE (region->entry)->flags &= ~EDGE_ABNORMAL;
5721 if (region->cont)
5722 {
5723 gcc_assert (EDGE_COUNT (region->cont->succs) == 2);
5724 BRANCH_EDGE (region->cont)->flags &= ~EDGE_ABNORMAL;
5725 FALLTHRU_EDGE (region->cont)->flags &= ~EDGE_ABNORMAL;
5726 }
04c2922b 5727 else
75de4aa2 5728    /* If there isn't a continue then this is a degenerate case where
04c2922b 5729 the introduction of abnormal edges during lowering will prevent
5730 original loops from being detected. Fix that up. */
5731 loops_state_set (LOOPS_NEED_FIXUP);
b3a3ddec 5732
3d483a94 5733 if (gimple_omp_for_kind (fd.for_stmt) == GF_OMP_FOR_KIND_SIMD)
5734 expand_omp_simd (region, &fd);
5735 else if (fd.sched_kind == OMP_CLAUSE_SCHEDULE_STATIC
03ed154b 5736 && !fd.have_ordered
fd6481cf 5737 && fd.collapse == 1
ac6e3339 5738 && region->cont != NULL)
1e8e9920 5739 {
5740 if (fd.chunk_size == NULL)
61e47ac8 5741 expand_omp_for_static_nochunk (region, &fd);
1e8e9920 5742 else
61e47ac8 5743 expand_omp_for_static_chunk (region, &fd);
1e8e9920 5744 }
5745 else
5746 {
fd6481cf 5747 int fn_index, start_ix, next_ix;
5748
3d483a94 5749 gcc_assert (gimple_omp_for_kind (fd.for_stmt)
5750 == GF_OMP_FOR_KIND_FOR);
0416ca72 5751 if (fd.chunk_size == NULL
5752 && fd.sched_kind == OMP_CLAUSE_SCHEDULE_STATIC)
5753 fd.chunk_size = integer_zero_node;
fd6481cf 5754 gcc_assert (fd.sched_kind != OMP_CLAUSE_SCHEDULE_AUTO);
5755 fn_index = (fd.sched_kind == OMP_CLAUSE_SCHEDULE_RUNTIME)
75a70cf9 5756 ? 3 : fd.sched_kind;
fd6481cf 5757 fn_index += fd.have_ordered * 4;
b9a16870 5758 start_ix = ((int)BUILT_IN_GOMP_LOOP_STATIC_START) + fn_index;
5759 next_ix = ((int)BUILT_IN_GOMP_LOOP_STATIC_NEXT) + fn_index;
fd6481cf 5760 if (fd.iter_type == long_long_unsigned_type_node)
5761 {
b9a16870 5762 start_ix += ((int)BUILT_IN_GOMP_LOOP_ULL_STATIC_START
5763 - (int)BUILT_IN_GOMP_LOOP_STATIC_START);
5764 next_ix += ((int)BUILT_IN_GOMP_LOOP_ULL_STATIC_NEXT
5765 - (int)BUILT_IN_GOMP_LOOP_STATIC_NEXT);
fd6481cf 5766 }
b9c74b4d 5767 expand_omp_for_generic (region, &fd, (enum built_in_function) start_ix,
5768 (enum built_in_function) next_ix);
1e8e9920 5769 }
28c92cbb 5770
083152fb 5771 if (gimple_in_ssa_p (cfun))
5772 update_ssa (TODO_update_ssa_only_virtuals);
1e8e9920 5773}
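
/* Illustrative mapping from the schedule clause to libgomp entry
   points (the example directives are hypothetical; the builtins are
   the ones indexed above).  fn_index is 0..3 for static, dynamic,
   guided and runtime, plus 4 when an ordered clause is present:

	#pragma omp for schedule(dynamic, 4)
	  -> GOMP_loop_dynamic_start / GOMP_loop_dynamic_next

	#pragma omp for ordered schedule(static)
	  -> GOMP_loop_ordered_static_start / GOMP_loop_ordered_static_next

   Loops whose iteration type is unsigned long long shift both indices
   to the corresponding GOMP_loop_ull_* entry points.  */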
5774
1e8e9920 5775
 5776/* Expand code for an OpenMP sections directive. In pseudocode, we generate
5777
1e8e9920 5778 v = GOMP_sections_start (n);
5779 L0:
5780 switch (v)
5781 {
5782 case 0:
5783 goto L2;
5784 case 1:
5785 section 1;
5786 goto L1;
5787 case 2:
5788 ...
5789 case n:
5790 ...
1e8e9920 5791 default:
5792 abort ();
5793 }
5794 L1:
5795 v = GOMP_sections_next ();
5796 goto L0;
5797 L2:
5798 reduction;
5799
773c5ba7 5800 If this is a combined parallel sections, replace the call to
79acaae1 5801 GOMP_sections_start with call to GOMP_sections_next. */
1e8e9920 5802
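/* For instance (hypothetical user code), a directive with two sections

	#pragma omp sections
	{
	  #pragma omp section
	    work1 ();
	  #pragma omp section
	    work2 ();
	}

   becomes v = GOMP_sections_start (2) feeding a switch in which case 1
   runs work1 (), case 2 runs work2 (), and case 0 leaves the construct;
   each thread then calls GOMP_sections_next () until it returns 0.  */
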
5803static void
773c5ba7 5804expand_omp_sections (struct omp_region *region)
1e8e9920 5805{
f018d957 5806 tree t, u, vin = NULL, vmain, vnext, l2;
f1f41a6c 5807 vec<tree> label_vec;
75a70cf9 5808 unsigned len;
ac6e3339 5809 basic_block entry_bb, l0_bb, l1_bb, l2_bb, default_bb;
75a70cf9 5810 gimple_stmt_iterator si, switch_si;
5811 gimple sections_stmt, stmt, cont;
9884aaf8 5812 edge_iterator ei;
5813 edge e;
61e47ac8 5814 struct omp_region *inner;
75a70cf9 5815 unsigned i, casei;
ac6e3339 5816 bool exit_reachable = region->cont != NULL;
1e8e9920 5817
d244d9de 5818 gcc_assert (region->exit != NULL);
61e47ac8 5819 entry_bb = region->entry;
ac6e3339 5820 l0_bb = single_succ (entry_bb);
61e47ac8 5821 l1_bb = region->cont;
ac6e3339 5822 l2_bb = region->exit;
d244d9de 5823 if (single_pred_p (l2_bb) && single_pred (l2_bb) == l0_bb)
5824 l2 = gimple_block_label (l2_bb);
5825 else
03ed154b 5826 {
d244d9de 5827 /* This can happen if there are reductions. */
5828 len = EDGE_COUNT (l0_bb->succs);
5829 gcc_assert (len > 0);
5830 e = EDGE_SUCC (l0_bb, len - 1);
5831 si = gsi_last_bb (e->dest);
5832 l2 = NULL_TREE;
5833 if (gsi_end_p (si)
5834 || gimple_code (gsi_stmt (si)) != GIMPLE_OMP_SECTION)
5835 l2 = gimple_block_label (e->dest);
9884aaf8 5836 else
d244d9de 5837 FOR_EACH_EDGE (e, ei, l0_bb->succs)
5838 {
5839 si = gsi_last_bb (e->dest);
5840 if (gsi_end_p (si)
5841 || gimple_code (gsi_stmt (si)) != GIMPLE_OMP_SECTION)
9884aaf8 5842 {
d244d9de 5843 l2 = gimple_block_label (e->dest);
5844 break;
9884aaf8 5845 }
d244d9de 5846 }
03ed154b 5847 }
d244d9de 5848 if (exit_reachable)
5849 default_bb = create_empty_bb (l1_bb->prev_bb);
03ed154b 5850 else
d244d9de 5851 default_bb = create_empty_bb (l0_bb);
773c5ba7 5852
5853 /* We will build a switch() with enough cases for all the
75a70cf9 5854     GIMPLE_OMP_SECTION regions, a '0' case taken when no more work remains,
773c5ba7 5855 and a default case to abort if something goes wrong. */
ac6e3339 5856 len = EDGE_COUNT (l0_bb->succs);
75a70cf9 5857
f1f41a6c 5858 /* Use vec::quick_push on label_vec throughout, since we know the size
75a70cf9 5859 in advance. */
f1f41a6c 5860 label_vec.create (len);
1e8e9920 5861
61e47ac8 5862 /* The call to GOMP_sections_start goes in ENTRY_BB, replacing the
75a70cf9 5863 GIMPLE_OMP_SECTIONS statement. */
5864 si = gsi_last_bb (entry_bb);
5865 sections_stmt = gsi_stmt (si);
5866 gcc_assert (gimple_code (sections_stmt) == GIMPLE_OMP_SECTIONS);
5867 vin = gimple_omp_sections_control (sections_stmt);
773c5ba7 5868 if (!is_combined_parallel (region))
1e8e9920 5869 {
773c5ba7 5870 /* If we are not inside a combined parallel+sections region,
5871 call GOMP_sections_start. */
ac6e3339 5872 t = build_int_cst (unsigned_type_node,
5873 exit_reachable ? len - 1 : len);
b9a16870 5874 u = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_START);
75a70cf9 5875 stmt = gimple_build_call (u, 1, t);
1e8e9920 5876 }
79acaae1 5877 else
5878 {
5879 /* Otherwise, call GOMP_sections_next. */
b9a16870 5880 u = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_NEXT);
75a70cf9 5881 stmt = gimple_build_call (u, 0);
79acaae1 5882 }
75a70cf9 5883 gimple_call_set_lhs (stmt, vin);
5884 gsi_insert_after (&si, stmt, GSI_SAME_STMT);
5885 gsi_remove (&si, true);
5886
5887 /* The switch() statement replacing GIMPLE_OMP_SECTIONS_SWITCH goes in
5888 L0_BB. */
5889 switch_si = gsi_last_bb (l0_bb);
5890 gcc_assert (gimple_code (gsi_stmt (switch_si)) == GIMPLE_OMP_SECTIONS_SWITCH);
79acaae1 5891 if (exit_reachable)
5892 {
5893 cont = last_stmt (l1_bb);
75a70cf9 5894 gcc_assert (gimple_code (cont) == GIMPLE_OMP_CONTINUE);
5895 vmain = gimple_omp_continue_control_use (cont);
5896 vnext = gimple_omp_continue_control_def (cont);
79acaae1 5897 }
5898 else
5899 {
5900 vmain = vin;
5901 vnext = NULL_TREE;
5902 }
1e8e9920 5903
d244d9de 5904 t = build_case_label (build_int_cst (unsigned_type_node, 0), NULL, l2);
f1f41a6c 5905 label_vec.quick_push (t);
d244d9de 5906 i = 1;
03ed154b 5907
75a70cf9 5908 /* Convert each GIMPLE_OMP_SECTION into a CASE_LABEL_EXPR. */
ac6e3339 5909 for (inner = region->inner, casei = 1;
5910 inner;
5911 inner = inner->next, i++, casei++)
1e8e9920 5912 {
773c5ba7 5913 basic_block s_entry_bb, s_exit_bb;
5914
9884aaf8 5915 /* Skip optional reduction region. */
75a70cf9 5916 if (inner->type == GIMPLE_OMP_ATOMIC_LOAD)
9884aaf8 5917 {
5918 --i;
5919 --casei;
5920 continue;
5921 }
5922
61e47ac8 5923 s_entry_bb = inner->entry;
5924 s_exit_bb = inner->exit;
1e8e9920 5925
75a70cf9 5926 t = gimple_block_label (s_entry_bb);
ac6e3339 5927 u = build_int_cst (unsigned_type_node, casei);
b6e3dd65 5928 u = build_case_label (u, NULL, t);
f1f41a6c 5929 label_vec.quick_push (u);
61e47ac8 5930
75a70cf9 5931 si = gsi_last_bb (s_entry_bb);
5932 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SECTION);
5933 gcc_assert (i < len || gimple_omp_section_last_p (gsi_stmt (si)));
5934 gsi_remove (&si, true);
61e47ac8 5935 single_succ_edge (s_entry_bb)->flags = EDGE_FALLTHRU;
03ed154b 5936
5937 if (s_exit_bb == NULL)
5938 continue;
5939
75a70cf9 5940 si = gsi_last_bb (s_exit_bb);
5941 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_RETURN);
5942 gsi_remove (&si, true);
03ed154b 5943
773c5ba7 5944 single_succ_edge (s_exit_bb)->flags = EDGE_FALLTHRU;
1e8e9920 5945 }
5946
773c5ba7 5947 /* Error handling code goes in DEFAULT_BB. */
75a70cf9 5948 t = gimple_block_label (default_bb);
b6e3dd65 5949 u = build_case_label (NULL, NULL, t);
61e47ac8 5950 make_edge (l0_bb, default_bb, 0);
f6568ea4 5951 if (current_loops)
04c2922b 5952 add_bb_to_loop (default_bb, current_loops->tree_root);
1e8e9920 5953
49a70175 5954 stmt = gimple_build_switch (vmain, u, label_vec);
75a70cf9 5955 gsi_insert_after (&switch_si, stmt, GSI_SAME_STMT);
5956 gsi_remove (&switch_si, true);
f1f41a6c 5957 label_vec.release ();
75a70cf9 5958
5959 si = gsi_start_bb (default_bb);
b9a16870 5960 stmt = gimple_build_call (builtin_decl_explicit (BUILT_IN_TRAP), 0);
75a70cf9 5961 gsi_insert_after (&si, stmt, GSI_CONTINUE_LINKING);
773c5ba7 5962
ac6e3339 5963 if (exit_reachable)
03ed154b 5964 {
b9a16870 5965 tree bfn_decl;
5966
ac6e3339 5967 /* Code to get the next section goes in L1_BB. */
75a70cf9 5968 si = gsi_last_bb (l1_bb);
5969 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_CONTINUE);
1e8e9920 5970
b9a16870 5971 bfn_decl = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_NEXT);
5972 stmt = gimple_build_call (bfn_decl, 0);
75a70cf9 5973 gimple_call_set_lhs (stmt, vnext);
5974 gsi_insert_after (&si, stmt, GSI_SAME_STMT);
5975 gsi_remove (&si, true);
773c5ba7 5976
ac6e3339 5977 single_succ_edge (l1_bb)->flags = EDGE_FALLTHRU;
03ed154b 5978 }
773c5ba7 5979
d244d9de 5980 /* Cleanup function replaces GIMPLE_OMP_RETURN in EXIT_BB. */
5981 si = gsi_last_bb (l2_bb);
5982 if (gimple_omp_return_nowait_p (gsi_stmt (si)))
5983 t = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_END_NOWAIT);
5984 else
5985 t = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_END);
5986 stmt = gimple_build_call (t, 0);
5987 gsi_insert_after (&si, stmt, GSI_SAME_STMT);
5988 gsi_remove (&si, true);
5989
79acaae1 5990 set_immediate_dominator (CDI_DOMINATORS, default_bb, l0_bb);
773c5ba7 5991}
1e8e9920 5992
1e8e9920 5993
61e47ac8 5994/* Expand code for an OpenMP single directive. We've already expanded
 5995   much of the code; here we simply place the GOMP_barrier call.  */
5996
5997static void
5998expand_omp_single (struct omp_region *region)
5999{
6000 basic_block entry_bb, exit_bb;
75a70cf9 6001 gimple_stmt_iterator si;
61e47ac8 6002 bool need_barrier = false;
6003
6004 entry_bb = region->entry;
6005 exit_bb = region->exit;
6006
75a70cf9 6007 si = gsi_last_bb (entry_bb);
61e47ac8 6008 /* The terminal barrier at the end of a GOMP_single_copy sequence cannot
6009 be removed. We need to ensure that the thread that entered the single
6010 does not exit before the data is copied out by the other threads. */
75a70cf9 6011 if (find_omp_clause (gimple_omp_single_clauses (gsi_stmt (si)),
61e47ac8 6012 OMP_CLAUSE_COPYPRIVATE))
6013 need_barrier = true;
75a70cf9 6014 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SINGLE);
6015 gsi_remove (&si, true);
61e47ac8 6016 single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
6017
75a70cf9 6018 si = gsi_last_bb (exit_bb);
6019 if (!gimple_omp_return_nowait_p (gsi_stmt (si)) || need_barrier)
6020 force_gimple_operand_gsi (&si, build_omp_barrier (), false, NULL_TREE,
6021 false, GSI_SAME_STMT);
6022 gsi_remove (&si, true);
61e47ac8 6023 single_succ_edge (exit_bb)->flags = EDGE_FALLTHRU;
6024}
6025
6026
6027/* Generic expansion for OpenMP synchronization directives: master,
6028 ordered and critical. All we need to do here is remove the entry
6029 and exit markers for REGION. */
773c5ba7 6030
6031static void
6032expand_omp_synch (struct omp_region *region)
6033{
6034 basic_block entry_bb, exit_bb;
75a70cf9 6035 gimple_stmt_iterator si;
773c5ba7 6036
61e47ac8 6037 entry_bb = region->entry;
6038 exit_bb = region->exit;
773c5ba7 6039
75a70cf9 6040 si = gsi_last_bb (entry_bb);
6041 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SINGLE
6042 || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_MASTER
6043 || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ORDERED
6044 || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_CRITICAL);
6045 gsi_remove (&si, true);
773c5ba7 6046 single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
6047
03ed154b 6048 if (exit_bb)
6049 {
75a70cf9 6050 si = gsi_last_bb (exit_bb);
6051 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_RETURN);
6052 gsi_remove (&si, true);
03ed154b 6053 single_succ_edge (exit_bb)->flags = EDGE_FALLTHRU;
6054 }
773c5ba7 6055}
1e8e9920 6056
2169f33b 6057/* A subroutine of expand_omp_atomic. Attempt to implement the atomic
6058 operation as a normal volatile load. */
6059
6060static bool
3ec11c49 6061expand_omp_atomic_load (basic_block load_bb, tree addr,
6062 tree loaded_val, int index)
2169f33b 6063{
3ec11c49 6064 enum built_in_function tmpbase;
6065 gimple_stmt_iterator gsi;
6066 basic_block store_bb;
6067 location_t loc;
6068 gimple stmt;
6069 tree decl, call, type, itype;
6070
6071 gsi = gsi_last_bb (load_bb);
6072 stmt = gsi_stmt (gsi);
6073 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_ATOMIC_LOAD);
6074 loc = gimple_location (stmt);
6075
6076 /* ??? If the target does not implement atomic_load_optab[mode], and mode
6077 is smaller than word size, then expand_atomic_load assumes that the load
6078 is atomic. We could avoid the builtin entirely in this case. */
6079
6080 tmpbase = (enum built_in_function) (BUILT_IN_ATOMIC_LOAD_N + index + 1);
6081 decl = builtin_decl_explicit (tmpbase);
6082 if (decl == NULL_TREE)
6083 return false;
6084
6085 type = TREE_TYPE (loaded_val);
6086 itype = TREE_TYPE (TREE_TYPE (decl));
6087
6088 call = build_call_expr_loc (loc, decl, 2, addr,
6089 build_int_cst (NULL, MEMMODEL_RELAXED));
6090 if (!useless_type_conversion_p (type, itype))
6091 call = fold_build1_loc (loc, VIEW_CONVERT_EXPR, type, call);
6092 call = build2_loc (loc, MODIFY_EXPR, void_type_node, loaded_val, call);
6093
6094 force_gimple_operand_gsi (&gsi, call, true, NULL_TREE, true, GSI_SAME_STMT);
6095 gsi_remove (&gsi, true);
6096
6097 store_bb = single_succ (load_bb);
6098 gsi = gsi_last_bb (store_bb);
6099 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_ATOMIC_STORE);
6100 gsi_remove (&gsi, true);
6101
6102 if (gimple_in_ssa_p (cfun))
6103 update_ssa (TODO_update_ssa_no_phi);
6104
6105 return true;
2169f33b 6106}
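
/* Illustrative result (hypothetical user code): for an int *p,

	#pragma omp atomic read
	  v = *p;

   expands to v = __atomic_load_4 (p, MEMMODEL_RELAXED), index == 2
   selecting the 4-byte variant of BUILT_IN_ATOMIC_LOAD_N.  */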
6107
6108/* A subroutine of expand_omp_atomic. Attempt to implement the atomic
6109 operation as a normal volatile store. */
6110
6111static bool
3ec11c49 6112expand_omp_atomic_store (basic_block load_bb, tree addr,
6113 tree loaded_val, tree stored_val, int index)
2169f33b 6114{
3ec11c49 6115 enum built_in_function tmpbase;
6116 gimple_stmt_iterator gsi;
6117 basic_block store_bb = single_succ (load_bb);
6118 location_t loc;
6119 gimple stmt;
6120 tree decl, call, type, itype;
6121 enum machine_mode imode;
6122 bool exchange;
6123
6124 gsi = gsi_last_bb (load_bb);
6125 stmt = gsi_stmt (gsi);
6126 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_ATOMIC_LOAD);
6127
6128 /* If the load value is needed, then this isn't a store but an exchange. */
6129 exchange = gimple_omp_atomic_need_value_p (stmt);
6130
6131 gsi = gsi_last_bb (store_bb);
6132 stmt = gsi_stmt (gsi);
6133 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_ATOMIC_STORE);
6134 loc = gimple_location (stmt);
6135
6136 /* ??? If the target does not implement atomic_store_optab[mode], and mode
6137 is smaller than word size, then expand_atomic_store assumes that the store
6138 is atomic. We could avoid the builtin entirely in this case. */
6139
6140 tmpbase = (exchange ? BUILT_IN_ATOMIC_EXCHANGE_N : BUILT_IN_ATOMIC_STORE_N);
6141 tmpbase = (enum built_in_function) ((int) tmpbase + index + 1);
6142 decl = builtin_decl_explicit (tmpbase);
6143 if (decl == NULL_TREE)
6144 return false;
6145
6146 type = TREE_TYPE (stored_val);
6147
6148 /* Dig out the type of the function's second argument. */
6149 itype = TREE_TYPE (decl);
6150 itype = TYPE_ARG_TYPES (itype);
6151 itype = TREE_CHAIN (itype);
6152 itype = TREE_VALUE (itype);
6153 imode = TYPE_MODE (itype);
6154
6155 if (exchange && !can_atomic_exchange_p (imode, true))
6156 return false;
6157
6158 if (!useless_type_conversion_p (itype, type))
6159 stored_val = fold_build1_loc (loc, VIEW_CONVERT_EXPR, itype, stored_val);
6160 call = build_call_expr_loc (loc, decl, 3, addr, stored_val,
6161 build_int_cst (NULL, MEMMODEL_RELAXED));
6162 if (exchange)
6163 {
6164 if (!useless_type_conversion_p (type, itype))
6165 call = build1_loc (loc, VIEW_CONVERT_EXPR, type, call);
6166 call = build2_loc (loc, MODIFY_EXPR, void_type_node, loaded_val, call);
6167 }
6168
6169 force_gimple_operand_gsi (&gsi, call, true, NULL_TREE, true, GSI_SAME_STMT);
6170 gsi_remove (&gsi, true);
6171
6172 /* Remove the GIMPLE_OMP_ATOMIC_LOAD that we verified above. */
6173 gsi = gsi_last_bb (load_bb);
6174 gsi_remove (&gsi, true);
6175
6176 if (gimple_in_ssa_p (cfun))
6177 update_ssa (TODO_update_ssa_no_phi);
6178
6179 return true;
2169f33b 6180}
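
/* Illustrative results (hypothetical user code): for an int *p,

	#pragma omp atomic write
	  *p = v;

   expands to __atomic_store_4 (p, v, MEMMODEL_RELAXED), while a
   capture form that still needs the previous value takes the exchange
   path and assigns the result of __atomic_exchange_4 to loaded_val.  */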
6181
cb7f680b 6182/* A subroutine of expand_omp_atomic. Attempt to implement the atomic
1cd6e20d 6183 operation as a __atomic_fetch_op builtin. INDEX is log2 of the
cb7f680b 6184 size of the data type, and thus usable to find the index of the builtin
6185 decl. Returns false if the expression is not of the proper form. */
6186
6187static bool
6188expand_omp_atomic_fetch_op (basic_block load_bb,
6189 tree addr, tree loaded_val,
6190 tree stored_val, int index)
6191{
b9a16870 6192 enum built_in_function oldbase, newbase, tmpbase;
cb7f680b 6193 tree decl, itype, call;
2169f33b 6194 tree lhs, rhs;
cb7f680b 6195 basic_block store_bb = single_succ (load_bb);
75a70cf9 6196 gimple_stmt_iterator gsi;
6197 gimple stmt;
389dd41b 6198 location_t loc;
1cd6e20d 6199 enum tree_code code;
2169f33b 6200 bool need_old, need_new;
1cd6e20d 6201 enum machine_mode imode;
cb7f680b 6202
6203 /* We expect to find the following sequences:
48e1416a 6204
cb7f680b 6205 load_bb:
75a70cf9 6206 GIMPLE_OMP_ATOMIC_LOAD (tmp, mem)
cb7f680b 6207
6208 store_bb:
6209 val = tmp OP something; (or: something OP tmp)
48e1416a 6210 GIMPLE_OMP_STORE (val)
cb7f680b 6211
48e1416a 6212 ???FIXME: Allow a more flexible sequence.
cb7f680b 6213 Perhaps use data flow to pick the statements.
48e1416a 6214
cb7f680b 6215 */
6216
75a70cf9 6217 gsi = gsi_after_labels (store_bb);
6218 stmt = gsi_stmt (gsi);
389dd41b 6219 loc = gimple_location (stmt);
75a70cf9 6220 if (!is_gimple_assign (stmt))
cb7f680b 6221 return false;
75a70cf9 6222 gsi_next (&gsi);
6223 if (gimple_code (gsi_stmt (gsi)) != GIMPLE_OMP_ATOMIC_STORE)
cb7f680b 6224 return false;
2169f33b 6225 need_new = gimple_omp_atomic_need_value_p (gsi_stmt (gsi));
6226 need_old = gimple_omp_atomic_need_value_p (last_stmt (load_bb));
6227 gcc_checking_assert (!need_old || !need_new);
cb7f680b 6228
75a70cf9 6229 if (!operand_equal_p (gimple_assign_lhs (stmt), stored_val, 0))
cb7f680b 6230 return false;
6231
cb7f680b 6232 /* Check for one of the supported fetch-op operations. */
1cd6e20d 6233 code = gimple_assign_rhs_code (stmt);
6234 switch (code)
cb7f680b 6235 {
6236 case PLUS_EXPR:
6237 case POINTER_PLUS_EXPR:
1cd6e20d 6238 oldbase = BUILT_IN_ATOMIC_FETCH_ADD_N;
6239 newbase = BUILT_IN_ATOMIC_ADD_FETCH_N;
cb7f680b 6240 break;
6241 case MINUS_EXPR:
1cd6e20d 6242 oldbase = BUILT_IN_ATOMIC_FETCH_SUB_N;
6243 newbase = BUILT_IN_ATOMIC_SUB_FETCH_N;
cb7f680b 6244 break;
6245 case BIT_AND_EXPR:
1cd6e20d 6246 oldbase = BUILT_IN_ATOMIC_FETCH_AND_N;
6247 newbase = BUILT_IN_ATOMIC_AND_FETCH_N;
cb7f680b 6248 break;
6249 case BIT_IOR_EXPR:
1cd6e20d 6250 oldbase = BUILT_IN_ATOMIC_FETCH_OR_N;
6251 newbase = BUILT_IN_ATOMIC_OR_FETCH_N;
cb7f680b 6252 break;
6253 case BIT_XOR_EXPR:
1cd6e20d 6254 oldbase = BUILT_IN_ATOMIC_FETCH_XOR_N;
6255 newbase = BUILT_IN_ATOMIC_XOR_FETCH_N;
cb7f680b 6256 break;
6257 default:
6258 return false;
6259 }
1cd6e20d 6260
cb7f680b 6261 /* Make sure the expression is of the proper form. */
75a70cf9 6262 if (operand_equal_p (gimple_assign_rhs1 (stmt), loaded_val, 0))
6263 rhs = gimple_assign_rhs2 (stmt);
6264 else if (commutative_tree_code (gimple_assign_rhs_code (stmt))
6265 && operand_equal_p (gimple_assign_rhs2 (stmt), loaded_val, 0))
6266 rhs = gimple_assign_rhs1 (stmt);
cb7f680b 6267 else
6268 return false;
6269
b9a16870 6270 tmpbase = ((enum built_in_function)
6271 ((need_new ? newbase : oldbase) + index + 1));
6272 decl = builtin_decl_explicit (tmpbase);
0f94f46b 6273 if (decl == NULL_TREE)
6274 return false;
cb7f680b 6275 itype = TREE_TYPE (TREE_TYPE (decl));
1cd6e20d 6276 imode = TYPE_MODE (itype);
cb7f680b 6277
1cd6e20d 6278 /* We could test all of the various optabs involved, but the fact of the
6279 matter is that (with the exception of i486 vs i586 and xadd) all targets
 6280     matter is that (with the exception of i486 vs i586 and xadd) all targets
 6281     that support any atomic operation optab also implement compare-and-swap.
     Let optabs.c take care of expanding any compare-and-swap loop.  */
29139cdc 6282 if (!can_compare_and_swap_p (imode, true))
cb7f680b 6283 return false;
6284
75a70cf9 6285 gsi = gsi_last_bb (load_bb);
6286 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_ATOMIC_LOAD);
1cd6e20d 6287
6288 /* OpenMP does not imply any barrier-like semantics on its atomic ops.
6289 It only requires that the operation happen atomically. Thus we can
6290 use the RELAXED memory model. */
6291 call = build_call_expr_loc (loc, decl, 3, addr,
6292 fold_convert_loc (loc, itype, rhs),
6293 build_int_cst (NULL, MEMMODEL_RELAXED));
6294
2169f33b 6295 if (need_old || need_new)
6296 {
6297 lhs = need_old ? loaded_val : stored_val;
6298 call = fold_convert_loc (loc, TREE_TYPE (lhs), call);
6299 call = build2_loc (loc, MODIFY_EXPR, void_type_node, lhs, call);
6300 }
6301 else
6302 call = fold_convert_loc (loc, void_type_node, call);
75a70cf9 6303 force_gimple_operand_gsi (&gsi, call, true, NULL_TREE, true, GSI_SAME_STMT);
6304 gsi_remove (&gsi, true);
cb7f680b 6305
75a70cf9 6306 gsi = gsi_last_bb (store_bb);
6307 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_ATOMIC_STORE);
6308 gsi_remove (&gsi, true);
6309 gsi = gsi_last_bb (store_bb);
6310 gsi_remove (&gsi, true);
cb7f680b 6311
6312 if (gimple_in_ssa_p (cfun))
6313 update_ssa (TODO_update_ssa_no_phi);
6314
6315 return true;
6316}
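
/* Illustrative result (hypothetical user code): for an int x,

	#pragma omp atomic
	  x += 42;

   matches the PLUS_EXPR case above and expands to
   __atomic_fetch_add_4 (&x, 42, MEMMODEL_RELAXED), the call's result
   being dropped because neither the old nor the new value is needed.  */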
6317
6318/* A subroutine of expand_omp_atomic. Implement the atomic operation as:
6319
6320 oldval = *addr;
6321 repeat:
6322 newval = rhs; // with oldval replacing *addr in rhs
6323 oldval = __sync_val_compare_and_swap (addr, oldval, newval);
6324 if (oldval != newval)
6325 goto repeat;
6326
6327 INDEX is log2 of the size of the data type, and thus usable to find the
6328 index of the builtin decl. */
6329
6330static bool
6331expand_omp_atomic_pipeline (basic_block load_bb, basic_block store_bb,
6332 tree addr, tree loaded_val, tree stored_val,
6333 int index)
6334{
790368c5 6335 tree loadedi, storedi, initial, new_storedi, old_vali;
cb7f680b 6336 tree type, itype, cmpxchg, iaddr;
75a70cf9 6337 gimple_stmt_iterator si;
cb7f680b 6338 basic_block loop_header = single_succ (load_bb);
75a70cf9 6339 gimple phi, stmt;
cb7f680b 6340 edge e;
b9a16870 6341 enum built_in_function fncode;
cb7f680b 6342
1cd6e20d 6343 /* ??? We need a non-pointer interface to __atomic_compare_exchange in
6344 order to use the RELAXED memory model effectively. */
b9a16870 6345 fncode = (enum built_in_function)((int)BUILT_IN_SYNC_VAL_COMPARE_AND_SWAP_N
6346 + index + 1);
6347 cmpxchg = builtin_decl_explicit (fncode);
0f94f46b 6348 if (cmpxchg == NULL_TREE)
6349 return false;
cb7f680b 6350 type = TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (addr)));
6351 itype = TREE_TYPE (TREE_TYPE (cmpxchg));
6352
29139cdc 6353 if (!can_compare_and_swap_p (TYPE_MODE (itype), true))
cb7f680b 6354 return false;
6355
75a70cf9 6356 /* Load the initial value, replacing the GIMPLE_OMP_ATOMIC_LOAD. */
6357 si = gsi_last_bb (load_bb);
6358 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_LOAD);
6359
790368c5 6360 /* For floating-point values, we'll need to view-convert them to integers
6361 so that we can perform the atomic compare and swap. Simplify the
6362 following code by always setting up the "i"ntegral variables. */
6363 if (!INTEGRAL_TYPE_P (type) && !POINTER_TYPE_P (type))
6364 {
75a70cf9 6365 tree iaddr_val;
6366
072f7ab1 6367 iaddr = create_tmp_reg (build_pointer_type_for_mode (itype, ptr_mode,
6368 true), NULL);
75a70cf9 6369 iaddr_val
6370 = force_gimple_operand_gsi (&si,
6371 fold_convert (TREE_TYPE (iaddr), addr),
6372 false, NULL_TREE, true, GSI_SAME_STMT);
6373 stmt = gimple_build_assign (iaddr, iaddr_val);
6374 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
790368c5 6375 loadedi = create_tmp_var (itype, NULL);
6376 if (gimple_in_ssa_p (cfun))
b03e5397 6377 loadedi = make_ssa_name (loadedi, NULL);
790368c5 6378 }
6379 else
6380 {
6381 iaddr = addr;
6382 loadedi = loaded_val;
6383 }
75a70cf9 6384
182cf5a9 6385 initial
6386 = force_gimple_operand_gsi (&si,
6387 build2 (MEM_REF, TREE_TYPE (TREE_TYPE (iaddr)),
6388 iaddr,
6389 build_int_cst (TREE_TYPE (iaddr), 0)),
6390 true, NULL_TREE, true, GSI_SAME_STMT);
790368c5 6391
6392 /* Move the value to the LOADEDI temporary. */
cb7f680b 6393 if (gimple_in_ssa_p (cfun))
6394 {
75a70cf9 6395 gcc_assert (gimple_seq_empty_p (phi_nodes (loop_header)));
790368c5 6396 phi = create_phi_node (loadedi, loop_header);
cb7f680b 6397 SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, single_succ_edge (load_bb)),
6398 initial);
6399 }
6400 else
75a70cf9 6401 gsi_insert_before (&si,
6402 gimple_build_assign (loadedi, initial),
6403 GSI_SAME_STMT);
790368c5 6404 if (loadedi != loaded_val)
6405 {
75a70cf9 6406 gimple_stmt_iterator gsi2;
6407 tree x;
790368c5 6408
6409 x = build1 (VIEW_CONVERT_EXPR, type, loadedi);
75a70cf9 6410 gsi2 = gsi_start_bb (loop_header);
790368c5 6411 if (gimple_in_ssa_p (cfun))
6412 {
75a70cf9 6413 gimple stmt;
6414 x = force_gimple_operand_gsi (&gsi2, x, true, NULL_TREE,
6415 true, GSI_SAME_STMT);
6416 stmt = gimple_build_assign (loaded_val, x);
6417 gsi_insert_before (&gsi2, stmt, GSI_SAME_STMT);
790368c5 6418 }
6419 else
6420 {
75a70cf9 6421 x = build2 (MODIFY_EXPR, TREE_TYPE (loaded_val), loaded_val, x);
6422 force_gimple_operand_gsi (&gsi2, x, true, NULL_TREE,
6423 true, GSI_SAME_STMT);
790368c5 6424 }
6425 }
75a70cf9 6426 gsi_remove (&si, true);
cb7f680b 6427
75a70cf9 6428 si = gsi_last_bb (store_bb);
6429 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_STORE);
cb7f680b 6430
790368c5 6431 if (iaddr == addr)
6432 storedi = stored_val;
cb7f680b 6433 else
790368c5 6434 storedi =
75a70cf9 6435 force_gimple_operand_gsi (&si,
790368c5 6436 build1 (VIEW_CONVERT_EXPR, itype,
6437 stored_val), true, NULL_TREE, true,
75a70cf9 6438 GSI_SAME_STMT);
cb7f680b 6439
6440 /* Build the compare&swap statement. */
6441 new_storedi = build_call_expr (cmpxchg, 3, iaddr, loadedi, storedi);
75a70cf9 6442 new_storedi = force_gimple_operand_gsi (&si,
87f9ffa4 6443 fold_convert (TREE_TYPE (loadedi),
6444 new_storedi),
cb7f680b 6445 true, NULL_TREE,
75a70cf9 6446 true, GSI_SAME_STMT);
cb7f680b 6447
6448 if (gimple_in_ssa_p (cfun))
6449 old_vali = loadedi;
6450 else
6451 {
87f9ffa4 6452 old_vali = create_tmp_var (TREE_TYPE (loadedi), NULL);
75a70cf9 6453 stmt = gimple_build_assign (old_vali, loadedi);
6454 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
cb7f680b 6455
75a70cf9 6456 stmt = gimple_build_assign (loadedi, new_storedi);
6457 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
cb7f680b 6458 }
6459
6460 /* Note that we always perform the comparison as an integer, even for
48e1416a 6461 floating point. This allows the atomic operation to properly
cb7f680b 6462 succeed even with NaNs and -0.0. */
75a70cf9 6463 stmt = gimple_build_cond_empty
6464 (build2 (NE_EXPR, boolean_type_node,
6465 new_storedi, old_vali));
6466 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
cb7f680b 6467
6468 /* Update cfg. */
6469 e = single_succ_edge (store_bb);
6470 e->flags &= ~EDGE_FALLTHRU;
6471 e->flags |= EDGE_FALSE_VALUE;
6472
6473 e = make_edge (store_bb, loop_header, EDGE_TRUE_VALUE);
6474
790368c5 6475 /* Copy the new value to loadedi (we already did that before the condition
cb7f680b 6476 if we are not in SSA). */
6477 if (gimple_in_ssa_p (cfun))
6478 {
75a70cf9 6479 phi = gimple_seq_first_stmt (phi_nodes (loop_header));
790368c5 6480 SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, e), new_storedi);
cb7f680b 6481 }
6482
75a70cf9 6483 /* Remove GIMPLE_OMP_ATOMIC_STORE. */
6484 gsi_remove (&si, true);
cb7f680b 6485
04c2922b 6486 struct loop *loop = alloc_loop ();
6487 loop->header = loop_header;
5f037457 6488 loop->latch = store_bb;
04c2922b 6489 add_loop (loop, loop_header->loop_father);
6490
cb7f680b 6491 if (gimple_in_ssa_p (cfun))
6492 update_ssa (TODO_update_ssa_no_phi);
6493
6494 return true;
6495}
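
/* Illustrative expansion (hypothetical user code, in pseudocode): for
   a float *p,

	#pragma omp atomic
	  *p += 1.0f;

   has no float fetch-op builtin, so the loop built above runs

	oldi = *(int *) p;
     repeat:
	newi = bits_of (as_float (oldi) + 1.0f);
	reti = __sync_val_compare_and_swap_4 ((int *) p, oldi, newi);
	if (reti != oldi) { oldi = reti; goto repeat; }

   comparing the values as integers, which succeeds even in the
   presence of NaNs and -0.0.  */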
6496
6497/* A subroutine of expand_omp_atomic. Implement the atomic operation as:
6498
6499 GOMP_atomic_start ();
6500 *addr = rhs;
6501 GOMP_atomic_end ();
6502
6503 The result is not globally atomic, but works so long as all parallel
6504 references are within #pragma omp atomic directives. According to
 6505   responses received from omp@openmp.org, this appears to be within spec.
 6506   That makes sense, since that's how several other compilers handle
48e1416a 6507 this situation as well.
75a70cf9 6508 LOADED_VAL and ADDR are the operands of GIMPLE_OMP_ATOMIC_LOAD we're
6509 expanding. STORED_VAL is the operand of the matching
6510 GIMPLE_OMP_ATOMIC_STORE.
cb7f680b 6511
48e1416a 6512 We replace
6513 GIMPLE_OMP_ATOMIC_LOAD (loaded_val, addr) with
cb7f680b 6514 loaded_val = *addr;
6515
6516 and replace
3ec11c49 6517 GIMPLE_OMP_ATOMIC_STORE (stored_val) with
48e1416a 6518 *addr = stored_val;
cb7f680b 6519*/
6520
6521static bool
6522expand_omp_atomic_mutex (basic_block load_bb, basic_block store_bb,
6523 tree addr, tree loaded_val, tree stored_val)
6524{
75a70cf9 6525 gimple_stmt_iterator si;
6526 gimple stmt;
cb7f680b 6527 tree t;
6528
75a70cf9 6529 si = gsi_last_bb (load_bb);
6530 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_LOAD);
cb7f680b 6531
b9a16870 6532 t = builtin_decl_explicit (BUILT_IN_GOMP_ATOMIC_START);
414c3a2c 6533 t = build_call_expr (t, 0);
75a70cf9 6534 force_gimple_operand_gsi (&si, t, true, NULL_TREE, true, GSI_SAME_STMT);
cb7f680b 6535
182cf5a9 6536 stmt = gimple_build_assign (loaded_val, build_simple_mem_ref (addr));
75a70cf9 6537 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
6538 gsi_remove (&si, true);
cb7f680b 6539
75a70cf9 6540 si = gsi_last_bb (store_bb);
6541 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_STORE);
cb7f680b 6542
182cf5a9 6543 stmt = gimple_build_assign (build_simple_mem_ref (unshare_expr (addr)),
6544 stored_val);
75a70cf9 6545 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
cb7f680b 6546
b9a16870 6547 t = builtin_decl_explicit (BUILT_IN_GOMP_ATOMIC_END);
414c3a2c 6548 t = build_call_expr (t, 0);
75a70cf9 6549 force_gimple_operand_gsi (&si, t, true, NULL_TREE, true, GSI_SAME_STMT);
6550 gsi_remove (&si, true);
cb7f680b 6551
6552 if (gimple_in_ssa_p (cfun))
6553 update_ssa (TODO_update_ssa_no_phi);
6554 return true;
6555}
6556
48e1416a 6557/* Expand a GIMPLE_OMP_ATOMIC statement.  We try to expand
6558 using expand_omp_atomic_fetch_op. If it failed, we try to
cb7f680b 6559 call expand_omp_atomic_pipeline, and if it fails too, the
6560 ultimate fallback is wrapping the operation in a mutex
48e1416a 6561 (expand_omp_atomic_mutex). REGION is the atomic region built
6562 by build_omp_regions_1(). */
cb7f680b 6563
6564static void
6565expand_omp_atomic (struct omp_region *region)
6566{
6567 basic_block load_bb = region->entry, store_bb = region->exit;
75a70cf9 6568 gimple load = last_stmt (load_bb), store = last_stmt (store_bb);
6569 tree loaded_val = gimple_omp_atomic_load_lhs (load);
6570 tree addr = gimple_omp_atomic_load_rhs (load);
6571 tree stored_val = gimple_omp_atomic_store_val (store);
cb7f680b 6572 tree type = TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (addr)));
6573 HOST_WIDE_INT index;
6574
6575 /* Make sure the type is one of the supported sizes. */
6576 index = tree_low_cst (TYPE_SIZE_UNIT (type), 1);
6577 index = exact_log2 (index);
6578 if (index >= 0 && index <= 4)
6579 {
6580 unsigned int align = TYPE_ALIGN_UNIT (type);
6581
6582 /* __sync builtins require strict data alignment. */
dcf7024c 6583 if (exact_log2 (align) >= index)
cb7f680b 6584 {
3ec11c49 6585 /* Atomic load. */
2169f33b 6586 if (loaded_val == stored_val
6587 && (GET_MODE_CLASS (TYPE_MODE (type)) == MODE_INT
6588 || GET_MODE_CLASS (TYPE_MODE (type)) == MODE_FLOAT)
6589 && GET_MODE_BITSIZE (TYPE_MODE (type)) <= BITS_PER_WORD
3ec11c49 6590 && expand_omp_atomic_load (load_bb, addr, loaded_val, index))
2169f33b 6591 return;
6592
3ec11c49 6593 /* Atomic store. */
2169f33b 6594 if ((GET_MODE_CLASS (TYPE_MODE (type)) == MODE_INT
6595 || GET_MODE_CLASS (TYPE_MODE (type)) == MODE_FLOAT)
6596 && GET_MODE_BITSIZE (TYPE_MODE (type)) <= BITS_PER_WORD
6597 && store_bb == single_succ (load_bb)
6598 && first_stmt (store_bb) == store
3ec11c49 6599 && expand_omp_atomic_store (load_bb, addr, loaded_val,
6600 stored_val, index))
2169f33b 6601 return;
6602
cb7f680b 6603 /* When possible, use specialized atomic update functions. */
6604 if ((INTEGRAL_TYPE_P (type) || POINTER_TYPE_P (type))
3ec11c49 6605 && store_bb == single_succ (load_bb)
6606 && expand_omp_atomic_fetch_op (load_bb, addr,
6607 loaded_val, stored_val, index))
6608 return;
cb7f680b 6609
 6610 /* If we don't have specialized __sync builtins, try to implement
 6611 the operation as a compare-and-swap loop. */
6612 if (expand_omp_atomic_pipeline (load_bb, store_bb, addr,
6613 loaded_val, stored_val, index))
6614 return;
6615 }
6616 }
6617
6618 /* The ultimate fallback is wrapping the operation in a mutex. */
6619 expand_omp_atomic_mutex (load_bb, store_bb, addr, loaded_val, stored_val);
6620}
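
/* As a concrete sketch of the cascade above, assuming a 4-byte int X on
   a target that provides the __sync builtins, "#pragma omp atomic X += 1"
   corresponds roughly to one of:

     __sync_fetch_and_add (&X, 1);            <- expand_omp_atomic_fetch_op

     do                                        <- expand_omp_atomic_pipeline
       old = X;
     while (!__sync_bool_compare_and_swap (&X, old, old + 1));

     GOMP_atomic_start ();                     <- expand_omp_atomic_mutex
     X = X + 1;
     GOMP_atomic_end (); */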
6621
1e8e9920 6622
773c5ba7 6623/* Expand the parallel region tree rooted at REGION. Expansion
6624 proceeds in depth-first order. Innermost regions are expanded
6625 first. This way, parallel regions that require a new function to
75a70cf9 6626 be created (e.g., GIMPLE_OMP_PARALLEL) can be expanded without having any
773c5ba7 6627 internal dependencies in their body. */
6628
6629static void
6630expand_omp (struct omp_region *region)
6631{
6632 while (region)
6633 {
1d22f541 6634 location_t saved_location;
6635
d1d5b012 6636 /* First, determine whether this is a combined parallel+workshare
6637 region. */
75a70cf9 6638 if (region->type == GIMPLE_OMP_PARALLEL)
d1d5b012 6639 determine_parallel_type (region);
6640
773c5ba7 6641 if (region->inner)
6642 expand_omp (region->inner);
6643
1d22f541 6644 saved_location = input_location;
75a70cf9 6645 if (gimple_has_location (last_stmt (region->entry)))
6646 input_location = gimple_location (last_stmt (region->entry));
1d22f541 6647
61e47ac8 6648 switch (region->type)
773c5ba7 6649 {
75a70cf9 6650 case GIMPLE_OMP_PARALLEL:
6651 case GIMPLE_OMP_TASK:
fd6481cf 6652 expand_omp_taskreg (region);
6653 break;
6654
75a70cf9 6655 case GIMPLE_OMP_FOR:
61e47ac8 6656 expand_omp_for (region);
6657 break;
773c5ba7 6658
75a70cf9 6659 case GIMPLE_OMP_SECTIONS:
61e47ac8 6660 expand_omp_sections (region);
6661 break;
773c5ba7 6662
75a70cf9 6663 case GIMPLE_OMP_SECTION:
61e47ac8 6664 /* Individual omp sections are handled together with their
75a70cf9 6665 parent GIMPLE_OMP_SECTIONS region. */
61e47ac8 6666 break;
773c5ba7 6667
75a70cf9 6668 case GIMPLE_OMP_SINGLE:
61e47ac8 6669 expand_omp_single (region);
6670 break;
773c5ba7 6671
75a70cf9 6672 case GIMPLE_OMP_MASTER:
6673 case GIMPLE_OMP_ORDERED:
6674 case GIMPLE_OMP_CRITICAL:
61e47ac8 6675 expand_omp_synch (region);
6676 break;
773c5ba7 6677
75a70cf9 6678 case GIMPLE_OMP_ATOMIC_LOAD:
cb7f680b 6679 expand_omp_atomic (region);
6680 break;
6681
61e47ac8 6682 default:
6683 gcc_unreachable ();
6684 }
cc5982dc 6685
1d22f541 6686 input_location = saved_location;
773c5ba7 6687 region = region->next;
6688 }
6689}
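
/* E.g., a sketch of the ordering: for

     #pragma omp parallel
       #pragma omp for
         ...

   the recursion into region->inner expands the GIMPLE_OMP_FOR before the
   enclosing GIMPLE_OMP_PARALLEL is outlined into its own function, so the
   outlined body contains no unexpanded OMP constructs. */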
6690
6691
6692/* Helper for build_omp_regions. Scan the dominator tree starting at
28c92cbb 6693 block BB. PARENT is the region that contains BB. If SINGLE_TREE is
 6694 true, the function ends once a single tree is built (otherwise, a
 6695 whole forest of OMP constructs may be built). */
773c5ba7 6696
6697static void
28c92cbb 6698build_omp_regions_1 (basic_block bb, struct omp_region *parent,
6699 bool single_tree)
773c5ba7 6700{
75a70cf9 6701 gimple_stmt_iterator gsi;
6702 gimple stmt;
773c5ba7 6703 basic_block son;
6704
75a70cf9 6705 gsi = gsi_last_bb (bb);
6706 if (!gsi_end_p (gsi) && is_gimple_omp (gsi_stmt (gsi)))
773c5ba7 6707 {
6708 struct omp_region *region;
75a70cf9 6709 enum gimple_code code;
773c5ba7 6710
75a70cf9 6711 stmt = gsi_stmt (gsi);
6712 code = gimple_code (stmt);
6713 if (code == GIMPLE_OMP_RETURN)
773c5ba7 6714 {
6715 /* STMT is the return point out of region PARENT. Mark it
6716 as the exit point and make PARENT the immediately
6717 enclosing region. */
6718 gcc_assert (parent);
6719 region = parent;
61e47ac8 6720 region->exit = bb;
773c5ba7 6721 parent = parent->outer;
773c5ba7 6722 }
75a70cf9 6723 else if (code == GIMPLE_OMP_ATOMIC_STORE)
cb7f680b 6724 {
75a70cf9 6725 /* GIMPLE_OMP_ATOMIC_STORE is analogous to
6726 GIMPLE_OMP_RETURN, but matches with
6727 GIMPLE_OMP_ATOMIC_LOAD. */
cb7f680b 6728 gcc_assert (parent);
75a70cf9 6729 gcc_assert (parent->type == GIMPLE_OMP_ATOMIC_LOAD);
cb7f680b 6730 region = parent;
6731 region->exit = bb;
6732 parent = parent->outer;
6733 }
6734
75a70cf9 6735 else if (code == GIMPLE_OMP_CONTINUE)
61e47ac8 6736 {
6737 gcc_assert (parent);
6738 parent->cont = bb;
6739 }
75a70cf9 6740 else if (code == GIMPLE_OMP_SECTIONS_SWITCH)
ac6e3339 6741 {
75a70cf9 6742 /* GIMPLE_OMP_SECTIONS_SWITCH is part of
6743 GIMPLE_OMP_SECTIONS, and we do nothing for it. */
6744 ;
ac6e3339 6745 }
773c5ba7 6746 else
6747 {
6748 /* Otherwise, this directive becomes the parent for a new
6749 region. */
61e47ac8 6750 region = new_omp_region (bb, code, parent);
773c5ba7 6751 parent = region;
6752 }
773c5ba7 6753 }
6754
28c92cbb 6755 if (single_tree && !parent)
6756 return;
6757
773c5ba7 6758 for (son = first_dom_son (CDI_DOMINATORS, bb);
6759 son;
6760 son = next_dom_son (CDI_DOMINATORS, son))
28c92cbb 6761 build_omp_regions_1 (son, parent, single_tree);
6762}
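
/* For instance, a sketch: the dominator walk above turns

     #pragma omp parallel
       #pragma omp for
         ...

   into the nested region tree

     GIMPLE_OMP_PARALLEL   (entry/exit = blocks of the directive and
       GIMPLE_OMP_FOR       its matching GIMPLE_OMP_RETURN)

   which is what dump_omp_region prints in the detailed dumps below. */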
6763
 6764/* Build the tree of OMP regions rooted at ROOT, storing it in
 6765 root_omp_region. */
6766
6767static void
6768build_omp_regions_root (basic_block root)
6769{
6770 gcc_assert (root_omp_region == NULL);
6771 build_omp_regions_1 (root, NULL, true);
6772 gcc_assert (root_omp_region != NULL);
773c5ba7 6773}
6774
28c92cbb 6775/* Expand the OMP construct (and its subconstructs) rooted at HEAD. */
6776
6777void
6778omp_expand_local (basic_block head)
6779{
6780 build_omp_regions_root (head);
6781 if (dump_file && (dump_flags & TDF_DETAILS))
6782 {
6783 fprintf (dump_file, "\nOMP region tree\n\n");
6784 dump_omp_region (dump_file, root_omp_region, 0);
6785 fprintf (dump_file, "\n");
6786 }
6787
6788 remove_exit_barriers (root_omp_region);
6789 expand_omp (root_omp_region);
6790
6791 free_omp_regions ();
6792}
773c5ba7 6793
 6794/* Scan the CFG and build a tree of OMP regions, storing the root of
 6795 the tree in root_omp_region. */
6796
6797static void
6798build_omp_regions (void)
6799{
61e47ac8 6800 gcc_assert (root_omp_region == NULL);
773c5ba7 6801 calculate_dominance_info (CDI_DOMINATORS);
28c92cbb 6802 build_omp_regions_1 (ENTRY_BLOCK_PTR, NULL, false);
773c5ba7 6803}
6804
773c5ba7 6805/* Main entry point for expanding OMP-GIMPLE into runtime calls. */
6806
2a1990e9 6807static unsigned int
773c5ba7 6808execute_expand_omp (void)
6809{
6810 build_omp_regions ();
6811
61e47ac8 6812 if (!root_omp_region)
6813 return 0;
773c5ba7 6814
61e47ac8 6815 if (dump_file)
6816 {
6817 fprintf (dump_file, "\nOMP region tree\n\n");
6818 dump_omp_region (dump_file, root_omp_region, 0);
6819 fprintf (dump_file, "\n");
773c5ba7 6820 }
61e47ac8 6821
6822 remove_exit_barriers (root_omp_region);
6823
6824 expand_omp (root_omp_region);
6825
61e47ac8 6826 cleanup_tree_cfg ();
6827
6828 free_omp_regions ();
6829
2a1990e9 6830 return 0;
773c5ba7 6831}
6832
79acaae1 6833/* OMP expansion -- the default pass, run before creation of SSA form. */
6834
773c5ba7 6835static bool
6836gate_expand_omp (void)
6837{
852f689e 6838 return (flag_openmp != 0 && !seen_error ());
773c5ba7 6839}
6840
cbe8bda8 6841namespace {
6842
6843const pass_data pass_data_expand_omp =
6844{
6845 GIMPLE_PASS, /* type */
6846 "ompexp", /* name */
6847 OPTGROUP_NONE, /* optinfo_flags */
6848 true, /* has_gate */
6849 true, /* has_execute */
6850 TV_NONE, /* tv_id */
6851 PROP_gimple_any, /* properties_required */
6852 0, /* properties_provided */
6853 0, /* properties_destroyed */
6854 0, /* todo_flags_start */
6855 0, /* todo_flags_finish */
773c5ba7 6856};
cbe8bda8 6857
6858class pass_expand_omp : public gimple_opt_pass
6859{
6860public:
6861 pass_expand_omp(gcc::context *ctxt)
6862 : gimple_opt_pass(pass_data_expand_omp, ctxt)
6863 {}
6864
6865 /* opt_pass methods: */
6866 bool gate () { return gate_expand_omp (); }
6867 unsigned int execute () { return execute_expand_omp (); }
6868
6869}; // class pass_expand_omp
6870
6871} // anon namespace
6872
6873gimple_opt_pass *
6874make_pass_expand_omp (gcc::context *ctxt)
6875{
6876 return new pass_expand_omp (ctxt);
6877}
773c5ba7 6878\f
6879/* Routines to lower OpenMP directives into OMP-GIMPLE. */
6880
75a70cf9 6881/* Lower the OpenMP sections directive in the current statement in GSI_P.
6882 CTX is the enclosing OMP context for the current statement. */
773c5ba7 6883
6884static void
75a70cf9 6885lower_omp_sections (gimple_stmt_iterator *gsi_p, omp_context *ctx)
773c5ba7 6886{
75a70cf9 6887 tree block, control;
6888 gimple_stmt_iterator tgsi;
75a70cf9 6889 gimple stmt, new_stmt, bind, t;
e3a19533 6890 gimple_seq ilist, dlist, olist, new_body;
dac18d1a 6891 struct gimplify_ctx gctx;
773c5ba7 6892
75a70cf9 6893 stmt = gsi_stmt (*gsi_p);
773c5ba7 6894
dac18d1a 6895 push_gimplify_context (&gctx);
773c5ba7 6896
6897 dlist = NULL;
6898 ilist = NULL;
75a70cf9 6899 lower_rec_input_clauses (gimple_omp_sections_clauses (stmt),
6900 &ilist, &dlist, ctx);
773c5ba7 6901
e3a19533 6902 new_body = gimple_omp_body (stmt);
6903 gimple_omp_set_body (stmt, NULL);
6904 tgsi = gsi_start (new_body);
6905 for (; !gsi_end_p (tgsi); gsi_next (&tgsi))
773c5ba7 6906 {
6907 omp_context *sctx;
75a70cf9 6908 gimple sec_start;
773c5ba7 6909
75a70cf9 6910 sec_start = gsi_stmt (tgsi);
773c5ba7 6911 sctx = maybe_lookup_ctx (sec_start);
6912 gcc_assert (sctx);
6913
e3a19533 6914 lower_omp (gimple_omp_body_ptr (sec_start), sctx);
6915 gsi_insert_seq_after (&tgsi, gimple_omp_body (sec_start),
6916 GSI_CONTINUE_LINKING);
75a70cf9 6917 gimple_omp_set_body (sec_start, NULL);
773c5ba7 6918
e3a19533 6919 if (gsi_one_before_end_p (tgsi))
773c5ba7 6920 {
75a70cf9 6921 gimple_seq l = NULL;
6922 lower_lastprivate_clauses (gimple_omp_sections_clauses (stmt), NULL,
773c5ba7 6923 &l, ctx);
e3a19533 6924 gsi_insert_seq_after (&tgsi, l, GSI_CONTINUE_LINKING);
75a70cf9 6925 gimple_omp_section_set_last (sec_start);
773c5ba7 6926 }
48e1416a 6927
e3a19533 6928 gsi_insert_after (&tgsi, gimple_build_omp_return (false),
6929 GSI_CONTINUE_LINKING);
773c5ba7 6930 }
1e8e9920 6931
6932 block = make_node (BLOCK);
e3a19533 6933 bind = gimple_build_bind (NULL, new_body, block);
1e8e9920 6934
75a70cf9 6935 olist = NULL;
6936 lower_reduction_clauses (gimple_omp_sections_clauses (stmt), &olist, ctx);
773c5ba7 6937
1d22f541 6938 block = make_node (BLOCK);
75a70cf9 6939 new_stmt = gimple_build_bind (NULL, NULL, block);
e3a19533 6940 gsi_replace (gsi_p, new_stmt, true);
773c5ba7 6941
1d22f541 6942 pop_gimplify_context (new_stmt);
75a70cf9 6943 gimple_bind_append_vars (new_stmt, ctx->block_vars);
6944 BLOCK_VARS (block) = gimple_bind_vars (bind);
1d22f541 6945 if (BLOCK_VARS (block))
6946 TREE_USED (block) = 1;
6947
75a70cf9 6948 new_body = NULL;
6949 gimple_seq_add_seq (&new_body, ilist);
6950 gimple_seq_add_stmt (&new_body, stmt);
6951 gimple_seq_add_stmt (&new_body, gimple_build_omp_sections_switch ());
6952 gimple_seq_add_stmt (&new_body, bind);
61e47ac8 6953
ac6e3339 6954 control = create_tmp_var (unsigned_type_node, ".section");
75a70cf9 6955 t = gimple_build_omp_continue (control, control);
6956 gimple_omp_sections_set_control (stmt, control);
6957 gimple_seq_add_stmt (&new_body, t);
61e47ac8 6958
75a70cf9 6959 gimple_seq_add_seq (&new_body, olist);
6960 gimple_seq_add_seq (&new_body, dlist);
773c5ba7 6961
75a70cf9 6962 new_body = maybe_catch_exception (new_body);
aade31a0 6963
75a70cf9 6964 t = gimple_build_omp_return
6965 (!!find_omp_clause (gimple_omp_sections_clauses (stmt),
6966 OMP_CLAUSE_NOWAIT));
6967 gimple_seq_add_stmt (&new_body, t);
61e47ac8 6968
75a70cf9 6969 gimple_bind_set_body (new_stmt, new_body);
1e8e9920 6970}
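
/* The overall shape assembled above, as a sketch (not the exact GIMPLE):

     <ilist: private/firstprivate setup>
     GIMPLE_OMP_SECTIONS <clauses, .section control variable>
     GIMPLE_OMP_SECTIONS_SWITCH
     bind {
       <section 1 body>  GIMPLE_OMP_RETURN
       ...
       <last section body>  <lastprivate code>  GIMPLE_OMP_RETURN
     }
     GIMPLE_OMP_CONTINUE (.section, .section)
     <olist: reductions>  <dlist: destructors>
     GIMPLE_OMP_RETURN (nowait iff the clause was given)  */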
6971
6972
773c5ba7 6973/* A subroutine of lower_omp_single. Expand the simple form of
75a70cf9 6974 a GIMPLE_OMP_SINGLE, without a copyprivate clause:
1e8e9920 6975
6976 if (GOMP_single_start ())
6977 BODY;
6978 [ GOMP_barrier (); ] -> unless 'nowait' is present.
773c5ba7 6979
6980 FIXME. It may be better to delay expanding the logic of this until
6981 pass_expand_omp. The expanded logic may make the job more difficult
 6982 for a synchronization analysis pass. */
1e8e9920 6983
6984static void
75a70cf9 6985lower_omp_single_simple (gimple single_stmt, gimple_seq *pre_p)
1e8e9920 6986{
e60a6f7b 6987 location_t loc = gimple_location (single_stmt);
6988 tree tlabel = create_artificial_label (loc);
6989 tree flabel = create_artificial_label (loc);
75a70cf9 6990 gimple call, cond;
6991 tree lhs, decl;
6992
b9a16870 6993 decl = builtin_decl_explicit (BUILT_IN_GOMP_SINGLE_START);
75a70cf9 6994 lhs = create_tmp_var (TREE_TYPE (TREE_TYPE (decl)), NULL);
6995 call = gimple_build_call (decl, 0);
6996 gimple_call_set_lhs (call, lhs);
6997 gimple_seq_add_stmt (pre_p, call);
6998
6999 cond = gimple_build_cond (EQ_EXPR, lhs,
389dd41b 7000 fold_convert_loc (loc, TREE_TYPE (lhs),
7001 boolean_true_node),
75a70cf9 7002 tlabel, flabel);
7003 gimple_seq_add_stmt (pre_p, cond);
7004 gimple_seq_add_stmt (pre_p, gimple_build_label (tlabel));
7005 gimple_seq_add_seq (pre_p, gimple_omp_body (single_stmt));
7006 gimple_seq_add_stmt (pre_p, gimple_build_label (flabel));
1e8e9920 7007}
7008
773c5ba7 7009
 7010/* A subroutine of lower_omp_single. Expand the copyprivate form of
75a70cf9 7011 a GIMPLE_OMP_SINGLE, with a copyprivate clause:
1e8e9920 7012
7013 #pragma omp single copyprivate (a, b, c)
7014
7015 Create a new structure to hold copies of 'a', 'b' and 'c' and emit:
7016
7017 {
7018 if ((copyout_p = GOMP_single_copy_start ()) == NULL)
7019 {
7020 BODY;
7021 copyout.a = a;
7022 copyout.b = b;
7023 copyout.c = c;
7024 GOMP_single_copy_end (&copyout);
7025 }
7026 else
7027 {
7028 a = copyout_p->a;
7029 b = copyout_p->b;
7030 c = copyout_p->c;
7031 }
7032 GOMP_barrier ();
7033 }
773c5ba7 7034
7035 FIXME. It may be better to delay expanding the logic of this until
7036 pass_expand_omp. The expanded logic may make the job more difficult
 7037 for a synchronization analysis pass. */
1e8e9920 7038
7039static void
75a70cf9 7040lower_omp_single_copy (gimple single_stmt, gimple_seq *pre_p, omp_context *ctx)
1e8e9920 7041{
b9a16870 7042 tree ptr_type, t, l0, l1, l2, bfn_decl;
75a70cf9 7043 gimple_seq copyin_seq;
e60a6f7b 7044 location_t loc = gimple_location (single_stmt);
1e8e9920 7045
7046 ctx->sender_decl = create_tmp_var (ctx->record_type, ".omp_copy_o");
7047
7048 ptr_type = build_pointer_type (ctx->record_type);
7049 ctx->receiver_decl = create_tmp_var (ptr_type, ".omp_copy_i");
7050
e60a6f7b 7051 l0 = create_artificial_label (loc);
7052 l1 = create_artificial_label (loc);
7053 l2 = create_artificial_label (loc);
1e8e9920 7054
b9a16870 7055 bfn_decl = builtin_decl_explicit (BUILT_IN_GOMP_SINGLE_COPY_START);
7056 t = build_call_expr_loc (loc, bfn_decl, 0);
389dd41b 7057 t = fold_convert_loc (loc, ptr_type, t);
75a70cf9 7058 gimplify_assign (ctx->receiver_decl, t, pre_p);
1e8e9920 7059
7060 t = build2 (EQ_EXPR, boolean_type_node, ctx->receiver_decl,
7061 build_int_cst (ptr_type, 0));
7062 t = build3 (COND_EXPR, void_type_node, t,
7063 build_and_jump (&l0), build_and_jump (&l1));
7064 gimplify_and_add (t, pre_p);
7065
75a70cf9 7066 gimple_seq_add_stmt (pre_p, gimple_build_label (l0));
1e8e9920 7067
75a70cf9 7068 gimple_seq_add_seq (pre_p, gimple_omp_body (single_stmt));
1e8e9920 7069
7070 copyin_seq = NULL;
75a70cf9 7071 lower_copyprivate_clauses (gimple_omp_single_clauses (single_stmt), pre_p,
1e8e9920 7072 &copyin_seq, ctx);
7073
389dd41b 7074 t = build_fold_addr_expr_loc (loc, ctx->sender_decl);
b9a16870 7075 bfn_decl = builtin_decl_explicit (BUILT_IN_GOMP_SINGLE_COPY_END);
7076 t = build_call_expr_loc (loc, bfn_decl, 1, t);
1e8e9920 7077 gimplify_and_add (t, pre_p);
7078
7079 t = build_and_jump (&l2);
7080 gimplify_and_add (t, pre_p);
7081
75a70cf9 7082 gimple_seq_add_stmt (pre_p, gimple_build_label (l1));
1e8e9920 7083
75a70cf9 7084 gimple_seq_add_seq (pre_p, copyin_seq);
1e8e9920 7085
75a70cf9 7086 gimple_seq_add_stmt (pre_p, gimple_build_label (l2));
1e8e9920 7087}
7088
773c5ba7 7089
1e8e9920 7090/* Lower code for an OpenMP single directive. */
7091
7092static void
75a70cf9 7093lower_omp_single (gimple_stmt_iterator *gsi_p, omp_context *ctx)
1e8e9920 7094{
75a70cf9 7095 tree block;
7096 gimple t, bind, single_stmt = gsi_stmt (*gsi_p);
7097 gimple_seq bind_body, dlist;
dac18d1a 7098 struct gimplify_ctx gctx;
1e8e9920 7099
dac18d1a 7100 push_gimplify_context (&gctx);
1e8e9920 7101
e3a19533 7102 block = make_node (BLOCK);
7103 bind = gimple_build_bind (NULL, NULL, block);
7104 gsi_replace (gsi_p, bind, true);
75a70cf9 7105 bind_body = NULL;
e3a19533 7106 dlist = NULL;
75a70cf9 7107 lower_rec_input_clauses (gimple_omp_single_clauses (single_stmt),
7108 &bind_body, &dlist, ctx);
e3a19533 7109 lower_omp (gimple_omp_body_ptr (single_stmt), ctx);
1e8e9920 7110
75a70cf9 7111 gimple_seq_add_stmt (&bind_body, single_stmt);
1e8e9920 7112
7113 if (ctx->record_type)
75a70cf9 7114 lower_omp_single_copy (single_stmt, &bind_body, ctx);
1e8e9920 7115 else
75a70cf9 7116 lower_omp_single_simple (single_stmt, &bind_body);
7117
7118 gimple_omp_set_body (single_stmt, NULL);
1e8e9920 7119
75a70cf9 7120 gimple_seq_add_seq (&bind_body, dlist);
61e47ac8 7121
75a70cf9 7122 bind_body = maybe_catch_exception (bind_body);
61e47ac8 7123
48e1416a 7124 t = gimple_build_omp_return
75a70cf9 7125 (!!find_omp_clause (gimple_omp_single_clauses (single_stmt),
7126 OMP_CLAUSE_NOWAIT));
7127 gimple_seq_add_stmt (&bind_body, t);
e3a19533 7128 gimple_bind_set_body (bind, bind_body);
61e47ac8 7129
1e8e9920 7130 pop_gimplify_context (bind);
773c5ba7 7131
75a70cf9 7132 gimple_bind_append_vars (bind, ctx->block_vars);
7133 BLOCK_VARS (block) = ctx->block_vars;
1d22f541 7134 if (BLOCK_VARS (block))
7135 TREE_USED (block) = 1;
1e8e9920 7136}
7137
773c5ba7 7138
1e8e9920 7139/* Lower code for an OpenMP master directive. */
7140
7141static void
75a70cf9 7142lower_omp_master (gimple_stmt_iterator *gsi_p, omp_context *ctx)
1e8e9920 7143{
b9a16870 7144 tree block, lab = NULL, x, bfn_decl;
75a70cf9 7145 gimple stmt = gsi_stmt (*gsi_p), bind;
389dd41b 7146 location_t loc = gimple_location (stmt);
75a70cf9 7147 gimple_seq tseq;
dac18d1a 7148 struct gimplify_ctx gctx;
1e8e9920 7149
dac18d1a 7150 push_gimplify_context (&gctx);
1e8e9920 7151
7152 block = make_node (BLOCK);
e3a19533 7153 bind = gimple_build_bind (NULL, NULL, block);
7154 gsi_replace (gsi_p, bind, true);
7155 gimple_bind_add_stmt (bind, stmt);
61e47ac8 7156
b9a16870 7157 bfn_decl = builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM);
7158 x = build_call_expr_loc (loc, bfn_decl, 0);
1e8e9920 7159 x = build2 (EQ_EXPR, boolean_type_node, x, integer_zero_node);
7160 x = build3 (COND_EXPR, void_type_node, x, NULL, build_and_jump (&lab));
75a70cf9 7161 tseq = NULL;
7162 gimplify_and_add (x, &tseq);
7163 gimple_bind_add_seq (bind, tseq);
1e8e9920 7164
e3a19533 7165 lower_omp (gimple_omp_body_ptr (stmt), ctx);
75a70cf9 7166 gimple_omp_set_body (stmt, maybe_catch_exception (gimple_omp_body (stmt)));
7167 gimple_bind_add_seq (bind, gimple_omp_body (stmt));
7168 gimple_omp_set_body (stmt, NULL);
1e8e9920 7169
75a70cf9 7170 gimple_bind_add_stmt (bind, gimple_build_label (lab));
61e47ac8 7171
75a70cf9 7172 gimple_bind_add_stmt (bind, gimple_build_omp_return (true));
61e47ac8 7173
1e8e9920 7174 pop_gimplify_context (bind);
773c5ba7 7175
75a70cf9 7176 gimple_bind_append_vars (bind, ctx->block_vars);
7177 BLOCK_VARS (block) = ctx->block_vars;
1e8e9920 7178}
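
/* In source terms the lowering above is roughly (a sketch):

     if (omp_get_thread_num () != 0)
       goto lab;
     <body>
    lab:
     GIMPLE_OMP_RETURN (nowait)

   so only thread 0 of the team executes the body, and no barrier is
   implied at the end of a master region. */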
7179
773c5ba7 7180
1e8e9920 7181/* Lower code for an OpenMP ordered directive. */
7182
7183static void
75a70cf9 7184lower_omp_ordered (gimple_stmt_iterator *gsi_p, omp_context *ctx)
1e8e9920 7185{
75a70cf9 7186 tree block;
7187 gimple stmt = gsi_stmt (*gsi_p), bind, x;
dac18d1a 7188 struct gimplify_ctx gctx;
1e8e9920 7189
dac18d1a 7190 push_gimplify_context (&gctx);
1e8e9920 7191
7192 block = make_node (BLOCK);
e3a19533 7193 bind = gimple_build_bind (NULL, NULL, block);
7194 gsi_replace (gsi_p, bind, true);
7195 gimple_bind_add_stmt (bind, stmt);
61e47ac8 7196
b9a16870 7197 x = gimple_build_call (builtin_decl_explicit (BUILT_IN_GOMP_ORDERED_START),
7198 0);
75a70cf9 7199 gimple_bind_add_stmt (bind, x);
1e8e9920 7200
e3a19533 7201 lower_omp (gimple_omp_body_ptr (stmt), ctx);
75a70cf9 7202 gimple_omp_set_body (stmt, maybe_catch_exception (gimple_omp_body (stmt)));
7203 gimple_bind_add_seq (bind, gimple_omp_body (stmt));
7204 gimple_omp_set_body (stmt, NULL);
1e8e9920 7205
b9a16870 7206 x = gimple_build_call (builtin_decl_explicit (BUILT_IN_GOMP_ORDERED_END), 0);
75a70cf9 7207 gimple_bind_add_stmt (bind, x);
61e47ac8 7208
75a70cf9 7209 gimple_bind_add_stmt (bind, gimple_build_omp_return (true));
61e47ac8 7210
1e8e9920 7211 pop_gimplify_context (bind);
773c5ba7 7212
75a70cf9 7213 gimple_bind_append_vars (bind, ctx->block_vars);
7214 BLOCK_VARS (block) = gimple_bind_vars (bind);
1e8e9920 7215}
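
/* The resulting shape is simply (a sketch):

     GOMP_ordered_start ();
     <body>
     GOMP_ordered_end ();
     GIMPLE_OMP_RETURN (nowait)

   with libgomp sequencing the bodies in loop-iteration order. */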
7216
1e8e9920 7217
75a70cf9 7218/* Gimplify a GIMPLE_OMP_CRITICAL statement. This is a relatively simple
1e8e9920 7219 substitution of a couple of function calls. But the NAMED case
 7220 requires that the front ends coordinate a symbol name, so it is
 7221 best handled here in common code. */
7222
7223static GTY((param1_is (tree), param2_is (tree)))
7224 splay_tree critical_name_mutexes;
7225
7226static void
75a70cf9 7227lower_omp_critical (gimple_stmt_iterator *gsi_p, omp_context *ctx)
1e8e9920 7228{
75a70cf9 7229 tree block;
7230 tree name, lock, unlock;
7231 gimple stmt = gsi_stmt (*gsi_p), bind;
389dd41b 7232 location_t loc = gimple_location (stmt);
75a70cf9 7233 gimple_seq tbody;
dac18d1a 7234 struct gimplify_ctx gctx;
1e8e9920 7235
75a70cf9 7236 name = gimple_omp_critical_name (stmt);
1e8e9920 7237 if (name)
7238 {
c2f47e15 7239 tree decl;
1e8e9920 7240 splay_tree_node n;
7241
7242 if (!critical_name_mutexes)
7243 critical_name_mutexes
ba72912a 7244 = splay_tree_new_ggc (splay_tree_compare_pointers,
7245 ggc_alloc_splay_tree_tree_node_tree_node_splay_tree_s,
7246 ggc_alloc_splay_tree_tree_node_tree_node_splay_tree_node_s);
1e8e9920 7247
7248 n = splay_tree_lookup (critical_name_mutexes, (splay_tree_key) name);
7249 if (n == NULL)
7250 {
7251 char *new_str;
7252
7253 decl = create_tmp_var_raw (ptr_type_node, NULL);
7254
7255 new_str = ACONCAT ((".gomp_critical_user_",
7256 IDENTIFIER_POINTER (name), NULL));
7257 DECL_NAME (decl) = get_identifier (new_str);
7258 TREE_PUBLIC (decl) = 1;
7259 TREE_STATIC (decl) = 1;
7260 DECL_COMMON (decl) = 1;
7261 DECL_ARTIFICIAL (decl) = 1;
7262 DECL_IGNORED_P (decl) = 1;
1d416bd7 7263 varpool_finalize_decl (decl);
1e8e9920 7264
7265 splay_tree_insert (critical_name_mutexes, (splay_tree_key) name,
7266 (splay_tree_value) decl);
7267 }
7268 else
7269 decl = (tree) n->value;
7270
b9a16870 7271 lock = builtin_decl_explicit (BUILT_IN_GOMP_CRITICAL_NAME_START);
389dd41b 7272 lock = build_call_expr_loc (loc, lock, 1, build_fold_addr_expr_loc (loc, decl));
1e8e9920 7273
b9a16870 7274 unlock = builtin_decl_explicit (BUILT_IN_GOMP_CRITICAL_NAME_END);
389dd41b 7275 unlock = build_call_expr_loc (loc, unlock, 1,
7276 build_fold_addr_expr_loc (loc, decl));
1e8e9920 7277 }
7278 else
7279 {
b9a16870 7280 lock = builtin_decl_explicit (BUILT_IN_GOMP_CRITICAL_START);
389dd41b 7281 lock = build_call_expr_loc (loc, lock, 0);
1e8e9920 7282
b9a16870 7283 unlock = builtin_decl_explicit (BUILT_IN_GOMP_CRITICAL_END);
389dd41b 7284 unlock = build_call_expr_loc (loc, unlock, 0);
1e8e9920 7285 }
7286
dac18d1a 7287 push_gimplify_context (&gctx);
1e8e9920 7288
7289 block = make_node (BLOCK);
e3a19533 7290 bind = gimple_build_bind (NULL, NULL, block);
7291 gsi_replace (gsi_p, bind, true);
7292 gimple_bind_add_stmt (bind, stmt);
61e47ac8 7293
75a70cf9 7294 tbody = gimple_bind_body (bind);
7295 gimplify_and_add (lock, &tbody);
7296 gimple_bind_set_body (bind, tbody);
1e8e9920 7297
e3a19533 7298 lower_omp (gimple_omp_body_ptr (stmt), ctx);
75a70cf9 7299 gimple_omp_set_body (stmt, maybe_catch_exception (gimple_omp_body (stmt)));
7300 gimple_bind_add_seq (bind, gimple_omp_body (stmt));
7301 gimple_omp_set_body (stmt, NULL);
1e8e9920 7302
75a70cf9 7303 tbody = gimple_bind_body (bind);
7304 gimplify_and_add (unlock, &tbody);
7305 gimple_bind_set_body (bind, tbody);
61e47ac8 7306
75a70cf9 7307 gimple_bind_add_stmt (bind, gimple_build_omp_return (true));
1e8e9920 7308
7309 pop_gimplify_context (bind);
75a70cf9 7310 gimple_bind_append_vars (bind, ctx->block_vars);
7311 BLOCK_VARS (block) = gimple_bind_vars (bind);
773c5ba7 7312}
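
/* E.g., a sketch: "#pragma omp critical (foo)" lowers to

     GOMP_critical_name_start (&.gomp_critical_user_foo);
     <body>
     GOMP_critical_name_end (&.gomp_critical_user_foo);

   where .gomp_critical_user_foo is the common symbol created above, so
   every translation unit that uses the name "foo" shares one lock. The
   unnamed form instead uses GOMP_critical_start/GOMP_critical_end on
   libgomp's single default mutex. */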
7313
7314
7315/* A subroutine of lower_omp_for. Generate code to emit the predicate
7316 for a lastprivate clause. Given a loop control predicate of (V
7317 cond N2), we gate the clause on (!(V cond N2)). The lowered form
1e4afe3c 7318 is appended to *DLIST, iterator initialization is appended to
7319 *BODY_P. */
773c5ba7 7320
7321static void
75a70cf9 7322lower_omp_for_lastprivate (struct omp_for_data *fd, gimple_seq *body_p,
7323 gimple_seq *dlist, struct omp_context *ctx)
773c5ba7 7324{
75a70cf9 7325 tree clauses, cond, vinit;
773c5ba7 7326 enum tree_code cond_code;
75a70cf9 7327 gimple_seq stmts;
48e1416a 7328
fd6481cf 7329 cond_code = fd->loop.cond_code;
773c5ba7 7330 cond_code = cond_code == LT_EXPR ? GE_EXPR : LE_EXPR;
7331
7332 /* When possible, use a strict equality expression. This can let VRP
7333 type optimizations deduce the value and remove a copy. */
fd6481cf 7334 if (host_integerp (fd->loop.step, 0))
773c5ba7 7335 {
fd6481cf 7336 HOST_WIDE_INT step = TREE_INT_CST_LOW (fd->loop.step);
773c5ba7 7337 if (step == 1 || step == -1)
7338 cond_code = EQ_EXPR;
7339 }
7340
fd6481cf 7341 cond = build2 (cond_code, boolean_type_node, fd->loop.v, fd->loop.n2);
773c5ba7 7342
75a70cf9 7343 clauses = gimple_omp_for_clauses (fd->for_stmt);
1e4afe3c 7344 stmts = NULL;
7345 lower_lastprivate_clauses (clauses, cond, &stmts, ctx);
75a70cf9 7346 if (!gimple_seq_empty_p (stmts))
1e4afe3c 7347 {
75a70cf9 7348 gimple_seq_add_seq (&stmts, *dlist);
fd6481cf 7349 *dlist = stmts;
1e4afe3c 7350
7351 /* Optimize: v = 0; is usually cheaper than v = some_other_constant. */
fd6481cf 7352 vinit = fd->loop.n1;
1e4afe3c 7353 if (cond_code == EQ_EXPR
fd6481cf 7354 && host_integerp (fd->loop.n2, 0)
7355 && ! integer_zerop (fd->loop.n2))
7356 vinit = build_int_cst (TREE_TYPE (fd->loop.v), 0);
3d483a94 7357 else
7358 vinit = unshare_expr (vinit);
1e4afe3c 7359
7360 /* Initialize the iterator variable, so that threads that don't execute
7361 any iterations don't execute the lastprivate clauses by accident. */
75a70cf9 7362 gimplify_assign (fd->loop.v, vinit, body_p);
1e4afe3c 7363 }
773c5ba7 7364}
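
/* Concretely, a sketch: for "#pragma omp for lastprivate (x)" over
   "for (i = 0; i < n; i++)" this produces

     i = 0;                  <- vinit; threads that run no iterations
                                then fail the guard below
     <loop>
     if (i >= n)             <- or i == n when the step is +-1
       x = <thread's private copy of x>;

   with the guarded copy-out prepended to *DLIST. */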
7365
7366
7367/* Lower code for an OpenMP loop directive. */
7368
7369static void
75a70cf9 7370lower_omp_for (gimple_stmt_iterator *gsi_p, omp_context *ctx)
773c5ba7 7371{
75a70cf9 7372 tree *rhs_p, block;
773c5ba7 7373 struct omp_for_data fd;
75a70cf9 7374 gimple stmt = gsi_stmt (*gsi_p), new_stmt;
f018d957 7375 gimple_seq omp_for_body, body, dlist;
75a70cf9 7376 size_t i;
dac18d1a 7377 struct gimplify_ctx gctx;
773c5ba7 7378
dac18d1a 7379 push_gimplify_context (&gctx);
773c5ba7 7380
e3a19533 7381 lower_omp (gimple_omp_for_pre_body_ptr (stmt), ctx);
773c5ba7 7382
1d22f541 7383 block = make_node (BLOCK);
75a70cf9 7384 new_stmt = gimple_build_bind (NULL, NULL, block);
e3a19533 7385 /* Replace at gsi right away, so that 'stmt' is no longer a member
 7386 of a sequence, as we're going to add it to a different
 7387 one below. */
7388 gsi_replace (gsi_p, new_stmt, true);
1d22f541 7389
773c5ba7 7390 /* Move declaration of temporaries in the loop body before we make
7391 it go away. */
75a70cf9 7392 omp_for_body = gimple_omp_body (stmt);
7393 if (!gimple_seq_empty_p (omp_for_body)
7394 && gimple_code (gimple_seq_first_stmt (omp_for_body)) == GIMPLE_BIND)
7395 {
7396 tree vars = gimple_bind_vars (gimple_seq_first_stmt (omp_for_body));
7397 gimple_bind_append_vars (new_stmt, vars);
7398 }
773c5ba7 7399
75a70cf9 7400 /* The pre-body and input clauses go before the lowered GIMPLE_OMP_FOR. */
773c5ba7 7401 dlist = NULL;
75a70cf9 7402 body = NULL;
7403 lower_rec_input_clauses (gimple_omp_for_clauses (stmt), &body, &dlist, ctx);
7404 gimple_seq_add_seq (&body, gimple_omp_for_pre_body (stmt));
773c5ba7 7405
3d483a94 7406 lower_omp (gimple_omp_body_ptr (stmt), ctx);
7407
773c5ba7 7408 /* Lower the header expressions. At this point, we can assume that
7409 the header is of the form:
7410
7411 #pragma omp for (V = VAL1; V {<|>|<=|>=} VAL2; V = V [+-] VAL3)
7412
7413 We just need to make sure that VAL1, VAL2 and VAL3 are lowered
7414 using the .omp_data_s mapping, if needed. */
75a70cf9 7415 for (i = 0; i < gimple_omp_for_collapse (stmt); i++)
fd6481cf 7416 {
75a70cf9 7417 rhs_p = gimple_omp_for_initial_ptr (stmt, i);
fd6481cf 7418 if (!is_gimple_min_invariant (*rhs_p))
75a70cf9 7419 *rhs_p = get_formal_tmp_var (*rhs_p, &body);
fd6481cf 7420
75a70cf9 7421 rhs_p = gimple_omp_for_final_ptr (stmt, i);
fd6481cf 7422 if (!is_gimple_min_invariant (*rhs_p))
75a70cf9 7423 *rhs_p = get_formal_tmp_var (*rhs_p, &body);
fd6481cf 7424
75a70cf9 7425 rhs_p = &TREE_OPERAND (gimple_omp_for_incr (stmt, i), 1);
fd6481cf 7426 if (!is_gimple_min_invariant (*rhs_p))
75a70cf9 7427 *rhs_p = get_formal_tmp_var (*rhs_p, &body);
fd6481cf 7428 }
773c5ba7 7429
7430 /* Once lowered, extract the bounds and clauses. */
fd6481cf 7431 extract_omp_for_data (stmt, &fd, NULL);
773c5ba7 7432
75a70cf9 7433 lower_omp_for_lastprivate (&fd, &body, &dlist, ctx);
773c5ba7 7434
75a70cf9 7435 gimple_seq_add_stmt (&body, stmt);
7436 gimple_seq_add_seq (&body, gimple_omp_body (stmt));
61e47ac8 7437
75a70cf9 7438 gimple_seq_add_stmt (&body, gimple_build_omp_continue (fd.loop.v,
7439 fd.loop.v));
61e47ac8 7440
773c5ba7 7441 /* After the loop, add exit clauses. */
75a70cf9 7442 lower_reduction_clauses (gimple_omp_for_clauses (stmt), &body, ctx);
7443 gimple_seq_add_seq (&body, dlist);
773c5ba7 7444
75a70cf9 7445 body = maybe_catch_exception (body);
aade31a0 7446
61e47ac8 7447 /* Region exit marker goes at the end of the loop body. */
75a70cf9 7448 gimple_seq_add_stmt (&body, gimple_build_omp_return (fd.have_nowait));
773c5ba7 7449
1d22f541 7450 pop_gimplify_context (new_stmt);
75a70cf9 7451
7452 gimple_bind_append_vars (new_stmt, ctx->block_vars);
7453 BLOCK_VARS (block) = gimple_bind_vars (new_stmt);
1d22f541 7454 if (BLOCK_VARS (block))
7455 TREE_USED (block) = 1;
773c5ba7 7456
75a70cf9 7457 gimple_bind_set_body (new_stmt, body);
7458 gimple_omp_set_body (stmt, NULL);
7459 gimple_omp_for_set_pre_body (stmt, NULL);
1e8e9920 7460}
7461
48e1416a 7462/* Callback for walk_stmts. Check if the current statement only contains
75a70cf9 7463 GIMPLE_OMP_FOR or GIMPLE_OMP_SECTIONS. */
de7ef844 7464
7465static tree
75a70cf9 7466check_combined_parallel (gimple_stmt_iterator *gsi_p,
7467 bool *handled_ops_p,
7468 struct walk_stmt_info *wi)
de7ef844 7469{
4077bf7a 7470 int *info = (int *) wi->info;
75a70cf9 7471 gimple stmt = gsi_stmt (*gsi_p);
de7ef844 7472
75a70cf9 7473 *handled_ops_p = true;
7474 switch (gimple_code (stmt))
de7ef844 7475 {
75a70cf9 7476 WALK_SUBSTMTS;
7477
7478 case GIMPLE_OMP_FOR:
7479 case GIMPLE_OMP_SECTIONS:
de7ef844 7480 *info = *info == 0 ? 1 : -1;
7481 break;
7482 default:
7483 *info = -1;
7484 break;
7485 }
7486 return NULL;
7487}
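
/* E.g., a sketch of what this detects:

     #pragma omp parallel
       #pragma omp for
         ...

   Walking the parallel body leaves *INFO == 1 exactly when the body is a
   single workshare (for or sections); lower_omp_taskreg below then marks
   the parallel as combined, which later allows the expander to use the
   combined GOMP_parallel_loop_..._start / GOMP_parallel_sections_start
   entry points rather than a separate workshare call. */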
773c5ba7 7488
fd6481cf 7489struct omp_taskcopy_context
7490{
7491 /* This field must be at the beginning, as we do "inheritance": Some
7492 callback functions for tree-inline.c (e.g., omp_copy_decl)
7493 receive a copy_body_data pointer that is up-casted to an
7494 omp_context pointer. */
7495 copy_body_data cb;
7496 omp_context *ctx;
7497};
7498
7499static tree
7500task_copyfn_copy_decl (tree var, copy_body_data *cb)
7501{
7502 struct omp_taskcopy_context *tcctx = (struct omp_taskcopy_context *) cb;
7503
7504 if (splay_tree_lookup (tcctx->ctx->sfield_map, (splay_tree_key) var))
7505 return create_tmp_var (TREE_TYPE (var), NULL);
7506
7507 return var;
7508}
7509
7510static tree
7511task_copyfn_remap_type (struct omp_taskcopy_context *tcctx, tree orig_type)
7512{
7513 tree name, new_fields = NULL, type, f;
7514
7515 type = lang_hooks.types.make_type (RECORD_TYPE);
7516 name = DECL_NAME (TYPE_NAME (orig_type));
e60a6f7b 7517 name = build_decl (gimple_location (tcctx->ctx->stmt),
7518 TYPE_DECL, name, type);
fd6481cf 7519 TYPE_NAME (type) = name;
7520
7521 for (f = TYPE_FIELDS (orig_type); f ; f = TREE_CHAIN (f))
7522 {
7523 tree new_f = copy_node (f);
7524 DECL_CONTEXT (new_f) = type;
7525 TREE_TYPE (new_f) = remap_type (TREE_TYPE (f), &tcctx->cb);
7526 TREE_CHAIN (new_f) = new_fields;
75a70cf9 7527 walk_tree (&DECL_SIZE (new_f), copy_tree_body_r, &tcctx->cb, NULL);
7528 walk_tree (&DECL_SIZE_UNIT (new_f), copy_tree_body_r, &tcctx->cb, NULL);
7529 walk_tree (&DECL_FIELD_OFFSET (new_f), copy_tree_body_r,
7530 &tcctx->cb, NULL);
fd6481cf 7531 new_fields = new_f;
7532 *pointer_map_insert (tcctx->cb.decl_map, f) = new_f;
7533 }
7534 TYPE_FIELDS (type) = nreverse (new_fields);
7535 layout_type (type);
7536 return type;
7537}
7538
7539/* Create task copyfn. */
7540
7541static void
75a70cf9 7542create_task_copyfn (gimple task_stmt, omp_context *ctx)
fd6481cf 7543{
7544 struct function *child_cfun;
7545 tree child_fn, t, c, src, dst, f, sf, arg, sarg, decl;
7546 tree record_type, srecord_type, bind, list;
7547 bool record_needs_remap = false, srecord_needs_remap = false;
7548 splay_tree_node n;
7549 struct omp_taskcopy_context tcctx;
dac18d1a 7550 struct gimplify_ctx gctx;
389dd41b 7551 location_t loc = gimple_location (task_stmt);
fd6481cf 7552
75a70cf9 7553 child_fn = gimple_omp_task_copy_fn (task_stmt);
fd6481cf 7554 child_cfun = DECL_STRUCT_FUNCTION (child_fn);
7555 gcc_assert (child_cfun->cfg == NULL);
fd6481cf 7556 DECL_SAVED_TREE (child_fn) = alloc_stmt_list ();
7557
7558 /* Reset DECL_CONTEXT on function arguments. */
1767a056 7559 for (t = DECL_ARGUMENTS (child_fn); t; t = DECL_CHAIN (t))
fd6481cf 7560 DECL_CONTEXT (t) = child_fn;
7561
7562 /* Populate the function. */
dac18d1a 7563 push_gimplify_context (&gctx);
9078126c 7564 push_cfun (child_cfun);
fd6481cf 7565
7566 bind = build3 (BIND_EXPR, void_type_node, NULL, NULL, NULL);
7567 TREE_SIDE_EFFECTS (bind) = 1;
7568 list = NULL;
7569 DECL_SAVED_TREE (child_fn) = bind;
75a70cf9 7570 DECL_SOURCE_LOCATION (child_fn) = gimple_location (task_stmt);
fd6481cf 7571
7572 /* Remap src and dst argument types if needed. */
7573 record_type = ctx->record_type;
7574 srecord_type = ctx->srecord_type;
1767a056 7575 for (f = TYPE_FIELDS (record_type); f ; f = DECL_CHAIN (f))
fd6481cf 7576 if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
7577 {
7578 record_needs_remap = true;
7579 break;
7580 }
1767a056 7581 for (f = TYPE_FIELDS (srecord_type); f ; f = DECL_CHAIN (f))
fd6481cf 7582 if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
7583 {
7584 srecord_needs_remap = true;
7585 break;
7586 }
7587
7588 if (record_needs_remap || srecord_needs_remap)
7589 {
7590 memset (&tcctx, '\0', sizeof (tcctx));
7591 tcctx.cb.src_fn = ctx->cb.src_fn;
7592 tcctx.cb.dst_fn = child_fn;
53f79206 7593 tcctx.cb.src_node = cgraph_get_node (tcctx.cb.src_fn);
7594 gcc_checking_assert (tcctx.cb.src_node);
fd6481cf 7595 tcctx.cb.dst_node = tcctx.cb.src_node;
7596 tcctx.cb.src_cfun = ctx->cb.src_cfun;
7597 tcctx.cb.copy_decl = task_copyfn_copy_decl;
e38def9c 7598 tcctx.cb.eh_lp_nr = 0;
fd6481cf 7599 tcctx.cb.transform_call_graph_edges = CB_CGE_MOVE;
7600 tcctx.cb.decl_map = pointer_map_create ();
7601 tcctx.ctx = ctx;
7602
7603 if (record_needs_remap)
7604 record_type = task_copyfn_remap_type (&tcctx, record_type);
7605 if (srecord_needs_remap)
7606 srecord_type = task_copyfn_remap_type (&tcctx, srecord_type);
7607 }
7608 else
7609 tcctx.cb.decl_map = NULL;
7610
fd6481cf 7611 arg = DECL_ARGUMENTS (child_fn);
7612 TREE_TYPE (arg) = build_pointer_type (record_type);
1767a056 7613 sarg = DECL_CHAIN (arg);
fd6481cf 7614 TREE_TYPE (sarg) = build_pointer_type (srecord_type);
7615
7616 /* First pass: initialize temporaries used in record_type and srecord_type
7617 sizes and field offsets. */
7618 if (tcctx.cb.decl_map)
75a70cf9 7619 for (c = gimple_omp_task_clauses (task_stmt); c; c = OMP_CLAUSE_CHAIN (c))
fd6481cf 7620 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_FIRSTPRIVATE)
7621 {
7622 tree *p;
7623
7624 decl = OMP_CLAUSE_DECL (c);
7625 p = (tree *) pointer_map_contains (tcctx.cb.decl_map, decl);
7626 if (p == NULL)
7627 continue;
7628 n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
7629 sf = (tree) n->value;
7630 sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
182cf5a9 7631 src = build_simple_mem_ref_loc (loc, sarg);
445d06b6 7632 src = omp_build_component_ref (src, sf);
75a70cf9 7633 t = build2 (MODIFY_EXPR, TREE_TYPE (*p), *p, src);
fd6481cf 7634 append_to_statement_list (t, &list);
7635 }
7636
7637 /* Second pass: copy shared var pointers and copy construct non-VLA
7638 firstprivate vars. */
75a70cf9 7639 for (c = gimple_omp_task_clauses (task_stmt); c; c = OMP_CLAUSE_CHAIN (c))
fd6481cf 7640 switch (OMP_CLAUSE_CODE (c))
7641 {
7642 case OMP_CLAUSE_SHARED:
7643 decl = OMP_CLAUSE_DECL (c);
7644 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
7645 if (n == NULL)
7646 break;
7647 f = (tree) n->value;
7648 if (tcctx.cb.decl_map)
7649 f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
7650 n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
7651 sf = (tree) n->value;
7652 if (tcctx.cb.decl_map)
7653 sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
182cf5a9 7654 src = build_simple_mem_ref_loc (loc, sarg);
445d06b6 7655 src = omp_build_component_ref (src, sf);
182cf5a9 7656 dst = build_simple_mem_ref_loc (loc, arg);
445d06b6 7657 dst = omp_build_component_ref (dst, f);
75a70cf9 7658 t = build2 (MODIFY_EXPR, TREE_TYPE (dst), dst, src);
fd6481cf 7659 append_to_statement_list (t, &list);
7660 break;
7661 case OMP_CLAUSE_FIRSTPRIVATE:
7662 decl = OMP_CLAUSE_DECL (c);
7663 if (is_variable_sized (decl))
7664 break;
7665 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
7666 if (n == NULL)
7667 break;
7668 f = (tree) n->value;
7669 if (tcctx.cb.decl_map)
7670 f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
7671 n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
7672 if (n != NULL)
7673 {
7674 sf = (tree) n->value;
7675 if (tcctx.cb.decl_map)
7676 sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
182cf5a9 7677 src = build_simple_mem_ref_loc (loc, sarg);
445d06b6 7678 src = omp_build_component_ref (src, sf);
fd6481cf 7679 if (use_pointer_for_field (decl, NULL) || is_reference (decl))
182cf5a9 7680 src = build_simple_mem_ref_loc (loc, src);
fd6481cf 7681 }
7682 else
7683 src = decl;
182cf5a9 7684 dst = build_simple_mem_ref_loc (loc, arg);
445d06b6 7685 dst = omp_build_component_ref (dst, f);
fd6481cf 7686 t = lang_hooks.decls.omp_clause_copy_ctor (c, dst, src);
7687 append_to_statement_list (t, &list);
7688 break;
7689 case OMP_CLAUSE_PRIVATE:
7690 if (! OMP_CLAUSE_PRIVATE_OUTER_REF (c))
7691 break;
7692 decl = OMP_CLAUSE_DECL (c);
7693 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
7694 f = (tree) n->value;
7695 if (tcctx.cb.decl_map)
7696 f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
7697 n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
7698 if (n != NULL)
7699 {
7700 sf = (tree) n->value;
7701 if (tcctx.cb.decl_map)
7702 sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
182cf5a9 7703 src = build_simple_mem_ref_loc (loc, sarg);
445d06b6 7704 src = omp_build_component_ref (src, sf);
fd6481cf 7705 if (use_pointer_for_field (decl, NULL))
182cf5a9 7706 src = build_simple_mem_ref_loc (loc, src);
fd6481cf 7707 }
7708 else
7709 src = decl;
182cf5a9 7710 dst = build_simple_mem_ref_loc (loc, arg);
445d06b6 7711 dst = omp_build_component_ref (dst, f);
75a70cf9 7712 t = build2 (MODIFY_EXPR, TREE_TYPE (dst), dst, src);
fd6481cf 7713 append_to_statement_list (t, &list);
7714 break;
7715 default:
7716 break;
7717 }
7718
7719 /* Last pass: handle VLA firstprivates. */
7720 if (tcctx.cb.decl_map)
75a70cf9 7721 for (c = gimple_omp_task_clauses (task_stmt); c; c = OMP_CLAUSE_CHAIN (c))
fd6481cf 7722 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_FIRSTPRIVATE)
7723 {
7724 tree ind, ptr, df;
7725
7726 decl = OMP_CLAUSE_DECL (c);
7727 if (!is_variable_sized (decl))
7728 continue;
7729 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
7730 if (n == NULL)
7731 continue;
7732 f = (tree) n->value;
7733 f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
7734 gcc_assert (DECL_HAS_VALUE_EXPR_P (decl));
7735 ind = DECL_VALUE_EXPR (decl);
7736 gcc_assert (TREE_CODE (ind) == INDIRECT_REF);
7737 gcc_assert (DECL_P (TREE_OPERAND (ind, 0)));
7738 n = splay_tree_lookup (ctx->sfield_map,
7739 (splay_tree_key) TREE_OPERAND (ind, 0));
7740 sf = (tree) n->value;
7741 sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
182cf5a9 7742 src = build_simple_mem_ref_loc (loc, sarg);
445d06b6 7743 src = omp_build_component_ref (src, sf);
182cf5a9 7744 src = build_simple_mem_ref_loc (loc, src);
7745 dst = build_simple_mem_ref_loc (loc, arg);
445d06b6 7746 dst = omp_build_component_ref (dst, f);
fd6481cf 7747 t = lang_hooks.decls.omp_clause_copy_ctor (c, dst, src);
7748 append_to_statement_list (t, &list);
7749 n = splay_tree_lookup (ctx->field_map,
7750 (splay_tree_key) TREE_OPERAND (ind, 0));
7751 df = (tree) n->value;
7752 df = *(tree *) pointer_map_contains (tcctx.cb.decl_map, df);
182cf5a9 7753 ptr = build_simple_mem_ref_loc (loc, arg);
445d06b6 7754 ptr = omp_build_component_ref (ptr, df);
75a70cf9 7755 t = build2 (MODIFY_EXPR, TREE_TYPE (ptr), ptr,
389dd41b 7756 build_fold_addr_expr_loc (loc, dst));
fd6481cf 7757 append_to_statement_list (t, &list);
7758 }
7759
7760 t = build1 (RETURN_EXPR, void_type_node, NULL);
7761 append_to_statement_list (t, &list);
7762
7763 if (tcctx.cb.decl_map)
7764 pointer_map_destroy (tcctx.cb.decl_map);
7765 pop_gimplify_context (NULL);
7766 BIND_EXPR_BODY (bind) = list;
7767 pop_cfun ();
fd6481cf 7768}
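
/* As a sketch (struct and field names here are illustrative only), the
   generated copyfn has the shape

     void .omp_task_copyfn (struct DST *dst, struct SRC *src)
     {
       pass 1: initialize temporaries for VLA sizes and field offsets;
       pass 2: dst->shared_ptr = src->shared_ptr;
               copy-construct the non-VLA firstprivate fields;
       pass 3: copy-construct VLA firstprivates and store their
               addresses back into *dst;
     }

   GOMP_task invokes it to fill the child task's data block from the data
   captured when the task construct was encountered. */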
7769
75a70cf9 7770/* Lower the OpenMP parallel or task directive in the current statement
7771 in GSI_P. CTX holds context information for the directive. */
773c5ba7 7772
7773static void
75a70cf9 7774lower_omp_taskreg (gimple_stmt_iterator *gsi_p, omp_context *ctx)
773c5ba7 7775{
75a70cf9 7776 tree clauses;
7777 tree child_fn, t;
7778 gimple stmt = gsi_stmt (*gsi_p);
7779 gimple par_bind, bind;
7780 gimple_seq par_body, olist, ilist, par_olist, par_ilist, new_body;
dac18d1a 7781 struct gimplify_ctx gctx;
389dd41b 7782 location_t loc = gimple_location (stmt);
773c5ba7 7783
75a70cf9 7784 clauses = gimple_omp_taskreg_clauses (stmt);
7785 par_bind = gimple_seq_first_stmt (gimple_omp_body (stmt));
7786 par_body = gimple_bind_body (par_bind);
773c5ba7 7787 child_fn = ctx->cb.dst_fn;
75a70cf9 7788 if (gimple_code (stmt) == GIMPLE_OMP_PARALLEL
7789 && !gimple_omp_parallel_combined_p (stmt))
de7ef844 7790 {
7791 struct walk_stmt_info wi;
7792 int ws_num = 0;
7793
7794 memset (&wi, 0, sizeof (wi));
de7ef844 7795 wi.info = &ws_num;
7796 wi.val_only = true;
75a70cf9 7797 walk_gimple_seq (par_body, check_combined_parallel, NULL, &wi);
de7ef844 7798 if (ws_num == 1)
75a70cf9 7799 gimple_omp_parallel_set_combined_p (stmt, true);
de7ef844 7800 }
fd6481cf 7801 if (ctx->srecord_type)
7802 create_task_copyfn (stmt, ctx);
773c5ba7 7803
dac18d1a 7804 push_gimplify_context (&gctx);
773c5ba7 7805
75a70cf9 7806 par_olist = NULL;
7807 par_ilist = NULL;
773c5ba7 7808 lower_rec_input_clauses (clauses, &par_ilist, &par_olist, ctx);
e3a19533 7809 lower_omp (&par_body, ctx);
75a70cf9 7810 if (gimple_code (stmt) == GIMPLE_OMP_PARALLEL)
fd6481cf 7811 lower_reduction_clauses (clauses, &par_olist, ctx);
773c5ba7 7812
7813 /* Declare all the variables created by mapping and the variables
7814 declared in the scope of the parallel body. */
7815 record_vars_into (ctx->block_vars, child_fn);
75a70cf9 7816 record_vars_into (gimple_bind_vars (par_bind), child_fn);
773c5ba7 7817
7818 if (ctx->record_type)
7819 {
fd6481cf 7820 ctx->sender_decl
7821 = create_tmp_var (ctx->srecord_type ? ctx->srecord_type
7822 : ctx->record_type, ".omp_data_o");
84bfaaeb 7823 DECL_NAMELESS (ctx->sender_decl) = 1;
86f2ad37 7824 TREE_ADDRESSABLE (ctx->sender_decl) = 1;
75a70cf9 7825 gimple_omp_taskreg_set_data_arg (stmt, ctx->sender_decl);
773c5ba7 7826 }
7827
75a70cf9 7828 olist = NULL;
7829 ilist = NULL;
773c5ba7 7830 lower_send_clauses (clauses, &ilist, &olist, ctx);
7831 lower_send_shared_vars (&ilist, &olist, ctx);
7832
7833 /* Once all the expansions are done, sequence all the different
75a70cf9 7834 fragments inside gimple_omp_body. */
773c5ba7 7835
75a70cf9 7836 new_body = NULL;
773c5ba7 7837
7838 if (ctx->record_type)
7839 {
389dd41b 7840 t = build_fold_addr_expr_loc (loc, ctx->sender_decl);
cc6b725b 7841 /* fixup_child_record_type might have changed receiver_decl's type. */
389dd41b 7842 t = fold_convert_loc (loc, TREE_TYPE (ctx->receiver_decl), t);
75a70cf9 7843 gimple_seq_add_stmt (&new_body,
7844 gimple_build_assign (ctx->receiver_decl, t));
773c5ba7 7845 }
7846
75a70cf9 7847 gimple_seq_add_seq (&new_body, par_ilist);
7848 gimple_seq_add_seq (&new_body, par_body);
7849 gimple_seq_add_seq (&new_body, par_olist);
7850 new_body = maybe_catch_exception (new_body);
7851 gimple_seq_add_stmt (&new_body, gimple_build_omp_return (false));
7852 gimple_omp_set_body (stmt, new_body);
773c5ba7 7853
75a70cf9 7854 bind = gimple_build_bind (NULL, NULL, gimple_bind_block (par_bind));
75a70cf9 7855 gsi_replace (gsi_p, bind, true);
e3a19533 7856 gimple_bind_add_seq (bind, ilist);
7857 gimple_bind_add_stmt (bind, stmt);
7858 gimple_bind_add_seq (bind, olist);
773c5ba7 7859
75a70cf9 7860 pop_gimplify_context (NULL);
773c5ba7 7861}
7862
a4890dc9 7863/* Callback for lower_omp_1. Return non-NULL if *tp needs to be
75a70cf9 7864 regimplified. If DATA is non-NULL, lower_omp_1 is outside
 7865 of an OpenMP context, but with task_shared_vars set. */
46515c92 7866
7867static tree
75a70cf9 7868lower_omp_regimplify_p (tree *tp, int *walk_subtrees,
7869 void *data)
46515c92 7870{
a4890dc9 7871 tree t = *tp;
46515c92 7872
a4890dc9 7873 /* Any variable with DECL_VALUE_EXPR needs to be regimplified. */
75a70cf9 7874 if (TREE_CODE (t) == VAR_DECL && data == NULL && DECL_HAS_VALUE_EXPR_P (t))
9f49e155 7875 return t;
7876
7877 if (task_shared_vars
7878 && DECL_P (t)
7879 && bitmap_bit_p (task_shared_vars, DECL_UID (t)))
a4890dc9 7880 return t;
46515c92 7881
a4890dc9 7882 /* If a global variable has been privatized, TREE_CONSTANT on
7883 ADDR_EXPR might be wrong. */
75a70cf9 7884 if (data == NULL && TREE_CODE (t) == ADDR_EXPR)
a4890dc9 7885 recompute_tree_invariant_for_addr_expr (t);
46515c92 7886
a4890dc9 7887 *walk_subtrees = !TYPE_P (t) && !DECL_P (t);
7888 return NULL_TREE;
46515c92 7889}
773c5ba7 7890
a4890dc9 7891static void
75a70cf9 7892lower_omp_1 (gimple_stmt_iterator *gsi_p, omp_context *ctx)
1e8e9920 7893{
75a70cf9 7894 gimple stmt = gsi_stmt (*gsi_p);
7895 struct walk_stmt_info wi;
1e8e9920 7896
75a70cf9 7897 if (gimple_has_location (stmt))
7898 input_location = gimple_location (stmt);
a4890dc9 7899
75a70cf9 7900 if (task_shared_vars)
7901 memset (&wi, '\0', sizeof (wi));
a4890dc9 7902
773c5ba7 7903 /* If we have issued syntax errors, avoid doing any heavy lifting.
7904 Just replace the OpenMP directives with a NOP to avoid
7905 confusing RTL expansion. */
852f689e 7906 if (seen_error () && is_gimple_omp (stmt))
773c5ba7 7907 {
75a70cf9 7908 gsi_replace (gsi_p, gimple_build_nop (), true);
a4890dc9 7909 return;
773c5ba7 7910 }
7911
75a70cf9 7912 switch (gimple_code (stmt))
1e8e9920 7913 {
75a70cf9 7914 case GIMPLE_COND:
fd6481cf 7915 if ((ctx || task_shared_vars)
75a70cf9 7916 && (walk_tree (gimple_cond_lhs_ptr (stmt), lower_omp_regimplify_p,
7917 ctx ? NULL : &wi, NULL)
7918 || walk_tree (gimple_cond_rhs_ptr (stmt), lower_omp_regimplify_p,
7919 ctx ? NULL : &wi, NULL)))
7920 gimple_regimplify_operands (stmt, gsi_p);
a4890dc9 7921 break;
75a70cf9 7922 case GIMPLE_CATCH:
e3a19533 7923 lower_omp (gimple_catch_handler_ptr (stmt), ctx);
a4890dc9 7924 break;
75a70cf9 7925 case GIMPLE_EH_FILTER:
e3a19533 7926 lower_omp (gimple_eh_filter_failure_ptr (stmt), ctx);
a4890dc9 7927 break;
75a70cf9 7928 case GIMPLE_TRY:
e3a19533 7929 lower_omp (gimple_try_eval_ptr (stmt), ctx);
7930 lower_omp (gimple_try_cleanup_ptr (stmt), ctx);
a4890dc9 7931 break;
35215227 7932 case GIMPLE_TRANSACTION:
7933 lower_omp (gimple_transaction_body_ptr (stmt), ctx);
7934 break;
75a70cf9 7935 case GIMPLE_BIND:
e3a19533 7936 lower_omp (gimple_bind_body_ptr (stmt), ctx);
a4890dc9 7937 break;
75a70cf9 7938 case GIMPLE_OMP_PARALLEL:
7939 case GIMPLE_OMP_TASK:
7940 ctx = maybe_lookup_ctx (stmt);
7941 lower_omp_taskreg (gsi_p, ctx);
a4890dc9 7942 break;
75a70cf9 7943 case GIMPLE_OMP_FOR:
7944 ctx = maybe_lookup_ctx (stmt);
1e8e9920 7945 gcc_assert (ctx);
75a70cf9 7946 lower_omp_for (gsi_p, ctx);
1e8e9920 7947 break;
75a70cf9 7948 case GIMPLE_OMP_SECTIONS:
7949 ctx = maybe_lookup_ctx (stmt);
1e8e9920 7950 gcc_assert (ctx);
75a70cf9 7951 lower_omp_sections (gsi_p, ctx);
1e8e9920 7952 break;
75a70cf9 7953 case GIMPLE_OMP_SINGLE:
7954 ctx = maybe_lookup_ctx (stmt);
1e8e9920 7955 gcc_assert (ctx);
75a70cf9 7956 lower_omp_single (gsi_p, ctx);
1e8e9920 7957 break;
75a70cf9 7958 case GIMPLE_OMP_MASTER:
7959 ctx = maybe_lookup_ctx (stmt);
1e8e9920 7960 gcc_assert (ctx);
75a70cf9 7961 lower_omp_master (gsi_p, ctx);
1e8e9920 7962 break;
75a70cf9 7963 case GIMPLE_OMP_ORDERED:
7964 ctx = maybe_lookup_ctx (stmt);
1e8e9920 7965 gcc_assert (ctx);
75a70cf9 7966 lower_omp_ordered (gsi_p, ctx);
1e8e9920 7967 break;
75a70cf9 7968 case GIMPLE_OMP_CRITICAL:
7969 ctx = maybe_lookup_ctx (stmt);
1e8e9920 7970 gcc_assert (ctx);
75a70cf9 7971 lower_omp_critical (gsi_p, ctx);
7972 break;
7973 case GIMPLE_OMP_ATOMIC_LOAD:
7974 if ((ctx || task_shared_vars)
7975 && walk_tree (gimple_omp_atomic_load_rhs_ptr (stmt),
7976 lower_omp_regimplify_p, ctx ? NULL : &wi, NULL))
7977 gimple_regimplify_operands (stmt, gsi_p);
1e8e9920 7978 break;
a4890dc9 7979 default:
fd6481cf 7980 if ((ctx || task_shared_vars)
75a70cf9 7981 && walk_gimple_op (stmt, lower_omp_regimplify_p,
7982 ctx ? NULL : &wi))
7983 gimple_regimplify_operands (stmt, gsi_p);
1e8e9920 7984 break;
1e8e9920 7985 }
1e8e9920 7986}
7987
7988static void
e3a19533 7989lower_omp (gimple_seq *body, omp_context *ctx)
1e8e9920 7990{
1d22f541 7991 location_t saved_location = input_location;
e3a19533 7992 gimple_stmt_iterator gsi;
7993 for (gsi = gsi_start (*body); !gsi_end_p (gsi); gsi_next (&gsi))
75a70cf9 7994 lower_omp_1 (&gsi, ctx);
1d22f541 7995 input_location = saved_location;
1e8e9920 7996}
7997\f
7998/* Main entry point. */
7999
2a1990e9 8000static unsigned int
1e8e9920 8001execute_lower_omp (void)
8002{
75a70cf9 8003 gimple_seq body;
8004
41709826 8005 /* This pass always runs, to provide PROP_gimple_lomp.
8006 But there is nothing to do unless -fopenmp is given. */
8007 if (flag_openmp == 0)
8008 return 0;
8009
1e8e9920 8010 all_contexts = splay_tree_new (splay_tree_compare_pointers, 0,
8011 delete_omp_context);
8012
75a70cf9 8013 body = gimple_body (current_function_decl);
ab129075 8014 scan_omp (&body, NULL);
fd6481cf 8015 gcc_assert (taskreg_nesting_level == 0);
1e8e9920 8016
8017 if (all_contexts->root)
fd6481cf 8018 {
dac18d1a 8019 struct gimplify_ctx gctx;
8020
fd6481cf 8021 if (task_shared_vars)
dac18d1a 8022 push_gimplify_context (&gctx);
e3a19533 8023 lower_omp (&body, NULL);
fd6481cf 8024 if (task_shared_vars)
8025 pop_gimplify_context (NULL);
8026 }
1e8e9920 8027
773c5ba7 8028 if (all_contexts)
8029 {
8030 splay_tree_delete (all_contexts);
8031 all_contexts = NULL;
8032 }
fd6481cf 8033 BITMAP_FREE (task_shared_vars);
2a1990e9 8034 return 0;
1e8e9920 8035}
8036
cbe8bda8 8037namespace {
8038
8039const pass_data pass_data_lower_omp =
8040{
8041 GIMPLE_PASS, /* type */
8042 "omplower", /* name */
8043 OPTGROUP_NONE, /* optinfo_flags */
8044 false, /* has_gate */
8045 true, /* has_execute */
8046 TV_NONE, /* tv_id */
8047 PROP_gimple_any, /* properties_required */
8048 PROP_gimple_lomp, /* properties_provided */
8049 0, /* properties_destroyed */
8050 0, /* todo_flags_start */
8051 0, /* todo_flags_finish */
1e8e9920 8052};
cbe8bda8 8053
8054class pass_lower_omp : public gimple_opt_pass
8055{
8056public:
8057 pass_lower_omp(gcc::context *ctxt)
8058 : gimple_opt_pass(pass_data_lower_omp, ctxt)
8059 {}
8060
8061 /* opt_pass methods: */
8062 unsigned int execute () { return execute_lower_omp (); }
8063
8064}; // class pass_lower_omp
8065
8066} // anon namespace
8067
8068gimple_opt_pass *
8069make_pass_lower_omp (gcc::context *ctxt)
8070{
8071 return new pass_lower_omp (ctxt);
8072}
1e8e9920 8073\f
8074/* The following is a utility to diagnose OpenMP structured block violations.
61e47ac8 8075 It is not part of the "omplower" pass, as that's invoked too late. It
8076 should be invoked by the respective front ends after gimplification. */
1e8e9920 8077
8078static splay_tree all_labels;
8079
8080/* Check for mismatched contexts and generate an error if needed. Return
8081 true if an error is detected. */
8082
8083static bool
75a70cf9 8084diagnose_sb_0 (gimple_stmt_iterator *gsi_p,
8085 gimple branch_ctx, gimple label_ctx)
1e8e9920 8086{
75a70cf9 8087 if (label_ctx == branch_ctx)
1e8e9920 8088 return false;
8089
48e1416a 8090
75a70cf9 8091 /*
8092 Previously we kept track of the label's entire context in diagnose_sb_[12]
8093 so we could traverse it and issue a correct "exit" or "enter" error
8094 message upon a structured block violation.
8095
 8096 We built the context as a tree_cons'd list, but there is
8097 no easy counterpart in gimple tuples. It seems like far too much work
8098 for issuing exit/enter error messages. If someone really misses the
8099 distinct error message... patches welcome.
8100 */
48e1416a 8101
75a70cf9 8102#if 0
1e8e9920 8103 /* Try to avoid confusing the user by producing an error message
f0b5f617 8104 with correct "exit" or "enter" verbiage. We prefer "exit"
1e8e9920 8105 unless we can show that LABEL_CTX is nested within BRANCH_CTX. */
8106 if (branch_ctx == NULL)
8107 exit_p = false;
8108 else
8109 {
8110 while (label_ctx)
8111 {
8112 if (TREE_VALUE (label_ctx) == branch_ctx)
8113 {
8114 exit_p = false;
8115 break;
8116 }
8117 label_ctx = TREE_CHAIN (label_ctx);
8118 }
8119 }
8120
8121 if (exit_p)
8122 error ("invalid exit from OpenMP structured block");
8123 else
8124 error ("invalid entry to OpenMP structured block");
75a70cf9 8125#endif
1e8e9920 8126
75a70cf9 8127 /* If it's obvious we have an invalid entry, be specific about the error. */
8128 if (branch_ctx == NULL)
8129 error ("invalid entry to OpenMP structured block");
8130 else
8131 /* Otherwise, be vague and lazy, but efficient. */
8132 error ("invalid branch to/from an OpenMP structured block");
8133
8134 gsi_replace (gsi_p, gimple_build_nop (), false);
1e8e9920 8135 return true;
8136}
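
/* E.g., sketches of what gets rejected here:

     goto l;                      <- invalid entry
     #pragma omp parallel
       { l:; }

     #pragma omp parallel
       { goto l; }                <- invalid exit
     l:; */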
8137
8138/* Pass 1: Create a minimal tree of OpenMP structured blocks, and record
75a70cf9 8139 where each label is found. */
1e8e9920 8140
8141static tree
75a70cf9 8142diagnose_sb_1 (gimple_stmt_iterator *gsi_p, bool *handled_ops_p,
8143 struct walk_stmt_info *wi)
1e8e9920 8144{
75a70cf9 8145 gimple context = (gimple) wi->info;
8146 gimple inner_context;
8147 gimple stmt = gsi_stmt (*gsi_p);
1e8e9920 8148
75a70cf9 8149 *handled_ops_p = true;
8150
8151 switch (gimple_code (stmt))
1e8e9920 8152 {
75a70cf9 8153 WALK_SUBSTMTS;
48e1416a 8154
75a70cf9 8155 case GIMPLE_OMP_PARALLEL:
8156 case GIMPLE_OMP_TASK:
8157 case GIMPLE_OMP_SECTIONS:
8158 case GIMPLE_OMP_SINGLE:
8159 case GIMPLE_OMP_SECTION:
8160 case GIMPLE_OMP_MASTER:
8161 case GIMPLE_OMP_ORDERED:
8162 case GIMPLE_OMP_CRITICAL:
8163 /* The minimal context here is just the current OMP construct. */
8164 inner_context = stmt;
1e8e9920 8165 wi->info = inner_context;
75a70cf9 8166 walk_gimple_seq (gimple_omp_body (stmt), diagnose_sb_1, NULL, wi);
1e8e9920 8167 wi->info = context;
8168 break;
8169
75a70cf9 8170 case GIMPLE_OMP_FOR:
8171 inner_context = stmt;
1e8e9920 8172 wi->info = inner_context;
75a70cf9 8173 /* gimple_omp_for_{index,initial,final} are all DECLs; no need to
8174 walk them. */
8175 walk_gimple_seq (gimple_omp_for_pre_body (stmt),
8176 diagnose_sb_1, NULL, wi);
8177 walk_gimple_seq (gimple_omp_body (stmt), diagnose_sb_1, NULL, wi);
1e8e9920 8178 wi->info = context;
8179 break;
8180
75a70cf9 8181 case GIMPLE_LABEL:
8182 splay_tree_insert (all_labels, (splay_tree_key) gimple_label_label (stmt),
1e8e9920 8183 (splay_tree_value) context);
8184 break;
8185
8186 default:
8187 break;
8188 }
8189
8190 return NULL_TREE;
8191}
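
/* For the hypothetical example given before diagnose_sb_0, this walk
   leaves all_labels mapping INSIDE to the enclosing GIMPLE_OMP_PARALLEL
   statement, while labels outside any OpenMP construct map to the NULL
   (outermost) context.  That is exactly the information pass 2 compares
   branch contexts against.  */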
8192
8193/* Pass 2: Check each branch and see if its context differs from that of
8194 the destination label's context. */
8195
8196static tree
75a70cf9 8197diagnose_sb_2 (gimple_stmt_iterator *gsi_p, bool *handled_ops_p,
8198 struct walk_stmt_info *wi)
1e8e9920 8199{
75a70cf9 8200 gimple context = (gimple) wi->info;
1e8e9920 8201 splay_tree_node n;
75a70cf9 8202 gimple stmt = gsi_stmt (*gsi_p);
1e8e9920 8203
75a70cf9 8204 *handled_ops_p = true;
8205
8206 switch (gimple_code (stmt))
1e8e9920 8207 {
75a70cf9 8208 WALK_SUBSTMTS;
8209
8210 case GIMPLE_OMP_PARALLEL:
8211 case GIMPLE_OMP_TASK:
8212 case GIMPLE_OMP_SECTIONS:
8213 case GIMPLE_OMP_SINGLE:
8214 case GIMPLE_OMP_SECTION:
8215 case GIMPLE_OMP_MASTER:
8216 case GIMPLE_OMP_ORDERED:
8217 case GIMPLE_OMP_CRITICAL:
8218 wi->info = stmt;
e3a19533 8219 walk_gimple_seq_mod (gimple_omp_body_ptr (stmt), diagnose_sb_2, NULL, wi);
1e8e9920 8220 wi->info = context;
8221 break;
8222
75a70cf9 8223 case GIMPLE_OMP_FOR:
8224 wi->info = stmt;
8225 /* gimple_omp_for_{index,initial,final} are all DECLs; no need to
8226 walk them. */
e3a19533 8227 walk_gimple_seq_mod (gimple_omp_for_pre_body_ptr (stmt),
8228 diagnose_sb_2, NULL, wi);
8229 walk_gimple_seq_mod (gimple_omp_body_ptr (stmt), diagnose_sb_2, NULL, wi);
1e8e9920 8230 wi->info = context;
8231 break;
8232
0e1818e7 8233 case GIMPLE_COND:
8234 {
8235 tree lab = gimple_cond_true_label (stmt);
8236 if (lab)
8237 {
8238 n = splay_tree_lookup (all_labels,
8239 (splay_tree_key) lab);
8240 diagnose_sb_0 (gsi_p, context,
8241 n ? (gimple) n->value : NULL);
8242 }
8243 lab = gimple_cond_false_label (stmt);
8244 if (lab)
8245 {
8246 n = splay_tree_lookup (all_labels,
8247 (splay_tree_key) lab);
8248 diagnose_sb_0 (gsi_p, context,
8249 n ? (gimple) n->value : NULL);
8250 }
8251 }
8252 break;
8253
75a70cf9 8254 case GIMPLE_GOTO:
1e8e9920 8255 {
75a70cf9 8256 tree lab = gimple_goto_dest (stmt);
1e8e9920 8257 if (TREE_CODE (lab) != LABEL_DECL)
8258 break;
8259
8260 n = splay_tree_lookup (all_labels, (splay_tree_key) lab);
75a70cf9 8261 diagnose_sb_0 (gsi_p, context, n ? (gimple) n->value : NULL);
1e8e9920 8262 }
8263 break;
8264
75a70cf9 8265 case GIMPLE_SWITCH:
1e8e9920 8266 {
75a70cf9 8267 unsigned int i;
8268 for (i = 0; i < gimple_switch_num_labels (stmt); ++i)
1e8e9920 8269 {
75a70cf9 8270 tree lab = CASE_LABEL (gimple_switch_label (stmt, i));
1e8e9920 8271 n = splay_tree_lookup (all_labels, (splay_tree_key) lab);
75a70cf9 8272 if (n && diagnose_sb_0 (gsi_p, context, (gimple) n->value))
1e8e9920 8273 break;
8274 }
8275 }
8276 break;
8277
75a70cf9 8278 case GIMPLE_RETURN:
8279 diagnose_sb_0 (gsi_p, context, NULL);
1e8e9920 8280 break;
8281
8282 default:
8283 break;
8284 }
8285
8286 return NULL_TREE;
8287}
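
/* Another hypothetical example, this time reaching the GIMPLE_RETURN
   case above:

       void
       g (void)
       {
       #pragma omp parallel
         {
           return;
         }
       }

   The return is walked with a non-NULL context (the parallel
   statement), so diagnose_sb_0 (gsi_p, context, NULL) diagnoses an
   invalid branch out of the structured block.  */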
8288
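/* Drive both walks over the current function's body and rewrite any
   offending branches.  Returns 0 so it can serve directly as a pass
   execute hook.  */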
bfec3452 8289static unsigned int
8290diagnose_omp_structured_block_errors (void)
1e8e9920 8291{
1e8e9920 8292 struct walk_stmt_info wi;
bfec3452 8293 gimple_seq body = gimple_body (current_function_decl);
1e8e9920 8294
8295 all_labels = splay_tree_new (splay_tree_compare_pointers, 0, 0);
8296
8297 memset (&wi, 0, sizeof (wi));
75a70cf9 8298 walk_gimple_seq (body, diagnose_sb_1, NULL, &wi);
1e8e9920 8299
8300 memset (&wi, 0, sizeof (wi));
1e8e9920 8301 wi.want_locations = true;
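  /* diagnose_sb_0 may replace an offending branch with a nop, so walk
     with the _mod variant and store the possibly rewritten body back
     below.  */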
e3a19533 8302 walk_gimple_seq_mod (&body, diagnose_sb_2, NULL, &wi);
8303
8304 gimple_set_body (current_function_decl, body);
1e8e9920 8305
8306 splay_tree_delete (all_labels);
8307 all_labels = NULL;
8308
bfec3452 8309 return 0;
1e8e9920 8310}
8311
bfec3452 8312static bool
8313gate_diagnose_omp_blocks (void)
8314{
8315 return flag_openmp != 0;
8316}
8317
cbe8bda8 8318namespace {
8319
8320const pass_data pass_data_diagnose_omp_blocks =
8321{
8322 GIMPLE_PASS, /* type */
8323 "*diagnose_omp_blocks", /* name */
8324 OPTGROUP_NONE, /* optinfo_flags */
8325 true, /* has_gate */
8326 true, /* has_execute */
8327 TV_NONE, /* tv_id */
8328 PROP_gimple_any, /* properties_required */
8329 0, /* properties_provided */
8330 0, /* properties_destroyed */
8331 0, /* todo_flags_start */
8332 0, /* todo_flags_finish */
bfec3452 8333};
8334
cbe8bda8 8335class pass_diagnose_omp_blocks : public gimple_opt_pass
8336{
8337public:
8338 pass_diagnose_omp_blocks(gcc::context *ctxt)
8339 : gimple_opt_pass(pass_data_diagnose_omp_blocks, ctxt)
8340 {}
8341
8342 /* opt_pass methods: */
8343 bool gate () { return gate_diagnose_omp_blocks (); }
 8344  unsigned int execute () { return diagnose_omp_structured_block_errors (); }
8347
8348}; // class pass_diagnose_omp_blocks
8349
8350} // anon namespace
8351
8352gimple_opt_pass *
8353make_pass_diagnose_omp_blocks (gcc::context *ctxt)
8354{
8355 return new pass_diagnose_omp_blocks (ctxt);
8356}
8357
1e8e9920 8358#include "gt-omp-low.h"