/* Lowering pass for OpenMP directives.  Converts OpenMP directives
   into explicit calls to the runtime library (libgomp) and data
   marshalling to implement data sharing and copying clauses.
   Contributed by Diego Novillo <dnovillo@redhat.com>

   Copyright (C) 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012
   Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
#include "rtl.h"
#include "gimple.h"
#include "tree-iterator.h"
#include "tree-inline.h"
#include "langhooks.h"
#include "diagnostic-core.h"
#include "tree-flow.h"
#include "flags.h"
#include "function.h"
#include "expr.h"
#include "tree-pass.h"
#include "ggc.h"
#include "except.h"
#include "splay-tree.h"
#include "optabs.h"
#include "cfgloop.h"


/* Lowering of OpenMP parallel and workshare constructs proceeds in two
   phases.  The first phase scans the function looking for OMP statements
   and then for variables that must be replaced to satisfy data sharing
   clauses.  The second phase expands code for the constructs, as well as
   re-gimplifying things when variables have been replaced with complex
   expressions.

   Final code generation is done by pass_expand_omp.  The flowgraph is
   scanned for parallel regions which are then moved to a new
   function, to be invoked by the thread library.  */
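
/* As a rough illustration only (a simplified sketch, not the compiler's
   literal output), a construct such as

       #pragma omp parallel shared (x)
         body;

   is outlined into a child function and replaced by calls into libgomp:

       void foo._omp_fn.0 (struct .omp_data_s *.omp_data_i)
       { ... body, accessing x through .omp_data_i->x ... }

       .omp_data_o.x = x;
       __builtin_GOMP_parallel_start (foo._omp_fn.0, &.omp_data_o, 0);
       foo._omp_fn.0 (&.omp_data_o);
       __builtin_GOMP_parallel_end ();  */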

/* Context structure.  Used to store information about each parallel
   directive in the code.  */

typedef struct omp_context
{
  /* This field must be at the beginning, as we do "inheritance":  Some
     callback functions for tree-inline.c (e.g., omp_copy_decl)
     receive a copy_body_data pointer that is up-casted to an
     omp_context pointer.  */
  copy_body_data cb;

  /* The tree of contexts corresponding to the encountered constructs.  */
  struct omp_context *outer;
  gimple stmt;

  /* Map variables to fields in a structure that allows communication
     between sending and receiving threads.  */
  splay_tree field_map;
  tree record_type;
  tree sender_decl;
  tree receiver_decl;

  /* These are used just by task contexts, if task firstprivate fn is
     needed.  srecord_type is used to communicate from the thread
     that encountered the task construct to task firstprivate fn;
     record_type is allocated by GOMP_task, initialized by task
     firstprivate fn and passed to the task body fn.  */
  splay_tree sfield_map;
  tree srecord_type;

  /* A chain of variables to add to the top-level block surrounding the
     construct.  In the case of a parallel, this is in the child function.  */
  tree block_vars;

  /* What to do with variables with implicitly determined sharing
     attributes.  */
  enum omp_clause_default_kind default_kind;

  /* Nesting depth of this context.  Used to beautify error messages re
     invalid gotos.  The outermost ctx is depth 1, with depth 0 being
     reserved for the main body of the function.  */
  int depth;

  /* True if this parallel directive is nested within another.  */
  bool is_nested;
} omp_context;


struct omp_for_data_loop
{
  tree v, n1, n2, step;
  enum tree_code cond_code;
};

/* A structure describing the main elements of a parallel loop.  */

struct omp_for_data
{
  struct omp_for_data_loop loop;
  tree chunk_size;
  gimple for_stmt;
  tree pre, iter_type;
  int collapse;
  bool have_nowait, have_ordered;
  enum omp_clause_schedule_kind sched_kind;
  struct omp_for_data_loop *loops;
};
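
/* As an illustration of how these fields are filled in, a loop such as

       #pragma omp for schedule (dynamic, 4)
       for (i = 0; i < n; i += 2)
         ...

   is described by loop.v == i, loop.n1 == 0, loop.n2 == n,
   loop.step == 2, loop.cond_code == LT_EXPR, collapse == 1,
   sched_kind == OMP_CLAUSE_SCHEDULE_DYNAMIC and chunk_size == 4.  */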


static splay_tree all_contexts;
static int taskreg_nesting_level;
struct omp_region *root_omp_region;
static bitmap task_shared_vars;

static void scan_omp (gimple_seq *, omp_context *);
static tree scan_omp_1_op (tree *, int *, void *);

#define WALK_SUBSTMTS  \
    case GIMPLE_BIND: \
    case GIMPLE_TRY: \
    case GIMPLE_CATCH: \
    case GIMPLE_EH_FILTER: \
    case GIMPLE_TRANSACTION: \
      /* The sub-statements for these should be walked.  */ \
      *handled_ops_p = false; \
      break;

/* Convenience function for calling scan_omp_1_op on tree operands.  */

static inline tree
scan_omp_op (tree *tp, omp_context *ctx)
{
  struct walk_stmt_info wi;

  memset (&wi, 0, sizeof (wi));
  wi.info = ctx;
  wi.want_locations = true;

  return walk_tree (tp, scan_omp_1_op, &wi, NULL);
}

static void lower_omp (gimple_seq *, omp_context *);
static tree lookup_decl_in_outer_ctx (tree, omp_context *);
static tree maybe_lookup_decl_in_outer_ctx (tree, omp_context *);

/* Find an OpenMP clause of type KIND within CLAUSES.  */

tree
find_omp_clause (tree clauses, enum omp_clause_code kind)
{
  for (; clauses ; clauses = OMP_CLAUSE_CHAIN (clauses))
    if (OMP_CLAUSE_CODE (clauses) == kind)
      return clauses;

  return NULL_TREE;
}

/* Return true if CTX is for an omp parallel.  */

static inline bool
is_parallel_ctx (omp_context *ctx)
{
  return gimple_code (ctx->stmt) == GIMPLE_OMP_PARALLEL;
}


/* Return true if CTX is for an omp task.  */

static inline bool
is_task_ctx (omp_context *ctx)
{
  return gimple_code (ctx->stmt) == GIMPLE_OMP_TASK;
}


/* Return true if CTX is for an omp parallel or omp task.  */

static inline bool
is_taskreg_ctx (omp_context *ctx)
{
  return gimple_code (ctx->stmt) == GIMPLE_OMP_PARALLEL
	 || gimple_code (ctx->stmt) == GIMPLE_OMP_TASK;
}


/* Return true if REGION is a combined parallel+workshare region.  */

static inline bool
is_combined_parallel (struct omp_region *region)
{
  return region->is_combined_parallel;
}


/* Extract the header elements of parallel loop FOR_STMT and store
   them into *FD.  */

static void
extract_omp_for_data (gimple for_stmt, struct omp_for_data *fd,
		      struct omp_for_data_loop *loops)
{
  tree t, var, *collapse_iter, *collapse_count;
  tree count = NULL_TREE, iter_type = long_integer_type_node;
  struct omp_for_data_loop *loop;
  int i;
  struct omp_for_data_loop dummy_loop;
  location_t loc = gimple_location (for_stmt);

  fd->for_stmt = for_stmt;
  fd->pre = NULL;
  fd->collapse = gimple_omp_for_collapse (for_stmt);
  if (fd->collapse > 1)
    fd->loops = loops;
  else
    fd->loops = &fd->loop;

  fd->have_nowait = fd->have_ordered = false;
  fd->sched_kind = OMP_CLAUSE_SCHEDULE_STATIC;
  fd->chunk_size = NULL_TREE;
  collapse_iter = NULL;
  collapse_count = NULL;

  for (t = gimple_omp_for_clauses (for_stmt); t ; t = OMP_CLAUSE_CHAIN (t))
    switch (OMP_CLAUSE_CODE (t))
      {
      case OMP_CLAUSE_NOWAIT:
	fd->have_nowait = true;
	break;
      case OMP_CLAUSE_ORDERED:
	fd->have_ordered = true;
	break;
      case OMP_CLAUSE_SCHEDULE:
	fd->sched_kind = OMP_CLAUSE_SCHEDULE_KIND (t);
	fd->chunk_size = OMP_CLAUSE_SCHEDULE_CHUNK_EXPR (t);
	break;
      case OMP_CLAUSE_COLLAPSE:
	if (fd->collapse > 1)
	  {
	    collapse_iter = &OMP_CLAUSE_COLLAPSE_ITERVAR (t);
	    collapse_count = &OMP_CLAUSE_COLLAPSE_COUNT (t);
	  }
      default:
	break;
      }

  /* FIXME: for now map schedule(auto) to schedule(static).
     There should be analysis to determine whether all iterations
     are approximately the same amount of work (then schedule(static)
     is best) or if it varies (then schedule(dynamic,N) is better).  */
  if (fd->sched_kind == OMP_CLAUSE_SCHEDULE_AUTO)
    {
      fd->sched_kind = OMP_CLAUSE_SCHEDULE_STATIC;
      gcc_assert (fd->chunk_size == NULL);
    }
  gcc_assert (fd->collapse == 1 || collapse_iter != NULL);
  if (fd->sched_kind == OMP_CLAUSE_SCHEDULE_RUNTIME)
    gcc_assert (fd->chunk_size == NULL);
  else if (fd->chunk_size == NULL)
    {
      /* We only need to compute a default chunk size for ordered
	 static loops and dynamic loops.  */
      if (fd->sched_kind != OMP_CLAUSE_SCHEDULE_STATIC
	  || fd->have_ordered
	  || fd->collapse > 1)
	fd->chunk_size = (fd->sched_kind == OMP_CLAUSE_SCHEDULE_STATIC)
			 ? integer_zero_node : integer_one_node;
    }

  for (i = 0; i < fd->collapse; i++)
    {
      if (fd->collapse == 1)
	loop = &fd->loop;
      else if (loops != NULL)
	loop = loops + i;
      else
	loop = &dummy_loop;


      loop->v = gimple_omp_for_index (for_stmt, i);
      gcc_assert (SSA_VAR_P (loop->v));
      gcc_assert (TREE_CODE (TREE_TYPE (loop->v)) == INTEGER_TYPE
		  || TREE_CODE (TREE_TYPE (loop->v)) == POINTER_TYPE);
      var = TREE_CODE (loop->v) == SSA_NAME ? SSA_NAME_VAR (loop->v) : loop->v;
      loop->n1 = gimple_omp_for_initial (for_stmt, i);

      loop->cond_code = gimple_omp_for_cond (for_stmt, i);
      loop->n2 = gimple_omp_for_final (for_stmt, i);
      switch (loop->cond_code)
	{
	case LT_EXPR:
	case GT_EXPR:
	  break;
	case LE_EXPR:
	  if (POINTER_TYPE_P (TREE_TYPE (loop->n2)))
	    loop->n2 = fold_build_pointer_plus_hwi_loc (loc, loop->n2, 1);
	  else
	    loop->n2 = fold_build2_loc (loc,
					PLUS_EXPR, TREE_TYPE (loop->n2), loop->n2,
					build_int_cst (TREE_TYPE (loop->n2), 1));
	  loop->cond_code = LT_EXPR;
	  break;
	case GE_EXPR:
	  if (POINTER_TYPE_P (TREE_TYPE (loop->n2)))
	    loop->n2 = fold_build_pointer_plus_hwi_loc (loc, loop->n2, -1);
	  else
	    loop->n2 = fold_build2_loc (loc,
					MINUS_EXPR, TREE_TYPE (loop->n2), loop->n2,
					build_int_cst (TREE_TYPE (loop->n2), 1));
	  loop->cond_code = GT_EXPR;
	  break;
	default:
	  gcc_unreachable ();
	}
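      /* So, for example, "i <= n" has just been normalized to
	 "i < n + 1" and "i >= n" to "i > n - 1"; only LT_EXPR and
	 GT_EXPR conditions survive past this point.  */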

      t = gimple_omp_for_incr (for_stmt, i);
      gcc_assert (TREE_OPERAND (t, 0) == var);
      switch (TREE_CODE (t))
	{
	case PLUS_EXPR:
	  loop->step = TREE_OPERAND (t, 1);
	  break;
	case POINTER_PLUS_EXPR:
	  loop->step = fold_convert (ssizetype, TREE_OPERAND (t, 1));
	  break;
	case MINUS_EXPR:
	  loop->step = TREE_OPERAND (t, 1);
	  loop->step = fold_build1_loc (loc,
					NEGATE_EXPR, TREE_TYPE (loop->step),
					loop->step);
	  break;
	default:
	  gcc_unreachable ();
	}

      if (iter_type != long_long_unsigned_type_node)
	{
	  if (POINTER_TYPE_P (TREE_TYPE (loop->v)))
	    iter_type = long_long_unsigned_type_node;
	  else if (TYPE_UNSIGNED (TREE_TYPE (loop->v))
		   && TYPE_PRECISION (TREE_TYPE (loop->v))
		      >= TYPE_PRECISION (iter_type))
	    {
	      tree n;

	      if (loop->cond_code == LT_EXPR)
		n = fold_build2_loc (loc,
				     PLUS_EXPR, TREE_TYPE (loop->v),
				     loop->n2, loop->step);
	      else
		n = loop->n1;
	      if (TREE_CODE (n) != INTEGER_CST
		  || tree_int_cst_lt (TYPE_MAX_VALUE (iter_type), n))
		iter_type = long_long_unsigned_type_node;
	    }
	  else if (TYPE_PRECISION (TREE_TYPE (loop->v))
		   > TYPE_PRECISION (iter_type))
	    {
	      tree n1, n2;

	      if (loop->cond_code == LT_EXPR)
		{
		  n1 = loop->n1;
		  n2 = fold_build2_loc (loc,
					PLUS_EXPR, TREE_TYPE (loop->v),
					loop->n2, loop->step);
		}
	      else
		{
		  n1 = fold_build2_loc (loc,
					MINUS_EXPR, TREE_TYPE (loop->v),
					loop->n2, loop->step);
		  n2 = loop->n1;
		}
	      if (TREE_CODE (n1) != INTEGER_CST
		  || TREE_CODE (n2) != INTEGER_CST
		  || !tree_int_cst_lt (TYPE_MIN_VALUE (iter_type), n1)
		  || !tree_int_cst_lt (n2, TYPE_MAX_VALUE (iter_type)))
		iter_type = long_long_unsigned_type_node;
	    }
	}

      if (collapse_count && *collapse_count == NULL)
	{
	  if ((i == 0 || count != NULL_TREE)
	      && TREE_CODE (TREE_TYPE (loop->v)) == INTEGER_TYPE
	      && TREE_CONSTANT (loop->n1)
	      && TREE_CONSTANT (loop->n2)
	      && TREE_CODE (loop->step) == INTEGER_CST)
	    {
	      tree itype = TREE_TYPE (loop->v);

	      if (POINTER_TYPE_P (itype))
		itype = signed_type_for (itype);
	      t = build_int_cst (itype, (loop->cond_code == LT_EXPR ? -1 : 1));
	      t = fold_build2_loc (loc,
				   PLUS_EXPR, itype,
				   fold_convert_loc (loc, itype, loop->step), t);
	      t = fold_build2_loc (loc, PLUS_EXPR, itype, t,
				   fold_convert_loc (loc, itype, loop->n2));
	      t = fold_build2_loc (loc, MINUS_EXPR, itype, t,
				   fold_convert_loc (loc, itype, loop->n1));
	      if (TYPE_UNSIGNED (itype) && loop->cond_code == GT_EXPR)
		t = fold_build2_loc (loc, TRUNC_DIV_EXPR, itype,
				     fold_build1_loc (loc, NEGATE_EXPR, itype, t),
				     fold_build1_loc (loc, NEGATE_EXPR, itype,
						      fold_convert_loc (loc, itype,
									loop->step)));
	      else
		t = fold_build2_loc (loc, TRUNC_DIV_EXPR, itype, t,
				     fold_convert_loc (loc, itype, loop->step));
	      t = fold_convert_loc (loc, long_long_unsigned_type_node, t);
	      if (count != NULL_TREE)
		count = fold_build2_loc (loc,
					 MULT_EXPR, long_long_unsigned_type_node,
					 count, t);
	      else
		count = t;
	      if (TREE_CODE (count) != INTEGER_CST)
		count = NULL_TREE;
	    }
	  else
	    count = NULL_TREE;
	}
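      /* The folding above computes (step + sgn + n2 - n1) / step with
	 sgn == -1 for LT loops and 1 for GT loops, i.e. the classic
	 iteration count.  E.g., for "for (i = 0; i < 10; i += 3)" it
	 yields (3 + -1 + 10 - 0) / 3 == 4, matching the iterations
	 i == 0, 3, 6, 9.  */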
    }

  if (count)
    {
      if (!tree_int_cst_lt (count, TYPE_MAX_VALUE (long_integer_type_node)))
	iter_type = long_long_unsigned_type_node;
      else
	iter_type = long_integer_type_node;
    }
  else if (collapse_iter && *collapse_iter != NULL)
    iter_type = TREE_TYPE (*collapse_iter);
  fd->iter_type = iter_type;
  if (collapse_iter && *collapse_iter == NULL)
    *collapse_iter = create_tmp_var (iter_type, ".iter");
  if (collapse_count && *collapse_count == NULL)
    {
      if (count)
	*collapse_count = fold_convert_loc (loc, iter_type, count);
      else
	*collapse_count = create_tmp_var (iter_type, ".count");
    }

  if (fd->collapse > 1)
    {
      fd->loop.v = *collapse_iter;
      fd->loop.n1 = build_int_cst (TREE_TYPE (fd->loop.v), 0);
      fd->loop.n2 = *collapse_count;
      fd->loop.step = build_int_cst (TREE_TYPE (fd->loop.v), 1);
      fd->loop.cond_code = LT_EXPR;
    }
}


/* Given two blocks PAR_ENTRY_BB and WS_ENTRY_BB such that PAR_ENTRY_BB
   is the immediate dominator of WS_ENTRY_BB, return true if there
   are no data dependencies that would prevent expanding the parallel
   directive at PAR_ENTRY_BB as a combined parallel+workshare region.

   When expanding a combined parallel+workshare region, the call to
   the child function may need additional arguments in the case of
   GIMPLE_OMP_FOR regions.  In some cases, these arguments are
   computed out of variables passed in from the parent to the child
   via 'struct .omp_data_s'.  For instance:

	#pragma omp parallel for schedule (guided, i * 4)
	for (j ...)

   Is lowered into:

	# BLOCK 2 (PAR_ENTRY_BB)
	.omp_data_o.i = i;
	#pragma omp parallel [child fn: bar.omp_fn.0 ( ..., D.1598)]

	# BLOCK 3 (WS_ENTRY_BB)
	.omp_data_i = &.omp_data_o;
	D.1667 = .omp_data_i->i;
	D.1598 = D.1667 * 4;
	#pragma omp for schedule (guided, D.1598)

   When we outline the parallel region, the call to the child function
   'bar.omp_fn.0' will need the value D.1598 in its argument list, but
   that value is computed *after* the call site.  So, in principle we
   cannot do the transformation.

   To see whether the code in WS_ENTRY_BB blocks the combined
   parallel+workshare call, we collect all the variables used in the
   GIMPLE_OMP_FOR header and check whether they appear on the LHS of any
   statement in WS_ENTRY_BB.  If so, then we cannot emit the combined
   call.

   FIXME.  If we had the SSA form built at this point, we could merely
   hoist the code in block 3 into block 2 and be done with it.  But at
   this point we don't have dataflow information and though we could
   hack something up here, it is really not worth the aggravation.  */

static bool
workshare_safe_to_combine_p (basic_block ws_entry_bb)
{
  struct omp_for_data fd;
  gimple ws_stmt = last_stmt (ws_entry_bb);

  if (gimple_code (ws_stmt) == GIMPLE_OMP_SECTIONS)
    return true;

  gcc_assert (gimple_code (ws_stmt) == GIMPLE_OMP_FOR);

  extract_omp_for_data (ws_stmt, &fd, NULL);

  if (fd.collapse > 1 && TREE_CODE (fd.loop.n2) != INTEGER_CST)
    return false;
  if (fd.iter_type != long_integer_type_node)
    return false;

  /* FIXME.  We give up too easily here.  If any of these arguments
     are not constants, they will likely involve variables that have
     been mapped into fields of .omp_data_s for sharing with the child
     function.  With appropriate data flow, it would be possible to
     see through this.  */
  if (!is_gimple_min_invariant (fd.loop.n1)
      || !is_gimple_min_invariant (fd.loop.n2)
      || !is_gimple_min_invariant (fd.loop.step)
      || (fd.chunk_size && !is_gimple_min_invariant (fd.chunk_size)))
    return false;

  return true;
}


/* Collect additional arguments needed to emit a combined
   parallel+workshare call.  WS_STMT is the workshare directive being
   expanded.  */

static VEC(tree,gc) *
get_ws_args_for (gimple ws_stmt)
{
  tree t;
  location_t loc = gimple_location (ws_stmt);
  VEC(tree,gc) *ws_args;

  if (gimple_code (ws_stmt) == GIMPLE_OMP_FOR)
    {
      struct omp_for_data fd;

      extract_omp_for_data (ws_stmt, &fd, NULL);

      ws_args = VEC_alloc (tree, gc, 3 + (fd.chunk_size != 0));

      t = fold_convert_loc (loc, long_integer_type_node, fd.loop.n1);
      VEC_quick_push (tree, ws_args, t);

      t = fold_convert_loc (loc, long_integer_type_node, fd.loop.n2);
      VEC_quick_push (tree, ws_args, t);

      t = fold_convert_loc (loc, long_integer_type_node, fd.loop.step);
      VEC_quick_push (tree, ws_args, t);

      if (fd.chunk_size)
	{
	  t = fold_convert_loc (loc, long_integer_type_node, fd.chunk_size);
	  VEC_quick_push (tree, ws_args, t);
	}

      return ws_args;
    }
  else if (gimple_code (ws_stmt) == GIMPLE_OMP_SECTIONS)
    {
      /* Number of sections is equal to the number of edges from the
	 GIMPLE_OMP_SECTIONS_SWITCH statement, except for the one to
	 the exit of the sections region.  */
      basic_block bb = single_succ (gimple_bb (ws_stmt));
      t = build_int_cst (unsigned_type_node, EDGE_COUNT (bb->succs) - 1);
      ws_args = VEC_alloc (tree, gc, 1);
      VEC_quick_push (tree, ws_args, t);
      return ws_args;
    }

  gcc_unreachable ();
}
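
/* For illustration (a sketch of the intent, not literal output): a
   combined "parallel for schedule (dynamic, c)" loop running from n1
   to n2 by step yields ws_args == {n1, n2, step, c}, each converted
   to long.  These become the trailing arguments of the combined
   GOMP_parallel_loop_* entry points in libgomp.  */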


/* Discover whether REGION is a combined parallel+workshare region.  */

static void
determine_parallel_type (struct omp_region *region)
{
  basic_block par_entry_bb, par_exit_bb;
  basic_block ws_entry_bb, ws_exit_bb;

  if (region == NULL || region->inner == NULL
      || region->exit == NULL || region->inner->exit == NULL
      || region->inner->cont == NULL)
    return;

  /* We only support parallel+for and parallel+sections.  */
  if (region->type != GIMPLE_OMP_PARALLEL
      || (region->inner->type != GIMPLE_OMP_FOR
	  && region->inner->type != GIMPLE_OMP_SECTIONS))
    return;

  /* Check for perfect nesting PAR_ENTRY_BB -> WS_ENTRY_BB and
     WS_EXIT_BB -> PAR_EXIT_BB.  */
  par_entry_bb = region->entry;
  par_exit_bb = region->exit;
  ws_entry_bb = region->inner->entry;
  ws_exit_bb = region->inner->exit;

  if (single_succ (par_entry_bb) == ws_entry_bb
      && single_succ (ws_exit_bb) == par_exit_bb
      && workshare_safe_to_combine_p (ws_entry_bb)
      && (gimple_omp_parallel_combined_p (last_stmt (par_entry_bb))
	  || (last_and_only_stmt (ws_entry_bb)
	      && last_and_only_stmt (par_exit_bb))))
    {
      gimple ws_stmt = last_stmt (ws_entry_bb);

      if (region->inner->type == GIMPLE_OMP_FOR)
	{
	  /* If this is a combined parallel loop, we need to determine
	     whether or not to use the combined library calls.  There
	     are two cases where we do not apply the transformation:
	     static loops and any kind of ordered loop.  In the first
	     case, we already open code the loop so there is no need
	     to do anything else.  In the latter case, the combined
	     parallel loop call would still need extra synchronization
	     to implement ordered semantics, so there would not be any
	     gain in using the combined call.  */
	  tree clauses = gimple_omp_for_clauses (ws_stmt);
	  tree c = find_omp_clause (clauses, OMP_CLAUSE_SCHEDULE);
	  if (c == NULL
	      || OMP_CLAUSE_SCHEDULE_KIND (c) == OMP_CLAUSE_SCHEDULE_STATIC
	      || find_omp_clause (clauses, OMP_CLAUSE_ORDERED))
	    {
	      region->is_combined_parallel = false;
	      region->inner->is_combined_parallel = false;
	      return;
	    }
	}

      region->is_combined_parallel = true;
      region->inner->is_combined_parallel = true;
      region->ws_args = get_ws_args_for (ws_stmt);
    }
}


/* Return true if EXPR is variable sized.  */

static inline bool
is_variable_sized (const_tree expr)
{
  return !TREE_CONSTANT (TYPE_SIZE_UNIT (TREE_TYPE (expr)));
}

/* Return true if DECL is a reference type.  */

static inline bool
is_reference (tree decl)
{
  return lang_hooks.decls.omp_privatize_by_reference (decl);
}

/* Lookup variables in the decl or field splay trees.  The "maybe" form
   allows the variable not to have been entered; otherwise we assert
   that it must have been.  */

static inline tree
lookup_decl (tree var, omp_context *ctx)
{
  tree *n;
  n = (tree *) pointer_map_contains (ctx->cb.decl_map, var);
  return *n;
}

static inline tree
maybe_lookup_decl (const_tree var, omp_context *ctx)
{
  tree *n;
  n = (tree *) pointer_map_contains (ctx->cb.decl_map, var);
  return n ? *n : NULL_TREE;
}

static inline tree
lookup_field (tree var, omp_context *ctx)
{
  splay_tree_node n;
  n = splay_tree_lookup (ctx->field_map, (splay_tree_key) var);
  return (tree) n->value;
}

static inline tree
lookup_sfield (tree var, omp_context *ctx)
{
  splay_tree_node n;
  n = splay_tree_lookup (ctx->sfield_map
			 ? ctx->sfield_map : ctx->field_map,
			 (splay_tree_key) var);
  return (tree) n->value;
}

static inline tree
maybe_lookup_field (tree var, omp_context *ctx)
{
  splay_tree_node n;
  n = splay_tree_lookup (ctx->field_map, (splay_tree_key) var);
  return n ? (tree) n->value : NULL_TREE;
}

/* Return true if DECL should be copied by pointer.  SHARED_CTX is
   the parallel context if DECL is to be shared.  */

static bool
use_pointer_for_field (tree decl, omp_context *shared_ctx)
{
  if (AGGREGATE_TYPE_P (TREE_TYPE (decl)))
    return true;

  /* We can only use copy-in/copy-out semantics for shared variables
     when we know the value is not accessible from an outer scope.  */
  if (shared_ctx)
    {
      /* ??? Trivially accessible from anywhere.  But why would we even
	 be passing an address in this case?  Should we simply assert
	 this to be false, or should we have a cleanup pass that removes
	 these from the list of mappings?  */
      if (TREE_STATIC (decl) || DECL_EXTERNAL (decl))
	return true;

      /* For variables with DECL_HAS_VALUE_EXPR_P set, we cannot tell
	 without analyzing the expression whether or not its location
	 is accessible to anyone else.  In the case of nested parallel
	 regions it certainly may be.  */
      if (TREE_CODE (decl) != RESULT_DECL && DECL_HAS_VALUE_EXPR_P (decl))
	return true;

      /* Do not use copy-in/copy-out for variables that have their
	 address taken.  */
      if (TREE_ADDRESSABLE (decl))
	return true;

      /* Disallow copy-in/out in nested parallel if
	 decl is shared in outer parallel, otherwise
	 each thread could store the shared variable
	 in its own copy-in location, making the
	 variable no longer really shared.  */
      if (!TREE_READONLY (decl) && shared_ctx->is_nested)
	{
	  omp_context *up;

	  for (up = shared_ctx->outer; up; up = up->outer)
	    if (is_taskreg_ctx (up) && maybe_lookup_decl (decl, up))
	      break;

	  if (up)
	    {
	      tree c;

	      for (c = gimple_omp_taskreg_clauses (up->stmt);
		   c; c = OMP_CLAUSE_CHAIN (c))
		if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_SHARED
		    && OMP_CLAUSE_DECL (c) == decl)
		  break;

	      if (c)
		goto maybe_mark_addressable_and_ret;
	    }
	}

      /* For tasks avoid using copy-in/out, unless they are readonly
	 (in which case just copy-in is used).  As tasks can be
	 deferred or executed in a different thread, when GOMP_task
	 returns, the task hasn't necessarily terminated.  */
      if (!TREE_READONLY (decl) && is_task_ctx (shared_ctx))
	{
	  tree outer;
	maybe_mark_addressable_and_ret:
	  outer = maybe_lookup_decl_in_outer_ctx (decl, shared_ctx);
	  if (is_gimple_reg (outer))
	    {
	      /* Taking address of OUTER in lower_send_shared_vars
		 might need regimplification of everything that uses the
		 variable.  */
	      if (!task_shared_vars)
		task_shared_vars = BITMAP_ALLOC (NULL);
	      bitmap_set_bit (task_shared_vars, DECL_UID (outer));
	      TREE_ADDRESSABLE (outer) = 1;
	    }
	  return true;
	}
    }

  return false;
}
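
/* For example, a shared "int x" whose address has been taken
   (TREE_ADDRESSABLE) must be passed by pointer so that every thread
   sees the one object, whereas a read-only scalar can simply be
   copied into the child by value.  */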

/* Create a new VAR_DECL and copy information from VAR to it.  */

tree
copy_var_decl (tree var, tree name, tree type)
{
  tree copy = build_decl (DECL_SOURCE_LOCATION (var), VAR_DECL, name, type);

  TREE_ADDRESSABLE (copy) = TREE_ADDRESSABLE (var);
  TREE_THIS_VOLATILE (copy) = TREE_THIS_VOLATILE (var);
  DECL_GIMPLE_REG_P (copy) = DECL_GIMPLE_REG_P (var);
  DECL_ARTIFICIAL (copy) = DECL_ARTIFICIAL (var);
  DECL_IGNORED_P (copy) = DECL_IGNORED_P (var);
  DECL_CONTEXT (copy) = DECL_CONTEXT (var);
  TREE_USED (copy) = 1;
  DECL_SEEN_IN_BIND_EXPR_P (copy) = 1;

  return copy;
}

/* Construct a new automatic decl similar to VAR.  */

static tree
omp_copy_decl_2 (tree var, tree name, tree type, omp_context *ctx)
{
  tree copy = copy_var_decl (var, name, type);

  DECL_CONTEXT (copy) = current_function_decl;
  DECL_CHAIN (copy) = ctx->block_vars;
  ctx->block_vars = copy;

  return copy;
}

static tree
omp_copy_decl_1 (tree var, omp_context *ctx)
{
  return omp_copy_decl_2 (var, DECL_NAME (var), TREE_TYPE (var), ctx);
}

/* Build COMPONENT_REF and set TREE_THIS_VOLATILE and TREE_READONLY on it
   as appropriate.  */
static tree
omp_build_component_ref (tree obj, tree field)
{
  tree ret = build3 (COMPONENT_REF, TREE_TYPE (field), obj, field, NULL);
  if (TREE_THIS_VOLATILE (field))
    TREE_THIS_VOLATILE (ret) |= 1;
  if (TREE_READONLY (field))
    TREE_READONLY (ret) |= 1;
  return ret;
}

/* Build tree nodes to access the field for VAR on the receiver side.  */

static tree
build_receiver_ref (tree var, bool by_ref, omp_context *ctx)
{
  tree x, field = lookup_field (var, ctx);

  /* If the receiver record type was remapped in the child function,
     remap the field into the new record type.  */
  x = maybe_lookup_field (field, ctx);
  if (x != NULL)
    field = x;

  x = build_simple_mem_ref (ctx->receiver_decl);
  x = omp_build_component_ref (x, field);
  if (by_ref)
    x = build_simple_mem_ref (x);

  return x;
}

/* Build tree nodes to access VAR in the scope outer to CTX.  In the case
   of a parallel, this is a component reference; for workshare constructs
   this is some variable.  */

static tree
build_outer_var_ref (tree var, omp_context *ctx)
{
  tree x;

  if (is_global_var (maybe_lookup_decl_in_outer_ctx (var, ctx)))
    x = var;
  else if (is_variable_sized (var))
    {
      x = TREE_OPERAND (DECL_VALUE_EXPR (var), 0);
      x = build_outer_var_ref (x, ctx);
      x = build_simple_mem_ref (x);
    }
  else if (is_taskreg_ctx (ctx))
    {
      bool by_ref = use_pointer_for_field (var, NULL);
      x = build_receiver_ref (var, by_ref, ctx);
    }
  else if (ctx->outer)
    x = lookup_decl (var, ctx->outer);
  else if (is_reference (var))
    /* This can happen with orphaned constructs.  If var is reference, it is
       possible it is shared and as such valid.  */
    x = var;
  else
    gcc_unreachable ();

  if (is_reference (var))
    x = build_simple_mem_ref (x);

  return x;
}

/* Build tree nodes to access the field for VAR on the sender side.  */

static tree
build_sender_ref (tree var, omp_context *ctx)
{
  tree field = lookup_sfield (var, ctx);
  return omp_build_component_ref (ctx->sender_decl, field);
}

/* Add a new field for VAR inside the structure CTX->SENDER_DECL.  */
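/* MASK & 1 installs the field in the receiver-side record
   (CTX->RECORD_TYPE, tracked in CTX->FIELD_MAP); MASK & 2 installs it
   in the sender-side record (CTX->SRECORD_TYPE, tracked in
   CTX->SFIELD_MAP), which only task contexts use; MASK == 3 installs
   it in both.  */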

static void
install_var_field (tree var, bool by_ref, int mask, omp_context *ctx)
{
  tree field, type, sfield = NULL_TREE;

  gcc_assert ((mask & 1) == 0
	      || !splay_tree_lookup (ctx->field_map, (splay_tree_key) var));
  gcc_assert ((mask & 2) == 0 || !ctx->sfield_map
	      || !splay_tree_lookup (ctx->sfield_map, (splay_tree_key) var));

  type = TREE_TYPE (var);
  if (by_ref)
    type = build_pointer_type (type);
  else if ((mask & 3) == 1 && is_reference (var))
    type = TREE_TYPE (type);

  field = build_decl (DECL_SOURCE_LOCATION (var),
		      FIELD_DECL, DECL_NAME (var), type);

  /* Remember what variable this field was created for.  This does have a
     side effect of making dwarf2out ignore this member, so for helpful
     debugging we clear it later in delete_omp_context.  */
  DECL_ABSTRACT_ORIGIN (field) = var;
  if (type == TREE_TYPE (var))
    {
      DECL_ALIGN (field) = DECL_ALIGN (var);
      DECL_USER_ALIGN (field) = DECL_USER_ALIGN (var);
      TREE_THIS_VOLATILE (field) = TREE_THIS_VOLATILE (var);
    }
  else
    DECL_ALIGN (field) = TYPE_ALIGN (type);

  if ((mask & 3) == 3)
    {
      insert_field_into_struct (ctx->record_type, field);
      if (ctx->srecord_type)
	{
	  sfield = build_decl (DECL_SOURCE_LOCATION (var),
			       FIELD_DECL, DECL_NAME (var), type);
	  DECL_ABSTRACT_ORIGIN (sfield) = var;
	  DECL_ALIGN (sfield) = DECL_ALIGN (field);
	  DECL_USER_ALIGN (sfield) = DECL_USER_ALIGN (field);
	  TREE_THIS_VOLATILE (sfield) = TREE_THIS_VOLATILE (field);
	  insert_field_into_struct (ctx->srecord_type, sfield);
	}
    }
  else
    {
      if (ctx->srecord_type == NULL_TREE)
	{
	  tree t;

	  ctx->srecord_type = lang_hooks.types.make_type (RECORD_TYPE);
	  ctx->sfield_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
	  for (t = TYPE_FIELDS (ctx->record_type); t ; t = TREE_CHAIN (t))
	    {
	      sfield = build_decl (DECL_SOURCE_LOCATION (var),
				   FIELD_DECL, DECL_NAME (t), TREE_TYPE (t));
	      DECL_ABSTRACT_ORIGIN (sfield) = DECL_ABSTRACT_ORIGIN (t);
	      insert_field_into_struct (ctx->srecord_type, sfield);
	      splay_tree_insert (ctx->sfield_map,
				 (splay_tree_key) DECL_ABSTRACT_ORIGIN (t),
				 (splay_tree_value) sfield);
	    }
	}
      sfield = field;
      insert_field_into_struct ((mask & 1) ? ctx->record_type
				: ctx->srecord_type, field);
    }

  if (mask & 1)
    splay_tree_insert (ctx->field_map, (splay_tree_key) var,
		       (splay_tree_value) field);
  if ((mask & 2) && ctx->sfield_map)
    splay_tree_insert (ctx->sfield_map, (splay_tree_key) var,
		       (splay_tree_value) sfield);
}

static tree
install_var_local (tree var, omp_context *ctx)
{
  tree new_var = omp_copy_decl_1 (var, ctx);
  insert_decl_map (&ctx->cb, var, new_var);
  return new_var;
}

/* Adjust the replacement for DECL in CTX for the new context.  This means
   copying the DECL_VALUE_EXPR, and fixing up the type.  */

static void
fixup_remapped_decl (tree decl, omp_context *ctx, bool private_debug)
{
  tree new_decl, size;

  new_decl = lookup_decl (decl, ctx);

  TREE_TYPE (new_decl) = remap_type (TREE_TYPE (decl), &ctx->cb);

  if ((!TREE_CONSTANT (DECL_SIZE (new_decl)) || private_debug)
      && DECL_HAS_VALUE_EXPR_P (decl))
    {
      tree ve = DECL_VALUE_EXPR (decl);
      walk_tree (&ve, copy_tree_body_r, &ctx->cb, NULL);
      SET_DECL_VALUE_EXPR (new_decl, ve);
      DECL_HAS_VALUE_EXPR_P (new_decl) = 1;
    }

  if (!TREE_CONSTANT (DECL_SIZE (new_decl)))
    {
      size = remap_decl (DECL_SIZE (decl), &ctx->cb);
      if (size == error_mark_node)
	size = TYPE_SIZE (TREE_TYPE (new_decl));
      DECL_SIZE (new_decl) = size;

      size = remap_decl (DECL_SIZE_UNIT (decl), &ctx->cb);
      if (size == error_mark_node)
	size = TYPE_SIZE_UNIT (TREE_TYPE (new_decl));
      DECL_SIZE_UNIT (new_decl) = size;
    }
}

/* The callback for remap_decl.  Search all containing contexts for a
   mapping of the variable; this avoids having to duplicate the splay
   tree ahead of time.  We know a mapping doesn't already exist in the
   given context.  Create new mappings to implement default semantics.  */

static tree
omp_copy_decl (tree var, copy_body_data *cb)
{
  omp_context *ctx = (omp_context *) cb;
  tree new_var;

  if (TREE_CODE (var) == LABEL_DECL)
    {
      new_var = create_artificial_label (DECL_SOURCE_LOCATION (var));
      DECL_CONTEXT (new_var) = current_function_decl;
      insert_decl_map (&ctx->cb, var, new_var);
      return new_var;
    }

  while (!is_taskreg_ctx (ctx))
    {
      ctx = ctx->outer;
      if (ctx == NULL)
	return var;
      new_var = maybe_lookup_decl (var, ctx);
      if (new_var)
	return new_var;
    }

  if (is_global_var (var) || decl_function_context (var) != ctx->cb.src_fn)
    return var;

  return error_mark_node;
}


/* Debugging dumps for parallel regions.  */
void dump_omp_region (FILE *, struct omp_region *, int);
void debug_omp_region (struct omp_region *);
void debug_all_omp_regions (void);

/* Dump the parallel region tree rooted at REGION.  */

void
dump_omp_region (FILE *file, struct omp_region *region, int indent)
{
  fprintf (file, "%*sbb %d: %s\n", indent, "", region->entry->index,
	   gimple_code_name[region->type]);

  if (region->inner)
    dump_omp_region (file, region->inner, indent + 4);

  if (region->cont)
    {
      fprintf (file, "%*sbb %d: GIMPLE_OMP_CONTINUE\n", indent, "",
	       region->cont->index);
    }

  if (region->exit)
    fprintf (file, "%*sbb %d: GIMPLE_OMP_RETURN\n", indent, "",
	     region->exit->index);
  else
    fprintf (file, "%*s[no exit marker]\n", indent, "");

  if (region->next)
    dump_omp_region (file, region->next, indent);
}

DEBUG_FUNCTION void
debug_omp_region (struct omp_region *region)
{
  dump_omp_region (stderr, region, 0);
}

DEBUG_FUNCTION void
debug_all_omp_regions (void)
{
  dump_omp_region (stderr, root_omp_region, 0);
}


/* Create a new parallel region starting at STMT inside region PARENT.  */

struct omp_region *
new_omp_region (basic_block bb, enum gimple_code type,
		struct omp_region *parent)
{
  struct omp_region *region = XCNEW (struct omp_region);

  region->outer = parent;
  region->entry = bb;
  region->type = type;

  if (parent)
    {
      /* This is a nested region.  Add it to the list of inner
	 regions in PARENT.  */
      region->next = parent->inner;
      parent->inner = region;
    }
  else
    {
      /* This is a toplevel region.  Add it to the list of toplevel
	 regions in ROOT_OMP_REGION.  */
      region->next = root_omp_region;
      root_omp_region = region;
    }

  return region;
}

/* Release the memory associated with the region tree rooted at REGION.  */

static void
free_omp_region_1 (struct omp_region *region)
{
  struct omp_region *i, *n;

  for (i = region->inner; i ; i = n)
    {
      n = i->next;
      free_omp_region_1 (i);
    }

  free (region);
}

/* Release the memory for the entire omp region tree.  */

void
free_omp_regions (void)
{
  struct omp_region *r, *n;
  for (r = root_omp_region; r ; r = n)
    {
      n = r->next;
      free_omp_region_1 (r);
    }
  root_omp_region = NULL;
}


/* Create a new context, with OUTER_CTX being the surrounding context.  */

static omp_context *
new_omp_context (gimple stmt, omp_context *outer_ctx)
{
  omp_context *ctx = XCNEW (omp_context);

  splay_tree_insert (all_contexts, (splay_tree_key) stmt,
		     (splay_tree_value) ctx);
  ctx->stmt = stmt;

  if (outer_ctx)
    {
      ctx->outer = outer_ctx;
      ctx->cb = outer_ctx->cb;
      ctx->cb.block = NULL;
      ctx->depth = outer_ctx->depth + 1;
    }
  else
    {
      ctx->cb.src_fn = current_function_decl;
      ctx->cb.dst_fn = current_function_decl;
      ctx->cb.src_node = cgraph_get_node (current_function_decl);
      gcc_checking_assert (ctx->cb.src_node);
      ctx->cb.dst_node = ctx->cb.src_node;
      ctx->cb.src_cfun = cfun;
      ctx->cb.copy_decl = omp_copy_decl;
      ctx->cb.eh_lp_nr = 0;
      ctx->cb.transform_call_graph_edges = CB_CGE_MOVE;
      ctx->depth = 1;
    }

  ctx->cb.decl_map = pointer_map_create ();

  return ctx;
}

static gimple_seq maybe_catch_exception (gimple_seq);

/* Finalize task copyfn.  */

static void
finalize_task_copyfn (gimple task_stmt)
{
  struct function *child_cfun;
  tree child_fn;
  gimple_seq seq = NULL, new_seq;
  gimple bind;

  child_fn = gimple_omp_task_copy_fn (task_stmt);
  if (child_fn == NULL_TREE)
    return;

  child_cfun = DECL_STRUCT_FUNCTION (child_fn);

  /* Inform the callgraph about the new function.  */
  DECL_STRUCT_FUNCTION (child_fn)->curr_properties
    = cfun->curr_properties & ~PROP_loops;

  push_cfun (child_cfun);
  bind = gimplify_body (child_fn, false);
  gimple_seq_add_stmt (&seq, bind);
  new_seq = maybe_catch_exception (seq);
  if (new_seq != seq)
    {
      bind = gimple_build_bind (NULL, new_seq, NULL);
      seq = NULL;
      gimple_seq_add_stmt (&seq, bind);
    }
  gimple_set_body (child_fn, seq);
  pop_cfun ();

  cgraph_add_new_function (child_fn, false);
}

/* Destroy an omp_context data structure.  Called through the splay tree
   value delete callback.  */

static void
delete_omp_context (splay_tree_value value)
{
  omp_context *ctx = (omp_context *) value;

  pointer_map_destroy (ctx->cb.decl_map);

  if (ctx->field_map)
    splay_tree_delete (ctx->field_map);
  if (ctx->sfield_map)
    splay_tree_delete (ctx->sfield_map);

  /* We hijacked DECL_ABSTRACT_ORIGIN earlier.  We need to clear it before
     it produces corrupt debug information.  */
  if (ctx->record_type)
    {
      tree t;
      for (t = TYPE_FIELDS (ctx->record_type); t ; t = DECL_CHAIN (t))
	DECL_ABSTRACT_ORIGIN (t) = NULL;
    }
  if (ctx->srecord_type)
    {
      tree t;
      for (t = TYPE_FIELDS (ctx->srecord_type); t ; t = DECL_CHAIN (t))
	DECL_ABSTRACT_ORIGIN (t) = NULL;
    }

  if (is_task_ctx (ctx))
    finalize_task_copyfn (ctx->stmt);

  XDELETE (ctx);
}

/* Fix up RECEIVER_DECL with a type that has been remapped to the child
   context.  */

static void
fixup_child_record_type (omp_context *ctx)
{
  tree f, type = ctx->record_type;

  /* ??? It isn't sufficient to just call remap_type here, because
     variably_modified_type_p doesn't work the way we expect for
     record types.  Testing each field for whether it needs remapping
     and creating a new record by hand works, however.  */
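  /* The typical case needing this treatment is a field created for a
     VLA such as "int a[n]": the "n" referenced inside the field's type
     must be remapped to the child function's copy of "n".  */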
  for (f = TYPE_FIELDS (type); f ; f = DECL_CHAIN (f))
    if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
      break;
  if (f)
    {
      tree name, new_fields = NULL;

      type = lang_hooks.types.make_type (RECORD_TYPE);
      name = DECL_NAME (TYPE_NAME (ctx->record_type));
      name = build_decl (DECL_SOURCE_LOCATION (ctx->receiver_decl),
			 TYPE_DECL, name, type);
      TYPE_NAME (type) = name;

      for (f = TYPE_FIELDS (ctx->record_type); f ; f = DECL_CHAIN (f))
	{
	  tree new_f = copy_node (f);
	  DECL_CONTEXT (new_f) = type;
	  TREE_TYPE (new_f) = remap_type (TREE_TYPE (f), &ctx->cb);
	  DECL_CHAIN (new_f) = new_fields;
	  walk_tree (&DECL_SIZE (new_f), copy_tree_body_r, &ctx->cb, NULL);
	  walk_tree (&DECL_SIZE_UNIT (new_f), copy_tree_body_r,
		     &ctx->cb, NULL);
	  walk_tree (&DECL_FIELD_OFFSET (new_f), copy_tree_body_r,
		     &ctx->cb, NULL);
	  new_fields = new_f;

	  /* Arrange to be able to look up the receiver field
	     given the sender field.  */
	  splay_tree_insert (ctx->field_map, (splay_tree_key) f,
			     (splay_tree_value) new_f);
	}
      TYPE_FIELDS (type) = nreverse (new_fields);
      layout_type (type);
    }

  TREE_TYPE (ctx->receiver_decl) = build_pointer_type (type);
}

/* Instantiate decls as necessary in CTX to satisfy the data sharing
   specified by CLAUSES.  */

static void
scan_sharing_clauses (tree clauses, omp_context *ctx)
{
  tree c, decl;
  bool scan_array_reductions = false;

  for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
    {
      bool by_ref;

      switch (OMP_CLAUSE_CODE (c))
	{
	case OMP_CLAUSE_PRIVATE:
	  decl = OMP_CLAUSE_DECL (c);
	  if (OMP_CLAUSE_PRIVATE_OUTER_REF (c))
	    goto do_private;
	  else if (!is_variable_sized (decl))
	    install_var_local (decl, ctx);
	  break;

	case OMP_CLAUSE_SHARED:
	  gcc_assert (is_taskreg_ctx (ctx));
	  decl = OMP_CLAUSE_DECL (c);
	  gcc_assert (!COMPLETE_TYPE_P (TREE_TYPE (decl))
		      || !is_variable_sized (decl));
	  /* Global variables don't need to be copied,
	     the receiver side will use them directly.  */
	  if (is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx)))
	    break;
	  by_ref = use_pointer_for_field (decl, ctx);
	  if (! TREE_READONLY (decl)
	      || TREE_ADDRESSABLE (decl)
	      || by_ref
	      || is_reference (decl))
	    {
	      install_var_field (decl, by_ref, 3, ctx);
	      install_var_local (decl, ctx);
	      break;
	    }
	  /* We don't need to copy const scalar vars back.  */
	  OMP_CLAUSE_SET_CODE (c, OMP_CLAUSE_FIRSTPRIVATE);
	  goto do_private;

	case OMP_CLAUSE_LASTPRIVATE:
	  /* Let the corresponding firstprivate clause create
	     the variable.  */
	  if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
	    break;
	  /* FALLTHRU */

	case OMP_CLAUSE_FIRSTPRIVATE:
	case OMP_CLAUSE_REDUCTION:
	  decl = OMP_CLAUSE_DECL (c);
	do_private:
	  if (is_variable_sized (decl))
	    {
	      if (is_task_ctx (ctx))
		install_var_field (decl, false, 1, ctx);
	      break;
	    }
	  else if (is_taskreg_ctx (ctx))
	    {
	      bool global
		= is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx));
	      by_ref = use_pointer_for_field (decl, NULL);

	      if (is_task_ctx (ctx)
		  && (global || by_ref || is_reference (decl)))
		{
		  install_var_field (decl, false, 1, ctx);
		  if (!global)
		    install_var_field (decl, by_ref, 2, ctx);
		}
	      else if (!global)
		install_var_field (decl, by_ref, 3, ctx);
	    }
	  install_var_local (decl, ctx);
	  break;

	case OMP_CLAUSE_COPYPRIVATE:
	case OMP_CLAUSE_COPYIN:
	  decl = OMP_CLAUSE_DECL (c);
	  by_ref = use_pointer_for_field (decl, NULL);
	  install_var_field (decl, by_ref, 3, ctx);
	  break;

	case OMP_CLAUSE_DEFAULT:
	  ctx->default_kind = OMP_CLAUSE_DEFAULT_KIND (c);
	  break;

	case OMP_CLAUSE_FINAL:
	case OMP_CLAUSE_IF:
	case OMP_CLAUSE_NUM_THREADS:
	case OMP_CLAUSE_SCHEDULE:
	  if (ctx->outer)
	    scan_omp_op (&OMP_CLAUSE_OPERAND (c, 0), ctx->outer);
	  break;

	case OMP_CLAUSE_NOWAIT:
	case OMP_CLAUSE_ORDERED:
	case OMP_CLAUSE_COLLAPSE:
	case OMP_CLAUSE_UNTIED:
	case OMP_CLAUSE_MERGEABLE:
	  break;

	default:
	  gcc_unreachable ();
	}
    }

  for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
    {
      switch (OMP_CLAUSE_CODE (c))
	{
	case OMP_CLAUSE_LASTPRIVATE:
	  /* Let the corresponding firstprivate clause create
	     the variable.  */
	  if (OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c))
	    scan_array_reductions = true;
	  if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
	    break;
	  /* FALLTHRU */

	case OMP_CLAUSE_PRIVATE:
	case OMP_CLAUSE_FIRSTPRIVATE:
	case OMP_CLAUSE_REDUCTION:
	  decl = OMP_CLAUSE_DECL (c);
	  if (is_variable_sized (decl))
	    install_var_local (decl, ctx);
	  fixup_remapped_decl (decl, ctx,
			       OMP_CLAUSE_CODE (c) == OMP_CLAUSE_PRIVATE
			       && OMP_CLAUSE_PRIVATE_DEBUG (c));
	  if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
	      && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
	    scan_array_reductions = true;
	  break;

	case OMP_CLAUSE_SHARED:
	  decl = OMP_CLAUSE_DECL (c);
	  if (! is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx)))
	    fixup_remapped_decl (decl, ctx, false);
	  break;

	case OMP_CLAUSE_COPYPRIVATE:
	case OMP_CLAUSE_COPYIN:
	case OMP_CLAUSE_DEFAULT:
	case OMP_CLAUSE_IF:
	case OMP_CLAUSE_NUM_THREADS:
	case OMP_CLAUSE_SCHEDULE:
	case OMP_CLAUSE_NOWAIT:
	case OMP_CLAUSE_ORDERED:
	case OMP_CLAUSE_COLLAPSE:
	case OMP_CLAUSE_UNTIED:
	case OMP_CLAUSE_FINAL:
	case OMP_CLAUSE_MERGEABLE:
	  break;

	default:
	  gcc_unreachable ();
	}
    }

  if (scan_array_reductions)
    for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
      if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
	  && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
	{
	  scan_omp (&OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c), ctx);
	  scan_omp (&OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c), ctx);
	}
      else if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE
	       && OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c))
	scan_omp (&OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c), ctx);
}

/* Create a new name for the omp child function.  Returns an identifier.  */

static GTY(()) unsigned int tmp_ompfn_id_num;

static tree
create_omp_child_function_name (bool task_copy)
{
  return (clone_function_name (current_function_decl,
			       task_copy ? "_omp_cpyfn" : "_omp_fn"));
}

/* Build a decl for the omp child function.  It'll not contain a body
   yet, just the bare decl.  */

static void
create_omp_child_function (omp_context *ctx, bool task_copy)
{
  tree decl, type, name, t;

  name = create_omp_child_function_name (task_copy);
  if (task_copy)
    type = build_function_type_list (void_type_node, ptr_type_node,
				     ptr_type_node, NULL_TREE);
  else
    type = build_function_type_list (void_type_node, ptr_type_node, NULL_TREE);

  decl = build_decl (gimple_location (ctx->stmt),
		     FUNCTION_DECL, name, type);

  if (!task_copy)
    ctx->cb.dst_fn = decl;
  else
    gimple_omp_task_set_copy_fn (ctx->stmt, decl);

  TREE_STATIC (decl) = 1;
  TREE_USED (decl) = 1;
  DECL_ARTIFICIAL (decl) = 1;
  DECL_NAMELESS (decl) = 1;
  DECL_IGNORED_P (decl) = 0;
  TREE_PUBLIC (decl) = 0;
  DECL_UNINLINABLE (decl) = 1;
  DECL_EXTERNAL (decl) = 0;
  DECL_CONTEXT (decl) = NULL_TREE;
  DECL_INITIAL (decl) = make_node (BLOCK);

  t = build_decl (DECL_SOURCE_LOCATION (decl),
		  RESULT_DECL, NULL_TREE, void_type_node);
  DECL_ARTIFICIAL (t) = 1;
  DECL_IGNORED_P (t) = 1;
  DECL_CONTEXT (t) = decl;
  DECL_RESULT (decl) = t;

  t = build_decl (DECL_SOURCE_LOCATION (decl),
		  PARM_DECL, get_identifier (".omp_data_i"), ptr_type_node);
  DECL_ARTIFICIAL (t) = 1;
  DECL_NAMELESS (t) = 1;
  DECL_ARG_TYPE (t) = ptr_type_node;
  DECL_CONTEXT (t) = current_function_decl;
  TREE_USED (t) = 1;
  DECL_ARGUMENTS (decl) = t;
  if (!task_copy)
    ctx->receiver_decl = t;
  else
    {
      t = build_decl (DECL_SOURCE_LOCATION (decl),
		      PARM_DECL, get_identifier (".omp_data_o"),
		      ptr_type_node);
      DECL_ARTIFICIAL (t) = 1;
      DECL_NAMELESS (t) = 1;
      DECL_ARG_TYPE (t) = ptr_type_node;
      DECL_CONTEXT (t) = current_function_decl;
      TREE_USED (t) = 1;
      TREE_ADDRESSABLE (t) = 1;
      DECL_CHAIN (t) = DECL_ARGUMENTS (decl);
      DECL_ARGUMENTS (decl) = t;
    }

  /* Allocate memory for the function structure.  The call to
     allocate_struct_function clobbers CFUN, so we need to restore
     it afterward.  */
  push_struct_function (decl);
  cfun->function_end_locus = gimple_location (ctx->stmt);
  pop_cfun ();
}
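
/* Put differently (a sketch, modulo target-specific name mangling),
   for a parallel region in function foo the decl built above amounts
   to the C prototype

       static void foo._omp_fn.0 (void *.omp_data_i);

   while a task copy function additionally receives the ".omp_data_o"
   pointer as its first argument.  */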


/* Scan an OpenMP parallel directive.  */

static void
scan_omp_parallel (gimple_stmt_iterator *gsi, omp_context *outer_ctx)
{
  omp_context *ctx;
  tree name;
  gimple stmt = gsi_stmt (*gsi);

  /* Ignore parallel directives with empty bodies, unless there
     are copyin clauses.  */
  if (optimize > 0
      && empty_body_p (gimple_omp_body (stmt))
      && find_omp_clause (gimple_omp_parallel_clauses (stmt),
			  OMP_CLAUSE_COPYIN) == NULL)
    {
      gsi_replace (gsi, gimple_build_nop (), false);
      return;
    }

  ctx = new_omp_context (stmt, outer_ctx);
  if (taskreg_nesting_level > 1)
    ctx->is_nested = true;
  ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
  ctx->default_kind = OMP_CLAUSE_DEFAULT_SHARED;
  ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
  name = create_tmp_var_name (".omp_data_s");
  name = build_decl (gimple_location (stmt),
		     TYPE_DECL, name, ctx->record_type);
  DECL_ARTIFICIAL (name) = 1;
  DECL_NAMELESS (name) = 1;
  TYPE_NAME (ctx->record_type) = name;
  create_omp_child_function (ctx, false);
  gimple_omp_parallel_set_child_fn (stmt, ctx->cb.dst_fn);

  scan_sharing_clauses (gimple_omp_parallel_clauses (stmt), ctx);
  scan_omp (gimple_omp_body_ptr (stmt), ctx);

  if (TYPE_FIELDS (ctx->record_type) == NULL)
    ctx->record_type = ctx->receiver_decl = NULL;
  else
    {
      layout_type (ctx->record_type);
      fixup_child_record_type (ctx);
    }
}

/* Scan an OpenMP task directive.  */

static void
scan_omp_task (gimple_stmt_iterator *gsi, omp_context *outer_ctx)
{
  omp_context *ctx;
  tree name, t;
  gimple stmt = gsi_stmt (*gsi);
  location_t loc = gimple_location (stmt);

  /* Ignore task directives with empty bodies.  */
  if (optimize > 0
      && empty_body_p (gimple_omp_body (stmt)))
    {
      gsi_replace (gsi, gimple_build_nop (), false);
      return;
    }

  ctx = new_omp_context (stmt, outer_ctx);
  if (taskreg_nesting_level > 1)
    ctx->is_nested = true;
  ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
  ctx->default_kind = OMP_CLAUSE_DEFAULT_SHARED;
  ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
  name = create_tmp_var_name (".omp_data_s");
  name = build_decl (gimple_location (stmt),
		     TYPE_DECL, name, ctx->record_type);
  DECL_ARTIFICIAL (name) = 1;
  DECL_NAMELESS (name) = 1;
  TYPE_NAME (ctx->record_type) = name;
  create_omp_child_function (ctx, false);
  gimple_omp_task_set_child_fn (stmt, ctx->cb.dst_fn);

  scan_sharing_clauses (gimple_omp_task_clauses (stmt), ctx);

  if (ctx->srecord_type)
    {
      name = create_tmp_var_name (".omp_data_a");
      name = build_decl (gimple_location (stmt),
			 TYPE_DECL, name, ctx->srecord_type);
      DECL_ARTIFICIAL (name) = 1;
      DECL_NAMELESS (name) = 1;
      TYPE_NAME (ctx->srecord_type) = name;
      create_omp_child_function (ctx, true);
    }

  scan_omp (gimple_omp_body_ptr (stmt), ctx);

  if (TYPE_FIELDS (ctx->record_type) == NULL)
    {
      ctx->record_type = ctx->receiver_decl = NULL;
      t = build_int_cst (long_integer_type_node, 0);
      gimple_omp_task_set_arg_size (stmt, t);
      t = build_int_cst (long_integer_type_node, 1);
      gimple_omp_task_set_arg_align (stmt, t);
    }
  else
    {
      tree *p, vla_fields = NULL_TREE, *q = &vla_fields;
      /* Move VLA fields to the end.  */
      p = &TYPE_FIELDS (ctx->record_type);
      while (*p)
	if (!TYPE_SIZE_UNIT (TREE_TYPE (*p))
	    || ! TREE_CONSTANT (TYPE_SIZE_UNIT (TREE_TYPE (*p))))
	  {
	    *q = *p;
	    *p = TREE_CHAIN (*p);
	    TREE_CHAIN (*q) = NULL_TREE;
	    q = &TREE_CHAIN (*q);
	  }
	else
	  p = &DECL_CHAIN (*p);
      *p = vla_fields;
      layout_type (ctx->record_type);
      fixup_child_record_type (ctx);
      if (ctx->srecord_type)
	layout_type (ctx->srecord_type);
      t = fold_convert_loc (loc, long_integer_type_node,
			    TYPE_SIZE_UNIT (ctx->record_type));
      gimple_omp_task_set_arg_size (stmt, t);
      t = build_int_cst (long_integer_type_node,
			 TYPE_ALIGN_UNIT (ctx->record_type));
      gimple_omp_task_set_arg_align (stmt, t);
    }
}


/* Scan an OpenMP loop directive.  */

static void
scan_omp_for (gimple stmt, omp_context *outer_ctx)
{
  omp_context *ctx;
  size_t i;

  ctx = new_omp_context (stmt, outer_ctx);

  scan_sharing_clauses (gimple_omp_for_clauses (stmt), ctx);

  scan_omp (gimple_omp_for_pre_body_ptr (stmt), ctx);
  for (i = 0; i < gimple_omp_for_collapse (stmt); i++)
    {
      scan_omp_op (gimple_omp_for_index_ptr (stmt, i), ctx);
      scan_omp_op (gimple_omp_for_initial_ptr (stmt, i), ctx);
      scan_omp_op (gimple_omp_for_final_ptr (stmt, i), ctx);
      scan_omp_op (gimple_omp_for_incr_ptr (stmt, i), ctx);
    }
  scan_omp (gimple_omp_body_ptr (stmt), ctx);
}

/* Scan an OpenMP sections directive.  */

static void
scan_omp_sections (gimple stmt, omp_context *outer_ctx)
{
  omp_context *ctx;

  ctx = new_omp_context (stmt, outer_ctx);
  scan_sharing_clauses (gimple_omp_sections_clauses (stmt), ctx);
  scan_omp (gimple_omp_body_ptr (stmt), ctx);
}

/* Scan an OpenMP single directive.  */

static void
scan_omp_single (gimple stmt, omp_context *outer_ctx)
{
  omp_context *ctx;
  tree name;

  ctx = new_omp_context (stmt, outer_ctx);
  ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
  ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
  name = create_tmp_var_name (".omp_copy_s");
  name = build_decl (gimple_location (stmt),
		     TYPE_DECL, name, ctx->record_type);
  TYPE_NAME (ctx->record_type) = name;

75a70cf9 1811 scan_sharing_clauses (gimple_omp_single_clauses (stmt), ctx);
ab129075 1812 scan_omp (gimple_omp_body_ptr (stmt), ctx);
1e8e9920 1813
1814 if (TYPE_FIELDS (ctx->record_type) == NULL)
1815 ctx->record_type = NULL;
1816 else
1817 layout_type (ctx->record_type);
1818}
1819
1e8e9920 1820
c1d127dd 1821/* Check OpenMP nesting restrictions. */
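/* For illustration only (not part of the pass): one nesting this
   function rejects is a worksharing region closely nested inside
   another worksharing region with no intervening parallel:

       #pragma omp single
       {
         #pragma omp for
         for (i = 0; i < n; i++)   // error: work-sharing region may not
           f (i);                  // be closely nested inside of ...
       }

   Wrapping the inner loop in its own #pragma omp parallel makes it
   valid, since the walk up the context chain stops as soon as it
   reaches a GIMPLE_OMP_PARALLEL. */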
ab129075 1822static bool
1823check_omp_nesting_restrictions (gimple stmt, omp_context *ctx)
c1d127dd 1824{
75a70cf9 1825 switch (gimple_code (stmt))
c1d127dd 1826 {
75a70cf9 1827 case GIMPLE_OMP_FOR:
1828 case GIMPLE_OMP_SECTIONS:
1829 case GIMPLE_OMP_SINGLE:
1830 case GIMPLE_CALL:
c1d127dd 1831 for (; ctx != NULL; ctx = ctx->outer)
75a70cf9 1832 switch (gimple_code (ctx->stmt))
c1d127dd 1833 {
75a70cf9 1834 case GIMPLE_OMP_FOR:
1835 case GIMPLE_OMP_SECTIONS:
1836 case GIMPLE_OMP_SINGLE:
1837 case GIMPLE_OMP_ORDERED:
1838 case GIMPLE_OMP_MASTER:
1839 case GIMPLE_OMP_TASK:
1840 if (is_gimple_call (stmt))
fd6481cf 1841 {
ab129075 1842 error_at (gimple_location (stmt),
1843 "barrier region may not be closely nested inside "
1844 "of work-sharing, critical, ordered, master or "
1845 "explicit task region");
1846 return false;
fd6481cf 1847 }
ab129075 1848 error_at (gimple_location (stmt),
1849 "work-sharing region may not be closely nested inside "
1850 "of work-sharing, critical, ordered, master or explicit "
1851 "task region");
1852 return false;
75a70cf9 1853 case GIMPLE_OMP_PARALLEL:
ab129075 1854 return true;
c1d127dd 1855 default:
1856 break;
1857 }
1858 break;
75a70cf9 1859 case GIMPLE_OMP_MASTER:
c1d127dd 1860 for (; ctx != NULL; ctx = ctx->outer)
75a70cf9 1861 switch (gimple_code (ctx->stmt))
c1d127dd 1862 {
75a70cf9 1863 case GIMPLE_OMP_FOR:
1864 case GIMPLE_OMP_SECTIONS:
1865 case GIMPLE_OMP_SINGLE:
1866 case GIMPLE_OMP_TASK:
ab129075 1867 error_at (gimple_location (stmt),
1868 "master region may not be closely nested inside "
1869 "of work-sharing or explicit task region");
1870 return false;
75a70cf9 1871 case GIMPLE_OMP_PARALLEL:
ab129075 1872 return true;
c1d127dd 1873 default:
1874 break;
1875 }
1876 break;
75a70cf9 1877 case GIMPLE_OMP_ORDERED:
c1d127dd 1878 for (; ctx != NULL; ctx = ctx->outer)
75a70cf9 1879 switch (gimple_code (ctx->stmt))
c1d127dd 1880 {
75a70cf9 1881 case GIMPLE_OMP_CRITICAL:
1882 case GIMPLE_OMP_TASK:
ab129075 1883 error_at (gimple_location (stmt),
1884 "ordered region may not be closely nested inside "
1885 "of critical or explicit task region");
1886 return false;
75a70cf9 1887 case GIMPLE_OMP_FOR:
1888 if (find_omp_clause (gimple_omp_for_clauses (ctx->stmt),
c1d127dd 1889 OMP_CLAUSE_ORDERED) == NULL)
ab129075 1890 {
1891 error_at (gimple_location (stmt),
1892 "ordered region must be closely nested inside "
c1d127dd 1893 "a loop region with an ordered clause");
ab129075 1894 return false;
1895 }
1896 return true;
75a70cf9 1897 case GIMPLE_OMP_PARALLEL:
ab129075 1898 return true;
c1d127dd 1899 default:
1900 break;
1901 }
1902 break;
75a70cf9 1903 case GIMPLE_OMP_CRITICAL:
c1d127dd 1904 for (; ctx != NULL; ctx = ctx->outer)
75a70cf9 1905 if (gimple_code (ctx->stmt) == GIMPLE_OMP_CRITICAL
1906 && (gimple_omp_critical_name (stmt)
1907 == gimple_omp_critical_name (ctx->stmt)))
c1d127dd 1908 {
ab129075 1909 error_at (gimple_location (stmt),
1910 "critical region may not be nested inside a critical "
1911 "region with the same name");
1912 return false;
c1d127dd 1913 }
1914 break;
1915 default:
1916 break;
1917 }
ab129075 1918 return true;
c1d127dd 1919}
1920
1921
75a70cf9 1922/* Helper function for scan_omp.
1923
1924 Callback for walk_tree or operators in walk_gimple_stmt used to
1925 scan for OpenMP directives in TP. */
1e8e9920 1926
1927static tree
75a70cf9 1928scan_omp_1_op (tree *tp, int *walk_subtrees, void *data)
1e8e9920 1929{
4077bf7a 1930 struct walk_stmt_info *wi = (struct walk_stmt_info *) data;
1931 omp_context *ctx = (omp_context *) wi->info;
1e8e9920 1932 tree t = *tp;
1933
75a70cf9 1934 switch (TREE_CODE (t))
1935 {
1936 case VAR_DECL:
1937 case PARM_DECL:
1938 case LABEL_DECL:
1939 case RESULT_DECL:
1940 if (ctx)
1941 *tp = remap_decl (t, &ctx->cb);
1942 break;
1943
1944 default:
1945 if (ctx && TYPE_P (t))
1946 *tp = remap_type (t, &ctx->cb);
1947 else if (!DECL_P (t))
7cf869dd 1948 {
1949 *walk_subtrees = 1;
1950 if (ctx)
182cf5a9 1951 {
1952 tree tem = remap_type (TREE_TYPE (t), &ctx->cb);
1953 if (tem != TREE_TYPE (t))
1954 {
1955 if (TREE_CODE (t) == INTEGER_CST)
1956 *tp = build_int_cst_wide (tem,
1957 TREE_INT_CST_LOW (t),
1958 TREE_INT_CST_HIGH (t));
1959 else
1960 TREE_TYPE (t) = tem;
1961 }
1962 }
7cf869dd 1963 }
75a70cf9 1964 break;
1965 }
1966
1967 return NULL_TREE;
1968}
1969
1970
1971/* Helper function for scan_omp.
1972
1973 Callback for walk_gimple_stmt used to scan for OpenMP directives in
1974 the current statement in GSI. */
1975
1976static tree
1977scan_omp_1_stmt (gimple_stmt_iterator *gsi, bool *handled_ops_p,
1978 struct walk_stmt_info *wi)
1979{
1980 gimple stmt = gsi_stmt (*gsi);
1981 omp_context *ctx = (omp_context *) wi->info;
1982
1983 if (gimple_has_location (stmt))
1984 input_location = gimple_location (stmt);
1e8e9920 1985
c1d127dd 1986 /* Check the OpenMP nesting restrictions. */
fd6481cf 1987 if (ctx != NULL)
1988 {
ab129075 1989 bool remove = false;
75a70cf9 1990 if (is_gimple_omp (stmt))
ab129075 1991 remove = !check_omp_nesting_restrictions (stmt, ctx);
75a70cf9 1992 else if (is_gimple_call (stmt))
fd6481cf 1993 {
75a70cf9 1994 tree fndecl = gimple_call_fndecl (stmt);
fd6481cf 1995 if (fndecl && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL
1996 && DECL_FUNCTION_CODE (fndecl) == BUILT_IN_GOMP_BARRIER)
ab129075 1997 remove = !check_omp_nesting_restrictions (stmt, ctx);
1998 }
1999 if (remove)
2000 {
2001 stmt = gimple_build_nop ();
2002 gsi_replace (gsi, stmt, false);
fd6481cf 2003 }
2004 }
c1d127dd 2005
75a70cf9 2006 *handled_ops_p = true;
2007
2008 switch (gimple_code (stmt))
1e8e9920 2009 {
75a70cf9 2010 case GIMPLE_OMP_PARALLEL:
fd6481cf 2011 taskreg_nesting_level++;
75a70cf9 2012 scan_omp_parallel (gsi, ctx);
fd6481cf 2013 taskreg_nesting_level--;
2014 break;
2015
75a70cf9 2016 case GIMPLE_OMP_TASK:
fd6481cf 2017 taskreg_nesting_level++;
75a70cf9 2018 scan_omp_task (gsi, ctx);
fd6481cf 2019 taskreg_nesting_level--;
1e8e9920 2020 break;
2021
75a70cf9 2022 case GIMPLE_OMP_FOR:
2023 scan_omp_for (stmt, ctx);
1e8e9920 2024 break;
2025
75a70cf9 2026 case GIMPLE_OMP_SECTIONS:
2027 scan_omp_sections (stmt, ctx);
1e8e9920 2028 break;
2029
75a70cf9 2030 case GIMPLE_OMP_SINGLE:
2031 scan_omp_single (stmt, ctx);
1e8e9920 2032 break;
2033
75a70cf9 2034 case GIMPLE_OMP_SECTION:
2035 case GIMPLE_OMP_MASTER:
2036 case GIMPLE_OMP_ORDERED:
2037 case GIMPLE_OMP_CRITICAL:
2038 ctx = new_omp_context (stmt, ctx);
ab129075 2039 scan_omp (gimple_omp_body_ptr (stmt), ctx);
1e8e9920 2040 break;
2041
75a70cf9 2042 case GIMPLE_BIND:
1e8e9920 2043 {
2044 tree var;
1e8e9920 2045
75a70cf9 2046 *handled_ops_p = false;
2047 if (ctx)
1767a056 2048 for (var = gimple_bind_vars (stmt); var ; var = DECL_CHAIN (var))
75a70cf9 2049 insert_decl_map (&ctx->cb, var, var);
1e8e9920 2050 }
2051 break;
1e8e9920 2052 default:
75a70cf9 2053 *handled_ops_p = false;
1e8e9920 2054 break;
2055 }
2056
2057 return NULL_TREE;
2058}
2059
2060
75a70cf9 2061/* Scan all the statements starting at the current statement. CTX
2062 contains context information about the OpenMP directives and
2063 clauses found during the scan. */
1e8e9920 2064
2065static void
ab129075 2066scan_omp (gimple_seq *body_p, omp_context *ctx)
1e8e9920 2067{
2068 location_t saved_location;
2069 struct walk_stmt_info wi;
2070
2071 memset (&wi, 0, sizeof (wi));
1e8e9920 2072 wi.info = ctx;
1e8e9920 2073 wi.want_locations = true;
2074
2075 saved_location = input_location;
ab129075 2076 walk_gimple_seq_mod (body_p, scan_omp_1_stmt, scan_omp_1_op, &wi);
1e8e9920 2077 input_location = saved_location;
2078}
2079\f
2080/* Re-gimplification and code generation routines. */
2081
2082/* Build a call to GOMP_barrier. */
2083
79acaae1 2084static tree
2085build_omp_barrier (void)
1e8e9920 2086{
b9a16870 2087 return build_call_expr (builtin_decl_explicit (BUILT_IN_GOMP_BARRIER), 0);
1e8e9920 2088}
2089
2090/* If a context was created for STMT when it was scanned, return it. */
2091
2092static omp_context *
75a70cf9 2093maybe_lookup_ctx (gimple stmt)
1e8e9920 2094{
2095 splay_tree_node n;
2096 n = splay_tree_lookup (all_contexts, (splay_tree_key) stmt);
2097 return n ? (omp_context *) n->value : NULL;
2098}
2099
773c5ba7 2100
2101/* Find the mapping for DECL in CTX or the immediately enclosing
2102 context that has a mapping for DECL.
2103
2104 If CTX is a nested parallel directive, we may have to use the decl
2105 mappings created in CTX's parent context. Suppose that we have the
 2106 following parallel nesting (variable UIDs shown for clarity):
2107
2108 iD.1562 = 0;
2109 #omp parallel shared(iD.1562) -> outer parallel
2110 iD.1562 = iD.1562 + 1;
2111
2112 #omp parallel shared (iD.1562) -> inner parallel
2113 iD.1562 = iD.1562 - 1;
2114
2115 Each parallel structure will create a distinct .omp_data_s structure
2116 for copying iD.1562 in/out of the directive:
2117
2118 outer parallel .omp_data_s.1.i -> iD.1562
2119 inner parallel .omp_data_s.2.i -> iD.1562
2120
2121 A shared variable mapping will produce a copy-out operation before
2122 the parallel directive and a copy-in operation after it. So, in
2123 this case we would have:
2124
2125 iD.1562 = 0;
2126 .omp_data_o.1.i = iD.1562;
2127 #omp parallel shared(iD.1562) -> outer parallel
2128 .omp_data_i.1 = &.omp_data_o.1
2129 .omp_data_i.1->i = .omp_data_i.1->i + 1;
2130
2131 .omp_data_o.2.i = iD.1562; -> **
2132 #omp parallel shared(iD.1562) -> inner parallel
2133 .omp_data_i.2 = &.omp_data_o.2
2134 .omp_data_i.2->i = .omp_data_i.2->i - 1;
2135
2136
2137 ** This is a problem. The symbol iD.1562 cannot be referenced
2138 inside the body of the outer parallel region. But since we are
2139 emitting this copy operation while expanding the inner parallel
2140 directive, we need to access the CTX structure of the outer
2141 parallel directive to get the correct mapping:
2142
2143 .omp_data_o.2.i = .omp_data_i.1->i
2144
2145 Since there may be other workshare or parallel directives enclosing
2146 the parallel directive, it may be necessary to walk up the context
2147 parent chain. This is not a problem in general because nested
2148 parallelism happens only rarely. */
2149
2150static tree
2151lookup_decl_in_outer_ctx (tree decl, omp_context *ctx)
2152{
2153 tree t;
2154 omp_context *up;
2155
773c5ba7 2156 for (up = ctx->outer, t = NULL; up && t == NULL; up = up->outer)
2157 t = maybe_lookup_decl (decl, up);
2158
87b31375 2159 gcc_assert (!ctx->is_nested || t || is_global_var (decl));
773c5ba7 2160
c37594c7 2161 return t ? t : decl;
773c5ba7 2162}
2163
2164
f49d7bb5 2165/* Similar to lookup_decl_in_outer_ctx, but return DECL if not found
2166 in outer contexts. */
2167
2168static tree
2169maybe_lookup_decl_in_outer_ctx (tree decl, omp_context *ctx)
2170{
2171 tree t = NULL;
2172 omp_context *up;
2173
87b31375 2174 for (up = ctx->outer, t = NULL; up && t == NULL; up = up->outer)
2175 t = maybe_lookup_decl (decl, up);
f49d7bb5 2176
2177 return t ? t : decl;
2178}
2179
2180
1e8e9920 2181/* Construct the initialization value for reduction CLAUSE. */
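/* For example (illustrative, mirroring the switch below): for an int,
   reduction(+:x) and reduction(|:x) start from 0, reduction(*:x) and
   reduction(&&:x) from 1, reduction(&:x) from ~0 (all bits set), and
   reduction(max:x) from INT_MIN; for floats honoring infinities, max
   starts from -inf and min from +inf. */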
2182
2183tree
2184omp_reduction_init (tree clause, tree type)
2185{
389dd41b 2186 location_t loc = OMP_CLAUSE_LOCATION (clause);
1e8e9920 2187 switch (OMP_CLAUSE_REDUCTION_CODE (clause))
2188 {
2189 case PLUS_EXPR:
2190 case MINUS_EXPR:
2191 case BIT_IOR_EXPR:
2192 case BIT_XOR_EXPR:
2193 case TRUTH_OR_EXPR:
2194 case TRUTH_ORIF_EXPR:
2195 case TRUTH_XOR_EXPR:
2196 case NE_EXPR:
385f3f36 2197 return build_zero_cst (type);
1e8e9920 2198
2199 case MULT_EXPR:
2200 case TRUTH_AND_EXPR:
2201 case TRUTH_ANDIF_EXPR:
2202 case EQ_EXPR:
389dd41b 2203 return fold_convert_loc (loc, type, integer_one_node);
1e8e9920 2204
2205 case BIT_AND_EXPR:
389dd41b 2206 return fold_convert_loc (loc, type, integer_minus_one_node);
1e8e9920 2207
2208 case MAX_EXPR:
2209 if (SCALAR_FLOAT_TYPE_P (type))
2210 {
2211 REAL_VALUE_TYPE max, min;
2212 if (HONOR_INFINITIES (TYPE_MODE (type)))
2213 {
2214 real_inf (&max);
2215 real_arithmetic (&min, NEGATE_EXPR, &max, NULL);
2216 }
2217 else
2218 real_maxval (&min, 1, TYPE_MODE (type));
2219 return build_real (type, min);
2220 }
2221 else
2222 {
2223 gcc_assert (INTEGRAL_TYPE_P (type));
2224 return TYPE_MIN_VALUE (type);
2225 }
2226
2227 case MIN_EXPR:
2228 if (SCALAR_FLOAT_TYPE_P (type))
2229 {
2230 REAL_VALUE_TYPE max;
2231 if (HONOR_INFINITIES (TYPE_MODE (type)))
2232 real_inf (&max);
2233 else
2234 real_maxval (&max, 0, TYPE_MODE (type));
2235 return build_real (type, max);
2236 }
2237 else
2238 {
2239 gcc_assert (INTEGRAL_TYPE_P (type));
2240 return TYPE_MAX_VALUE (type);
2241 }
2242
2243 default:
2244 gcc_unreachable ();
2245 }
2246}
2247
2248/* Generate code to implement the input clauses, FIRSTPRIVATE and COPYIN,
2249 from the receiver (aka child) side and initializers for REFERENCE_TYPE
2250 private variables. Initialization statements go in ILIST, while calls
2251 to destructors go in DLIST. */
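/* As a sketch (illustration only), for firstprivate(x) on a scalar
   the receiver side gets roughly

       x' = .omp_data_i->x;       // appended to ILIST

   while for a class type the language hook emits the copy
   constructor instead, with any matching destructor call going to
   DLIST. */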
2252
2253static void
75a70cf9 2254lower_rec_input_clauses (tree clauses, gimple_seq *ilist, gimple_seq *dlist,
1e4afe3c 2255 omp_context *ctx)
1e8e9920 2256{
c2f47e15 2257 tree c, dtor, copyin_seq, x, ptr;
1e8e9920 2258 bool copyin_by_ref = false;
f49d7bb5 2259 bool lastprivate_firstprivate = false;
1e8e9920 2260 int pass;
2261
1e8e9920 2262 copyin_seq = NULL;
2263
2264 /* Do all the fixed sized types in the first pass, and the variable sized
2265 types in the second pass. This makes sure that the scalar arguments to
48e1416a 2266 the variable sized types are processed before we use them in the
1e8e9920 2267 variable sized operations. */
2268 for (pass = 0; pass < 2; ++pass)
2269 {
2270 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
2271 {
55d6e7cd 2272 enum omp_clause_code c_kind = OMP_CLAUSE_CODE (c);
1e8e9920 2273 tree var, new_var;
2274 bool by_ref;
389dd41b 2275 location_t clause_loc = OMP_CLAUSE_LOCATION (c);
1e8e9920 2276
2277 switch (c_kind)
2278 {
2279 case OMP_CLAUSE_PRIVATE:
2280 if (OMP_CLAUSE_PRIVATE_DEBUG (c))
2281 continue;
2282 break;
2283 case OMP_CLAUSE_SHARED:
f49d7bb5 2284 if (maybe_lookup_decl (OMP_CLAUSE_DECL (c), ctx) == NULL)
2285 {
2286 gcc_assert (is_global_var (OMP_CLAUSE_DECL (c)));
2287 continue;
2288 }
1e8e9920 2289 case OMP_CLAUSE_FIRSTPRIVATE:
1e8e9920 2290 case OMP_CLAUSE_COPYIN:
2291 case OMP_CLAUSE_REDUCTION:
2292 break;
df2c34fc 2293 case OMP_CLAUSE_LASTPRIVATE:
f49d7bb5 2294 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
2295 {
2296 lastprivate_firstprivate = true;
2297 if (pass != 0)
2298 continue;
2299 }
df2c34fc 2300 break;
1e8e9920 2301 default:
2302 continue;
2303 }
2304
2305 new_var = var = OMP_CLAUSE_DECL (c);
2306 if (c_kind != OMP_CLAUSE_COPYIN)
2307 new_var = lookup_decl (var, ctx);
2308
2309 if (c_kind == OMP_CLAUSE_SHARED || c_kind == OMP_CLAUSE_COPYIN)
2310 {
2311 if (pass != 0)
2312 continue;
2313 }
1e8e9920 2314 else if (is_variable_sized (var))
2315 {
773c5ba7 2316 /* For variable sized types, we need to allocate the
2317 actual storage here. Call alloca and store the
2318 result in the pointer decl that we created elsewhere. */
1e8e9920 2319 if (pass == 0)
2320 continue;
2321
fd6481cf 2322 if (c_kind != OMP_CLAUSE_FIRSTPRIVATE || !is_task_ctx (ctx))
2323 {
75a70cf9 2324 gimple stmt;
b9a16870 2325 tree tmp, atmp;
75a70cf9 2326
fd6481cf 2327 ptr = DECL_VALUE_EXPR (new_var);
2328 gcc_assert (TREE_CODE (ptr) == INDIRECT_REF);
2329 ptr = TREE_OPERAND (ptr, 0);
2330 gcc_assert (DECL_P (ptr));
2331 x = TYPE_SIZE_UNIT (TREE_TYPE (new_var));
75a70cf9 2332
2333 /* void *tmp = __builtin_alloca */
b9a16870 2334 atmp = builtin_decl_explicit (BUILT_IN_ALLOCA);
2335 stmt = gimple_build_call (atmp, 1, x);
75a70cf9 2336 tmp = create_tmp_var_raw (ptr_type_node, NULL);
2337 gimple_add_tmp_var (tmp);
2338 gimple_call_set_lhs (stmt, tmp);
2339
2340 gimple_seq_add_stmt (ilist, stmt);
2341
389dd41b 2342 x = fold_convert_loc (clause_loc, TREE_TYPE (ptr), tmp);
75a70cf9 2343 gimplify_assign (ptr, x, ilist);
fd6481cf 2344 }
1e8e9920 2345 }
1e8e9920 2346 else if (is_reference (var))
2347 {
773c5ba7 2348 /* For references that are being privatized for Fortran,
2349 allocate new backing storage for the new pointer
2350 variable. This allows us to avoid changing all the
2351 code that expects a pointer to something that expects
2352 a direct variable. Note that this doesn't apply to
2353 C++, since reference types are disallowed in data
df2c34fc 2354 sharing clauses there, except for NRV optimized
2355 return values. */
1e8e9920 2356 if (pass == 0)
2357 continue;
2358
2359 x = TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (new_var)));
fd6481cf 2360 if (c_kind == OMP_CLAUSE_FIRSTPRIVATE && is_task_ctx (ctx))
2361 {
2362 x = build_receiver_ref (var, false, ctx);
389dd41b 2363 x = build_fold_addr_expr_loc (clause_loc, x);
fd6481cf 2364 }
2365 else if (TREE_CONSTANT (x))
1e8e9920 2366 {
2367 const char *name = NULL;
2368 if (DECL_NAME (var))
2369 name = IDENTIFIER_POINTER (DECL_NAME (new_var));
2370
df2c34fc 2371 x = create_tmp_var_raw (TREE_TYPE (TREE_TYPE (new_var)),
2372 name);
2373 gimple_add_tmp_var (x);
86f2ad37 2374 TREE_ADDRESSABLE (x) = 1;
389dd41b 2375 x = build_fold_addr_expr_loc (clause_loc, x);
1e8e9920 2376 }
2377 else
2378 {
b9a16870 2379 tree atmp = builtin_decl_explicit (BUILT_IN_ALLOCA);
2380 x = build_call_expr_loc (clause_loc, atmp, 1, x);
1e8e9920 2381 }
2382
389dd41b 2383 x = fold_convert_loc (clause_loc, TREE_TYPE (new_var), x);
75a70cf9 2384 gimplify_assign (new_var, x, ilist);
1e8e9920 2385
182cf5a9 2386 new_var = build_simple_mem_ref_loc (clause_loc, new_var);
1e8e9920 2387 }
2388 else if (c_kind == OMP_CLAUSE_REDUCTION
2389 && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
2390 {
2391 if (pass == 0)
2392 continue;
2393 }
2394 else if (pass != 0)
2395 continue;
2396
55d6e7cd 2397 switch (OMP_CLAUSE_CODE (c))
1e8e9920 2398 {
2399 case OMP_CLAUSE_SHARED:
f49d7bb5 2400 /* Shared global vars are just accessed directly. */
2401 if (is_global_var (new_var))
2402 break;
1e8e9920 2403 /* Set up the DECL_VALUE_EXPR for shared variables now. This
2404 needs to be delayed until after fixup_child_record_type so
2405 that we get the correct type during the dereference. */
e8a588af 2406 by_ref = use_pointer_for_field (var, ctx);
1e8e9920 2407 x = build_receiver_ref (var, by_ref, ctx);
2408 SET_DECL_VALUE_EXPR (new_var, x);
2409 DECL_HAS_VALUE_EXPR_P (new_var) = 1;
2410
2411 /* ??? If VAR is not passed by reference, and the variable
2412 hasn't been initialized yet, then we'll get a warning for
2413 the store into the omp_data_s structure. Ideally, we'd be
48e1416a 2414 able to notice this and not store anything at all, but
1e8e9920 2415 we're generating code too early. Suppress the warning. */
2416 if (!by_ref)
2417 TREE_NO_WARNING (var) = 1;
2418 break;
2419
2420 case OMP_CLAUSE_LASTPRIVATE:
2421 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
2422 break;
2423 /* FALLTHRU */
2424
2425 case OMP_CLAUSE_PRIVATE:
fd6481cf 2426 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_PRIVATE)
2427 x = build_outer_var_ref (var, ctx);
2428 else if (OMP_CLAUSE_PRIVATE_OUTER_REF (c))
2429 {
2430 if (is_task_ctx (ctx))
2431 x = build_receiver_ref (var, false, ctx);
2432 else
2433 x = build_outer_var_ref (var, ctx);
2434 }
2435 else
2436 x = NULL;
2437 x = lang_hooks.decls.omp_clause_default_ctor (c, new_var, x);
1e8e9920 2438 if (x)
2439 gimplify_and_add (x, ilist);
2440 /* FALLTHRU */
2441
2442 do_dtor:
2443 x = lang_hooks.decls.omp_clause_dtor (c, new_var);
2444 if (x)
2445 {
75a70cf9 2446 gimple_seq tseq = NULL;
2447
1e8e9920 2448 dtor = x;
75a70cf9 2449 gimplify_stmt (&dtor, &tseq);
e3a19533 2450 gimple_seq_add_seq (dlist, tseq);
1e8e9920 2451 }
2452 break;
2453
2454 case OMP_CLAUSE_FIRSTPRIVATE:
fd6481cf 2455 if (is_task_ctx (ctx))
2456 {
2457 if (is_reference (var) || is_variable_sized (var))
2458 goto do_dtor;
2459 else if (is_global_var (maybe_lookup_decl_in_outer_ctx (var,
2460 ctx))
2461 || use_pointer_for_field (var, NULL))
2462 {
2463 x = build_receiver_ref (var, false, ctx);
2464 SET_DECL_VALUE_EXPR (new_var, x);
2465 DECL_HAS_VALUE_EXPR_P (new_var) = 1;
2466 goto do_dtor;
2467 }
2468 }
1e8e9920 2469 x = build_outer_var_ref (var, ctx);
2470 x = lang_hooks.decls.omp_clause_copy_ctor (c, new_var, x);
2471 gimplify_and_add (x, ilist);
2472 goto do_dtor;
2473 break;
2474
2475 case OMP_CLAUSE_COPYIN:
e8a588af 2476 by_ref = use_pointer_for_field (var, NULL);
1e8e9920 2477 x = build_receiver_ref (var, by_ref, ctx);
2478 x = lang_hooks.decls.omp_clause_assign_op (c, new_var, x);
2479 append_to_statement_list (x, &copyin_seq);
2480 copyin_by_ref |= by_ref;
2481 break;
2482
2483 case OMP_CLAUSE_REDUCTION:
2484 if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
2485 {
fd6481cf 2486 tree placeholder = OMP_CLAUSE_REDUCTION_PLACEHOLDER (c);
2487 x = build_outer_var_ref (var, ctx);
2488
2489 if (is_reference (var))
389dd41b 2490 x = build_fold_addr_expr_loc (clause_loc, x);
fd6481cf 2491 SET_DECL_VALUE_EXPR (placeholder, x);
2492 DECL_HAS_VALUE_EXPR_P (placeholder) = 1;
e3a19533 2493 lower_omp (&OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c), ctx);
75a70cf9 2494 gimple_seq_add_seq (ilist,
2495 OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c));
2496 OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c) = NULL;
fd6481cf 2497 DECL_HAS_VALUE_EXPR_P (placeholder) = 0;
1e8e9920 2498 }
2499 else
2500 {
2501 x = omp_reduction_init (c, TREE_TYPE (new_var));
2502 gcc_assert (TREE_CODE (TREE_TYPE (new_var)) != ARRAY_TYPE);
75a70cf9 2503 gimplify_assign (new_var, x, ilist);
1e8e9920 2504 }
2505 break;
2506
2507 default:
2508 gcc_unreachable ();
2509 }
2510 }
2511 }
2512
2513 /* The copyin sequence is not to be executed by the main thread, since
 2514 that would result in self-copies. Perhaps not observable for scalars,
 2515 but it certainly is for C++ operator=. */
2516 if (copyin_seq)
2517 {
b9a16870 2518 x = build_call_expr (builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM),
2519 0);
1e8e9920 2520 x = build2 (NE_EXPR, boolean_type_node, x,
2521 build_int_cst (TREE_TYPE (x), 0));
2522 x = build3 (COND_EXPR, void_type_node, x, copyin_seq, NULL);
2523 gimplify_and_add (x, ilist);
2524 }
2525
2526 /* If any copyin variable is passed by reference, we must ensure the
2527 master thread doesn't modify it before it is copied over in all
f49d7bb5 2528 threads. Similarly for variables in both firstprivate and
2529 lastprivate clauses we need to ensure the lastprivate copying
2530 happens after firstprivate copying in all threads. */
2531 if (copyin_by_ref || lastprivate_firstprivate)
79acaae1 2532 gimplify_and_add (build_omp_barrier (), ilist);
1e8e9920 2533}
2534
773c5ba7 2535
1e8e9920 2536/* Generate code to implement the LASTPRIVATE clauses. This is used for
2537 both parallel and workshare constructs. PREDICATE may be NULL if it's
2538 always true. */
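/* Schematically (illustration only), when PREDICATE is present the
   emitted sequence is

       if (PREDICATE) goto L_true; else goto L_done;
     L_true:
       orig_x = x';               // one assignment per clause
     L_done:

   mirroring the GIMPLE_COND and labels built below. */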
2539
2540static void
75a70cf9 2541lower_lastprivate_clauses (tree clauses, tree predicate, gimple_seq *stmt_list,
2542 omp_context *ctx)
1e8e9920 2543{
75a70cf9 2544 tree x, c, label = NULL;
fd6481cf 2545 bool par_clauses = false;
1e8e9920 2546
2547 /* Early exit if there are no lastprivate clauses. */
2548 clauses = find_omp_clause (clauses, OMP_CLAUSE_LASTPRIVATE);
2549 if (clauses == NULL)
2550 {
2551 /* If this was a workshare clause, see if it had been combined
2552 with its parallel. In that case, look for the clauses on the
2553 parallel statement itself. */
2554 if (is_parallel_ctx (ctx))
2555 return;
2556
2557 ctx = ctx->outer;
2558 if (ctx == NULL || !is_parallel_ctx (ctx))
2559 return;
2560
75a70cf9 2561 clauses = find_omp_clause (gimple_omp_parallel_clauses (ctx->stmt),
1e8e9920 2562 OMP_CLAUSE_LASTPRIVATE);
2563 if (clauses == NULL)
2564 return;
fd6481cf 2565 par_clauses = true;
1e8e9920 2566 }
2567
75a70cf9 2568 if (predicate)
2569 {
2570 gimple stmt;
2571 tree label_true, arm1, arm2;
2572
e60a6f7b 2573 label = create_artificial_label (UNKNOWN_LOCATION);
2574 label_true = create_artificial_label (UNKNOWN_LOCATION);
75a70cf9 2575 arm1 = TREE_OPERAND (predicate, 0);
2576 arm2 = TREE_OPERAND (predicate, 1);
2577 gimplify_expr (&arm1, stmt_list, NULL, is_gimple_val, fb_rvalue);
2578 gimplify_expr (&arm2, stmt_list, NULL, is_gimple_val, fb_rvalue);
2579 stmt = gimple_build_cond (TREE_CODE (predicate), arm1, arm2,
2580 label_true, label);
2581 gimple_seq_add_stmt (stmt_list, stmt);
2582 gimple_seq_add_stmt (stmt_list, gimple_build_label (label_true));
2583 }
1e8e9920 2584
fd6481cf 2585 for (c = clauses; c ;)
1e8e9920 2586 {
2587 tree var, new_var;
389dd41b 2588 location_t clause_loc = OMP_CLAUSE_LOCATION (c);
1e8e9920 2589
fd6481cf 2590 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE)
2591 {
2592 var = OMP_CLAUSE_DECL (c);
2593 new_var = lookup_decl (var, ctx);
1e8e9920 2594
75a70cf9 2595 if (OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c))
2596 {
e3a19533 2597 lower_omp (&OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c), ctx);
75a70cf9 2598 gimple_seq_add_seq (stmt_list,
2599 OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c));
2600 }
2601 OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c) = NULL;
1e8e9920 2602
fd6481cf 2603 x = build_outer_var_ref (var, ctx);
2604 if (is_reference (var))
182cf5a9 2605 new_var = build_simple_mem_ref_loc (clause_loc, new_var);
fd6481cf 2606 x = lang_hooks.decls.omp_clause_assign_op (c, x, new_var);
75a70cf9 2607 gimplify_and_add (x, stmt_list);
fd6481cf 2608 }
2609 c = OMP_CLAUSE_CHAIN (c);
2610 if (c == NULL && !par_clauses)
2611 {
2612 /* If this was a workshare clause, see if it had been combined
2613 with its parallel. In that case, continue looking for the
2614 clauses also on the parallel statement itself. */
2615 if (is_parallel_ctx (ctx))
2616 break;
2617
2618 ctx = ctx->outer;
2619 if (ctx == NULL || !is_parallel_ctx (ctx))
2620 break;
2621
75a70cf9 2622 c = find_omp_clause (gimple_omp_parallel_clauses (ctx->stmt),
fd6481cf 2623 OMP_CLAUSE_LASTPRIVATE);
2624 par_clauses = true;
2625 }
1e8e9920 2626 }
2627
75a70cf9 2628 if (label)
2629 gimple_seq_add_stmt (stmt_list, gimple_build_label (label));
1e8e9920 2630}
2631
773c5ba7 2632
1e8e9920 2633/* Generate code to implement the REDUCTION clauses. */
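/* As an illustration: a single reduction(+:s) clause becomes an
   atomic update, roughly

       #pragma omp atomic
       s = s + s';

   whereas two or more clauses (or an array reduction) are merged
   inside a GOMP_atomic_start ()/GOMP_atomic_end () pair, as built
   at the end of this function. */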
2634
2635static void
75a70cf9 2636lower_reduction_clauses (tree clauses, gimple_seq *stmt_seqp, omp_context *ctx)
1e8e9920 2637{
75a70cf9 2638 gimple_seq sub_seq = NULL;
2639 gimple stmt;
2640 tree x, c;
1e8e9920 2641 int count = 0;
2642
2643 /* First see if there is exactly one reduction clause. Use OMP_ATOMIC
2644 update in that case, otherwise use a lock. */
2645 for (c = clauses; c && count < 2; c = OMP_CLAUSE_CHAIN (c))
55d6e7cd 2646 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION)
1e8e9920 2647 {
2648 if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
2649 {
2650 /* Never use OMP_ATOMIC for array reductions. */
2651 count = -1;
2652 break;
2653 }
2654 count++;
2655 }
2656
2657 if (count == 0)
2658 return;
2659
2660 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
2661 {
2662 tree var, ref, new_var;
2663 enum tree_code code;
389dd41b 2664 location_t clause_loc = OMP_CLAUSE_LOCATION (c);
1e8e9920 2665
55d6e7cd 2666 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_REDUCTION)
1e8e9920 2667 continue;
2668
2669 var = OMP_CLAUSE_DECL (c);
2670 new_var = lookup_decl (var, ctx);
2671 if (is_reference (var))
182cf5a9 2672 new_var = build_simple_mem_ref_loc (clause_loc, new_var);
1e8e9920 2673 ref = build_outer_var_ref (var, ctx);
2674 code = OMP_CLAUSE_REDUCTION_CODE (c);
773c5ba7 2675
2676 /* reduction(-:var) sums up the partial results, so it acts
2677 identically to reduction(+:var). */
1e8e9920 2678 if (code == MINUS_EXPR)
2679 code = PLUS_EXPR;
2680
2681 if (count == 1)
2682 {
389dd41b 2683 tree addr = build_fold_addr_expr_loc (clause_loc, ref);
1e8e9920 2684
2685 addr = save_expr (addr);
2686 ref = build1 (INDIRECT_REF, TREE_TYPE (TREE_TYPE (addr)), addr);
389dd41b 2687 x = fold_build2_loc (clause_loc, code, TREE_TYPE (ref), ref, new_var);
1e8e9920 2688 x = build2 (OMP_ATOMIC, void_type_node, addr, x);
75a70cf9 2689 gimplify_and_add (x, stmt_seqp);
1e8e9920 2690 return;
2691 }
2692
2693 if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
2694 {
2695 tree placeholder = OMP_CLAUSE_REDUCTION_PLACEHOLDER (c);
2696
2697 if (is_reference (var))
389dd41b 2698 ref = build_fold_addr_expr_loc (clause_loc, ref);
1e8e9920 2699 SET_DECL_VALUE_EXPR (placeholder, ref);
2700 DECL_HAS_VALUE_EXPR_P (placeholder) = 1;
e3a19533 2701 lower_omp (&OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c), ctx);
75a70cf9 2702 gimple_seq_add_seq (&sub_seq, OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c));
2703 OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c) = NULL;
1e8e9920 2704 OMP_CLAUSE_REDUCTION_PLACEHOLDER (c) = NULL;
2705 }
2706 else
2707 {
2708 x = build2 (code, TREE_TYPE (ref), ref, new_var);
2709 ref = build_outer_var_ref (var, ctx);
75a70cf9 2710 gimplify_assign (ref, x, &sub_seq);
1e8e9920 2711 }
2712 }
2713
b9a16870 2714 stmt = gimple_build_call (builtin_decl_explicit (BUILT_IN_GOMP_ATOMIC_START),
2715 0);
75a70cf9 2716 gimple_seq_add_stmt (stmt_seqp, stmt);
1e8e9920 2717
75a70cf9 2718 gimple_seq_add_seq (stmt_seqp, sub_seq);
1e8e9920 2719
b9a16870 2720 stmt = gimple_build_call (builtin_decl_explicit (BUILT_IN_GOMP_ATOMIC_END),
2721 0);
75a70cf9 2722 gimple_seq_add_stmt (stmt_seqp, stmt);
1e8e9920 2723}
2724
773c5ba7 2725
1e8e9920 2726/* Generate code to implement the COPYPRIVATE clauses. */
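/* Sketch (illustration only) for copyprivate(x) on a single
   construct: the thread that ran the single body broadcasts through
   the .omp_copy_s record, and the other threads read it back:

       .omp_copy_s.x = &x;        // sender side, goes to SLIST
       x = *.omp_copy_s.x;        // receiver side, goes to RLIST

   with the address indirection dropped when X need not be passed by
   reference. */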
2727
2728static void
75a70cf9 2729lower_copyprivate_clauses (tree clauses, gimple_seq *slist, gimple_seq *rlist,
1e8e9920 2730 omp_context *ctx)
2731{
2732 tree c;
2733
2734 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
2735 {
cb561506 2736 tree var, new_var, ref, x;
1e8e9920 2737 bool by_ref;
389dd41b 2738 location_t clause_loc = OMP_CLAUSE_LOCATION (c);
1e8e9920 2739
55d6e7cd 2740 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_COPYPRIVATE)
1e8e9920 2741 continue;
2742
2743 var = OMP_CLAUSE_DECL (c);
e8a588af 2744 by_ref = use_pointer_for_field (var, NULL);
1e8e9920 2745
2746 ref = build_sender_ref (var, ctx);
cb561506 2747 x = new_var = lookup_decl_in_outer_ctx (var, ctx);
2748 if (by_ref)
2749 {
2750 x = build_fold_addr_expr_loc (clause_loc, new_var);
2751 x = fold_convert_loc (clause_loc, TREE_TYPE (ref), x);
2752 }
75a70cf9 2753 gimplify_assign (ref, x, slist);
1e8e9920 2754
cb561506 2755 ref = build_receiver_ref (var, false, ctx);
2756 if (by_ref)
2757 {
2758 ref = fold_convert_loc (clause_loc,
2759 build_pointer_type (TREE_TYPE (new_var)),
2760 ref);
2761 ref = build_fold_indirect_ref_loc (clause_loc, ref);
2762 }
1e8e9920 2763 if (is_reference (var))
2764 {
cb561506 2765 ref = fold_convert_loc (clause_loc, TREE_TYPE (new_var), ref);
182cf5a9 2766 ref = build_simple_mem_ref_loc (clause_loc, ref);
2767 new_var = build_simple_mem_ref_loc (clause_loc, new_var);
1e8e9920 2768 }
cb561506 2769 x = lang_hooks.decls.omp_clause_assign_op (c, new_var, ref);
1e8e9920 2770 gimplify_and_add (x, rlist);
2771 }
2772}
2773
773c5ba7 2774
1e8e9920 2775/* Generate code to implement the clauses, FIRSTPRIVATE, COPYIN, LASTPRIVATE,
2776 and REDUCTION from the sender (aka parent) side. */
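/* Roughly (illustration only), for firstprivate(x) lastprivate(y)
   the parent side emits

       .omp_data_o.x = x;         // ILIST, before the region
       y = .omp_data_o.y;         // OLIST, after the region

   (an address, &x, is stored instead when the field is passed by
   reference), and a reduction variable typically appears on both
   lists. */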
2777
2778static void
75a70cf9 2779lower_send_clauses (tree clauses, gimple_seq *ilist, gimple_seq *olist,
2780 omp_context *ctx)
1e8e9920 2781{
2782 tree c;
2783
2784 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
2785 {
773c5ba7 2786 tree val, ref, x, var;
1e8e9920 2787 bool by_ref, do_in = false, do_out = false;
389dd41b 2788 location_t clause_loc = OMP_CLAUSE_LOCATION (c);
1e8e9920 2789
55d6e7cd 2790 switch (OMP_CLAUSE_CODE (c))
1e8e9920 2791 {
fd6481cf 2792 case OMP_CLAUSE_PRIVATE:
2793 if (OMP_CLAUSE_PRIVATE_OUTER_REF (c))
2794 break;
2795 continue;
1e8e9920 2796 case OMP_CLAUSE_FIRSTPRIVATE:
2797 case OMP_CLAUSE_COPYIN:
2798 case OMP_CLAUSE_LASTPRIVATE:
2799 case OMP_CLAUSE_REDUCTION:
2800 break;
2801 default:
2802 continue;
2803 }
2804
87b31375 2805 val = OMP_CLAUSE_DECL (c);
2806 var = lookup_decl_in_outer_ctx (val, ctx);
773c5ba7 2807
f49d7bb5 2808 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_COPYIN
2809 && is_global_var (var))
2810 continue;
1e8e9920 2811 if (is_variable_sized (val))
2812 continue;
e8a588af 2813 by_ref = use_pointer_for_field (val, NULL);
1e8e9920 2814
55d6e7cd 2815 switch (OMP_CLAUSE_CODE (c))
1e8e9920 2816 {
fd6481cf 2817 case OMP_CLAUSE_PRIVATE:
1e8e9920 2818 case OMP_CLAUSE_FIRSTPRIVATE:
2819 case OMP_CLAUSE_COPYIN:
2820 do_in = true;
2821 break;
2822
2823 case OMP_CLAUSE_LASTPRIVATE:
2824 if (by_ref || is_reference (val))
2825 {
2826 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
2827 continue;
2828 do_in = true;
2829 }
2830 else
fd6481cf 2831 {
2832 do_out = true;
2833 if (lang_hooks.decls.omp_private_outer_ref (val))
2834 do_in = true;
2835 }
1e8e9920 2836 break;
2837
2838 case OMP_CLAUSE_REDUCTION:
2839 do_in = true;
2840 do_out = !(by_ref || is_reference (val));
2841 break;
2842
2843 default:
2844 gcc_unreachable ();
2845 }
2846
2847 if (do_in)
2848 {
2849 ref = build_sender_ref (val, ctx);
389dd41b 2850 x = by_ref ? build_fold_addr_expr_loc (clause_loc, var) : var;
75a70cf9 2851 gimplify_assign (ref, x, ilist);
fd6481cf 2852 if (is_task_ctx (ctx))
2853 DECL_ABSTRACT_ORIGIN (TREE_OPERAND (ref, 1)) = NULL;
1e8e9920 2854 }
773c5ba7 2855
1e8e9920 2856 if (do_out)
2857 {
2858 ref = build_sender_ref (val, ctx);
75a70cf9 2859 gimplify_assign (var, ref, olist);
1e8e9920 2860 }
2861 }
2862}
2863
75a70cf9 2864/* Generate code to implement SHARED from the sender (aka parent)
2865 side. This is trickier, since GIMPLE_OMP_PARALLEL_CLAUSES doesn't
2866 list things that got automatically shared. */
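/* Illustration only: for a shared variable X that must live in
   memory the parent stores its address,

       .omp_data_o.x = &x;

   otherwise the value itself is copied out before the region and,
   unless X is read-only, copied back in afterwards. */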
1e8e9920 2867
2868static void
75a70cf9 2869lower_send_shared_vars (gimple_seq *ilist, gimple_seq *olist, omp_context *ctx)
1e8e9920 2870{
fd6481cf 2871 tree var, ovar, nvar, f, x, record_type;
1e8e9920 2872
2873 if (ctx->record_type == NULL)
2874 return;
773c5ba7 2875
fd6481cf 2876 record_type = ctx->srecord_type ? ctx->srecord_type : ctx->record_type;
1767a056 2877 for (f = TYPE_FIELDS (record_type); f ; f = DECL_CHAIN (f))
1e8e9920 2878 {
2879 ovar = DECL_ABSTRACT_ORIGIN (f);
2880 nvar = maybe_lookup_decl (ovar, ctx);
2881 if (!nvar || !DECL_HAS_VALUE_EXPR_P (nvar))
2882 continue;
2883
773c5ba7 2884 /* If CTX is a nested parallel directive, find the immediately
2885 enclosing parallel or workshare construct that contains a
2886 mapping for OVAR. */
87b31375 2887 var = lookup_decl_in_outer_ctx (ovar, ctx);
773c5ba7 2888
e8a588af 2889 if (use_pointer_for_field (ovar, ctx))
1e8e9920 2890 {
2891 x = build_sender_ref (ovar, ctx);
773c5ba7 2892 var = build_fold_addr_expr (var);
75a70cf9 2893 gimplify_assign (x, var, ilist);
1e8e9920 2894 }
2895 else
2896 {
2897 x = build_sender_ref (ovar, ctx);
75a70cf9 2898 gimplify_assign (x, var, ilist);
1e8e9920 2899
d2263ebb 2900 if (!TREE_READONLY (var)
2901 /* We don't need to receive a new reference to a result
2902 or parm decl. In fact we may not store to it as we will
2903 invalidate any pending RSO and generate wrong gimple
2904 during inlining. */
2905 && !((TREE_CODE (var) == RESULT_DECL
2906 || TREE_CODE (var) == PARM_DECL)
2907 && DECL_BY_REFERENCE (var)))
fd6481cf 2908 {
2909 x = build_sender_ref (ovar, ctx);
75a70cf9 2910 gimplify_assign (var, x, olist);
fd6481cf 2911 }
1e8e9920 2912 }
2913 }
2914}
2915
75a70cf9 2916
2917/* A convenience function to build an empty GIMPLE_COND with just the
2918 condition. */
2919
2920static gimple
2921gimple_build_cond_empty (tree cond)
2922{
2923 enum tree_code pred_code;
2924 tree lhs, rhs;
2925
2926 gimple_cond_get_ops_from_tree (cond, &pred_code, &lhs, &rhs);
2927 return gimple_build_cond (pred_code, lhs, rhs, NULL_TREE, NULL_TREE);
2928}
2929
2930
48e1416a 2931/* Build the function calls to GOMP_parallel_start etc to actually
773c5ba7 2932 generate the parallel operation. REGION is the parallel region
 2933 being expanded. BB is the block where the code is inserted. WS_ARGS
 2934 will be set if this is a call to a combined parallel+workshare
 2935 construct; it contains the list of additional arguments needed by
2936 the workshare construct. */
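/* Schematically (illustration only, arguments simplified), for

       #pragma omp parallel num_threads (4)
         body;

   the code emitted below amounts to

       GOMP_parallel_start (child_fn, &.omp_data_o, 4);
       child_fn (&.omp_data_o);
       GOMP_parallel_end ();

   with any WS_ARGS appended to the start call for combined
   parallel+workshare regions. */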
1e8e9920 2937
2938static void
61e47ac8 2939expand_parallel_call (struct omp_region *region, basic_block bb,
414c3a2c 2940 gimple entry_stmt, VEC(tree,gc) *ws_args)
1e8e9920 2941{
79acaae1 2942 tree t, t1, t2, val, cond, c, clauses;
75a70cf9 2943 gimple_stmt_iterator gsi;
2944 gimple stmt;
b9a16870 2945 enum built_in_function start_ix;
2946 int start_ix2;
389dd41b 2947 location_t clause_loc;
414c3a2c 2948 VEC(tree,gc) *args;
773c5ba7 2949
75a70cf9 2950 clauses = gimple_omp_parallel_clauses (entry_stmt);
773c5ba7 2951
334ec2d8 2952 /* Determine what flavor of GOMP_parallel_start we will be
773c5ba7 2953 emitting. */
2954 start_ix = BUILT_IN_GOMP_PARALLEL_START;
2955 if (is_combined_parallel (region))
2956 {
61e47ac8 2957 switch (region->inner->type)
773c5ba7 2958 {
75a70cf9 2959 case GIMPLE_OMP_FOR:
fd6481cf 2960 gcc_assert (region->inner->sched_kind != OMP_CLAUSE_SCHEDULE_AUTO);
b9a16870 2961 start_ix2 = ((int)BUILT_IN_GOMP_PARALLEL_LOOP_STATIC_START
2962 + (region->inner->sched_kind
2963 == OMP_CLAUSE_SCHEDULE_RUNTIME
2964 ? 3 : region->inner->sched_kind));
2965 start_ix = (enum built_in_function)start_ix2;
61e47ac8 2966 break;
75a70cf9 2967 case GIMPLE_OMP_SECTIONS:
61e47ac8 2968 start_ix = BUILT_IN_GOMP_PARALLEL_SECTIONS_START;
2969 break;
2970 default:
2971 gcc_unreachable ();
773c5ba7 2972 }
773c5ba7 2973 }
1e8e9920 2974
2975 /* By default, the value of NUM_THREADS is zero (selected at run time)
2976 and there is no conditional. */
2977 cond = NULL_TREE;
2978 val = build_int_cst (unsigned_type_node, 0);
2979
2980 c = find_omp_clause (clauses, OMP_CLAUSE_IF);
2981 if (c)
2982 cond = OMP_CLAUSE_IF_EXPR (c);
2983
2984 c = find_omp_clause (clauses, OMP_CLAUSE_NUM_THREADS);
2985 if (c)
389dd41b 2986 {
2987 val = OMP_CLAUSE_NUM_THREADS_EXPR (c);
2988 clause_loc = OMP_CLAUSE_LOCATION (c);
2989 }
2990 else
2991 clause_loc = gimple_location (entry_stmt);
1e8e9920 2992
2993 /* Ensure 'val' is of the correct type. */
389dd41b 2994 val = fold_convert_loc (clause_loc, unsigned_type_node, val);
1e8e9920 2995
2996 /* If we found the clause 'if (cond)', build either
2997 (cond != 0) or (cond ? val : 1u). */
2998 if (cond)
2999 {
75a70cf9 3000 gimple_stmt_iterator gsi;
773c5ba7 3001
3002 cond = gimple_boolify (cond);
3003
1e8e9920 3004 if (integer_zerop (val))
389dd41b 3005 val = fold_build2_loc (clause_loc,
3006 EQ_EXPR, unsigned_type_node, cond,
79acaae1 3007 build_int_cst (TREE_TYPE (cond), 0));
1e8e9920 3008 else
773c5ba7 3009 {
3010 basic_block cond_bb, then_bb, else_bb;
79acaae1 3011 edge e, e_then, e_else;
75a70cf9 3012 tree tmp_then, tmp_else, tmp_join, tmp_var;
79acaae1 3013
3014 tmp_var = create_tmp_var (TREE_TYPE (val), NULL);
3015 if (gimple_in_ssa_p (cfun))
3016 {
75a70cf9 3017 tmp_then = make_ssa_name (tmp_var, NULL);
3018 tmp_else = make_ssa_name (tmp_var, NULL);
3019 tmp_join = make_ssa_name (tmp_var, NULL);
79acaae1 3020 }
3021 else
3022 {
3023 tmp_then = tmp_var;
3024 tmp_else = tmp_var;
3025 tmp_join = tmp_var;
3026 }
773c5ba7 3027
773c5ba7 3028 e = split_block (bb, NULL);
3029 cond_bb = e->src;
3030 bb = e->dest;
3031 remove_edge (e);
3032
3033 then_bb = create_empty_bb (cond_bb);
3034 else_bb = create_empty_bb (then_bb);
79acaae1 3035 set_immediate_dominator (CDI_DOMINATORS, then_bb, cond_bb);
3036 set_immediate_dominator (CDI_DOMINATORS, else_bb, cond_bb);
773c5ba7 3037
75a70cf9 3038 stmt = gimple_build_cond_empty (cond);
3039 gsi = gsi_start_bb (cond_bb);
3040 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
773c5ba7 3041
75a70cf9 3042 gsi = gsi_start_bb (then_bb);
3043 stmt = gimple_build_assign (tmp_then, val);
3044 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
773c5ba7 3045
75a70cf9 3046 gsi = gsi_start_bb (else_bb);
3047 stmt = gimple_build_assign
3048 (tmp_else, build_int_cst (unsigned_type_node, 1));
3049 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
773c5ba7 3050
3051 make_edge (cond_bb, then_bb, EDGE_TRUE_VALUE);
3052 make_edge (cond_bb, else_bb, EDGE_FALSE_VALUE);
79acaae1 3053 e_then = make_edge (then_bb, bb, EDGE_FALLTHRU);
3054 e_else = make_edge (else_bb, bb, EDGE_FALLTHRU);
773c5ba7 3055
79acaae1 3056 if (gimple_in_ssa_p (cfun))
3057 {
75a70cf9 3058 gimple phi = create_phi_node (tmp_join, bb);
60d535d2 3059 add_phi_arg (phi, tmp_then, e_then, UNKNOWN_LOCATION);
3060 add_phi_arg (phi, tmp_else, e_else, UNKNOWN_LOCATION);
79acaae1 3061 }
3062
3063 val = tmp_join;
773c5ba7 3064 }
3065
75a70cf9 3066 gsi = gsi_start_bb (bb);
3067 val = force_gimple_operand_gsi (&gsi, val, true, NULL_TREE,
3068 false, GSI_CONTINUE_LINKING);
1e8e9920 3069 }
3070
75a70cf9 3071 gsi = gsi_last_bb (bb);
3072 t = gimple_omp_parallel_data_arg (entry_stmt);
1e8e9920 3073 if (t == NULL)
c2f47e15 3074 t1 = null_pointer_node;
1e8e9920 3075 else
c2f47e15 3076 t1 = build_fold_addr_expr (t);
75a70cf9 3077 t2 = build_fold_addr_expr (gimple_omp_parallel_child_fn (entry_stmt));
773c5ba7 3078
414c3a2c 3079 args = VEC_alloc (tree, gc, 3 + VEC_length (tree, ws_args));
3080 VEC_quick_push (tree, args, t2);
3081 VEC_quick_push (tree, args, t1);
3082 VEC_quick_push (tree, args, val);
3083 VEC_splice (tree, args, ws_args);
3084
3085 t = build_call_expr_loc_vec (UNKNOWN_LOCATION,
b9a16870 3086 builtin_decl_explicit (start_ix), args);
773c5ba7 3087
75a70cf9 3088 force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3089 false, GSI_CONTINUE_LINKING);
1e8e9920 3090
75a70cf9 3091 t = gimple_omp_parallel_data_arg (entry_stmt);
1e8e9920 3092 if (t == NULL)
3093 t = null_pointer_node;
3094 else
3095 t = build_fold_addr_expr (t);
389dd41b 3096 t = build_call_expr_loc (gimple_location (entry_stmt),
3097 gimple_omp_parallel_child_fn (entry_stmt), 1, t);
75a70cf9 3098 force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3099 false, GSI_CONTINUE_LINKING);
1e8e9920 3100
389dd41b 3101 t = build_call_expr_loc (gimple_location (entry_stmt),
b9a16870 3102 builtin_decl_explicit (BUILT_IN_GOMP_PARALLEL_END),
3103 0);
75a70cf9 3104 force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3105 false, GSI_CONTINUE_LINKING);
1e8e9920 3106}
3107
773c5ba7 3108
fd6481cf 3109/* Build the function call to GOMP_task to actually
 3110 generate the task operation. BB is the block where the code is inserted. */
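/* Schematically (illustration only), the emitted call is

       GOMP_task (child_fn, &.omp_data_o, cpyfn, arg_size, arg_align,
                  if_cond, flags);

   where FLAGS packs untied into bit 0, final into bit 1 and
   mergeable into bit 2, matching the arithmetic below. */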
3111
3112static void
75a70cf9 3113expand_task_call (basic_block bb, gimple entry_stmt)
fd6481cf 3114{
2169f33b 3115 tree t, t1, t2, t3, flags, cond, c, c2, clauses;
75a70cf9 3116 gimple_stmt_iterator gsi;
389dd41b 3117 location_t loc = gimple_location (entry_stmt);
fd6481cf 3118
75a70cf9 3119 clauses = gimple_omp_task_clauses (entry_stmt);
fd6481cf 3120
fd6481cf 3121 c = find_omp_clause (clauses, OMP_CLAUSE_IF);
3122 if (c)
3123 cond = gimple_boolify (OMP_CLAUSE_IF_EXPR (c));
3124 else
3125 cond = boolean_true_node;
3126
3127 c = find_omp_clause (clauses, OMP_CLAUSE_UNTIED);
2169f33b 3128 c2 = find_omp_clause (clauses, OMP_CLAUSE_MERGEABLE);
3129 flags = build_int_cst (unsigned_type_node,
3130 (c ? 1 : 0) + (c2 ? 4 : 0));
3131
3132 c = find_omp_clause (clauses, OMP_CLAUSE_FINAL);
3133 if (c)
3134 {
3135 c = gimple_boolify (OMP_CLAUSE_FINAL_EXPR (c));
3136 c = fold_build3_loc (loc, COND_EXPR, unsigned_type_node, c,
3137 build_int_cst (unsigned_type_node, 2),
3138 build_int_cst (unsigned_type_node, 0));
3139 flags = fold_build2_loc (loc, PLUS_EXPR, unsigned_type_node, flags, c);
3140 }
fd6481cf 3141
75a70cf9 3142 gsi = gsi_last_bb (bb);
3143 t = gimple_omp_task_data_arg (entry_stmt);
fd6481cf 3144 if (t == NULL)
3145 t2 = null_pointer_node;
3146 else
389dd41b 3147 t2 = build_fold_addr_expr_loc (loc, t);
3148 t1 = build_fold_addr_expr_loc (loc, gimple_omp_task_child_fn (entry_stmt));
75a70cf9 3149 t = gimple_omp_task_copy_fn (entry_stmt);
fd6481cf 3150 if (t == NULL)
3151 t3 = null_pointer_node;
3152 else
389dd41b 3153 t3 = build_fold_addr_expr_loc (loc, t);
fd6481cf 3154
b9a16870 3155 t = build_call_expr (builtin_decl_explicit (BUILT_IN_GOMP_TASK),
3156 7, t1, t2, t3,
75a70cf9 3157 gimple_omp_task_arg_size (entry_stmt),
3158 gimple_omp_task_arg_align (entry_stmt), cond, flags);
fd6481cf 3159
75a70cf9 3160 force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3161 false, GSI_CONTINUE_LINKING);
fd6481cf 3162}
3163
3164
75a70cf9 3165/* If exceptions are enabled, wrap the statements in BODY in a MUST_NOT_THROW
3166 catch handler and return it. This prevents programs from violating the
3167 structured block semantics with throws. */
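/* Conceptually (illustration only) the transformation is

       try { BODY } catch (...) { terminate (); }

   where the handler is the language's cleanup action (std::terminate
   for C++) or __builtin_trap, and is itself marked must-not-throw. */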
1e8e9920 3168
75a70cf9 3169static gimple_seq
3170maybe_catch_exception (gimple_seq body)
1e8e9920 3171{
e38def9c 3172 gimple g;
3173 tree decl;
1e8e9920 3174
3175 if (!flag_exceptions)
75a70cf9 3176 return body;
1e8e9920 3177
596981c8 3178 if (lang_hooks.eh_protect_cleanup_actions != NULL)
3179 decl = lang_hooks.eh_protect_cleanup_actions ();
1e8e9920 3180 else
b9a16870 3181 decl = builtin_decl_explicit (BUILT_IN_TRAP);
75a70cf9 3182
e38def9c 3183 g = gimple_build_eh_must_not_throw (decl);
3184 g = gimple_build_try (body, gimple_seq_alloc_with_stmt (g),
75a70cf9 3185 GIMPLE_TRY_CATCH);
1e8e9920 3186
e38def9c 3187 return gimple_seq_alloc_with_stmt (g);
1e8e9920 3188}
3189
773c5ba7 3190/* Chain all the DECLs in V by their DECL_CHAIN fields. */
1e8e9920 3191
773c5ba7 3192static tree
2ab2ce89 3193vec2chain (VEC(tree,gc) *v)
1e8e9920 3194{
2ab2ce89 3195 tree chain = NULL_TREE, t;
3196 unsigned ix;
1e8e9920 3197
2ab2ce89 3198 FOR_EACH_VEC_ELT_REVERSE (tree, v, ix, t)
773c5ba7 3199 {
1767a056 3200 DECL_CHAIN (t) = chain;
2ab2ce89 3201 chain = t;
773c5ba7 3202 }
1e8e9920 3203
2ab2ce89 3204 return chain;
773c5ba7 3205}
1e8e9920 3206
1e8e9920 3207
773c5ba7 3208/* Remove barriers in REGION->EXIT's block. Note that this is only
75a70cf9 3209 valid for GIMPLE_OMP_PARALLEL regions. Since the end of a parallel region
 3210 is an implicit barrier, any barrier that a workshare inside the
 3211 GIMPLE_OMP_PARALLEL left at the end of the GIMPLE_OMP_PARALLEL
 3212 region can now be removed. */
1e8e9920 3213
773c5ba7 3214static void
3215remove_exit_barrier (struct omp_region *region)
3216{
75a70cf9 3217 gimple_stmt_iterator gsi;
773c5ba7 3218 basic_block exit_bb;
61e47ac8 3219 edge_iterator ei;
3220 edge e;
75a70cf9 3221 gimple stmt;
4a04f4b4 3222 int any_addressable_vars = -1;
1e8e9920 3223
61e47ac8 3224 exit_bb = region->exit;
1e8e9920 3225
5056ba1a 3226 /* If the parallel region doesn't return, we don't have REGION->EXIT
3227 block at all. */
3228 if (! exit_bb)
3229 return;
3230
75a70cf9 3231 /* The last insn in the block will be the parallel's GIMPLE_OMP_RETURN. The
3232 workshare's GIMPLE_OMP_RETURN will be in a preceding block. The kinds of
61e47ac8 3233 statements that can appear in between are extremely limited -- no
3234 memory operations at all. Here, we allow nothing at all, so the
75a70cf9 3235 only thing we allow to precede this GIMPLE_OMP_RETURN is a label. */
3236 gsi = gsi_last_bb (exit_bb);
3237 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_RETURN);
3238 gsi_prev (&gsi);
3239 if (!gsi_end_p (gsi) && gimple_code (gsi_stmt (gsi)) != GIMPLE_LABEL)
773c5ba7 3240 return;
1e8e9920 3241
61e47ac8 3242 FOR_EACH_EDGE (e, ei, exit_bb->preds)
3243 {
75a70cf9 3244 gsi = gsi_last_bb (e->src);
3245 if (gsi_end_p (gsi))
61e47ac8 3246 continue;
75a70cf9 3247 stmt = gsi_stmt (gsi);
4a04f4b4 3248 if (gimple_code (stmt) == GIMPLE_OMP_RETURN
3249 && !gimple_omp_return_nowait_p (stmt))
3250 {
3251 /* OpenMP 3.0 tasks unfortunately prevent this optimization
3252 in many cases. If there could be tasks queued, the barrier
3253 might be needed to let the tasks run before some local
3254 variable of the parallel that the task uses as shared
3255 runs out of scope. The task can be spawned either
3256 from within current function (this would be easy to check)
3257 or from some function it calls and gets passed an address
3258 of such a variable. */
3259 if (any_addressable_vars < 0)
3260 {
3261 gimple parallel_stmt = last_stmt (region->entry);
3262 tree child_fun = gimple_omp_parallel_child_fn (parallel_stmt);
2ab2ce89 3263 tree local_decls, block, decl;
3264 unsigned ix;
4a04f4b4 3265
3266 any_addressable_vars = 0;
2ab2ce89 3267 FOR_EACH_LOCAL_DECL (DECL_STRUCT_FUNCTION (child_fun), ix, decl)
3268 if (TREE_ADDRESSABLE (decl))
4a04f4b4 3269 {
3270 any_addressable_vars = 1;
3271 break;
3272 }
3273 for (block = gimple_block (stmt);
3274 !any_addressable_vars
3275 && block
3276 && TREE_CODE (block) == BLOCK;
3277 block = BLOCK_SUPERCONTEXT (block))
3278 {
3279 for (local_decls = BLOCK_VARS (block);
3280 local_decls;
1767a056 3281 local_decls = DECL_CHAIN (local_decls))
4a04f4b4 3282 if (TREE_ADDRESSABLE (local_decls))
3283 {
3284 any_addressable_vars = 1;
3285 break;
3286 }
3287 if (block == gimple_block (parallel_stmt))
3288 break;
3289 }
3290 }
3291 if (!any_addressable_vars)
3292 gimple_omp_return_set_nowait (stmt);
3293 }
61e47ac8 3294 }
1e8e9920 3295}
3296
61e47ac8 3297static void
3298remove_exit_barriers (struct omp_region *region)
3299{
75a70cf9 3300 if (region->type == GIMPLE_OMP_PARALLEL)
61e47ac8 3301 remove_exit_barrier (region);
3302
3303 if (region->inner)
3304 {
3305 region = region->inner;
3306 remove_exit_barriers (region);
3307 while (region->next)
3308 {
3309 region = region->next;
3310 remove_exit_barriers (region);
3311 }
3312 }
3313}
773c5ba7 3314
658b4427 3315/* Optimize omp_get_thread_num () and omp_get_num_threads ()
3316 calls. These can't be declared as const functions, but
3317 within one parallel body they are constant, so they can be
3318 transformed there into __builtin_omp_get_{thread_num,num_threads} ()
fd6481cf 3319 which are declared const. Similarly for a task body, except
 3320 that in an untied task omp_get_thread_num () can change at any task
3321 scheduling point. */
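/* E.g. (illustration only), inside one parallel body

       a = omp_get_num_threads ();
       ...
       b = omp_get_num_threads ();

   both calls can be redirected to the const
   __builtin_omp_get_num_threads (), letting later passes CSE the
   second call away. */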
658b4427 3322
3323static void
75a70cf9 3324optimize_omp_library_calls (gimple entry_stmt)
658b4427 3325{
3326 basic_block bb;
75a70cf9 3327 gimple_stmt_iterator gsi;
b9a16870 3328 tree thr_num_tree = builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM);
3329 tree thr_num_id = DECL_ASSEMBLER_NAME (thr_num_tree);
3330 tree num_thr_tree = builtin_decl_explicit (BUILT_IN_OMP_GET_NUM_THREADS);
3331 tree num_thr_id = DECL_ASSEMBLER_NAME (num_thr_tree);
75a70cf9 3332 bool untied_task = (gimple_code (entry_stmt) == GIMPLE_OMP_TASK
3333 && find_omp_clause (gimple_omp_task_clauses (entry_stmt),
fd6481cf 3334 OMP_CLAUSE_UNTIED) != NULL);
658b4427 3335
3336 FOR_EACH_BB (bb)
75a70cf9 3337 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
658b4427 3338 {
75a70cf9 3339 gimple call = gsi_stmt (gsi);
658b4427 3340 tree decl;
3341
75a70cf9 3342 if (is_gimple_call (call)
3343 && (decl = gimple_call_fndecl (call))
658b4427 3344 && DECL_EXTERNAL (decl)
3345 && TREE_PUBLIC (decl)
3346 && DECL_INITIAL (decl) == NULL)
3347 {
3348 tree built_in;
3349
3350 if (DECL_NAME (decl) == thr_num_id)
fd6481cf 3351 {
3352 /* In #pragma omp task untied omp_get_thread_num () can change
3353 during the execution of the task region. */
3354 if (untied_task)
3355 continue;
b9a16870 3356 built_in = builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM);
fd6481cf 3357 }
658b4427 3358 else if (DECL_NAME (decl) == num_thr_id)
b9a16870 3359 built_in = builtin_decl_explicit (BUILT_IN_OMP_GET_NUM_THREADS);
658b4427 3360 else
3361 continue;
3362
3363 if (DECL_ASSEMBLER_NAME (decl) != DECL_ASSEMBLER_NAME (built_in)
75a70cf9 3364 || gimple_call_num_args (call) != 0)
658b4427 3365 continue;
3366
3367 if (flag_exceptions && !TREE_NOTHROW (decl))
3368 continue;
3369
3370 if (TREE_CODE (TREE_TYPE (decl)) != FUNCTION_TYPE
1ea6a73c 3371 || !types_compatible_p (TREE_TYPE (TREE_TYPE (decl)),
3372 TREE_TYPE (TREE_TYPE (built_in))))
658b4427 3373 continue;
3374
0acacf9e 3375 gimple_call_set_fndecl (call, built_in);
658b4427 3376 }
3377 }
3378}
3379
fd6481cf 3380/* Expand the OpenMP parallel or task directive starting at REGION. */
1e8e9920 3381
3382static void
fd6481cf 3383expand_omp_taskreg (struct omp_region *region)
1e8e9920 3384{
773c5ba7 3385 basic_block entry_bb, exit_bb, new_bb;
87d4aa85 3386 struct function *child_cfun;
414c3a2c 3387 tree child_fn, block, t;
75a70cf9 3388 gimple_stmt_iterator gsi;
3389 gimple entry_stmt, stmt;
773c5ba7 3390 edge e;
414c3a2c 3391 VEC(tree,gc) *ws_args;
773c5ba7 3392
61e47ac8 3393 entry_stmt = last_stmt (region->entry);
75a70cf9 3394 child_fn = gimple_omp_taskreg_child_fn (entry_stmt);
773c5ba7 3395 child_cfun = DECL_STRUCT_FUNCTION (child_fn);
773c5ba7 3396
61e47ac8 3397 entry_bb = region->entry;
3398 exit_bb = region->exit;
773c5ba7 3399
773c5ba7 3400 if (is_combined_parallel (region))
61e47ac8 3401 ws_args = region->ws_args;
773c5ba7 3402 else
414c3a2c 3403 ws_args = NULL;
1e8e9920 3404
61e47ac8 3405 if (child_cfun->cfg)
1e8e9920 3406 {
773c5ba7 3407 /* Due to inlining, it may happen that we have already outlined
3408 the region, in which case all we need to do is make the
3409 sub-graph unreachable and emit the parallel call. */
3410 edge entry_succ_e, exit_succ_e;
75a70cf9 3411 gimple_stmt_iterator gsi;
773c5ba7 3412
3413 entry_succ_e = single_succ_edge (entry_bb);
773c5ba7 3414
75a70cf9 3415 gsi = gsi_last_bb (entry_bb);
3416 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_PARALLEL
3417 || gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_TASK);
3418 gsi_remove (&gsi, true);
773c5ba7 3419
3420 new_bb = entry_bb;
03ed154b 3421 if (exit_bb)
3422 {
3423 exit_succ_e = single_succ_edge (exit_bb);
3424 make_edge (new_bb, exit_succ_e->dest, EDGE_FALLTHRU);
3425 }
79acaae1 3426 remove_edge_and_dominated_blocks (entry_succ_e);
1e8e9920 3427 }
773c5ba7 3428 else
3429 {
501bdd19 3430 unsigned srcidx, dstidx, num;
2ab2ce89 3431
773c5ba7 3432 /* If the parallel region needs data sent from the parent
3480139d 3433 function, then the very first statement (except possible
3434 tree profile counter updates) of the parallel body
773c5ba7 3435 is a copy assignment .OMP_DATA_I = &.OMP_DATA_O. Since
3436 &.OMP_DATA_O is passed as an argument to the child function,
3437 we need to replace it with the argument as seen by the child
3438 function.
3439
3440 In most cases, this will end up being the identity assignment
3441 .OMP_DATA_I = .OMP_DATA_I. However, if the parallel body had
3442 a function call that has been inlined, the original PARM_DECL
 3443	 .OMP_DATA_I may have been converted into a different local
 3444	 variable, in which case we need to keep the assignment.  */
75a70cf9 3445 if (gimple_omp_taskreg_data_arg (entry_stmt))
773c5ba7 3446 {
3447 basic_block entry_succ_bb = single_succ (entry_bb);
75a70cf9 3448 gimple_stmt_iterator gsi;
3449 tree arg, narg;
3450 gimple parcopy_stmt = NULL;
1e8e9920 3451
75a70cf9 3452 for (gsi = gsi_start_bb (entry_succ_bb); ; gsi_next (&gsi))
3480139d 3453 {
75a70cf9 3454 gimple stmt;
3480139d 3455
75a70cf9 3456 gcc_assert (!gsi_end_p (gsi));
3457 stmt = gsi_stmt (gsi);
3458 if (gimple_code (stmt) != GIMPLE_ASSIGN)
cc6b725b 3459 continue;
3460
75a70cf9 3461 if (gimple_num_ops (stmt) == 2)
3480139d 3462 {
75a70cf9 3463 tree arg = gimple_assign_rhs1 (stmt);
3464
 3465		  /* We're ignoring the subcode because we're
 3466		     effectively doing a STRIP_NOPS.  */
3467
3468 if (TREE_CODE (arg) == ADDR_EXPR
3469 && TREE_OPERAND (arg, 0)
3470 == gimple_omp_taskreg_data_arg (entry_stmt))
3471 {
3472 parcopy_stmt = stmt;
3473 break;
3474 }
3480139d 3475 }
3476 }
79acaae1 3477
75a70cf9 3478 gcc_assert (parcopy_stmt != NULL);
79acaae1 3479 arg = DECL_ARGUMENTS (child_fn);
3480
3481 if (!gimple_in_ssa_p (cfun))
3482 {
75a70cf9 3483 if (gimple_assign_lhs (parcopy_stmt) == arg)
3484 gsi_remove (&gsi, true);
79acaae1 3485 else
75a70cf9 3486 {
3487 /* ?? Is setting the subcode really necessary ?? */
3488 gimple_omp_set_subcode (parcopy_stmt, TREE_CODE (arg));
3489 gimple_assign_set_rhs1 (parcopy_stmt, arg);
3490 }
79acaae1 3491 }
3492 else
3493 {
 3494	      /* If we are in SSA form, we must load the value from the default
 3495		 definition of the argument.  No default definition should exist
 3496		 yet, since the argument is never used uninitialized.  */
c6dfe037 3497 gcc_assert (ssa_default_def (cfun, arg) == NULL);
75a70cf9 3498 narg = make_ssa_name (arg, gimple_build_nop ());
c6dfe037 3499 set_ssa_default_def (cfun, arg, narg);
75a70cf9 3500 /* ?? Is setting the subcode really necessary ?? */
3501 gimple_omp_set_subcode (parcopy_stmt, TREE_CODE (narg));
3502 gimple_assign_set_rhs1 (parcopy_stmt, narg);
79acaae1 3503 update_stmt (parcopy_stmt);
3504 }
773c5ba7 3505 }
3506
3507 /* Declare local variables needed in CHILD_CFUN. */
3508 block = DECL_INITIAL (child_fn);
2ab2ce89 3509 BLOCK_VARS (block) = vec2chain (child_cfun->local_decls);
e1a7ccb9 3510 /* The gimplifier could record temporaries in parallel/task block
3511 rather than in containing function's local_decls chain,
3512 which would mean cgraph missed finalizing them. Do it now. */
1767a056 3513 for (t = BLOCK_VARS (block); t; t = DECL_CHAIN (t))
e1a7ccb9 3514 if (TREE_CODE (t) == VAR_DECL
3515 && TREE_STATIC (t)
3516 && !DECL_EXTERNAL (t))
3517 varpool_finalize_decl (t);
75a70cf9 3518 DECL_SAVED_TREE (child_fn) = NULL;
e3a19533 3519 /* We'll create a CFG for child_fn, so no gimple body is needed. */
3520 gimple_set_body (child_fn, NULL);
1d22f541 3521 TREE_USED (block) = 1;
773c5ba7 3522
79acaae1 3523 /* Reset DECL_CONTEXT on function arguments. */
1767a056 3524 for (t = DECL_ARGUMENTS (child_fn); t; t = DECL_CHAIN (t))
773c5ba7 3525 DECL_CONTEXT (t) = child_fn;
3526
75a70cf9 3527 /* Split ENTRY_BB at GIMPLE_OMP_PARALLEL or GIMPLE_OMP_TASK,
3528 so that it can be moved to the child function. */
3529 gsi = gsi_last_bb (entry_bb);
3530 stmt = gsi_stmt (gsi);
3531 gcc_assert (stmt && (gimple_code (stmt) == GIMPLE_OMP_PARALLEL
3532 || gimple_code (stmt) == GIMPLE_OMP_TASK));
3533 gsi_remove (&gsi, true);
3534 e = split_block (entry_bb, stmt);
773c5ba7 3535 entry_bb = e->dest;
3536 single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
3537
75a70cf9 3538 /* Convert GIMPLE_OMP_RETURN into a RETURN_EXPR. */
5056ba1a 3539 if (exit_bb)
3540 {
75a70cf9 3541 gsi = gsi_last_bb (exit_bb);
3542 gcc_assert (!gsi_end_p (gsi)
3543 && gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_RETURN);
3544 stmt = gimple_build_return (NULL);
3545 gsi_insert_after (&gsi, stmt, GSI_SAME_STMT);
3546 gsi_remove (&gsi, true);
5056ba1a 3547 }
79acaae1 3548
3549 /* Move the parallel region into CHILD_CFUN. */
48e1416a 3550
79acaae1 3551 if (gimple_in_ssa_p (cfun))
3552 {
bcaa2770 3553 init_tree_ssa (child_cfun);
5084b2e4 3554 init_ssa_operands (child_cfun);
3555 child_cfun->gimple_df->in_ssa_p = true;
1d22f541 3556 block = NULL_TREE;
79acaae1 3557 }
1d22f541 3558 else
75a70cf9 3559 block = gimple_block (entry_stmt);
1d22f541 3560
3561 new_bb = move_sese_region_to_fn (child_cfun, entry_bb, exit_bb, block);
79acaae1 3562 if (exit_bb)
3563 single_succ_edge (new_bb)->flags = EDGE_FALLTHRU;
3564
1d22f541 3565 /* Remove non-local VAR_DECLs from child_cfun->local_decls list. */
501bdd19 3566 num = VEC_length (tree, child_cfun->local_decls);
3567 for (srcidx = 0, dstidx = 0; srcidx < num; srcidx++)
3568 {
3569 t = VEC_index (tree, child_cfun->local_decls, srcidx);
3570 if (DECL_CONTEXT (t) == cfun->decl)
3571 continue;
3572 if (srcidx != dstidx)
3573 VEC_replace (tree, child_cfun->local_decls, dstidx, t);
3574 dstidx++;
3575 }
3576 if (dstidx != num)
3577 VEC_truncate (tree, child_cfun->local_decls, dstidx);
1d22f541 3578
79acaae1 3579 /* Inform the callgraph about the new function. */
3580 DECL_STRUCT_FUNCTION (child_fn)->curr_properties
79f958cb 3581 = cfun->curr_properties & ~PROP_loops;
79acaae1 3582 cgraph_add_new_function (child_fn, true);
3583
3584 /* Fix the callgraph edges for child_cfun. Those for cfun will be
3585 fixed in a following pass. */
3586 push_cfun (child_cfun);
658b4427 3587 if (optimize)
fd6481cf 3588 optimize_omp_library_calls (entry_stmt);
79acaae1 3589 rebuild_cgraph_edges ();
fbe86b1b 3590
3591 /* Some EH regions might become dead, see PR34608. If
3592 pass_cleanup_cfg isn't the first pass to happen with the
3593 new child, these dead EH edges might cause problems.
3594 Clean them up now. */
3595 if (flag_exceptions)
3596 {
3597 basic_block bb;
fbe86b1b 3598 bool changed = false;
3599
fbe86b1b 3600 FOR_EACH_BB (bb)
75a70cf9 3601 changed |= gimple_purge_dead_eh_edges (bb);
fbe86b1b 3602 if (changed)
3603 cleanup_tree_cfg ();
fbe86b1b 3604 }
dd277d48 3605 if (gimple_in_ssa_p (cfun))
3606 update_ssa (TODO_update_ssa);
79acaae1 3607 pop_cfun ();
773c5ba7 3608 }
48e1416a 3609
773c5ba7 3610 /* Emit a library call to launch the children threads. */
75a70cf9 3611 if (gimple_code (entry_stmt) == GIMPLE_OMP_PARALLEL)
fd6481cf 3612 expand_parallel_call (region, new_bb, entry_stmt, ws_args);
3613 else
3614 expand_task_call (new_bb, entry_stmt);
083152fb 3615 if (gimple_in_ssa_p (cfun))
3616 update_ssa (TODO_update_ssa_only_virtuals);
1e8e9920 3617}
3618
773c5ba7 3619
3620/* A subroutine of expand_omp_for. Generate code for a parallel
1e8e9920 3621 loop with any schedule. Given parameters:
3622
3623 for (V = N1; V cond N2; V += STEP) BODY;
3624
3625 where COND is "<" or ">", we generate pseudocode
3626
3627 more = GOMP_loop_foo_start (N1, N2, STEP, CHUNK, &istart0, &iend0);
773c5ba7 3628 if (more) goto L0; else goto L3;
1e8e9920 3629 L0:
3630 V = istart0;
3631 iend = iend0;
3632 L1:
3633 BODY;
3634 V += STEP;
773c5ba7 3635 if (V cond iend) goto L1; else goto L2;
1e8e9920 3636 L2:
773c5ba7 3637 if (GOMP_loop_foo_next (&istart0, &iend0)) goto L0; else goto L3;
3638 L3:
1e8e9920 3639
773c5ba7 3640 If this is a combined omp parallel loop, instead of the call to
fd6481cf 3641 GOMP_loop_foo_start, we call GOMP_loop_foo_next.
3642
3643 For collapsed loops, given parameters:
3644 collapse(3)
3645 for (V1 = N11; V1 cond1 N12; V1 += STEP1)
3646 for (V2 = N21; V2 cond2 N22; V2 += STEP2)
3647 for (V3 = N31; V3 cond3 N32; V3 += STEP3)
3648 BODY;
3649
3650 we generate pseudocode
3651
3652 if (cond3 is <)
3653 adj = STEP3 - 1;
3654 else
3655 adj = STEP3 + 1;
3656 count3 = (adj + N32 - N31) / STEP3;
3657 if (cond2 is <)
3658 adj = STEP2 - 1;
3659 else
3660 adj = STEP2 + 1;
3661 count2 = (adj + N22 - N21) / STEP2;
3662 if (cond1 is <)
3663 adj = STEP1 - 1;
3664 else
3665 adj = STEP1 + 1;
3666 count1 = (adj + N12 - N11) / STEP1;
3667 count = count1 * count2 * count3;
3668 more = GOMP_loop_foo_start (0, count, 1, CHUNK, &istart0, &iend0);
3669 if (more) goto L0; else goto L3;
3670 L0:
3671 V = istart0;
3672 T = V;
3673 V3 = N31 + (T % count3) * STEP3;
3674 T = T / count3;
3675 V2 = N21 + (T % count2) * STEP2;
3676 T = T / count2;
3677 V1 = N11 + T * STEP1;
3678 iend = iend0;
3679 L1:
3680 BODY;
3681 V += 1;
3682 if (V < iend) goto L10; else goto L2;
3683 L10:
3684 V3 += STEP3;
3685 if (V3 cond3 N32) goto L1; else goto L11;
3686 L11:
3687 V3 = N31;
3688 V2 += STEP2;
3689 if (V2 cond2 N22) goto L1; else goto L12;
3690 L12:
3691 V2 = N21;
3692 V1 += STEP1;
3693 goto L1;
3694 L2:
3695 if (GOMP_loop_foo_next (&istart0, &iend0)) goto L0; else goto L3;
3696 L3:
3697
3698 */
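/* Example (added for illustration): a source loop that reaches this
   generic expansion, because schedule(runtime) rules out the static
   expanders below:

     #pragma omp parallel for schedule(runtime)
     for (i = 0; i < n; i++)
       body (i);

   Here GOMP_loop_foo_start/GOMP_loop_foo_next stand for
   GOMP_loop_runtime_start and GOMP_loop_runtime_next.  */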
1e8e9920 3699
61e47ac8 3700static void
773c5ba7 3701expand_omp_for_generic (struct omp_region *region,
3702 struct omp_for_data *fd,
1e8e9920 3703 enum built_in_function start_fn,
3704 enum built_in_function next_fn)
3705{
75a70cf9 3706 tree type, istart0, iend0, iend;
fd6481cf 3707 tree t, vmain, vback, bias = NULL_TREE;
3708 basic_block entry_bb, cont_bb, exit_bb, l0_bb, l1_bb, collapse_bb;
03ed154b 3709 basic_block l2_bb = NULL, l3_bb = NULL;
75a70cf9 3710 gimple_stmt_iterator gsi;
3711 gimple stmt;
773c5ba7 3712 bool in_combined_parallel = is_combined_parallel (region);
ac6e3339 3713 bool broken_loop = region->cont == NULL;
79acaae1 3714 edge e, ne;
fd6481cf 3715 tree *counts = NULL;
3716 int i;
ac6e3339 3717
3718 gcc_assert (!broken_loop || !in_combined_parallel);
fd6481cf 3719 gcc_assert (fd->iter_type == long_integer_type_node
3720 || !in_combined_parallel);
1e8e9920 3721
fd6481cf 3722 type = TREE_TYPE (fd->loop.v);
3723 istart0 = create_tmp_var (fd->iter_type, ".istart0");
3724 iend0 = create_tmp_var (fd->iter_type, ".iend0");
6d63fc03 3725 TREE_ADDRESSABLE (istart0) = 1;
3726 TREE_ADDRESSABLE (iend0) = 1;
1e8e9920 3727
fd6481cf 3728 /* See if we need to bias by LLONG_MIN. */
3729 if (fd->iter_type == long_long_unsigned_type_node
3730 && TREE_CODE (type) == INTEGER_TYPE
3731 && !TYPE_UNSIGNED (type))
3732 {
3733 tree n1, n2;
3734
3735 if (fd->loop.cond_code == LT_EXPR)
3736 {
3737 n1 = fd->loop.n1;
3738 n2 = fold_build2 (PLUS_EXPR, type, fd->loop.n2, fd->loop.step);
3739 }
3740 else
3741 {
3742 n1 = fold_build2 (MINUS_EXPR, type, fd->loop.n2, fd->loop.step);
3743 n2 = fd->loop.n1;
3744 }
3745 if (TREE_CODE (n1) != INTEGER_CST
3746 || TREE_CODE (n2) != INTEGER_CST
3747 || ((tree_int_cst_sgn (n1) < 0) ^ (tree_int_cst_sgn (n2) < 0)))
3748 bias = fold_convert (fd->iter_type, TYPE_MIN_VALUE (type));
3749 }
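  /* Worked example (added): with V of type long long iterated through
     iter_type unsigned long long, a loop like

       for (V = -4; V < 4; V++)

     has bounds of mixed sign that the unsigned runtime interface cannot
     order directly.  Each signed bound v is mapped to v + 2^63 (the bias
     is TYPE_MIN_VALUE reinterpreted as unsigned), so -4 and 4 become
     2^63 - 4 and 2^63 + 4, preserving the iteration order; the bias is
     subtracted again when istart0/iend0 are copied back into V below.  */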
3750
61e47ac8 3751 entry_bb = region->entry;
03ed154b 3752 cont_bb = region->cont;
fd6481cf 3753 collapse_bb = NULL;
ac6e3339 3754 gcc_assert (EDGE_COUNT (entry_bb->succs) == 2);
3755 gcc_assert (broken_loop
3756 || BRANCH_EDGE (entry_bb)->dest == FALLTHRU_EDGE (cont_bb)->dest);
3757 l0_bb = split_edge (FALLTHRU_EDGE (entry_bb));
3758 l1_bb = single_succ (l0_bb);
3759 if (!broken_loop)
03ed154b 3760 {
3761 l2_bb = create_empty_bb (cont_bb);
ac6e3339 3762 gcc_assert (BRANCH_EDGE (cont_bb)->dest == l1_bb);
3763 gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
03ed154b 3764 }
ac6e3339 3765 else
3766 l2_bb = NULL;
3767 l3_bb = BRANCH_EDGE (entry_bb)->dest;
3768 exit_bb = region->exit;
773c5ba7 3769
75a70cf9 3770 gsi = gsi_last_bb (entry_bb);
fd6481cf 3771
75a70cf9 3772 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
fd6481cf 3773 if (fd->collapse > 1)
3774 {
 3775      /* Collapsed loops need work for expansion in SSA form.  */
3776 gcc_assert (!gimple_in_ssa_p (cfun));
3777 counts = (tree *) alloca (fd->collapse * sizeof (tree));
3778 for (i = 0; i < fd->collapse; i++)
3779 {
3780 tree itype = TREE_TYPE (fd->loops[i].v);
3781
3782 if (POINTER_TYPE_P (itype))
3cea8318 3783 itype = signed_type_for (itype);
fd6481cf 3784 t = build_int_cst (itype, (fd->loops[i].cond_code == LT_EXPR
3785 ? -1 : 1));
3786 t = fold_build2 (PLUS_EXPR, itype,
3787 fold_convert (itype, fd->loops[i].step), t);
3788 t = fold_build2 (PLUS_EXPR, itype, t,
3789 fold_convert (itype, fd->loops[i].n2));
3790 t = fold_build2 (MINUS_EXPR, itype, t,
3791 fold_convert (itype, fd->loops[i].n1));
3792 if (TYPE_UNSIGNED (itype) && fd->loops[i].cond_code == GT_EXPR)
3793 t = fold_build2 (TRUNC_DIV_EXPR, itype,
3794 fold_build1 (NEGATE_EXPR, itype, t),
3795 fold_build1 (NEGATE_EXPR, itype,
3796 fold_convert (itype,
3797 fd->loops[i].step)));
3798 else
3799 t = fold_build2 (TRUNC_DIV_EXPR, itype, t,
3800 fold_convert (itype, fd->loops[i].step));
3801 t = fold_convert (type, t);
3802 if (TREE_CODE (t) == INTEGER_CST)
3803 counts[i] = t;
3804 else
3805 {
072f7ab1 3806 counts[i] = create_tmp_reg (type, ".count");
75a70cf9 3807 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3808 true, GSI_SAME_STMT);
3809 stmt = gimple_build_assign (counts[i], t);
3810 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
fd6481cf 3811 }
3812 if (SSA_VAR_P (fd->loop.n2))
3813 {
3814 if (i == 0)
75a70cf9 3815 t = counts[0];
fd6481cf 3816 else
3817 {
3818 t = fold_build2 (MULT_EXPR, type, fd->loop.n2, counts[i]);
75a70cf9 3819 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3820 true, GSI_SAME_STMT);
fd6481cf 3821 }
75a70cf9 3822 stmt = gimple_build_assign (fd->loop.n2, t);
3823 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
fd6481cf 3824 }
3825 }
3826 }
79acaae1 3827 if (in_combined_parallel)
3828 {
3829 /* In a combined parallel loop, emit a call to
3830 GOMP_loop_foo_next. */
b9a16870 3831 t = build_call_expr (builtin_decl_explicit (next_fn), 2,
79acaae1 3832 build_fold_addr_expr (istart0),
3833 build_fold_addr_expr (iend0));
3834 }
3835 else
1e8e9920 3836 {
c2f47e15 3837 tree t0, t1, t2, t3, t4;
773c5ba7 3838 /* If this is not a combined parallel loop, emit a call to
3839 GOMP_loop_foo_start in ENTRY_BB. */
c2f47e15 3840 t4 = build_fold_addr_expr (iend0);
3841 t3 = build_fold_addr_expr (istart0);
fd6481cf 3842 t2 = fold_convert (fd->iter_type, fd->loop.step);
c799f233 3843 if (POINTER_TYPE_P (type)
3844 && TYPE_PRECISION (type) != TYPE_PRECISION (fd->iter_type))
3845 {
3846 /* Avoid casting pointers to integer of a different size. */
3cea8318 3847 tree itype = signed_type_for (type);
c799f233 3848 t1 = fold_convert (fd->iter_type, fold_convert (itype, fd->loop.n2));
3849 t0 = fold_convert (fd->iter_type, fold_convert (itype, fd->loop.n1));
3850 }
3851 else
3852 {
3853 t1 = fold_convert (fd->iter_type, fd->loop.n2);
3854 t0 = fold_convert (fd->iter_type, fd->loop.n1);
3855 }
fd6481cf 3856 if (bias)
1e8e9920 3857 {
fd6481cf 3858 t1 = fold_build2 (PLUS_EXPR, fd->iter_type, t1, bias);
3859 t0 = fold_build2 (PLUS_EXPR, fd->iter_type, t0, bias);
3860 }
3861 if (fd->iter_type == long_integer_type_node)
3862 {
3863 if (fd->chunk_size)
3864 {
3865 t = fold_convert (fd->iter_type, fd->chunk_size);
b9a16870 3866 t = build_call_expr (builtin_decl_explicit (start_fn),
3867 6, t0, t1, t2, t, t3, t4);
fd6481cf 3868 }
3869 else
b9a16870 3870 t = build_call_expr (builtin_decl_explicit (start_fn),
3871 5, t0, t1, t2, t3, t4);
1e8e9920 3872 }
c2f47e15 3873 else
fd6481cf 3874 {
3875 tree t5;
3876 tree c_bool_type;
b9a16870 3877 tree bfn_decl;
fd6481cf 3878
 3879	  /* The GOMP_loop_ull_*start functions have an additional boolean
 3880	     argument, true for < loops and false for > loops.
3881 In Fortran, the C bool type can be different from
3882 boolean_type_node. */
b9a16870 3883 bfn_decl = builtin_decl_explicit (start_fn);
3884 c_bool_type = TREE_TYPE (TREE_TYPE (bfn_decl));
fd6481cf 3885 t5 = build_int_cst (c_bool_type,
3886 fd->loop.cond_code == LT_EXPR ? 1 : 0);
3887 if (fd->chunk_size)
3888 {
fd6481cf 3890 t = fold_convert (fd->iter_type, fd->chunk_size);
b9a16870 3891 t = build_call_expr (bfn_decl, 7, t5, t0, t1, t2, t, t3, t4);
fd6481cf 3892 }
3893 else
b9a16870 3894 t = build_call_expr (builtin_decl_explicit (start_fn),
3895 6, t5, t0, t1, t2, t3, t4);
fd6481cf 3896 }
1e8e9920 3897 }
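  /* Hedged sketch (added) of the runtime interfaces assumed above:

       bool GOMP_loop_static_start (long start, long end, long incr,
				    long chunk_size, long *istart,
				    long *iend);
       bool GOMP_loop_ull_static_start (bool up,
					unsigned long long start,
					unsigned long long end,
					unsigned long long incr,
					unsigned long long chunk_size,
					unsigned long long *istart,
					unsigned long long *iend);

     The chunkless variants (e.g. GOMP_loop_runtime_start) omit
     CHUNK_SIZE, which is why the calls built above take 5, 6 or 7
     arguments; T5 is the UP flag of the ull variant.  */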
fd6481cf 3898 if (TREE_TYPE (t) != boolean_type_node)
3899 t = fold_build2 (NE_EXPR, boolean_type_node,
3900 t, build_int_cst (TREE_TYPE (t), 0));
75a70cf9 3901 t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3902 true, GSI_SAME_STMT);
3903 gsi_insert_after (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT);
79acaae1 3904
75a70cf9 3905 /* Remove the GIMPLE_OMP_FOR statement. */
3906 gsi_remove (&gsi, true);
1e8e9920 3907
773c5ba7 3908 /* Iteration setup for sequential loop goes in L0_BB. */
75a70cf9 3909 gsi = gsi_start_bb (l0_bb);
1efcacec 3910 t = istart0;
fd6481cf 3911 if (bias)
1efcacec 3912 t = fold_build2 (MINUS_EXPR, fd->iter_type, t, bias);
3913 if (POINTER_TYPE_P (type))
3cea8318 3914 t = fold_convert (signed_type_for (type), t);
1efcacec 3915 t = fold_convert (type, t);
75a70cf9 3916 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3917 false, GSI_CONTINUE_LINKING);
3918 stmt = gimple_build_assign (fd->loop.v, t);
3919 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
1e8e9920 3920
1efcacec 3921 t = iend0;
fd6481cf 3922 if (bias)
1efcacec 3923 t = fold_build2 (MINUS_EXPR, fd->iter_type, t, bias);
3924 if (POINTER_TYPE_P (type))
3cea8318 3925 t = fold_convert (signed_type_for (type), t);
1efcacec 3926 t = fold_convert (type, t);
75a70cf9 3927 iend = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3928 false, GSI_CONTINUE_LINKING);
fd6481cf 3929 if (fd->collapse > 1)
3930 {
072f7ab1 3931 tree tem = create_tmp_reg (type, ".tem");
75a70cf9 3932 stmt = gimple_build_assign (tem, fd->loop.v);
3933 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
fd6481cf 3934 for (i = fd->collapse - 1; i >= 0; i--)
3935 {
3936 tree vtype = TREE_TYPE (fd->loops[i].v), itype;
3937 itype = vtype;
3938 if (POINTER_TYPE_P (vtype))
3cea8318 3939 itype = signed_type_for (vtype);
fd6481cf 3940 t = fold_build2 (TRUNC_MOD_EXPR, type, tem, counts[i]);
3941 t = fold_convert (itype, t);
c821ef7d 3942 t = fold_build2 (MULT_EXPR, itype, t,
3943 fold_convert (itype, fd->loops[i].step));
fd6481cf 3944 if (POINTER_TYPE_P (vtype))
2cc66f2a 3945 t = fold_build_pointer_plus (fd->loops[i].n1, t);
fd6481cf 3946 else
3947 t = fold_build2 (PLUS_EXPR, itype, fd->loops[i].n1, t);
75a70cf9 3948 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3949 false, GSI_CONTINUE_LINKING);
3950 stmt = gimple_build_assign (fd->loops[i].v, t);
3951 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
fd6481cf 3952 if (i != 0)
3953 {
3954 t = fold_build2 (TRUNC_DIV_EXPR, type, tem, counts[i]);
75a70cf9 3955 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3956 false, GSI_CONTINUE_LINKING);
3957 stmt = gimple_build_assign (tem, t);
3958 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
fd6481cf 3959 }
3960 }
3961 }
773c5ba7 3962
ac6e3339 3963 if (!broken_loop)
03ed154b 3964 {
ac6e3339 3965 /* Code to control the increment and predicate for the sequential
3966 loop goes in the CONT_BB. */
75a70cf9 3967 gsi = gsi_last_bb (cont_bb);
3968 stmt = gsi_stmt (gsi);
3969 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_CONTINUE);
3970 vmain = gimple_omp_continue_control_use (stmt);
3971 vback = gimple_omp_continue_control_def (stmt);
79acaae1 3972
fd6481cf 3973 if (POINTER_TYPE_P (type))
2cc66f2a 3974 t = fold_build_pointer_plus (vmain, fd->loop.step);
fd6481cf 3975 else
3976 t = fold_build2 (PLUS_EXPR, type, vmain, fd->loop.step);
75a70cf9 3977 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3978 true, GSI_SAME_STMT);
3979 stmt = gimple_build_assign (vback, t);
3980 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
3981
fd6481cf 3982 t = build2 (fd->loop.cond_code, boolean_type_node, vback, iend);
75a70cf9 3983 stmt = gimple_build_cond_empty (t);
3984 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
773c5ba7 3985
75a70cf9 3986 /* Remove GIMPLE_OMP_CONTINUE. */
3987 gsi_remove (&gsi, true);
773c5ba7 3988
fd6481cf 3989 if (fd->collapse > 1)
3990 {
3991 basic_block last_bb, bb;
3992
3993 last_bb = cont_bb;
3994 for (i = fd->collapse - 1; i >= 0; i--)
3995 {
3996 tree vtype = TREE_TYPE (fd->loops[i].v);
3997
3998 bb = create_empty_bb (last_bb);
75a70cf9 3999 gsi = gsi_start_bb (bb);
fd6481cf 4000
4001 if (i < fd->collapse - 1)
4002 {
4003 e = make_edge (last_bb, bb, EDGE_FALSE_VALUE);
4004 e->probability = REG_BR_PROB_BASE / 8;
4005
75a70cf9 4006 t = fd->loops[i + 1].n1;
4007 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
4008 false, GSI_CONTINUE_LINKING);
4009 stmt = gimple_build_assign (fd->loops[i + 1].v, t);
4010 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
fd6481cf 4011 }
4012 else
4013 collapse_bb = bb;
4014
4015 set_immediate_dominator (CDI_DOMINATORS, bb, last_bb);
4016
4017 if (POINTER_TYPE_P (vtype))
2cc66f2a 4018 t = fold_build_pointer_plus (fd->loops[i].v, fd->loops[i].step);
fd6481cf 4019 else
4020 t = fold_build2 (PLUS_EXPR, vtype, fd->loops[i].v,
4021 fd->loops[i].step);
75a70cf9 4022 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
4023 false, GSI_CONTINUE_LINKING);
4024 stmt = gimple_build_assign (fd->loops[i].v, t);
4025 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
fd6481cf 4026
4027 if (i > 0)
4028 {
75a70cf9 4029 t = fd->loops[i].n2;
4030 t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
4031 false, GSI_CONTINUE_LINKING);
fd6481cf 4032 t = fold_build2 (fd->loops[i].cond_code, boolean_type_node,
75a70cf9 4033 fd->loops[i].v, t);
4034 stmt = gimple_build_cond_empty (t);
4035 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
fd6481cf 4036 e = make_edge (bb, l1_bb, EDGE_TRUE_VALUE);
4037 e->probability = REG_BR_PROB_BASE * 7 / 8;
4038 }
4039 else
4040 make_edge (bb, l1_bb, EDGE_FALLTHRU);
4041 last_bb = bb;
4042 }
4043 }
4044
ac6e3339 4045 /* Emit code to get the next parallel iteration in L2_BB. */
75a70cf9 4046 gsi = gsi_start_bb (l2_bb);
773c5ba7 4047
b9a16870 4048 t = build_call_expr (builtin_decl_explicit (next_fn), 2,
ac6e3339 4049 build_fold_addr_expr (istart0),
4050 build_fold_addr_expr (iend0));
75a70cf9 4051 t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
4052 false, GSI_CONTINUE_LINKING);
fd6481cf 4053 if (TREE_TYPE (t) != boolean_type_node)
4054 t = fold_build2 (NE_EXPR, boolean_type_node,
4055 t, build_int_cst (TREE_TYPE (t), 0));
75a70cf9 4056 stmt = gimple_build_cond_empty (t);
4057 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
ac6e3339 4058 }
1e8e9920 4059
61e47ac8 4060 /* Add the loop cleanup function. */
75a70cf9 4061 gsi = gsi_last_bb (exit_bb);
4062 if (gimple_omp_return_nowait_p (gsi_stmt (gsi)))
b9a16870 4063 t = builtin_decl_explicit (BUILT_IN_GOMP_LOOP_END_NOWAIT);
61e47ac8 4064 else
b9a16870 4065 t = builtin_decl_explicit (BUILT_IN_GOMP_LOOP_END);
75a70cf9 4066 stmt = gimple_build_call (t, 0);
4067 gsi_insert_after (&gsi, stmt, GSI_SAME_STMT);
4068 gsi_remove (&gsi, true);
773c5ba7 4069
4070 /* Connect the new blocks. */
79acaae1 4071 find_edge (entry_bb, l0_bb)->flags = EDGE_TRUE_VALUE;
4072 find_edge (entry_bb, l3_bb)->flags = EDGE_FALSE_VALUE;
1e8e9920 4073
ac6e3339 4074 if (!broken_loop)
4075 {
75a70cf9 4076 gimple_seq phis;
4077
79acaae1 4078 e = find_edge (cont_bb, l3_bb);
4079 ne = make_edge (l2_bb, l3_bb, EDGE_FALSE_VALUE);
4080
75a70cf9 4081 phis = phi_nodes (l3_bb);
4082 for (gsi = gsi_start (phis); !gsi_end_p (gsi); gsi_next (&gsi))
4083 {
4084 gimple phi = gsi_stmt (gsi);
4085 SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, ne),
4086 PHI_ARG_DEF_FROM_EDGE (phi, e));
4087 }
79acaae1 4088 remove_edge (e);
4089
ac6e3339 4090 make_edge (cont_bb, l2_bb, EDGE_FALSE_VALUE);
fd6481cf 4091 if (fd->collapse > 1)
4092 {
4093 e = find_edge (cont_bb, l1_bb);
4094 remove_edge (e);
4095 e = make_edge (cont_bb, collapse_bb, EDGE_TRUE_VALUE);
4096 }
4097 else
4098 {
4099 e = find_edge (cont_bb, l1_bb);
4100 e->flags = EDGE_TRUE_VALUE;
4101 }
4102 e->probability = REG_BR_PROB_BASE * 7 / 8;
4103 find_edge (cont_bb, l2_bb)->probability = REG_BR_PROB_BASE / 8;
ac6e3339 4104 make_edge (l2_bb, l0_bb, EDGE_TRUE_VALUE);
79acaae1 4105
4106 set_immediate_dominator (CDI_DOMINATORS, l2_bb,
4107 recompute_dominator (CDI_DOMINATORS, l2_bb));
4108 set_immediate_dominator (CDI_DOMINATORS, l3_bb,
4109 recompute_dominator (CDI_DOMINATORS, l3_bb));
4110 set_immediate_dominator (CDI_DOMINATORS, l0_bb,
4111 recompute_dominator (CDI_DOMINATORS, l0_bb));
4112 set_immediate_dominator (CDI_DOMINATORS, l1_bb,
4113 recompute_dominator (CDI_DOMINATORS, l1_bb));
ac6e3339 4114 }
1e8e9920 4115}
4116
4117
773c5ba7 4118/* A subroutine of expand_omp_for. Generate code for a parallel
4119 loop with static schedule and no specified chunk size. Given
4120 parameters:
1e8e9920 4121
4122 for (V = N1; V cond N2; V += STEP) BODY;
4123
4124 where COND is "<" or ">", we generate pseudocode
4125
4126 if (cond is <)
4127 adj = STEP - 1;
4128 else
4129 adj = STEP + 1;
fd6481cf 4130 if ((__typeof (V)) -1 > 0 && cond is >)
4131 n = -(adj + N2 - N1) / -STEP;
4132 else
4133 n = (adj + N2 - N1) / STEP;
1e8e9920 4134 q = n / nthreads;
31712e83 4135 tt = n % nthreads;
4136 if (threadid < tt) goto L3; else goto L4;
4137 L3:
4138 tt = 0;
4139 q = q + 1;
4140 L4:
4141 s0 = q * threadid + tt;
4142 e0 = s0 + q;
79acaae1 4143 V = s0 * STEP + N1;
1e8e9920 4144 if (s0 >= e0) goto L2; else goto L0;
4145 L0:
1e8e9920 4146 e = e0 * STEP + N1;
4147 L1:
4148 BODY;
4149 V += STEP;
4150 if (V cond e) goto L1;
1e8e9920 4151 L2:
4152*/
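/* Worked example (added): n = 10 iterations over nthreads = 4 gives
   q = 2, tt = 2.  Threads 0 and 1 take q + 1 = 3 iterations ([0,3)
   and [3,6)); threads 2 and 3 take q = 2 ([6,8) and [8,10)).  Each
   thread derives its own [s0, e0) locally, which is why this schedule
   needs no GOMP_loop_* runtime calls at all.  */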
4153
61e47ac8 4154static void
773c5ba7 4155expand_omp_for_static_nochunk (struct omp_region *region,
4156 struct omp_for_data *fd)
1e8e9920 4157{
31712e83 4158 tree n, q, s0, e0, e, t, tt, nthreads, threadid;
fd6481cf 4159 tree type, itype, vmain, vback;
31712e83 4160 basic_block entry_bb, second_bb, third_bb, exit_bb, seq_start_bb;
4161 basic_block body_bb, cont_bb;
61e47ac8 4162 basic_block fin_bb;
75a70cf9 4163 gimple_stmt_iterator gsi;
4164 gimple stmt;
31712e83 4165 edge ep;
1e8e9920 4166
fd6481cf 4167 itype = type = TREE_TYPE (fd->loop.v);
4168 if (POINTER_TYPE_P (type))
3cea8318 4169 itype = signed_type_for (type);
1e8e9920 4170
61e47ac8 4171 entry_bb = region->entry;
61e47ac8 4172 cont_bb = region->cont;
ac6e3339 4173 gcc_assert (EDGE_COUNT (entry_bb->succs) == 2);
4174 gcc_assert (BRANCH_EDGE (entry_bb)->dest == FALLTHRU_EDGE (cont_bb)->dest);
4175 seq_start_bb = split_edge (FALLTHRU_EDGE (entry_bb));
4176 body_bb = single_succ (seq_start_bb);
4177 gcc_assert (BRANCH_EDGE (cont_bb)->dest == body_bb);
4178 gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
4179 fin_bb = FALLTHRU_EDGE (cont_bb)->dest;
61e47ac8 4180 exit_bb = region->exit;
4181
773c5ba7 4182 /* Iteration space partitioning goes in ENTRY_BB. */
75a70cf9 4183 gsi = gsi_last_bb (entry_bb);
4184 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
61e47ac8 4185
b9a16870 4186 t = build_call_expr (builtin_decl_explicit (BUILT_IN_OMP_GET_NUM_THREADS), 0);
fd6481cf 4187 t = fold_convert (itype, t);
75a70cf9 4188 nthreads = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
4189 true, GSI_SAME_STMT);
48e1416a 4190
b9a16870 4191 t = build_call_expr (builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM), 0);
fd6481cf 4192 t = fold_convert (itype, t);
75a70cf9 4193 threadid = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
4194 true, GSI_SAME_STMT);
1e8e9920 4195
fd6481cf 4196 fd->loop.n1
75a70cf9 4197 = force_gimple_operand_gsi (&gsi, fold_convert (type, fd->loop.n1),
4198 true, NULL_TREE, true, GSI_SAME_STMT);
fd6481cf 4199 fd->loop.n2
75a70cf9 4200 = force_gimple_operand_gsi (&gsi, fold_convert (itype, fd->loop.n2),
4201 true, NULL_TREE, true, GSI_SAME_STMT);
fd6481cf 4202 fd->loop.step
75a70cf9 4203 = force_gimple_operand_gsi (&gsi, fold_convert (itype, fd->loop.step),
4204 true, NULL_TREE, true, GSI_SAME_STMT);
fd6481cf 4205
4206 t = build_int_cst (itype, (fd->loop.cond_code == LT_EXPR ? -1 : 1));
4207 t = fold_build2 (PLUS_EXPR, itype, fd->loop.step, t);
4208 t = fold_build2 (PLUS_EXPR, itype, t, fd->loop.n2);
4209 t = fold_build2 (MINUS_EXPR, itype, t, fold_convert (itype, fd->loop.n1));
4210 if (TYPE_UNSIGNED (itype) && fd->loop.cond_code == GT_EXPR)
4211 t = fold_build2 (TRUNC_DIV_EXPR, itype,
4212 fold_build1 (NEGATE_EXPR, itype, t),
4213 fold_build1 (NEGATE_EXPR, itype, fd->loop.step));
4214 else
4215 t = fold_build2 (TRUNC_DIV_EXPR, itype, t, fd->loop.step);
4216 t = fold_convert (itype, t);
75a70cf9 4217 n = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);
1e8e9920 4218
072f7ab1 4219 q = create_tmp_reg (itype, "q");
fd6481cf 4220 t = fold_build2 (TRUNC_DIV_EXPR, itype, n, nthreads);
31712e83 4221 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE, true, GSI_SAME_STMT);
4222 gsi_insert_before (&gsi, gimple_build_assign (q, t), GSI_SAME_STMT);
4223
072f7ab1 4224 tt = create_tmp_reg (itype, "tt");
31712e83 4225 t = fold_build2 (TRUNC_MOD_EXPR, itype, n, nthreads);
4226 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE, true, GSI_SAME_STMT);
4227 gsi_insert_before (&gsi, gimple_build_assign (tt, t), GSI_SAME_STMT);
1e8e9920 4228
31712e83 4229 t = build2 (LT_EXPR, boolean_type_node, threadid, tt);
4230 stmt = gimple_build_cond_empty (t);
4231 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
4232
4233 second_bb = split_block (entry_bb, stmt)->dest;
4234 gsi = gsi_last_bb (second_bb);
4235 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
4236
4237 gsi_insert_before (&gsi, gimple_build_assign (tt, build_int_cst (itype, 0)),
4238 GSI_SAME_STMT);
4239 stmt = gimple_build_assign_with_ops (PLUS_EXPR, q, q,
4240 build_int_cst (itype, 1));
4241 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
4242
4243 third_bb = split_block (second_bb, stmt)->dest;
4244 gsi = gsi_last_bb (third_bb);
4245 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
1e8e9920 4246
fd6481cf 4247 t = build2 (MULT_EXPR, itype, q, threadid);
31712e83 4248 t = build2 (PLUS_EXPR, itype, t, tt);
75a70cf9 4249 s0 = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);
1e8e9920 4250
fd6481cf 4251 t = fold_build2 (PLUS_EXPR, itype, s0, q);
75a70cf9 4252 e0 = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);
79acaae1 4253
1e8e9920 4254 t = build2 (GE_EXPR, boolean_type_node, s0, e0);
75a70cf9 4255 gsi_insert_before (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT);
773c5ba7 4256
75a70cf9 4257 /* Remove the GIMPLE_OMP_FOR statement. */
4258 gsi_remove (&gsi, true);
773c5ba7 4259
4260 /* Setup code for sequential iteration goes in SEQ_START_BB. */
75a70cf9 4261 gsi = gsi_start_bb (seq_start_bb);
1e8e9920 4262
fd6481cf 4263 t = fold_convert (itype, s0);
4264 t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
4265 if (POINTER_TYPE_P (type))
2cc66f2a 4266 t = fold_build_pointer_plus (fd->loop.n1, t);
fd6481cf 4267 else
4268 t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
75a70cf9 4269 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
4270 false, GSI_CONTINUE_LINKING);
4271 stmt = gimple_build_assign (fd->loop.v, t);
4272 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
48e1416a 4273
fd6481cf 4274 t = fold_convert (itype, e0);
4275 t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
4276 if (POINTER_TYPE_P (type))
2cc66f2a 4277 t = fold_build_pointer_plus (fd->loop.n1, t);
fd6481cf 4278 else
4279 t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
75a70cf9 4280 e = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
4281 false, GSI_CONTINUE_LINKING);
1e8e9920 4282
75a70cf9 4283 /* The code controlling the sequential loop replaces the
4284 GIMPLE_OMP_CONTINUE. */
4285 gsi = gsi_last_bb (cont_bb);
4286 stmt = gsi_stmt (gsi);
4287 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_CONTINUE);
4288 vmain = gimple_omp_continue_control_use (stmt);
4289 vback = gimple_omp_continue_control_def (stmt);
79acaae1 4290
fd6481cf 4291 if (POINTER_TYPE_P (type))
2cc66f2a 4292 t = fold_build_pointer_plus (vmain, fd->loop.step);
fd6481cf 4293 else
4294 t = fold_build2 (PLUS_EXPR, type, vmain, fd->loop.step);
75a70cf9 4295 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
4296 true, GSI_SAME_STMT);
4297 stmt = gimple_build_assign (vback, t);
4298 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
79acaae1 4299
fd6481cf 4300 t = build2 (fd->loop.cond_code, boolean_type_node, vback, e);
75a70cf9 4301 gsi_insert_before (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT);
1e8e9920 4302
75a70cf9 4303 /* Remove the GIMPLE_OMP_CONTINUE statement. */
4304 gsi_remove (&gsi, true);
773c5ba7 4305
75a70cf9 4306 /* Replace the GIMPLE_OMP_RETURN with a barrier, or nothing. */
4307 gsi = gsi_last_bb (exit_bb);
4308 if (!gimple_omp_return_nowait_p (gsi_stmt (gsi)))
4309 force_gimple_operand_gsi (&gsi, build_omp_barrier (), false, NULL_TREE,
4310 false, GSI_SAME_STMT);
4311 gsi_remove (&gsi, true);
773c5ba7 4312
4313 /* Connect all the blocks. */
31712e83 4314 ep = make_edge (entry_bb, third_bb, EDGE_FALSE_VALUE);
4315 ep->probability = REG_BR_PROB_BASE / 4 * 3;
4316 ep = find_edge (entry_bb, second_bb);
4317 ep->flags = EDGE_TRUE_VALUE;
4318 ep->probability = REG_BR_PROB_BASE / 4;
4319 find_edge (third_bb, seq_start_bb)->flags = EDGE_FALSE_VALUE;
4320 find_edge (third_bb, fin_bb)->flags = EDGE_TRUE_VALUE;
79acaae1 4321
ac6e3339 4322 find_edge (cont_bb, body_bb)->flags = EDGE_TRUE_VALUE;
61e47ac8 4323 find_edge (cont_bb, fin_bb)->flags = EDGE_FALSE_VALUE;
48e1416a 4324
31712e83 4325 set_immediate_dominator (CDI_DOMINATORS, second_bb, entry_bb);
4326 set_immediate_dominator (CDI_DOMINATORS, third_bb, entry_bb);
4327 set_immediate_dominator (CDI_DOMINATORS, seq_start_bb, third_bb);
79acaae1 4328 set_immediate_dominator (CDI_DOMINATORS, body_bb,
4329 recompute_dominator (CDI_DOMINATORS, body_bb));
4330 set_immediate_dominator (CDI_DOMINATORS, fin_bb,
4331 recompute_dominator (CDI_DOMINATORS, fin_bb));
1e8e9920 4332}
4333
773c5ba7 4334
4335/* A subroutine of expand_omp_for. Generate code for a parallel
4336 loop with static schedule and a specified chunk size. Given
4337 parameters:
1e8e9920 4338
4339 for (V = N1; V cond N2; V += STEP) BODY;
4340
4341 where COND is "<" or ">", we generate pseudocode
4342
4343 if (cond is <)
4344 adj = STEP - 1;
4345 else
4346 adj = STEP + 1;
fd6481cf 4347 if ((__typeof (V)) -1 > 0 && cond is >)
4348 n = -(adj + N2 - N1) / -STEP;
4349 else
4350 n = (adj + N2 - N1) / STEP;
1e8e9920 4351 trip = 0;
79acaae1 4352 V = threadid * CHUNK * STEP + N1; -- this extra definition of V is
4353 here so that V is defined
4354 if the loop is not entered
1e8e9920 4355 L0:
4356 s0 = (trip * nthreads + threadid) * CHUNK;
4357 e0 = min(s0 + CHUNK, n);
4358 if (s0 < n) goto L1; else goto L4;
4359 L1:
4360 V = s0 * STEP + N1;
4361 e = e0 * STEP + N1;
4362 L2:
4363 BODY;
4364 V += STEP;
4365 if (V cond e) goto L2; else goto L3;
4366 L3:
4367 trip += 1;
4368 goto L0;
4369 L4:
1e8e9920 4370*/
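/* Worked example (added): n = 10, CHUNK = 2, nthreads = 4.  On trip 0
   thread T computes s0 = (0 * 4 + T) * 2, covering [0,2), [2,4),
   [4,6) and [6,8); on trip 1 only thread 0 gets a nonempty range,
   s0 = (1 * 4 + 0) * 2 = 8 with e0 = min (8 + 2, 10) = 10.  The other
   threads see s0 >= n and leave through L4.  */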
4371
61e47ac8 4372static void
75a70cf9 4373expand_omp_for_static_chunk (struct omp_region *region, struct omp_for_data *fd)
1e8e9920 4374{
75a70cf9 4375 tree n, s0, e0, e, t;
79acaae1 4376 tree trip_var, trip_init, trip_main, trip_back, nthreads, threadid;
75a70cf9 4377 tree type, itype, v_main, v_back, v_extra;
773c5ba7 4378 basic_block entry_bb, exit_bb, body_bb, seq_start_bb, iter_part_bb;
61e47ac8 4379 basic_block trip_update_bb, cont_bb, fin_bb;
75a70cf9 4380 gimple_stmt_iterator si;
4381 gimple stmt;
4382 edge se;
1e8e9920 4383
fd6481cf 4384 itype = type = TREE_TYPE (fd->loop.v);
4385 if (POINTER_TYPE_P (type))
3cea8318 4386 itype = signed_type_for (type);
1e8e9920 4387
61e47ac8 4388 entry_bb = region->entry;
ac6e3339 4389 se = split_block (entry_bb, last_stmt (entry_bb));
4390 entry_bb = se->src;
4391 iter_part_bb = se->dest;
61e47ac8 4392 cont_bb = region->cont;
ac6e3339 4393 gcc_assert (EDGE_COUNT (iter_part_bb->succs) == 2);
4394 gcc_assert (BRANCH_EDGE (iter_part_bb)->dest
4395 == FALLTHRU_EDGE (cont_bb)->dest);
4396 seq_start_bb = split_edge (FALLTHRU_EDGE (iter_part_bb));
4397 body_bb = single_succ (seq_start_bb);
4398 gcc_assert (BRANCH_EDGE (cont_bb)->dest == body_bb);
4399 gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
4400 fin_bb = FALLTHRU_EDGE (cont_bb)->dest;
4401 trip_update_bb = split_edge (FALLTHRU_EDGE (cont_bb));
61e47ac8 4402 exit_bb = region->exit;
773c5ba7 4403
773c5ba7 4404 /* Trip and adjustment setup goes in ENTRY_BB. */
75a70cf9 4405 si = gsi_last_bb (entry_bb);
4406 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_FOR);
773c5ba7 4407
b9a16870 4408 t = build_call_expr (builtin_decl_explicit (BUILT_IN_OMP_GET_NUM_THREADS), 0);
fd6481cf 4409 t = fold_convert (itype, t);
75a70cf9 4410 nthreads = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4411 true, GSI_SAME_STMT);
48e1416a 4412
b9a16870 4413 t = build_call_expr (builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM), 0);
fd6481cf 4414 t = fold_convert (itype, t);
75a70cf9 4415 threadid = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4416 true, GSI_SAME_STMT);
79acaae1 4417
fd6481cf 4418 fd->loop.n1
75a70cf9 4419 = force_gimple_operand_gsi (&si, fold_convert (type, fd->loop.n1),
4420 true, NULL_TREE, true, GSI_SAME_STMT);
fd6481cf 4421 fd->loop.n2
75a70cf9 4422 = force_gimple_operand_gsi (&si, fold_convert (itype, fd->loop.n2),
4423 true, NULL_TREE, true, GSI_SAME_STMT);
fd6481cf 4424 fd->loop.step
75a70cf9 4425 = force_gimple_operand_gsi (&si, fold_convert (itype, fd->loop.step),
4426 true, NULL_TREE, true, GSI_SAME_STMT);
79acaae1 4427 fd->chunk_size
75a70cf9 4428 = force_gimple_operand_gsi (&si, fold_convert (itype, fd->chunk_size),
4429 true, NULL_TREE, true, GSI_SAME_STMT);
fd6481cf 4430
4431 t = build_int_cst (itype, (fd->loop.cond_code == LT_EXPR ? -1 : 1));
4432 t = fold_build2 (PLUS_EXPR, itype, fd->loop.step, t);
4433 t = fold_build2 (PLUS_EXPR, itype, t, fd->loop.n2);
4434 t = fold_build2 (MINUS_EXPR, itype, t, fold_convert (itype, fd->loop.n1));
4435 if (TYPE_UNSIGNED (itype) && fd->loop.cond_code == GT_EXPR)
4436 t = fold_build2 (TRUNC_DIV_EXPR, itype,
4437 fold_build1 (NEGATE_EXPR, itype, t),
4438 fold_build1 (NEGATE_EXPR, itype, fd->loop.step));
4439 else
4440 t = fold_build2 (TRUNC_DIV_EXPR, itype, t, fd->loop.step);
4441 t = fold_convert (itype, t);
75a70cf9 4442 n = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4443 true, GSI_SAME_STMT);
79acaae1 4444
083152fb 4445 trip_var = create_tmp_reg (itype, ".trip");
79acaae1 4446 if (gimple_in_ssa_p (cfun))
4447 {
75a70cf9 4448 trip_init = make_ssa_name (trip_var, NULL);
4449 trip_main = make_ssa_name (trip_var, NULL);
4450 trip_back = make_ssa_name (trip_var, NULL);
79acaae1 4451 }
1e8e9920 4452 else
79acaae1 4453 {
4454 trip_init = trip_var;
4455 trip_main = trip_var;
4456 trip_back = trip_var;
4457 }
1e8e9920 4458
75a70cf9 4459 stmt = gimple_build_assign (trip_init, build_int_cst (itype, 0));
4460 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
773c5ba7 4461
fd6481cf 4462 t = fold_build2 (MULT_EXPR, itype, threadid, fd->chunk_size);
4463 t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
4464 if (POINTER_TYPE_P (type))
2cc66f2a 4465 t = fold_build_pointer_plus (fd->loop.n1, t);
fd6481cf 4466 else
4467 t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
75a70cf9 4468 v_extra = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4469 true, GSI_SAME_STMT);
79acaae1 4470
75a70cf9 4471 /* Remove the GIMPLE_OMP_FOR. */
4472 gsi_remove (&si, true);
773c5ba7 4473
4474 /* Iteration space partitioning goes in ITER_PART_BB. */
75a70cf9 4475 si = gsi_last_bb (iter_part_bb);
1e8e9920 4476
fd6481cf 4477 t = fold_build2 (MULT_EXPR, itype, trip_main, nthreads);
4478 t = fold_build2 (PLUS_EXPR, itype, t, threadid);
4479 t = fold_build2 (MULT_EXPR, itype, t, fd->chunk_size);
75a70cf9 4480 s0 = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4481 false, GSI_CONTINUE_LINKING);
1e8e9920 4482
fd6481cf 4483 t = fold_build2 (PLUS_EXPR, itype, s0, fd->chunk_size);
4484 t = fold_build2 (MIN_EXPR, itype, t, n);
75a70cf9 4485 e0 = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4486 false, GSI_CONTINUE_LINKING);
1e8e9920 4487
4488 t = build2 (LT_EXPR, boolean_type_node, s0, n);
75a70cf9 4489 gsi_insert_after (&si, gimple_build_cond_empty (t), GSI_CONTINUE_LINKING);
773c5ba7 4490
4491 /* Setup code for sequential iteration goes in SEQ_START_BB. */
75a70cf9 4492 si = gsi_start_bb (seq_start_bb);
1e8e9920 4493
fd6481cf 4494 t = fold_convert (itype, s0);
4495 t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
4496 if (POINTER_TYPE_P (type))
2cc66f2a 4497 t = fold_build_pointer_plus (fd->loop.n1, t);
fd6481cf 4498 else
4499 t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
75a70cf9 4500 t = force_gimple_operand_gsi (&si, t, false, NULL_TREE,
4501 false, GSI_CONTINUE_LINKING);
4502 stmt = gimple_build_assign (fd->loop.v, t);
4503 gsi_insert_after (&si, stmt, GSI_CONTINUE_LINKING);
1e8e9920 4504
fd6481cf 4505 t = fold_convert (itype, e0);
4506 t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
4507 if (POINTER_TYPE_P (type))
2cc66f2a 4508 t = fold_build_pointer_plus (fd->loop.n1, t);
fd6481cf 4509 else
4510 t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
75a70cf9 4511 e = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4512 false, GSI_CONTINUE_LINKING);
1e8e9920 4513
61e47ac8 4514 /* The code controlling the sequential loop goes in CONT_BB,
75a70cf9 4515 replacing the GIMPLE_OMP_CONTINUE. */
4516 si = gsi_last_bb (cont_bb);
4517 stmt = gsi_stmt (si);
4518 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_CONTINUE);
4519 v_main = gimple_omp_continue_control_use (stmt);
4520 v_back = gimple_omp_continue_control_def (stmt);
79acaae1 4521
fd6481cf 4522 if (POINTER_TYPE_P (type))
2cc66f2a 4523 t = fold_build_pointer_plus (v_main, fd->loop.step);
fd6481cf 4524 else
75a70cf9 4525 t = fold_build2 (PLUS_EXPR, type, v_main, fd->loop.step);
4526 stmt = gimple_build_assign (v_back, t);
4527 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
79acaae1 4528
fd6481cf 4529 t = build2 (fd->loop.cond_code, boolean_type_node, v_back, e);
75a70cf9 4530 gsi_insert_before (&si, gimple_build_cond_empty (t), GSI_SAME_STMT);
48e1416a 4531
75a70cf9 4532 /* Remove GIMPLE_OMP_CONTINUE. */
4533 gsi_remove (&si, true);
773c5ba7 4534
4535 /* Trip update code goes into TRIP_UPDATE_BB. */
75a70cf9 4536 si = gsi_start_bb (trip_update_bb);
1e8e9920 4537
fd6481cf 4538 t = build_int_cst (itype, 1);
4539 t = build2 (PLUS_EXPR, itype, trip_main, t);
75a70cf9 4540 stmt = gimple_build_assign (trip_back, t);
4541 gsi_insert_after (&si, stmt, GSI_CONTINUE_LINKING);
1e8e9920 4542
75a70cf9 4543 /* Replace the GIMPLE_OMP_RETURN with a barrier, or nothing. */
4544 si = gsi_last_bb (exit_bb);
4545 if (!gimple_omp_return_nowait_p (gsi_stmt (si)))
4546 force_gimple_operand_gsi (&si, build_omp_barrier (), false, NULL_TREE,
4547 false, GSI_SAME_STMT);
4548 gsi_remove (&si, true);
1e8e9920 4549
773c5ba7 4550 /* Connect the new blocks. */
ac6e3339 4551 find_edge (iter_part_bb, seq_start_bb)->flags = EDGE_TRUE_VALUE;
4552 find_edge (iter_part_bb, fin_bb)->flags = EDGE_FALSE_VALUE;
79acaae1 4553
ac6e3339 4554 find_edge (cont_bb, body_bb)->flags = EDGE_TRUE_VALUE;
4555 find_edge (cont_bb, trip_update_bb)->flags = EDGE_FALSE_VALUE;
79acaae1 4556
ac6e3339 4557 redirect_edge_and_branch (single_succ_edge (trip_update_bb), iter_part_bb);
79acaae1 4558
4559 if (gimple_in_ssa_p (cfun))
4560 {
75a70cf9 4561 gimple_stmt_iterator psi;
4562 gimple phi;
4563 edge re, ene;
4564 edge_var_map_vector head;
4565 edge_var_map *vm;
4566 size_t i;
4567
79acaae1 4568 /* When we redirect the edge from trip_update_bb to iter_part_bb, we
 4569	 remove the arguments of the PHI nodes in fin_bb.  We need to create
4570 appropriate phi nodes in iter_part_bb instead. */
4571 se = single_pred_edge (fin_bb);
4572 re = single_succ_edge (trip_update_bb);
75a70cf9 4573 head = redirect_edge_var_map_vector (re);
79acaae1 4574 ene = single_succ_edge (entry_bb);
4575
75a70cf9 4576 psi = gsi_start_phis (fin_bb);
4577 for (i = 0; !gsi_end_p (psi) && VEC_iterate (edge_var_map, head, i, vm);
4578 gsi_next (&psi), ++i)
79acaae1 4579 {
75a70cf9 4580 gimple nphi;
efbcb6de 4581 source_location locus;
75a70cf9 4582
4583 phi = gsi_stmt (psi);
4584 t = gimple_phi_result (phi);
4585 gcc_assert (t == redirect_edge_var_map_result (vm));
79acaae1 4586 nphi = create_phi_node (t, iter_part_bb);
79acaae1 4587
4588 t = PHI_ARG_DEF_FROM_EDGE (phi, se);
efbcb6de 4589 locus = gimple_phi_arg_location_from_edge (phi, se);
4590
fd6481cf 4591 /* A special case -- fd->loop.v is not yet computed in
 4592	     iter_part_bb; we need to use v_extra instead.  */
4593 if (t == fd->loop.v)
79acaae1 4594 t = v_extra;
60d535d2 4595 add_phi_arg (nphi, t, ene, locus);
efbcb6de 4596 locus = redirect_edge_var_map_location (vm);
60d535d2 4597 add_phi_arg (nphi, redirect_edge_var_map_def (vm), re, locus);
75a70cf9 4598 }
4599 gcc_assert (!gsi_end_p (psi) && i == VEC_length (edge_var_map, head));
4600 redirect_edge_var_map_clear (re);
4601 while (1)
4602 {
4603 psi = gsi_start_phis (fin_bb);
4604 if (gsi_end_p (psi))
4605 break;
4606 remove_phi_node (&psi, false);
79acaae1 4607 }
79acaae1 4608
4609 /* Make phi node for trip. */
4610 phi = create_phi_node (trip_main, iter_part_bb);
efbcb6de 4611 add_phi_arg (phi, trip_back, single_succ_edge (trip_update_bb),
60d535d2 4612 UNKNOWN_LOCATION);
efbcb6de 4613 add_phi_arg (phi, trip_init, single_succ_edge (entry_bb),
60d535d2 4614 UNKNOWN_LOCATION);
79acaae1 4615 }
4616
4617 set_immediate_dominator (CDI_DOMINATORS, trip_update_bb, cont_bb);
4618 set_immediate_dominator (CDI_DOMINATORS, iter_part_bb,
4619 recompute_dominator (CDI_DOMINATORS, iter_part_bb));
4620 set_immediate_dominator (CDI_DOMINATORS, fin_bb,
4621 recompute_dominator (CDI_DOMINATORS, fin_bb));
4622 set_immediate_dominator (CDI_DOMINATORS, seq_start_bb,
4623 recompute_dominator (CDI_DOMINATORS, seq_start_bb));
4624 set_immediate_dominator (CDI_DOMINATORS, body_bb,
4625 recompute_dominator (CDI_DOMINATORS, body_bb));
1e8e9920 4626}
4627
1e8e9920 4628
773c5ba7 4629/* Expand the OpenMP loop defined by REGION. */
1e8e9920 4630
773c5ba7 4631static void
4632expand_omp_for (struct omp_region *region)
4633{
4634 struct omp_for_data fd;
fd6481cf 4635 struct omp_for_data_loop *loops;
1e8e9920 4636
fd6481cf 4637 loops
4638 = (struct omp_for_data_loop *)
75a70cf9 4639 alloca (gimple_omp_for_collapse (last_stmt (region->entry))
fd6481cf 4640 * sizeof (struct omp_for_data_loop));
fd6481cf 4641 extract_omp_for_data (last_stmt (region->entry), &fd, loops);
f77459c5 4642 region->sched_kind = fd.sched_kind;
1e8e9920 4643
b3a3ddec 4644 gcc_assert (EDGE_COUNT (region->entry->succs) == 2);
4645 BRANCH_EDGE (region->entry)->flags &= ~EDGE_ABNORMAL;
4646 FALLTHRU_EDGE (region->entry)->flags &= ~EDGE_ABNORMAL;
4647 if (region->cont)
4648 {
4649 gcc_assert (EDGE_COUNT (region->cont->succs) == 2);
4650 BRANCH_EDGE (region->cont)->flags &= ~EDGE_ABNORMAL;
4651 FALLTHRU_EDGE (region->cont)->flags &= ~EDGE_ABNORMAL;
4652 }
4653
03ed154b 4654 if (fd.sched_kind == OMP_CLAUSE_SCHEDULE_STATIC
4655 && !fd.have_ordered
fd6481cf 4656 && fd.collapse == 1
ac6e3339 4657 && region->cont != NULL)
1e8e9920 4658 {
4659 if (fd.chunk_size == NULL)
61e47ac8 4660 expand_omp_for_static_nochunk (region, &fd);
1e8e9920 4661 else
61e47ac8 4662 expand_omp_for_static_chunk (region, &fd);
1e8e9920 4663 }
4664 else
4665 {
fd6481cf 4666 int fn_index, start_ix, next_ix;
4667
0416ca72 4668 if (fd.chunk_size == NULL
4669 && fd.sched_kind == OMP_CLAUSE_SCHEDULE_STATIC)
4670 fd.chunk_size = integer_zero_node;
fd6481cf 4671 gcc_assert (fd.sched_kind != OMP_CLAUSE_SCHEDULE_AUTO);
4672 fn_index = (fd.sched_kind == OMP_CLAUSE_SCHEDULE_RUNTIME)
75a70cf9 4673 ? 3 : fd.sched_kind;
fd6481cf 4674 fn_index += fd.have_ordered * 4;
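      /* Illustration (added): schedule(guided) combined with ordered
	 yields fn_index = 2 + 4 = 6, selecting
	 BUILT_IN_GOMP_LOOP_ORDERED_GUIDED_START and the matching _NEXT;
	 this relies on the builtins being laid out in this order in
	 omp-builtins.def.  */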
b9a16870 4675 start_ix = ((int)BUILT_IN_GOMP_LOOP_STATIC_START) + fn_index;
4676 next_ix = ((int)BUILT_IN_GOMP_LOOP_STATIC_NEXT) + fn_index;
fd6481cf 4677 if (fd.iter_type == long_long_unsigned_type_node)
4678 {
b9a16870 4679 start_ix += ((int)BUILT_IN_GOMP_LOOP_ULL_STATIC_START
4680 - (int)BUILT_IN_GOMP_LOOP_STATIC_START);
4681 next_ix += ((int)BUILT_IN_GOMP_LOOP_ULL_STATIC_NEXT
4682 - (int)BUILT_IN_GOMP_LOOP_STATIC_NEXT);
fd6481cf 4683 }
b9c74b4d 4684 expand_omp_for_generic (region, &fd, (enum built_in_function) start_ix,
4685 (enum built_in_function) next_ix);
1e8e9920 4686 }
28c92cbb 4687
083152fb 4688 if (gimple_in_ssa_p (cfun))
4689 update_ssa (TODO_update_ssa_only_virtuals);
1e8e9920 4690}
4691
1e8e9920 4692
 4693/* Expand code for an OpenMP sections directive.  In pseudocode, we generate
4694
1e8e9920 4695 v = GOMP_sections_start (n);
4696 L0:
4697 switch (v)
4698 {
4699 case 0:
4700 goto L2;
4701 case 1:
4702 section 1;
4703 goto L1;
4704 case 2:
4705 ...
4706 case n:
4707 ...
1e8e9920 4708 default:
4709 abort ();
4710 }
4711 L1:
4712 v = GOMP_sections_next ();
4713 goto L0;
4714 L2:
4715 reduction;
4716
773c5ba7 4717   If this is a combined parallel sections construct, replace the call
79acaae1 4718   to GOMP_sections_start with a call to GOMP_sections_next.  */
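/* Example (added for illustration) of user code handled here:

     #pragma omp sections
     {
       #pragma omp section
	 work1 ();
       #pragma omp section
	 work2 ();
     }

   work1 and work2 become cases 1 and 2 of the switch sketched above,
   with case 0 ending the construct.  */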
1e8e9920 4719
4720static void
773c5ba7 4721expand_omp_sections (struct omp_region *region)
1e8e9920 4722{
f018d957 4723 tree t, u, vin = NULL, vmain, vnext, l2;
75a70cf9 4724 VEC (tree,heap) *label_vec;
4725 unsigned len;
ac6e3339 4726 basic_block entry_bb, l0_bb, l1_bb, l2_bb, default_bb;
75a70cf9 4727 gimple_stmt_iterator si, switch_si;
4728 gimple sections_stmt, stmt, cont;
9884aaf8 4729 edge_iterator ei;
4730 edge e;
61e47ac8 4731 struct omp_region *inner;
75a70cf9 4732 unsigned i, casei;
ac6e3339 4733 bool exit_reachable = region->cont != NULL;
1e8e9920 4734
d244d9de 4735 gcc_assert (region->exit != NULL);
61e47ac8 4736 entry_bb = region->entry;
ac6e3339 4737 l0_bb = single_succ (entry_bb);
61e47ac8 4738 l1_bb = region->cont;
ac6e3339 4739 l2_bb = region->exit;
d244d9de 4740 if (single_pred_p (l2_bb) && single_pred (l2_bb) == l0_bb)
4741 l2 = gimple_block_label (l2_bb);
4742 else
03ed154b 4743 {
d244d9de 4744 /* This can happen if there are reductions. */
4745 len = EDGE_COUNT (l0_bb->succs);
4746 gcc_assert (len > 0);
4747 e = EDGE_SUCC (l0_bb, len - 1);
4748 si = gsi_last_bb (e->dest);
4749 l2 = NULL_TREE;
4750 if (gsi_end_p (si)
4751 || gimple_code (gsi_stmt (si)) != GIMPLE_OMP_SECTION)
4752 l2 = gimple_block_label (e->dest);
9884aaf8 4753 else
d244d9de 4754 FOR_EACH_EDGE (e, ei, l0_bb->succs)
4755 {
4756 si = gsi_last_bb (e->dest);
4757 if (gsi_end_p (si)
4758 || gimple_code (gsi_stmt (si)) != GIMPLE_OMP_SECTION)
9884aaf8 4759 {
d244d9de 4760 l2 = gimple_block_label (e->dest);
4761 break;
9884aaf8 4762 }
d244d9de 4763 }
03ed154b 4764 }
d244d9de 4765 if (exit_reachable)
4766 default_bb = create_empty_bb (l1_bb->prev_bb);
03ed154b 4767 else
d244d9de 4768 default_bb = create_empty_bb (l0_bb);
773c5ba7 4769
 4770  /* We will build a switch() with enough cases for all the
75a70cf9 4771     GIMPLE_OMP_SECTION regions, a '0' case taken when no more work remains,
773c5ba7 4772     and a default case to abort if something goes wrong.  */
ac6e3339 4773 len = EDGE_COUNT (l0_bb->succs);
75a70cf9 4774
4775 /* Use VEC_quick_push on label_vec throughout, since we know the size
4776 in advance. */
4777 label_vec = VEC_alloc (tree, heap, len);
1e8e9920 4778
61e47ac8 4779 /* The call to GOMP_sections_start goes in ENTRY_BB, replacing the
75a70cf9 4780 GIMPLE_OMP_SECTIONS statement. */
4781 si = gsi_last_bb (entry_bb);
4782 sections_stmt = gsi_stmt (si);
4783 gcc_assert (gimple_code (sections_stmt) == GIMPLE_OMP_SECTIONS);
4784 vin = gimple_omp_sections_control (sections_stmt);
773c5ba7 4785 if (!is_combined_parallel (region))
1e8e9920 4786 {
773c5ba7 4787 /* If we are not inside a combined parallel+sections region,
4788 call GOMP_sections_start. */
ac6e3339 4789 t = build_int_cst (unsigned_type_node,
4790 exit_reachable ? len - 1 : len);
b9a16870 4791 u = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_START);
75a70cf9 4792 stmt = gimple_build_call (u, 1, t);
1e8e9920 4793 }
79acaae1 4794 else
4795 {
4796 /* Otherwise, call GOMP_sections_next. */
b9a16870 4797 u = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_NEXT);
75a70cf9 4798 stmt = gimple_build_call (u, 0);
79acaae1 4799 }
75a70cf9 4800 gimple_call_set_lhs (stmt, vin);
4801 gsi_insert_after (&si, stmt, GSI_SAME_STMT);
4802 gsi_remove (&si, true);
4803
4804 /* The switch() statement replacing GIMPLE_OMP_SECTIONS_SWITCH goes in
4805 L0_BB. */
4806 switch_si = gsi_last_bb (l0_bb);
4807 gcc_assert (gimple_code (gsi_stmt (switch_si)) == GIMPLE_OMP_SECTIONS_SWITCH);
79acaae1 4808 if (exit_reachable)
4809 {
4810 cont = last_stmt (l1_bb);
75a70cf9 4811 gcc_assert (gimple_code (cont) == GIMPLE_OMP_CONTINUE);
4812 vmain = gimple_omp_continue_control_use (cont);
4813 vnext = gimple_omp_continue_control_def (cont);
79acaae1 4814 }
4815 else
4816 {
4817 vmain = vin;
4818 vnext = NULL_TREE;
4819 }
1e8e9920 4820
d244d9de 4821 t = build_case_label (build_int_cst (unsigned_type_node, 0), NULL, l2);
4822 VEC_quick_push (tree, label_vec, t);
4823 i = 1;
03ed154b 4824
75a70cf9 4825 /* Convert each GIMPLE_OMP_SECTION into a CASE_LABEL_EXPR. */
ac6e3339 4826 for (inner = region->inner, casei = 1;
4827 inner;
4828 inner = inner->next, i++, casei++)
1e8e9920 4829 {
773c5ba7 4830 basic_block s_entry_bb, s_exit_bb;
4831
9884aaf8 4832 /* Skip optional reduction region. */
75a70cf9 4833 if (inner->type == GIMPLE_OMP_ATOMIC_LOAD)
9884aaf8 4834 {
4835 --i;
4836 --casei;
4837 continue;
4838 }
4839
61e47ac8 4840 s_entry_bb = inner->entry;
4841 s_exit_bb = inner->exit;
1e8e9920 4842
75a70cf9 4843 t = gimple_block_label (s_entry_bb);
ac6e3339 4844 u = build_int_cst (unsigned_type_node, casei);
b6e3dd65 4845 u = build_case_label (u, NULL, t);
75a70cf9 4846 VEC_quick_push (tree, label_vec, u);
61e47ac8 4847
75a70cf9 4848 si = gsi_last_bb (s_entry_bb);
4849 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SECTION);
4850 gcc_assert (i < len || gimple_omp_section_last_p (gsi_stmt (si)));
4851 gsi_remove (&si, true);
61e47ac8 4852 single_succ_edge (s_entry_bb)->flags = EDGE_FALLTHRU;
03ed154b 4853
4854 if (s_exit_bb == NULL)
4855 continue;
4856
75a70cf9 4857 si = gsi_last_bb (s_exit_bb);
4858 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_RETURN);
4859 gsi_remove (&si, true);
03ed154b 4860
773c5ba7 4861 single_succ_edge (s_exit_bb)->flags = EDGE_FALLTHRU;
1e8e9920 4862 }
4863
773c5ba7 4864 /* Error handling code goes in DEFAULT_BB. */
75a70cf9 4865 t = gimple_block_label (default_bb);
b6e3dd65 4866 u = build_case_label (NULL, NULL, t);
61e47ac8 4867 make_edge (l0_bb, default_bb, 0);
1e8e9920 4868
49a70175 4869 stmt = gimple_build_switch (vmain, u, label_vec);
75a70cf9 4870 gsi_insert_after (&switch_si, stmt, GSI_SAME_STMT);
4871 gsi_remove (&switch_si, true);
4872 VEC_free (tree, heap, label_vec);
4873
4874 si = gsi_start_bb (default_bb);
b9a16870 4875 stmt = gimple_build_call (builtin_decl_explicit (BUILT_IN_TRAP), 0);
75a70cf9 4876 gsi_insert_after (&si, stmt, GSI_CONTINUE_LINKING);
773c5ba7 4877
ac6e3339 4878 if (exit_reachable)
03ed154b 4879 {
b9a16870 4880 tree bfn_decl;
4881
ac6e3339 4882 /* Code to get the next section goes in L1_BB. */
75a70cf9 4883 si = gsi_last_bb (l1_bb);
4884 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_CONTINUE);
1e8e9920 4885
b9a16870 4886 bfn_decl = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_NEXT);
4887 stmt = gimple_build_call (bfn_decl, 0);
75a70cf9 4888 gimple_call_set_lhs (stmt, vnext);
4889 gsi_insert_after (&si, stmt, GSI_SAME_STMT);
4890 gsi_remove (&si, true);
773c5ba7 4891
ac6e3339 4892 single_succ_edge (l1_bb)->flags = EDGE_FALLTHRU;
03ed154b 4893 }
773c5ba7 4894
d244d9de 4895 /* Cleanup function replaces GIMPLE_OMP_RETURN in EXIT_BB. */
4896 si = gsi_last_bb (l2_bb);
4897 if (gimple_omp_return_nowait_p (gsi_stmt (si)))
4898 t = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_END_NOWAIT);
4899 else
4900 t = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_END);
4901 stmt = gimple_build_call (t, 0);
4902 gsi_insert_after (&si, stmt, GSI_SAME_STMT);
4903 gsi_remove (&si, true);
4904
79acaae1 4905 set_immediate_dominator (CDI_DOMINATORS, default_bb, l0_bb);
773c5ba7 4906}
1e8e9920 4907
1e8e9920 4908
61e47ac8 4909/* Expand code for an OpenMP single directive. We've already expanded
 4910   much of the code; here we simply place the GOMP_barrier call.  */
4911
4912static void
4913expand_omp_single (struct omp_region *region)
4914{
4915 basic_block entry_bb, exit_bb;
75a70cf9 4916 gimple_stmt_iterator si;
61e47ac8 4917 bool need_barrier = false;
4918
4919 entry_bb = region->entry;
4920 exit_bb = region->exit;
4921
75a70cf9 4922 si = gsi_last_bb (entry_bb);
61e47ac8 4923 /* The terminal barrier at the end of a GOMP_single_copy sequence cannot
4924 be removed. We need to ensure that the thread that entered the single
4925 does not exit before the data is copied out by the other threads. */
75a70cf9 4926 if (find_omp_clause (gimple_omp_single_clauses (gsi_stmt (si)),
61e47ac8 4927 OMP_CLAUSE_COPYPRIVATE))
4928 need_barrier = true;
75a70cf9 4929 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SINGLE);
4930 gsi_remove (&si, true);
61e47ac8 4931 single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
4932
75a70cf9 4933 si = gsi_last_bb (exit_bb);
4934 if (!gimple_omp_return_nowait_p (gsi_stmt (si)) || need_barrier)
4935 force_gimple_operand_gsi (&si, build_omp_barrier (), false, NULL_TREE,
4936 false, GSI_SAME_STMT);
4937 gsi_remove (&si, true);
61e47ac8 4938 single_succ_edge (exit_bb)->flags = EDGE_FALLTHRU;
4939}
4940
4941
4942/* Generic expansion for OpenMP synchronization directives: master,
4943 ordered and critical. All we need to do here is remove the entry
4944 and exit markers for REGION. */
773c5ba7 4945
4946static void
4947expand_omp_synch (struct omp_region *region)
4948{
4949 basic_block entry_bb, exit_bb;
75a70cf9 4950 gimple_stmt_iterator si;
773c5ba7 4951
61e47ac8 4952 entry_bb = region->entry;
4953 exit_bb = region->exit;
773c5ba7 4954
75a70cf9 4955 si = gsi_last_bb (entry_bb);
4956 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SINGLE
4957 || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_MASTER
4958 || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ORDERED
4959 || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_CRITICAL);
4960 gsi_remove (&si, true);
773c5ba7 4961 single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
4962
03ed154b 4963 if (exit_bb)
4964 {
75a70cf9 4965 si = gsi_last_bb (exit_bb);
4966 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_RETURN);
4967 gsi_remove (&si, true);
03ed154b 4968 single_succ_edge (exit_bb)->flags = EDGE_FALLTHRU;
4969 }
773c5ba7 4970}
1e8e9920 4971
2169f33b 4972/* A subroutine of expand_omp_atomic. Attempt to implement the atomic
4973 operation as a normal volatile load. */
4974
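/* For illustration (a sketch, assuming a 4-byte int, so INDEX == 2):

	#pragma omp atomic read
	v = x;

   is expanded here into roughly

	v = __atomic_load_4 (&x, MEMMODEL_RELAXED);

   the builtin being selected as BUILT_IN_ATOMIC_LOAD_N + INDEX + 1.  */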
4975static bool
3ec11c49 4976expand_omp_atomic_load (basic_block load_bb, tree addr,
4977 tree loaded_val, int index)
2169f33b 4978{
3ec11c49 4979 enum built_in_function tmpbase;
4980 gimple_stmt_iterator gsi;
4981 basic_block store_bb;
4982 location_t loc;
4983 gimple stmt;
4984 tree decl, call, type, itype;
4985
4986 gsi = gsi_last_bb (load_bb);
4987 stmt = gsi_stmt (gsi);
4988 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_ATOMIC_LOAD);
4989 loc = gimple_location (stmt);
4990
4991 /* ??? If the target does not implement atomic_load_optab[mode], and mode
4992 is smaller than word size, then expand_atomic_load assumes that the load
4993 is atomic. We could avoid the builtin entirely in this case. */
4994
4995 tmpbase = (enum built_in_function) (BUILT_IN_ATOMIC_LOAD_N + index + 1);
4996 decl = builtin_decl_explicit (tmpbase);
4997 if (decl == NULL_TREE)
4998 return false;
4999
5000 type = TREE_TYPE (loaded_val);
5001 itype = TREE_TYPE (TREE_TYPE (decl));
5002
5003 call = build_call_expr_loc (loc, decl, 2, addr,
5004 build_int_cst (NULL, MEMMODEL_RELAXED));
5005 if (!useless_type_conversion_p (type, itype))
5006 call = fold_build1_loc (loc, VIEW_CONVERT_EXPR, type, call);
5007 call = build2_loc (loc, MODIFY_EXPR, void_type_node, loaded_val, call);
5008
5009 force_gimple_operand_gsi (&gsi, call, true, NULL_TREE, true, GSI_SAME_STMT);
5010 gsi_remove (&gsi, true);
5011
5012 store_bb = single_succ (load_bb);
5013 gsi = gsi_last_bb (store_bb);
5014 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_ATOMIC_STORE);
5015 gsi_remove (&gsi, true);
5016
5017 if (gimple_in_ssa_p (cfun))
5018 update_ssa (TODO_update_ssa_no_phi);
5019
5020 return true;
2169f33b 5021}
5022
5023/* A subroutine of expand_omp_atomic. Attempt to implement the atomic
5024 operation as a normal volatile store. */
5025
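/* For illustration (same 4-byte int assumption as above):

	#pragma omp atomic write
	x = expr;

   becomes roughly __atomic_store_4 (&x, expr, MEMMODEL_RELAXED); if the
   old value is also needed, __atomic_exchange_4 is used instead.  */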
5026static bool
3ec11c49 5027expand_omp_atomic_store (basic_block load_bb, tree addr,
5028 tree loaded_val, tree stored_val, int index)
2169f33b 5029{
3ec11c49 5030 enum built_in_function tmpbase;
5031 gimple_stmt_iterator gsi;
5032 basic_block store_bb = single_succ (load_bb);
5033 location_t loc;
5034 gimple stmt;
5035 tree decl, call, type, itype;
5036 enum machine_mode imode;
5037 bool exchange;
5038
5039 gsi = gsi_last_bb (load_bb);
5040 stmt = gsi_stmt (gsi);
5041 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_ATOMIC_LOAD);
5042
5043 /* If the load value is needed, then this isn't a store but an exchange. */
5044 exchange = gimple_omp_atomic_need_value_p (stmt);
5045
5046 gsi = gsi_last_bb (store_bb);
5047 stmt = gsi_stmt (gsi);
5048 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_ATOMIC_STORE);
5049 loc = gimple_location (stmt);
5050
5051 /* ??? If the target does not implement atomic_store_optab[mode], and mode
5052 is smaller than word size, then expand_atomic_store assumes that the store
5053 is atomic. We could avoid the builtin entirely in this case. */
5054
5055 tmpbase = (exchange ? BUILT_IN_ATOMIC_EXCHANGE_N : BUILT_IN_ATOMIC_STORE_N);
5056 tmpbase = (enum built_in_function) ((int) tmpbase + index + 1);
5057 decl = builtin_decl_explicit (tmpbase);
5058 if (decl == NULL_TREE)
5059 return false;
5060
5061 type = TREE_TYPE (stored_val);
5062
5063 /* Dig out the type of the function's second argument. */
5064 itype = TREE_TYPE (decl);
5065 itype = TYPE_ARG_TYPES (itype);
5066 itype = TREE_CHAIN (itype);
5067 itype = TREE_VALUE (itype);
5068 imode = TYPE_MODE (itype);
5069
5070 if (exchange && !can_atomic_exchange_p (imode, true))
5071 return false;
5072
5073 if (!useless_type_conversion_p (itype, type))
5074 stored_val = fold_build1_loc (loc, VIEW_CONVERT_EXPR, itype, stored_val);
5075 call = build_call_expr_loc (loc, decl, 3, addr, stored_val,
5076 build_int_cst (NULL, MEMMODEL_RELAXED));
5077 if (exchange)
5078 {
5079 if (!useless_type_conversion_p (type, itype))
5080 call = build1_loc (loc, VIEW_CONVERT_EXPR, type, call);
5081 call = build2_loc (loc, MODIFY_EXPR, void_type_node, loaded_val, call);
5082 }
5083
5084 force_gimple_operand_gsi (&gsi, call, true, NULL_TREE, true, GSI_SAME_STMT);
5085 gsi_remove (&gsi, true);
5086
5087 /* Remove the GIMPLE_OMP_ATOMIC_LOAD that we verified above. */
5088 gsi = gsi_last_bb (load_bb);
5089 gsi_remove (&gsi, true);
5090
5091 if (gimple_in_ssa_p (cfun))
5092 update_ssa (TODO_update_ssa_no_phi);
5093
5094 return true;
2169f33b 5095}
5096
cb7f680b 5097/* A subroutine of expand_omp_atomic. Attempt to implement the atomic
1cd6e20d 5098 operation as a __atomic_fetch_op builtin. INDEX is log2 of the
cb7f680b 5099 size of the data type, and thus usable to find the index of the builtin
5100 decl. Returns false if the expression is not of the proper form. */
5101
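/* A worked example (illustrative): with a 4-byte int, INDEX == 2, and

	#pragma omp atomic
	x += 1;

   matches the fetch-op pattern below, so the emitted call is roughly

	__atomic_fetch_add_4 (&x, 1, MEMMODEL_RELAXED);

   selected as BUILT_IN_ATOMIC_FETCH_ADD_N + INDEX + 1.  */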
5102static bool
5103expand_omp_atomic_fetch_op (basic_block load_bb,
5104 tree addr, tree loaded_val,
5105 tree stored_val, int index)
5106{
b9a16870 5107 enum built_in_function oldbase, newbase, tmpbase;
cb7f680b 5108 tree decl, itype, call;
2169f33b 5109 tree lhs, rhs;
cb7f680b 5110 basic_block store_bb = single_succ (load_bb);
75a70cf9 5111 gimple_stmt_iterator gsi;
5112 gimple stmt;
389dd41b 5113 location_t loc;
1cd6e20d 5114 enum tree_code code;
2169f33b 5115 bool need_old, need_new;
1cd6e20d 5116 enum machine_mode imode;
cb7f680b 5117
5118 /* We expect to find the following sequences:
48e1416a 5119
cb7f680b 5120 load_bb:
75a70cf9 5121 GIMPLE_OMP_ATOMIC_LOAD (tmp, mem)
cb7f680b 5122
5123 store_bb:
5124 val = tmp OP something; (or: something OP tmp)
48e1416a 5125	 GIMPLE_OMP_ATOMIC_STORE (val)
cb7f680b 5126
48e1416a 5127   ??? FIXME: Allow a more flexible sequence.
cb7f680b 5128 Perhaps use data flow to pick the statements.
48e1416a 5129
cb7f680b 5130 */
5131
75a70cf9 5132 gsi = gsi_after_labels (store_bb);
5133 stmt = gsi_stmt (gsi);
389dd41b 5134 loc = gimple_location (stmt);
75a70cf9 5135 if (!is_gimple_assign (stmt))
cb7f680b 5136 return false;
75a70cf9 5137 gsi_next (&gsi);
5138 if (gimple_code (gsi_stmt (gsi)) != GIMPLE_OMP_ATOMIC_STORE)
cb7f680b 5139 return false;
2169f33b 5140 need_new = gimple_omp_atomic_need_value_p (gsi_stmt (gsi));
5141 need_old = gimple_omp_atomic_need_value_p (last_stmt (load_bb));
5142 gcc_checking_assert (!need_old || !need_new);
cb7f680b 5143
75a70cf9 5144 if (!operand_equal_p (gimple_assign_lhs (stmt), stored_val, 0))
cb7f680b 5145 return false;
5146
cb7f680b 5147 /* Check for one of the supported fetch-op operations. */
1cd6e20d 5148 code = gimple_assign_rhs_code (stmt);
5149 switch (code)
cb7f680b 5150 {
5151 case PLUS_EXPR:
5152 case POINTER_PLUS_EXPR:
1cd6e20d 5153 oldbase = BUILT_IN_ATOMIC_FETCH_ADD_N;
5154 newbase = BUILT_IN_ATOMIC_ADD_FETCH_N;
cb7f680b 5155 break;
5156 case MINUS_EXPR:
1cd6e20d 5157 oldbase = BUILT_IN_ATOMIC_FETCH_SUB_N;
5158 newbase = BUILT_IN_ATOMIC_SUB_FETCH_N;
cb7f680b 5159 break;
5160 case BIT_AND_EXPR:
1cd6e20d 5161 oldbase = BUILT_IN_ATOMIC_FETCH_AND_N;
5162 newbase = BUILT_IN_ATOMIC_AND_FETCH_N;
cb7f680b 5163 break;
5164 case BIT_IOR_EXPR:
1cd6e20d 5165 oldbase = BUILT_IN_ATOMIC_FETCH_OR_N;
5166 newbase = BUILT_IN_ATOMIC_OR_FETCH_N;
cb7f680b 5167 break;
5168 case BIT_XOR_EXPR:
1cd6e20d 5169 oldbase = BUILT_IN_ATOMIC_FETCH_XOR_N;
5170 newbase = BUILT_IN_ATOMIC_XOR_FETCH_N;
cb7f680b 5171 break;
5172 default:
5173 return false;
5174 }
1cd6e20d 5175
cb7f680b 5176 /* Make sure the expression is of the proper form. */
75a70cf9 5177 if (operand_equal_p (gimple_assign_rhs1 (stmt), loaded_val, 0))
5178 rhs = gimple_assign_rhs2 (stmt);
5179 else if (commutative_tree_code (gimple_assign_rhs_code (stmt))
5180 && operand_equal_p (gimple_assign_rhs2 (stmt), loaded_val, 0))
5181 rhs = gimple_assign_rhs1 (stmt);
cb7f680b 5182 else
5183 return false;
5184
b9a16870 5185 tmpbase = ((enum built_in_function)
5186 ((need_new ? newbase : oldbase) + index + 1));
5187 decl = builtin_decl_explicit (tmpbase);
0f94f46b 5188 if (decl == NULL_TREE)
5189 return false;
cb7f680b 5190 itype = TREE_TYPE (TREE_TYPE (decl));
1cd6e20d 5191 imode = TYPE_MODE (itype);
cb7f680b 5192
1cd6e20d 5193 /* We could test all of the various optabs involved, but the fact of the
5194 matter is that (with the exception of i486 vs i586 and xadd) all targets
 5195     that support any atomic operation optab also implement compare-and-swap.
5196 Let optabs.c take care of expanding any compare-and-swap loop. */
29139cdc 5197 if (!can_compare_and_swap_p (imode, true))
cb7f680b 5198 return false;
5199
75a70cf9 5200 gsi = gsi_last_bb (load_bb);
5201 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_ATOMIC_LOAD);
1cd6e20d 5202
5203 /* OpenMP does not imply any barrier-like semantics on its atomic ops.
5204 It only requires that the operation happen atomically. Thus we can
5205 use the RELAXED memory model. */
5206 call = build_call_expr_loc (loc, decl, 3, addr,
5207 fold_convert_loc (loc, itype, rhs),
5208 build_int_cst (NULL, MEMMODEL_RELAXED));
5209
2169f33b 5210 if (need_old || need_new)
5211 {
5212 lhs = need_old ? loaded_val : stored_val;
5213 call = fold_convert_loc (loc, TREE_TYPE (lhs), call);
5214 call = build2_loc (loc, MODIFY_EXPR, void_type_node, lhs, call);
5215 }
5216 else
5217 call = fold_convert_loc (loc, void_type_node, call);
75a70cf9 5218 force_gimple_operand_gsi (&gsi, call, true, NULL_TREE, true, GSI_SAME_STMT);
5219 gsi_remove (&gsi, true);
cb7f680b 5220
75a70cf9 5221 gsi = gsi_last_bb (store_bb);
5222 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_ATOMIC_STORE);
5223 gsi_remove (&gsi, true);
5224 gsi = gsi_last_bb (store_bb);
5225 gsi_remove (&gsi, true);
cb7f680b 5226
5227 if (gimple_in_ssa_p (cfun))
5228 update_ssa (TODO_update_ssa_no_phi);
5229
5230 return true;
5231}
5232
5233/* A subroutine of expand_omp_atomic. Implement the atomic operation as:
5234
5235 oldval = *addr;
5236 repeat:
5237 newval = rhs; // with oldval replacing *addr in rhs
5238 oldval = __sync_val_compare_and_swap (addr, oldval, newval);
5239 if (oldval != newval)
5240 goto repeat;
5241
5242 INDEX is log2 of the size of the data type, and thus usable to find the
5243 index of the builtin decl. */
5244
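/* E.g. (a sketch): for a 4-byte float, ADDR is view-converted below to a
   pointer to a 4-byte integral type and the loop uses
   __sync_val_compare_and_swap_4 on the integer image, so the exit
   comparison is bitwise and terminates even for NaNs and -0.0.  */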
5245static bool
5246expand_omp_atomic_pipeline (basic_block load_bb, basic_block store_bb,
5247 tree addr, tree loaded_val, tree stored_val,
5248 int index)
5249{
790368c5 5250 tree loadedi, storedi, initial, new_storedi, old_vali;
cb7f680b 5251 tree type, itype, cmpxchg, iaddr;
75a70cf9 5252 gimple_stmt_iterator si;
cb7f680b 5253 basic_block loop_header = single_succ (load_bb);
75a70cf9 5254 gimple phi, stmt;
cb7f680b 5255 edge e;
b9a16870 5256 enum built_in_function fncode;
cb7f680b 5257
1cd6e20d 5258 /* ??? We need a non-pointer interface to __atomic_compare_exchange in
5259 order to use the RELAXED memory model effectively. */
b9a16870 5260 fncode = (enum built_in_function)((int)BUILT_IN_SYNC_VAL_COMPARE_AND_SWAP_N
5261 + index + 1);
5262 cmpxchg = builtin_decl_explicit (fncode);
0f94f46b 5263 if (cmpxchg == NULL_TREE)
5264 return false;
cb7f680b 5265 type = TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (addr)));
5266 itype = TREE_TYPE (TREE_TYPE (cmpxchg));
5267
29139cdc 5268 if (!can_compare_and_swap_p (TYPE_MODE (itype), true))
cb7f680b 5269 return false;
5270
75a70cf9 5271 /* Load the initial value, replacing the GIMPLE_OMP_ATOMIC_LOAD. */
5272 si = gsi_last_bb (load_bb);
5273 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_LOAD);
5274
790368c5 5275 /* For floating-point values, we'll need to view-convert them to integers
5276 so that we can perform the atomic compare and swap. Simplify the
5277 following code by always setting up the "i"ntegral variables. */
5278 if (!INTEGRAL_TYPE_P (type) && !POINTER_TYPE_P (type))
5279 {
75a70cf9 5280 tree iaddr_val;
5281
072f7ab1 5282 iaddr = create_tmp_reg (build_pointer_type_for_mode (itype, ptr_mode,
5283 true), NULL);
75a70cf9 5284 iaddr_val
5285 = force_gimple_operand_gsi (&si,
5286 fold_convert (TREE_TYPE (iaddr), addr),
5287 false, NULL_TREE, true, GSI_SAME_STMT);
5288 stmt = gimple_build_assign (iaddr, iaddr_val);
5289 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
790368c5 5290 loadedi = create_tmp_var (itype, NULL);
5291 if (gimple_in_ssa_p (cfun))
b03e5397 5292 loadedi = make_ssa_name (loadedi, NULL);
790368c5 5293 }
5294 else
5295 {
5296 iaddr = addr;
5297 loadedi = loaded_val;
5298 }
75a70cf9 5299
182cf5a9 5300 initial
5301 = force_gimple_operand_gsi (&si,
5302 build2 (MEM_REF, TREE_TYPE (TREE_TYPE (iaddr)),
5303 iaddr,
5304 build_int_cst (TREE_TYPE (iaddr), 0)),
5305 true, NULL_TREE, true, GSI_SAME_STMT);
790368c5 5306
5307 /* Move the value to the LOADEDI temporary. */
cb7f680b 5308 if (gimple_in_ssa_p (cfun))
5309 {
75a70cf9 5310 gcc_assert (gimple_seq_empty_p (phi_nodes (loop_header)));
790368c5 5311 phi = create_phi_node (loadedi, loop_header);
cb7f680b 5312 SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, single_succ_edge (load_bb)),
5313 initial);
5314 }
5315 else
75a70cf9 5316 gsi_insert_before (&si,
5317 gimple_build_assign (loadedi, initial),
5318 GSI_SAME_STMT);
790368c5 5319 if (loadedi != loaded_val)
5320 {
75a70cf9 5321 gimple_stmt_iterator gsi2;
5322 tree x;
790368c5 5323
5324 x = build1 (VIEW_CONVERT_EXPR, type, loadedi);
75a70cf9 5325 gsi2 = gsi_start_bb (loop_header);
790368c5 5326 if (gimple_in_ssa_p (cfun))
5327 {
75a70cf9 5328 gimple stmt;
5329 x = force_gimple_operand_gsi (&gsi2, x, true, NULL_TREE,
5330 true, GSI_SAME_STMT);
5331 stmt = gimple_build_assign (loaded_val, x);
5332 gsi_insert_before (&gsi2, stmt, GSI_SAME_STMT);
790368c5 5333 }
5334 else
5335 {
75a70cf9 5336 x = build2 (MODIFY_EXPR, TREE_TYPE (loaded_val), loaded_val, x);
5337 force_gimple_operand_gsi (&gsi2, x, true, NULL_TREE,
5338 true, GSI_SAME_STMT);
790368c5 5339 }
5340 }
75a70cf9 5341 gsi_remove (&si, true);
cb7f680b 5342
75a70cf9 5343 si = gsi_last_bb (store_bb);
5344 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_STORE);
cb7f680b 5345
790368c5 5346 if (iaddr == addr)
5347 storedi = stored_val;
cb7f680b 5348 else
790368c5 5349 storedi =
75a70cf9 5350 force_gimple_operand_gsi (&si,
790368c5 5351 build1 (VIEW_CONVERT_EXPR, itype,
5352 stored_val), true, NULL_TREE, true,
75a70cf9 5353 GSI_SAME_STMT);
cb7f680b 5354
5355 /* Build the compare&swap statement. */
5356 new_storedi = build_call_expr (cmpxchg, 3, iaddr, loadedi, storedi);
75a70cf9 5357 new_storedi = force_gimple_operand_gsi (&si,
87f9ffa4 5358 fold_convert (TREE_TYPE (loadedi),
5359 new_storedi),
cb7f680b 5360 true, NULL_TREE,
75a70cf9 5361 true, GSI_SAME_STMT);
cb7f680b 5362
5363 if (gimple_in_ssa_p (cfun))
5364 old_vali = loadedi;
5365 else
5366 {
87f9ffa4 5367 old_vali = create_tmp_var (TREE_TYPE (loadedi), NULL);
75a70cf9 5368 stmt = gimple_build_assign (old_vali, loadedi);
5369 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
cb7f680b 5370
75a70cf9 5371 stmt = gimple_build_assign (loadedi, new_storedi);
5372 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
cb7f680b 5373 }
5374
5375 /* Note that we always perform the comparison as an integer, even for
48e1416a 5376 floating point. This allows the atomic operation to properly
cb7f680b 5377 succeed even with NaNs and -0.0. */
75a70cf9 5378 stmt = gimple_build_cond_empty
5379 (build2 (NE_EXPR, boolean_type_node,
5380 new_storedi, old_vali));
5381 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
cb7f680b 5382
5383 /* Update cfg. */
5384 e = single_succ_edge (store_bb);
5385 e->flags &= ~EDGE_FALLTHRU;
5386 e->flags |= EDGE_FALSE_VALUE;
5387
5388 e = make_edge (store_bb, loop_header, EDGE_TRUE_VALUE);
5389
790368c5 5390 /* Copy the new value to loadedi (we already did that before the condition
cb7f680b 5391 if we are not in SSA). */
5392 if (gimple_in_ssa_p (cfun))
5393 {
75a70cf9 5394 phi = gimple_seq_first_stmt (phi_nodes (loop_header));
790368c5 5395 SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, e), new_storedi);
cb7f680b 5396 }
5397
75a70cf9 5398 /* Remove GIMPLE_OMP_ATOMIC_STORE. */
5399 gsi_remove (&si, true);
cb7f680b 5400
5401 if (gimple_in_ssa_p (cfun))
5402 update_ssa (TODO_update_ssa_no_phi);
5403
5404 return true;
5405}
5406
5407/* A subroutine of expand_omp_atomic. Implement the atomic operation as:
5408
5409 GOMP_atomic_start ();
5410 *addr = rhs;
5411 GOMP_atomic_end ();
5412
5413 The result is not globally atomic, but works so long as all parallel
5414 references are within #pragma omp atomic directives. According to
 5415   responses received from omp@openmp.org, this appears to be within spec.
 5416   That makes sense, since that's how several other compilers handle
48e1416a 5417 this situation as well.
75a70cf9 5418 LOADED_VAL and ADDR are the operands of GIMPLE_OMP_ATOMIC_LOAD we're
5419 expanding. STORED_VAL is the operand of the matching
5420 GIMPLE_OMP_ATOMIC_STORE.
cb7f680b 5421
48e1416a 5422 We replace
5423 GIMPLE_OMP_ATOMIC_LOAD (loaded_val, addr) with
cb7f680b 5424 loaded_val = *addr;
5425
5426 and replace
3ec11c49 5427 GIMPLE_OMP_ATOMIC_STORE (stored_val) with
48e1416a 5428 *addr = stored_val;
cb7f680b 5429*/
5430
5431static bool
5432expand_omp_atomic_mutex (basic_block load_bb, basic_block store_bb,
5433 tree addr, tree loaded_val, tree stored_val)
5434{
75a70cf9 5435 gimple_stmt_iterator si;
5436 gimple stmt;
cb7f680b 5437 tree t;
5438
75a70cf9 5439 si = gsi_last_bb (load_bb);
5440 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_LOAD);
cb7f680b 5441
b9a16870 5442 t = builtin_decl_explicit (BUILT_IN_GOMP_ATOMIC_START);
414c3a2c 5443 t = build_call_expr (t, 0);
75a70cf9 5444 force_gimple_operand_gsi (&si, t, true, NULL_TREE, true, GSI_SAME_STMT);
cb7f680b 5445
182cf5a9 5446 stmt = gimple_build_assign (loaded_val, build_simple_mem_ref (addr));
75a70cf9 5447 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
5448 gsi_remove (&si, true);
cb7f680b 5449
75a70cf9 5450 si = gsi_last_bb (store_bb);
5451 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_STORE);
cb7f680b 5452
182cf5a9 5453 stmt = gimple_build_assign (build_simple_mem_ref (unshare_expr (addr)),
5454 stored_val);
75a70cf9 5455 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
cb7f680b 5456
b9a16870 5457 t = builtin_decl_explicit (BUILT_IN_GOMP_ATOMIC_END);
414c3a2c 5458 t = build_call_expr (t, 0);
75a70cf9 5459 force_gimple_operand_gsi (&si, t, true, NULL_TREE, true, GSI_SAME_STMT);
5460 gsi_remove (&si, true);
cb7f680b 5461
5462 if (gimple_in_ssa_p (cfun))
5463 update_ssa (TODO_update_ssa_no_phi);
5464 return true;
5465}
5466
48e1416a 5467/* Expand a GIMPLE_OMP_ATOMIC statement.  We try to expand
 5468   using expand_omp_atomic_fetch_op.  If that fails, we try to
cb7f680b 5469   call expand_omp_atomic_pipeline, and if that fails too, the
5470 ultimate fallback is wrapping the operation in a mutex
48e1416a 5471 (expand_omp_atomic_mutex). REGION is the atomic region built
5472 by build_omp_regions_1(). */
cb7f680b 5473
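/* For instance (illustrative): a naturally aligned 4-byte int updated
   with "x += 1" is handled by expand_omp_atomic_fetch_op; a 4-byte
   float updated with "x *= 2.0" falls through to the compare-and-swap
   pipeline; a type whose size is not a power of two between 1 and 16
   bytes, or that is insufficiently aligned, ends up in the mutex
   fallback.  */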
5474static void
5475expand_omp_atomic (struct omp_region *region)
5476{
5477 basic_block load_bb = region->entry, store_bb = region->exit;
75a70cf9 5478 gimple load = last_stmt (load_bb), store = last_stmt (store_bb);
5479 tree loaded_val = gimple_omp_atomic_load_lhs (load);
5480 tree addr = gimple_omp_atomic_load_rhs (load);
5481 tree stored_val = gimple_omp_atomic_store_val (store);
cb7f680b 5482 tree type = TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (addr)));
5483 HOST_WIDE_INT index;
5484
5485 /* Make sure the type is one of the supported sizes. */
5486 index = tree_low_cst (TYPE_SIZE_UNIT (type), 1);
5487 index = exact_log2 (index);
5488 if (index >= 0 && index <= 4)
5489 {
5490 unsigned int align = TYPE_ALIGN_UNIT (type);
5491
5492 /* __sync builtins require strict data alignment. */
dcf7024c 5493 if (exact_log2 (align) >= index)
cb7f680b 5494 {
3ec11c49 5495 /* Atomic load. */
2169f33b 5496 if (loaded_val == stored_val
5497 && (GET_MODE_CLASS (TYPE_MODE (type)) == MODE_INT
5498 || GET_MODE_CLASS (TYPE_MODE (type)) == MODE_FLOAT)
5499 && GET_MODE_BITSIZE (TYPE_MODE (type)) <= BITS_PER_WORD
3ec11c49 5500 && expand_omp_atomic_load (load_bb, addr, loaded_val, index))
2169f33b 5501 return;
5502
3ec11c49 5503 /* Atomic store. */
2169f33b 5504 if ((GET_MODE_CLASS (TYPE_MODE (type)) == MODE_INT
5505 || GET_MODE_CLASS (TYPE_MODE (type)) == MODE_FLOAT)
5506 && GET_MODE_BITSIZE (TYPE_MODE (type)) <= BITS_PER_WORD
5507 && store_bb == single_succ (load_bb)
5508 && first_stmt (store_bb) == store
3ec11c49 5509 && expand_omp_atomic_store (load_bb, addr, loaded_val,
5510 stored_val, index))
2169f33b 5511 return;
5512
cb7f680b 5513 /* When possible, use specialized atomic update functions. */
5514 if ((INTEGRAL_TYPE_P (type) || POINTER_TYPE_P (type))
3ec11c49 5515 && store_bb == single_succ (load_bb)
5516 && expand_omp_atomic_fetch_op (load_bb, addr,
5517 loaded_val, stored_val, index))
5518 return;
cb7f680b 5519
5520 /* If we don't have specialized __sync builtins, try and implement
5521 as a compare and swap loop. */
5522 if (expand_omp_atomic_pipeline (load_bb, store_bb, addr,
5523 loaded_val, stored_val, index))
5524 return;
5525 }
5526 }
5527
5528 /* The ultimate fallback is wrapping the operation in a mutex. */
5529 expand_omp_atomic_mutex (load_bb, store_bb, addr, loaded_val, stored_val);
5530}
5531
1e8e9920 5532
773c5ba7 5533/* Expand the parallel region tree rooted at REGION. Expansion
5534 proceeds in depth-first order. Innermost regions are expanded
5535 first. This way, parallel regions that require a new function to
75a70cf9 5536 be created (e.g., GIMPLE_OMP_PARALLEL) can be expanded without having any
773c5ba7 5537 internal dependencies in their body. */
5538
5539static void
5540expand_omp (struct omp_region *region)
5541{
5542 while (region)
5543 {
1d22f541 5544 location_t saved_location;
5545
d1d5b012 5546 /* First, determine whether this is a combined parallel+workshare
5547 region. */
75a70cf9 5548 if (region->type == GIMPLE_OMP_PARALLEL)
d1d5b012 5549 determine_parallel_type (region);
5550
773c5ba7 5551 if (region->inner)
5552 expand_omp (region->inner);
5553
1d22f541 5554 saved_location = input_location;
75a70cf9 5555 if (gimple_has_location (last_stmt (region->entry)))
5556 input_location = gimple_location (last_stmt (region->entry));
1d22f541 5557
61e47ac8 5558 switch (region->type)
773c5ba7 5559 {
75a70cf9 5560 case GIMPLE_OMP_PARALLEL:
5561 case GIMPLE_OMP_TASK:
fd6481cf 5562 expand_omp_taskreg (region);
5563 break;
5564
75a70cf9 5565 case GIMPLE_OMP_FOR:
61e47ac8 5566 expand_omp_for (region);
5567 break;
773c5ba7 5568
75a70cf9 5569 case GIMPLE_OMP_SECTIONS:
61e47ac8 5570 expand_omp_sections (region);
5571 break;
773c5ba7 5572
75a70cf9 5573 case GIMPLE_OMP_SECTION:
61e47ac8 5574 /* Individual omp sections are handled together with their
75a70cf9 5575 parent GIMPLE_OMP_SECTIONS region. */
61e47ac8 5576 break;
773c5ba7 5577
75a70cf9 5578 case GIMPLE_OMP_SINGLE:
61e47ac8 5579 expand_omp_single (region);
5580 break;
773c5ba7 5581
75a70cf9 5582 case GIMPLE_OMP_MASTER:
5583 case GIMPLE_OMP_ORDERED:
5584 case GIMPLE_OMP_CRITICAL:
61e47ac8 5585 expand_omp_synch (region);
5586 break;
773c5ba7 5587
75a70cf9 5588 case GIMPLE_OMP_ATOMIC_LOAD:
cb7f680b 5589 expand_omp_atomic (region);
5590 break;
5591
61e47ac8 5592 default:
5593 gcc_unreachable ();
5594 }
cc5982dc 5595
1d22f541 5596 input_location = saved_location;
773c5ba7 5597 region = region->next;
5598 }
5599}
5600
5601
5602/* Helper for build_omp_regions. Scan the dominator tree starting at
28c92cbb 5603 block BB. PARENT is the region that contains BB. If SINGLE_TREE is
 5604   true, the function ends once a single tree is built (otherwise, a whole
5605 forest of OMP constructs may be built). */
773c5ba7 5606
5607static void
28c92cbb 5608build_omp_regions_1 (basic_block bb, struct omp_region *parent,
5609 bool single_tree)
773c5ba7 5610{
75a70cf9 5611 gimple_stmt_iterator gsi;
5612 gimple stmt;
773c5ba7 5613 basic_block son;
5614
75a70cf9 5615 gsi = gsi_last_bb (bb);
5616 if (!gsi_end_p (gsi) && is_gimple_omp (gsi_stmt (gsi)))
773c5ba7 5617 {
5618 struct omp_region *region;
75a70cf9 5619 enum gimple_code code;
773c5ba7 5620
75a70cf9 5621 stmt = gsi_stmt (gsi);
5622 code = gimple_code (stmt);
5623 if (code == GIMPLE_OMP_RETURN)
773c5ba7 5624 {
5625 /* STMT is the return point out of region PARENT. Mark it
5626 as the exit point and make PARENT the immediately
5627 enclosing region. */
5628 gcc_assert (parent);
5629 region = parent;
61e47ac8 5630 region->exit = bb;
773c5ba7 5631 parent = parent->outer;
773c5ba7 5632 }
75a70cf9 5633 else if (code == GIMPLE_OMP_ATOMIC_STORE)
cb7f680b 5634 {
75a70cf9 5635	  /* GIMPLE_OMP_ATOMIC_STORE is analogous to
5636 GIMPLE_OMP_RETURN, but matches with
5637 GIMPLE_OMP_ATOMIC_LOAD. */
cb7f680b 5638 gcc_assert (parent);
75a70cf9 5639 gcc_assert (parent->type == GIMPLE_OMP_ATOMIC_LOAD);
cb7f680b 5640 region = parent;
5641 region->exit = bb;
5642 parent = parent->outer;
5643 }
75a70cf9 5645 else if (code == GIMPLE_OMP_CONTINUE)
61e47ac8 5646 {
5647 gcc_assert (parent);
5648 parent->cont = bb;
5649 }
75a70cf9 5650 else if (code == GIMPLE_OMP_SECTIONS_SWITCH)
ac6e3339 5651 {
75a70cf9 5652 /* GIMPLE_OMP_SECTIONS_SWITCH is part of
5653 GIMPLE_OMP_SECTIONS, and we do nothing for it. */
5654 ;
ac6e3339 5655 }
773c5ba7 5656 else
5657 {
5658 /* Otherwise, this directive becomes the parent for a new
5659 region. */
61e47ac8 5660 region = new_omp_region (bb, code, parent);
773c5ba7 5661 parent = region;
5662 }
773c5ba7 5663 }
5664
28c92cbb 5665 if (single_tree && !parent)
5666 return;
5667
773c5ba7 5668 for (son = first_dom_son (CDI_DOMINATORS, bb);
5669 son;
5670 son = next_dom_son (CDI_DOMINATORS, son))
28c92cbb 5671 build_omp_regions_1 (son, parent, single_tree);
5672}
5673
5674/* Builds the tree of OMP regions rooted at ROOT, storing it to
5675 root_omp_region. */
5676
5677static void
5678build_omp_regions_root (basic_block root)
5679{
5680 gcc_assert (root_omp_region == NULL);
5681 build_omp_regions_1 (root, NULL, true);
5682 gcc_assert (root_omp_region != NULL);
773c5ba7 5683}
5684
28c92cbb 5685/* Expand the OMP construct (and its subconstructs) starting in HEAD.  */
5686
5687void
5688omp_expand_local (basic_block head)
5689{
5690 build_omp_regions_root (head);
5691 if (dump_file && (dump_flags & TDF_DETAILS))
5692 {
5693 fprintf (dump_file, "\nOMP region tree\n\n");
5694 dump_omp_region (dump_file, root_omp_region, 0);
5695 fprintf (dump_file, "\n");
5696 }
5697
5698 remove_exit_barriers (root_omp_region);
5699 expand_omp (root_omp_region);
5700
5701 free_omp_regions ();
5702}
773c5ba7 5703
 5704/* Scan the CFG and build a tree of OMP regions, storing the root of
 5705   the OMP region tree in root_omp_region.  */
5706
5707static void
5708build_omp_regions (void)
5709{
61e47ac8 5710 gcc_assert (root_omp_region == NULL);
773c5ba7 5711 calculate_dominance_info (CDI_DOMINATORS);
28c92cbb 5712 build_omp_regions_1 (ENTRY_BLOCK_PTR, NULL, false);
773c5ba7 5713}
5714
773c5ba7 5715/* Main entry point for expanding OMP-GIMPLE into runtime calls. */
5716
2a1990e9 5717static unsigned int
773c5ba7 5718execute_expand_omp (void)
5719{
5720 build_omp_regions ();
5721
61e47ac8 5722 if (!root_omp_region)
5723 return 0;
773c5ba7 5724
61e47ac8 5725 if (dump_file)
5726 {
5727 fprintf (dump_file, "\nOMP region tree\n\n");
5728 dump_omp_region (dump_file, root_omp_region, 0);
5729 fprintf (dump_file, "\n");
773c5ba7 5730 }
61e47ac8 5731
5732 remove_exit_barriers (root_omp_region);
5733
5734 expand_omp (root_omp_region);
5735
61e47ac8 5736 cleanup_tree_cfg ();
5737
5738 free_omp_regions ();
5739
2a1990e9 5740 return 0;
773c5ba7 5741}
5742
79acaae1 5743/* OMP expansion -- the default pass, run before creation of SSA form. */
5744
773c5ba7 5745static bool
5746gate_expand_omp (void)
5747{
852f689e 5748 return (flag_openmp != 0 && !seen_error ());
773c5ba7 5749}
5750
48e1416a 5751struct gimple_opt_pass pass_expand_omp =
773c5ba7 5752{
20099e35 5753 {
5754 GIMPLE_PASS,
773c5ba7 5755 "ompexp", /* name */
c7875731 5756 OPTGROUP_NONE, /* optinfo_flags */
773c5ba7 5757 gate_expand_omp, /* gate */
5758 execute_expand_omp, /* execute */
5759 NULL, /* sub */
5760 NULL, /* next */
5761 0, /* static_pass_number */
0b1615c1 5762 TV_NONE, /* tv_id */
773c5ba7 5763 PROP_gimple_any, /* properties_required */
41709826 5764 0, /* properties_provided */
773c5ba7 5765 0, /* properties_destroyed */
5766 0, /* todo_flags_start */
771e2890 5767 0 /* todo_flags_finish */
20099e35 5768 }
773c5ba7 5769};
5770\f
5771/* Routines to lower OpenMP directives into OMP-GIMPLE. */
5772
75a70cf9 5773/* Lower the OpenMP sections directive in the current statement in GSI_P.
5774 CTX is the enclosing OMP context for the current statement. */
773c5ba7 5775
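/* A sketch of the lowered shape (illustrative only):

	<rec input clauses>
	GIMPLE_OMP_SECTIONS <.section control var>
	GIMPLE_OMP_SECTIONS_SWITCH
	<bind: one arm per section, each ending in GIMPLE_OMP_RETURN>
	GIMPLE_OMP_CONTINUE (.section, .section)
	<reduction clauses> <dtor list>
	GIMPLE_OMP_RETURN [nowait]

   which pass_expand_omp later turns into a switch on the section
   number (see expand_omp_sections above).  */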
5776static void
75a70cf9 5777lower_omp_sections (gimple_stmt_iterator *gsi_p, omp_context *ctx)
773c5ba7 5778{
75a70cf9 5779 tree block, control;
5780 gimple_stmt_iterator tgsi;
75a70cf9 5781 gimple stmt, new_stmt, bind, t;
e3a19533 5782 gimple_seq ilist, dlist, olist, new_body;
dac18d1a 5783 struct gimplify_ctx gctx;
773c5ba7 5784
75a70cf9 5785 stmt = gsi_stmt (*gsi_p);
773c5ba7 5786
dac18d1a 5787 push_gimplify_context (&gctx);
773c5ba7 5788
5789 dlist = NULL;
5790 ilist = NULL;
75a70cf9 5791 lower_rec_input_clauses (gimple_omp_sections_clauses (stmt),
5792 &ilist, &dlist, ctx);
773c5ba7 5793
e3a19533 5794 new_body = gimple_omp_body (stmt);
5795 gimple_omp_set_body (stmt, NULL);
5796 tgsi = gsi_start (new_body);
5797 for (; !gsi_end_p (tgsi); gsi_next (&tgsi))
773c5ba7 5798 {
5799 omp_context *sctx;
75a70cf9 5800 gimple sec_start;
773c5ba7 5801
75a70cf9 5802 sec_start = gsi_stmt (tgsi);
773c5ba7 5803 sctx = maybe_lookup_ctx (sec_start);
5804 gcc_assert (sctx);
5805
e3a19533 5806 lower_omp (gimple_omp_body_ptr (sec_start), sctx);
5807 gsi_insert_seq_after (&tgsi, gimple_omp_body (sec_start),
5808 GSI_CONTINUE_LINKING);
75a70cf9 5809 gimple_omp_set_body (sec_start, NULL);
773c5ba7 5810
e3a19533 5811 if (gsi_one_before_end_p (tgsi))
773c5ba7 5812 {
75a70cf9 5813 gimple_seq l = NULL;
5814 lower_lastprivate_clauses (gimple_omp_sections_clauses (stmt), NULL,
773c5ba7 5815 &l, ctx);
e3a19533 5816 gsi_insert_seq_after (&tgsi, l, GSI_CONTINUE_LINKING);
75a70cf9 5817 gimple_omp_section_set_last (sec_start);
773c5ba7 5818 }
48e1416a 5819
e3a19533 5820 gsi_insert_after (&tgsi, gimple_build_omp_return (false),
5821 GSI_CONTINUE_LINKING);
773c5ba7 5822 }
1e8e9920 5823
5824 block = make_node (BLOCK);
e3a19533 5825 bind = gimple_build_bind (NULL, new_body, block);
1e8e9920 5826
75a70cf9 5827 olist = NULL;
5828 lower_reduction_clauses (gimple_omp_sections_clauses (stmt), &olist, ctx);
773c5ba7 5829
1d22f541 5830 block = make_node (BLOCK);
75a70cf9 5831 new_stmt = gimple_build_bind (NULL, NULL, block);
e3a19533 5832 gsi_replace (gsi_p, new_stmt, true);
773c5ba7 5833
1d22f541 5834 pop_gimplify_context (new_stmt);
75a70cf9 5835 gimple_bind_append_vars (new_stmt, ctx->block_vars);
5836 BLOCK_VARS (block) = gimple_bind_vars (bind);
1d22f541 5837 if (BLOCK_VARS (block))
5838 TREE_USED (block) = 1;
5839
75a70cf9 5840 new_body = NULL;
5841 gimple_seq_add_seq (&new_body, ilist);
5842 gimple_seq_add_stmt (&new_body, stmt);
5843 gimple_seq_add_stmt (&new_body, gimple_build_omp_sections_switch ());
5844 gimple_seq_add_stmt (&new_body, bind);
61e47ac8 5845
ac6e3339 5846 control = create_tmp_var (unsigned_type_node, ".section");
75a70cf9 5847 t = gimple_build_omp_continue (control, control);
5848 gimple_omp_sections_set_control (stmt, control);
5849 gimple_seq_add_stmt (&new_body, t);
61e47ac8 5850
75a70cf9 5851 gimple_seq_add_seq (&new_body, olist);
5852 gimple_seq_add_seq (&new_body, dlist);
773c5ba7 5853
75a70cf9 5854 new_body = maybe_catch_exception (new_body);
aade31a0 5855
75a70cf9 5856 t = gimple_build_omp_return
5857 (!!find_omp_clause (gimple_omp_sections_clauses (stmt),
5858 OMP_CLAUSE_NOWAIT));
5859 gimple_seq_add_stmt (&new_body, t);
61e47ac8 5860
75a70cf9 5861 gimple_bind_set_body (new_stmt, new_body);
1e8e9920 5862}
5863
5864
773c5ba7 5865/* A subroutine of lower_omp_single. Expand the simple form of
75a70cf9 5866 a GIMPLE_OMP_SINGLE, without a copyprivate clause:
1e8e9920 5867
5868 if (GOMP_single_start ())
5869 BODY;
5870 [ GOMP_barrier (); ] -> unless 'nowait' is present.
773c5ba7 5871
5872 FIXME. It may be better to delay expanding the logic of this until
5873 pass_expand_omp. The expanded logic may make the job more difficult
 5874   for a synchronization analysis pass.  */
1e8e9920 5875
5876static void
75a70cf9 5877lower_omp_single_simple (gimple single_stmt, gimple_seq *pre_p)
1e8e9920 5878{
e60a6f7b 5879 location_t loc = gimple_location (single_stmt);
5880 tree tlabel = create_artificial_label (loc);
5881 tree flabel = create_artificial_label (loc);
75a70cf9 5882 gimple call, cond;
5883 tree lhs, decl;
5884
b9a16870 5885 decl = builtin_decl_explicit (BUILT_IN_GOMP_SINGLE_START);
75a70cf9 5886 lhs = create_tmp_var (TREE_TYPE (TREE_TYPE (decl)), NULL);
5887 call = gimple_build_call (decl, 0);
5888 gimple_call_set_lhs (call, lhs);
5889 gimple_seq_add_stmt (pre_p, call);
5890
5891 cond = gimple_build_cond (EQ_EXPR, lhs,
389dd41b 5892 fold_convert_loc (loc, TREE_TYPE (lhs),
5893 boolean_true_node),
75a70cf9 5894 tlabel, flabel);
5895 gimple_seq_add_stmt (pre_p, cond);
5896 gimple_seq_add_stmt (pre_p, gimple_build_label (tlabel));
5897 gimple_seq_add_seq (pre_p, gimple_omp_body (single_stmt));
5898 gimple_seq_add_stmt (pre_p, gimple_build_label (flabel));
1e8e9920 5899}
5900
773c5ba7 5901
5902/* A subroutine of lower_omp_single. Expand the simple form of
75a70cf9 5903 a GIMPLE_OMP_SINGLE, with a copyprivate clause:
1e8e9920 5904
5905 #pragma omp single copyprivate (a, b, c)
5906
5907 Create a new structure to hold copies of 'a', 'b' and 'c' and emit:
5908
5909 {
5910 if ((copyout_p = GOMP_single_copy_start ()) == NULL)
5911 {
5912 BODY;
5913 copyout.a = a;
5914 copyout.b = b;
5915 copyout.c = c;
5916 GOMP_single_copy_end (&copyout);
5917 }
5918 else
5919 {
5920 a = copyout_p->a;
5921 b = copyout_p->b;
5922 c = copyout_p->c;
5923 }
5924 GOMP_barrier ();
5925 }
773c5ba7 5926
5927 FIXME. It may be better to delay expanding the logic of this until
5928 pass_expand_omp. The expanded logic may make the job more difficult
 5929   for a synchronization analysis pass.  */
1e8e9920 5930
5931static void
75a70cf9 5932lower_omp_single_copy (gimple single_stmt, gimple_seq *pre_p, omp_context *ctx)
1e8e9920 5933{
b9a16870 5934 tree ptr_type, t, l0, l1, l2, bfn_decl;
75a70cf9 5935 gimple_seq copyin_seq;
e60a6f7b 5936 location_t loc = gimple_location (single_stmt);
1e8e9920 5937
5938 ctx->sender_decl = create_tmp_var (ctx->record_type, ".omp_copy_o");
5939
5940 ptr_type = build_pointer_type (ctx->record_type);
5941 ctx->receiver_decl = create_tmp_var (ptr_type, ".omp_copy_i");
5942
e60a6f7b 5943 l0 = create_artificial_label (loc);
5944 l1 = create_artificial_label (loc);
5945 l2 = create_artificial_label (loc);
1e8e9920 5946
b9a16870 5947 bfn_decl = builtin_decl_explicit (BUILT_IN_GOMP_SINGLE_COPY_START);
5948 t = build_call_expr_loc (loc, bfn_decl, 0);
389dd41b 5949 t = fold_convert_loc (loc, ptr_type, t);
75a70cf9 5950 gimplify_assign (ctx->receiver_decl, t, pre_p);
1e8e9920 5951
5952 t = build2 (EQ_EXPR, boolean_type_node, ctx->receiver_decl,
5953 build_int_cst (ptr_type, 0));
5954 t = build3 (COND_EXPR, void_type_node, t,
5955 build_and_jump (&l0), build_and_jump (&l1));
5956 gimplify_and_add (t, pre_p);
5957
75a70cf9 5958 gimple_seq_add_stmt (pre_p, gimple_build_label (l0));
1e8e9920 5959
75a70cf9 5960 gimple_seq_add_seq (pre_p, gimple_omp_body (single_stmt));
1e8e9920 5961
5962 copyin_seq = NULL;
75a70cf9 5963 lower_copyprivate_clauses (gimple_omp_single_clauses (single_stmt), pre_p,
1e8e9920 5964 &copyin_seq, ctx);
5965
389dd41b 5966 t = build_fold_addr_expr_loc (loc, ctx->sender_decl);
b9a16870 5967 bfn_decl = builtin_decl_explicit (BUILT_IN_GOMP_SINGLE_COPY_END);
5968 t = build_call_expr_loc (loc, bfn_decl, 1, t);
1e8e9920 5969 gimplify_and_add (t, pre_p);
5970
5971 t = build_and_jump (&l2);
5972 gimplify_and_add (t, pre_p);
5973
75a70cf9 5974 gimple_seq_add_stmt (pre_p, gimple_build_label (l1));
1e8e9920 5975
75a70cf9 5976 gimple_seq_add_seq (pre_p, copyin_seq);
1e8e9920 5977
75a70cf9 5978 gimple_seq_add_stmt (pre_p, gimple_build_label (l2));
1e8e9920 5979}
5980
773c5ba7 5981
1e8e9920 5982/* Lower code for an OpenMP single directive.  */
5983
5984static void
75a70cf9 5985lower_omp_single (gimple_stmt_iterator *gsi_p, omp_context *ctx)
1e8e9920 5986{
75a70cf9 5987 tree block;
5988 gimple t, bind, single_stmt = gsi_stmt (*gsi_p);
5989 gimple_seq bind_body, dlist;
dac18d1a 5990 struct gimplify_ctx gctx;
1e8e9920 5991
dac18d1a 5992 push_gimplify_context (&gctx);
1e8e9920 5993
e3a19533 5994 block = make_node (BLOCK);
5995 bind = gimple_build_bind (NULL, NULL, block);
5996 gsi_replace (gsi_p, bind, true);
75a70cf9 5997 bind_body = NULL;
e3a19533 5998 dlist = NULL;
75a70cf9 5999 lower_rec_input_clauses (gimple_omp_single_clauses (single_stmt),
6000 &bind_body, &dlist, ctx);
e3a19533 6001 lower_omp (gimple_omp_body_ptr (single_stmt), ctx);
1e8e9920 6002
75a70cf9 6003 gimple_seq_add_stmt (&bind_body, single_stmt);
1e8e9920 6004
6005 if (ctx->record_type)
75a70cf9 6006 lower_omp_single_copy (single_stmt, &bind_body, ctx);
1e8e9920 6007 else
75a70cf9 6008 lower_omp_single_simple (single_stmt, &bind_body);
6009
6010 gimple_omp_set_body (single_stmt, NULL);
1e8e9920 6011
75a70cf9 6012 gimple_seq_add_seq (&bind_body, dlist);
61e47ac8 6013
75a70cf9 6014 bind_body = maybe_catch_exception (bind_body);
61e47ac8 6015
48e1416a 6016 t = gimple_build_omp_return
75a70cf9 6017 (!!find_omp_clause (gimple_omp_single_clauses (single_stmt),
6018 OMP_CLAUSE_NOWAIT));
6019 gimple_seq_add_stmt (&bind_body, t);
e3a19533 6020 gimple_bind_set_body (bind, bind_body);
61e47ac8 6021
1e8e9920 6022 pop_gimplify_context (bind);
773c5ba7 6023
75a70cf9 6024 gimple_bind_append_vars (bind, ctx->block_vars);
6025 BLOCK_VARS (block) = ctx->block_vars;
1d22f541 6026 if (BLOCK_VARS (block))
6027 TREE_USED (block) = 1;
1e8e9920 6028}
6029
773c5ba7 6030
1e8e9920 6031/* Lower code for an OpenMP master directive.  */
6032
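/* The guard produced is equivalent to (a sketch):

	if (omp_get_thread_num () != 0) goto lab;
	<body>
     lab:

   so only the master thread (thread number 0) executes the body.  */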
6033static void
75a70cf9 6034lower_omp_master (gimple_stmt_iterator *gsi_p, omp_context *ctx)
1e8e9920 6035{
b9a16870 6036 tree block, lab = NULL, x, bfn_decl;
75a70cf9 6037 gimple stmt = gsi_stmt (*gsi_p), bind;
389dd41b 6038 location_t loc = gimple_location (stmt);
75a70cf9 6039 gimple_seq tseq;
dac18d1a 6040 struct gimplify_ctx gctx;
1e8e9920 6041
dac18d1a 6042 push_gimplify_context (&gctx);
1e8e9920 6043
6044 block = make_node (BLOCK);
e3a19533 6045 bind = gimple_build_bind (NULL, NULL, block);
6046 gsi_replace (gsi_p, bind, true);
6047 gimple_bind_add_stmt (bind, stmt);
61e47ac8 6048
b9a16870 6049 bfn_decl = builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM);
6050 x = build_call_expr_loc (loc, bfn_decl, 0);
1e8e9920 6051 x = build2 (EQ_EXPR, boolean_type_node, x, integer_zero_node);
6052 x = build3 (COND_EXPR, void_type_node, x, NULL, build_and_jump (&lab));
75a70cf9 6053 tseq = NULL;
6054 gimplify_and_add (x, &tseq);
6055 gimple_bind_add_seq (bind, tseq);
1e8e9920 6056
e3a19533 6057 lower_omp (gimple_omp_body_ptr (stmt), ctx);
75a70cf9 6058 gimple_omp_set_body (stmt, maybe_catch_exception (gimple_omp_body (stmt)));
6059 gimple_bind_add_seq (bind, gimple_omp_body (stmt));
6060 gimple_omp_set_body (stmt, NULL);
1e8e9920 6061
75a70cf9 6062 gimple_bind_add_stmt (bind, gimple_build_label (lab));
61e47ac8 6063
75a70cf9 6064 gimple_bind_add_stmt (bind, gimple_build_omp_return (true));
61e47ac8 6065
1e8e9920 6066 pop_gimplify_context (bind);
773c5ba7 6067
75a70cf9 6068 gimple_bind_append_vars (bind, ctx->block_vars);
6069 BLOCK_VARS (block) = ctx->block_vars;
1e8e9920 6070}
6071
773c5ba7 6072
1e8e9920 6073/* Lower code for an OpenMP ordered directive.  */
6074
6075static void
75a70cf9 6076lower_omp_ordered (gimple_stmt_iterator *gsi_p, omp_context *ctx)
1e8e9920 6077{
75a70cf9 6078 tree block;
6079 gimple stmt = gsi_stmt (*gsi_p), bind, x;
dac18d1a 6080 struct gimplify_ctx gctx;
1e8e9920 6081
dac18d1a 6082 push_gimplify_context (&gctx);
1e8e9920 6083
6084 block = make_node (BLOCK);
e3a19533 6085 bind = gimple_build_bind (NULL, NULL, block);
6086 gsi_replace (gsi_p, bind, true);
6087 gimple_bind_add_stmt (bind, stmt);
61e47ac8 6088
b9a16870 6089 x = gimple_build_call (builtin_decl_explicit (BUILT_IN_GOMP_ORDERED_START),
6090 0);
75a70cf9 6091 gimple_bind_add_stmt (bind, x);
1e8e9920 6092
e3a19533 6093 lower_omp (gimple_omp_body_ptr (stmt), ctx);
75a70cf9 6094 gimple_omp_set_body (stmt, maybe_catch_exception (gimple_omp_body (stmt)));
6095 gimple_bind_add_seq (bind, gimple_omp_body (stmt));
6096 gimple_omp_set_body (stmt, NULL);
1e8e9920 6097
b9a16870 6098 x = gimple_build_call (builtin_decl_explicit (BUILT_IN_GOMP_ORDERED_END), 0);
75a70cf9 6099 gimple_bind_add_stmt (bind, x);
61e47ac8 6100
75a70cf9 6101 gimple_bind_add_stmt (bind, gimple_build_omp_return (true));
61e47ac8 6102
1e8e9920 6103 pop_gimplify_context (bind);
773c5ba7 6104
75a70cf9 6105 gimple_bind_append_vars (bind, ctx->block_vars);
6106 BLOCK_VARS (block) = gimple_bind_vars (bind);
1e8e9920 6107}
6108
1e8e9920 6109
75a70cf9 6110/* Gimplify a GIMPLE_OMP_CRITICAL statement. This is a relatively simple
1e8e9920 6111   substitution of a couple of function calls.  But the NAMED case
 6112   requires that languages coordinate a symbol name.  It is therefore
6113 best put here in common code. */
6114
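/* E.g. (illustrative): "#pragma omp critical (foo)" in any translation
   unit locks the common symbol ".gomp_critical_user_foo" built below,
   so every language front end and every TU agrees on which mutex
   guards the name "foo".  */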
6115static GTY((param1_is (tree), param2_is (tree)))
6116 splay_tree critical_name_mutexes;
6117
6118static void
75a70cf9 6119lower_omp_critical (gimple_stmt_iterator *gsi_p, omp_context *ctx)
1e8e9920 6120{
75a70cf9 6121 tree block;
6122 tree name, lock, unlock;
6123 gimple stmt = gsi_stmt (*gsi_p), bind;
389dd41b 6124 location_t loc = gimple_location (stmt);
75a70cf9 6125 gimple_seq tbody;
dac18d1a 6126 struct gimplify_ctx gctx;
1e8e9920 6127
75a70cf9 6128 name = gimple_omp_critical_name (stmt);
1e8e9920 6129 if (name)
6130 {
c2f47e15 6131 tree decl;
1e8e9920 6132 splay_tree_node n;
6133
6134 if (!critical_name_mutexes)
6135 critical_name_mutexes
ba72912a 6136 = splay_tree_new_ggc (splay_tree_compare_pointers,
6137 ggc_alloc_splay_tree_tree_node_tree_node_splay_tree_s,
6138 ggc_alloc_splay_tree_tree_node_tree_node_splay_tree_node_s);
1e8e9920 6139
6140 n = splay_tree_lookup (critical_name_mutexes, (splay_tree_key) name);
6141 if (n == NULL)
6142 {
6143 char *new_str;
6144
6145 decl = create_tmp_var_raw (ptr_type_node, NULL);
6146
6147 new_str = ACONCAT ((".gomp_critical_user_",
6148 IDENTIFIER_POINTER (name), NULL));
6149 DECL_NAME (decl) = get_identifier (new_str);
6150 TREE_PUBLIC (decl) = 1;
6151 TREE_STATIC (decl) = 1;
6152 DECL_COMMON (decl) = 1;
6153 DECL_ARTIFICIAL (decl) = 1;
6154 DECL_IGNORED_P (decl) = 1;
1d416bd7 6155 varpool_finalize_decl (decl);
1e8e9920 6156
6157 splay_tree_insert (critical_name_mutexes, (splay_tree_key) name,
6158 (splay_tree_value) decl);
6159 }
6160 else
6161 decl = (tree) n->value;
6162
b9a16870 6163 lock = builtin_decl_explicit (BUILT_IN_GOMP_CRITICAL_NAME_START);
389dd41b 6164 lock = build_call_expr_loc (loc, lock, 1, build_fold_addr_expr_loc (loc, decl));
1e8e9920 6165
b9a16870 6166 unlock = builtin_decl_explicit (BUILT_IN_GOMP_CRITICAL_NAME_END);
389dd41b 6167 unlock = build_call_expr_loc (loc, unlock, 1,
6168 build_fold_addr_expr_loc (loc, decl));
1e8e9920 6169 }
6170 else
6171 {
b9a16870 6172 lock = builtin_decl_explicit (BUILT_IN_GOMP_CRITICAL_START);
389dd41b 6173 lock = build_call_expr_loc (loc, lock, 0);
1e8e9920 6174
b9a16870 6175 unlock = builtin_decl_explicit (BUILT_IN_GOMP_CRITICAL_END);
389dd41b 6176 unlock = build_call_expr_loc (loc, unlock, 0);
1e8e9920 6177 }
6178
dac18d1a 6179 push_gimplify_context (&gctx);
1e8e9920 6180
6181 block = make_node (BLOCK);
e3a19533 6182 bind = gimple_build_bind (NULL, NULL, block);
6183 gsi_replace (gsi_p, bind, true);
6184 gimple_bind_add_stmt (bind, stmt);
61e47ac8 6185
75a70cf9 6186 tbody = gimple_bind_body (bind);
6187 gimplify_and_add (lock, &tbody);
6188 gimple_bind_set_body (bind, tbody);
1e8e9920 6189
e3a19533 6190 lower_omp (gimple_omp_body_ptr (stmt), ctx);
75a70cf9 6191 gimple_omp_set_body (stmt, maybe_catch_exception (gimple_omp_body (stmt)));
6192 gimple_bind_add_seq (bind, gimple_omp_body (stmt));
6193 gimple_omp_set_body (stmt, NULL);
1e8e9920 6194
75a70cf9 6195 tbody = gimple_bind_body (bind);
6196 gimplify_and_add (unlock, &tbody);
6197 gimple_bind_set_body (bind, tbody);
61e47ac8 6198
75a70cf9 6199 gimple_bind_add_stmt (bind, gimple_build_omp_return (true));
1e8e9920 6200
6201 pop_gimplify_context (bind);
75a70cf9 6202 gimple_bind_append_vars (bind, ctx->block_vars);
6203 BLOCK_VARS (block) = gimple_bind_vars (bind);
773c5ba7 6204}
6205
6206
6207/* A subroutine of lower_omp_for. Generate code to emit the predicate
6208 for a lastprivate clause. Given a loop control predicate of (V
6209 cond N2), we gate the clause on (!(V cond N2)). The lowered form
1e4afe3c 6210   is appended to *DLIST; iterator initialization is appended to
6211 *BODY_P. */
773c5ba7 6212
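/* E.g. (a sketch): for "#pragma omp for lastprivate (x)" over
   "for (i = 0; i < n; i++)", the step is 1, so the gate below is the
   strict test "i == n" rather than "i >= n", letting VRP deduce the
   value and remove a copy.  */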
6213static void
75a70cf9 6214lower_omp_for_lastprivate (struct omp_for_data *fd, gimple_seq *body_p,
6215 gimple_seq *dlist, struct omp_context *ctx)
773c5ba7 6216{
75a70cf9 6217 tree clauses, cond, vinit;
773c5ba7 6218 enum tree_code cond_code;
75a70cf9 6219 gimple_seq stmts;
48e1416a 6220
fd6481cf 6221 cond_code = fd->loop.cond_code;
773c5ba7 6222 cond_code = cond_code == LT_EXPR ? GE_EXPR : LE_EXPR;
6223
6224 /* When possible, use a strict equality expression. This can let VRP
6225 type optimizations deduce the value and remove a copy. */
fd6481cf 6226 if (host_integerp (fd->loop.step, 0))
773c5ba7 6227 {
fd6481cf 6228 HOST_WIDE_INT step = TREE_INT_CST_LOW (fd->loop.step);
773c5ba7 6229 if (step == 1 || step == -1)
6230 cond_code = EQ_EXPR;
6231 }
6232
fd6481cf 6233 cond = build2 (cond_code, boolean_type_node, fd->loop.v, fd->loop.n2);
773c5ba7 6234
75a70cf9 6235 clauses = gimple_omp_for_clauses (fd->for_stmt);
1e4afe3c 6236 stmts = NULL;
6237 lower_lastprivate_clauses (clauses, cond, &stmts, ctx);
75a70cf9 6238 if (!gimple_seq_empty_p (stmts))
1e4afe3c 6239 {
75a70cf9 6240 gimple_seq_add_seq (&stmts, *dlist);
fd6481cf 6241 *dlist = stmts;
1e4afe3c 6242
6243 /* Optimize: v = 0; is usually cheaper than v = some_other_constant. */
fd6481cf 6244 vinit = fd->loop.n1;
1e4afe3c 6245 if (cond_code == EQ_EXPR
fd6481cf 6246 && host_integerp (fd->loop.n2, 0)
6247 && ! integer_zerop (fd->loop.n2))
6248 vinit = build_int_cst (TREE_TYPE (fd->loop.v), 0);
1e4afe3c 6249
6250 /* Initialize the iterator variable, so that threads that don't execute
6251 any iterations don't execute the lastprivate clauses by accident. */
75a70cf9 6252 gimplify_assign (fd->loop.v, vinit, body_p);
1e4afe3c 6253 }
773c5ba7 6254}
6255
6256
6257/* Lower code for an OpenMP loop directive. */
6258
6259static void
75a70cf9 6260lower_omp_for (gimple_stmt_iterator *gsi_p, omp_context *ctx)
773c5ba7 6261{
75a70cf9 6262 tree *rhs_p, block;
773c5ba7 6263 struct omp_for_data fd;
75a70cf9 6264 gimple stmt = gsi_stmt (*gsi_p), new_stmt;
f018d957 6265 gimple_seq omp_for_body, body, dlist;
75a70cf9 6266 size_t i;
dac18d1a 6267 struct gimplify_ctx gctx;
773c5ba7 6268
dac18d1a 6269 push_gimplify_context (&gctx);
773c5ba7 6270
e3a19533 6271 lower_omp (gimple_omp_for_pre_body_ptr (stmt), ctx);
6272 lower_omp (gimple_omp_body_ptr (stmt), ctx);
773c5ba7 6273
1d22f541 6274 block = make_node (BLOCK);
75a70cf9 6275 new_stmt = gimple_build_bind (NULL, NULL, block);
e3a19533 6276 /* Replace at gsi right away, so that 'stmt' is no member
6277 of a sequence anymore as we're going to add to to a different
6278 one below. */
6279 gsi_replace (gsi_p, new_stmt, true);
1d22f541 6280
773c5ba7 6281 /* Move declaration of temporaries in the loop body before we make
6282 it go away. */
75a70cf9 6283 omp_for_body = gimple_omp_body (stmt);
6284 if (!gimple_seq_empty_p (omp_for_body)
6285 && gimple_code (gimple_seq_first_stmt (omp_for_body)) == GIMPLE_BIND)
6286 {
6287 tree vars = gimple_bind_vars (gimple_seq_first_stmt (omp_for_body));
6288 gimple_bind_append_vars (new_stmt, vars);
6289 }
773c5ba7 6290
75a70cf9 6291 /* The pre-body and input clauses go before the lowered GIMPLE_OMP_FOR. */
773c5ba7 6292 dlist = NULL;
75a70cf9 6293 body = NULL;
6294 lower_rec_input_clauses (gimple_omp_for_clauses (stmt), &body, &dlist, ctx);
6295 gimple_seq_add_seq (&body, gimple_omp_for_pre_body (stmt));
773c5ba7 6296
6297 /* Lower the header expressions. At this point, we can assume that
6298 the header is of the form:
6299
6300 #pragma omp for (V = VAL1; V {<|>|<=|>=} VAL2; V = V [+-] VAL3)
6301
6302 We just need to make sure that VAL1, VAL2 and VAL3 are lowered
6303 using the .omp_data_s mapping, if needed. */
75a70cf9 6304 for (i = 0; i < gimple_omp_for_collapse (stmt); i++)
fd6481cf 6305 {
75a70cf9 6306 rhs_p = gimple_omp_for_initial_ptr (stmt, i);
fd6481cf 6307 if (!is_gimple_min_invariant (*rhs_p))
75a70cf9 6308 *rhs_p = get_formal_tmp_var (*rhs_p, &body);
fd6481cf 6309
75a70cf9 6310 rhs_p = gimple_omp_for_final_ptr (stmt, i);
fd6481cf 6311 if (!is_gimple_min_invariant (*rhs_p))
75a70cf9 6312 *rhs_p = get_formal_tmp_var (*rhs_p, &body);
fd6481cf 6313
75a70cf9 6314 rhs_p = &TREE_OPERAND (gimple_omp_for_incr (stmt, i), 1);
fd6481cf 6315 if (!is_gimple_min_invariant (*rhs_p))
75a70cf9 6316 *rhs_p = get_formal_tmp_var (*rhs_p, &body);
fd6481cf 6317 }
773c5ba7 6318
6319 /* Once lowered, extract the bounds and clauses. */
fd6481cf 6320 extract_omp_for_data (stmt, &fd, NULL);
773c5ba7 6321
75a70cf9 6322 lower_omp_for_lastprivate (&fd, &body, &dlist, ctx);
773c5ba7 6323
75a70cf9 6324 gimple_seq_add_stmt (&body, stmt);
6325 gimple_seq_add_seq (&body, gimple_omp_body (stmt));
61e47ac8 6326
75a70cf9 6327 gimple_seq_add_stmt (&body, gimple_build_omp_continue (fd.loop.v,
6328 fd.loop.v));
61e47ac8 6329
773c5ba7 6330 /* After the loop, add exit clauses. */
75a70cf9 6331 lower_reduction_clauses (gimple_omp_for_clauses (stmt), &body, ctx);
6332 gimple_seq_add_seq (&body, dlist);
773c5ba7 6333
75a70cf9 6334 body = maybe_catch_exception (body);
aade31a0 6335
61e47ac8 6336 /* Region exit marker goes at the end of the loop body. */
75a70cf9 6337 gimple_seq_add_stmt (&body, gimple_build_omp_return (fd.have_nowait));
773c5ba7 6338
1d22f541 6339 pop_gimplify_context (new_stmt);
75a70cf9 6340
6341 gimple_bind_append_vars (new_stmt, ctx->block_vars);
6342 BLOCK_VARS (block) = gimple_bind_vars (new_stmt);
1d22f541 6343 if (BLOCK_VARS (block))
6344 TREE_USED (block) = 1;
773c5ba7 6345
75a70cf9 6346 gimple_bind_set_body (new_stmt, body);
6347 gimple_omp_set_body (stmt, NULL);
6348 gimple_omp_for_set_pre_body (stmt, NULL);
1e8e9920 6349}
6350
48e1416a 6351/* Callback for walk_stmts. Check if the current statement only contains
75a70cf9 6352   GIMPLE_OMP_FOR or GIMPLE_OMP_SECTIONS.  */
de7ef844 6353
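/* The tri-state kept in WI->INFO works as follows (spelled out here for
   clarity): 0 = nothing seen yet, 1 = exactly one worksharing construct
   seen, -1 = anything else, i.e. not a combined form.  */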
6354static tree
75a70cf9 6355check_combined_parallel (gimple_stmt_iterator *gsi_p,
6356 bool *handled_ops_p,
6357 struct walk_stmt_info *wi)
de7ef844 6358{
4077bf7a 6359 int *info = (int *) wi->info;
75a70cf9 6360 gimple stmt = gsi_stmt (*gsi_p);
de7ef844 6361
75a70cf9 6362 *handled_ops_p = true;
6363 switch (gimple_code (stmt))
de7ef844 6364 {
75a70cf9 6365 WALK_SUBSTMTS;
6366
6367 case GIMPLE_OMP_FOR:
6368 case GIMPLE_OMP_SECTIONS:
de7ef844 6369 *info = *info == 0 ? 1 : -1;
6370 break;
6371 default:
6372 *info = -1;
6373 break;
6374 }
6375 return NULL;
6376}
773c5ba7 6377
fd6481cf 6378struct omp_taskcopy_context
6379{
6380 /* This field must be at the beginning, as we do "inheritance": Some
6381 callback functions for tree-inline.c (e.g., omp_copy_decl)
6382 receive a copy_body_data pointer that is up-casted to an
6383 omp_context pointer. */
6384 copy_body_data cb;
6385 omp_context *ctx;
6386};
6387
6388static tree
6389task_copyfn_copy_decl (tree var, copy_body_data *cb)
6390{
6391 struct omp_taskcopy_context *tcctx = (struct omp_taskcopy_context *) cb;
6392
6393 if (splay_tree_lookup (tcctx->ctx->sfield_map, (splay_tree_key) var))
6394 return create_tmp_var (TREE_TYPE (var), NULL);
6395
6396 return var;
6397}
6398
6399static tree
6400task_copyfn_remap_type (struct omp_taskcopy_context *tcctx, tree orig_type)
6401{
6402 tree name, new_fields = NULL, type, f;
6403
6404 type = lang_hooks.types.make_type (RECORD_TYPE);
6405 name = DECL_NAME (TYPE_NAME (orig_type));
e60a6f7b 6406 name = build_decl (gimple_location (tcctx->ctx->stmt),
6407 TYPE_DECL, name, type);
fd6481cf 6408 TYPE_NAME (type) = name;
6409
6410 for (f = TYPE_FIELDS (orig_type); f ; f = TREE_CHAIN (f))
6411 {
6412 tree new_f = copy_node (f);
6413 DECL_CONTEXT (new_f) = type;
6414 TREE_TYPE (new_f) = remap_type (TREE_TYPE (f), &tcctx->cb);
6415 TREE_CHAIN (new_f) = new_fields;
75a70cf9 6416 walk_tree (&DECL_SIZE (new_f), copy_tree_body_r, &tcctx->cb, NULL);
6417 walk_tree (&DECL_SIZE_UNIT (new_f), copy_tree_body_r, &tcctx->cb, NULL);
6418 walk_tree (&DECL_FIELD_OFFSET (new_f), copy_tree_body_r,
6419 &tcctx->cb, NULL);
fd6481cf 6420 new_fields = new_f;
6421 *pointer_map_insert (tcctx->cb.decl_map, f) = new_f;
6422 }
6423 TYPE_FIELDS (type) = nreverse (new_fields);
6424 layout_type (type);
6425 return type;
6426}
6427
6428/* Create task copyfn. */
6429
6430static void
75a70cf9 6431create_task_copyfn (gimple task_stmt, omp_context *ctx)
fd6481cf 6432{
6433 struct function *child_cfun;
6434 tree child_fn, t, c, src, dst, f, sf, arg, sarg, decl;
6435 tree record_type, srecord_type, bind, list;
6436 bool record_needs_remap = false, srecord_needs_remap = false;
6437 splay_tree_node n;
6438 struct omp_taskcopy_context tcctx;
dac18d1a 6439 struct gimplify_ctx gctx;
389dd41b 6440 location_t loc = gimple_location (task_stmt);
fd6481cf 6441
75a70cf9 6442 child_fn = gimple_omp_task_copy_fn (task_stmt);
fd6481cf 6443 child_cfun = DECL_STRUCT_FUNCTION (child_fn);
6444 gcc_assert (child_cfun->cfg == NULL);
fd6481cf 6445 DECL_SAVED_TREE (child_fn) = alloc_stmt_list ();
6446
6447 /* Reset DECL_CONTEXT on function arguments. */
1767a056 6448 for (t = DECL_ARGUMENTS (child_fn); t; t = DECL_CHAIN (t))
fd6481cf 6449 DECL_CONTEXT (t) = child_fn;
6450
6451 /* Populate the function. */
dac18d1a 6452 push_gimplify_context (&gctx);
9078126c 6453 push_cfun (child_cfun);
fd6481cf 6454
6455 bind = build3 (BIND_EXPR, void_type_node, NULL, NULL, NULL);
6456 TREE_SIDE_EFFECTS (bind) = 1;
6457 list = NULL;
6458 DECL_SAVED_TREE (child_fn) = bind;
75a70cf9 6459 DECL_SOURCE_LOCATION (child_fn) = gimple_location (task_stmt);
fd6481cf 6460
6461 /* Remap src and dst argument types if needed. */
6462 record_type = ctx->record_type;
6463 srecord_type = ctx->srecord_type;
1767a056 6464 for (f = TYPE_FIELDS (record_type); f ; f = DECL_CHAIN (f))
fd6481cf 6465 if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
6466 {
6467 record_needs_remap = true;
6468 break;
6469 }
1767a056 6470 for (f = TYPE_FIELDS (srecord_type); f ; f = DECL_CHAIN (f))
fd6481cf 6471 if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
6472 {
6473 srecord_needs_remap = true;
6474 break;
6475 }
6476
6477 if (record_needs_remap || srecord_needs_remap)
6478 {
6479 memset (&tcctx, '\0', sizeof (tcctx));
6480 tcctx.cb.src_fn = ctx->cb.src_fn;
6481 tcctx.cb.dst_fn = child_fn;
53f79206 6482 tcctx.cb.src_node = cgraph_get_node (tcctx.cb.src_fn);
6483 gcc_checking_assert (tcctx.cb.src_node);
fd6481cf 6484 tcctx.cb.dst_node = tcctx.cb.src_node;
6485 tcctx.cb.src_cfun = ctx->cb.src_cfun;
6486 tcctx.cb.copy_decl = task_copyfn_copy_decl;
e38def9c 6487 tcctx.cb.eh_lp_nr = 0;
fd6481cf 6488 tcctx.cb.transform_call_graph_edges = CB_CGE_MOVE;
6489 tcctx.cb.decl_map = pointer_map_create ();
6490 tcctx.ctx = ctx;
6491
6492 if (record_needs_remap)
6493 record_type = task_copyfn_remap_type (&tcctx, record_type);
6494 if (srecord_needs_remap)
6495 srecord_type = task_copyfn_remap_type (&tcctx, srecord_type);
6496 }
6497 else
6498 tcctx.cb.decl_map = NULL;
6499
fd6481cf 6500 arg = DECL_ARGUMENTS (child_fn);
6501 TREE_TYPE (arg) = build_pointer_type (record_type);
1767a056 6502 sarg = DECL_CHAIN (arg);
fd6481cf 6503 TREE_TYPE (sarg) = build_pointer_type (srecord_type);
6504
 6505	  /* First pass: initialize the temporaries used in the sizes and field
 6506	     offsets of record_type and srecord_type. */
6507 if (tcctx.cb.decl_map)
75a70cf9 6508 for (c = gimple_omp_task_clauses (task_stmt); c; c = OMP_CLAUSE_CHAIN (c))
fd6481cf 6509 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_FIRSTPRIVATE)
6510 {
6511 tree *p;
6512
6513 decl = OMP_CLAUSE_DECL (c);
6514 p = (tree *) pointer_map_contains (tcctx.cb.decl_map, decl);
6515 if (p == NULL)
6516 continue;
6517 n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
6518 sf = (tree) n->value;
6519 sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
182cf5a9 6520 src = build_simple_mem_ref_loc (loc, sarg);
445d06b6 6521 src = omp_build_component_ref (src, sf);
75a70cf9 6522 t = build2 (MODIFY_EXPR, TREE_TYPE (*p), *p, src);
fd6481cf 6523 append_to_statement_list (t, &list);
6524 }
6525
 6526	  /* Second pass: copy shared var pointers and copy-construct non-VLA
 6527	     firstprivate vars. */
75a70cf9 6528 for (c = gimple_omp_task_clauses (task_stmt); c; c = OMP_CLAUSE_CHAIN (c))
fd6481cf 6529 switch (OMP_CLAUSE_CODE (c))
6530 {
6531 case OMP_CLAUSE_SHARED:
6532 decl = OMP_CLAUSE_DECL (c);
6533 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
6534 if (n == NULL)
6535 break;
6536 f = (tree) n->value;
6537 if (tcctx.cb.decl_map)
6538 f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
6539 n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
6540 sf = (tree) n->value;
6541 if (tcctx.cb.decl_map)
6542 sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
182cf5a9 6543 src = build_simple_mem_ref_loc (loc, sarg);
445d06b6 6544 src = omp_build_component_ref (src, sf);
182cf5a9 6545 dst = build_simple_mem_ref_loc (loc, arg);
445d06b6 6546 dst = omp_build_component_ref (dst, f);
75a70cf9 6547 t = build2 (MODIFY_EXPR, TREE_TYPE (dst), dst, src);
fd6481cf 6548 append_to_statement_list (t, &list);
6549 break;
6550 case OMP_CLAUSE_FIRSTPRIVATE:
6551 decl = OMP_CLAUSE_DECL (c);
6552 if (is_variable_sized (decl))
6553 break;
6554 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
6555 if (n == NULL)
6556 break;
6557 f = (tree) n->value;
6558 if (tcctx.cb.decl_map)
6559 f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
6560 n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
6561 if (n != NULL)
6562 {
6563 sf = (tree) n->value;
6564 if (tcctx.cb.decl_map)
6565 sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
182cf5a9 6566 src = build_simple_mem_ref_loc (loc, sarg);
445d06b6 6567 src = omp_build_component_ref (src, sf);
fd6481cf 6568 if (use_pointer_for_field (decl, NULL) || is_reference (decl))
182cf5a9 6569 src = build_simple_mem_ref_loc (loc, src);
fd6481cf 6570 }
6571 else
6572 src = decl;
182cf5a9 6573 dst = build_simple_mem_ref_loc (loc, arg);
445d06b6 6574 dst = omp_build_component_ref (dst, f);
fd6481cf 6575 t = lang_hooks.decls.omp_clause_copy_ctor (c, dst, src);
6576 append_to_statement_list (t, &list);
6577 break;
6578 case OMP_CLAUSE_PRIVATE:
6579 if (! OMP_CLAUSE_PRIVATE_OUTER_REF (c))
6580 break;
6581 decl = OMP_CLAUSE_DECL (c);
6582 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
6583 f = (tree) n->value;
6584 if (tcctx.cb.decl_map)
6585 f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
6586 n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
6587 if (n != NULL)
6588 {
6589 sf = (tree) n->value;
6590 if (tcctx.cb.decl_map)
6591 sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
182cf5a9 6592 src = build_simple_mem_ref_loc (loc, sarg);
445d06b6 6593 src = omp_build_component_ref (src, sf);
fd6481cf 6594 if (use_pointer_for_field (decl, NULL))
182cf5a9 6595 src = build_simple_mem_ref_loc (loc, src);
fd6481cf 6596 }
6597 else
6598 src = decl;
182cf5a9 6599 dst = build_simple_mem_ref_loc (loc, arg);
445d06b6 6600 dst = omp_build_component_ref (dst, f);
75a70cf9 6601 t = build2 (MODIFY_EXPR, TREE_TYPE (dst), dst, src);
fd6481cf 6602 append_to_statement_list (t, &list);
6603 break;
6604 default:
6605 break;
6606 }
6607
6608 /* Last pass: handle VLA firstprivates. */
6609 if (tcctx.cb.decl_map)
75a70cf9 6610 for (c = gimple_omp_task_clauses (task_stmt); c; c = OMP_CLAUSE_CHAIN (c))
fd6481cf 6611 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_FIRSTPRIVATE)
6612 {
6613 tree ind, ptr, df;
6614
6615 decl = OMP_CLAUSE_DECL (c);
6616 if (!is_variable_sized (decl))
6617 continue;
6618 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
6619 if (n == NULL)
6620 continue;
6621 f = (tree) n->value;
6622 f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
6623 gcc_assert (DECL_HAS_VALUE_EXPR_P (decl));
6624 ind = DECL_VALUE_EXPR (decl);
6625 gcc_assert (TREE_CODE (ind) == INDIRECT_REF);
6626 gcc_assert (DECL_P (TREE_OPERAND (ind, 0)));
6627 n = splay_tree_lookup (ctx->sfield_map,
6628 (splay_tree_key) TREE_OPERAND (ind, 0));
6629 sf = (tree) n->value;
6630 sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
182cf5a9 6631 src = build_simple_mem_ref_loc (loc, sarg);
445d06b6 6632 src = omp_build_component_ref (src, sf);
182cf5a9 6633 src = build_simple_mem_ref_loc (loc, src);
6634 dst = build_simple_mem_ref_loc (loc, arg);
445d06b6 6635 dst = omp_build_component_ref (dst, f);
fd6481cf 6636 t = lang_hooks.decls.omp_clause_copy_ctor (c, dst, src);
6637 append_to_statement_list (t, &list);
6638 n = splay_tree_lookup (ctx->field_map,
6639 (splay_tree_key) TREE_OPERAND (ind, 0));
6640 df = (tree) n->value;
6641 df = *(tree *) pointer_map_contains (tcctx.cb.decl_map, df);
182cf5a9 6642 ptr = build_simple_mem_ref_loc (loc, arg);
445d06b6 6643 ptr = omp_build_component_ref (ptr, df);
75a70cf9 6644 t = build2 (MODIFY_EXPR, TREE_TYPE (ptr), ptr,
389dd41b 6645 build_fold_addr_expr_loc (loc, dst));
fd6481cf 6646 append_to_statement_list (t, &list);
6647 }
6648
6649 t = build1 (RETURN_EXPR, void_type_node, NULL);
6650 append_to_statement_list (t, &list);
6651
6652 if (tcctx.cb.decl_map)
6653 pointer_map_destroy (tcctx.cb.decl_map);
6654 pop_gimplify_context (NULL);
6655 BIND_EXPR_BODY (bind) = list;
6656 pop_cfun ();
fd6481cf 6657}
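/* A minimal sketch (hypothetical user code, assuming -fopenmp) of a task
   that needs the copyfn built above -- a firstprivate variable-length
   array:

     void
     example (int n, char *src)
     {
       char buf[n];                        // variably sized type
       __builtin_memcpy (buf, src, n);
     #pragma omp task firstprivate (buf)
       buf[0]++;
     }

   The copyfn receives pointers to the task's record and to the sender
   record, copies buf into the former, and stores the copy's address back
   through the VLA pointer field, per the "Last pass" loop above.  */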
6658
75a70cf9 6659	/* Lower the OpenMP parallel or task directive at GSI_P.  CTX holds
 6660	   context information for the directive. */
773c5ba7 6661
6662static void
75a70cf9 6663lower_omp_taskreg (gimple_stmt_iterator *gsi_p, omp_context *ctx)
773c5ba7 6664{
75a70cf9 6665 tree clauses;
6666 tree child_fn, t;
6667 gimple stmt = gsi_stmt (*gsi_p);
6668 gimple par_bind, bind;
6669 gimple_seq par_body, olist, ilist, par_olist, par_ilist, new_body;
dac18d1a 6670 struct gimplify_ctx gctx;
389dd41b 6671 location_t loc = gimple_location (stmt);
773c5ba7 6672
75a70cf9 6673 clauses = gimple_omp_taskreg_clauses (stmt);
6674 par_bind = gimple_seq_first_stmt (gimple_omp_body (stmt));
6675 par_body = gimple_bind_body (par_bind);
773c5ba7 6676 child_fn = ctx->cb.dst_fn;
75a70cf9 6677 if (gimple_code (stmt) == GIMPLE_OMP_PARALLEL
6678 && !gimple_omp_parallel_combined_p (stmt))
de7ef844 6679 {
6680 struct walk_stmt_info wi;
6681 int ws_num = 0;
6682
6683 memset (&wi, 0, sizeof (wi));
de7ef844 6684 wi.info = &ws_num;
6685 wi.val_only = true;
75a70cf9 6686 walk_gimple_seq (par_body, check_combined_parallel, NULL, &wi);
de7ef844 6687 if (ws_num == 1)
75a70cf9 6688 gimple_omp_parallel_set_combined_p (stmt, true);
de7ef844 6689 }
fd6481cf 6690 if (ctx->srecord_type)
6691 create_task_copyfn (stmt, ctx);
773c5ba7 6692
dac18d1a 6693 push_gimplify_context (&gctx);
773c5ba7 6694
75a70cf9 6695 par_olist = NULL;
6696 par_ilist = NULL;
773c5ba7 6697 lower_rec_input_clauses (clauses, &par_ilist, &par_olist, ctx);
e3a19533 6698 lower_omp (&par_body, ctx);
75a70cf9 6699 if (gimple_code (stmt) == GIMPLE_OMP_PARALLEL)
fd6481cf 6700 lower_reduction_clauses (clauses, &par_olist, ctx);
773c5ba7 6701
6702 /* Declare all the variables created by mapping and the variables
6703 declared in the scope of the parallel body. */
6704 record_vars_into (ctx->block_vars, child_fn);
75a70cf9 6705 record_vars_into (gimple_bind_vars (par_bind), child_fn);
773c5ba7 6706
6707 if (ctx->record_type)
6708 {
fd6481cf 6709 ctx->sender_decl
6710 = create_tmp_var (ctx->srecord_type ? ctx->srecord_type
6711 : ctx->record_type, ".omp_data_o");
84bfaaeb 6712 DECL_NAMELESS (ctx->sender_decl) = 1;
86f2ad37 6713 TREE_ADDRESSABLE (ctx->sender_decl) = 1;
75a70cf9 6714 gimple_omp_taskreg_set_data_arg (stmt, ctx->sender_decl);
773c5ba7 6715 }
6716
75a70cf9 6717 olist = NULL;
6718 ilist = NULL;
773c5ba7 6719 lower_send_clauses (clauses, &ilist, &olist, ctx);
6720 lower_send_shared_vars (&ilist, &olist, ctx);
6721
6722 /* Once all the expansions are done, sequence all the different
75a70cf9 6723 fragments inside gimple_omp_body. */
773c5ba7 6724
75a70cf9 6725 new_body = NULL;
773c5ba7 6726
6727 if (ctx->record_type)
6728 {
389dd41b 6729 t = build_fold_addr_expr_loc (loc, ctx->sender_decl);
cc6b725b 6730 /* fixup_child_record_type might have changed receiver_decl's type. */
389dd41b 6731 t = fold_convert_loc (loc, TREE_TYPE (ctx->receiver_decl), t);
75a70cf9 6732 gimple_seq_add_stmt (&new_body,
6733 gimple_build_assign (ctx->receiver_decl, t));
773c5ba7 6734 }
6735
75a70cf9 6736 gimple_seq_add_seq (&new_body, par_ilist);
6737 gimple_seq_add_seq (&new_body, par_body);
6738 gimple_seq_add_seq (&new_body, par_olist);
6739 new_body = maybe_catch_exception (new_body);
6740 gimple_seq_add_stmt (&new_body, gimple_build_omp_return (false));
6741 gimple_omp_set_body (stmt, new_body);
773c5ba7 6742
75a70cf9 6743 bind = gimple_build_bind (NULL, NULL, gimple_bind_block (par_bind));
75a70cf9 6744 gsi_replace (gsi_p, bind, true);
e3a19533 6745 gimple_bind_add_seq (bind, ilist);
6746 gimple_bind_add_stmt (bind, stmt);
6747 gimple_bind_add_seq (bind, olist);
773c5ba7 6748
75a70cf9 6749 pop_gimplify_context (NULL);
773c5ba7 6750}
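/* A sketch of the combined-parallel detection above, on hypothetical user
   code (assuming -fopenmp): a parallel whose body is exactly one
   workshare region written as separate directives,

     #pragma omp parallel
     #pragma omp for
       for (i = 0; i < n; i++)
         a[i] = b[i];

   gets ws_num == 1 and is marked combined, letting later expansion treat
   it like the single "#pragma omp parallel for" directive.  */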
6751
a4890dc9 6752/* Callback for lower_omp_1. Return non-NULL if *tp needs to be
75a70cf9 6753 regimplified. If DATA is non-NULL, lower_omp_1 is outside
 6754	   of any OpenMP context, but with task_shared_vars set. */
46515c92 6755
6756static tree
75a70cf9 6757lower_omp_regimplify_p (tree *tp, int *walk_subtrees,
6758 void *data)
46515c92 6759{
a4890dc9 6760 tree t = *tp;
46515c92 6761
a4890dc9 6762 /* Any variable with DECL_VALUE_EXPR needs to be regimplified. */
75a70cf9 6763 if (TREE_CODE (t) == VAR_DECL && data == NULL && DECL_HAS_VALUE_EXPR_P (t))
9f49e155 6764 return t;
6765
6766 if (task_shared_vars
6767 && DECL_P (t)
6768 && bitmap_bit_p (task_shared_vars, DECL_UID (t)))
a4890dc9 6769 return t;
46515c92 6770
a4890dc9 6771 /* If a global variable has been privatized, TREE_CONSTANT on
6772 ADDR_EXPR might be wrong. */
75a70cf9 6773 if (data == NULL && TREE_CODE (t) == ADDR_EXPR)
a4890dc9 6774 recompute_tree_invariant_for_addr_expr (t);
46515c92 6775
a4890dc9 6776 *walk_subtrees = !TYPE_P (t) && !DECL_P (t);
6777 return NULL_TREE;
46515c92 6778}
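/* E.g., if a file-scope "int g;" is privatized by a data-sharing clause,
   a use of "&g" in the region may still have TREE_CONSTANT set from
   before g was replaced by a local copy; the recomputation above clears
   the stale flag (an illustrative scenario, not an exhaustive list).  */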
773c5ba7 6779
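/* Lower the OpenMP construct, if any, in the statement that GSI_P points
   to.  CTX is the innermost enclosing OpenMP context, or NULL when
   outside all constructs.  */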
a4890dc9 6780static void
75a70cf9 6781lower_omp_1 (gimple_stmt_iterator *gsi_p, omp_context *ctx)
1e8e9920 6782{
75a70cf9 6783 gimple stmt = gsi_stmt (*gsi_p);
6784 struct walk_stmt_info wi;
1e8e9920 6785
75a70cf9 6786 if (gimple_has_location (stmt))
6787 input_location = gimple_location (stmt);
a4890dc9 6788
75a70cf9 6789 if (task_shared_vars)
6790 memset (&wi, '\0', sizeof (wi));
a4890dc9 6791
773c5ba7 6792 /* If we have issued syntax errors, avoid doing any heavy lifting.
6793 Just replace the OpenMP directives with a NOP to avoid
6794 confusing RTL expansion. */
852f689e 6795 if (seen_error () && is_gimple_omp (stmt))
773c5ba7 6796 {
75a70cf9 6797 gsi_replace (gsi_p, gimple_build_nop (), true);
a4890dc9 6798 return;
773c5ba7 6799 }
6800
75a70cf9 6801 switch (gimple_code (stmt))
1e8e9920 6802 {
75a70cf9 6803 case GIMPLE_COND:
fd6481cf 6804 if ((ctx || task_shared_vars)
75a70cf9 6805 && (walk_tree (gimple_cond_lhs_ptr (stmt), lower_omp_regimplify_p,
6806 ctx ? NULL : &wi, NULL)
6807 || walk_tree (gimple_cond_rhs_ptr (stmt), lower_omp_regimplify_p,
6808 ctx ? NULL : &wi, NULL)))
6809 gimple_regimplify_operands (stmt, gsi_p);
a4890dc9 6810 break;
75a70cf9 6811 case GIMPLE_CATCH:
e3a19533 6812 lower_omp (gimple_catch_handler_ptr (stmt), ctx);
a4890dc9 6813 break;
75a70cf9 6814 case GIMPLE_EH_FILTER:
e3a19533 6815 lower_omp (gimple_eh_filter_failure_ptr (stmt), ctx);
a4890dc9 6816 break;
75a70cf9 6817 case GIMPLE_TRY:
e3a19533 6818 lower_omp (gimple_try_eval_ptr (stmt), ctx);
6819 lower_omp (gimple_try_cleanup_ptr (stmt), ctx);
a4890dc9 6820 break;
35215227 6821 case GIMPLE_TRANSACTION:
6822 lower_omp (gimple_transaction_body_ptr (stmt), ctx);
6823 break;
75a70cf9 6824 case GIMPLE_BIND:
e3a19533 6825 lower_omp (gimple_bind_body_ptr (stmt), ctx);
a4890dc9 6826 break;
75a70cf9 6827 case GIMPLE_OMP_PARALLEL:
6828 case GIMPLE_OMP_TASK:
6829 ctx = maybe_lookup_ctx (stmt);
6830 lower_omp_taskreg (gsi_p, ctx);
a4890dc9 6831 break;
75a70cf9 6832 case GIMPLE_OMP_FOR:
6833 ctx = maybe_lookup_ctx (stmt);
1e8e9920 6834 gcc_assert (ctx);
75a70cf9 6835 lower_omp_for (gsi_p, ctx);
1e8e9920 6836 break;
75a70cf9 6837 case GIMPLE_OMP_SECTIONS:
6838 ctx = maybe_lookup_ctx (stmt);
1e8e9920 6839 gcc_assert (ctx);
75a70cf9 6840 lower_omp_sections (gsi_p, ctx);
1e8e9920 6841 break;
75a70cf9 6842 case GIMPLE_OMP_SINGLE:
6843 ctx = maybe_lookup_ctx (stmt);
1e8e9920 6844 gcc_assert (ctx);
75a70cf9 6845 lower_omp_single (gsi_p, ctx);
1e8e9920 6846 break;
75a70cf9 6847 case GIMPLE_OMP_MASTER:
6848 ctx = maybe_lookup_ctx (stmt);
1e8e9920 6849 gcc_assert (ctx);
75a70cf9 6850 lower_omp_master (gsi_p, ctx);
1e8e9920 6851 break;
75a70cf9 6852 case GIMPLE_OMP_ORDERED:
6853 ctx = maybe_lookup_ctx (stmt);
1e8e9920 6854 gcc_assert (ctx);
75a70cf9 6855 lower_omp_ordered (gsi_p, ctx);
1e8e9920 6856 break;
75a70cf9 6857 case GIMPLE_OMP_CRITICAL:
6858 ctx = maybe_lookup_ctx (stmt);
1e8e9920 6859 gcc_assert (ctx);
75a70cf9 6860 lower_omp_critical (gsi_p, ctx);
6861 break;
6862 case GIMPLE_OMP_ATOMIC_LOAD:
6863 if ((ctx || task_shared_vars)
6864 && walk_tree (gimple_omp_atomic_load_rhs_ptr (stmt),
6865 lower_omp_regimplify_p, ctx ? NULL : &wi, NULL))
6866 gimple_regimplify_operands (stmt, gsi_p);
1e8e9920 6867 break;
a4890dc9 6868 default:
fd6481cf 6869 if ((ctx || task_shared_vars)
75a70cf9 6870 && walk_gimple_op (stmt, lower_omp_regimplify_p,
6871 ctx ? NULL : &wi))
6872 gimple_regimplify_operands (stmt, gsi_p);
1e8e9920 6873 break;
1e8e9920 6874 }
1e8e9920 6875}
6876
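/* Lower all statements in the sequence *BODY, using OpenMP context CTX
   (NULL outside all constructs).  */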
6877static void
e3a19533 6878lower_omp (gimple_seq *body, omp_context *ctx)
1e8e9920 6879{
1d22f541 6880 location_t saved_location = input_location;
e3a19533 6881 gimple_stmt_iterator gsi;
6882 for (gsi = gsi_start (*body); !gsi_end_p (gsi); gsi_next (&gsi))
75a70cf9 6883 lower_omp_1 (&gsi, ctx);
1d22f541 6884 input_location = saved_location;
1e8e9920 6885}
6886\f
6887/* Main entry point. */
6888
2a1990e9 6889static unsigned int
1e8e9920 6890execute_lower_omp (void)
6891{
75a70cf9 6892 gimple_seq body;
6893
41709826 6894 /* This pass always runs, to provide PROP_gimple_lomp.
6895 But there is nothing to do unless -fopenmp is given. */
6896 if (flag_openmp == 0)
6897 return 0;
6898
1e8e9920 6899 all_contexts = splay_tree_new (splay_tree_compare_pointers, 0,
6900 delete_omp_context);
6901
75a70cf9 6902 body = gimple_body (current_function_decl);
ab129075 6903 scan_omp (&body, NULL);
fd6481cf 6904 gcc_assert (taskreg_nesting_level == 0);
1e8e9920 6905
6906 if (all_contexts->root)
fd6481cf 6907 {
dac18d1a 6908 struct gimplify_ctx gctx;
6909
fd6481cf 6910 if (task_shared_vars)
dac18d1a 6911 push_gimplify_context (&gctx);
e3a19533 6912 lower_omp (&body, NULL);
fd6481cf 6913 if (task_shared_vars)
6914 pop_gimplify_context (NULL);
6915 }
1e8e9920 6916
773c5ba7 6917 if (all_contexts)
6918 {
6919 splay_tree_delete (all_contexts);
6920 all_contexts = NULL;
6921 }
fd6481cf 6922 BITMAP_FREE (task_shared_vars);
2a1990e9 6923 return 0;
1e8e9920 6924}
6925
48e1416a 6926struct gimple_opt_pass pass_lower_omp =
1e8e9920 6927{
20099e35 6928 {
6929 GIMPLE_PASS,
1e8e9920 6930 "omplower", /* name */
c7875731 6931 OPTGROUP_NONE, /* optinfo_flags */
41709826 6932 NULL, /* gate */
1e8e9920 6933 execute_lower_omp, /* execute */
6934 NULL, /* sub */
6935 NULL, /* next */
6936 0, /* static_pass_number */
0b1615c1 6937 TV_NONE, /* tv_id */
1e8e9920 6938 PROP_gimple_any, /* properties_required */
6939 PROP_gimple_lomp, /* properties_provided */
6940 0, /* properties_destroyed */
6941 0, /* todo_flags_start */
771e2890 6942 0 /* todo_flags_finish */
20099e35 6943 }
1e8e9920 6944};
1e8e9920 6945\f
6946/* The following is a utility to diagnose OpenMP structured block violations.
61e47ac8 6947 It is not part of the "omplower" pass, as that's invoked too late. It
6948 should be invoked by the respective front ends after gimplification. */
1e8e9920 6949
6950static splay_tree all_labels;
6951
6952/* Check for mismatched contexts and generate an error if needed. Return
6953 true if an error is detected. */
6954
6955static bool
75a70cf9 6956diagnose_sb_0 (gimple_stmt_iterator *gsi_p,
6957 gimple branch_ctx, gimple label_ctx)
1e8e9920 6958{
75a70cf9 6959 if (label_ctx == branch_ctx)
1e8e9920 6960 return false;
6961
48e1416a 6962
75a70cf9 6963 /*
6964 Previously we kept track of the label's entire context in diagnose_sb_[12]
6965 so we could traverse it and issue a correct "exit" or "enter" error
6966 message upon a structured block violation.
6967
 6968	  We built that context by tree_cons'ing a list, but there is
6969 no easy counterpart in gimple tuples. It seems like far too much work
6970 for issuing exit/enter error messages. If someone really misses the
6971 distinct error message... patches welcome.
6972 */
48e1416a 6973
75a70cf9 6974#if 0
1e8e9920 6975	  /* Try to avoid confusing the user by producing an error message
f0b5f617 6976 with correct "exit" or "enter" verbiage. We prefer "exit"
1e8e9920 6977 unless we can show that LABEL_CTX is nested within BRANCH_CTX. */
6978 if (branch_ctx == NULL)
6979 exit_p = false;
6980 else
6981 {
6982 while (label_ctx)
6983 {
6984 if (TREE_VALUE (label_ctx) == branch_ctx)
6985 {
6986 exit_p = false;
6987 break;
6988 }
6989 label_ctx = TREE_CHAIN (label_ctx);
6990 }
6991 }
6992
6993 if (exit_p)
6994 error ("invalid exit from OpenMP structured block");
6995 else
6996 error ("invalid entry to OpenMP structured block");
75a70cf9 6997#endif
1e8e9920 6998
75a70cf9 6999 /* If it's obvious we have an invalid entry, be specific about the error. */
7000 if (branch_ctx == NULL)
7001 error ("invalid entry to OpenMP structured block");
7002 else
7003 /* Otherwise, be vague and lazy, but efficient. */
7004 error ("invalid branch to/from an OpenMP structured block");
7005
7006 gsi_replace (gsi_p, gimple_build_nop (), false);
1e8e9920 7007 return true;
7008}
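/* Illustration on hypothetical user code -- both gotos below are
   diagnosed:

     goto inside;      // branch_ctx NULL: "invalid entry ..."
   #pragma omp parallel
     {
      inside:;
       goto outside;   // branch_ctx set: "invalid branch to/from ..."
     }
    outside:;
   */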
7009
7010/* Pass 1: Create a minimal tree of OpenMP structured blocks, and record
75a70cf9 7011 where each label is found. */
1e8e9920 7012
7013static tree
75a70cf9 7014diagnose_sb_1 (gimple_stmt_iterator *gsi_p, bool *handled_ops_p,
7015 struct walk_stmt_info *wi)
1e8e9920 7016{
75a70cf9 7017 gimple context = (gimple) wi->info;
7018 gimple inner_context;
7019 gimple stmt = gsi_stmt (*gsi_p);
1e8e9920 7020
75a70cf9 7021 *handled_ops_p = true;
7022
7023 switch (gimple_code (stmt))
1e8e9920 7024 {
75a70cf9 7025 WALK_SUBSTMTS;
48e1416a 7026
75a70cf9 7027 case GIMPLE_OMP_PARALLEL:
7028 case GIMPLE_OMP_TASK:
7029 case GIMPLE_OMP_SECTIONS:
7030 case GIMPLE_OMP_SINGLE:
7031 case GIMPLE_OMP_SECTION:
7032 case GIMPLE_OMP_MASTER:
7033 case GIMPLE_OMP_ORDERED:
7034 case GIMPLE_OMP_CRITICAL:
7035 /* The minimal context here is just the current OMP construct. */
7036 inner_context = stmt;
1e8e9920 7037 wi->info = inner_context;
75a70cf9 7038 walk_gimple_seq (gimple_omp_body (stmt), diagnose_sb_1, NULL, wi);
1e8e9920 7039 wi->info = context;
7040 break;
7041
75a70cf9 7042 case GIMPLE_OMP_FOR:
7043 inner_context = stmt;
1e8e9920 7044 wi->info = inner_context;
75a70cf9 7045 /* gimple_omp_for_{index,initial,final} are all DECLs; no need to
7046 walk them. */
7047 walk_gimple_seq (gimple_omp_for_pre_body (stmt),
7048 diagnose_sb_1, NULL, wi);
7049 walk_gimple_seq (gimple_omp_body (stmt), diagnose_sb_1, NULL, wi);
1e8e9920 7050 wi->info = context;
7051 break;
7052
75a70cf9 7053 case GIMPLE_LABEL:
7054 splay_tree_insert (all_labels, (splay_tree_key) gimple_label_label (stmt),
1e8e9920 7055 (splay_tree_value) context);
7056 break;
7057
7058 default:
7059 break;
7060 }
7061
7062 return NULL_TREE;
7063}
7064
7065/* Pass 2: Check each branch and see if its context differs from that of
7066 the destination label's context. */
7067
7068static tree
75a70cf9 7069diagnose_sb_2 (gimple_stmt_iterator *gsi_p, bool *handled_ops_p,
7070 struct walk_stmt_info *wi)
1e8e9920 7071{
75a70cf9 7072 gimple context = (gimple) wi->info;
1e8e9920 7073 splay_tree_node n;
75a70cf9 7074 gimple stmt = gsi_stmt (*gsi_p);
1e8e9920 7075
75a70cf9 7076 *handled_ops_p = true;
7077
7078 switch (gimple_code (stmt))
1e8e9920 7079 {
75a70cf9 7080 WALK_SUBSTMTS;
7081
7082 case GIMPLE_OMP_PARALLEL:
7083 case GIMPLE_OMP_TASK:
7084 case GIMPLE_OMP_SECTIONS:
7085 case GIMPLE_OMP_SINGLE:
7086 case GIMPLE_OMP_SECTION:
7087 case GIMPLE_OMP_MASTER:
7088 case GIMPLE_OMP_ORDERED:
7089 case GIMPLE_OMP_CRITICAL:
7090 wi->info = stmt;
e3a19533 7091 walk_gimple_seq_mod (gimple_omp_body_ptr (stmt), diagnose_sb_2, NULL, wi);
1e8e9920 7092 wi->info = context;
7093 break;
7094
75a70cf9 7095 case GIMPLE_OMP_FOR:
7096 wi->info = stmt;
7097 /* gimple_omp_for_{index,initial,final} are all DECLs; no need to
7098 walk them. */
e3a19533 7099 walk_gimple_seq_mod (gimple_omp_for_pre_body_ptr (stmt),
7100 diagnose_sb_2, NULL, wi);
7101 walk_gimple_seq_mod (gimple_omp_body_ptr (stmt), diagnose_sb_2, NULL, wi);
1e8e9920 7102 wi->info = context;
7103 break;
7104
0e1818e7 7105 case GIMPLE_COND:
7106 {
7107 tree lab = gimple_cond_true_label (stmt);
7108 if (lab)
7109 {
7110 n = splay_tree_lookup (all_labels,
7111 (splay_tree_key) lab);
7112 diagnose_sb_0 (gsi_p, context,
7113 n ? (gimple) n->value : NULL);
7114 }
7115 lab = gimple_cond_false_label (stmt);
7116 if (lab)
7117 {
7118 n = splay_tree_lookup (all_labels,
7119 (splay_tree_key) lab);
7120 diagnose_sb_0 (gsi_p, context,
7121 n ? (gimple) n->value : NULL);
7122 }
7123 }
7124 break;
7125
75a70cf9 7126 case GIMPLE_GOTO:
1e8e9920 7127 {
75a70cf9 7128 tree lab = gimple_goto_dest (stmt);
1e8e9920 7129 if (TREE_CODE (lab) != LABEL_DECL)
7130 break;
7131
7132 n = splay_tree_lookup (all_labels, (splay_tree_key) lab);
75a70cf9 7133 diagnose_sb_0 (gsi_p, context, n ? (gimple) n->value : NULL);
1e8e9920 7134 }
7135 break;
7136
75a70cf9 7137 case GIMPLE_SWITCH:
1e8e9920 7138 {
75a70cf9 7139 unsigned int i;
7140 for (i = 0; i < gimple_switch_num_labels (stmt); ++i)
1e8e9920 7141 {
75a70cf9 7142 tree lab = CASE_LABEL (gimple_switch_label (stmt, i));
1e8e9920 7143 n = splay_tree_lookup (all_labels, (splay_tree_key) lab);
75a70cf9 7144 if (n && diagnose_sb_0 (gsi_p, context, (gimple) n->value))
1e8e9920 7145 break;
7146 }
7147 }
7148 break;
7149
75a70cf9 7150 case GIMPLE_RETURN:
7151 diagnose_sb_0 (gsi_p, context, NULL);
1e8e9920 7152 break;
7153
7154 default:
7155 break;
7156 }
7157
7158 return NULL_TREE;
7159}
7160
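/* Walk the current function body twice: pass 1 records the OpenMP context
   of each label, pass 2 checks every branch against the context of its
   destination label.  */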
bfec3452 7161static unsigned int
7162diagnose_omp_structured_block_errors (void)
1e8e9920 7163{
1e8e9920 7164 struct walk_stmt_info wi;
bfec3452 7165 gimple_seq body = gimple_body (current_function_decl);
1e8e9920 7166
7167 all_labels = splay_tree_new (splay_tree_compare_pointers, 0, 0);
7168
7169 memset (&wi, 0, sizeof (wi));
75a70cf9 7170 walk_gimple_seq (body, diagnose_sb_1, NULL, &wi);
1e8e9920 7171
7172 memset (&wi, 0, sizeof (wi));
1e8e9920 7173 wi.want_locations = true;
e3a19533 7174 walk_gimple_seq_mod (&body, diagnose_sb_2, NULL, &wi);
7175
7176 gimple_set_body (current_function_decl, body);
1e8e9920 7177
7178 splay_tree_delete (all_labels);
7179 all_labels = NULL;
7180
bfec3452 7181 return 0;
1e8e9920 7182}
7183
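/* Gate: the structured block diagnostics are only needed with -fopenmp.  */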
bfec3452 7184static bool
7185gate_diagnose_omp_blocks (void)
7186{
7187 return flag_openmp != 0;
7188}
7189
7190struct gimple_opt_pass pass_diagnose_omp_blocks =
7191{
7192 {
7193 GIMPLE_PASS,
53b5ae07 7194 "*diagnose_omp_blocks", /* name */
c7875731 7195 OPTGROUP_NONE, /* optinfo_flags */
bfec3452 7196 gate_diagnose_omp_blocks, /* gate */
7197 diagnose_omp_structured_block_errors, /* execute */
7198 NULL, /* sub */
7199 NULL, /* next */
7200 0, /* static_pass_number */
7201 TV_NONE, /* tv_id */
7202 PROP_gimple_any, /* properties_required */
7203 0, /* properties_provided */
7204 0, /* properties_destroyed */
7205 0, /* todo_flags_start */
7206 0, /* todo_flags_finish */
7207 }
7208};
7209
1e8e9920 7210#include "gt-omp-low.h"