1e8e9920 1/* Lowering pass for OpenMP directives. Converts OpenMP directives
2 into explicit calls to the runtime library (libgomp) and data
3 marshalling to implement data sharing and copying clauses.
4 Contributed by Diego Novillo <dnovillo@redhat.com>
5
7cf0dbf3 6 Copyright (C) 2005, 2006, 2007, 2008, 2009, 2010
7 Free Software Foundation, Inc.
1e8e9920 8
9This file is part of GCC.
10
11GCC is free software; you can redistribute it and/or modify it under
12the terms of the GNU General Public License as published by the Free
8c4c00c1 13Software Foundation; either version 3, or (at your option) any later
1e8e9920 14version.
15
16GCC is distributed in the hope that it will be useful, but WITHOUT ANY
17WARRANTY; without even the implied warranty of MERCHANTABILITY or
18FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
19for more details.
20
21You should have received a copy of the GNU General Public License
8c4c00c1 22along with GCC; see the file COPYING3. If not see
23<http://www.gnu.org/licenses/>. */
1e8e9920 24
25#include "config.h"
26#include "system.h"
27#include "coretypes.h"
28#include "tm.h"
29#include "tree.h"
30#include "rtl.h"
75a70cf9 31#include "gimple.h"
32#include "tree-iterator.h"
1e8e9920 33#include "tree-inline.h"
34#include "langhooks.h"
852f689e 35#include "diagnostic-core.h"
1e8e9920 36#include "tree-flow.h"
37#include "timevar.h"
38#include "flags.h"
39#include "function.h"
40#include "expr.h"
41#include "toplev.h"
42#include "tree-pass.h"
43#include "ggc.h"
44#include "except.h"
e3022db7 45#include "splay-tree.h"
cb7f680b 46#include "optabs.h"
47#include "cfgloop.h"
1e8e9920 48
75a70cf9 49
48e1416a 50/* Lowering of OpenMP parallel and workshare constructs proceeds in two
1e8e9920 51 phases. The first phase scans the function looking for OMP statements
52 and then for variables that must be replaced to satisfy data sharing
53 clauses. The second phase expands code for the constructs, as well as
334ec2d8 54 re-gimplifying things when variables have been replaced with complex
1e8e9920 55 expressions.
56
d134bccc 57 Final code generation is done by pass_expand_omp. The flowgraph is
58 scanned for parallel regions which are then moved to a new
59 function, to be invoked by the thread library. */
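
/* For instance, a directive such as

       #pragma omp parallel shared (a)
       body;

   is, roughly speaking, outlined into a child function plus explicit
   libgomp calls:

       .omp_data_o.a = a;
       GOMP_parallel_start (foo._omp_fn.0, &.omp_data_o, 0);
       foo._omp_fn.0 (&.omp_data_o);
       GOMP_parallel_end ();

   where .omp_data_o is the data-sharing record built by the scan
   phase below.  This is only a sketch; the exact marshalling depends
   on the clauses involved.  */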
1e8e9920 60
61/* Context structure. Used to store information about each parallel
62 directive in the code. */
63
64typedef struct omp_context
65{
66 /* This field must be at the beginning, as we do "inheritance": Some
67 callback functions for tree-inline.c (e.g., omp_copy_decl)
68 receive a copy_body_data pointer that is up-casted to an
69 omp_context pointer. */
70 copy_body_data cb;
71
72 /* The tree of contexts corresponding to the encountered constructs. */
73 struct omp_context *outer;
75a70cf9 74 gimple stmt;
1e8e9920 75
48e1416a 76 /* Map variables to fields in a structure that allows communication
1e8e9920 77 between sending and receiving threads. */
78 splay_tree field_map;
79 tree record_type;
80 tree sender_decl;
81 tree receiver_decl;
82
fd6481cf 83 /* These are used just by task contexts, if a task firstprivate fn is
 84 needed. srecord_type is used to communicate from the thread
 85 that encountered the task construct to the task firstprivate fn;
86 record_type is allocated by GOMP_task, initialized by task firstprivate
87 fn and passed to the task body fn. */
88 splay_tree sfield_map;
89 tree srecord_type;
90
1e8e9920 91 /* A chain of variables to add to the top-level block surrounding the
92 construct. In the case of a parallel, this is in the child function. */
93 tree block_vars;
94
95 /* What to do with variables with implicitly determined sharing
96 attributes. */
97 enum omp_clause_default_kind default_kind;
98
 99 /* Nesting depth of this context. Used to beautify error messages about
100 invalid gotos. The outermost ctx is depth 1, with depth 0 being
101 reserved for the main body of the function. */
102 int depth;
103
1e8e9920 104 /* True if this parallel directive is nested within another. */
105 bool is_nested;
1e8e9920 106} omp_context;
107
108
fd6481cf 109struct omp_for_data_loop
110{
111 tree v, n1, n2, step;
112 enum tree_code cond_code;
113};
114
773c5ba7 115/* A structure describing the main elements of a parallel loop. */
1e8e9920 116
773c5ba7 117struct omp_for_data
1e8e9920 118{
fd6481cf 119 struct omp_for_data_loop loop;
75a70cf9 120 tree chunk_size;
121 gimple for_stmt;
fd6481cf 122 tree pre, iter_type;
123 int collapse;
1e8e9920 124 bool have_nowait, have_ordered;
125 enum omp_clause_schedule_kind sched_kind;
fd6481cf 126 struct omp_for_data_loop *loops;
1e8e9920 127};
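
/* As a sketch of what extract_omp_for_data below produces: for

       #pragma omp for schedule (dynamic, 4)
       for (i = 0; i < n; i++) ...

   the fields come out as loop.v = i, loop.n1 = 0, loop.n2 = n,
   loop.step = 1, loop.cond_code = LT_EXPR, collapse = 1,
   sched_kind = OMP_CLAUSE_SCHEDULE_DYNAMIC and chunk_size = 4.  */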
128
773c5ba7 129
1e8e9920 130static splay_tree all_contexts;
fd6481cf 131static int taskreg_nesting_level;
61e47ac8 132struct omp_region *root_omp_region;
fd6481cf 133static bitmap task_shared_vars;
1e8e9920 134
75a70cf9 135static void scan_omp (gimple_seq, omp_context *);
136static tree scan_omp_1_op (tree *, int *, void *);
137
138#define WALK_SUBSTMTS \
139 case GIMPLE_BIND: \
140 case GIMPLE_TRY: \
141 case GIMPLE_CATCH: \
142 case GIMPLE_EH_FILTER: \
143 /* The sub-statements for these should be walked. */ \
144 *handled_ops_p = false; \
145 break;
146
147/* Convenience function for calling scan_omp_1_op on tree operands. */
148
149static inline tree
150scan_omp_op (tree *tp, omp_context *ctx)
151{
152 struct walk_stmt_info wi;
153
154 memset (&wi, 0, sizeof (wi));
155 wi.info = ctx;
156 wi.want_locations = true;
157
158 return walk_tree (tp, scan_omp_1_op, &wi, NULL);
159}
160
161static void lower_omp (gimple_seq, omp_context *);
f49d7bb5 162static tree lookup_decl_in_outer_ctx (tree, omp_context *);
163static tree maybe_lookup_decl_in_outer_ctx (tree, omp_context *);
1e8e9920 164
165/* Find an OpenMP clause of type KIND within CLAUSES. */
166
79acaae1 167tree
590c3166 168find_omp_clause (tree clauses, enum omp_clause_code kind)
1e8e9920 169{
170 for (; clauses ; clauses = OMP_CLAUSE_CHAIN (clauses))
55d6e7cd 171 if (OMP_CLAUSE_CODE (clauses) == kind)
1e8e9920 172 return clauses;
173
174 return NULL_TREE;
175}
176
177/* Return true if CTX is for an omp parallel. */
178
179static inline bool
180is_parallel_ctx (omp_context *ctx)
181{
75a70cf9 182 return gimple_code (ctx->stmt) == GIMPLE_OMP_PARALLEL;
1e8e9920 183}
184
773c5ba7 185
fd6481cf 186/* Return true if CTX is for an omp task. */
187
188static inline bool
189is_task_ctx (omp_context *ctx)
190{
75a70cf9 191 return gimple_code (ctx->stmt) == GIMPLE_OMP_TASK;
fd6481cf 192}
193
194
195/* Return true if CTX is for an omp parallel or omp task. */
196
197static inline bool
198is_taskreg_ctx (omp_context *ctx)
199{
75a70cf9 200 return gimple_code (ctx->stmt) == GIMPLE_OMP_PARALLEL
201 || gimple_code (ctx->stmt) == GIMPLE_OMP_TASK;
fd6481cf 202}
203
204
773c5ba7 205/* Return true if REGION is a combined parallel+workshare region. */
1e8e9920 206
207static inline bool
773c5ba7 208is_combined_parallel (struct omp_region *region)
209{
210 return region->is_combined_parallel;
211}
212
213
214/* Extract the header elements of parallel loop FOR_STMT and store
215 them into *FD. */
216
217static void
75a70cf9 218extract_omp_for_data (gimple for_stmt, struct omp_for_data *fd,
fd6481cf 219 struct omp_for_data_loop *loops)
773c5ba7 220{
fd6481cf 221 tree t, var, *collapse_iter, *collapse_count;
222 tree count = NULL_TREE, iter_type = long_integer_type_node;
223 struct omp_for_data_loop *loop;
224 int i;
225 struct omp_for_data_loop dummy_loop;
389dd41b 226 location_t loc = gimple_location (for_stmt);
773c5ba7 227
228 fd->for_stmt = for_stmt;
229 fd->pre = NULL;
75a70cf9 230 fd->collapse = gimple_omp_for_collapse (for_stmt);
fd6481cf 231 if (fd->collapse > 1)
232 fd->loops = loops;
233 else
234 fd->loops = &fd->loop;
773c5ba7 235
236 fd->have_nowait = fd->have_ordered = false;
237 fd->sched_kind = OMP_CLAUSE_SCHEDULE_STATIC;
238 fd->chunk_size = NULL_TREE;
fd6481cf 239 collapse_iter = NULL;
240 collapse_count = NULL;
773c5ba7 241
75a70cf9 242 for (t = gimple_omp_for_clauses (for_stmt); t ; t = OMP_CLAUSE_CHAIN (t))
55d6e7cd 243 switch (OMP_CLAUSE_CODE (t))
773c5ba7 244 {
245 case OMP_CLAUSE_NOWAIT:
246 fd->have_nowait = true;
247 break;
248 case OMP_CLAUSE_ORDERED:
249 fd->have_ordered = true;
250 break;
251 case OMP_CLAUSE_SCHEDULE:
252 fd->sched_kind = OMP_CLAUSE_SCHEDULE_KIND (t);
253 fd->chunk_size = OMP_CLAUSE_SCHEDULE_CHUNK_EXPR (t);
254 break;
fd6481cf 255 case OMP_CLAUSE_COLLAPSE:
256 if (fd->collapse > 1)
257 {
258 collapse_iter = &OMP_CLAUSE_COLLAPSE_ITERVAR (t);
259 collapse_count = &OMP_CLAUSE_COLLAPSE_COUNT (t);
260 }
773c5ba7 261 default:
262 break;
263 }
264
fd6481cf 265 /* FIXME: for now map schedule(auto) to schedule(static).
266 There should be analysis to determine whether all iterations
267 are approximately the same amount of work (then schedule(static)
bde357c8 268 is best) or if it varies (then schedule(dynamic,N) is better). */
fd6481cf 269 if (fd->sched_kind == OMP_CLAUSE_SCHEDULE_AUTO)
270 {
271 fd->sched_kind = OMP_CLAUSE_SCHEDULE_STATIC;
272 gcc_assert (fd->chunk_size == NULL);
273 }
274 gcc_assert (fd->collapse == 1 || collapse_iter != NULL);
773c5ba7 275 if (fd->sched_kind == OMP_CLAUSE_SCHEDULE_RUNTIME)
276 gcc_assert (fd->chunk_size == NULL);
277 else if (fd->chunk_size == NULL)
278 {
279 /* We only need to compute a default chunk size for ordered
280 static loops and dynamic loops. */
fd6481cf 281 if (fd->sched_kind != OMP_CLAUSE_SCHEDULE_STATIC
282 || fd->have_ordered
283 || fd->collapse > 1)
773c5ba7 284 fd->chunk_size = (fd->sched_kind == OMP_CLAUSE_SCHEDULE_STATIC)
285 ? integer_zero_node : integer_one_node;
286 }
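
  /* E.g. a bare "schedule (dynamic)" gets a default chunk_size of 1
     here, while an ordered static schedule gets 0, leaving the runtime
     to pick the actual chunking.  */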
fd6481cf 287
288 for (i = 0; i < fd->collapse; i++)
289 {
290 if (fd->collapse == 1)
291 loop = &fd->loop;
292 else if (loops != NULL)
293 loop = loops + i;
294 else
295 loop = &dummy_loop;
296
48e1416a 297
75a70cf9 298 loop->v = gimple_omp_for_index (for_stmt, i);
fd6481cf 299 gcc_assert (SSA_VAR_P (loop->v));
300 gcc_assert (TREE_CODE (TREE_TYPE (loop->v)) == INTEGER_TYPE
301 || TREE_CODE (TREE_TYPE (loop->v)) == POINTER_TYPE);
302 var = TREE_CODE (loop->v) == SSA_NAME ? SSA_NAME_VAR (loop->v) : loop->v;
75a70cf9 303 loop->n1 = gimple_omp_for_initial (for_stmt, i);
fd6481cf 304
75a70cf9 305 loop->cond_code = gimple_omp_for_cond (for_stmt, i);
306 loop->n2 = gimple_omp_for_final (for_stmt, i);
fd6481cf 307 switch (loop->cond_code)
308 {
309 case LT_EXPR:
310 case GT_EXPR:
311 break;
312 case LE_EXPR:
313 if (POINTER_TYPE_P (TREE_TYPE (loop->n2)))
389dd41b 314 loop->n2 = fold_build2_loc (loc,
315 POINTER_PLUS_EXPR, TREE_TYPE (loop->n2),
fd6481cf 316 loop->n2, size_one_node);
317 else
389dd41b 318 loop->n2 = fold_build2_loc (loc,
319 PLUS_EXPR, TREE_TYPE (loop->n2), loop->n2,
fd6481cf 320 build_int_cst (TREE_TYPE (loop->n2), 1));
321 loop->cond_code = LT_EXPR;
322 break;
323 case GE_EXPR:
324 if (POINTER_TYPE_P (TREE_TYPE (loop->n2)))
389dd41b 325 loop->n2 = fold_build2_loc (loc,
326 POINTER_PLUS_EXPR, TREE_TYPE (loop->n2),
fd6481cf 327 loop->n2, size_int (-1));
328 else
389dd41b 329 loop->n2 = fold_build2_loc (loc,
330 MINUS_EXPR, TREE_TYPE (loop->n2), loop->n2,
fd6481cf 331 build_int_cst (TREE_TYPE (loop->n2), 1));
332 loop->cond_code = GT_EXPR;
333 break;
334 default:
335 gcc_unreachable ();
336 }
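
      /* At this point an "i <= n2" condition has been rewritten as
         "i < n2 + 1" and "i >= n2" as "i > n2 - 1", so only LT_EXPR
         and GT_EXPR conditions survive below.  */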
337
75a70cf9 338 t = gimple_omp_for_incr (for_stmt, i);
fd6481cf 339 gcc_assert (TREE_OPERAND (t, 0) == var);
340 switch (TREE_CODE (t))
341 {
342 case PLUS_EXPR:
343 case POINTER_PLUS_EXPR:
344 loop->step = TREE_OPERAND (t, 1);
345 break;
346 case MINUS_EXPR:
347 loop->step = TREE_OPERAND (t, 1);
389dd41b 348 loop->step = fold_build1_loc (loc,
349 NEGATE_EXPR, TREE_TYPE (loop->step),
fd6481cf 350 loop->step);
351 break;
352 default:
353 gcc_unreachable ();
354 }
355
356 if (iter_type != long_long_unsigned_type_node)
357 {
358 if (POINTER_TYPE_P (TREE_TYPE (loop->v)))
359 iter_type = long_long_unsigned_type_node;
360 else if (TYPE_UNSIGNED (TREE_TYPE (loop->v))
361 && TYPE_PRECISION (TREE_TYPE (loop->v))
362 >= TYPE_PRECISION (iter_type))
363 {
364 tree n;
365
366 if (loop->cond_code == LT_EXPR)
389dd41b 367 n = fold_build2_loc (loc,
368 PLUS_EXPR, TREE_TYPE (loop->v),
fd6481cf 369 loop->n2, loop->step);
370 else
371 n = loop->n1;
372 if (TREE_CODE (n) != INTEGER_CST
373 || tree_int_cst_lt (TYPE_MAX_VALUE (iter_type), n))
374 iter_type = long_long_unsigned_type_node;
375 }
376 else if (TYPE_PRECISION (TREE_TYPE (loop->v))
377 > TYPE_PRECISION (iter_type))
378 {
379 tree n1, n2;
380
381 if (loop->cond_code == LT_EXPR)
382 {
383 n1 = loop->n1;
389dd41b 384 n2 = fold_build2_loc (loc,
385 PLUS_EXPR, TREE_TYPE (loop->v),
fd6481cf 386 loop->n2, loop->step);
387 }
388 else
389 {
389dd41b 390 n1 = fold_build2_loc (loc,
391 MINUS_EXPR, TREE_TYPE (loop->v),
fd6481cf 392 loop->n2, loop->step);
393 n2 = loop->n1;
394 }
395 if (TREE_CODE (n1) != INTEGER_CST
396 || TREE_CODE (n2) != INTEGER_CST
397 || !tree_int_cst_lt (TYPE_MIN_VALUE (iter_type), n1)
398 || !tree_int_cst_lt (n2, TYPE_MAX_VALUE (iter_type)))
399 iter_type = long_long_unsigned_type_node;
400 }
401 }
402
403 if (collapse_count && *collapse_count == NULL)
404 {
405 if ((i == 0 || count != NULL_TREE)
406 && TREE_CODE (TREE_TYPE (loop->v)) == INTEGER_TYPE
407 && TREE_CONSTANT (loop->n1)
408 && TREE_CONSTANT (loop->n2)
409 && TREE_CODE (loop->step) == INTEGER_CST)
410 {
411 tree itype = TREE_TYPE (loop->v);
412
413 if (POINTER_TYPE_P (itype))
414 itype
415 = lang_hooks.types.type_for_size (TYPE_PRECISION (itype), 0);
416 t = build_int_cst (itype, (loop->cond_code == LT_EXPR ? -1 : 1));
389dd41b 417 t = fold_build2_loc (loc,
418 PLUS_EXPR, itype,
419 fold_convert_loc (loc, itype, loop->step), t);
420 t = fold_build2_loc (loc, PLUS_EXPR, itype, t,
421 fold_convert_loc (loc, itype, loop->n2));
422 t = fold_build2_loc (loc, MINUS_EXPR, itype, t,
423 fold_convert_loc (loc, itype, loop->n1));
fd6481cf 424 if (TYPE_UNSIGNED (itype) && loop->cond_code == GT_EXPR)
389dd41b 425 t = fold_build2_loc (loc, TRUNC_DIV_EXPR, itype,
426 fold_build1_loc (loc, NEGATE_EXPR, itype, t),
427 fold_build1_loc (loc, NEGATE_EXPR, itype,
428 fold_convert_loc (loc, itype,
429 loop->step)));
fd6481cf 430 else
389dd41b 431 t = fold_build2_loc (loc, TRUNC_DIV_EXPR, itype, t,
432 fold_convert_loc (loc, itype, loop->step));
433 t = fold_convert_loc (loc, long_long_unsigned_type_node, t);
fd6481cf 434 if (count != NULL_TREE)
389dd41b 435 count = fold_build2_loc (loc,
436 MULT_EXPR, long_long_unsigned_type_node,
fd6481cf 437 count, t);
438 else
439 count = t;
440 if (TREE_CODE (count) != INTEGER_CST)
441 count = NULL_TREE;
442 }
443 else
444 count = NULL_TREE;
445 }
446 }
447
448 if (count)
449 {
450 if (!tree_int_cst_lt (count, TYPE_MAX_VALUE (long_integer_type_node)))
451 iter_type = long_long_unsigned_type_node;
452 else
453 iter_type = long_integer_type_node;
454 }
455 else if (collapse_iter && *collapse_iter != NULL)
456 iter_type = TREE_TYPE (*collapse_iter);
457 fd->iter_type = iter_type;
458 if (collapse_iter && *collapse_iter == NULL)
459 *collapse_iter = create_tmp_var (iter_type, ".iter");
460 if (collapse_count && *collapse_count == NULL)
461 {
462 if (count)
389dd41b 463 *collapse_count = fold_convert_loc (loc, iter_type, count);
fd6481cf 464 else
465 *collapse_count = create_tmp_var (iter_type, ".count");
466 }
467
468 if (fd->collapse > 1)
469 {
470 fd->loop.v = *collapse_iter;
471 fd->loop.n1 = build_int_cst (TREE_TYPE (fd->loop.v), 0);
472 fd->loop.n2 = *collapse_count;
473 fd->loop.step = build_int_cst (TREE_TYPE (fd->loop.v), 1);
474 fd->loop.cond_code = LT_EXPR;
475 }
773c5ba7 476}
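
/* A sketch of the collapse handling above: for

       #pragma omp for collapse (2)
       for (i = 0; i < 4; i++)
         for (j = 0; j < 8; j++) ...

   both trip counts are compile-time constants, so COUNT folds to 32
   and fd->loop describes a single logical loop running a fresh
   iterator from 0 up to (but excluding) 32 with step 1.  */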
477
478
479/* Given two blocks PAR_ENTRY_BB and WS_ENTRY_BB such that WS_ENTRY_BB
480 is the immediate dominator of PAR_ENTRY_BB, return true if there
481 are no data dependencies that would prevent expanding the parallel
482 directive at PAR_ENTRY_BB as a combined parallel+workshare region.
483
484 When expanding a combined parallel+workshare region, the call to
485 the child function may need additional arguments in the case of
75a70cf9 486 GIMPLE_OMP_FOR regions. In some cases, these arguments are
487 computed out of variables passed in from the parent to the child
488 via 'struct .omp_data_s'. For instance:
773c5ba7 489
490 #pragma omp parallel for schedule (guided, i * 4)
491 for (j ...)
492
493 Is lowered into:
494
495 # BLOCK 2 (PAR_ENTRY_BB)
496 .omp_data_o.i = i;
 497 #pragma omp parallel [child fn: bar.omp_fn.0 ( ..., D.1598)]
48e1416a 498
773c5ba7 499 # BLOCK 3 (WS_ENTRY_BB)
500 .omp_data_i = &.omp_data_o;
501 D.1667 = .omp_data_i->i;
502 D.1598 = D.1667 * 4;
503 #pragma omp for schedule (guided, D.1598)
504
505 When we outline the parallel region, the call to the child function
506 'bar.omp_fn.0' will need the value D.1598 in its argument list, but
507 that value is computed *after* the call site. So, in principle we
508 cannot do the transformation.
509
510 To see whether the code in WS_ENTRY_BB blocks the combined
511 parallel+workshare call, we collect all the variables used in the
75a70cf9 512 GIMPLE_OMP_FOR header and check whether they appear on the LHS of any
773c5ba7 513 statement in WS_ENTRY_BB. If so, then we cannot emit the combined
514 call.
515
516 FIXME. If we had the SSA form built at this point, we could merely
517 hoist the code in block 3 into block 2 and be done with it. But at
518 this point we don't have dataflow information and though we could
519 hack something up here, it is really not worth the aggravation. */
520
521static bool
f018d957 522workshare_safe_to_combine_p (basic_block ws_entry_bb)
773c5ba7 523{
524 struct omp_for_data fd;
f018d957 525 gimple ws_stmt = last_stmt (ws_entry_bb);
773c5ba7 526
75a70cf9 527 if (gimple_code (ws_stmt) == GIMPLE_OMP_SECTIONS)
773c5ba7 528 return true;
529
75a70cf9 530 gcc_assert (gimple_code (ws_stmt) == GIMPLE_OMP_FOR);
773c5ba7 531
fd6481cf 532 extract_omp_for_data (ws_stmt, &fd, NULL);
533
534 if (fd.collapse > 1 && TREE_CODE (fd.loop.n2) != INTEGER_CST)
535 return false;
536 if (fd.iter_type != long_integer_type_node)
537 return false;
773c5ba7 538
539 /* FIXME. We give up too easily here. If any of these arguments
540 are not constants, they will likely involve variables that have
541 been mapped into fields of .omp_data_s for sharing with the child
542 function. With appropriate data flow, it would be possible to
543 see through this. */
fd6481cf 544 if (!is_gimple_min_invariant (fd.loop.n1)
545 || !is_gimple_min_invariant (fd.loop.n2)
546 || !is_gimple_min_invariant (fd.loop.step)
773c5ba7 547 || (fd.chunk_size && !is_gimple_min_invariant (fd.chunk_size)))
548 return false;
549
550 return true;
551}
552
553
554/* Collect additional arguments needed to emit a combined
555 parallel+workshare call. WS_STMT is the workshare directive being
556 expanded. */
557
558static tree
75a70cf9 559get_ws_args_for (gimple ws_stmt)
773c5ba7 560{
561 tree t;
389dd41b 562 location_t loc = gimple_location (ws_stmt);
773c5ba7 563
75a70cf9 564 if (gimple_code (ws_stmt) == GIMPLE_OMP_FOR)
773c5ba7 565 {
566 struct omp_for_data fd;
567 tree ws_args;
568
fd6481cf 569 extract_omp_for_data (ws_stmt, &fd, NULL);
773c5ba7 570
571 ws_args = NULL_TREE;
572 if (fd.chunk_size)
573 {
389dd41b 574 t = fold_convert_loc (loc, long_integer_type_node, fd.chunk_size);
773c5ba7 575 ws_args = tree_cons (NULL, t, ws_args);
576 }
577
389dd41b 578 t = fold_convert_loc (loc, long_integer_type_node, fd.loop.step);
773c5ba7 579 ws_args = tree_cons (NULL, t, ws_args);
580
389dd41b 581 t = fold_convert_loc (loc, long_integer_type_node, fd.loop.n2);
773c5ba7 582 ws_args = tree_cons (NULL, t, ws_args);
583
389dd41b 584 t = fold_convert_loc (loc, long_integer_type_node, fd.loop.n1);
773c5ba7 585 ws_args = tree_cons (NULL, t, ws_args);
586
587 return ws_args;
588 }
75a70cf9 589 else if (gimple_code (ws_stmt) == GIMPLE_OMP_SECTIONS)
773c5ba7 590 {
ac6e3339 591 /* Number of sections is equal to the number of edges from the
75a70cf9 592 GIMPLE_OMP_SECTIONS_SWITCH statement, except for the one to
593 the exit of the sections region. */
594 basic_block bb = single_succ (gimple_bb (ws_stmt));
ac6e3339 595 t = build_int_cst (unsigned_type_node, EDGE_COUNT (bb->succs) - 1);
773c5ba7 596 t = tree_cons (NULL, t, NULL);
597 return t;
598 }
599
600 gcc_unreachable ();
601}
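
/* Note that tree_cons prepends, so the GIMPLE_OMP_FOR list built above
   reads, front to back, n1, n2, step and finally the chunk size when
   present -- matching the trailing arguments expected by the
   GOMP_parallel_loop_*_start entry points in libgomp.  */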
602
603
604/* Discover whether REGION is a combined parallel+workshare region. */
605
606static void
607determine_parallel_type (struct omp_region *region)
1e8e9920 608{
773c5ba7 609 basic_block par_entry_bb, par_exit_bb;
610 basic_block ws_entry_bb, ws_exit_bb;
611
03ed154b 612 if (region == NULL || region->inner == NULL
ac6e3339 613 || region->exit == NULL || region->inner->exit == NULL
614 || region->inner->cont == NULL)
773c5ba7 615 return;
616
617 /* We only support parallel+for and parallel+sections. */
75a70cf9 618 if (region->type != GIMPLE_OMP_PARALLEL
619 || (region->inner->type != GIMPLE_OMP_FOR
620 && region->inner->type != GIMPLE_OMP_SECTIONS))
773c5ba7 621 return;
622
623 /* Check for perfect nesting PAR_ENTRY_BB -> WS_ENTRY_BB and
624 WS_EXIT_BB -> PAR_EXIT_BB. */
61e47ac8 625 par_entry_bb = region->entry;
626 par_exit_bb = region->exit;
627 ws_entry_bb = region->inner->entry;
628 ws_exit_bb = region->inner->exit;
773c5ba7 629
630 if (single_succ (par_entry_bb) == ws_entry_bb
631 && single_succ (ws_exit_bb) == par_exit_bb
f018d957 632 && workshare_safe_to_combine_p (ws_entry_bb)
75a70cf9 633 && (gimple_omp_parallel_combined_p (last_stmt (par_entry_bb))
de7ef844 634 || (last_and_only_stmt (ws_entry_bb)
635 && last_and_only_stmt (par_exit_bb))))
773c5ba7 636 {
75a70cf9 637 gimple ws_stmt = last_stmt (ws_entry_bb);
61e47ac8 638
75a70cf9 639 if (region->inner->type == GIMPLE_OMP_FOR)
773c5ba7 640 {
641 /* If this is a combined parallel loop, we need to determine
642 whether or not to use the combined library calls. There
643 are two cases where we do not apply the transformation:
 644 static loops and any kind of ordered loop. In the former
645 case, we already open code the loop so there is no need
646 to do anything else. In the latter case, the combined
647 parallel loop call would still need extra synchronization
648 to implement ordered semantics, so there would not be any
649 gain in using the combined call. */
75a70cf9 650 tree clauses = gimple_omp_for_clauses (ws_stmt);
773c5ba7 651 tree c = find_omp_clause (clauses, OMP_CLAUSE_SCHEDULE);
652 if (c == NULL
653 || OMP_CLAUSE_SCHEDULE_KIND (c) == OMP_CLAUSE_SCHEDULE_STATIC
654 || find_omp_clause (clauses, OMP_CLAUSE_ORDERED))
655 {
656 region->is_combined_parallel = false;
657 region->inner->is_combined_parallel = false;
658 return;
659 }
660 }
661
662 region->is_combined_parallel = true;
663 region->inner->is_combined_parallel = true;
61e47ac8 664 region->ws_args = get_ws_args_for (ws_stmt);
773c5ba7 665 }
1e8e9920 666}
667
773c5ba7 668
1e8e9920 669/* Return true if EXPR is variable sized. */
670
671static inline bool
1f1872fd 672is_variable_sized (const_tree expr)
1e8e9920 673{
674 return !TREE_CONSTANT (TYPE_SIZE_UNIT (TREE_TYPE (expr)));
675}
676
677/* Return true if DECL is a reference type. */
678
679static inline bool
680is_reference (tree decl)
681{
682 return lang_hooks.decls.omp_privatize_by_reference (decl);
683}
684
685/* Lookup variables in the decl or field splay trees. The "maybe" form
 686 allows the variable not to have been entered; otherwise we
 687 assert that it has been. */
688
689static inline tree
690lookup_decl (tree var, omp_context *ctx)
691{
e3022db7 692 tree *n;
693 n = (tree *) pointer_map_contains (ctx->cb.decl_map, var);
694 return *n;
1e8e9920 695}
696
697static inline tree
e8a588af 698maybe_lookup_decl (const_tree var, omp_context *ctx)
1e8e9920 699{
e3022db7 700 tree *n;
701 n = (tree *) pointer_map_contains (ctx->cb.decl_map, var);
702 return n ? *n : NULL_TREE;
1e8e9920 703}
704
705static inline tree
706lookup_field (tree var, omp_context *ctx)
707{
708 splay_tree_node n;
709 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) var);
710 return (tree) n->value;
711}
712
fd6481cf 713static inline tree
714lookup_sfield (tree var, omp_context *ctx)
715{
716 splay_tree_node n;
717 n = splay_tree_lookup (ctx->sfield_map
718 ? ctx->sfield_map : ctx->field_map,
719 (splay_tree_key) var);
720 return (tree) n->value;
721}
722
1e8e9920 723static inline tree
724maybe_lookup_field (tree var, omp_context *ctx)
725{
726 splay_tree_node n;
727 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) var);
728 return n ? (tree) n->value : NULL_TREE;
729}
730
e8a588af 731/* Return true if DECL should be copied by pointer. SHARED_CTX is
732 the parallel context if DECL is to be shared. */
1e8e9920 733
734static bool
fd6481cf 735use_pointer_for_field (tree decl, omp_context *shared_ctx)
1e8e9920 736{
737 if (AGGREGATE_TYPE_P (TREE_TYPE (decl)))
738 return true;
739
554f2707 740 /* We can only use copy-in/copy-out semantics for shared variables
1e8e9920 741 when we know the value is not accessible from an outer scope. */
e8a588af 742 if (shared_ctx)
1e8e9920 743 {
744 /* ??? Trivially accessible from anywhere. But why would we even
745 be passing an address in this case? Should we simply assert
746 this to be false, or should we have a cleanup pass that removes
747 these from the list of mappings? */
748 if (TREE_STATIC (decl) || DECL_EXTERNAL (decl))
749 return true;
750
751 /* For variables with DECL_HAS_VALUE_EXPR_P set, we cannot tell
752 without analyzing the expression whether or not its location
753 is accessible to anyone else. In the case of nested parallel
754 regions it certainly may be. */
df2c34fc 755 if (TREE_CODE (decl) != RESULT_DECL && DECL_HAS_VALUE_EXPR_P (decl))
1e8e9920 756 return true;
757
758 /* Do not use copy-in/copy-out for variables that have their
759 address taken. */
760 if (TREE_ADDRESSABLE (decl))
761 return true;
e8a588af 762
763 /* Disallow copy-in/out in nested parallel if
764 decl is shared in outer parallel, otherwise
765 each thread could store the shared variable
766 in its own copy-in location, making the
767 variable no longer really shared. */
768 if (!TREE_READONLY (decl) && shared_ctx->is_nested)
769 {
770 omp_context *up;
771
772 for (up = shared_ctx->outer; up; up = up->outer)
0cb159ec 773 if (is_taskreg_ctx (up) && maybe_lookup_decl (decl, up))
e8a588af 774 break;
775
0cb159ec 776 if (up)
e8a588af 777 {
778 tree c;
779
75a70cf9 780 for (c = gimple_omp_taskreg_clauses (up->stmt);
e8a588af 781 c; c = OMP_CLAUSE_CHAIN (c))
782 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_SHARED
783 && OMP_CLAUSE_DECL (c) == decl)
784 break;
785
786 if (c)
787 return true;
788 }
789 }
fd6481cf 790
791 /* For tasks avoid using copy-in/out, unless they are readonly
792 (in which case just copy-in is used). As tasks can be
 793 deferred or executed in a different thread, when GOMP_task
794 returns, the task hasn't necessarily terminated. */
795 if (!TREE_READONLY (decl) && is_task_ctx (shared_ctx))
796 {
797 tree outer = maybe_lookup_decl_in_outer_ctx (decl, shared_ctx);
798 if (is_gimple_reg (outer))
799 {
800 /* Taking address of OUTER in lower_send_shared_vars
801 might need regimplification of everything that uses the
802 variable. */
803 if (!task_shared_vars)
804 task_shared_vars = BITMAP_ALLOC (NULL);
805 bitmap_set_bit (task_shared_vars, DECL_UID (outer));
806 TREE_ADDRESSABLE (outer) = 1;
807 }
808 return true;
809 }
1e8e9920 810 }
811
812 return false;
813}
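
/* For example, with "int x; ... #pragma omp parallel shared (x)" where
   &x has been taken, TREE_ADDRESSABLE is set and the logic above makes
   every thread access the original location through a pointer; a
   private copy-in/copy-out slot would be invisible through the escaped
   pointer.  */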
814
79acaae1 815/* Create a new VAR_DECL and copy information from VAR to it. */
1e8e9920 816
79acaae1 817tree
818copy_var_decl (tree var, tree name, tree type)
1e8e9920 819{
e60a6f7b 820 tree copy = build_decl (DECL_SOURCE_LOCATION (var), VAR_DECL, name, type);
1e8e9920 821
822 TREE_ADDRESSABLE (copy) = TREE_ADDRESSABLE (var);
79acaae1 823 TREE_THIS_VOLATILE (copy) = TREE_THIS_VOLATILE (var);
8ea8de24 824 DECL_GIMPLE_REG_P (copy) = DECL_GIMPLE_REG_P (var);
1e8e9920 825 DECL_ARTIFICIAL (copy) = DECL_ARTIFICIAL (var);
826 DECL_IGNORED_P (copy) = DECL_IGNORED_P (var);
79acaae1 827 DECL_CONTEXT (copy) = DECL_CONTEXT (var);
1e8e9920 828 TREE_USED (copy) = 1;
1e8e9920 829 DECL_SEEN_IN_BIND_EXPR_P (copy) = 1;
830
79acaae1 831 return copy;
832}
833
834/* Construct a new automatic decl similar to VAR. */
835
836static tree
837omp_copy_decl_2 (tree var, tree name, tree type, omp_context *ctx)
838{
839 tree copy = copy_var_decl (var, name, type);
840
841 DECL_CONTEXT (copy) = current_function_decl;
1e8e9920 842 TREE_CHAIN (copy) = ctx->block_vars;
843 ctx->block_vars = copy;
844
845 return copy;
846}
847
848static tree
849omp_copy_decl_1 (tree var, omp_context *ctx)
850{
851 return omp_copy_decl_2 (var, DECL_NAME (var), TREE_TYPE (var), ctx);
852}
853
854/* Build tree nodes to access the field for VAR on the receiver side. */
855
856static tree
857build_receiver_ref (tree var, bool by_ref, omp_context *ctx)
858{
859 tree x, field = lookup_field (var, ctx);
860
861 /* If the receiver record type was remapped in the child function,
862 remap the field into the new record type. */
863 x = maybe_lookup_field (field, ctx);
864 if (x != NULL)
865 field = x;
866
867 x = build_fold_indirect_ref (ctx->receiver_decl);
868 x = build3 (COMPONENT_REF, TREE_TYPE (field), x, field, NULL);
869 if (by_ref)
870 x = build_fold_indirect_ref (x);
871
872 return x;
873}
874
875/* Build tree nodes to access VAR in the scope outer to CTX. In the case
876 of a parallel, this is a component reference; for workshare constructs
877 this is some variable. */
878
879static tree
880build_outer_var_ref (tree var, omp_context *ctx)
881{
882 tree x;
883
f49d7bb5 884 if (is_global_var (maybe_lookup_decl_in_outer_ctx (var, ctx)))
1e8e9920 885 x = var;
886 else if (is_variable_sized (var))
887 {
888 x = TREE_OPERAND (DECL_VALUE_EXPR (var), 0);
889 x = build_outer_var_ref (x, ctx);
890 x = build_fold_indirect_ref (x);
891 }
fd6481cf 892 else if (is_taskreg_ctx (ctx))
1e8e9920 893 {
e8a588af 894 bool by_ref = use_pointer_for_field (var, NULL);
1e8e9920 895 x = build_receiver_ref (var, by_ref, ctx);
896 }
897 else if (ctx->outer)
898 x = lookup_decl (var, ctx->outer);
9438af57 899 else if (is_reference (var))
 900 /* This can happen with orphaned constructs. If var is a reference,
 901 it may be shared and as such valid. */
902 x = var;
1e8e9920 903 else
904 gcc_unreachable ();
905
906 if (is_reference (var))
907 x = build_fold_indirect_ref (x);
908
909 return x;
910}
911
912/* Build tree nodes to access the field for VAR on the sender side. */
913
914static tree
915build_sender_ref (tree var, omp_context *ctx)
916{
fd6481cf 917 tree field = lookup_sfield (var, ctx);
1e8e9920 918 return build3 (COMPONENT_REF, TREE_TYPE (field),
919 ctx->sender_decl, field, NULL);
920}
921
922/* Add a new field for VAR inside the structure CTX->SENDER_DECL. */
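
/* MASK is a bit mask: bit 0 requests a field in CTX->RECORD_TYPE (the
   record handed to the child function) and bit 1 a field in
   CTX->SRECORD_TYPE (the sender-side record used by task constructs),
   so a MASK of 3 installs the variable in both.  */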
923
924static void
fd6481cf 925install_var_field (tree var, bool by_ref, int mask, omp_context *ctx)
1e8e9920 926{
fd6481cf 927 tree field, type, sfield = NULL_TREE;
1e8e9920 928
fd6481cf 929 gcc_assert ((mask & 1) == 0
930 || !splay_tree_lookup (ctx->field_map, (splay_tree_key) var));
931 gcc_assert ((mask & 2) == 0 || !ctx->sfield_map
932 || !splay_tree_lookup (ctx->sfield_map, (splay_tree_key) var));
1e8e9920 933
934 type = TREE_TYPE (var);
935 if (by_ref)
936 type = build_pointer_type (type);
fd6481cf 937 else if ((mask & 3) == 1 && is_reference (var))
938 type = TREE_TYPE (type);
1e8e9920 939
e60a6f7b 940 field = build_decl (DECL_SOURCE_LOCATION (var),
941 FIELD_DECL, DECL_NAME (var), type);
1e8e9920 942
943 /* Remember what variable this field was created for. This does have a
944 side effect of making dwarf2out ignore this member, so for helpful
945 debugging we clear it later in delete_omp_context. */
946 DECL_ABSTRACT_ORIGIN (field) = var;
fd6481cf 947 if (type == TREE_TYPE (var))
948 {
949 DECL_ALIGN (field) = DECL_ALIGN (var);
950 DECL_USER_ALIGN (field) = DECL_USER_ALIGN (var);
951 TREE_THIS_VOLATILE (field) = TREE_THIS_VOLATILE (var);
952 }
953 else
954 DECL_ALIGN (field) = TYPE_ALIGN (type);
1e8e9920 955
fd6481cf 956 if ((mask & 3) == 3)
957 {
958 insert_field_into_struct (ctx->record_type, field);
959 if (ctx->srecord_type)
960 {
e60a6f7b 961 sfield = build_decl (DECL_SOURCE_LOCATION (var),
962 FIELD_DECL, DECL_NAME (var), type);
fd6481cf 963 DECL_ABSTRACT_ORIGIN (sfield) = var;
964 DECL_ALIGN (sfield) = DECL_ALIGN (field);
965 DECL_USER_ALIGN (sfield) = DECL_USER_ALIGN (field);
966 TREE_THIS_VOLATILE (sfield) = TREE_THIS_VOLATILE (field);
967 insert_field_into_struct (ctx->srecord_type, sfield);
968 }
969 }
970 else
971 {
972 if (ctx->srecord_type == NULL_TREE)
973 {
974 tree t;
975
976 ctx->srecord_type = lang_hooks.types.make_type (RECORD_TYPE);
977 ctx->sfield_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
978 for (t = TYPE_FIELDS (ctx->record_type); t ; t = TREE_CHAIN (t))
979 {
e60a6f7b 980 sfield = build_decl (DECL_SOURCE_LOCATION (var),
981 FIELD_DECL, DECL_NAME (t), TREE_TYPE (t));
fd6481cf 982 DECL_ABSTRACT_ORIGIN (sfield) = DECL_ABSTRACT_ORIGIN (t);
983 insert_field_into_struct (ctx->srecord_type, sfield);
984 splay_tree_insert (ctx->sfield_map,
985 (splay_tree_key) DECL_ABSTRACT_ORIGIN (t),
986 (splay_tree_value) sfield);
987 }
988 }
989 sfield = field;
990 insert_field_into_struct ((mask & 1) ? ctx->record_type
991 : ctx->srecord_type, field);
992 }
1e8e9920 993
fd6481cf 994 if (mask & 1)
995 splay_tree_insert (ctx->field_map, (splay_tree_key) var,
996 (splay_tree_value) field);
997 if ((mask & 2) && ctx->sfield_map)
998 splay_tree_insert (ctx->sfield_map, (splay_tree_key) var,
999 (splay_tree_value) sfield);
1e8e9920 1000}
1001
1002static tree
1003install_var_local (tree var, omp_context *ctx)
1004{
1005 tree new_var = omp_copy_decl_1 (var, ctx);
1006 insert_decl_map (&ctx->cb, var, new_var);
1007 return new_var;
1008}
1009
1010/* Adjust the replacement for DECL in CTX for the new context. This means
1011 copying the DECL_VALUE_EXPR, and fixing up the type. */
1012
1013static void
1014fixup_remapped_decl (tree decl, omp_context *ctx, bool private_debug)
1015{
1016 tree new_decl, size;
1017
1018 new_decl = lookup_decl (decl, ctx);
1019
1020 TREE_TYPE (new_decl) = remap_type (TREE_TYPE (decl), &ctx->cb);
1021
1022 if ((!TREE_CONSTANT (DECL_SIZE (new_decl)) || private_debug)
1023 && DECL_HAS_VALUE_EXPR_P (decl))
1024 {
1025 tree ve = DECL_VALUE_EXPR (decl);
75a70cf9 1026 walk_tree (&ve, copy_tree_body_r, &ctx->cb, NULL);
1e8e9920 1027 SET_DECL_VALUE_EXPR (new_decl, ve);
1028 DECL_HAS_VALUE_EXPR_P (new_decl) = 1;
1029 }
1030
1031 if (!TREE_CONSTANT (DECL_SIZE (new_decl)))
1032 {
1033 size = remap_decl (DECL_SIZE (decl), &ctx->cb);
1034 if (size == error_mark_node)
1035 size = TYPE_SIZE (TREE_TYPE (new_decl));
1036 DECL_SIZE (new_decl) = size;
1037
1038 size = remap_decl (DECL_SIZE_UNIT (decl), &ctx->cb);
1039 if (size == error_mark_node)
1040 size = TYPE_SIZE_UNIT (TREE_TYPE (new_decl));
1041 DECL_SIZE_UNIT (new_decl) = size;
1042 }
1043}
1044
1045/* The callback for remap_decl. Search all containing contexts for a
1046 mapping of the variable; this avoids having to duplicate the splay
1047 tree ahead of time. We know a mapping doesn't already exist in the
1048 given context. Create new mappings to implement default semantics. */
1049
1050static tree
1051omp_copy_decl (tree var, copy_body_data *cb)
1052{
1053 omp_context *ctx = (omp_context *) cb;
1054 tree new_var;
1055
1e8e9920 1056 if (TREE_CODE (var) == LABEL_DECL)
1057 {
e60a6f7b 1058 new_var = create_artificial_label (DECL_SOURCE_LOCATION (var));
773c5ba7 1059 DECL_CONTEXT (new_var) = current_function_decl;
1e8e9920 1060 insert_decl_map (&ctx->cb, var, new_var);
1061 return new_var;
1062 }
1063
fd6481cf 1064 while (!is_taskreg_ctx (ctx))
1e8e9920 1065 {
1066 ctx = ctx->outer;
1067 if (ctx == NULL)
1068 return var;
1069 new_var = maybe_lookup_decl (var, ctx);
1070 if (new_var)
1071 return new_var;
1072 }
1073
f49d7bb5 1074 if (is_global_var (var) || decl_function_context (var) != ctx->cb.src_fn)
1075 return var;
1076
1e8e9920 1077 return error_mark_node;
1078}
1079
773c5ba7 1080
773c5ba7 1083/* Debugging dumps for parallel regions. */
1084void dump_omp_region (FILE *, struct omp_region *, int);
1085void debug_omp_region (struct omp_region *);
1086void debug_all_omp_regions (void);
1087
1088/* Dump the parallel region tree rooted at REGION. */
1089
1090void
1091dump_omp_region (FILE *file, struct omp_region *region, int indent)
1092{
61e47ac8 1093 fprintf (file, "%*sbb %d: %s\n", indent, "", region->entry->index,
75a70cf9 1094 gimple_code_name[region->type]);
773c5ba7 1095
1096 if (region->inner)
1097 dump_omp_region (file, region->inner, indent + 4);
1098
61e47ac8 1099 if (region->cont)
1100 {
75a70cf9 1101 fprintf (file, "%*sbb %d: GIMPLE_OMP_CONTINUE\n", indent, "",
61e47ac8 1102 region->cont->index);
1103 }
48e1416a 1104
773c5ba7 1105 if (region->exit)
75a70cf9 1106 fprintf (file, "%*sbb %d: GIMPLE_OMP_RETURN\n", indent, "",
61e47ac8 1107 region->exit->index);
773c5ba7 1108 else
61e47ac8 1109 fprintf (file, "%*s[no exit marker]\n", indent, "");
773c5ba7 1110
1111 if (region->next)
61e47ac8 1112 dump_omp_region (file, region->next, indent);
773c5ba7 1113}
1114
4b987fac 1115DEBUG_FUNCTION void
773c5ba7 1116debug_omp_region (struct omp_region *region)
1117{
1118 dump_omp_region (stderr, region, 0);
1119}
1120
4b987fac 1121DEBUG_FUNCTION void
773c5ba7 1122debug_all_omp_regions (void)
1123{
1124 dump_omp_region (stderr, root_omp_region, 0);
1125}
1126
1127
 1128/* Create a new parallel region starting at BB inside region PARENT. */
1129
61e47ac8 1130struct omp_region *
75a70cf9 1131new_omp_region (basic_block bb, enum gimple_code type,
1132 struct omp_region *parent)
773c5ba7 1133{
4077bf7a 1134 struct omp_region *region = XCNEW (struct omp_region);
773c5ba7 1135
1136 region->outer = parent;
61e47ac8 1137 region->entry = bb;
1138 region->type = type;
773c5ba7 1139
1140 if (parent)
1141 {
1142 /* This is a nested region. Add it to the list of inner
1143 regions in PARENT. */
1144 region->next = parent->inner;
1145 parent->inner = region;
1146 }
61e47ac8 1147 else
773c5ba7 1148 {
1149 /* This is a toplevel region. Add it to the list of toplevel
1150 regions in ROOT_OMP_REGION. */
1151 region->next = root_omp_region;
1152 root_omp_region = region;
1153 }
61e47ac8 1154
1155 return region;
1156}
1157
1158/* Release the memory associated with the region tree rooted at REGION. */
1159
1160static void
1161free_omp_region_1 (struct omp_region *region)
1162{
1163 struct omp_region *i, *n;
1164
1165 for (i = region->inner; i ; i = n)
773c5ba7 1166 {
61e47ac8 1167 n = i->next;
1168 free_omp_region_1 (i);
773c5ba7 1169 }
1170
61e47ac8 1171 free (region);
1172}
773c5ba7 1173
61e47ac8 1174/* Release the memory for the entire omp region tree. */
1175
1176void
1177free_omp_regions (void)
1178{
1179 struct omp_region *r, *n;
1180 for (r = root_omp_region; r ; r = n)
1181 {
1182 n = r->next;
1183 free_omp_region_1 (r);
1184 }
1185 root_omp_region = NULL;
773c5ba7 1186}
1187
1188
1e8e9920 1189/* Create a new context, with OUTER_CTX being the surrounding context. */
1190
1191static omp_context *
75a70cf9 1192new_omp_context (gimple stmt, omp_context *outer_ctx)
1e8e9920 1193{
1194 omp_context *ctx = XCNEW (omp_context);
1195
1196 splay_tree_insert (all_contexts, (splay_tree_key) stmt,
1197 (splay_tree_value) ctx);
1198 ctx->stmt = stmt;
1199
1200 if (outer_ctx)
1201 {
1202 ctx->outer = outer_ctx;
1203 ctx->cb = outer_ctx->cb;
1204 ctx->cb.block = NULL;
1205 ctx->depth = outer_ctx->depth + 1;
1206 }
1207 else
1208 {
1209 ctx->cb.src_fn = current_function_decl;
1210 ctx->cb.dst_fn = current_function_decl;
1211 ctx->cb.src_node = cgraph_node (current_function_decl);
1212 ctx->cb.dst_node = ctx->cb.src_node;
1213 ctx->cb.src_cfun = cfun;
1214 ctx->cb.copy_decl = omp_copy_decl;
e38def9c 1215 ctx->cb.eh_lp_nr = 0;
1e8e9920 1216 ctx->cb.transform_call_graph_edges = CB_CGE_MOVE;
1217 ctx->depth = 1;
1218 }
1219
e3022db7 1220 ctx->cb.decl_map = pointer_map_create ();
1e8e9920 1221
1222 return ctx;
1223}
1224
75a70cf9 1225static gimple_seq maybe_catch_exception (gimple_seq);
f6430caa 1226
1227/* Finalize task copyfn. */
1228
1229static void
75a70cf9 1230finalize_task_copyfn (gimple task_stmt)
f6430caa 1231{
1232 struct function *child_cfun;
1233 tree child_fn, old_fn;
75a70cf9 1234 gimple_seq seq, new_seq;
1235 gimple bind;
f6430caa 1236
75a70cf9 1237 child_fn = gimple_omp_task_copy_fn (task_stmt);
f6430caa 1238 if (child_fn == NULL_TREE)
1239 return;
1240
1241 child_cfun = DECL_STRUCT_FUNCTION (child_fn);
1242
1243 /* Inform the callgraph about the new function. */
1244 DECL_STRUCT_FUNCTION (child_fn)->curr_properties
1245 = cfun->curr_properties;
1246
1247 old_fn = current_function_decl;
1248 push_cfun (child_cfun);
1249 current_function_decl = child_fn;
75a70cf9 1250 bind = gimplify_body (&DECL_SAVED_TREE (child_fn), child_fn, false);
1251 seq = gimple_seq_alloc ();
1252 gimple_seq_add_stmt (&seq, bind);
1253 new_seq = maybe_catch_exception (seq);
1254 if (new_seq != seq)
1255 {
1256 bind = gimple_build_bind (NULL, new_seq, NULL);
1257 seq = gimple_seq_alloc ();
1258 gimple_seq_add_stmt (&seq, bind);
1259 }
1260 gimple_set_body (child_fn, seq);
f6430caa 1261 pop_cfun ();
1262 current_function_decl = old_fn;
1263
1264 cgraph_add_new_function (child_fn, false);
1265}
1266
1e8e9920 1267/* Destroy an omp_context data structure. Called through the splay tree
1268 value delete callback. */
1269
1270static void
1271delete_omp_context (splay_tree_value value)
1272{
1273 omp_context *ctx = (omp_context *) value;
1274
e3022db7 1275 pointer_map_destroy (ctx->cb.decl_map);
1e8e9920 1276
1277 if (ctx->field_map)
1278 splay_tree_delete (ctx->field_map);
fd6481cf 1279 if (ctx->sfield_map)
1280 splay_tree_delete (ctx->sfield_map);
1e8e9920 1281
1282 /* We hijacked DECL_ABSTRACT_ORIGIN earlier. We need to clear it before
1283 it produces corrupt debug information. */
1284 if (ctx->record_type)
1285 {
1286 tree t;
1287 for (t = TYPE_FIELDS (ctx->record_type); t ; t = TREE_CHAIN (t))
1288 DECL_ABSTRACT_ORIGIN (t) = NULL;
1289 }
fd6481cf 1290 if (ctx->srecord_type)
1291 {
1292 tree t;
1293 for (t = TYPE_FIELDS (ctx->srecord_type); t ; t = TREE_CHAIN (t))
1294 DECL_ABSTRACT_ORIGIN (t) = NULL;
1295 }
1e8e9920 1296
f6430caa 1297 if (is_task_ctx (ctx))
1298 finalize_task_copyfn (ctx->stmt);
1299
1e8e9920 1300 XDELETE (ctx);
1301}
1302
1303/* Fix up RECEIVER_DECL with a type that has been remapped to the child
1304 context. */
1305
1306static void
1307fixup_child_record_type (omp_context *ctx)
1308{
1309 tree f, type = ctx->record_type;
1310
1311 /* ??? It isn't sufficient to just call remap_type here, because
1312 variably_modified_type_p doesn't work the way we expect for
1313 record types. Testing each field for whether it needs remapping
1314 and creating a new record by hand works, however. */
1315 for (f = TYPE_FIELDS (type); f ; f = TREE_CHAIN (f))
1316 if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
1317 break;
1318 if (f)
1319 {
1320 tree name, new_fields = NULL;
1321
1322 type = lang_hooks.types.make_type (RECORD_TYPE);
1323 name = DECL_NAME (TYPE_NAME (ctx->record_type));
e60a6f7b 1324 name = build_decl (DECL_SOURCE_LOCATION (ctx->receiver_decl),
1325 TYPE_DECL, name, type);
1e8e9920 1326 TYPE_NAME (type) = name;
1327
1328 for (f = TYPE_FIELDS (ctx->record_type); f ; f = TREE_CHAIN (f))
1329 {
1330 tree new_f = copy_node (f);
1331 DECL_CONTEXT (new_f) = type;
1332 TREE_TYPE (new_f) = remap_type (TREE_TYPE (f), &ctx->cb);
1333 TREE_CHAIN (new_f) = new_fields;
75a70cf9 1334 walk_tree (&DECL_SIZE (new_f), copy_tree_body_r, &ctx->cb, NULL);
1335 walk_tree (&DECL_SIZE_UNIT (new_f), copy_tree_body_r,
1336 &ctx->cb, NULL);
1337 walk_tree (&DECL_FIELD_OFFSET (new_f), copy_tree_body_r,
1338 &ctx->cb, NULL);
1e8e9920 1339 new_fields = new_f;
1340
1341 /* Arrange to be able to look up the receiver field
1342 given the sender field. */
1343 splay_tree_insert (ctx->field_map, (splay_tree_key) f,
1344 (splay_tree_value) new_f);
1345 }
1346 TYPE_FIELDS (type) = nreverse (new_fields);
1347 layout_type (type);
1348 }
1349
1350 TREE_TYPE (ctx->receiver_decl) = build_pointer_type (type);
1351}
1352
1353/* Instantiate decls as necessary in CTX to satisfy the data sharing
1354 specified by CLAUSES. */
1355
1356static void
1357scan_sharing_clauses (tree clauses, omp_context *ctx)
1358{
1359 tree c, decl;
1360 bool scan_array_reductions = false;
1361
1362 for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
1363 {
1364 bool by_ref;
1365
55d6e7cd 1366 switch (OMP_CLAUSE_CODE (c))
1e8e9920 1367 {
1368 case OMP_CLAUSE_PRIVATE:
1369 decl = OMP_CLAUSE_DECL (c);
fd6481cf 1370 if (OMP_CLAUSE_PRIVATE_OUTER_REF (c))
1371 goto do_private;
1372 else if (!is_variable_sized (decl))
1e8e9920 1373 install_var_local (decl, ctx);
1374 break;
1375
1376 case OMP_CLAUSE_SHARED:
fd6481cf 1377 gcc_assert (is_taskreg_ctx (ctx));
1e8e9920 1378 decl = OMP_CLAUSE_DECL (c);
e7327393 1379 gcc_assert (!COMPLETE_TYPE_P (TREE_TYPE (decl))
1380 || !is_variable_sized (decl));
f49d7bb5 1381 /* Global variables don't need to be copied,
1382 the receiver side will use them directly. */
1383 if (is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx)))
1384 break;
fd6481cf 1385 by_ref = use_pointer_for_field (decl, ctx);
1e8e9920 1386 if (! TREE_READONLY (decl)
1387 || TREE_ADDRESSABLE (decl)
1388 || by_ref
1389 || is_reference (decl))
1390 {
fd6481cf 1391 install_var_field (decl, by_ref, 3, ctx);
1e8e9920 1392 install_var_local (decl, ctx);
1393 break;
1394 }
1395 /* We don't need to copy const scalar vars back. */
55d6e7cd 1396 OMP_CLAUSE_SET_CODE (c, OMP_CLAUSE_FIRSTPRIVATE);
1e8e9920 1397 goto do_private;
1398
1399 case OMP_CLAUSE_LASTPRIVATE:
1400 /* Let the corresponding firstprivate clause create
1401 the variable. */
1402 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
1403 break;
1404 /* FALLTHRU */
1405
1406 case OMP_CLAUSE_FIRSTPRIVATE:
1407 case OMP_CLAUSE_REDUCTION:
1408 decl = OMP_CLAUSE_DECL (c);
1409 do_private:
1410 if (is_variable_sized (decl))
1e8e9920 1411 {
fd6481cf 1412 if (is_task_ctx (ctx))
1413 install_var_field (decl, false, 1, ctx);
1414 break;
1415 }
1416 else if (is_taskreg_ctx (ctx))
1417 {
1418 bool global
1419 = is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx));
e8a588af 1420 by_ref = use_pointer_for_field (decl, NULL);
fd6481cf 1421
1422 if (is_task_ctx (ctx)
1423 && (global || by_ref || is_reference (decl)))
1424 {
1425 install_var_field (decl, false, 1, ctx);
1426 if (!global)
1427 install_var_field (decl, by_ref, 2, ctx);
1428 }
1429 else if (!global)
1430 install_var_field (decl, by_ref, 3, ctx);
1e8e9920 1431 }
1432 install_var_local (decl, ctx);
1433 break;
1434
1435 case OMP_CLAUSE_COPYPRIVATE:
1e8e9920 1436 case OMP_CLAUSE_COPYIN:
1437 decl = OMP_CLAUSE_DECL (c);
e8a588af 1438 by_ref = use_pointer_for_field (decl, NULL);
fd6481cf 1439 install_var_field (decl, by_ref, 3, ctx);
1e8e9920 1440 break;
1441
1442 case OMP_CLAUSE_DEFAULT:
1443 ctx->default_kind = OMP_CLAUSE_DEFAULT_KIND (c);
1444 break;
1445
1446 case OMP_CLAUSE_IF:
1447 case OMP_CLAUSE_NUM_THREADS:
1448 case OMP_CLAUSE_SCHEDULE:
1449 if (ctx->outer)
75a70cf9 1450 scan_omp_op (&OMP_CLAUSE_OPERAND (c, 0), ctx->outer);
1e8e9920 1451 break;
1452
1453 case OMP_CLAUSE_NOWAIT:
1454 case OMP_CLAUSE_ORDERED:
fd6481cf 1455 case OMP_CLAUSE_COLLAPSE:
1456 case OMP_CLAUSE_UNTIED:
1e8e9920 1457 break;
1458
1459 default:
1460 gcc_unreachable ();
1461 }
1462 }
1463
1464 for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
1465 {
55d6e7cd 1466 switch (OMP_CLAUSE_CODE (c))
1e8e9920 1467 {
1468 case OMP_CLAUSE_LASTPRIVATE:
1469 /* Let the corresponding firstprivate clause create
1470 the variable. */
75a70cf9 1471 if (OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c))
fd6481cf 1472 scan_array_reductions = true;
1e8e9920 1473 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
1474 break;
1475 /* FALLTHRU */
1476
1477 case OMP_CLAUSE_PRIVATE:
1478 case OMP_CLAUSE_FIRSTPRIVATE:
1479 case OMP_CLAUSE_REDUCTION:
1480 decl = OMP_CLAUSE_DECL (c);
1481 if (is_variable_sized (decl))
1482 install_var_local (decl, ctx);
1483 fixup_remapped_decl (decl, ctx,
55d6e7cd 1484 OMP_CLAUSE_CODE (c) == OMP_CLAUSE_PRIVATE
1e8e9920 1485 && OMP_CLAUSE_PRIVATE_DEBUG (c));
55d6e7cd 1486 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
1e8e9920 1487 && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
1488 scan_array_reductions = true;
1489 break;
1490
1491 case OMP_CLAUSE_SHARED:
1492 decl = OMP_CLAUSE_DECL (c);
f49d7bb5 1493 if (! is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx)))
1494 fixup_remapped_decl (decl, ctx, false);
1e8e9920 1495 break;
1496
1497 case OMP_CLAUSE_COPYPRIVATE:
1498 case OMP_CLAUSE_COPYIN:
1499 case OMP_CLAUSE_DEFAULT:
1500 case OMP_CLAUSE_IF:
1501 case OMP_CLAUSE_NUM_THREADS:
1502 case OMP_CLAUSE_SCHEDULE:
1503 case OMP_CLAUSE_NOWAIT:
1504 case OMP_CLAUSE_ORDERED:
fd6481cf 1505 case OMP_CLAUSE_COLLAPSE:
1506 case OMP_CLAUSE_UNTIED:
1e8e9920 1507 break;
1508
1509 default:
1510 gcc_unreachable ();
1511 }
1512 }
1513
1514 if (scan_array_reductions)
1515 for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
55d6e7cd 1516 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
1e8e9920 1517 && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
1518 {
75a70cf9 1519 scan_omp (OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c), ctx);
1520 scan_omp (OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c), ctx);
1e8e9920 1521 }
fd6481cf 1522 else if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE
75a70cf9 1523 && OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c))
1524 scan_omp (OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c), ctx);
1e8e9920 1525}
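
/* To illustrate the first pass above: for
   "#pragma omp parallel shared (a) firstprivate (b)" with ordinary
   non-readonly scalars, each clause installs a field in .omp_data_s
   via install_var_field plus a remapped local via install_var_local,
   while shared globals are skipped because the child function can
   reference them directly.  */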
1526
 1527/* Create a new name for the omp child function. Returns an identifier. */
1528
1529static GTY(()) unsigned int tmp_ompfn_id_num;
1530
1531static tree
fd6481cf 1532create_omp_child_function_name (bool task_copy)
1e8e9920 1533{
a70a5e2c 1534 return (clone_function_name (current_function_decl,
1535 task_copy ? "_omp_cpyfn" : "_omp_fn"));
1e8e9920 1536}
1537
1538/* Build a decl for the omp child function. It'll not contain a body
1539 yet, just the bare decl. */
1540
1541static void
fd6481cf 1542create_omp_child_function (omp_context *ctx, bool task_copy)
1e8e9920 1543{
1544 tree decl, type, name, t;
1545
fd6481cf 1546 name = create_omp_child_function_name (task_copy);
1547 if (task_copy)
1548 type = build_function_type_list (void_type_node, ptr_type_node,
1549 ptr_type_node, NULL_TREE);
1550 else
1551 type = build_function_type_list (void_type_node, ptr_type_node, NULL_TREE);
1e8e9920 1552
e60a6f7b 1553 decl = build_decl (gimple_location (ctx->stmt),
1554 FUNCTION_DECL, name, type);
1e8e9920 1555
fd6481cf 1556 if (!task_copy)
1557 ctx->cb.dst_fn = decl;
1558 else
75a70cf9 1559 gimple_omp_task_set_copy_fn (ctx->stmt, decl);
1e8e9920 1560
1561 TREE_STATIC (decl) = 1;
1562 TREE_USED (decl) = 1;
1563 DECL_ARTIFICIAL (decl) = 1;
1564 DECL_IGNORED_P (decl) = 0;
1565 TREE_PUBLIC (decl) = 0;
1566 DECL_UNINLINABLE (decl) = 1;
1567 DECL_EXTERNAL (decl) = 0;
1568 DECL_CONTEXT (decl) = NULL_TREE;
773c5ba7 1569 DECL_INITIAL (decl) = make_node (BLOCK);
1e8e9920 1570
e60a6f7b 1571 t = build_decl (DECL_SOURCE_LOCATION (decl),
1572 RESULT_DECL, NULL_TREE, void_type_node);
1e8e9920 1573 DECL_ARTIFICIAL (t) = 1;
1574 DECL_IGNORED_P (t) = 1;
8e5b4ed6 1575 DECL_CONTEXT (t) = decl;
1e8e9920 1576 DECL_RESULT (decl) = t;
1577
e60a6f7b 1578 t = build_decl (DECL_SOURCE_LOCATION (decl),
1579 PARM_DECL, get_identifier (".omp_data_i"), ptr_type_node);
1e8e9920 1580 DECL_ARTIFICIAL (t) = 1;
1581 DECL_ARG_TYPE (t) = ptr_type_node;
773c5ba7 1582 DECL_CONTEXT (t) = current_function_decl;
1e8e9920 1583 TREE_USED (t) = 1;
1584 DECL_ARGUMENTS (decl) = t;
fd6481cf 1585 if (!task_copy)
1586 ctx->receiver_decl = t;
1587 else
1588 {
e60a6f7b 1589 t = build_decl (DECL_SOURCE_LOCATION (decl),
1590 PARM_DECL, get_identifier (".omp_data_o"),
fd6481cf 1591 ptr_type_node);
1592 DECL_ARTIFICIAL (t) = 1;
1593 DECL_ARG_TYPE (t) = ptr_type_node;
1594 DECL_CONTEXT (t) = current_function_decl;
1595 TREE_USED (t) = 1;
86f2ad37 1596 TREE_ADDRESSABLE (t) = 1;
fd6481cf 1597 TREE_CHAIN (t) = DECL_ARGUMENTS (decl);
1598 DECL_ARGUMENTS (decl) = t;
1599 }
1e8e9920 1600
48e1416a 1601 /* Allocate memory for the function structure. The call to
773c5ba7 1602 allocate_struct_function clobbers CFUN, so we need to restore
1e8e9920 1603 it afterward. */
87d4aa85 1604 push_struct_function (decl);
75a70cf9 1605 cfun->function_end_locus = gimple_location (ctx->stmt);
87d4aa85 1606 pop_cfun ();
1e8e9920 1607}
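
/* The decl built above amounts to

       static void foo._omp_fn.0 (void *.omp_data_i);

   with an additional leading "void *.omp_data_o" parameter for task
   copy functions.  fixup_child_record_type later narrows the parameter
   type to a pointer to the remapped record.  */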
1608
1e8e9920 1609
1610/* Scan an OpenMP parallel directive. */
1611
1612static void
75a70cf9 1613scan_omp_parallel (gimple_stmt_iterator *gsi, omp_context *outer_ctx)
1e8e9920 1614{
1615 omp_context *ctx;
1616 tree name;
75a70cf9 1617 gimple stmt = gsi_stmt (*gsi);
1e8e9920 1618
1619 /* Ignore parallel directives with empty bodies, unless there
1620 are copyin clauses. */
1621 if (optimize > 0
75a70cf9 1622 && empty_body_p (gimple_omp_body (stmt))
1623 && find_omp_clause (gimple_omp_parallel_clauses (stmt),
1624 OMP_CLAUSE_COPYIN) == NULL)
1e8e9920 1625 {
75a70cf9 1626 gsi_replace (gsi, gimple_build_nop (), false);
1e8e9920 1627 return;
1628 }
1629
75a70cf9 1630 ctx = new_omp_context (stmt, outer_ctx);
fd6481cf 1631 if (taskreg_nesting_level > 1)
773c5ba7 1632 ctx->is_nested = true;
1e8e9920 1633 ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
1e8e9920 1634 ctx->default_kind = OMP_CLAUSE_DEFAULT_SHARED;
1635 ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
1e8e9920 1636 name = create_tmp_var_name (".omp_data_s");
e60a6f7b 1637 name = build_decl (gimple_location (stmt),
1638 TYPE_DECL, name, ctx->record_type);
1e8e9920 1639 TYPE_NAME (ctx->record_type) = name;
fd6481cf 1640 create_omp_child_function (ctx, false);
75a70cf9 1641 gimple_omp_parallel_set_child_fn (stmt, ctx->cb.dst_fn);
1e8e9920 1642
75a70cf9 1643 scan_sharing_clauses (gimple_omp_parallel_clauses (stmt), ctx);
1644 scan_omp (gimple_omp_body (stmt), ctx);
1e8e9920 1645
1646 if (TYPE_FIELDS (ctx->record_type) == NULL)
1647 ctx->record_type = ctx->receiver_decl = NULL;
1648 else
1649 {
1650 layout_type (ctx->record_type);
1651 fixup_child_record_type (ctx);
1652 }
1653}
1654
fd6481cf 1655/* Scan an OpenMP task directive. */
1656
1657static void
75a70cf9 1658scan_omp_task (gimple_stmt_iterator *gsi, omp_context *outer_ctx)
fd6481cf 1659{
1660 omp_context *ctx;
75a70cf9 1661 tree name, t;
1662 gimple stmt = gsi_stmt (*gsi);
389dd41b 1663 location_t loc = gimple_location (stmt);
fd6481cf 1664
1665 /* Ignore task directives with empty bodies. */
1666 if (optimize > 0
75a70cf9 1667 && empty_body_p (gimple_omp_body (stmt)))
fd6481cf 1668 {
75a70cf9 1669 gsi_replace (gsi, gimple_build_nop (), false);
fd6481cf 1670 return;
1671 }
1672
75a70cf9 1673 ctx = new_omp_context (stmt, outer_ctx);
fd6481cf 1674 if (taskreg_nesting_level > 1)
1675 ctx->is_nested = true;
1676 ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
1677 ctx->default_kind = OMP_CLAUSE_DEFAULT_SHARED;
1678 ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
1679 name = create_tmp_var_name (".omp_data_s");
e60a6f7b 1680 name = build_decl (gimple_location (stmt),
1681 TYPE_DECL, name, ctx->record_type);
fd6481cf 1682 TYPE_NAME (ctx->record_type) = name;
1683 create_omp_child_function (ctx, false);
75a70cf9 1684 gimple_omp_task_set_child_fn (stmt, ctx->cb.dst_fn);
fd6481cf 1685
75a70cf9 1686 scan_sharing_clauses (gimple_omp_task_clauses (stmt), ctx);
fd6481cf 1687
1688 if (ctx->srecord_type)
1689 {
1690 name = create_tmp_var_name (".omp_data_a");
e60a6f7b 1691 name = build_decl (gimple_location (stmt),
1692 TYPE_DECL, name, ctx->srecord_type);
fd6481cf 1693 TYPE_NAME (ctx->srecord_type) = name;
1694 create_omp_child_function (ctx, true);
1695 }
1696
75a70cf9 1697 scan_omp (gimple_omp_body (stmt), ctx);
fd6481cf 1698
1699 if (TYPE_FIELDS (ctx->record_type) == NULL)
1700 {
1701 ctx->record_type = ctx->receiver_decl = NULL;
75a70cf9 1702 t = build_int_cst (long_integer_type_node, 0);
1703 gimple_omp_task_set_arg_size (stmt, t);
1704 t = build_int_cst (long_integer_type_node, 1);
1705 gimple_omp_task_set_arg_align (stmt, t);
fd6481cf 1706 }
1707 else
1708 {
1709 tree *p, vla_fields = NULL_TREE, *q = &vla_fields;
1710 /* Move VLA fields to the end. */
1711 p = &TYPE_FIELDS (ctx->record_type);
1712 while (*p)
1713 if (!TYPE_SIZE_UNIT (TREE_TYPE (*p))
1714 || ! TREE_CONSTANT (TYPE_SIZE_UNIT (TREE_TYPE (*p))))
1715 {
1716 *q = *p;
1717 *p = TREE_CHAIN (*p);
1718 TREE_CHAIN (*q) = NULL_TREE;
1719 q = &TREE_CHAIN (*q);
1720 }
1721 else
1722 p = &TREE_CHAIN (*p);
1723 *p = vla_fields;
1724 layout_type (ctx->record_type);
1725 fixup_child_record_type (ctx);
1726 if (ctx->srecord_type)
1727 layout_type (ctx->srecord_type);
389dd41b 1728 t = fold_convert_loc (loc, long_integer_type_node,
fd6481cf 1729 TYPE_SIZE_UNIT (ctx->record_type));
75a70cf9 1730 gimple_omp_task_set_arg_size (stmt, t);
1731 t = build_int_cst (long_integer_type_node,
fd6481cf 1732 TYPE_ALIGN_UNIT (ctx->record_type));
75a70cf9 1733 gimple_omp_task_set_arg_align (stmt, t);
fd6481cf 1734 }
1735}
1736
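/* For illustration (a hypothetical testcase), consider a task that
   captures a variable-length array:

     void foo (int n)
     {
       int vla[n];
       #pragma omp task firstprivate (vla)
       use (vla);
     }

   The VLA field is moved to the end of .omp_data_s above so that the
   constant-sized fields keep fixed offsets, and the arg_size/arg_align
   values stored on the GIMPLE_OMP_TASK statement describe the block the
   runtime must allocate and the task copy function must fill in.  */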
1e8e9920 1737
773c5ba7 1738/* Scan an OpenMP loop directive. */
1e8e9920 1739
1740static void
75a70cf9 1741scan_omp_for (gimple stmt, omp_context *outer_ctx)
1e8e9920 1742{
773c5ba7 1743 omp_context *ctx;
75a70cf9 1744 size_t i;
1e8e9920 1745
773c5ba7 1746 ctx = new_omp_context (stmt, outer_ctx);
1e8e9920 1747
75a70cf9 1748 scan_sharing_clauses (gimple_omp_for_clauses (stmt), ctx);
1e8e9920 1749
75a70cf9 1750 scan_omp (gimple_omp_for_pre_body (stmt), ctx);
1751 for (i = 0; i < gimple_omp_for_collapse (stmt); i++)
fd6481cf 1752 {
75a70cf9 1753 scan_omp_op (gimple_omp_for_index_ptr (stmt, i), ctx);
1754 scan_omp_op (gimple_omp_for_initial_ptr (stmt, i), ctx);
1755 scan_omp_op (gimple_omp_for_final_ptr (stmt, i), ctx);
1756 scan_omp_op (gimple_omp_for_incr_ptr (stmt, i), ctx);
fd6481cf 1757 }
75a70cf9 1758 scan_omp (gimple_omp_body (stmt), ctx);
1e8e9920 1759}
1760
1761/* Scan an OpenMP sections directive. */
1762
1763static void
75a70cf9 1764scan_omp_sections (gimple stmt, omp_context *outer_ctx)
1e8e9920 1765{
1e8e9920 1766 omp_context *ctx;
1767
1768 ctx = new_omp_context (stmt, outer_ctx);
75a70cf9 1769 scan_sharing_clauses (gimple_omp_sections_clauses (stmt), ctx);
1770 scan_omp (gimple_omp_body (stmt), ctx);
1e8e9920 1771}
1772
1773/* Scan an OpenMP single directive. */
1774
1775static void
75a70cf9 1776scan_omp_single (gimple stmt, omp_context *outer_ctx)
1e8e9920 1777{
1e8e9920 1778 omp_context *ctx;
1779 tree name;
1780
1781 ctx = new_omp_context (stmt, outer_ctx);
1782 ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
1783 ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
1784 name = create_tmp_var_name (".omp_copy_s");
e60a6f7b 1785 name = build_decl (gimple_location (stmt),
1786 TYPE_DECL, name, ctx->record_type);
1e8e9920 1787 TYPE_NAME (ctx->record_type) = name;
1788
75a70cf9 1789 scan_sharing_clauses (gimple_omp_single_clauses (stmt), ctx);
1790 scan_omp (gimple_omp_body (stmt), ctx);
1e8e9920 1791
1792 if (TYPE_FIELDS (ctx->record_type) == NULL)
1793 ctx->record_type = NULL;
1794 else
1795 layout_type (ctx->record_type);
1796}
1797
1e8e9920 1798
c1d127dd 1799/* Check OpenMP nesting restrictions. */
1800static void
75a70cf9 1801check_omp_nesting_restrictions (gimple stmt, omp_context *ctx)
c1d127dd 1802{
75a70cf9 1803 switch (gimple_code (stmt))
c1d127dd 1804 {
75a70cf9 1805 case GIMPLE_OMP_FOR:
1806 case GIMPLE_OMP_SECTIONS:
1807 case GIMPLE_OMP_SINGLE:
1808 case GIMPLE_CALL:
c1d127dd 1809 for (; ctx != NULL; ctx = ctx->outer)
75a70cf9 1810 switch (gimple_code (ctx->stmt))
c1d127dd 1811 {
75a70cf9 1812 case GIMPLE_OMP_FOR:
1813 case GIMPLE_OMP_SECTIONS:
1814 case GIMPLE_OMP_SINGLE:
1815 case GIMPLE_OMP_ORDERED:
1816 case GIMPLE_OMP_MASTER:
1817 case GIMPLE_OMP_TASK:
1818 if (is_gimple_call (stmt))
fd6481cf 1819 {
1820 warning (0, "barrier region may not be closely nested inside "
1821 "of work-sharing, critical, ordered, master or "
1822 "explicit task region");
1823 return;
1824 }
c1d127dd 1825 warning (0, "work-sharing region may not be closely nested inside "
fd6481cf 1826 "of work-sharing, critical, ordered, master or explicit "
1827 "task region");
c1d127dd 1828 return;
75a70cf9 1829 case GIMPLE_OMP_PARALLEL:
c1d127dd 1830 return;
1831 default:
1832 break;
1833 }
1834 break;
75a70cf9 1835 case GIMPLE_OMP_MASTER:
c1d127dd 1836 for (; ctx != NULL; ctx = ctx->outer)
75a70cf9 1837 switch (gimple_code (ctx->stmt))
c1d127dd 1838 {
75a70cf9 1839 case GIMPLE_OMP_FOR:
1840 case GIMPLE_OMP_SECTIONS:
1841 case GIMPLE_OMP_SINGLE:
1842 case GIMPLE_OMP_TASK:
c1d127dd 1843 warning (0, "master region may not be closely nested inside "
fd6481cf 1844 "of work-sharing or explicit task region");
c1d127dd 1845 return;
75a70cf9 1846 case GIMPLE_OMP_PARALLEL:
c1d127dd 1847 return;
1848 default:
1849 break;
1850 }
1851 break;
75a70cf9 1852 case GIMPLE_OMP_ORDERED:
c1d127dd 1853 for (; ctx != NULL; ctx = ctx->outer)
75a70cf9 1854 switch (gimple_code (ctx->stmt))
c1d127dd 1855 {
75a70cf9 1856 case GIMPLE_OMP_CRITICAL:
1857 case GIMPLE_OMP_TASK:
c1d127dd 1858 warning (0, "ordered region may not be closely nested inside "
fd6481cf 1859 "of critical or explicit task region");
c1d127dd 1860 return;
75a70cf9 1861 case GIMPLE_OMP_FOR:
1862 if (find_omp_clause (gimple_omp_for_clauses (ctx->stmt),
c1d127dd 1863 OMP_CLAUSE_ORDERED) == NULL)
1864 warning (0, "ordered region must be closely nested inside "
1865 "a loop region with an ordered clause");
1866 return;
75a70cf9 1867 case GIMPLE_OMP_PARALLEL:
c1d127dd 1868 return;
1869 default:
1870 break;
1871 }
1872 break;
75a70cf9 1873 case GIMPLE_OMP_CRITICAL:
c1d127dd 1874 for (; ctx != NULL; ctx = ctx->outer)
75a70cf9 1875 if (gimple_code (ctx->stmt) == GIMPLE_OMP_CRITICAL
1876 && (gimple_omp_critical_name (stmt)
1877 == gimple_omp_critical_name (ctx->stmt)))
c1d127dd 1878 {
1879 warning (0, "critical region may not be nested inside a critical "
1880 "region with the same name");
1881 return;
1882 }
1883 break;
1884 default:
1885 break;
1886 }
1887}
1888
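/* As an example of the restrictions checked above, the following
   hypothetical fragment is diagnosed, because one work-sharing region
   may not be closely nested inside another with no intervening
   parallel region:

     #pragma omp for
     for (i = 0; i < n; i++)
       {
         #pragma omp single
         f (i);
       }

   Wrapping the inner directive in its own #pragma omp parallel would
   make the nesting valid.  */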
1889
75a70cf9 1890/* Helper function for scan_omp.
 1891
 1892 Callback for walk_tree or operators in walk_gimple_stmt, used to
 1893 scan for OpenMP directives in TP. */
1e8e9920 1894
1895static tree
75a70cf9 1896scan_omp_1_op (tree *tp, int *walk_subtrees, void *data)
1e8e9920 1897{
4077bf7a 1898 struct walk_stmt_info *wi = (struct walk_stmt_info *) data;
1899 omp_context *ctx = (omp_context *) wi->info;
1e8e9920 1900 tree t = *tp;
1901
75a70cf9 1902 switch (TREE_CODE (t))
1903 {
1904 case VAR_DECL:
1905 case PARM_DECL:
1906 case LABEL_DECL:
1907 case RESULT_DECL:
1908 if (ctx)
1909 *tp = remap_decl (t, &ctx->cb);
1910 break;
1911
1912 default:
1913 if (ctx && TYPE_P (t))
1914 *tp = remap_type (t, &ctx->cb);
1915 else if (!DECL_P (t))
7cf869dd 1916 {
1917 *walk_subtrees = 1;
1918 if (ctx)
1919 TREE_TYPE (t) = remap_type (TREE_TYPE (t), &ctx->cb);
1920 }
75a70cf9 1921 break;
1922 }
1923
1924 return NULL_TREE;
1925}
1926
1927
1928/* Helper function for scan_omp.
1929
1930 Callback for walk_gimple_stmt used to scan for OpenMP directives in
1931 the current statement in GSI. */
1932
1933static tree
1934scan_omp_1_stmt (gimple_stmt_iterator *gsi, bool *handled_ops_p,
1935 struct walk_stmt_info *wi)
1936{
1937 gimple stmt = gsi_stmt (*gsi);
1938 omp_context *ctx = (omp_context *) wi->info;
1939
1940 if (gimple_has_location (stmt))
1941 input_location = gimple_location (stmt);
1e8e9920 1942
c1d127dd 1943 /* Check the OpenMP nesting restrictions. */
fd6481cf 1944 if (ctx != NULL)
1945 {
75a70cf9 1946 if (is_gimple_omp (stmt))
1947 check_omp_nesting_restrictions (stmt, ctx);
1948 else if (is_gimple_call (stmt))
fd6481cf 1949 {
75a70cf9 1950 tree fndecl = gimple_call_fndecl (stmt);
fd6481cf 1951 if (fndecl && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL
1952 && DECL_FUNCTION_CODE (fndecl) == BUILT_IN_GOMP_BARRIER)
75a70cf9 1953 check_omp_nesting_restrictions (stmt, ctx);
fd6481cf 1954 }
1955 }
c1d127dd 1956
75a70cf9 1957 *handled_ops_p = true;
1958
1959 switch (gimple_code (stmt))
1e8e9920 1960 {
75a70cf9 1961 case GIMPLE_OMP_PARALLEL:
fd6481cf 1962 taskreg_nesting_level++;
75a70cf9 1963 scan_omp_parallel (gsi, ctx);
fd6481cf 1964 taskreg_nesting_level--;
1965 break;
1966
75a70cf9 1967 case GIMPLE_OMP_TASK:
fd6481cf 1968 taskreg_nesting_level++;
75a70cf9 1969 scan_omp_task (gsi, ctx);
fd6481cf 1970 taskreg_nesting_level--;
1e8e9920 1971 break;
1972
75a70cf9 1973 case GIMPLE_OMP_FOR:
1974 scan_omp_for (stmt, ctx);
1e8e9920 1975 break;
1976
75a70cf9 1977 case GIMPLE_OMP_SECTIONS:
1978 scan_omp_sections (stmt, ctx);
1e8e9920 1979 break;
1980
75a70cf9 1981 case GIMPLE_OMP_SINGLE:
1982 scan_omp_single (stmt, ctx);
1e8e9920 1983 break;
1984
75a70cf9 1985 case GIMPLE_OMP_SECTION:
1986 case GIMPLE_OMP_MASTER:
1987 case GIMPLE_OMP_ORDERED:
1988 case GIMPLE_OMP_CRITICAL:
1989 ctx = new_omp_context (stmt, ctx);
1990 scan_omp (gimple_omp_body (stmt), ctx);
1e8e9920 1991 break;
1992
75a70cf9 1993 case GIMPLE_BIND:
1e8e9920 1994 {
1995 tree var;
1e8e9920 1996
75a70cf9 1997 *handled_ops_p = false;
1998 if (ctx)
1999 for (var = gimple_bind_vars (stmt); var ; var = TREE_CHAIN (var))
2000 insert_decl_map (&ctx->cb, var, var);
1e8e9920 2001 }
2002 break;
1e8e9920 2003 default:
75a70cf9 2004 *handled_ops_p = false;
1e8e9920 2005 break;
2006 }
2007
2008 return NULL_TREE;
2009}
2010
2011
75a70cf9 2012/* Scan all the statements starting at the current statement. CTX
2013 contains context information about the OpenMP directives and
2014 clauses found during the scan. */
1e8e9920 2015
2016static void
75a70cf9 2017scan_omp (gimple_seq body, omp_context *ctx)
1e8e9920 2018{
2019 location_t saved_location;
2020 struct walk_stmt_info wi;
2021
2022 memset (&wi, 0, sizeof (wi));
1e8e9920 2023 wi.info = ctx;
1e8e9920 2024 wi.want_locations = true;
2025
2026 saved_location = input_location;
75a70cf9 2027 walk_gimple_seq (body, scan_omp_1_stmt, scan_omp_1_op, &wi);
1e8e9920 2028 input_location = saved_location;
2029}
2030\f
2031/* Re-gimplification and code generation routines. */
2032
2033/* Build a call to GOMP_barrier. */
2034
79acaae1 2035static tree
2036build_omp_barrier (void)
1e8e9920 2037{
79acaae1 2038 return build_call_expr (built_in_decls[BUILT_IN_GOMP_BARRIER], 0);
1e8e9920 2039}
2040
2041/* If a context was created for STMT when it was scanned, return it. */
2042
2043static omp_context *
75a70cf9 2044maybe_lookup_ctx (gimple stmt)
1e8e9920 2045{
2046 splay_tree_node n;
2047 n = splay_tree_lookup (all_contexts, (splay_tree_key) stmt);
2048 return n ? (omp_context *) n->value : NULL;
2049}
2050
773c5ba7 2051
2052/* Find the mapping for DECL in CTX or the immediately enclosing
2053 context that has a mapping for DECL.
2054
2055 If CTX is a nested parallel directive, we may have to use the decl
2056 mappings created in CTX's parent context. Suppose that we have the
 2057 following parallel nesting (variable UIDs shown for clarity):
2058
2059 iD.1562 = 0;
2060 #omp parallel shared(iD.1562) -> outer parallel
2061 iD.1562 = iD.1562 + 1;
2062
2063 #omp parallel shared (iD.1562) -> inner parallel
2064 iD.1562 = iD.1562 - 1;
2065
2066 Each parallel structure will create a distinct .omp_data_s structure
2067 for copying iD.1562 in/out of the directive:
2068
2069 outer parallel .omp_data_s.1.i -> iD.1562
2070 inner parallel .omp_data_s.2.i -> iD.1562
2071
2072 A shared variable mapping will produce a copy-out operation before
2073 the parallel directive and a copy-in operation after it. So, in
2074 this case we would have:
2075
2076 iD.1562 = 0;
2077 .omp_data_o.1.i = iD.1562;
2078 #omp parallel shared(iD.1562) -> outer parallel
2079 .omp_data_i.1 = &.omp_data_o.1
2080 .omp_data_i.1->i = .omp_data_i.1->i + 1;
2081
2082 .omp_data_o.2.i = iD.1562; -> **
2083 #omp parallel shared(iD.1562) -> inner parallel
2084 .omp_data_i.2 = &.omp_data_o.2
2085 .omp_data_i.2->i = .omp_data_i.2->i - 1;
2086
2087
2088 ** This is a problem. The symbol iD.1562 cannot be referenced
2089 inside the body of the outer parallel region. But since we are
2090 emitting this copy operation while expanding the inner parallel
2091 directive, we need to access the CTX structure of the outer
2092 parallel directive to get the correct mapping:
2093
2094 .omp_data_o.2.i = .omp_data_i.1->i
2095
2096 Since there may be other workshare or parallel directives enclosing
2097 the parallel directive, it may be necessary to walk up the context
2098 parent chain. This is not a problem in general because nested
2099 parallelism happens only rarely. */
2100
2101static tree
2102lookup_decl_in_outer_ctx (tree decl, omp_context *ctx)
2103{
2104 tree t;
2105 omp_context *up;
2106
773c5ba7 2107 for (up = ctx->outer, t = NULL; up && t == NULL; up = up->outer)
2108 t = maybe_lookup_decl (decl, up);
2109
87b31375 2110 gcc_assert (!ctx->is_nested || t || is_global_var (decl));
773c5ba7 2111
c37594c7 2112 return t ? t : decl;
773c5ba7 2113}
2114
2115
f49d7bb5 2116/* Similar to lookup_decl_in_outer_ctx, but return DECL if not found
2117 in outer contexts. */
2118
2119static tree
2120maybe_lookup_decl_in_outer_ctx (tree decl, omp_context *ctx)
2121{
2122 tree t = NULL;
2123 omp_context *up;
2124
87b31375 2125 for (up = ctx->outer, t = NULL; up && t == NULL; up = up->outer)
2126 t = maybe_lookup_decl (decl, up);
f49d7bb5 2127
2128 return t ? t : decl;
2129}
2130
2131
1e8e9920 2132/* Construct the initialization value for reduction CLAUSE. */
2133
2134tree
2135omp_reduction_init (tree clause, tree type)
2136{
389dd41b 2137 location_t loc = OMP_CLAUSE_LOCATION (clause);
1e8e9920 2138 switch (OMP_CLAUSE_REDUCTION_CODE (clause))
2139 {
2140 case PLUS_EXPR:
2141 case MINUS_EXPR:
2142 case BIT_IOR_EXPR:
2143 case BIT_XOR_EXPR:
2144 case TRUTH_OR_EXPR:
2145 case TRUTH_ORIF_EXPR:
2146 case TRUTH_XOR_EXPR:
2147 case NE_EXPR:
389dd41b 2148 return fold_convert_loc (loc, type, integer_zero_node);
1e8e9920 2149
2150 case MULT_EXPR:
2151 case TRUTH_AND_EXPR:
2152 case TRUTH_ANDIF_EXPR:
2153 case EQ_EXPR:
389dd41b 2154 return fold_convert_loc (loc, type, integer_one_node);
1e8e9920 2155
2156 case BIT_AND_EXPR:
389dd41b 2157 return fold_convert_loc (loc, type, integer_minus_one_node);
1e8e9920 2158
2159 case MAX_EXPR:
2160 if (SCALAR_FLOAT_TYPE_P (type))
2161 {
2162 REAL_VALUE_TYPE max, min;
2163 if (HONOR_INFINITIES (TYPE_MODE (type)))
2164 {
2165 real_inf (&max);
2166 real_arithmetic (&min, NEGATE_EXPR, &max, NULL);
2167 }
2168 else
2169 real_maxval (&min, 1, TYPE_MODE (type));
2170 return build_real (type, min);
2171 }
2172 else
2173 {
2174 gcc_assert (INTEGRAL_TYPE_P (type));
2175 return TYPE_MIN_VALUE (type);
2176 }
2177
2178 case MIN_EXPR:
2179 if (SCALAR_FLOAT_TYPE_P (type))
2180 {
2181 REAL_VALUE_TYPE max;
2182 if (HONOR_INFINITIES (TYPE_MODE (type)))
2183 real_inf (&max);
2184 else
2185 real_maxval (&max, 0, TYPE_MODE (type));
2186 return build_real (type, max);
2187 }
2188 else
2189 {
2190 gcc_assert (INTEGRAL_TYPE_P (type));
2191 return TYPE_MAX_VALUE (type);
2192 }
2193
2194 default:
2195 gcc_unreachable ();
2196 }
2197}
2198
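/* To illustrate the identity values chosen above (a summary, not an
   exhaustive list):

     reduction (+:x)   ->  0          reduction (*:x)  ->  1
     reduction (|:x)   ->  0          reduction (&:x)  ->  ~0
     reduction (max:x) ->  minimum value of the type
     reduction (min:x) ->  maximum value of the type

   Each thread's private copy starts at the identity element, so merging
   the partial results gives the same value as the sequential loop.  */
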
2199/* Generate code to implement the input clauses, FIRSTPRIVATE and COPYIN,
2200 from the receiver (aka child) side and initializers for REFERENCE_TYPE
2201 private variables. Initialization statements go in ILIST, while calls
2202 to destructors go in DLIST. */
2203
2204static void
75a70cf9 2205lower_rec_input_clauses (tree clauses, gimple_seq *ilist, gimple_seq *dlist,
1e4afe3c 2206 omp_context *ctx)
1e8e9920 2207{
75a70cf9 2208 gimple_stmt_iterator diter;
c2f47e15 2209 tree c, dtor, copyin_seq, x, ptr;
1e8e9920 2210 bool copyin_by_ref = false;
f49d7bb5 2211 bool lastprivate_firstprivate = false;
1e8e9920 2212 int pass;
2213
75a70cf9 2214 *dlist = gimple_seq_alloc ();
2215 diter = gsi_start (*dlist);
1e8e9920 2216 copyin_seq = NULL;
2217
2218 /* Do all the fixed sized types in the first pass, and the variable sized
2219 types in the second pass. This makes sure that the scalar arguments to
48e1416a 2220 the variable sized types are processed before we use them in the
1e8e9920 2221 variable sized operations. */
2222 for (pass = 0; pass < 2; ++pass)
2223 {
2224 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
2225 {
55d6e7cd 2226 enum omp_clause_code c_kind = OMP_CLAUSE_CODE (c);
1e8e9920 2227 tree var, new_var;
2228 bool by_ref;
389dd41b 2229 location_t clause_loc = OMP_CLAUSE_LOCATION (c);
1e8e9920 2230
2231 switch (c_kind)
2232 {
2233 case OMP_CLAUSE_PRIVATE:
2234 if (OMP_CLAUSE_PRIVATE_DEBUG (c))
2235 continue;
2236 break;
2237 case OMP_CLAUSE_SHARED:
f49d7bb5 2238 if (maybe_lookup_decl (OMP_CLAUSE_DECL (c), ctx) == NULL)
2239 {
2240 gcc_assert (is_global_var (OMP_CLAUSE_DECL (c)));
2241 continue;
2242 }
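	  /* FALLTHRU */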
1e8e9920 2243 case OMP_CLAUSE_FIRSTPRIVATE:
1e8e9920 2244 case OMP_CLAUSE_COPYIN:
2245 case OMP_CLAUSE_REDUCTION:
2246 break;
df2c34fc 2247 case OMP_CLAUSE_LASTPRIVATE:
f49d7bb5 2248 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
2249 {
2250 lastprivate_firstprivate = true;
2251 if (pass != 0)
2252 continue;
2253 }
df2c34fc 2254 break;
1e8e9920 2255 default:
2256 continue;
2257 }
2258
2259 new_var = var = OMP_CLAUSE_DECL (c);
2260 if (c_kind != OMP_CLAUSE_COPYIN)
2261 new_var = lookup_decl (var, ctx);
2262
2263 if (c_kind == OMP_CLAUSE_SHARED || c_kind == OMP_CLAUSE_COPYIN)
2264 {
2265 if (pass != 0)
2266 continue;
2267 }
1e8e9920 2268 else if (is_variable_sized (var))
2269 {
773c5ba7 2270 /* For variable sized types, we need to allocate the
2271 actual storage here. Call alloca and store the
2272 result in the pointer decl that we created elsewhere. */
1e8e9920 2273 if (pass == 0)
2274 continue;
2275
fd6481cf 2276 if (c_kind != OMP_CLAUSE_FIRSTPRIVATE || !is_task_ctx (ctx))
2277 {
75a70cf9 2278 gimple stmt;
2279 tree tmp;
2280
fd6481cf 2281 ptr = DECL_VALUE_EXPR (new_var);
2282 gcc_assert (TREE_CODE (ptr) == INDIRECT_REF);
2283 ptr = TREE_OPERAND (ptr, 0);
2284 gcc_assert (DECL_P (ptr));
2285 x = TYPE_SIZE_UNIT (TREE_TYPE (new_var));
75a70cf9 2286
 2287 /* void *tmp = __builtin_alloca (x); */
2288 stmt
2289 = gimple_build_call (built_in_decls[BUILT_IN_ALLOCA], 1, x);
2290 tmp = create_tmp_var_raw (ptr_type_node, NULL);
2291 gimple_add_tmp_var (tmp);
2292 gimple_call_set_lhs (stmt, tmp);
2293
2294 gimple_seq_add_stmt (ilist, stmt);
2295
389dd41b 2296 x = fold_convert_loc (clause_loc, TREE_TYPE (ptr), tmp);
75a70cf9 2297 gimplify_assign (ptr, x, ilist);
fd6481cf 2298 }
1e8e9920 2299 }
1e8e9920 2300 else if (is_reference (var))
2301 {
773c5ba7 2302 /* For references that are being privatized for Fortran,
2303 allocate new backing storage for the new pointer
2304 variable. This allows us to avoid changing all the
 2305 code that expects a pointer into code that expects
2306 a direct variable. Note that this doesn't apply to
2307 C++, since reference types are disallowed in data
df2c34fc 2308 sharing clauses there, except for NRV optimized
2309 return values. */
1e8e9920 2310 if (pass == 0)
2311 continue;
2312
2313 x = TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (new_var)));
fd6481cf 2314 if (c_kind == OMP_CLAUSE_FIRSTPRIVATE && is_task_ctx (ctx))
2315 {
2316 x = build_receiver_ref (var, false, ctx);
389dd41b 2317 x = build_fold_addr_expr_loc (clause_loc, x);
fd6481cf 2318 }
2319 else if (TREE_CONSTANT (x))
1e8e9920 2320 {
2321 const char *name = NULL;
2322 if (DECL_NAME (var))
2323 name = IDENTIFIER_POINTER (DECL_NAME (new_var));
2324
df2c34fc 2325 x = create_tmp_var_raw (TREE_TYPE (TREE_TYPE (new_var)),
2326 name);
2327 gimple_add_tmp_var (x);
86f2ad37 2328 TREE_ADDRESSABLE (x) = 1;
389dd41b 2329 x = build_fold_addr_expr_loc (clause_loc, x);
1e8e9920 2330 }
2331 else
2332 {
389dd41b 2333 x = build_call_expr_loc (clause_loc,
2334 built_in_decls[BUILT_IN_ALLOCA], 1, x);
1e8e9920 2335 }
2336
389dd41b 2337 x = fold_convert_loc (clause_loc, TREE_TYPE (new_var), x);
75a70cf9 2338 gimplify_assign (new_var, x, ilist);
1e8e9920 2339
389dd41b 2340 new_var = build_fold_indirect_ref_loc (clause_loc, new_var);
1e8e9920 2341 }
2342 else if (c_kind == OMP_CLAUSE_REDUCTION
2343 && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
2344 {
2345 if (pass == 0)
2346 continue;
2347 }
2348 else if (pass != 0)
2349 continue;
2350
55d6e7cd 2351 switch (OMP_CLAUSE_CODE (c))
1e8e9920 2352 {
2353 case OMP_CLAUSE_SHARED:
f49d7bb5 2354 /* Shared global vars are just accessed directly. */
2355 if (is_global_var (new_var))
2356 break;
1e8e9920 2357 /* Set up the DECL_VALUE_EXPR for shared variables now. This
2358 needs to be delayed until after fixup_child_record_type so
2359 that we get the correct type during the dereference. */
e8a588af 2360 by_ref = use_pointer_for_field (var, ctx);
1e8e9920 2361 x = build_receiver_ref (var, by_ref, ctx);
2362 SET_DECL_VALUE_EXPR (new_var, x);
2363 DECL_HAS_VALUE_EXPR_P (new_var) = 1;
2364
2365 /* ??? If VAR is not passed by reference, and the variable
2366 hasn't been initialized yet, then we'll get a warning for
2367 the store into the omp_data_s structure. Ideally, we'd be
48e1416a 2368 able to notice this and not store anything at all, but
1e8e9920 2369 we're generating code too early. Suppress the warning. */
2370 if (!by_ref)
2371 TREE_NO_WARNING (var) = 1;
2372 break;
2373
2374 case OMP_CLAUSE_LASTPRIVATE:
2375 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
2376 break;
2377 /* FALLTHRU */
2378
2379 case OMP_CLAUSE_PRIVATE:
fd6481cf 2380 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_PRIVATE)
2381 x = build_outer_var_ref (var, ctx);
2382 else if (OMP_CLAUSE_PRIVATE_OUTER_REF (c))
2383 {
2384 if (is_task_ctx (ctx))
2385 x = build_receiver_ref (var, false, ctx);
2386 else
2387 x = build_outer_var_ref (var, ctx);
2388 }
2389 else
2390 x = NULL;
2391 x = lang_hooks.decls.omp_clause_default_ctor (c, new_var, x);
1e8e9920 2392 if (x)
2393 gimplify_and_add (x, ilist);
2394 /* FALLTHRU */
2395
2396 do_dtor:
2397 x = lang_hooks.decls.omp_clause_dtor (c, new_var);
2398 if (x)
2399 {
75a70cf9 2400 gimple_seq tseq = NULL;
2401
1e8e9920 2402 dtor = x;
75a70cf9 2403 gimplify_stmt (&dtor, &tseq);
2404 gsi_insert_seq_before (&diter, tseq, GSI_SAME_STMT);
1e8e9920 2405 }
2406 break;
2407
2408 case OMP_CLAUSE_FIRSTPRIVATE:
fd6481cf 2409 if (is_task_ctx (ctx))
2410 {
2411 if (is_reference (var) || is_variable_sized (var))
2412 goto do_dtor;
2413 else if (is_global_var (maybe_lookup_decl_in_outer_ctx (var,
2414 ctx))
2415 || use_pointer_for_field (var, NULL))
2416 {
2417 x = build_receiver_ref (var, false, ctx);
2418 SET_DECL_VALUE_EXPR (new_var, x);
2419 DECL_HAS_VALUE_EXPR_P (new_var) = 1;
2420 goto do_dtor;
2421 }
2422 }
1e8e9920 2423 x = build_outer_var_ref (var, ctx);
2424 x = lang_hooks.decls.omp_clause_copy_ctor (c, new_var, x);
2425 gimplify_and_add (x, ilist);
2426 goto do_dtor;
2427 break;
2428
2429 case OMP_CLAUSE_COPYIN:
e8a588af 2430 by_ref = use_pointer_for_field (var, NULL);
1e8e9920 2431 x = build_receiver_ref (var, by_ref, ctx);
2432 x = lang_hooks.decls.omp_clause_assign_op (c, new_var, x);
2433 append_to_statement_list (x, &copyin_seq);
2434 copyin_by_ref |= by_ref;
2435 break;
2436
2437 case OMP_CLAUSE_REDUCTION:
2438 if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
2439 {
fd6481cf 2440 tree placeholder = OMP_CLAUSE_REDUCTION_PLACEHOLDER (c);
2441 x = build_outer_var_ref (var, ctx);
2442
2443 if (is_reference (var))
389dd41b 2444 x = build_fold_addr_expr_loc (clause_loc, x);
fd6481cf 2445 SET_DECL_VALUE_EXPR (placeholder, x);
2446 DECL_HAS_VALUE_EXPR_P (placeholder) = 1;
75a70cf9 2447 lower_omp (OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c), ctx);
2448 gimple_seq_add_seq (ilist,
2449 OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c));
2450 OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c) = NULL;
fd6481cf 2451 DECL_HAS_VALUE_EXPR_P (placeholder) = 0;
1e8e9920 2452 }
2453 else
2454 {
2455 x = omp_reduction_init (c, TREE_TYPE (new_var));
2456 gcc_assert (TREE_CODE (TREE_TYPE (new_var)) != ARRAY_TYPE);
75a70cf9 2457 gimplify_assign (new_var, x, ilist);
1e8e9920 2458 }
2459 break;
2460
2461 default:
2462 gcc_unreachable ();
2463 }
2464 }
2465 }
2466
2467 /* The copyin sequence is not to be executed by the main thread, since
2468 that would result in self-copies. Perhaps not visible to scalars,
2469 but it certainly is to C++ operator=. */
2470 if (copyin_seq)
2471 {
c2f47e15 2472 x = build_call_expr (built_in_decls[BUILT_IN_OMP_GET_THREAD_NUM], 0);
1e8e9920 2473 x = build2 (NE_EXPR, boolean_type_node, x,
2474 build_int_cst (TREE_TYPE (x), 0));
2475 x = build3 (COND_EXPR, void_type_node, x, copyin_seq, NULL);
2476 gimplify_and_add (x, ilist);
2477 }
2478
2479 /* If any copyin variable is passed by reference, we must ensure the
2480 master thread doesn't modify it before it is copied over in all
f49d7bb5 2481 threads. Similarly for variables in both firstprivate and
2482 lastprivate clauses we need to ensure the lastprivate copying
2483 happens after firstprivate copying in all threads. */
2484 if (copyin_by_ref || lastprivate_firstprivate)
79acaae1 2485 gimplify_and_add (build_omp_barrier (), ilist);
1e8e9920 2486}
2487
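/* As a sketch of the receiver-side sequences built above, given

     #pragma omp parallel firstprivate (a) reduction (+:s)

   ILIST ends up containing roughly

     a' = .omp_data_i->a;   <copy constructor for C++ class types>
     s' = 0;                <reduction identity, see omp_reduction_init>

   where a' and s' denote the privatized copies found via lookup_decl.  */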
773c5ba7 2488
1e8e9920 2489/* Generate code to implement the LASTPRIVATE clauses. This is used for
2490 both parallel and workshare constructs. PREDICATE may be NULL if it's
2491 always true. */
2492
2493static void
75a70cf9 2494lower_lastprivate_clauses (tree clauses, tree predicate, gimple_seq *stmt_list,
2495 omp_context *ctx)
1e8e9920 2496{
75a70cf9 2497 tree x, c, label = NULL;
fd6481cf 2498 bool par_clauses = false;
1e8e9920 2499
2500 /* Early exit if there are no lastprivate clauses. */
2501 clauses = find_omp_clause (clauses, OMP_CLAUSE_LASTPRIVATE);
2502 if (clauses == NULL)
2503 {
 2504 /* If this was a workshare construct, see if it had been combined
2505 with its parallel. In that case, look for the clauses on the
2506 parallel statement itself. */
2507 if (is_parallel_ctx (ctx))
2508 return;
2509
2510 ctx = ctx->outer;
2511 if (ctx == NULL || !is_parallel_ctx (ctx))
2512 return;
2513
75a70cf9 2514 clauses = find_omp_clause (gimple_omp_parallel_clauses (ctx->stmt),
1e8e9920 2515 OMP_CLAUSE_LASTPRIVATE);
2516 if (clauses == NULL)
2517 return;
fd6481cf 2518 par_clauses = true;
1e8e9920 2519 }
2520
75a70cf9 2521 if (predicate)
2522 {
2523 gimple stmt;
2524 tree label_true, arm1, arm2;
2525
e60a6f7b 2526 label = create_artificial_label (UNKNOWN_LOCATION);
2527 label_true = create_artificial_label (UNKNOWN_LOCATION);
75a70cf9 2528 arm1 = TREE_OPERAND (predicate, 0);
2529 arm2 = TREE_OPERAND (predicate, 1);
2530 gimplify_expr (&arm1, stmt_list, NULL, is_gimple_val, fb_rvalue);
2531 gimplify_expr (&arm2, stmt_list, NULL, is_gimple_val, fb_rvalue);
2532 stmt = gimple_build_cond (TREE_CODE (predicate), arm1, arm2,
2533 label_true, label);
2534 gimple_seq_add_stmt (stmt_list, stmt);
2535 gimple_seq_add_stmt (stmt_list, gimple_build_label (label_true));
2536 }
1e8e9920 2537
fd6481cf 2538 for (c = clauses; c ;)
1e8e9920 2539 {
2540 tree var, new_var;
389dd41b 2541 location_t clause_loc = OMP_CLAUSE_LOCATION (c);
1e8e9920 2542
fd6481cf 2543 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE)
2544 {
2545 var = OMP_CLAUSE_DECL (c);
2546 new_var = lookup_decl (var, ctx);
1e8e9920 2547
75a70cf9 2548 if (OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c))
2549 {
2550 lower_omp (OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c), ctx);
2551 gimple_seq_add_seq (stmt_list,
2552 OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c));
2553 }
2554 OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c) = NULL;
1e8e9920 2555
fd6481cf 2556 x = build_outer_var_ref (var, ctx);
2557 if (is_reference (var))
389dd41b 2558 new_var = build_fold_indirect_ref_loc (clause_loc, new_var);
fd6481cf 2559 x = lang_hooks.decls.omp_clause_assign_op (c, x, new_var);
75a70cf9 2560 gimplify_and_add (x, stmt_list);
fd6481cf 2561 }
2562 c = OMP_CLAUSE_CHAIN (c);
2563 if (c == NULL && !par_clauses)
2564 {
 2565 /* If this was a workshare construct, see if it had been combined
2566 with its parallel. In that case, continue looking for the
2567 clauses also on the parallel statement itself. */
2568 if (is_parallel_ctx (ctx))
2569 break;
2570
2571 ctx = ctx->outer;
2572 if (ctx == NULL || !is_parallel_ctx (ctx))
2573 break;
2574
75a70cf9 2575 c = find_omp_clause (gimple_omp_parallel_clauses (ctx->stmt),
fd6481cf 2576 OMP_CLAUSE_LASTPRIVATE);
2577 par_clauses = true;
2578 }
1e8e9920 2579 }
2580
75a70cf9 2581 if (label)
2582 gimple_seq_add_stmt (stmt_list, gimple_build_label (label));
1e8e9920 2583}
2584
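/* As a sketch, for

     #pragma omp parallel for lastprivate (x)

   the PREDICATE passed in above compares the loop counter against the
   sequentially last iteration, so only the thread that executed that
   iteration copies its private x back to the original variable.  */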
773c5ba7 2585
1e8e9920 2586/* Generate code to implement the REDUCTION clauses. */
2587
2588static void
75a70cf9 2589lower_reduction_clauses (tree clauses, gimple_seq *stmt_seqp, omp_context *ctx)
1e8e9920 2590{
75a70cf9 2591 gimple_seq sub_seq = NULL;
2592 gimple stmt;
2593 tree x, c;
1e8e9920 2594 int count = 0;
2595
2596 /* First see if there is exactly one reduction clause. Use OMP_ATOMIC
2597 update in that case, otherwise use a lock. */
2598 for (c = clauses; c && count < 2; c = OMP_CLAUSE_CHAIN (c))
55d6e7cd 2599 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION)
1e8e9920 2600 {
2601 if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
2602 {
2603 /* Never use OMP_ATOMIC for array reductions. */
2604 count = -1;
2605 break;
2606 }
2607 count++;
2608 }
2609
2610 if (count == 0)
2611 return;
2612
2613 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
2614 {
2615 tree var, ref, new_var;
2616 enum tree_code code;
389dd41b 2617 location_t clause_loc = OMP_CLAUSE_LOCATION (c);
1e8e9920 2618
55d6e7cd 2619 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_REDUCTION)
1e8e9920 2620 continue;
2621
2622 var = OMP_CLAUSE_DECL (c);
2623 new_var = lookup_decl (var, ctx);
2624 if (is_reference (var))
389dd41b 2625 new_var = build_fold_indirect_ref_loc (clause_loc, new_var);
1e8e9920 2626 ref = build_outer_var_ref (var, ctx);
2627 code = OMP_CLAUSE_REDUCTION_CODE (c);
773c5ba7 2628
2629 /* reduction(-:var) sums up the partial results, so it acts
2630 identically to reduction(+:var). */
1e8e9920 2631 if (code == MINUS_EXPR)
2632 code = PLUS_EXPR;
2633
2634 if (count == 1)
2635 {
389dd41b 2636 tree addr = build_fold_addr_expr_loc (clause_loc, ref);
1e8e9920 2637
2638 addr = save_expr (addr);
2639 ref = build1 (INDIRECT_REF, TREE_TYPE (TREE_TYPE (addr)), addr);
389dd41b 2640 x = fold_build2_loc (clause_loc, code, TREE_TYPE (ref), ref, new_var);
1e8e9920 2641 x = build2 (OMP_ATOMIC, void_type_node, addr, x);
75a70cf9 2642 gimplify_and_add (x, stmt_seqp);
1e8e9920 2643 return;
2644 }
2645
2646 if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
2647 {
2648 tree placeholder = OMP_CLAUSE_REDUCTION_PLACEHOLDER (c);
2649
2650 if (is_reference (var))
389dd41b 2651 ref = build_fold_addr_expr_loc (clause_loc, ref);
1e8e9920 2652 SET_DECL_VALUE_EXPR (placeholder, ref);
2653 DECL_HAS_VALUE_EXPR_P (placeholder) = 1;
75a70cf9 2654 lower_omp (OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c), ctx);
2655 gimple_seq_add_seq (&sub_seq, OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c));
2656 OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c) = NULL;
1e8e9920 2657 OMP_CLAUSE_REDUCTION_PLACEHOLDER (c) = NULL;
2658 }
2659 else
2660 {
2661 x = build2 (code, TREE_TYPE (ref), ref, new_var);
2662 ref = build_outer_var_ref (var, ctx);
75a70cf9 2663 gimplify_assign (ref, x, &sub_seq);
1e8e9920 2664 }
2665 }
2666
75a70cf9 2667 stmt = gimple_build_call (built_in_decls[BUILT_IN_GOMP_ATOMIC_START], 0);
2668 gimple_seq_add_stmt (stmt_seqp, stmt);
1e8e9920 2669
75a70cf9 2670 gimple_seq_add_seq (stmt_seqp, sub_seq);
1e8e9920 2671
75a70cf9 2672 stmt = gimple_build_call (built_in_decls[BUILT_IN_GOMP_ATOMIC_END], 0);
2673 gimple_seq_add_stmt (stmt_seqp, stmt);
1e8e9920 2674}
2675
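/* As a sketch, the merge sequence built above for a single scalar
   clause such as reduction (+:s) is one atomic update,

     #pragma omp atomic
     s = s + s';

   whereas multiple reduction clauses (or an array reduction) have all
   their updates wrapped in a single GOMP_atomic_start ()/
   GOMP_atomic_end () critical section.  */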
773c5ba7 2676
1e8e9920 2677/* Generate code to implement the COPYPRIVATE clauses. */
2678
2679static void
75a70cf9 2680lower_copyprivate_clauses (tree clauses, gimple_seq *slist, gimple_seq *rlist,
1e8e9920 2681 omp_context *ctx)
2682{
2683 tree c;
2684
2685 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
2686 {
cb561506 2687 tree var, new_var, ref, x;
1e8e9920 2688 bool by_ref;
389dd41b 2689 location_t clause_loc = OMP_CLAUSE_LOCATION (c);
1e8e9920 2690
55d6e7cd 2691 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_COPYPRIVATE)
1e8e9920 2692 continue;
2693
2694 var = OMP_CLAUSE_DECL (c);
e8a588af 2695 by_ref = use_pointer_for_field (var, NULL);
1e8e9920 2696
2697 ref = build_sender_ref (var, ctx);
cb561506 2698 x = new_var = lookup_decl_in_outer_ctx (var, ctx);
2699 if (by_ref)
2700 {
2701 x = build_fold_addr_expr_loc (clause_loc, new_var);
2702 x = fold_convert_loc (clause_loc, TREE_TYPE (ref), x);
2703 }
75a70cf9 2704 gimplify_assign (ref, x, slist);
1e8e9920 2705
cb561506 2706 ref = build_receiver_ref (var, false, ctx);
2707 if (by_ref)
2708 {
2709 ref = fold_convert_loc (clause_loc,
2710 build_pointer_type (TREE_TYPE (new_var)),
2711 ref);
2712 ref = build_fold_indirect_ref_loc (clause_loc, ref);
2713 }
1e8e9920 2714 if (is_reference (var))
2715 {
cb561506 2716 ref = fold_convert_loc (clause_loc, TREE_TYPE (new_var), ref);
389dd41b 2717 ref = build_fold_indirect_ref_loc (clause_loc, ref);
cb561506 2718 new_var = build_fold_indirect_ref_loc (clause_loc, new_var);
1e8e9920 2719 }
cb561506 2720 x = lang_hooks.decls.omp_clause_assign_op (c, new_var, ref);
1e8e9920 2721 gimplify_and_add (x, rlist);
2722 }
2723}
2724
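/* copyprivate broadcasts the value produced by the one thread that
   executed the single region, e.g. (a sketch):

     #pragma omp single copyprivate (x)
     x = compute ();

   The executing thread stores x (or its address, for by-reference
   fields) into the .omp_copy_s record in SLIST; all other threads then
   assign it back out of the record in RLIST.  */
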
773c5ba7 2725
1e8e9920 2726/* Generate code to implement the clauses, FIRSTPRIVATE, COPYIN, LASTPRIVATE,
2727 and REDUCTION from the sender (aka parent) side. */
2728
2729static void
75a70cf9 2730lower_send_clauses (tree clauses, gimple_seq *ilist, gimple_seq *olist,
2731 omp_context *ctx)
1e8e9920 2732{
2733 tree c;
2734
2735 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
2736 {
773c5ba7 2737 tree val, ref, x, var;
1e8e9920 2738 bool by_ref, do_in = false, do_out = false;
389dd41b 2739 location_t clause_loc = OMP_CLAUSE_LOCATION (c);
1e8e9920 2740
55d6e7cd 2741 switch (OMP_CLAUSE_CODE (c))
1e8e9920 2742 {
fd6481cf 2743 case OMP_CLAUSE_PRIVATE:
2744 if (OMP_CLAUSE_PRIVATE_OUTER_REF (c))
2745 break;
2746 continue;
1e8e9920 2747 case OMP_CLAUSE_FIRSTPRIVATE:
2748 case OMP_CLAUSE_COPYIN:
2749 case OMP_CLAUSE_LASTPRIVATE:
2750 case OMP_CLAUSE_REDUCTION:
2751 break;
2752 default:
2753 continue;
2754 }
2755
87b31375 2756 val = OMP_CLAUSE_DECL (c);
2757 var = lookup_decl_in_outer_ctx (val, ctx);
773c5ba7 2758
f49d7bb5 2759 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_COPYIN
2760 && is_global_var (var))
2761 continue;
1e8e9920 2762 if (is_variable_sized (val))
2763 continue;
e8a588af 2764 by_ref = use_pointer_for_field (val, NULL);
1e8e9920 2765
55d6e7cd 2766 switch (OMP_CLAUSE_CODE (c))
1e8e9920 2767 {
fd6481cf 2768 case OMP_CLAUSE_PRIVATE:
1e8e9920 2769 case OMP_CLAUSE_FIRSTPRIVATE:
2770 case OMP_CLAUSE_COPYIN:
2771 do_in = true;
2772 break;
2773
2774 case OMP_CLAUSE_LASTPRIVATE:
2775 if (by_ref || is_reference (val))
2776 {
2777 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
2778 continue;
2779 do_in = true;
2780 }
2781 else
fd6481cf 2782 {
2783 do_out = true;
2784 if (lang_hooks.decls.omp_private_outer_ref (val))
2785 do_in = true;
2786 }
1e8e9920 2787 break;
2788
2789 case OMP_CLAUSE_REDUCTION:
2790 do_in = true;
2791 do_out = !(by_ref || is_reference (val));
2792 break;
2793
2794 default:
2795 gcc_unreachable ();
2796 }
2797
2798 if (do_in)
2799 {
2800 ref = build_sender_ref (val, ctx);
389dd41b 2801 x = by_ref ? build_fold_addr_expr_loc (clause_loc, var) : var;
75a70cf9 2802 gimplify_assign (ref, x, ilist);
fd6481cf 2803 if (is_task_ctx (ctx))
2804 DECL_ABSTRACT_ORIGIN (TREE_OPERAND (ref, 1)) = NULL;
1e8e9920 2805 }
773c5ba7 2806
1e8e9920 2807 if (do_out)
2808 {
2809 ref = build_sender_ref (val, ctx);
75a70cf9 2810 gimplify_assign (var, ref, olist);
1e8e9920 2811 }
2812 }
2813}
2814
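/* As a sketch, for  #pragma omp parallel firstprivate (a) reduction (+:s)
   the sender side built above emits

     .omp_data_o.a = a;      <ILIST, do_in>
     .omp_data_o.s = s;

   before the region and

     s = .omp_data_o.s;      <OLIST, do_out>

   after it, making the merged reduction result visible to the parent.  */
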
75a70cf9 2815/* Generate code to implement SHARED from the sender (aka parent)
2816 side. This is trickier, since GIMPLE_OMP_PARALLEL_CLAUSES doesn't
2817 list things that got automatically shared. */
1e8e9920 2818
2819static void
75a70cf9 2820lower_send_shared_vars (gimple_seq *ilist, gimple_seq *olist, omp_context *ctx)
1e8e9920 2821{
fd6481cf 2822 tree var, ovar, nvar, f, x, record_type;
1e8e9920 2823
2824 if (ctx->record_type == NULL)
2825 return;
773c5ba7 2826
fd6481cf 2827 record_type = ctx->srecord_type ? ctx->srecord_type : ctx->record_type;
2828 for (f = TYPE_FIELDS (record_type); f ; f = TREE_CHAIN (f))
1e8e9920 2829 {
2830 ovar = DECL_ABSTRACT_ORIGIN (f);
2831 nvar = maybe_lookup_decl (ovar, ctx);
2832 if (!nvar || !DECL_HAS_VALUE_EXPR_P (nvar))
2833 continue;
2834
773c5ba7 2835 /* If CTX is a nested parallel directive, find the immediately
2836 enclosing parallel or workshare construct that contains a
2837 mapping for OVAR. */
87b31375 2838 var = lookup_decl_in_outer_ctx (ovar, ctx);
773c5ba7 2839
e8a588af 2840 if (use_pointer_for_field (ovar, ctx))
1e8e9920 2841 {
2842 x = build_sender_ref (ovar, ctx);
773c5ba7 2843 var = build_fold_addr_expr (var);
75a70cf9 2844 gimplify_assign (x, var, ilist);
1e8e9920 2845 }
2846 else
2847 {
2848 x = build_sender_ref (ovar, ctx);
75a70cf9 2849 gimplify_assign (x, var, ilist);
1e8e9920 2850
d2263ebb 2851 if (!TREE_READONLY (var)
2852 /* We don't need to receive a new reference to a result
2853 or parm decl. In fact we may not store to it as we will
2854 invalidate any pending RSO and generate wrong gimple
2855 during inlining. */
2856 && !((TREE_CODE (var) == RESULT_DECL
2857 || TREE_CODE (var) == PARM_DECL)
2858 && DECL_BY_REFERENCE (var)))
fd6481cf 2859 {
2860 x = build_sender_ref (ovar, ctx);
75a70cf9 2861 gimplify_assign (var, x, olist);
fd6481cf 2862 }
1e8e9920 2863 }
2864 }
2865}
2866
75a70cf9 2867
2868/* A convenience function to build an empty GIMPLE_COND with just the
2869 condition. */
2870
2871static gimple
2872gimple_build_cond_empty (tree cond)
2873{
2874 enum tree_code pred_code;
2875 tree lhs, rhs;
2876
2877 gimple_cond_get_ops_from_tree (cond, &pred_code, &lhs, &rhs);
2878 return gimple_build_cond (pred_code, lhs, rhs, NULL_TREE, NULL_TREE);
2879}
2880
2881
48e1416a 2882/* Build the function calls to GOMP_parallel_start etc to actually
773c5ba7 2883 generate the parallel operation. REGION is the parallel region
 2884 being expanded. BB is the block where the code is to be inserted.
 2885 WS_ARGS will be set if this is a call to a combined parallel+workshare
 2886 construct; it contains the list of additional arguments needed by
2887 the workshare construct. */
1e8e9920 2888
2889static void
61e47ac8 2890expand_parallel_call (struct omp_region *region, basic_block bb,
75a70cf9 2891 gimple entry_stmt, tree ws_args)
1e8e9920 2892{
79acaae1 2893 tree t, t1, t2, val, cond, c, clauses;
75a70cf9 2894 gimple_stmt_iterator gsi;
2895 gimple stmt;
773c5ba7 2896 int start_ix;
389dd41b 2897 location_t clause_loc;
773c5ba7 2898
75a70cf9 2899 clauses = gimple_omp_parallel_clauses (entry_stmt);
773c5ba7 2900
334ec2d8 2901 /* Determine what flavor of GOMP_parallel_start we will be
773c5ba7 2902 emitting. */
2903 start_ix = BUILT_IN_GOMP_PARALLEL_START;
2904 if (is_combined_parallel (region))
2905 {
61e47ac8 2906 switch (region->inner->type)
773c5ba7 2907 {
75a70cf9 2908 case GIMPLE_OMP_FOR:
fd6481cf 2909 gcc_assert (region->inner->sched_kind != OMP_CLAUSE_SCHEDULE_AUTO);
f77459c5 2910 start_ix = BUILT_IN_GOMP_PARALLEL_LOOP_STATIC_START
fd6481cf 2911 + (region->inner->sched_kind
2912 == OMP_CLAUSE_SCHEDULE_RUNTIME
2913 ? 3 : region->inner->sched_kind);
61e47ac8 2914 break;
75a70cf9 2915 case GIMPLE_OMP_SECTIONS:
61e47ac8 2916 start_ix = BUILT_IN_GOMP_PARALLEL_SECTIONS_START;
2917 break;
2918 default:
2919 gcc_unreachable ();
773c5ba7 2920 }
773c5ba7 2921 }
1e8e9920 2922
2923 /* By default, the value of NUM_THREADS is zero (selected at run time)
2924 and there is no conditional. */
2925 cond = NULL_TREE;
2926 val = build_int_cst (unsigned_type_node, 0);
2927
2928 c = find_omp_clause (clauses, OMP_CLAUSE_IF);
2929 if (c)
2930 cond = OMP_CLAUSE_IF_EXPR (c);
2931
2932 c = find_omp_clause (clauses, OMP_CLAUSE_NUM_THREADS);
2933 if (c)
389dd41b 2934 {
2935 val = OMP_CLAUSE_NUM_THREADS_EXPR (c);
2936 clause_loc = OMP_CLAUSE_LOCATION (c);
2937 }
2938 else
2939 clause_loc = gimple_location (entry_stmt);
1e8e9920 2940
2941 /* Ensure 'val' is of the correct type. */
389dd41b 2942 val = fold_convert_loc (clause_loc, unsigned_type_node, val);
1e8e9920 2943
2944 /* If we found the clause 'if (cond)', build either
2945 (cond != 0) or (cond ? val : 1u). */
2946 if (cond)
2947 {
75a70cf9 2948 gimple_stmt_iterator gsi;
773c5ba7 2949
2950 cond = gimple_boolify (cond);
2951
1e8e9920 2952 if (integer_zerop (val))
389dd41b 2953 val = fold_build2_loc (clause_loc,
2954 EQ_EXPR, unsigned_type_node, cond,
79acaae1 2955 build_int_cst (TREE_TYPE (cond), 0));
1e8e9920 2956 else
773c5ba7 2957 {
2958 basic_block cond_bb, then_bb, else_bb;
79acaae1 2959 edge e, e_then, e_else;
75a70cf9 2960 tree tmp_then, tmp_else, tmp_join, tmp_var;
79acaae1 2961
2962 tmp_var = create_tmp_var (TREE_TYPE (val), NULL);
2963 if (gimple_in_ssa_p (cfun))
2964 {
75a70cf9 2965 tmp_then = make_ssa_name (tmp_var, NULL);
2966 tmp_else = make_ssa_name (tmp_var, NULL);
2967 tmp_join = make_ssa_name (tmp_var, NULL);
79acaae1 2968 }
2969 else
2970 {
2971 tmp_then = tmp_var;
2972 tmp_else = tmp_var;
2973 tmp_join = tmp_var;
2974 }
773c5ba7 2975
773c5ba7 2976 e = split_block (bb, NULL);
2977 cond_bb = e->src;
2978 bb = e->dest;
2979 remove_edge (e);
2980
2981 then_bb = create_empty_bb (cond_bb);
2982 else_bb = create_empty_bb (then_bb);
79acaae1 2983 set_immediate_dominator (CDI_DOMINATORS, then_bb, cond_bb);
2984 set_immediate_dominator (CDI_DOMINATORS, else_bb, cond_bb);
773c5ba7 2985
75a70cf9 2986 stmt = gimple_build_cond_empty (cond);
2987 gsi = gsi_start_bb (cond_bb);
2988 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
773c5ba7 2989
75a70cf9 2990 gsi = gsi_start_bb (then_bb);
2991 stmt = gimple_build_assign (tmp_then, val);
2992 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
773c5ba7 2993
75a70cf9 2994 gsi = gsi_start_bb (else_bb);
2995 stmt = gimple_build_assign
2996 (tmp_else, build_int_cst (unsigned_type_node, 1));
2997 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
773c5ba7 2998
2999 make_edge (cond_bb, then_bb, EDGE_TRUE_VALUE);
3000 make_edge (cond_bb, else_bb, EDGE_FALSE_VALUE);
79acaae1 3001 e_then = make_edge (then_bb, bb, EDGE_FALLTHRU);
3002 e_else = make_edge (else_bb, bb, EDGE_FALLTHRU);
773c5ba7 3003
79acaae1 3004 if (gimple_in_ssa_p (cfun))
3005 {
75a70cf9 3006 gimple phi = create_phi_node (tmp_join, bb);
79acaae1 3007 SSA_NAME_DEF_STMT (tmp_join) = phi;
efbcb6de 3008 add_phi_arg (phi, tmp_then, e_then, UNKNOWN_LOCATION);
3009 add_phi_arg (phi, tmp_else, e_else, UNKNOWN_LOCATION);
79acaae1 3010 }
3011
3012 val = tmp_join;
773c5ba7 3013 }
3014
75a70cf9 3015 gsi = gsi_start_bb (bb);
3016 val = force_gimple_operand_gsi (&gsi, val, true, NULL_TREE,
3017 false, GSI_CONTINUE_LINKING);
1e8e9920 3018 }
3019
75a70cf9 3020 gsi = gsi_last_bb (bb);
3021 t = gimple_omp_parallel_data_arg (entry_stmt);
1e8e9920 3022 if (t == NULL)
c2f47e15 3023 t1 = null_pointer_node;
1e8e9920 3024 else
c2f47e15 3025 t1 = build_fold_addr_expr (t);
75a70cf9 3026 t2 = build_fold_addr_expr (gimple_omp_parallel_child_fn (entry_stmt));
773c5ba7 3027
3028 if (ws_args)
c2f47e15 3029 {
3030 tree args = tree_cons (NULL, t2,
3031 tree_cons (NULL, t1,
3032 tree_cons (NULL, val, ws_args)));
389dd41b 3033 t = build_function_call_expr (UNKNOWN_LOCATION,
3034 built_in_decls[start_ix], args);
c2f47e15 3035 }
3036 else
3037 t = build_call_expr (built_in_decls[start_ix], 3, t2, t1, val);
773c5ba7 3038
75a70cf9 3039 force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3040 false, GSI_CONTINUE_LINKING);
1e8e9920 3041
75a70cf9 3042 t = gimple_omp_parallel_data_arg (entry_stmt);
1e8e9920 3043 if (t == NULL)
3044 t = null_pointer_node;
3045 else
3046 t = build_fold_addr_expr (t);
389dd41b 3047 t = build_call_expr_loc (gimple_location (entry_stmt),
3048 gimple_omp_parallel_child_fn (entry_stmt), 1, t);
75a70cf9 3049 force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3050 false, GSI_CONTINUE_LINKING);
1e8e9920 3051
389dd41b 3052 t = build_call_expr_loc (gimple_location (entry_stmt),
3053 built_in_decls[BUILT_IN_GOMP_PARALLEL_END], 0);
75a70cf9 3054 force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3055 false, GSI_CONTINUE_LINKING);
1e8e9920 3056}
3057
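/* Conceptually, the code emitted above for a plain parallel is
   (a sketch that elides the IF/NUM_THREADS handling):

     GOMP_parallel_start (child_fn, &.omp_data_o, num_threads);
     child_fn (&.omp_data_o);
     GOMP_parallel_end ();

   so the encountering thread itself also executes the region body.
   Combined constructs call one of the GOMP_parallel_loop_*_start or
   GOMP_parallel_sections_start variants, passing WS_ARGS as well.  */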
773c5ba7 3058
fd6481cf 3059/* Build the function call to GOMP_task to actually
 3060 generate the task operation. BB is the block where the code is to be inserted. */
3061
3062static void
75a70cf9 3063expand_task_call (basic_block bb, gimple entry_stmt)
fd6481cf 3064{
3065 tree t, t1, t2, t3, flags, cond, c, clauses;
75a70cf9 3066 gimple_stmt_iterator gsi;
389dd41b 3067 location_t loc = gimple_location (entry_stmt);
fd6481cf 3068
75a70cf9 3069 clauses = gimple_omp_task_clauses (entry_stmt);
fd6481cf 3070
fd6481cf 3071 c = find_omp_clause (clauses, OMP_CLAUSE_IF);
3072 if (c)
3073 cond = gimple_boolify (OMP_CLAUSE_IF_EXPR (c));
3074 else
3075 cond = boolean_true_node;
3076
3077 c = find_omp_clause (clauses, OMP_CLAUSE_UNTIED);
3078 flags = build_int_cst (unsigned_type_node, (c ? 1 : 0));
3079
75a70cf9 3080 gsi = gsi_last_bb (bb);
3081 t = gimple_omp_task_data_arg (entry_stmt);
fd6481cf 3082 if (t == NULL)
3083 t2 = null_pointer_node;
3084 else
389dd41b 3085 t2 = build_fold_addr_expr_loc (loc, t);
3086 t1 = build_fold_addr_expr_loc (loc, gimple_omp_task_child_fn (entry_stmt));
75a70cf9 3087 t = gimple_omp_task_copy_fn (entry_stmt);
fd6481cf 3088 if (t == NULL)
3089 t3 = null_pointer_node;
3090 else
389dd41b 3091 t3 = build_fold_addr_expr_loc (loc, t);
fd6481cf 3092
3093 t = build_call_expr (built_in_decls[BUILT_IN_GOMP_TASK], 7, t1, t2, t3,
75a70cf9 3094 gimple_omp_task_arg_size (entry_stmt),
3095 gimple_omp_task_arg_align (entry_stmt), cond, flags);
fd6481cf 3096
75a70cf9 3097 force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3098 false, GSI_CONTINUE_LINKING);
fd6481cf 3099}
3100
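/* The call built above targets the libgomp entry point

     void GOMP_task (void (*fn) (void *), void *data,
                     void (*cpyfn) (void *, void *),
                     long arg_size, long arg_align,
                     bool if_clause, unsigned flags);

   where, per the code above, bit 0 of FLAGS is set for untied tasks.  */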
3101
75a70cf9 3102/* If exceptions are enabled, wrap the statements in BODY in a MUST_NOT_THROW
3103 catch handler and return it. This prevents programs from violating the
3104 structured block semantics with throws. */
1e8e9920 3105
75a70cf9 3106static gimple_seq
3107maybe_catch_exception (gimple_seq body)
1e8e9920 3108{
e38def9c 3109 gimple g;
3110 tree decl;
1e8e9920 3111
3112 if (!flag_exceptions)
75a70cf9 3113 return body;
1e8e9920 3114
3115 if (lang_protect_cleanup_actions)
e38def9c 3116 decl = lang_protect_cleanup_actions ();
1e8e9920 3117 else
e38def9c 3118 decl = built_in_decls[BUILT_IN_TRAP];
75a70cf9 3119
e38def9c 3120 g = gimple_build_eh_must_not_throw (decl);
3121 g = gimple_build_try (body, gimple_seq_alloc_with_stmt (g),
75a70cf9 3122 GIMPLE_TRY_CATCH);
1e8e9920 3123
e38def9c 3124 return gimple_seq_alloc_with_stmt (g);
1e8e9920 3125}
3126
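/* Roughly, the wrapping above corresponds to

     try { BODY } catch (...) { <cleanup action or __builtin_trap> }

   using an EH MUST_NOT_THROW region, so no exception can escape the
   structured block.  */
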
773c5ba7 3127/* Chain all the DECLs in LIST by their TREE_CHAIN fields. */
1e8e9920 3128
773c5ba7 3129static tree
3130list2chain (tree list)
1e8e9920 3131{
773c5ba7 3132 tree t;
1e8e9920 3133
773c5ba7 3134 for (t = list; t; t = TREE_CHAIN (t))
3135 {
3136 tree var = TREE_VALUE (t);
3137 if (TREE_CHAIN (t))
3138 TREE_CHAIN (var) = TREE_VALUE (TREE_CHAIN (t));
3139 else
3140 TREE_CHAIN (var) = NULL_TREE;
3141 }
1e8e9920 3142
773c5ba7 3143 return list ? TREE_VALUE (list) : NULL_TREE;
3144}
1e8e9920 3145
1e8e9920 3146
773c5ba7 3147/* Remove barriers in REGION->EXIT's block. Note that this is only
75a70cf9 3148 valid for GIMPLE_OMP_PARALLEL regions. Since the end of a parallel region
3149 is an implicit barrier, any workshare inside the GIMPLE_OMP_PARALLEL that
3150 left a barrier at the end of the GIMPLE_OMP_PARALLEL region can now be
773c5ba7 3151 removed. */
1e8e9920 3152
773c5ba7 3153static void
3154remove_exit_barrier (struct omp_region *region)
3155{
75a70cf9 3156 gimple_stmt_iterator gsi;
773c5ba7 3157 basic_block exit_bb;
61e47ac8 3158 edge_iterator ei;
3159 edge e;
75a70cf9 3160 gimple stmt;
4a04f4b4 3161 int any_addressable_vars = -1;
1e8e9920 3162
61e47ac8 3163 exit_bb = region->exit;
1e8e9920 3164
5056ba1a 3165 /* If the parallel region doesn't return, we don't have REGION->EXIT
3166 block at all. */
3167 if (! exit_bb)
3168 return;
3169
75a70cf9 3170 /* The last insn in the block will be the parallel's GIMPLE_OMP_RETURN. The
3171 workshare's GIMPLE_OMP_RETURN will be in a preceding block. The kinds of
61e47ac8 3172 statements that can appear in between are extremely limited -- no
3173 memory operations at all. Here, we allow nothing at all, so the
75a70cf9 3174 only thing we allow to precede this GIMPLE_OMP_RETURN is a label. */
3175 gsi = gsi_last_bb (exit_bb);
3176 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_RETURN);
3177 gsi_prev (&gsi);
3178 if (!gsi_end_p (gsi) && gimple_code (gsi_stmt (gsi)) != GIMPLE_LABEL)
773c5ba7 3179 return;
1e8e9920 3180
61e47ac8 3181 FOR_EACH_EDGE (e, ei, exit_bb->preds)
3182 {
75a70cf9 3183 gsi = gsi_last_bb (e->src);
3184 if (gsi_end_p (gsi))
61e47ac8 3185 continue;
75a70cf9 3186 stmt = gsi_stmt (gsi);
4a04f4b4 3187 if (gimple_code (stmt) == GIMPLE_OMP_RETURN
3188 && !gimple_omp_return_nowait_p (stmt))
3189 {
3190 /* OpenMP 3.0 tasks unfortunately prevent this optimization
3191 in many cases. If there could be tasks queued, the barrier
3192 might be needed to let the tasks run before some local
3193 variable of the parallel that the task uses as shared
3194 runs out of scope. The task can be spawned either
 3195 from within the current function (this would be easy to check)
3196 or from some function it calls and gets passed an address
3197 of such a variable. */
3198 if (any_addressable_vars < 0)
3199 {
3200 gimple parallel_stmt = last_stmt (region->entry);
3201 tree child_fun = gimple_omp_parallel_child_fn (parallel_stmt);
3202 tree local_decls = DECL_STRUCT_FUNCTION (child_fun)->local_decls;
3203 tree block;
3204
3205 any_addressable_vars = 0;
3206 for (; local_decls; local_decls = TREE_CHAIN (local_decls))
3207 if (TREE_ADDRESSABLE (TREE_VALUE (local_decls)))
3208 {
3209 any_addressable_vars = 1;
3210 break;
3211 }
3212 for (block = gimple_block (stmt);
3213 !any_addressable_vars
3214 && block
3215 && TREE_CODE (block) == BLOCK;
3216 block = BLOCK_SUPERCONTEXT (block))
3217 {
3218 for (local_decls = BLOCK_VARS (block);
3219 local_decls;
3220 local_decls = TREE_CHAIN (local_decls))
3221 if (TREE_ADDRESSABLE (local_decls))
3222 {
3223 any_addressable_vars = 1;
3224 break;
3225 }
3226 if (block == gimple_block (parallel_stmt))
3227 break;
3228 }
3229 }
3230 if (!any_addressable_vars)
3231 gimple_omp_return_set_nowait (stmt);
3232 }
61e47ac8 3233 }
1e8e9920 3234}
3235
61e47ac8 3236static void
3237remove_exit_barriers (struct omp_region *region)
3238{
75a70cf9 3239 if (region->type == GIMPLE_OMP_PARALLEL)
61e47ac8 3240 remove_exit_barrier (region);
3241
3242 if (region->inner)
3243 {
3244 region = region->inner;
3245 remove_exit_barriers (region);
3246 while (region->next)
3247 {
3248 region = region->next;
3249 remove_exit_barriers (region);
3250 }
3251 }
3252}
773c5ba7 3253
658b4427 3254/* Optimize omp_get_thread_num () and omp_get_num_threads ()
3255 calls. These can't be declared as const functions, but
3256 within one parallel body they are constant, so they can be
3257 transformed there into __builtin_omp_get_{thread_num,num_threads} ()
fd6481cf 3258 which are declared const. Similarly for task body, except
3259 that in untied task omp_get_thread_num () can change at any task
3260 scheduling point. */
658b4427 3261
3262static void
75a70cf9 3263optimize_omp_library_calls (gimple entry_stmt)
658b4427 3264{
3265 basic_block bb;
75a70cf9 3266 gimple_stmt_iterator gsi;
658b4427 3267 tree thr_num_id
3268 = DECL_ASSEMBLER_NAME (built_in_decls [BUILT_IN_OMP_GET_THREAD_NUM]);
3269 tree num_thr_id
3270 = DECL_ASSEMBLER_NAME (built_in_decls [BUILT_IN_OMP_GET_NUM_THREADS]);
75a70cf9 3271 bool untied_task = (gimple_code (entry_stmt) == GIMPLE_OMP_TASK
3272 && find_omp_clause (gimple_omp_task_clauses (entry_stmt),
fd6481cf 3273 OMP_CLAUSE_UNTIED) != NULL);
658b4427 3274
3275 FOR_EACH_BB (bb)
75a70cf9 3276 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
658b4427 3277 {
75a70cf9 3278 gimple call = gsi_stmt (gsi);
658b4427 3279 tree decl;
3280
75a70cf9 3281 if (is_gimple_call (call)
3282 && (decl = gimple_call_fndecl (call))
658b4427 3283 && DECL_EXTERNAL (decl)
3284 && TREE_PUBLIC (decl)
3285 && DECL_INITIAL (decl) == NULL)
3286 {
3287 tree built_in;
3288
3289 if (DECL_NAME (decl) == thr_num_id)
fd6481cf 3290 {
3291 /* In #pragma omp task untied omp_get_thread_num () can change
3292 during the execution of the task region. */
3293 if (untied_task)
3294 continue;
3295 built_in = built_in_decls [BUILT_IN_OMP_GET_THREAD_NUM];
3296 }
658b4427 3297 else if (DECL_NAME (decl) == num_thr_id)
3298 built_in = built_in_decls [BUILT_IN_OMP_GET_NUM_THREADS];
3299 else
3300 continue;
3301
3302 if (DECL_ASSEMBLER_NAME (decl) != DECL_ASSEMBLER_NAME (built_in)
75a70cf9 3303 || gimple_call_num_args (call) != 0)
658b4427 3304 continue;
3305
3306 if (flag_exceptions && !TREE_NOTHROW (decl))
3307 continue;
3308
3309 if (TREE_CODE (TREE_TYPE (decl)) != FUNCTION_TYPE
1ea6a73c 3310 || !types_compatible_p (TREE_TYPE (TREE_TYPE (decl)),
3311 TREE_TYPE (TREE_TYPE (built_in))))
658b4427 3312 continue;
3313
0acacf9e 3314 gimple_call_set_fndecl (call, built_in);
658b4427 3315 }
3316 }
3317}
3318
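/* For example, within one parallel body (a sketch)

     tid1 = omp_get_thread_num ();
     ...
     tid2 = omp_get_thread_num ();

   both calls are redirected above to the const built-in
   __builtin_omp_get_thread_num (), allowing later passes to CSE them
   into a single call.  */
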
fd6481cf 3319/* Expand the OpenMP parallel or task directive starting at REGION. */
1e8e9920 3320
3321static void
fd6481cf 3322expand_omp_taskreg (struct omp_region *region)
1e8e9920 3323{
773c5ba7 3324 basic_block entry_bb, exit_bb, new_bb;
87d4aa85 3325 struct function *child_cfun;
1d22f541 3326 tree child_fn, block, t, ws_args, *tp;
ba3a7ba0 3327 tree save_current;
75a70cf9 3328 gimple_stmt_iterator gsi;
3329 gimple entry_stmt, stmt;
773c5ba7 3330 edge e;
3331
61e47ac8 3332 entry_stmt = last_stmt (region->entry);
75a70cf9 3333 child_fn = gimple_omp_taskreg_child_fn (entry_stmt);
773c5ba7 3334 child_cfun = DECL_STRUCT_FUNCTION (child_fn);
b3a3ddec 3335 /* If this function has been already instrumented, make sure
3336 the child function isn't instrumented again. */
3337 child_cfun->after_tree_profile = cfun->after_tree_profile;
773c5ba7 3338
61e47ac8 3339 entry_bb = region->entry;
3340 exit_bb = region->exit;
773c5ba7 3341
773c5ba7 3342 if (is_combined_parallel (region))
61e47ac8 3343 ws_args = region->ws_args;
773c5ba7 3344 else
3345 ws_args = NULL_TREE;
1e8e9920 3346
61e47ac8 3347 if (child_cfun->cfg)
1e8e9920 3348 {
773c5ba7 3349 /* Due to inlining, it may happen that we have already outlined
3350 the region, in which case all we need to do is make the
3351 sub-graph unreachable and emit the parallel call. */
3352 edge entry_succ_e, exit_succ_e;
75a70cf9 3353 gimple_stmt_iterator gsi;
773c5ba7 3354
3355 entry_succ_e = single_succ_edge (entry_bb);
773c5ba7 3356
75a70cf9 3357 gsi = gsi_last_bb (entry_bb);
3358 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_PARALLEL
3359 || gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_TASK);
3360 gsi_remove (&gsi, true);
773c5ba7 3361
3362 new_bb = entry_bb;
03ed154b 3363 if (exit_bb)
3364 {
3365 exit_succ_e = single_succ_edge (exit_bb);
3366 make_edge (new_bb, exit_succ_e->dest, EDGE_FALLTHRU);
3367 }
79acaae1 3368 remove_edge_and_dominated_blocks (entry_succ_e);
1e8e9920 3369 }
773c5ba7 3370 else
3371 {
3372 /* If the parallel region needs data sent from the parent
3480139d 3373 function, then the very first statement (except possible
3374 tree profile counter updates) of the parallel body
773c5ba7 3375 is a copy assignment .OMP_DATA_I = &.OMP_DATA_O. Since
3376 &.OMP_DATA_O is passed as an argument to the child function,
3377 we need to replace it with the argument as seen by the child
3378 function.
3379
3380 In most cases, this will end up being the identity assignment
3381 .OMP_DATA_I = .OMP_DATA_I. However, if the parallel body had
3382 a function call that has been inlined, the original PARM_DECL
3383 .OMP_DATA_I may have been converted into a different local
 3384 variable, in which case we need to keep the assignment. */
75a70cf9 3385 if (gimple_omp_taskreg_data_arg (entry_stmt))
773c5ba7 3386 {
3387 basic_block entry_succ_bb = single_succ (entry_bb);
75a70cf9 3388 gimple_stmt_iterator gsi;
3389 tree arg, narg;
3390 gimple parcopy_stmt = NULL;
1e8e9920 3391
75a70cf9 3392 for (gsi = gsi_start_bb (entry_succ_bb); ; gsi_next (&gsi))
3480139d 3393 {
75a70cf9 3394 gimple stmt;
3480139d 3395
75a70cf9 3396 gcc_assert (!gsi_end_p (gsi));
3397 stmt = gsi_stmt (gsi);
3398 if (gimple_code (stmt) != GIMPLE_ASSIGN)
cc6b725b 3399 continue;
3400
75a70cf9 3401 if (gimple_num_ops (stmt) == 2)
3480139d 3402 {
75a70cf9 3403 tree arg = gimple_assign_rhs1 (stmt);
3404
 3405 /* We're ignoring the subcode because we're
3406 effectively doing a STRIP_NOPS. */
3407
3408 if (TREE_CODE (arg) == ADDR_EXPR
3409 && TREE_OPERAND (arg, 0)
3410 == gimple_omp_taskreg_data_arg (entry_stmt))
3411 {
3412 parcopy_stmt = stmt;
3413 break;
3414 }
3480139d 3415 }
3416 }
79acaae1 3417
75a70cf9 3418 gcc_assert (parcopy_stmt != NULL);
79acaae1 3419 arg = DECL_ARGUMENTS (child_fn);
3420
3421 if (!gimple_in_ssa_p (cfun))
3422 {
75a70cf9 3423 if (gimple_assign_lhs (parcopy_stmt) == arg)
3424 gsi_remove (&gsi, true);
79acaae1 3425 else
75a70cf9 3426 {
3427 /* ?? Is setting the subcode really necessary ?? */
3428 gimple_omp_set_subcode (parcopy_stmt, TREE_CODE (arg));
3429 gimple_assign_set_rhs1 (parcopy_stmt, arg);
3430 }
79acaae1 3431 }
3432 else
3433 {
3434 /* If we are in ssa form, we must load the value from the default
3435 definition of the argument. That should not be defined now,
3436 since the argument is not used uninitialized. */
3437 gcc_assert (gimple_default_def (cfun, arg) == NULL);
75a70cf9 3438 narg = make_ssa_name (arg, gimple_build_nop ());
79acaae1 3439 set_default_def (arg, narg);
75a70cf9 3440 /* ?? Is setting the subcode really necessary ?? */
3441 gimple_omp_set_subcode (parcopy_stmt, TREE_CODE (narg));
3442 gimple_assign_set_rhs1 (parcopy_stmt, narg);
79acaae1 3443 update_stmt (parcopy_stmt);
3444 }
773c5ba7 3445 }
3446
3447 /* Declare local variables needed in CHILD_CFUN. */
3448 block = DECL_INITIAL (child_fn);
edb7afe8 3449 BLOCK_VARS (block) = list2chain (child_cfun->local_decls);
e1a7ccb9 3450 /* The gimplifier could record temporaries in the parallel/task block
 3451 rather than in the containing function's local_decls chain,
 3452 in which case cgraph would miss finalizing them. Do it now. */
3453 for (t = BLOCK_VARS (block); t; t = TREE_CHAIN (t))
3454 if (TREE_CODE (t) == VAR_DECL
3455 && TREE_STATIC (t)
3456 && !DECL_EXTERNAL (t))
3457 varpool_finalize_decl (t);
75a70cf9 3458 DECL_SAVED_TREE (child_fn) = NULL;
3459 gimple_set_body (child_fn, bb_seq (single_succ (entry_bb)));
1d22f541 3460 TREE_USED (block) = 1;
773c5ba7 3461
79acaae1 3462 /* Reset DECL_CONTEXT on function arguments. */
773c5ba7 3463 for (t = DECL_ARGUMENTS (child_fn); t; t = TREE_CHAIN (t))
3464 DECL_CONTEXT (t) = child_fn;
3465
75a70cf9 3466 /* Split ENTRY_BB at GIMPLE_OMP_PARALLEL or GIMPLE_OMP_TASK,
3467 so that it can be moved to the child function. */
3468 gsi = gsi_last_bb (entry_bb);
3469 stmt = gsi_stmt (gsi);
3470 gcc_assert (stmt && (gimple_code (stmt) == GIMPLE_OMP_PARALLEL
3471 || gimple_code (stmt) == GIMPLE_OMP_TASK));
3472 gsi_remove (&gsi, true);
3473 e = split_block (entry_bb, stmt);
773c5ba7 3474 entry_bb = e->dest;
3475 single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
3476
75a70cf9 3477 /* Convert GIMPLE_OMP_RETURN into a RETURN_EXPR. */
5056ba1a 3478 if (exit_bb)
3479 {
75a70cf9 3480 gsi = gsi_last_bb (exit_bb);
3481 gcc_assert (!gsi_end_p (gsi)
3482 && gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_RETURN);
3483 stmt = gimple_build_return (NULL);
3484 gsi_insert_after (&gsi, stmt, GSI_SAME_STMT);
3485 gsi_remove (&gsi, true);
5056ba1a 3486 }
79acaae1 3487
3488 /* Move the parallel region into CHILD_CFUN. */
48e1416a 3489
79acaae1 3490 if (gimple_in_ssa_p (cfun))
3491 {
3492 push_cfun (child_cfun);
bcaa2770 3493 init_tree_ssa (child_cfun);
79acaae1 3494 init_ssa_operands ();
3495 cfun->gimple_df->in_ssa_p = true;
3496 pop_cfun ();
1d22f541 3497 block = NULL_TREE;
79acaae1 3498 }
1d22f541 3499 else
75a70cf9 3500 block = gimple_block (entry_stmt);
1d22f541 3501
3502 new_bb = move_sese_region_to_fn (child_cfun, entry_bb, exit_bb, block);
79acaae1 3503 if (exit_bb)
3504 single_succ_edge (new_bb)->flags = EDGE_FALLTHRU;
3505
1d22f541 3506 /* Remove non-local VAR_DECLs from child_cfun->local_decls list. */
3507 for (tp = &child_cfun->local_decls; *tp; )
3508 if (DECL_CONTEXT (TREE_VALUE (*tp)) != cfun->decl)
3509 tp = &TREE_CHAIN (*tp);
3510 else
3511 *tp = TREE_CHAIN (*tp);
3512
79acaae1 3513 /* Inform the callgraph about the new function. */
3514 DECL_STRUCT_FUNCTION (child_fn)->curr_properties
3515 = cfun->curr_properties;
3516 cgraph_add_new_function (child_fn, true);
3517
3518 /* Fix the callgraph edges for child_cfun. Those for cfun will be
3519 fixed in a following pass. */
3520 push_cfun (child_cfun);
ba3a7ba0 3521 save_current = current_function_decl;
3522 current_function_decl = child_fn;
658b4427 3523 if (optimize)
fd6481cf 3524 optimize_omp_library_calls (entry_stmt);
79acaae1 3525 rebuild_cgraph_edges ();
fbe86b1b 3526
3527 /* Some EH regions might become dead, see PR34608. If
3528 pass_cleanup_cfg isn't the first pass to happen with the
3529 new child, these dead EH edges might cause problems.
3530 Clean them up now. */
3531 if (flag_exceptions)
3532 {
3533 basic_block bb;
fbe86b1b 3534 bool changed = false;
3535
fbe86b1b 3536 FOR_EACH_BB (bb)
75a70cf9 3537 changed |= gimple_purge_dead_eh_edges (bb);
fbe86b1b 3538 if (changed)
3539 cleanup_tree_cfg ();
fbe86b1b 3540 }
dd277d48 3541 if (gimple_in_ssa_p (cfun))
3542 update_ssa (TODO_update_ssa);
ba3a7ba0 3543 current_function_decl = save_current;
79acaae1 3544 pop_cfun ();
773c5ba7 3545 }
48e1416a 3546
773c5ba7 3547 /* Emit a library call to launch the child threads. */
75a70cf9 3548 if (gimple_code (entry_stmt) == GIMPLE_OMP_PARALLEL)
fd6481cf 3549 expand_parallel_call (region, new_bb, entry_stmt, ws_args);
3550 else
3551 expand_task_call (new_bb, entry_stmt);
28c92cbb 3552 update_ssa (TODO_update_ssa_only_virtuals);
1e8e9920 3553}
3554
773c5ba7 3555
3556/* A subroutine of expand_omp_for. Generate code for a parallel
1e8e9920 3557 loop with any schedule. Given parameters:
3558
3559 for (V = N1; V cond N2; V += STEP) BODY;
3560
3561 where COND is "<" or ">", we generate pseudocode
3562
3563 more = GOMP_loop_foo_start (N1, N2, STEP, CHUNK, &istart0, &iend0);
773c5ba7 3564 if (more) goto L0; else goto L3;
1e8e9920 3565 L0:
3566 V = istart0;
3567 iend = iend0;
3568 L1:
3569 BODY;
3570 V += STEP;
773c5ba7 3571 if (V cond iend) goto L1; else goto L2;
1e8e9920 3572 L2:
773c5ba7 3573 if (GOMP_loop_foo_next (&istart0, &iend0)) goto L0; else goto L3;
3574 L3:
1e8e9920 3575
773c5ba7 3576 If this is a combined omp parallel loop, instead of the call to
fd6481cf 3577 GOMP_loop_foo_start, we call GOMP_loop_foo_next.
3578
3579 For collapsed loops, given parameters:
3580 collapse(3)
3581 for (V1 = N11; V1 cond1 N12; V1 += STEP1)
3582 for (V2 = N21; V2 cond2 N22; V2 += STEP2)
3583 for (V3 = N31; V3 cond3 N32; V3 += STEP3)
3584 BODY;
3585
3586 we generate pseudocode
3587
3588 if (cond3 is <)
3589 adj = STEP3 - 1;
3590 else
3591 adj = STEP3 + 1;
3592 count3 = (adj + N32 - N31) / STEP3;
3593 if (cond2 is <)
3594 adj = STEP2 - 1;
3595 else
3596 adj = STEP2 + 1;
3597 count2 = (adj + N22 - N21) / STEP2;
3598 if (cond1 is <)
3599 adj = STEP1 - 1;
3600 else
3601 adj = STEP1 + 1;
3602 count1 = (adj + N12 - N11) / STEP1;
3603 count = count1 * count2 * count3;
3604 more = GOMP_loop_foo_start (0, count, 1, CHUNK, &istart0, &iend0);
3605 if (more) goto L0; else goto L3;
3606 L0:
3607 V = istart0;
3608 T = V;
3609 V3 = N31 + (T % count3) * STEP3;
3610 T = T / count3;
3611 V2 = N21 + (T % count2) * STEP2;
3612 T = T / count2;
3613 V1 = N11 + T * STEP1;
3614 iend = iend0;
3615 L1:
3616 BODY;
3617 V += 1;
3618 if (V < iend) goto L10; else goto L2;
3619 L10:
3620 V3 += STEP3;
3621 if (V3 cond3 N32) goto L1; else goto L11;
3622 L11:
3623 V3 = N31;
3624 V2 += STEP2;
3625 if (V2 cond2 N22) goto L1; else goto L12;
3626 L12:
3627 V2 = N21;
3628 V1 += STEP1;
3629 goto L1;
3630 L2:
3631 if (GOMP_loop_foo_next (&istart0, &iend0)) goto L0; else goto L3;
3632 L3:
3633
3634 */
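/* An illustrative instance of the collapsed mapping above (values chosen
   here, not taken from this dump): for

     collapse(2)
     for (V1 = 0; V1 < 3; V1++)
       for (V2 = 0; V2 < 4; V2++)
	 BODY;

   count2 = 4, count1 = 3 and count = 12; the logical iteration V = 7 is
   decoded as V2 = 0 + (7 % 4) * 1 = 3, T = 7 / 4 = 1, V1 = 0 + 1 * 1 = 1,
   so the eighth iteration runs BODY with (V1, V2) = (1, 3).  */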
1e8e9920 3635
61e47ac8 3636static void
773c5ba7 3637expand_omp_for_generic (struct omp_region *region,
3638 struct omp_for_data *fd,
1e8e9920 3639 enum built_in_function start_fn,
3640 enum built_in_function next_fn)
3641{
75a70cf9 3642 tree type, istart0, iend0, iend;
fd6481cf 3643 tree t, vmain, vback, bias = NULL_TREE;
3644 basic_block entry_bb, cont_bb, exit_bb, l0_bb, l1_bb, collapse_bb;
03ed154b 3645 basic_block l2_bb = NULL, l3_bb = NULL;
75a70cf9 3646 gimple_stmt_iterator gsi;
3647 gimple stmt;
773c5ba7 3648 bool in_combined_parallel = is_combined_parallel (region);
ac6e3339 3649 bool broken_loop = region->cont == NULL;
79acaae1 3650 edge e, ne;
fd6481cf 3651 tree *counts = NULL;
3652 int i;
ac6e3339 3653
3654 gcc_assert (!broken_loop || !in_combined_parallel);
fd6481cf 3655 gcc_assert (fd->iter_type == long_integer_type_node
3656 || !in_combined_parallel);
1e8e9920 3657
fd6481cf 3658 type = TREE_TYPE (fd->loop.v);
3659 istart0 = create_tmp_var (fd->iter_type, ".istart0");
3660 iend0 = create_tmp_var (fd->iter_type, ".iend0");
6d63fc03 3661 TREE_ADDRESSABLE (istart0) = 1;
3662 TREE_ADDRESSABLE (iend0) = 1;
79acaae1 3663 if (gimple_in_ssa_p (cfun))
3664 {
3665 add_referenced_var (istart0);
3666 add_referenced_var (iend0);
3667 }
1e8e9920 3668
fd6481cf 3669 /* See if we need to bias by LLONG_MIN. */
3670 if (fd->iter_type == long_long_unsigned_type_node
3671 && TREE_CODE (type) == INTEGER_TYPE
3672 && !TYPE_UNSIGNED (type))
3673 {
3674 tree n1, n2;
3675
3676 if (fd->loop.cond_code == LT_EXPR)
3677 {
3678 n1 = fd->loop.n1;
3679 n2 = fold_build2 (PLUS_EXPR, type, fd->loop.n2, fd->loop.step);
3680 }
3681 else
3682 {
3683 n1 = fold_build2 (MINUS_EXPR, type, fd->loop.n2, fd->loop.step);
3684 n2 = fd->loop.n1;
3685 }
3686 if (TREE_CODE (n1) != INTEGER_CST
3687 || TREE_CODE (n2) != INTEGER_CST
3688 || ((tree_int_cst_sgn (n1) < 0) ^ (tree_int_cst_sgn (n2) < 0)))
3689 bias = fold_convert (fd->iter_type, TYPE_MIN_VALUE (type));
3690 }
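  /* An illustrative note on the bias (values chosen here): with a signed
     long long loop variable and an unsigned long long iter_type, adding
     bias = LLONG_MIN flips the sign bit, mapping e.g. -1 to 0x7fff...ffff
     and +1 to 0x8000...0001, so signed order is preserved under the
     unsigned comparisons done by the runtime; the bias is subtracted
     again below when istart0/iend0 are copied back into the loop
     variable.  */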
3691
61e47ac8 3692 entry_bb = region->entry;
03ed154b 3693 cont_bb = region->cont;
fd6481cf 3694 collapse_bb = NULL;
ac6e3339 3695 gcc_assert (EDGE_COUNT (entry_bb->succs) == 2);
3696 gcc_assert (broken_loop
3697 || BRANCH_EDGE (entry_bb)->dest == FALLTHRU_EDGE (cont_bb)->dest);
3698 l0_bb = split_edge (FALLTHRU_EDGE (entry_bb));
3699 l1_bb = single_succ (l0_bb);
3700 if (!broken_loop)
03ed154b 3701 {
3702 l2_bb = create_empty_bb (cont_bb);
ac6e3339 3703 gcc_assert (BRANCH_EDGE (cont_bb)->dest == l1_bb);
3704 gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
03ed154b 3705 }
ac6e3339 3706 else
3707 l2_bb = NULL;
3708 l3_bb = BRANCH_EDGE (entry_bb)->dest;
3709 exit_bb = region->exit;
773c5ba7 3710
75a70cf9 3711 gsi = gsi_last_bb (entry_bb);
fd6481cf 3712
75a70cf9 3713 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
fd6481cf 3714 if (fd->collapse > 1)
3715 {
 3716 /* Collapsed loops need work for expansion in SSA form. */
3717 gcc_assert (!gimple_in_ssa_p (cfun));
3718 counts = (tree *) alloca (fd->collapse * sizeof (tree));
3719 for (i = 0; i < fd->collapse; i++)
3720 {
3721 tree itype = TREE_TYPE (fd->loops[i].v);
3722
3723 if (POINTER_TYPE_P (itype))
3724 itype = lang_hooks.types.type_for_size (TYPE_PRECISION (itype), 0);
3725 t = build_int_cst (itype, (fd->loops[i].cond_code == LT_EXPR
3726 ? -1 : 1));
3727 t = fold_build2 (PLUS_EXPR, itype,
3728 fold_convert (itype, fd->loops[i].step), t);
3729 t = fold_build2 (PLUS_EXPR, itype, t,
3730 fold_convert (itype, fd->loops[i].n2));
3731 t = fold_build2 (MINUS_EXPR, itype, t,
3732 fold_convert (itype, fd->loops[i].n1));
3733 if (TYPE_UNSIGNED (itype) && fd->loops[i].cond_code == GT_EXPR)
3734 t = fold_build2 (TRUNC_DIV_EXPR, itype,
3735 fold_build1 (NEGATE_EXPR, itype, t),
3736 fold_build1 (NEGATE_EXPR, itype,
3737 fold_convert (itype,
3738 fd->loops[i].step)));
3739 else
3740 t = fold_build2 (TRUNC_DIV_EXPR, itype, t,
3741 fold_convert (itype, fd->loops[i].step));
3742 t = fold_convert (type, t);
3743 if (TREE_CODE (t) == INTEGER_CST)
3744 counts[i] = t;
3745 else
3746 {
3747 counts[i] = create_tmp_var (type, ".count");
75a70cf9 3748 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3749 true, GSI_SAME_STMT);
3750 stmt = gimple_build_assign (counts[i], t);
3751 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
fd6481cf 3752 }
3753 if (SSA_VAR_P (fd->loop.n2))
3754 {
3755 if (i == 0)
75a70cf9 3756 t = counts[0];
fd6481cf 3757 else
3758 {
3759 t = fold_build2 (MULT_EXPR, type, fd->loop.n2, counts[i]);
75a70cf9 3760 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3761 true, GSI_SAME_STMT);
fd6481cf 3762 }
75a70cf9 3763 stmt = gimple_build_assign (fd->loop.n2, t);
3764 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
fd6481cf 3765 }
3766 }
3767 }
79acaae1 3768 if (in_combined_parallel)
3769 {
3770 /* In a combined parallel loop, emit a call to
3771 GOMP_loop_foo_next. */
3772 t = build_call_expr (built_in_decls[next_fn], 2,
3773 build_fold_addr_expr (istart0),
3774 build_fold_addr_expr (iend0));
3775 }
3776 else
1e8e9920 3777 {
c2f47e15 3778 tree t0, t1, t2, t3, t4;
773c5ba7 3779 /* If this is not a combined parallel loop, emit a call to
3780 GOMP_loop_foo_start in ENTRY_BB. */
c2f47e15 3781 t4 = build_fold_addr_expr (iend0);
3782 t3 = build_fold_addr_expr (istart0);
fd6481cf 3783 t2 = fold_convert (fd->iter_type, fd->loop.step);
c799f233 3784 if (POINTER_TYPE_P (type)
3785 && TYPE_PRECISION (type) != TYPE_PRECISION (fd->iter_type))
3786 {
 3787 /* Avoid casting pointers to an integer of a different size. */
3788 tree itype
3789 = lang_hooks.types.type_for_size (TYPE_PRECISION (type), 0);
3790 t1 = fold_convert (fd->iter_type, fold_convert (itype, fd->loop.n2));
3791 t0 = fold_convert (fd->iter_type, fold_convert (itype, fd->loop.n1));
3792 }
3793 else
3794 {
3795 t1 = fold_convert (fd->iter_type, fd->loop.n2);
3796 t0 = fold_convert (fd->iter_type, fd->loop.n1);
3797 }
fd6481cf 3798 if (bias)
1e8e9920 3799 {
fd6481cf 3800 t1 = fold_build2 (PLUS_EXPR, fd->iter_type, t1, bias);
3801 t0 = fold_build2 (PLUS_EXPR, fd->iter_type, t0, bias);
3802 }
3803 if (fd->iter_type == long_integer_type_node)
3804 {
3805 if (fd->chunk_size)
3806 {
3807 t = fold_convert (fd->iter_type, fd->chunk_size);
3808 t = build_call_expr (built_in_decls[start_fn], 6,
3809 t0, t1, t2, t, t3, t4);
3810 }
3811 else
3812 t = build_call_expr (built_in_decls[start_fn], 5,
3813 t0, t1, t2, t3, t4);
1e8e9920 3814 }
c2f47e15 3815 else
fd6481cf 3816 {
3817 tree t5;
3818 tree c_bool_type;
3819
 3820 /* The GOMP_loop_ull_*start functions have an additional boolean
 3821 argument, true for < loops and false for > loops.
3822 In Fortran, the C bool type can be different from
3823 boolean_type_node. */
3824 c_bool_type = TREE_TYPE (TREE_TYPE (built_in_decls[start_fn]));
3825 t5 = build_int_cst (c_bool_type,
3826 fd->loop.cond_code == LT_EXPR ? 1 : 0);
3827 if (fd->chunk_size)
3828 {
3829 t = fold_convert (fd->iter_type, fd->chunk_size);
3830 t = build_call_expr (built_in_decls[start_fn], 7,
3831 t5, t0, t1, t2, t, t3, t4);
3832 }
3833 else
3834 t = build_call_expr (built_in_decls[start_fn], 6,
3835 t5, t0, t1, t2, t3, t4);
3836 }
1e8e9920 3837 }
fd6481cf 3838 if (TREE_TYPE (t) != boolean_type_node)
3839 t = fold_build2 (NE_EXPR, boolean_type_node,
3840 t, build_int_cst (TREE_TYPE (t), 0));
75a70cf9 3841 t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3842 true, GSI_SAME_STMT);
3843 gsi_insert_after (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT);
79acaae1 3844
75a70cf9 3845 /* Remove the GIMPLE_OMP_FOR statement. */
3846 gsi_remove (&gsi, true);
1e8e9920 3847
773c5ba7 3848 /* Iteration setup for sequential loop goes in L0_BB. */
75a70cf9 3849 gsi = gsi_start_bb (l0_bb);
1efcacec 3850 t = istart0;
fd6481cf 3851 if (bias)
1efcacec 3852 t = fold_build2 (MINUS_EXPR, fd->iter_type, t, bias);
3853 if (POINTER_TYPE_P (type))
3854 t = fold_convert (lang_hooks.types.type_for_size (TYPE_PRECISION (type),
3855 0), t);
3856 t = fold_convert (type, t);
75a70cf9 3857 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3858 false, GSI_CONTINUE_LINKING);
3859 stmt = gimple_build_assign (fd->loop.v, t);
3860 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
1e8e9920 3861
1efcacec 3862 t = iend0;
fd6481cf 3863 if (bias)
1efcacec 3864 t = fold_build2 (MINUS_EXPR, fd->iter_type, t, bias);
3865 if (POINTER_TYPE_P (type))
3866 t = fold_convert (lang_hooks.types.type_for_size (TYPE_PRECISION (type),
3867 0), t);
3868 t = fold_convert (type, t);
75a70cf9 3869 iend = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3870 false, GSI_CONTINUE_LINKING);
fd6481cf 3871 if (fd->collapse > 1)
3872 {
3873 tree tem = create_tmp_var (type, ".tem");
3874
75a70cf9 3875 stmt = gimple_build_assign (tem, fd->loop.v);
3876 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
fd6481cf 3877 for (i = fd->collapse - 1; i >= 0; i--)
3878 {
3879 tree vtype = TREE_TYPE (fd->loops[i].v), itype;
3880 itype = vtype;
3881 if (POINTER_TYPE_P (vtype))
3882 itype = lang_hooks.types.type_for_size (TYPE_PRECISION (vtype), 0);
3883 t = fold_build2 (TRUNC_MOD_EXPR, type, tem, counts[i]);
3884 t = fold_convert (itype, t);
c821ef7d 3885 t = fold_build2 (MULT_EXPR, itype, t,
3886 fold_convert (itype, fd->loops[i].step));
fd6481cf 3887 if (POINTER_TYPE_P (vtype))
3888 t = fold_build2 (POINTER_PLUS_EXPR, vtype,
3889 fd->loops[i].n1, fold_convert (sizetype, t));
3890 else
3891 t = fold_build2 (PLUS_EXPR, itype, fd->loops[i].n1, t);
75a70cf9 3892 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3893 false, GSI_CONTINUE_LINKING);
3894 stmt = gimple_build_assign (fd->loops[i].v, t);
3895 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
fd6481cf 3896 if (i != 0)
3897 {
3898 t = fold_build2 (TRUNC_DIV_EXPR, type, tem, counts[i]);
75a70cf9 3899 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3900 false, GSI_CONTINUE_LINKING);
3901 stmt = gimple_build_assign (tem, t);
3902 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
fd6481cf 3903 }
3904 }
3905 }
773c5ba7 3906
ac6e3339 3907 if (!broken_loop)
03ed154b 3908 {
ac6e3339 3909 /* Code to control the increment and predicate for the sequential
3910 loop goes in the CONT_BB. */
75a70cf9 3911 gsi = gsi_last_bb (cont_bb);
3912 stmt = gsi_stmt (gsi);
3913 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_CONTINUE);
3914 vmain = gimple_omp_continue_control_use (stmt);
3915 vback = gimple_omp_continue_control_def (stmt);
79acaae1 3916
fd6481cf 3917 if (POINTER_TYPE_P (type))
3918 t = fold_build2 (POINTER_PLUS_EXPR, type, vmain,
3919 fold_convert (sizetype, fd->loop.step));
3920 else
3921 t = fold_build2 (PLUS_EXPR, type, vmain, fd->loop.step);
75a70cf9 3922 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3923 true, GSI_SAME_STMT);
3924 stmt = gimple_build_assign (vback, t);
3925 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
3926
fd6481cf 3927 t = build2 (fd->loop.cond_code, boolean_type_node, vback, iend);
75a70cf9 3928 stmt = gimple_build_cond_empty (t);
3929 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
773c5ba7 3930
75a70cf9 3931 /* Remove GIMPLE_OMP_CONTINUE. */
3932 gsi_remove (&gsi, true);
773c5ba7 3933
fd6481cf 3934 if (fd->collapse > 1)
3935 {
3936 basic_block last_bb, bb;
3937
3938 last_bb = cont_bb;
3939 for (i = fd->collapse - 1; i >= 0; i--)
3940 {
3941 tree vtype = TREE_TYPE (fd->loops[i].v);
3942
3943 bb = create_empty_bb (last_bb);
75a70cf9 3944 gsi = gsi_start_bb (bb);
fd6481cf 3945
3946 if (i < fd->collapse - 1)
3947 {
3948 e = make_edge (last_bb, bb, EDGE_FALSE_VALUE);
3949 e->probability = REG_BR_PROB_BASE / 8;
3950
75a70cf9 3951 t = fd->loops[i + 1].n1;
3952 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3953 false, GSI_CONTINUE_LINKING);
3954 stmt = gimple_build_assign (fd->loops[i + 1].v, t);
3955 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
fd6481cf 3956 }
3957 else
3958 collapse_bb = bb;
3959
3960 set_immediate_dominator (CDI_DOMINATORS, bb, last_bb);
3961
3962 if (POINTER_TYPE_P (vtype))
3963 t = fold_build2 (POINTER_PLUS_EXPR, vtype,
3964 fd->loops[i].v,
3965 fold_convert (sizetype, fd->loops[i].step));
3966 else
3967 t = fold_build2 (PLUS_EXPR, vtype, fd->loops[i].v,
3968 fd->loops[i].step);
75a70cf9 3969 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3970 false, GSI_CONTINUE_LINKING);
3971 stmt = gimple_build_assign (fd->loops[i].v, t);
3972 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
fd6481cf 3973
3974 if (i > 0)
3975 {
75a70cf9 3976 t = fd->loops[i].n2;
3977 t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3978 false, GSI_CONTINUE_LINKING);
fd6481cf 3979 t = fold_build2 (fd->loops[i].cond_code, boolean_type_node,
75a70cf9 3980 fd->loops[i].v, t);
3981 stmt = gimple_build_cond_empty (t);
3982 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
fd6481cf 3983 e = make_edge (bb, l1_bb, EDGE_TRUE_VALUE);
3984 e->probability = REG_BR_PROB_BASE * 7 / 8;
3985 }
3986 else
3987 make_edge (bb, l1_bb, EDGE_FALLTHRU);
3988 last_bb = bb;
3989 }
3990 }
3991
ac6e3339 3992 /* Emit code to get the next parallel iteration in L2_BB. */
75a70cf9 3993 gsi = gsi_start_bb (l2_bb);
773c5ba7 3994
ac6e3339 3995 t = build_call_expr (built_in_decls[next_fn], 2,
3996 build_fold_addr_expr (istart0),
3997 build_fold_addr_expr (iend0));
75a70cf9 3998 t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3999 false, GSI_CONTINUE_LINKING);
fd6481cf 4000 if (TREE_TYPE (t) != boolean_type_node)
4001 t = fold_build2 (NE_EXPR, boolean_type_node,
4002 t, build_int_cst (TREE_TYPE (t), 0));
75a70cf9 4003 stmt = gimple_build_cond_empty (t);
4004 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
ac6e3339 4005 }
1e8e9920 4006
61e47ac8 4007 /* Add the loop cleanup function. */
75a70cf9 4008 gsi = gsi_last_bb (exit_bb);
4009 if (gimple_omp_return_nowait_p (gsi_stmt (gsi)))
61e47ac8 4010 t = built_in_decls[BUILT_IN_GOMP_LOOP_END_NOWAIT];
4011 else
4012 t = built_in_decls[BUILT_IN_GOMP_LOOP_END];
75a70cf9 4013 stmt = gimple_build_call (t, 0);
4014 gsi_insert_after (&gsi, stmt, GSI_SAME_STMT);
4015 gsi_remove (&gsi, true);
773c5ba7 4016
4017 /* Connect the new blocks. */
79acaae1 4018 find_edge (entry_bb, l0_bb)->flags = EDGE_TRUE_VALUE;
4019 find_edge (entry_bb, l3_bb)->flags = EDGE_FALSE_VALUE;
1e8e9920 4020
ac6e3339 4021 if (!broken_loop)
4022 {
75a70cf9 4023 gimple_seq phis;
4024
79acaae1 4025 e = find_edge (cont_bb, l3_bb);
4026 ne = make_edge (l2_bb, l3_bb, EDGE_FALSE_VALUE);
4027
75a70cf9 4028 phis = phi_nodes (l3_bb);
4029 for (gsi = gsi_start (phis); !gsi_end_p (gsi); gsi_next (&gsi))
4030 {
4031 gimple phi = gsi_stmt (gsi);
4032 SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, ne),
4033 PHI_ARG_DEF_FROM_EDGE (phi, e));
4034 }
79acaae1 4035 remove_edge (e);
4036
ac6e3339 4037 make_edge (cont_bb, l2_bb, EDGE_FALSE_VALUE);
fd6481cf 4038 if (fd->collapse > 1)
4039 {
4040 e = find_edge (cont_bb, l1_bb);
4041 remove_edge (e);
4042 e = make_edge (cont_bb, collapse_bb, EDGE_TRUE_VALUE);
4043 }
4044 else
4045 {
4046 e = find_edge (cont_bb, l1_bb);
4047 e->flags = EDGE_TRUE_VALUE;
4048 }
4049 e->probability = REG_BR_PROB_BASE * 7 / 8;
4050 find_edge (cont_bb, l2_bb)->probability = REG_BR_PROB_BASE / 8;
ac6e3339 4051 make_edge (l2_bb, l0_bb, EDGE_TRUE_VALUE);
79acaae1 4052
4053 set_immediate_dominator (CDI_DOMINATORS, l2_bb,
4054 recompute_dominator (CDI_DOMINATORS, l2_bb));
4055 set_immediate_dominator (CDI_DOMINATORS, l3_bb,
4056 recompute_dominator (CDI_DOMINATORS, l3_bb));
4057 set_immediate_dominator (CDI_DOMINATORS, l0_bb,
4058 recompute_dominator (CDI_DOMINATORS, l0_bb));
4059 set_immediate_dominator (CDI_DOMINATORS, l1_bb,
4060 recompute_dominator (CDI_DOMINATORS, l1_bb));
ac6e3339 4061 }
1e8e9920 4062}
4063
4064
773c5ba7 4065/* A subroutine of expand_omp_for. Generate code for a parallel
4066 loop with static schedule and no specified chunk size. Given
4067 parameters:
1e8e9920 4068
4069 for (V = N1; V cond N2; V += STEP) BODY;
4070
4071 where COND is "<" or ">", we generate pseudocode
4072
4073 if (cond is <)
4074 adj = STEP - 1;
4075 else
4076 adj = STEP + 1;
fd6481cf 4077 if ((__typeof (V)) -1 > 0 && cond is >)
4078 n = -(adj + N2 - N1) / -STEP;
4079 else
4080 n = (adj + N2 - N1) / STEP;
1e8e9920 4081 q = n / nthreads;
4082 q += (q * nthreads != n);
4083 s0 = q * threadid;
4084 e0 = min(s0 + q, n);
79acaae1 4085 V = s0 * STEP + N1;
1e8e9920 4086 if (s0 >= e0) goto L2; else goto L0;
4087 L0:
1e8e9920 4088 e = e0 * STEP + N1;
4089 L1:
4090 BODY;
4091 V += STEP;
4092 if (V cond e) goto L1;
1e8e9920 4093 L2:
4094*/
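/* A worked instance of the partitioning above (numbers chosen for
   illustration): with n = 7 and nthreads = 3, q = 7 / 3 = 2 is bumped to 3
   because 2 * 3 != 7; thread 0 then runs [s0, e0) = [0, 3), thread 1 runs
   [3, 6) and thread 2 runs [6, 7). A thread with s0 >= e0 (none here)
   branches straight to L2.  */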
4095
61e47ac8 4096static void
773c5ba7 4097expand_omp_for_static_nochunk (struct omp_region *region,
4098 struct omp_for_data *fd)
1e8e9920 4099{
63f88450 4100 tree n, q, s0, e0, e, t, nthreads, threadid;
fd6481cf 4101 tree type, itype, vmain, vback;
61e47ac8 4102 basic_block entry_bb, exit_bb, seq_start_bb, body_bb, cont_bb;
4103 basic_block fin_bb;
75a70cf9 4104 gimple_stmt_iterator gsi;
4105 gimple stmt;
1e8e9920 4106
fd6481cf 4107 itype = type = TREE_TYPE (fd->loop.v);
4108 if (POINTER_TYPE_P (type))
4109 itype = lang_hooks.types.type_for_size (TYPE_PRECISION (type), 0);
1e8e9920 4110
61e47ac8 4111 entry_bb = region->entry;
61e47ac8 4112 cont_bb = region->cont;
ac6e3339 4113 gcc_assert (EDGE_COUNT (entry_bb->succs) == 2);
4114 gcc_assert (BRANCH_EDGE (entry_bb)->dest == FALLTHRU_EDGE (cont_bb)->dest);
4115 seq_start_bb = split_edge (FALLTHRU_EDGE (entry_bb));
4116 body_bb = single_succ (seq_start_bb);
4117 gcc_assert (BRANCH_EDGE (cont_bb)->dest == body_bb);
4118 gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
4119 fin_bb = FALLTHRU_EDGE (cont_bb)->dest;
61e47ac8 4120 exit_bb = region->exit;
4121
773c5ba7 4122 /* Iteration space partitioning goes in ENTRY_BB. */
75a70cf9 4123 gsi = gsi_last_bb (entry_bb);
4124 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
61e47ac8 4125
c2f47e15 4126 t = build_call_expr (built_in_decls[BUILT_IN_OMP_GET_NUM_THREADS], 0);
fd6481cf 4127 t = fold_convert (itype, t);
75a70cf9 4128 nthreads = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
4129 true, GSI_SAME_STMT);
48e1416a 4130
c2f47e15 4131 t = build_call_expr (built_in_decls[BUILT_IN_OMP_GET_THREAD_NUM], 0);
fd6481cf 4132 t = fold_convert (itype, t);
75a70cf9 4133 threadid = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
4134 true, GSI_SAME_STMT);
1e8e9920 4135
fd6481cf 4136 fd->loop.n1
75a70cf9 4137 = force_gimple_operand_gsi (&gsi, fold_convert (type, fd->loop.n1),
4138 true, NULL_TREE, true, GSI_SAME_STMT);
fd6481cf 4139 fd->loop.n2
75a70cf9 4140 = force_gimple_operand_gsi (&gsi, fold_convert (itype, fd->loop.n2),
4141 true, NULL_TREE, true, GSI_SAME_STMT);
fd6481cf 4142 fd->loop.step
75a70cf9 4143 = force_gimple_operand_gsi (&gsi, fold_convert (itype, fd->loop.step),
4144 true, NULL_TREE, true, GSI_SAME_STMT);
fd6481cf 4145
4146 t = build_int_cst (itype, (fd->loop.cond_code == LT_EXPR ? -1 : 1));
4147 t = fold_build2 (PLUS_EXPR, itype, fd->loop.step, t);
4148 t = fold_build2 (PLUS_EXPR, itype, t, fd->loop.n2);
4149 t = fold_build2 (MINUS_EXPR, itype, t, fold_convert (itype, fd->loop.n1));
4150 if (TYPE_UNSIGNED (itype) && fd->loop.cond_code == GT_EXPR)
4151 t = fold_build2 (TRUNC_DIV_EXPR, itype,
4152 fold_build1 (NEGATE_EXPR, itype, t),
4153 fold_build1 (NEGATE_EXPR, itype, fd->loop.step));
4154 else
4155 t = fold_build2 (TRUNC_DIV_EXPR, itype, t, fd->loop.step);
4156 t = fold_convert (itype, t);
75a70cf9 4157 n = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);
1e8e9920 4158
fd6481cf 4159 t = fold_build2 (TRUNC_DIV_EXPR, itype, n, nthreads);
75a70cf9 4160 q = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);
1e8e9920 4161
fd6481cf 4162 t = fold_build2 (MULT_EXPR, itype, q, nthreads);
4163 t = fold_build2 (NE_EXPR, itype, t, n);
4164 t = fold_build2 (PLUS_EXPR, itype, q, t);
75a70cf9 4165 q = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);
1e8e9920 4166
fd6481cf 4167 t = build2 (MULT_EXPR, itype, q, threadid);
75a70cf9 4168 s0 = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);
1e8e9920 4169
fd6481cf 4170 t = fold_build2 (PLUS_EXPR, itype, s0, q);
4171 t = fold_build2 (MIN_EXPR, itype, t, n);
75a70cf9 4172 e0 = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);
79acaae1 4173
1e8e9920 4174 t = build2 (GE_EXPR, boolean_type_node, s0, e0);
75a70cf9 4175 gsi_insert_before (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT);
773c5ba7 4176
75a70cf9 4177 /* Remove the GIMPLE_OMP_FOR statement. */
4178 gsi_remove (&gsi, true);
773c5ba7 4179
4180 /* Setup code for sequential iteration goes in SEQ_START_BB. */
75a70cf9 4181 gsi = gsi_start_bb (seq_start_bb);
1e8e9920 4182
fd6481cf 4183 t = fold_convert (itype, s0);
4184 t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
4185 if (POINTER_TYPE_P (type))
4186 t = fold_build2 (POINTER_PLUS_EXPR, type, fd->loop.n1,
4187 fold_convert (sizetype, t));
4188 else
4189 t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
75a70cf9 4190 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
4191 false, GSI_CONTINUE_LINKING);
4192 stmt = gimple_build_assign (fd->loop.v, t);
4193 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
48e1416a 4194
fd6481cf 4195 t = fold_convert (itype, e0);
4196 t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
4197 if (POINTER_TYPE_P (type))
4198 t = fold_build2 (POINTER_PLUS_EXPR, type, fd->loop.n1,
4199 fold_convert (sizetype, t));
4200 else
4201 t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
75a70cf9 4202 e = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
4203 false, GSI_CONTINUE_LINKING);
1e8e9920 4204
75a70cf9 4205 /* The code controlling the sequential loop replaces the
4206 GIMPLE_OMP_CONTINUE. */
4207 gsi = gsi_last_bb (cont_bb);
4208 stmt = gsi_stmt (gsi);
4209 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_CONTINUE);
4210 vmain = gimple_omp_continue_control_use (stmt);
4211 vback = gimple_omp_continue_control_def (stmt);
79acaae1 4212
fd6481cf 4213 if (POINTER_TYPE_P (type))
4214 t = fold_build2 (POINTER_PLUS_EXPR, type, vmain,
4215 fold_convert (sizetype, fd->loop.step));
4216 else
4217 t = fold_build2 (PLUS_EXPR, type, vmain, fd->loop.step);
75a70cf9 4218 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
4219 true, GSI_SAME_STMT);
4220 stmt = gimple_build_assign (vback, t);
4221 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
79acaae1 4222
fd6481cf 4223 t = build2 (fd->loop.cond_code, boolean_type_node, vback, e);
75a70cf9 4224 gsi_insert_before (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT);
1e8e9920 4225
75a70cf9 4226 /* Remove the GIMPLE_OMP_CONTINUE statement. */
4227 gsi_remove (&gsi, true);
773c5ba7 4228
75a70cf9 4229 /* Replace the GIMPLE_OMP_RETURN with a barrier, or nothing. */
4230 gsi = gsi_last_bb (exit_bb);
4231 if (!gimple_omp_return_nowait_p (gsi_stmt (gsi)))
4232 force_gimple_operand_gsi (&gsi, build_omp_barrier (), false, NULL_TREE,
4233 false, GSI_SAME_STMT);
4234 gsi_remove (&gsi, true);
773c5ba7 4235
4236 /* Connect all the blocks. */
ac6e3339 4237 find_edge (entry_bb, seq_start_bb)->flags = EDGE_FALSE_VALUE;
4238 find_edge (entry_bb, fin_bb)->flags = EDGE_TRUE_VALUE;
79acaae1 4239
ac6e3339 4240 find_edge (cont_bb, body_bb)->flags = EDGE_TRUE_VALUE;
61e47ac8 4241 find_edge (cont_bb, fin_bb)->flags = EDGE_FALSE_VALUE;
48e1416a 4242
79acaae1 4243 set_immediate_dominator (CDI_DOMINATORS, seq_start_bb, entry_bb);
4244 set_immediate_dominator (CDI_DOMINATORS, body_bb,
4245 recompute_dominator (CDI_DOMINATORS, body_bb));
4246 set_immediate_dominator (CDI_DOMINATORS, fin_bb,
4247 recompute_dominator (CDI_DOMINATORS, fin_bb));
1e8e9920 4248}
4249
773c5ba7 4250
4251/* A subroutine of expand_omp_for. Generate code for a parallel
4252 loop with static schedule and a specified chunk size. Given
4253 parameters:
1e8e9920 4254
4255 for (V = N1; V cond N2; V += STEP) BODY;
4256
4257 where COND is "<" or ">", we generate pseudocode
4258
4259 if (cond is <)
4260 adj = STEP - 1;
4261 else
4262 adj = STEP + 1;
fd6481cf 4263 if ((__typeof (V)) -1 > 0 && cond is >)
4264 n = -(adj + N2 - N1) / -STEP;
4265 else
4266 n = (adj + N2 - N1) / STEP;
1e8e9920 4267 trip = 0;
79acaae1 4268 V = threadid * CHUNK * STEP + N1; -- this extra definition of V is
4269 here so that V is defined
4270 if the loop is not entered
1e8e9920 4271 L0:
4272 s0 = (trip * nthreads + threadid) * CHUNK;
4273 e0 = min(s0 + CHUNK, n);
4274 if (s0 < n) goto L1; else goto L4;
4275 L1:
4276 V = s0 * STEP + N1;
4277 e = e0 * STEP + N1;
4278 L2:
4279 BODY;
4280 V += STEP;
4281 if (V cond e) goto L2; else goto L3;
4282 L3:
4283 trip += 1;
4284 goto L0;
4285 L4:
1e8e9920 4286*/
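/* A worked instance of the chunked schedule above (numbers chosen for
   illustration): with n = 7, nthreads = 2 and CHUNK = 2, thread 0 claims
   [s0, e0) = [0, 2) on trip 0 and [4, 6) on trip 1, while thread 1 claims
   [2, 4) and then [6, 7); on the following trip s0 >= n for both threads,
   so each exits through L4.  */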
4287
61e47ac8 4288static void
75a70cf9 4289expand_omp_for_static_chunk (struct omp_region *region, struct omp_for_data *fd)
1e8e9920 4290{
75a70cf9 4291 tree n, s0, e0, e, t;
79acaae1 4292 tree trip_var, trip_init, trip_main, trip_back, nthreads, threadid;
75a70cf9 4293 tree type, itype, v_main, v_back, v_extra;
773c5ba7 4294 basic_block entry_bb, exit_bb, body_bb, seq_start_bb, iter_part_bb;
61e47ac8 4295 basic_block trip_update_bb, cont_bb, fin_bb;
75a70cf9 4296 gimple_stmt_iterator si;
4297 gimple stmt;
4298 edge se;
1e8e9920 4299
fd6481cf 4300 itype = type = TREE_TYPE (fd->loop.v);
4301 if (POINTER_TYPE_P (type))
4302 itype = lang_hooks.types.type_for_size (TYPE_PRECISION (type), 0);
1e8e9920 4303
61e47ac8 4304 entry_bb = region->entry;
ac6e3339 4305 se = split_block (entry_bb, last_stmt (entry_bb));
4306 entry_bb = se->src;
4307 iter_part_bb = se->dest;
61e47ac8 4308 cont_bb = region->cont;
ac6e3339 4309 gcc_assert (EDGE_COUNT (iter_part_bb->succs) == 2);
4310 gcc_assert (BRANCH_EDGE (iter_part_bb)->dest
4311 == FALLTHRU_EDGE (cont_bb)->dest);
4312 seq_start_bb = split_edge (FALLTHRU_EDGE (iter_part_bb));
4313 body_bb = single_succ (seq_start_bb);
4314 gcc_assert (BRANCH_EDGE (cont_bb)->dest == body_bb);
4315 gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
4316 fin_bb = FALLTHRU_EDGE (cont_bb)->dest;
4317 trip_update_bb = split_edge (FALLTHRU_EDGE (cont_bb));
61e47ac8 4318 exit_bb = region->exit;
773c5ba7 4319
773c5ba7 4320 /* Trip and adjustment setup goes in ENTRY_BB. */
75a70cf9 4321 si = gsi_last_bb (entry_bb);
4322 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_FOR);
773c5ba7 4323
c2f47e15 4324 t = build_call_expr (built_in_decls[BUILT_IN_OMP_GET_NUM_THREADS], 0);
fd6481cf 4325 t = fold_convert (itype, t);
75a70cf9 4326 nthreads = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4327 true, GSI_SAME_STMT);
48e1416a 4328
c2f47e15 4329 t = build_call_expr (built_in_decls[BUILT_IN_OMP_GET_THREAD_NUM], 0);
fd6481cf 4330 t = fold_convert (itype, t);
75a70cf9 4331 threadid = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4332 true, GSI_SAME_STMT);
79acaae1 4333
fd6481cf 4334 fd->loop.n1
75a70cf9 4335 = force_gimple_operand_gsi (&si, fold_convert (type, fd->loop.n1),
4336 true, NULL_TREE, true, GSI_SAME_STMT);
fd6481cf 4337 fd->loop.n2
75a70cf9 4338 = force_gimple_operand_gsi (&si, fold_convert (itype, fd->loop.n2),
4339 true, NULL_TREE, true, GSI_SAME_STMT);
fd6481cf 4340 fd->loop.step
75a70cf9 4341 = force_gimple_operand_gsi (&si, fold_convert (itype, fd->loop.step),
4342 true, NULL_TREE, true, GSI_SAME_STMT);
79acaae1 4343 fd->chunk_size
75a70cf9 4344 = force_gimple_operand_gsi (&si, fold_convert (itype, fd->chunk_size),
4345 true, NULL_TREE, true, GSI_SAME_STMT);
fd6481cf 4346
4347 t = build_int_cst (itype, (fd->loop.cond_code == LT_EXPR ? -1 : 1));
4348 t = fold_build2 (PLUS_EXPR, itype, fd->loop.step, t);
4349 t = fold_build2 (PLUS_EXPR, itype, t, fd->loop.n2);
4350 t = fold_build2 (MINUS_EXPR, itype, t, fold_convert (itype, fd->loop.n1));
4351 if (TYPE_UNSIGNED (itype) && fd->loop.cond_code == GT_EXPR)
4352 t = fold_build2 (TRUNC_DIV_EXPR, itype,
4353 fold_build1 (NEGATE_EXPR, itype, t),
4354 fold_build1 (NEGATE_EXPR, itype, fd->loop.step));
4355 else
4356 t = fold_build2 (TRUNC_DIV_EXPR, itype, t, fd->loop.step);
4357 t = fold_convert (itype, t);
75a70cf9 4358 n = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4359 true, GSI_SAME_STMT);
79acaae1 4360
fd6481cf 4361 trip_var = create_tmp_var (itype, ".trip");
79acaae1 4362 if (gimple_in_ssa_p (cfun))
4363 {
4364 add_referenced_var (trip_var);
75a70cf9 4365 trip_init = make_ssa_name (trip_var, NULL);
4366 trip_main = make_ssa_name (trip_var, NULL);
4367 trip_back = make_ssa_name (trip_var, NULL);
79acaae1 4368 }
1e8e9920 4369 else
79acaae1 4370 {
4371 trip_init = trip_var;
4372 trip_main = trip_var;
4373 trip_back = trip_var;
4374 }
1e8e9920 4375
75a70cf9 4376 stmt = gimple_build_assign (trip_init, build_int_cst (itype, 0));
4377 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
773c5ba7 4378
fd6481cf 4379 t = fold_build2 (MULT_EXPR, itype, threadid, fd->chunk_size);
4380 t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
4381 if (POINTER_TYPE_P (type))
4382 t = fold_build2 (POINTER_PLUS_EXPR, type, fd->loop.n1,
4383 fold_convert (sizetype, t));
4384 else
4385 t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
75a70cf9 4386 v_extra = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4387 true, GSI_SAME_STMT);
79acaae1 4388
75a70cf9 4389 /* Remove the GIMPLE_OMP_FOR. */
4390 gsi_remove (&si, true);
773c5ba7 4391
4392 /* Iteration space partitioning goes in ITER_PART_BB. */
75a70cf9 4393 si = gsi_last_bb (iter_part_bb);
1e8e9920 4394
fd6481cf 4395 t = fold_build2 (MULT_EXPR, itype, trip_main, nthreads);
4396 t = fold_build2 (PLUS_EXPR, itype, t, threadid);
4397 t = fold_build2 (MULT_EXPR, itype, t, fd->chunk_size);
75a70cf9 4398 s0 = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4399 false, GSI_CONTINUE_LINKING);
1e8e9920 4400
fd6481cf 4401 t = fold_build2 (PLUS_EXPR, itype, s0, fd->chunk_size);
4402 t = fold_build2 (MIN_EXPR, itype, t, n);
75a70cf9 4403 e0 = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4404 false, GSI_CONTINUE_LINKING);
1e8e9920 4405
4406 t = build2 (LT_EXPR, boolean_type_node, s0, n);
75a70cf9 4407 gsi_insert_after (&si, gimple_build_cond_empty (t), GSI_CONTINUE_LINKING);
773c5ba7 4408
4409 /* Setup code for sequential iteration goes in SEQ_START_BB. */
75a70cf9 4410 si = gsi_start_bb (seq_start_bb);
1e8e9920 4411
fd6481cf 4412 t = fold_convert (itype, s0);
4413 t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
4414 if (POINTER_TYPE_P (type))
4415 t = fold_build2 (POINTER_PLUS_EXPR, type, fd->loop.n1,
4416 fold_convert (sizetype, t));
4417 else
4418 t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
75a70cf9 4419 t = force_gimple_operand_gsi (&si, t, false, NULL_TREE,
4420 false, GSI_CONTINUE_LINKING);
4421 stmt = gimple_build_assign (fd->loop.v, t);
4422 gsi_insert_after (&si, stmt, GSI_CONTINUE_LINKING);
1e8e9920 4423
fd6481cf 4424 t = fold_convert (itype, e0);
4425 t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
4426 if (POINTER_TYPE_P (type))
4427 t = fold_build2 (POINTER_PLUS_EXPR, type, fd->loop.n1,
4428 fold_convert (sizetype, t));
4429 else
4430 t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
75a70cf9 4431 e = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4432 false, GSI_CONTINUE_LINKING);
1e8e9920 4433
61e47ac8 4434 /* The code controlling the sequential loop goes in CONT_BB,
75a70cf9 4435 replacing the GIMPLE_OMP_CONTINUE. */
4436 si = gsi_last_bb (cont_bb);
4437 stmt = gsi_stmt (si);
4438 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_CONTINUE);
4439 v_main = gimple_omp_continue_control_use (stmt);
4440 v_back = gimple_omp_continue_control_def (stmt);
79acaae1 4441
fd6481cf 4442 if (POINTER_TYPE_P (type))
4443 t = fold_build2 (POINTER_PLUS_EXPR, type, v_main,
4444 fold_convert (sizetype, fd->loop.step));
4445 else
75a70cf9 4446 t = fold_build2 (PLUS_EXPR, type, v_main, fd->loop.step);
4447 stmt = gimple_build_assign (v_back, t);
4448 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
79acaae1 4449
fd6481cf 4450 t = build2 (fd->loop.cond_code, boolean_type_node, v_back, e);
75a70cf9 4451 gsi_insert_before (&si, gimple_build_cond_empty (t), GSI_SAME_STMT);
48e1416a 4452
75a70cf9 4453 /* Remove GIMPLE_OMP_CONTINUE. */
4454 gsi_remove (&si, true);
773c5ba7 4455
4456 /* Trip update code goes into TRIP_UPDATE_BB. */
75a70cf9 4457 si = gsi_start_bb (trip_update_bb);
1e8e9920 4458
fd6481cf 4459 t = build_int_cst (itype, 1);
4460 t = build2 (PLUS_EXPR, itype, trip_main, t);
75a70cf9 4461 stmt = gimple_build_assign (trip_back, t);
4462 gsi_insert_after (&si, stmt, GSI_CONTINUE_LINKING);
1e8e9920 4463
75a70cf9 4464 /* Replace the GIMPLE_OMP_RETURN with a barrier, or nothing. */
4465 si = gsi_last_bb (exit_bb);
4466 if (!gimple_omp_return_nowait_p (gsi_stmt (si)))
4467 force_gimple_operand_gsi (&si, build_omp_barrier (), false, NULL_TREE,
4468 false, GSI_SAME_STMT);
4469 gsi_remove (&si, true);
1e8e9920 4470
773c5ba7 4471 /* Connect the new blocks. */
ac6e3339 4472 find_edge (iter_part_bb, seq_start_bb)->flags = EDGE_TRUE_VALUE;
4473 find_edge (iter_part_bb, fin_bb)->flags = EDGE_FALSE_VALUE;
79acaae1 4474
ac6e3339 4475 find_edge (cont_bb, body_bb)->flags = EDGE_TRUE_VALUE;
4476 find_edge (cont_bb, trip_update_bb)->flags = EDGE_FALSE_VALUE;
79acaae1 4477
ac6e3339 4478 redirect_edge_and_branch (single_succ_edge (trip_update_bb), iter_part_bb);
79acaae1 4479
4480 if (gimple_in_ssa_p (cfun))
4481 {
75a70cf9 4482 gimple_stmt_iterator psi;
4483 gimple phi;
4484 edge re, ene;
4485 edge_var_map_vector head;
4486 edge_var_map *vm;
4487 size_t i;
4488
79acaae1 4489 /* When we redirect the edge from trip_update_bb to iter_part_bb, we
4490 remove arguments of the phi nodes in fin_bb. We need to create
4491 appropriate phi nodes in iter_part_bb instead. */
4492 se = single_pred_edge (fin_bb);
4493 re = single_succ_edge (trip_update_bb);
75a70cf9 4494 head = redirect_edge_var_map_vector (re);
79acaae1 4495 ene = single_succ_edge (entry_bb);
4496
75a70cf9 4497 psi = gsi_start_phis (fin_bb);
4498 for (i = 0; !gsi_end_p (psi) && VEC_iterate (edge_var_map, head, i, vm);
4499 gsi_next (&psi), ++i)
79acaae1 4500 {
75a70cf9 4501 gimple nphi;
efbcb6de 4502 source_location locus;
75a70cf9 4503
4504 phi = gsi_stmt (psi);
4505 t = gimple_phi_result (phi);
4506 gcc_assert (t == redirect_edge_var_map_result (vm));
79acaae1 4507 nphi = create_phi_node (t, iter_part_bb);
4508 SSA_NAME_DEF_STMT (t) = nphi;
4509
4510 t = PHI_ARG_DEF_FROM_EDGE (phi, se);
efbcb6de 4511 locus = gimple_phi_arg_location_from_edge (phi, se);
4512
fd6481cf 4513 /* A special case -- fd->loop.v is not yet computed in
 4514 iter_part_bb, so we need to use v_extra instead. */
4515 if (t == fd->loop.v)
79acaae1 4516 t = v_extra;
efbcb6de 4517 add_phi_arg (nphi, t, ene, locus);
4518 locus = redirect_edge_var_map_location (vm);
4519 add_phi_arg (nphi, redirect_edge_var_map_def (vm), re, locus);
75a70cf9 4520 }
4521 gcc_assert (!gsi_end_p (psi) && i == VEC_length (edge_var_map, head));
4522 redirect_edge_var_map_clear (re);
4523 while (1)
4524 {
4525 psi = gsi_start_phis (fin_bb);
4526 if (gsi_end_p (psi))
4527 break;
4528 remove_phi_node (&psi, false);
79acaae1 4529 }
79acaae1 4530
4531 /* Make phi node for trip. */
4532 phi = create_phi_node (trip_main, iter_part_bb);
4533 SSA_NAME_DEF_STMT (trip_main) = phi;
efbcb6de 4534 add_phi_arg (phi, trip_back, single_succ_edge (trip_update_bb),
4535 UNKNOWN_LOCATION);
4536 add_phi_arg (phi, trip_init, single_succ_edge (entry_bb),
4537 UNKNOWN_LOCATION);
79acaae1 4538 }
4539
4540 set_immediate_dominator (CDI_DOMINATORS, trip_update_bb, cont_bb);
4541 set_immediate_dominator (CDI_DOMINATORS, iter_part_bb,
4542 recompute_dominator (CDI_DOMINATORS, iter_part_bb));
4543 set_immediate_dominator (CDI_DOMINATORS, fin_bb,
4544 recompute_dominator (CDI_DOMINATORS, fin_bb));
4545 set_immediate_dominator (CDI_DOMINATORS, seq_start_bb,
4546 recompute_dominator (CDI_DOMINATORS, seq_start_bb));
4547 set_immediate_dominator (CDI_DOMINATORS, body_bb,
4548 recompute_dominator (CDI_DOMINATORS, body_bb));
1e8e9920 4549}
4550
1e8e9920 4551
773c5ba7 4552/* Expand the OpenMP loop defined by REGION. */
1e8e9920 4553
773c5ba7 4554static void
4555expand_omp_for (struct omp_region *region)
4556{
4557 struct omp_for_data fd;
fd6481cf 4558 struct omp_for_data_loop *loops;
1e8e9920 4559
fd6481cf 4560 loops
4561 = (struct omp_for_data_loop *)
75a70cf9 4562 alloca (gimple_omp_for_collapse (last_stmt (region->entry))
fd6481cf 4563 * sizeof (struct omp_for_data_loop));
fd6481cf 4564 extract_omp_for_data (last_stmt (region->entry), &fd, loops);
f77459c5 4565 region->sched_kind = fd.sched_kind;
1e8e9920 4566
b3a3ddec 4567 gcc_assert (EDGE_COUNT (region->entry->succs) == 2);
4568 BRANCH_EDGE (region->entry)->flags &= ~EDGE_ABNORMAL;
4569 FALLTHRU_EDGE (region->entry)->flags &= ~EDGE_ABNORMAL;
4570 if (region->cont)
4571 {
4572 gcc_assert (EDGE_COUNT (region->cont->succs) == 2);
4573 BRANCH_EDGE (region->cont)->flags &= ~EDGE_ABNORMAL;
4574 FALLTHRU_EDGE (region->cont)->flags &= ~EDGE_ABNORMAL;
4575 }
4576
03ed154b 4577 if (fd.sched_kind == OMP_CLAUSE_SCHEDULE_STATIC
4578 && !fd.have_ordered
fd6481cf 4579 && fd.collapse == 1
ac6e3339 4580 && region->cont != NULL)
1e8e9920 4581 {
4582 if (fd.chunk_size == NULL)
61e47ac8 4583 expand_omp_for_static_nochunk (region, &fd);
1e8e9920 4584 else
61e47ac8 4585 expand_omp_for_static_chunk (region, &fd);
1e8e9920 4586 }
4587 else
4588 {
fd6481cf 4589 int fn_index, start_ix, next_ix;
4590
4591 gcc_assert (fd.sched_kind != OMP_CLAUSE_SCHEDULE_AUTO);
4592 fn_index = (fd.sched_kind == OMP_CLAUSE_SCHEDULE_RUNTIME)
75a70cf9 4593 ? 3 : fd.sched_kind;
fd6481cf 4594 fn_index += fd.have_ordered * 4;
4595 start_ix = BUILT_IN_GOMP_LOOP_STATIC_START + fn_index;
4596 next_ix = BUILT_IN_GOMP_LOOP_STATIC_NEXT + fn_index;
4597 if (fd.iter_type == long_long_unsigned_type_node)
4598 {
4599 start_ix += BUILT_IN_GOMP_LOOP_ULL_STATIC_START
4600 - BUILT_IN_GOMP_LOOP_STATIC_START;
4601 next_ix += BUILT_IN_GOMP_LOOP_ULL_STATIC_NEXT
4602 - BUILT_IN_GOMP_LOOP_STATIC_NEXT;
4603 }
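	  /* For illustration (assuming the usual OMP_CLAUSE_SCHEDULE_*
	     ordering): schedule(dynamic) without ordered gives fn_index 1,
	     selecting GOMP_loop_dynamic_start/_next; an ordered clause moves
	     four slots further, to the GOMP_loop_ordered_* entry points.  */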
b9c74b4d 4604 expand_omp_for_generic (region, &fd, (enum built_in_function) start_ix,
4605 (enum built_in_function) next_ix);
1e8e9920 4606 }
28c92cbb 4607
4608 update_ssa (TODO_update_ssa_only_virtuals);
1e8e9920 4609}
4610
1e8e9920 4611
 4612/* Expand code for an OpenMP sections directive. In pseudocode, we generate
4613
1e8e9920 4614 v = GOMP_sections_start (n);
4615 L0:
4616 switch (v)
4617 {
4618 case 0:
4619 goto L2;
4620 case 1:
4621 section 1;
4622 goto L1;
4623 case 2:
4624 ...
4625 case n:
4626 ...
1e8e9920 4627 default:
4628 abort ();
4629 }
4630 L1:
4631 v = GOMP_sections_next ();
4632 goto L0;
4633 L2:
4634 reduction;
4635
773c5ba7 4636 If this is a combined parallel sections region, replace the call to
79acaae1 4637 GOMP_sections_start with a call to GOMP_sections_next. */
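/* For illustration (not taken from this dump), a directive with two
   sections,

     #pragma omp sections
     {
     #pragma omp section
       foo ();
     #pragma omp section
       bar ();
     }

   yields v = GOMP_sections_start (2) and a switch in which case 1 runs
   foo (), case 2 runs bar (), case 0 exits to L2 and default aborts.  */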
1e8e9920 4638
4639static void
773c5ba7 4640expand_omp_sections (struct omp_region *region)
1e8e9920 4641{
f018d957 4642 tree t, u, vin = NULL, vmain, vnext, l2;
75a70cf9 4643 VEC (tree,heap) *label_vec;
4644 unsigned len;
ac6e3339 4645 basic_block entry_bb, l0_bb, l1_bb, l2_bb, default_bb;
75a70cf9 4646 gimple_stmt_iterator si, switch_si;
4647 gimple sections_stmt, stmt, cont;
9884aaf8 4648 edge_iterator ei;
4649 edge e;
61e47ac8 4650 struct omp_region *inner;
75a70cf9 4651 unsigned i, casei;
ac6e3339 4652 bool exit_reachable = region->cont != NULL;
1e8e9920 4653
ac6e3339 4654 gcc_assert (exit_reachable == (region->exit != NULL));
61e47ac8 4655 entry_bb = region->entry;
ac6e3339 4656 l0_bb = single_succ (entry_bb);
61e47ac8 4657 l1_bb = region->cont;
ac6e3339 4658 l2_bb = region->exit;
4659 if (exit_reachable)
03ed154b 4660 {
295e9e85 4661 if (single_pred_p (l2_bb) && single_pred (l2_bb) == l0_bb)
75a70cf9 4662 l2 = gimple_block_label (l2_bb);
9884aaf8 4663 else
4664 {
4665 /* This can happen if there are reductions. */
4666 len = EDGE_COUNT (l0_bb->succs);
4667 gcc_assert (len > 0);
4668 e = EDGE_SUCC (l0_bb, len - 1);
75a70cf9 4669 si = gsi_last_bb (e->dest);
6d5a0fbe 4670 l2 = NULL_TREE;
75a70cf9 4671 if (gsi_end_p (si)
4672 || gimple_code (gsi_stmt (si)) != GIMPLE_OMP_SECTION)
4673 l2 = gimple_block_label (e->dest);
9884aaf8 4674 else
4675 FOR_EACH_EDGE (e, ei, l0_bb->succs)
4676 {
75a70cf9 4677 si = gsi_last_bb (e->dest);
4678 if (gsi_end_p (si)
4679 || gimple_code (gsi_stmt (si)) != GIMPLE_OMP_SECTION)
9884aaf8 4680 {
75a70cf9 4681 l2 = gimple_block_label (e->dest);
9884aaf8 4682 break;
4683 }
4684 }
4685 }
03ed154b 4686 default_bb = create_empty_bb (l1_bb->prev_bb);
03ed154b 4687 }
4688 else
4689 {
ac6e3339 4690 default_bb = create_empty_bb (l0_bb);
75a70cf9 4691 l2 = gimple_block_label (default_bb);
03ed154b 4692 }
773c5ba7 4693
4694 /* We will build a switch() with enough cases for all the
75a70cf9 4695 GIMPLE_OMP_SECTION regions, a '0' case signalling that no more work
773c5ba7 4696 remains, and a default case to abort if something goes wrong. */
ac6e3339 4697 len = EDGE_COUNT (l0_bb->succs);
75a70cf9 4698
4699 /* Use VEC_quick_push on label_vec throughout, since we know the size
4700 in advance. */
4701 label_vec = VEC_alloc (tree, heap, len);
1e8e9920 4702
61e47ac8 4703 /* The call to GOMP_sections_start goes in ENTRY_BB, replacing the
75a70cf9 4704 GIMPLE_OMP_SECTIONS statement. */
4705 si = gsi_last_bb (entry_bb);
4706 sections_stmt = gsi_stmt (si);
4707 gcc_assert (gimple_code (sections_stmt) == GIMPLE_OMP_SECTIONS);
4708 vin = gimple_omp_sections_control (sections_stmt);
773c5ba7 4709 if (!is_combined_parallel (region))
1e8e9920 4710 {
773c5ba7 4711 /* If we are not inside a combined parallel+sections region,
4712 call GOMP_sections_start. */
ac6e3339 4713 t = build_int_cst (unsigned_type_node,
4714 exit_reachable ? len - 1 : len);
1e8e9920 4715 u = built_in_decls[BUILT_IN_GOMP_SECTIONS_START];
75a70cf9 4716 stmt = gimple_build_call (u, 1, t);
1e8e9920 4717 }
79acaae1 4718 else
4719 {
4720 /* Otherwise, call GOMP_sections_next. */
4721 u = built_in_decls[BUILT_IN_GOMP_SECTIONS_NEXT];
75a70cf9 4722 stmt = gimple_build_call (u, 0);
79acaae1 4723 }
75a70cf9 4724 gimple_call_set_lhs (stmt, vin);
4725 gsi_insert_after (&si, stmt, GSI_SAME_STMT);
4726 gsi_remove (&si, true);
4727
4728 /* The switch() statement replacing GIMPLE_OMP_SECTIONS_SWITCH goes in
4729 L0_BB. */
4730 switch_si = gsi_last_bb (l0_bb);
4731 gcc_assert (gimple_code (gsi_stmt (switch_si)) == GIMPLE_OMP_SECTIONS_SWITCH);
79acaae1 4732 if (exit_reachable)
4733 {
4734 cont = last_stmt (l1_bb);
75a70cf9 4735 gcc_assert (gimple_code (cont) == GIMPLE_OMP_CONTINUE);
4736 vmain = gimple_omp_continue_control_use (cont);
4737 vnext = gimple_omp_continue_control_def (cont);
79acaae1 4738 }
4739 else
4740 {
4741 vmain = vin;
4742 vnext = NULL_TREE;
4743 }
1e8e9920 4744
ac6e3339 4745 i = 0;
4746 if (exit_reachable)
4747 {
4748 t = build3 (CASE_LABEL_EXPR, void_type_node,
4749 build_int_cst (unsigned_type_node, 0), NULL, l2);
75a70cf9 4750 VEC_quick_push (tree, label_vec, t);
ac6e3339 4751 i++;
4752 }
03ed154b 4753
75a70cf9 4754 /* Convert each GIMPLE_OMP_SECTION into a CASE_LABEL_EXPR. */
ac6e3339 4755 for (inner = region->inner, casei = 1;
4756 inner;
4757 inner = inner->next, i++, casei++)
1e8e9920 4758 {
773c5ba7 4759 basic_block s_entry_bb, s_exit_bb;
4760
9884aaf8 4761 /* Skip optional reduction region. */
75a70cf9 4762 if (inner->type == GIMPLE_OMP_ATOMIC_LOAD)
9884aaf8 4763 {
4764 --i;
4765 --casei;
4766 continue;
4767 }
4768
61e47ac8 4769 s_entry_bb = inner->entry;
4770 s_exit_bb = inner->exit;
1e8e9920 4771
75a70cf9 4772 t = gimple_block_label (s_entry_bb);
ac6e3339 4773 u = build_int_cst (unsigned_type_node, casei);
1e8e9920 4774 u = build3 (CASE_LABEL_EXPR, void_type_node, u, NULL, t);
75a70cf9 4775 VEC_quick_push (tree, label_vec, u);
61e47ac8 4776
75a70cf9 4777 si = gsi_last_bb (s_entry_bb);
4778 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SECTION);
4779 gcc_assert (i < len || gimple_omp_section_last_p (gsi_stmt (si)));
4780 gsi_remove (&si, true);
61e47ac8 4781 single_succ_edge (s_entry_bb)->flags = EDGE_FALLTHRU;
03ed154b 4782
4783 if (s_exit_bb == NULL)
4784 continue;
4785
75a70cf9 4786 si = gsi_last_bb (s_exit_bb);
4787 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_RETURN);
4788 gsi_remove (&si, true);
03ed154b 4789
773c5ba7 4790 single_succ_edge (s_exit_bb)->flags = EDGE_FALLTHRU;
1e8e9920 4791 }
4792
773c5ba7 4793 /* Error handling code goes in DEFAULT_BB. */
75a70cf9 4794 t = gimple_block_label (default_bb);
1e8e9920 4795 u = build3 (CASE_LABEL_EXPR, void_type_node, NULL, NULL, t);
61e47ac8 4796 make_edge (l0_bb, default_bb, 0);
1e8e9920 4797
75a70cf9 4798 stmt = gimple_build_switch_vec (vmain, u, label_vec);
4799 gsi_insert_after (&switch_si, stmt, GSI_SAME_STMT);
4800 gsi_remove (&switch_si, true);
4801 VEC_free (tree, heap, label_vec);
4802
4803 si = gsi_start_bb (default_bb);
4804 stmt = gimple_build_call (built_in_decls[BUILT_IN_TRAP], 0);
4805 gsi_insert_after (&si, stmt, GSI_CONTINUE_LINKING);
773c5ba7 4806
ac6e3339 4807 if (exit_reachable)
03ed154b 4808 {
ac6e3339 4809 /* Code to get the next section goes in L1_BB. */
75a70cf9 4810 si = gsi_last_bb (l1_bb);
4811 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_CONTINUE);
1e8e9920 4812
75a70cf9 4813 stmt = gimple_build_call (built_in_decls[BUILT_IN_GOMP_SECTIONS_NEXT], 0);
4814 gimple_call_set_lhs (stmt, vnext);
4815 gsi_insert_after (&si, stmt, GSI_SAME_STMT);
4816 gsi_remove (&si, true);
773c5ba7 4817
ac6e3339 4818 single_succ_edge (l1_bb)->flags = EDGE_FALLTHRU;
4819
75a70cf9 4820 /* Cleanup function replaces GIMPLE_OMP_RETURN in EXIT_BB. */
4821 si = gsi_last_bb (l2_bb);
4822 if (gimple_omp_return_nowait_p (gsi_stmt (si)))
03ed154b 4823 t = built_in_decls[BUILT_IN_GOMP_SECTIONS_END_NOWAIT];
4824 else
4825 t = built_in_decls[BUILT_IN_GOMP_SECTIONS_END];
75a70cf9 4826 stmt = gimple_build_call (t, 0);
4827 gsi_insert_after (&si, stmt, GSI_SAME_STMT);
4828 gsi_remove (&si, true);
03ed154b 4829 }
773c5ba7 4830
79acaae1 4831 set_immediate_dominator (CDI_DOMINATORS, default_bb, l0_bb);
773c5ba7 4832}
1e8e9920 4833
1e8e9920 4834
61e47ac8 4835/* Expand code for an OpenMP single directive. We've already expanded
 4836 much of the code; here we simply place the GOMP_barrier call. */
4837
4838static void
4839expand_omp_single (struct omp_region *region)
4840{
4841 basic_block entry_bb, exit_bb;
75a70cf9 4842 gimple_stmt_iterator si;
61e47ac8 4843 bool need_barrier = false;
4844
4845 entry_bb = region->entry;
4846 exit_bb = region->exit;
4847
75a70cf9 4848 si = gsi_last_bb (entry_bb);
61e47ac8 4849 /* The terminal barrier at the end of a GOMP_single_copy sequence cannot
4850 be removed. We need to ensure that the thread that entered the single
4851 does not exit before the data is copied out by the other threads. */
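  /* For illustration (not taken from this dump): with
	 #pragma omp single copyprivate (x)
     every thread must block at this barrier until the thread that executed
     the single region has broadcast its value of x to the others.  */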
75a70cf9 4852 if (find_omp_clause (gimple_omp_single_clauses (gsi_stmt (si)),
61e47ac8 4853 OMP_CLAUSE_COPYPRIVATE))
4854 need_barrier = true;
75a70cf9 4855 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SINGLE);
4856 gsi_remove (&si, true);
61e47ac8 4857 single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
4858
75a70cf9 4859 si = gsi_last_bb (exit_bb);
4860 if (!gimple_omp_return_nowait_p (gsi_stmt (si)) || need_barrier)
4861 force_gimple_operand_gsi (&si, build_omp_barrier (), false, NULL_TREE,
4862 false, GSI_SAME_STMT);
4863 gsi_remove (&si, true);
61e47ac8 4864 single_succ_edge (exit_bb)->flags = EDGE_FALLTHRU;
4865}
4866
4867
4868/* Generic expansion for OpenMP synchronization directives: master,
4869 ordered and critical. All we need to do here is remove the entry
4870 and exit markers for REGION. */
773c5ba7 4871
4872static void
4873expand_omp_synch (struct omp_region *region)
4874{
4875 basic_block entry_bb, exit_bb;
75a70cf9 4876 gimple_stmt_iterator si;
773c5ba7 4877
61e47ac8 4878 entry_bb = region->entry;
4879 exit_bb = region->exit;
773c5ba7 4880
75a70cf9 4881 si = gsi_last_bb (entry_bb);
4882 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SINGLE
4883 || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_MASTER
4884 || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ORDERED
4885 || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_CRITICAL);
4886 gsi_remove (&si, true);
773c5ba7 4887 single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
4888
03ed154b 4889 if (exit_bb)
4890 {
75a70cf9 4891 si = gsi_last_bb (exit_bb);
4892 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_RETURN);
4893 gsi_remove (&si, true);
03ed154b 4894 single_succ_edge (exit_bb)->flags = EDGE_FALLTHRU;
4895 }
773c5ba7 4896}
1e8e9920 4897
cb7f680b 4898/* A subroutine of expand_omp_atomic. Attempt to implement the atomic
4899 operation as a __sync_fetch_and_op builtin. INDEX is log2 of the
4900 size of the data type, and thus usable to find the index of the builtin
4901 decl. Returns false if the expression is not of the proper form. */
4902
4903static bool
4904expand_omp_atomic_fetch_op (basic_block load_bb,
4905 tree addr, tree loaded_val,
4906 tree stored_val, int index)
4907{
4908 enum built_in_function base;
4909 tree decl, itype, call;
4910 enum insn_code *optab;
4911 tree rhs;
4912 basic_block store_bb = single_succ (load_bb);
75a70cf9 4913 gimple_stmt_iterator gsi;
4914 gimple stmt;
389dd41b 4915 location_t loc;
cb7f680b 4916
4917 /* We expect to find the following sequences:
48e1416a 4918
cb7f680b 4919 load_bb:
75a70cf9 4920 GIMPLE_OMP_ATOMIC_LOAD (tmp, mem)
cb7f680b 4921
4922 store_bb:
4923 val = tmp OP something; (or: something OP tmp)
48e1416a 4924 GIMPLE_OMP_STORE (val)
cb7f680b 4925
48e1416a 4926 ???FIXME: Allow a more flexible sequence.
cb7f680b 4927 Perhaps use data flow to pick the statements.
48e1416a 4928
cb7f680b 4929 */
4930
75a70cf9 4931 gsi = gsi_after_labels (store_bb);
4932 stmt = gsi_stmt (gsi);
389dd41b 4933 loc = gimple_location (stmt);
75a70cf9 4934 if (!is_gimple_assign (stmt))
cb7f680b 4935 return false;
75a70cf9 4936 gsi_next (&gsi);
4937 if (gimple_code (gsi_stmt (gsi)) != GIMPLE_OMP_ATOMIC_STORE)
cb7f680b 4938 return false;
4939
75a70cf9 4940 if (!operand_equal_p (gimple_assign_lhs (stmt), stored_val, 0))
cb7f680b 4941 return false;
4942
cb7f680b 4943 /* Check for one of the supported fetch-op operations. */
75a70cf9 4944 switch (gimple_assign_rhs_code (stmt))
cb7f680b 4945 {
4946 case PLUS_EXPR:
4947 case POINTER_PLUS_EXPR:
4948 base = BUILT_IN_FETCH_AND_ADD_N;
4949 optab = sync_add_optab;
4950 break;
4951 case MINUS_EXPR:
4952 base = BUILT_IN_FETCH_AND_SUB_N;
4953 optab = sync_add_optab;
4954 break;
4955 case BIT_AND_EXPR:
4956 base = BUILT_IN_FETCH_AND_AND_N;
4957 optab = sync_and_optab;
4958 break;
4959 case BIT_IOR_EXPR:
4960 base = BUILT_IN_FETCH_AND_OR_N;
4961 optab = sync_ior_optab;
4962 break;
4963 case BIT_XOR_EXPR:
4964 base = BUILT_IN_FETCH_AND_XOR_N;
4965 optab = sync_xor_optab;
4966 break;
4967 default:
4968 return false;
4969 }
4970 /* Make sure the expression is of the proper form. */
75a70cf9 4971 if (operand_equal_p (gimple_assign_rhs1 (stmt), loaded_val, 0))
4972 rhs = gimple_assign_rhs2 (stmt);
4973 else if (commutative_tree_code (gimple_assign_rhs_code (stmt))
4974 && operand_equal_p (gimple_assign_rhs2 (stmt), loaded_val, 0))
4975 rhs = gimple_assign_rhs1 (stmt);
cb7f680b 4976 else
4977 return false;
4978
4979 decl = built_in_decls[base + index + 1];
4980 itype = TREE_TYPE (TREE_TYPE (decl));
4981
4982 if (optab[TYPE_MODE (itype)] == CODE_FOR_nothing)
4983 return false;
4984
75a70cf9 4985 gsi = gsi_last_bb (load_bb);
4986 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_ATOMIC_LOAD);
389dd41b 4987 call = build_call_expr_loc (loc,
4988 decl, 2, addr,
4989 fold_convert_loc (loc, itype, rhs));
4990 call = fold_convert_loc (loc, void_type_node, call);
75a70cf9 4991 force_gimple_operand_gsi (&gsi, call, true, NULL_TREE, true, GSI_SAME_STMT);
4992 gsi_remove (&gsi, true);
cb7f680b 4993
75a70cf9 4994 gsi = gsi_last_bb (store_bb);
4995 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_ATOMIC_STORE);
4996 gsi_remove (&gsi, true);
4997 gsi = gsi_last_bb (store_bb);
4998 gsi_remove (&gsi, true);
cb7f680b 4999
5000 if (gimple_in_ssa_p (cfun))
5001 update_ssa (TODO_update_ssa_no_phi);
5002
5003 return true;
5004}
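
/* Illustrative example (editor's sketch, assuming a 4-byte int x):

     #pragma omp atomic
     x += 1;

   is lowered to a GIMPLE_OMP_ATOMIC_LOAD/GIMPLE_OMP_ATOMIC_STORE pair
   matching the pattern above, and this function replaces the pair with
   the single call

     __sync_fetch_and_add_4 (&x, 1);

   INDEX (log2 (4) == 2 here) selects the _4 variant via
   built_in_decls[base + index + 1].  */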
5005
5006/* A subroutine of expand_omp_atomic. Implement the atomic operation as:
5007
5008 oldval = *addr;
5009 repeat:
5010 newval = rhs; // with oldval replacing *addr in rhs
 5011	 actual = __sync_val_compare_and_swap (addr, oldval, newval);
 5012	 if (actual != oldval)
 5013	   { oldval = actual; goto repeat; }
5014
5015 INDEX is log2 of the size of the data type, and thus usable to find the
5016 index of the builtin decl. */
5017
5018static bool
5019expand_omp_atomic_pipeline (basic_block load_bb, basic_block store_bb,
5020 tree addr, tree loaded_val, tree stored_val,
5021 int index)
5022{
790368c5 5023 tree loadedi, storedi, initial, new_storedi, old_vali;
cb7f680b 5024 tree type, itype, cmpxchg, iaddr;
75a70cf9 5025 gimple_stmt_iterator si;
cb7f680b 5026 basic_block loop_header = single_succ (load_bb);
75a70cf9 5027 gimple phi, stmt;
cb7f680b 5028 edge e;
5029
5030 cmpxchg = built_in_decls[BUILT_IN_VAL_COMPARE_AND_SWAP_N + index + 1];
5031 type = TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (addr)));
5032 itype = TREE_TYPE (TREE_TYPE (cmpxchg));
5033
5034 if (sync_compare_and_swap[TYPE_MODE (itype)] == CODE_FOR_nothing)
5035 return false;
5036
75a70cf9 5037 /* Load the initial value, replacing the GIMPLE_OMP_ATOMIC_LOAD. */
5038 si = gsi_last_bb (load_bb);
5039 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_LOAD);
5040
790368c5 5041 /* For floating-point values, we'll need to view-convert them to integers
5042 so that we can perform the atomic compare and swap. Simplify the
5043 following code by always setting up the "i"ntegral variables. */
5044 if (!INTEGRAL_TYPE_P (type) && !POINTER_TYPE_P (type))
5045 {
75a70cf9 5046 tree iaddr_val;
5047
8115f0af 5048 iaddr = create_tmp_var (build_pointer_type_for_mode (itype, ptr_mode,
5049 true), NULL);
75a70cf9 5050 iaddr_val
5051 = force_gimple_operand_gsi (&si,
5052 fold_convert (TREE_TYPE (iaddr), addr),
5053 false, NULL_TREE, true, GSI_SAME_STMT);
5054 stmt = gimple_build_assign (iaddr, iaddr_val);
5055 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
790368c5 5056 loadedi = create_tmp_var (itype, NULL);
5057 if (gimple_in_ssa_p (cfun))
5058 {
5059 add_referenced_var (iaddr);
5060 add_referenced_var (loadedi);
5061 loadedi = make_ssa_name (loadedi, NULL);
5062 }
5063 }
5064 else
5065 {
5066 iaddr = addr;
5067 loadedi = loaded_val;
5068 }
75a70cf9 5069
5070 initial = force_gimple_operand_gsi (&si, build_fold_indirect_ref (iaddr),
5071 true, NULL_TREE, true, GSI_SAME_STMT);
790368c5 5072
5073 /* Move the value to the LOADEDI temporary. */
cb7f680b 5074 if (gimple_in_ssa_p (cfun))
5075 {
75a70cf9 5076 gcc_assert (gimple_seq_empty_p (phi_nodes (loop_header)));
790368c5 5077 phi = create_phi_node (loadedi, loop_header);
5078 SSA_NAME_DEF_STMT (loadedi) = phi;
cb7f680b 5079 SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, single_succ_edge (load_bb)),
5080 initial);
5081 }
5082 else
75a70cf9 5083 gsi_insert_before (&si,
5084 gimple_build_assign (loadedi, initial),
5085 GSI_SAME_STMT);
790368c5 5086 if (loadedi != loaded_val)
5087 {
75a70cf9 5088 gimple_stmt_iterator gsi2;
5089 tree x;
790368c5 5090
5091 x = build1 (VIEW_CONVERT_EXPR, type, loadedi);
75a70cf9 5092 gsi2 = gsi_start_bb (loop_header);
790368c5 5093 if (gimple_in_ssa_p (cfun))
5094 {
75a70cf9 5095 gimple stmt;
5096 x = force_gimple_operand_gsi (&gsi2, x, true, NULL_TREE,
5097 true, GSI_SAME_STMT);
5098 stmt = gimple_build_assign (loaded_val, x);
5099 gsi_insert_before (&gsi2, stmt, GSI_SAME_STMT);
790368c5 5100 }
5101 else
5102 {
75a70cf9 5103 x = build2 (MODIFY_EXPR, TREE_TYPE (loaded_val), loaded_val, x);
5104 force_gimple_operand_gsi (&gsi2, x, true, NULL_TREE,
5105 true, GSI_SAME_STMT);
790368c5 5106 }
5107 }
75a70cf9 5108 gsi_remove (&si, true);
cb7f680b 5109
75a70cf9 5110 si = gsi_last_bb (store_bb);
5111 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_STORE);
cb7f680b 5112
790368c5 5113 if (iaddr == addr)
5114 storedi = stored_val;
cb7f680b 5115 else
790368c5 5116    storedi
75a70cf9 5117      = force_gimple_operand_gsi (&si,
790368c5 5118 build1 (VIEW_CONVERT_EXPR, itype,
5119 stored_val), true, NULL_TREE, true,
75a70cf9 5120 GSI_SAME_STMT);
cb7f680b 5121
5122 /* Build the compare&swap statement. */
5123 new_storedi = build_call_expr (cmpxchg, 3, iaddr, loadedi, storedi);
75a70cf9 5124 new_storedi = force_gimple_operand_gsi (&si,
87f9ffa4 5125 fold_convert (TREE_TYPE (loadedi),
5126 new_storedi),
cb7f680b 5127 true, NULL_TREE,
75a70cf9 5128 true, GSI_SAME_STMT);
cb7f680b 5129
5130 if (gimple_in_ssa_p (cfun))
5131 old_vali = loadedi;
5132 else
5133 {
87f9ffa4 5134 old_vali = create_tmp_var (TREE_TYPE (loadedi), NULL);
790368c5 5135 if (gimple_in_ssa_p (cfun))
5136 add_referenced_var (old_vali);
75a70cf9 5137 stmt = gimple_build_assign (old_vali, loadedi);
5138 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
cb7f680b 5139
75a70cf9 5140 stmt = gimple_build_assign (loadedi, new_storedi);
5141 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
cb7f680b 5142 }
5143
5144 /* Note that we always perform the comparison as an integer, even for
48e1416a 5145 floating point. This allows the atomic operation to properly
cb7f680b 5146 succeed even with NaNs and -0.0. */
75a70cf9 5147 stmt = gimple_build_cond_empty
5148 (build2 (NE_EXPR, boolean_type_node,
5149 new_storedi, old_vali));
5150 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
cb7f680b 5151
5152 /* Update cfg. */
5153 e = single_succ_edge (store_bb);
5154 e->flags &= ~EDGE_FALLTHRU;
5155 e->flags |= EDGE_FALSE_VALUE;
5156
5157 e = make_edge (store_bb, loop_header, EDGE_TRUE_VALUE);
5158
790368c5 5159 /* Copy the new value to loadedi (we already did that before the condition
cb7f680b 5160 if we are not in SSA). */
5161 if (gimple_in_ssa_p (cfun))
5162 {
75a70cf9 5163 phi = gimple_seq_first_stmt (phi_nodes (loop_header));
790368c5 5164 SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, e), new_storedi);
cb7f680b 5165 }
5166
75a70cf9 5167 /* Remove GIMPLE_OMP_ATOMIC_STORE. */
5168 gsi_remove (&si, true);
cb7f680b 5169
5170 if (gimple_in_ssa_p (cfun))
5171 update_ssa (TODO_update_ssa_no_phi);
5172
5173 return true;
5174}
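
/* Illustrative example (editor's sketch): for a 4-byte float f,

     #pragma omp atomic
     f *= 2.0f;

   has no fetch-and-op builtin, so the code above emits, schematically,

     itype *iaddr = (itype *) &f;       // itype: 4-byte integer type
     oldval = *iaddr;
   repeat:
     newval = VIEW_CONVERT_EXPR<itype> (VIEW_CONVERT_EXPR<float> (oldval)
                                        * 2.0f);
     actual = __sync_val_compare_and_swap_4 (iaddr, oldval, newval);
     if (actual != oldval)
       { oldval = actual; goto repeat; }

   with the retry comparison done on the integer view, so a NaN or -0.0
   value cannot make the exit test fail forever.  */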
5175
5176/* A subroutine of expand_omp_atomic. Implement the atomic operation as:
5177
5178 GOMP_atomic_start ();
5179 *addr = rhs;
5180 GOMP_atomic_end ();
5181
5182 The result is not globally atomic, but works so long as all parallel
5183 references are within #pragma omp atomic directives. According to
 5184	 responses received from omp@openmp.org, this appears to be within
 5185	 spec.  That makes sense, since that's how several other compilers
48e1416a 5186	 handle this situation as well.
75a70cf9 5187 LOADED_VAL and ADDR are the operands of GIMPLE_OMP_ATOMIC_LOAD we're
5188 expanding. STORED_VAL is the operand of the matching
5189 GIMPLE_OMP_ATOMIC_STORE.
cb7f680b 5190
48e1416a 5191 We replace
5192 GIMPLE_OMP_ATOMIC_LOAD (loaded_val, addr) with
cb7f680b 5193 loaded_val = *addr;
5194
5195 and replace
75a70cf9 5196	 GIMPLE_OMP_ATOMIC_STORE (stored_val) with
48e1416a 5197 *addr = stored_val;
cb7f680b 5198*/
5199
5200static bool
5201expand_omp_atomic_mutex (basic_block load_bb, basic_block store_bb,
5202 tree addr, tree loaded_val, tree stored_val)
5203{
75a70cf9 5204 gimple_stmt_iterator si;
5205 gimple stmt;
cb7f680b 5206 tree t;
5207
75a70cf9 5208 si = gsi_last_bb (load_bb);
5209 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_LOAD);
cb7f680b 5210
5211 t = built_in_decls[BUILT_IN_GOMP_ATOMIC_START];
389dd41b 5212 t = build_function_call_expr (UNKNOWN_LOCATION, t, 0);
75a70cf9 5213 force_gimple_operand_gsi (&si, t, true, NULL_TREE, true, GSI_SAME_STMT);
cb7f680b 5214
75a70cf9 5215 stmt = gimple_build_assign (loaded_val, build_fold_indirect_ref (addr));
5216 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
5217 gsi_remove (&si, true);
cb7f680b 5218
75a70cf9 5219 si = gsi_last_bb (store_bb);
5220 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_STORE);
cb7f680b 5221
75a70cf9 5222 stmt = gimple_build_assign (build_fold_indirect_ref (unshare_expr (addr)),
cb7f680b 5223 stored_val);
75a70cf9 5224 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
cb7f680b 5225
5226 t = built_in_decls[BUILT_IN_GOMP_ATOMIC_END];
389dd41b 5227 t = build_function_call_expr (UNKNOWN_LOCATION, t, 0);
75a70cf9 5228 force_gimple_operand_gsi (&si, t, true, NULL_TREE, true, GSI_SAME_STMT);
5229 gsi_remove (&si, true);
cb7f680b 5230
5231 if (gimple_in_ssa_p (cfun))
5232 update_ssa (TODO_update_ssa_no_phi);
5233 return true;
5234}
5235
48e1416a 5236/* Expand a GIMPLE_OMP_ATOMIC statement.  We first try to expand it
 5237	 using expand_omp_atomic_fetch_op.  If that fails, we try to
cb7f680b 5238	 call expand_omp_atomic_pipeline, and if that fails too, the
5239 ultimate fallback is wrapping the operation in a mutex
48e1416a 5240 (expand_omp_atomic_mutex). REGION is the atomic region built
5241 by build_omp_regions_1(). */
cb7f680b 5242
5243static void
5244expand_omp_atomic (struct omp_region *region)
5245{
5246 basic_block load_bb = region->entry, store_bb = region->exit;
75a70cf9 5247 gimple load = last_stmt (load_bb), store = last_stmt (store_bb);
5248 tree loaded_val = gimple_omp_atomic_load_lhs (load);
5249 tree addr = gimple_omp_atomic_load_rhs (load);
5250 tree stored_val = gimple_omp_atomic_store_val (store);
cb7f680b 5251 tree type = TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (addr)));
5252 HOST_WIDE_INT index;
5253
5254 /* Make sure the type is one of the supported sizes. */
5255 index = tree_low_cst (TYPE_SIZE_UNIT (type), 1);
5256 index = exact_log2 (index);
5257 if (index >= 0 && index <= 4)
5258 {
5259 unsigned int align = TYPE_ALIGN_UNIT (type);
5260
5261 /* __sync builtins require strict data alignment. */
5262 if (exact_log2 (align) >= index)
5263 {
5264 /* When possible, use specialized atomic update functions. */
5265 if ((INTEGRAL_TYPE_P (type) || POINTER_TYPE_P (type))
5266 && store_bb == single_succ (load_bb))
5267 {
5268 if (expand_omp_atomic_fetch_op (load_bb, addr,
5269 loaded_val, stored_val, index))
5270 return;
5271 }
5272
5273 /* If we don't have specialized __sync builtins, try and implement
5274 as a compare and swap loop. */
5275 if (expand_omp_atomic_pipeline (load_bb, store_bb, addr,
5276 loaded_val, stored_val, index))
5277 return;
5278 }
5279 }
5280
5281 /* The ultimate fallback is wrapping the operation in a mutex. */
5282 expand_omp_atomic_mutex (load_bb, store_bb, addr, loaded_val, stored_val);
5283}
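
/* Illustrative note (editor's sketch): the size gate above accepts types
   of 1, 2, 4, 8 or 16 bytes (INDEX 0 through 4, matching the _1 .. _16
   __sync builtin variants).  The alignment gate then rejects, e.g., a
   4-byte field in a packed struct with 1-byte alignment
   (exact_log2 (1) == 0 < 2), for which only the mutex fallback is
   safe.  */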
5284
1e8e9920 5285
773c5ba7 5286/* Expand the parallel region tree rooted at REGION. Expansion
5287 proceeds in depth-first order. Innermost regions are expanded
5288 first. This way, parallel regions that require a new function to
75a70cf9 5289 be created (e.g., GIMPLE_OMP_PARALLEL) can be expanded without having any
773c5ba7 5290 internal dependencies in their body. */
5291
5292static void
5293expand_omp (struct omp_region *region)
5294{
5295 while (region)
5296 {
1d22f541 5297 location_t saved_location;
5298
d1d5b012 5299 /* First, determine whether this is a combined parallel+workshare
5300 region. */
75a70cf9 5301 if (region->type == GIMPLE_OMP_PARALLEL)
d1d5b012 5302 determine_parallel_type (region);
5303
773c5ba7 5304 if (region->inner)
5305 expand_omp (region->inner);
5306
1d22f541 5307 saved_location = input_location;
75a70cf9 5308 if (gimple_has_location (last_stmt (region->entry)))
5309 input_location = gimple_location (last_stmt (region->entry));
1d22f541 5310
61e47ac8 5311 switch (region->type)
773c5ba7 5312 {
75a70cf9 5313 case GIMPLE_OMP_PARALLEL:
5314 case GIMPLE_OMP_TASK:
fd6481cf 5315 expand_omp_taskreg (region);
5316 break;
5317
75a70cf9 5318 case GIMPLE_OMP_FOR:
61e47ac8 5319 expand_omp_for (region);
5320 break;
773c5ba7 5321
75a70cf9 5322 case GIMPLE_OMP_SECTIONS:
61e47ac8 5323 expand_omp_sections (region);
5324 break;
773c5ba7 5325
75a70cf9 5326 case GIMPLE_OMP_SECTION:
61e47ac8 5327 /* Individual omp sections are handled together with their
75a70cf9 5328 parent GIMPLE_OMP_SECTIONS region. */
61e47ac8 5329 break;
773c5ba7 5330
75a70cf9 5331 case GIMPLE_OMP_SINGLE:
61e47ac8 5332 expand_omp_single (region);
5333 break;
773c5ba7 5334
75a70cf9 5335 case GIMPLE_OMP_MASTER:
5336 case GIMPLE_OMP_ORDERED:
5337 case GIMPLE_OMP_CRITICAL:
61e47ac8 5338 expand_omp_synch (region);
5339 break;
773c5ba7 5340
75a70cf9 5341 case GIMPLE_OMP_ATOMIC_LOAD:
cb7f680b 5342 expand_omp_atomic (region);
5343 break;
5344
61e47ac8 5345 default:
5346 gcc_unreachable ();
5347 }
cc5982dc 5348
1d22f541 5349 input_location = saved_location;
773c5ba7 5350 region = region->next;
5351 }
5352}
5353
5354
5355/* Helper for build_omp_regions. Scan the dominator tree starting at
28c92cbb 5356 block BB. PARENT is the region that contains BB. If SINGLE_TREE is
 5357	 true, the function ends once a single tree is built (otherwise, the
 5358	 whole forest of OMP constructs may be built).  */
773c5ba7 5359
5360static void
28c92cbb 5361build_omp_regions_1 (basic_block bb, struct omp_region *parent,
5362 bool single_tree)
773c5ba7 5363{
75a70cf9 5364 gimple_stmt_iterator gsi;
5365 gimple stmt;
773c5ba7 5366 basic_block son;
5367
75a70cf9 5368 gsi = gsi_last_bb (bb);
5369 if (!gsi_end_p (gsi) && is_gimple_omp (gsi_stmt (gsi)))
773c5ba7 5370 {
5371 struct omp_region *region;
75a70cf9 5372 enum gimple_code code;
773c5ba7 5373
75a70cf9 5374 stmt = gsi_stmt (gsi);
5375 code = gimple_code (stmt);
5376 if (code == GIMPLE_OMP_RETURN)
773c5ba7 5377 {
5378 /* STMT is the return point out of region PARENT. Mark it
5379 as the exit point and make PARENT the immediately
5380 enclosing region. */
5381 gcc_assert (parent);
5382 region = parent;
61e47ac8 5383 region->exit = bb;
773c5ba7 5384 parent = parent->outer;
773c5ba7 5385 }
75a70cf9 5386 else if (code == GIMPLE_OMP_ATOMIC_STORE)
cb7f680b 5387 {
75a70cf9 5388	 /* GIMPLE_OMP_ATOMIC_STORE is analogous to
5389 GIMPLE_OMP_RETURN, but matches with
5390 GIMPLE_OMP_ATOMIC_LOAD. */
cb7f680b 5391 gcc_assert (parent);
75a70cf9 5392 gcc_assert (parent->type == GIMPLE_OMP_ATOMIC_LOAD);
cb7f680b 5393 region = parent;
5394 region->exit = bb;
5395 parent = parent->outer;
5396 }
5397
75a70cf9 5398 else if (code == GIMPLE_OMP_CONTINUE)
61e47ac8 5399 {
5400 gcc_assert (parent);
5401 parent->cont = bb;
5402 }
75a70cf9 5403 else if (code == GIMPLE_OMP_SECTIONS_SWITCH)
ac6e3339 5404 {
75a70cf9 5405 /* GIMPLE_OMP_SECTIONS_SWITCH is part of
5406 GIMPLE_OMP_SECTIONS, and we do nothing for it. */
5407 ;
ac6e3339 5408 }
773c5ba7 5409 else
5410 {
5411 /* Otherwise, this directive becomes the parent for a new
5412 region. */
61e47ac8 5413 region = new_omp_region (bb, code, parent);
773c5ba7 5414 parent = region;
5415 }
773c5ba7 5416 }
5417
28c92cbb 5418 if (single_tree && !parent)
5419 return;
5420
773c5ba7 5421 for (son = first_dom_son (CDI_DOMINATORS, bb);
5422 son;
5423 son = next_dom_son (CDI_DOMINATORS, son))
28c92cbb 5424 build_omp_regions_1 (son, parent, single_tree);
5425}
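
/* Illustrative example (editor's sketch): for

     #pragma omp parallel
     {
     #pragma omp for
       for (...) { ... }
     }

   the dominator walk creates a GIMPLE_OMP_PARALLEL region whose ->inner
   field points to the GIMPLE_OMP_FOR region; the loop's
   GIMPLE_OMP_CONTINUE sets region->cont, and each GIMPLE_OMP_RETURN
   closes the innermost open region by setting its ->exit block.  */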
5426
5427/* Builds the tree of OMP regions rooted at ROOT, storing it to
5428 root_omp_region. */
5429
5430static void
5431build_omp_regions_root (basic_block root)
5432{
5433 gcc_assert (root_omp_region == NULL);
5434 build_omp_regions_1 (root, NULL, true);
5435 gcc_assert (root_omp_region != NULL);
773c5ba7 5436}
5437
28c92cbb 5438/* Expand the OMP construct (and its subconstructs) starting at HEAD.  */
5439
5440void
5441omp_expand_local (basic_block head)
5442{
5443 build_omp_regions_root (head);
5444 if (dump_file && (dump_flags & TDF_DETAILS))
5445 {
5446 fprintf (dump_file, "\nOMP region tree\n\n");
5447 dump_omp_region (dump_file, root_omp_region, 0);
5448 fprintf (dump_file, "\n");
5449 }
5450
5451 remove_exit_barriers (root_omp_region);
5452 expand_omp (root_omp_region);
5453
5454 free_omp_regions ();
5455}
773c5ba7 5456
 5457/* Scan the CFG and build a tree of OMP regions.  The root of the
 5458	 OMP region tree is stored in root_omp_region.  */
5459
5460static void
5461build_omp_regions (void)
5462{
61e47ac8 5463 gcc_assert (root_omp_region == NULL);
773c5ba7 5464 calculate_dominance_info (CDI_DOMINATORS);
28c92cbb 5465 build_omp_regions_1 (ENTRY_BLOCK_PTR, NULL, false);
773c5ba7 5466}
5467
773c5ba7 5468/* Main entry point for expanding OMP-GIMPLE into runtime calls. */
5469
2a1990e9 5470static unsigned int
773c5ba7 5471execute_expand_omp (void)
5472{
5473 build_omp_regions ();
5474
61e47ac8 5475 if (!root_omp_region)
5476 return 0;
773c5ba7 5477
61e47ac8 5478 if (dump_file)
5479 {
5480 fprintf (dump_file, "\nOMP region tree\n\n");
5481 dump_omp_region (dump_file, root_omp_region, 0);
5482 fprintf (dump_file, "\n");
773c5ba7 5483 }
61e47ac8 5484
5485 remove_exit_barriers (root_omp_region);
5486
5487 expand_omp (root_omp_region);
5488
61e47ac8 5489 cleanup_tree_cfg ();
5490
5491 free_omp_regions ();
5492
2a1990e9 5493 return 0;
773c5ba7 5494}
5495
79acaae1 5496/* OMP expansion -- the default pass, run before creation of SSA form. */
5497
773c5ba7 5498static bool
5499gate_expand_omp (void)
5500{
852f689e 5501 return (flag_openmp != 0 && !seen_error ());
773c5ba7 5502}
5503
48e1416a 5504struct gimple_opt_pass pass_expand_omp =
773c5ba7 5505{
20099e35 5506 {
5507 GIMPLE_PASS,
773c5ba7 5508 "ompexp", /* name */
5509 gate_expand_omp, /* gate */
5510 execute_expand_omp, /* execute */
5511 NULL, /* sub */
5512 NULL, /* next */
5513 0, /* static_pass_number */
0b1615c1 5514 TV_NONE, /* tv_id */
773c5ba7 5515 PROP_gimple_any, /* properties_required */
41709826 5516 0, /* properties_provided */
773c5ba7 5517 0, /* properties_destroyed */
5518 0, /* todo_flags_start */
20099e35 5519 TODO_dump_func /* todo_flags_finish */
5520 }
773c5ba7 5521};
5522\f
5523/* Routines to lower OpenMP directives into OMP-GIMPLE. */
5524
75a70cf9 5525/* Lower the OpenMP sections directive in the statement at GSI_P.
5526 CTX is the enclosing OMP context for the current statement. */
773c5ba7 5527
5528static void
75a70cf9 5529lower_omp_sections (gimple_stmt_iterator *gsi_p, omp_context *ctx)
773c5ba7 5530{
75a70cf9 5531 tree block, control;
5532 gimple_stmt_iterator tgsi;
773c5ba7 5533 unsigned i, len;
75a70cf9 5534 gimple stmt, new_stmt, bind, t;
5535 gimple_seq ilist, dlist, olist, new_body, body;
dac18d1a 5536 struct gimplify_ctx gctx;
773c5ba7 5537
75a70cf9 5538 stmt = gsi_stmt (*gsi_p);
773c5ba7 5539
dac18d1a 5540 push_gimplify_context (&gctx);
773c5ba7 5541
5542 dlist = NULL;
5543 ilist = NULL;
75a70cf9 5544 lower_rec_input_clauses (gimple_omp_sections_clauses (stmt),
5545 &ilist, &dlist, ctx);
773c5ba7 5546
75a70cf9 5547 tgsi = gsi_start (gimple_omp_body (stmt));
5548 for (len = 0; !gsi_end_p (tgsi); len++, gsi_next (&tgsi))
773c5ba7 5549 continue;
5550
75a70cf9 5551 tgsi = gsi_start (gimple_omp_body (stmt));
5552 body = NULL;
5553 for (i = 0; i < len; i++, gsi_next (&tgsi))
773c5ba7 5554 {
5555 omp_context *sctx;
75a70cf9 5556 gimple sec_start;
773c5ba7 5557
75a70cf9 5558 sec_start = gsi_stmt (tgsi);
773c5ba7 5559 sctx = maybe_lookup_ctx (sec_start);
5560 gcc_assert (sctx);
5561
75a70cf9 5562 gimple_seq_add_stmt (&body, sec_start);
61e47ac8 5563
75a70cf9 5564 lower_omp (gimple_omp_body (sec_start), sctx);
5565 gimple_seq_add_seq (&body, gimple_omp_body (sec_start));
5566 gimple_omp_set_body (sec_start, NULL);
773c5ba7 5567
5568 if (i == len - 1)
5569 {
75a70cf9 5570 gimple_seq l = NULL;
5571 lower_lastprivate_clauses (gimple_omp_sections_clauses (stmt), NULL,
773c5ba7 5572 &l, ctx);
75a70cf9 5573 gimple_seq_add_seq (&body, l);
5574 gimple_omp_section_set_last (sec_start);
773c5ba7 5575 }
48e1416a 5576
75a70cf9 5577 gimple_seq_add_stmt (&body, gimple_build_omp_return (false));
773c5ba7 5578 }
1e8e9920 5579
5580 block = make_node (BLOCK);
75a70cf9 5581 bind = gimple_build_bind (NULL, body, block);
1e8e9920 5582
75a70cf9 5583 olist = NULL;
5584 lower_reduction_clauses (gimple_omp_sections_clauses (stmt), &olist, ctx);
773c5ba7 5585
1d22f541 5586 block = make_node (BLOCK);
75a70cf9 5587 new_stmt = gimple_build_bind (NULL, NULL, block);
773c5ba7 5588
1d22f541 5589 pop_gimplify_context (new_stmt);
75a70cf9 5590 gimple_bind_append_vars (new_stmt, ctx->block_vars);
5591 BLOCK_VARS (block) = gimple_bind_vars (bind);
1d22f541 5592 if (BLOCK_VARS (block))
5593 TREE_USED (block) = 1;
5594
75a70cf9 5595 new_body = NULL;
5596 gimple_seq_add_seq (&new_body, ilist);
5597 gimple_seq_add_stmt (&new_body, stmt);
5598 gimple_seq_add_stmt (&new_body, gimple_build_omp_sections_switch ());
5599 gimple_seq_add_stmt (&new_body, bind);
61e47ac8 5600
ac6e3339 5601 control = create_tmp_var (unsigned_type_node, ".section");
75a70cf9 5602 t = gimple_build_omp_continue (control, control);
5603 gimple_omp_sections_set_control (stmt, control);
5604 gimple_seq_add_stmt (&new_body, t);
61e47ac8 5605
75a70cf9 5606 gimple_seq_add_seq (&new_body, olist);
5607 gimple_seq_add_seq (&new_body, dlist);
773c5ba7 5608
75a70cf9 5609 new_body = maybe_catch_exception (new_body);
aade31a0 5610
75a70cf9 5611 t = gimple_build_omp_return
5612 (!!find_omp_clause (gimple_omp_sections_clauses (stmt),
5613 OMP_CLAUSE_NOWAIT));
5614 gimple_seq_add_stmt (&new_body, t);
61e47ac8 5615
75a70cf9 5616 gimple_bind_set_body (new_stmt, new_body);
5617 gimple_omp_set_body (stmt, NULL);
773c5ba7 5618
75a70cf9 5619 gsi_replace (gsi_p, new_stmt, true);
1e8e9920 5620}
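
/* Illustrative sketch of the result (editor's note, schematic only):

     <ilist: private/firstprivate setup>
     GIMPLE_OMP_SECTIONS <clauses, control var .section>
     GIMPLE_OMP_SECTIONS_SWITCH
     {
       GIMPLE_OMP_SECTION <body 1> GIMPLE_OMP_RETURN
       ...
       GIMPLE_OMP_SECTION <body N> <lastprivate> GIMPLE_OMP_RETURN
     }
     GIMPLE_OMP_CONTINUE (.section, .section)
     <olist: reductions>  <dlist: destructors>
     GIMPLE_OMP_RETURN (nowait if OMP_CLAUSE_NOWAIT)

   The lastprivate code is attached only to the last section; the
   .section control variable is what pass_expand_omp later turns into
   the GOMP_sections_start/GOMP_sections_next dispatch loop.  */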
5621
5622
773c5ba7 5623/* A subroutine of lower_omp_single. Expand the simple form of
75a70cf9 5624 a GIMPLE_OMP_SINGLE, without a copyprivate clause:
1e8e9920 5625
5626 if (GOMP_single_start ())
5627 BODY;
5628 [ GOMP_barrier (); ] -> unless 'nowait' is present.
773c5ba7 5629
5630 FIXME. It may be better to delay expanding the logic of this until
 5631	 pass_expand_omp.  The expanded logic may make the job more difficult
 5632	 for a synchronization analysis pass.  */
1e8e9920 5633
5634static void
75a70cf9 5635lower_omp_single_simple (gimple single_stmt, gimple_seq *pre_p)
1e8e9920 5636{
e60a6f7b 5637 location_t loc = gimple_location (single_stmt);
5638 tree tlabel = create_artificial_label (loc);
5639 tree flabel = create_artificial_label (loc);
75a70cf9 5640 gimple call, cond;
5641 tree lhs, decl;
5642
5643 decl = built_in_decls[BUILT_IN_GOMP_SINGLE_START];
5644 lhs = create_tmp_var (TREE_TYPE (TREE_TYPE (decl)), NULL);
5645 call = gimple_build_call (decl, 0);
5646 gimple_call_set_lhs (call, lhs);
5647 gimple_seq_add_stmt (pre_p, call);
5648
5649 cond = gimple_build_cond (EQ_EXPR, lhs,
389dd41b 5650 fold_convert_loc (loc, TREE_TYPE (lhs),
5651 boolean_true_node),
75a70cf9 5652 tlabel, flabel);
5653 gimple_seq_add_stmt (pre_p, cond);
5654 gimple_seq_add_stmt (pre_p, gimple_build_label (tlabel));
5655 gimple_seq_add_seq (pre_p, gimple_omp_body (single_stmt));
5656 gimple_seq_add_stmt (pre_p, gimple_build_label (flabel));
1e8e9920 5657}
5658
773c5ba7 5659
 5660/* A subroutine of lower_omp_single.  Expand the copyprivate form of
75a70cf9 5661	 a GIMPLE_OMP_SINGLE, i.e. one with a copyprivate clause:
1e8e9920 5662
5663 #pragma omp single copyprivate (a, b, c)
5664
5665 Create a new structure to hold copies of 'a', 'b' and 'c' and emit:
5666
5667 {
5668 if ((copyout_p = GOMP_single_copy_start ()) == NULL)
5669 {
5670 BODY;
5671 copyout.a = a;
5672 copyout.b = b;
5673 copyout.c = c;
5674 GOMP_single_copy_end (&copyout);
5675 }
5676 else
5677 {
5678 a = copyout_p->a;
5679 b = copyout_p->b;
5680 c = copyout_p->c;
5681 }
5682 GOMP_barrier ();
5683 }
773c5ba7 5684
5685 FIXME. It may be better to delay expanding the logic of this until
 5686	 pass_expand_omp.  The expanded logic may make the job more difficult
 5687	 for a synchronization analysis pass.  */
1e8e9920 5688
5689static void
75a70cf9 5690lower_omp_single_copy (gimple single_stmt, gimple_seq *pre_p, omp_context *ctx)
1e8e9920 5691{
75a70cf9 5692 tree ptr_type, t, l0, l1, l2;
5693 gimple_seq copyin_seq;
e60a6f7b 5694 location_t loc = gimple_location (single_stmt);
1e8e9920 5695
5696 ctx->sender_decl = create_tmp_var (ctx->record_type, ".omp_copy_o");
5697
5698 ptr_type = build_pointer_type (ctx->record_type);
5699 ctx->receiver_decl = create_tmp_var (ptr_type, ".omp_copy_i");
5700
e60a6f7b 5701 l0 = create_artificial_label (loc);
5702 l1 = create_artificial_label (loc);
5703 l2 = create_artificial_label (loc);
1e8e9920 5704
389dd41b 5705 t = build_call_expr_loc (loc, built_in_decls[BUILT_IN_GOMP_SINGLE_COPY_START], 0);
5706 t = fold_convert_loc (loc, ptr_type, t);
75a70cf9 5707 gimplify_assign (ctx->receiver_decl, t, pre_p);
1e8e9920 5708
5709 t = build2 (EQ_EXPR, boolean_type_node, ctx->receiver_decl,
5710 build_int_cst (ptr_type, 0));
5711 t = build3 (COND_EXPR, void_type_node, t,
5712 build_and_jump (&l0), build_and_jump (&l1));
5713 gimplify_and_add (t, pre_p);
5714
75a70cf9 5715 gimple_seq_add_stmt (pre_p, gimple_build_label (l0));
1e8e9920 5716
75a70cf9 5717 gimple_seq_add_seq (pre_p, gimple_omp_body (single_stmt));
1e8e9920 5718
5719 copyin_seq = NULL;
75a70cf9 5720 lower_copyprivate_clauses (gimple_omp_single_clauses (single_stmt), pre_p,
1e8e9920 5721 &copyin_seq, ctx);
5722
389dd41b 5723 t = build_fold_addr_expr_loc (loc, ctx->sender_decl);
5724 t = build_call_expr_loc (loc, built_in_decls[BUILT_IN_GOMP_SINGLE_COPY_END],
5725 1, t);
1e8e9920 5726 gimplify_and_add (t, pre_p);
5727
5728 t = build_and_jump (&l2);
5729 gimplify_and_add (t, pre_p);
5730
75a70cf9 5731 gimple_seq_add_stmt (pre_p, gimple_build_label (l1));
1e8e9920 5732
75a70cf9 5733 gimple_seq_add_seq (pre_p, copyin_seq);
1e8e9920 5734
75a70cf9 5735 gimple_seq_add_stmt (pre_p, gimple_build_label (l2));
1e8e9920 5736}
5737
773c5ba7 5738
1e8e9920 5739/* Lower code for an OpenMP single directive.  */
5740
5741static void
75a70cf9 5742lower_omp_single (gimple_stmt_iterator *gsi_p, omp_context *ctx)
1e8e9920 5743{
75a70cf9 5744 tree block;
5745 gimple t, bind, single_stmt = gsi_stmt (*gsi_p);
5746 gimple_seq bind_body, dlist;
dac18d1a 5747 struct gimplify_ctx gctx;
1e8e9920 5748
dac18d1a 5749 push_gimplify_context (&gctx);
1e8e9920 5750
75a70cf9 5751 bind_body = NULL;
5752 lower_rec_input_clauses (gimple_omp_single_clauses (single_stmt),
5753 &bind_body, &dlist, ctx);
5754 lower_omp (gimple_omp_body (single_stmt), ctx);
1e8e9920 5755
75a70cf9 5756 gimple_seq_add_stmt (&bind_body, single_stmt);
1e8e9920 5757
5758 if (ctx->record_type)
75a70cf9 5759 lower_omp_single_copy (single_stmt, &bind_body, ctx);
1e8e9920 5760 else
75a70cf9 5761 lower_omp_single_simple (single_stmt, &bind_body);
5762
5763 gimple_omp_set_body (single_stmt, NULL);
1e8e9920 5764
75a70cf9 5765 gimple_seq_add_seq (&bind_body, dlist);
61e47ac8 5766
75a70cf9 5767 bind_body = maybe_catch_exception (bind_body);
61e47ac8 5768
48e1416a 5769 t = gimple_build_omp_return
75a70cf9 5770 (!!find_omp_clause (gimple_omp_single_clauses (single_stmt),
5771 OMP_CLAUSE_NOWAIT));
5772 gimple_seq_add_stmt (&bind_body, t);
aade31a0 5773
75a70cf9 5774 block = make_node (BLOCK);
5775 bind = gimple_build_bind (NULL, bind_body, block);
61e47ac8 5776
1e8e9920 5777 pop_gimplify_context (bind);
773c5ba7 5778
75a70cf9 5779 gimple_bind_append_vars (bind, ctx->block_vars);
5780 BLOCK_VARS (block) = ctx->block_vars;
5781 gsi_replace (gsi_p, bind, true);
1d22f541 5782 if (BLOCK_VARS (block))
5783 TREE_USED (block) = 1;
1e8e9920 5784}
5785
773c5ba7 5786
1e8e9920 5787/* Lower code for an OpenMP master directive.  */
5788
5789static void
75a70cf9 5790lower_omp_master (gimple_stmt_iterator *gsi_p, omp_context *ctx)
1e8e9920 5791{
75a70cf9 5792 tree block, lab = NULL, x;
5793 gimple stmt = gsi_stmt (*gsi_p), bind;
389dd41b 5794 location_t loc = gimple_location (stmt);
75a70cf9 5795 gimple_seq tseq;
dac18d1a 5796 struct gimplify_ctx gctx;
1e8e9920 5797
dac18d1a 5798 push_gimplify_context (&gctx);
1e8e9920 5799
5800 block = make_node (BLOCK);
75a70cf9 5801 bind = gimple_build_bind (NULL, gimple_seq_alloc_with_stmt (stmt),
5802 block);
61e47ac8 5803
389dd41b 5804 x = build_call_expr_loc (loc, built_in_decls[BUILT_IN_OMP_GET_THREAD_NUM], 0);
1e8e9920 5805 x = build2 (EQ_EXPR, boolean_type_node, x, integer_zero_node);
5806 x = build3 (COND_EXPR, void_type_node, x, NULL, build_and_jump (&lab));
75a70cf9 5807 tseq = NULL;
5808 gimplify_and_add (x, &tseq);
5809 gimple_bind_add_seq (bind, tseq);
1e8e9920 5810
75a70cf9 5811 lower_omp (gimple_omp_body (stmt), ctx);
5812 gimple_omp_set_body (stmt, maybe_catch_exception (gimple_omp_body (stmt)));
5813 gimple_bind_add_seq (bind, gimple_omp_body (stmt));
5814 gimple_omp_set_body (stmt, NULL);
1e8e9920 5815
75a70cf9 5816 gimple_bind_add_stmt (bind, gimple_build_label (lab));
61e47ac8 5817
75a70cf9 5818 gimple_bind_add_stmt (bind, gimple_build_omp_return (true));
61e47ac8 5819
1e8e9920 5820 pop_gimplify_context (bind);
773c5ba7 5821
75a70cf9 5822 gimple_bind_append_vars (bind, ctx->block_vars);
5823 BLOCK_VARS (block) = ctx->block_vars;
5824 gsi_replace (gsi_p, bind, true);
1e8e9920 5825}
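
/* Illustrative sketch of the lowered form (editor's note):

     if (!(omp_get_thread_num () == 0)) goto lab;
     <body>
   lab:
     GIMPLE_OMP_RETURN (nowait)

   Only the master thread executes the body, and the region exit is
   built with nowait set, since "omp master" implies no barrier.  */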
5826
773c5ba7 5827
1e8e9920 5828/* Lower code for an OpenMP ordered directive.  */
5829
5830static void
75a70cf9 5831lower_omp_ordered (gimple_stmt_iterator *gsi_p, omp_context *ctx)
1e8e9920 5832{
75a70cf9 5833 tree block;
5834 gimple stmt = gsi_stmt (*gsi_p), bind, x;
dac18d1a 5835 struct gimplify_ctx gctx;
1e8e9920 5836
dac18d1a 5837 push_gimplify_context (&gctx);
1e8e9920 5838
5839 block = make_node (BLOCK);
75a70cf9 5840 bind = gimple_build_bind (NULL, gimple_seq_alloc_with_stmt (stmt),
5841 block);
61e47ac8 5842
75a70cf9 5843 x = gimple_build_call (built_in_decls[BUILT_IN_GOMP_ORDERED_START], 0);
5844 gimple_bind_add_stmt (bind, x);
1e8e9920 5845
75a70cf9 5846 lower_omp (gimple_omp_body (stmt), ctx);
5847 gimple_omp_set_body (stmt, maybe_catch_exception (gimple_omp_body (stmt)));
5848 gimple_bind_add_seq (bind, gimple_omp_body (stmt));
5849 gimple_omp_set_body (stmt, NULL);
1e8e9920 5850
75a70cf9 5851 x = gimple_build_call (built_in_decls[BUILT_IN_GOMP_ORDERED_END], 0);
5852 gimple_bind_add_stmt (bind, x);
61e47ac8 5853
75a70cf9 5854 gimple_bind_add_stmt (bind, gimple_build_omp_return (true));
61e47ac8 5855
1e8e9920 5856 pop_gimplify_context (bind);
773c5ba7 5857
75a70cf9 5858 gimple_bind_append_vars (bind, ctx->block_vars);
5859 BLOCK_VARS (block) = gimple_bind_vars (bind);
5860 gsi_replace (gsi_p, bind, true);
1e8e9920 5861}
5862
1e8e9920 5863
75a70cf9 5864/* Gimplify a GIMPLE_OMP_CRITICAL statement. This is a relatively simple
1e8e9920 5865	 substitution of a couple of function calls.  But in the NAMED case,
 5866	 it requires that languages coordinate a symbol name.  It is
 5867	 therefore best put here in common code.  */
5868
5869static GTY((param1_is (tree), param2_is (tree)))
5870 splay_tree critical_name_mutexes;
5871
5872static void
75a70cf9 5873lower_omp_critical (gimple_stmt_iterator *gsi_p, omp_context *ctx)
1e8e9920 5874{
75a70cf9 5875 tree block;
5876 tree name, lock, unlock;
5877 gimple stmt = gsi_stmt (*gsi_p), bind;
389dd41b 5878 location_t loc = gimple_location (stmt);
75a70cf9 5879 gimple_seq tbody;
dac18d1a 5880 struct gimplify_ctx gctx;
1e8e9920 5881
75a70cf9 5882 name = gimple_omp_critical_name (stmt);
1e8e9920 5883 if (name)
5884 {
c2f47e15 5885 tree decl;
1e8e9920 5886 splay_tree_node n;
5887
5888 if (!critical_name_mutexes)
5889 critical_name_mutexes
5890 = splay_tree_new_ggc (splay_tree_compare_pointers);
5891
5892 n = splay_tree_lookup (critical_name_mutexes, (splay_tree_key) name);
5893 if (n == NULL)
5894 {
5895 char *new_str;
5896
5897 decl = create_tmp_var_raw (ptr_type_node, NULL);
5898
5899 new_str = ACONCAT ((".gomp_critical_user_",
5900 IDENTIFIER_POINTER (name), NULL));
5901 DECL_NAME (decl) = get_identifier (new_str);
5902 TREE_PUBLIC (decl) = 1;
5903 TREE_STATIC (decl) = 1;
5904 DECL_COMMON (decl) = 1;
5905 DECL_ARTIFICIAL (decl) = 1;
5906 DECL_IGNORED_P (decl) = 1;
1d416bd7 5907 varpool_finalize_decl (decl);
1e8e9920 5908
5909 splay_tree_insert (critical_name_mutexes, (splay_tree_key) name,
5910 (splay_tree_value) decl);
5911 }
5912 else
5913 decl = (tree) n->value;
5914
1e8e9920 5915 lock = built_in_decls[BUILT_IN_GOMP_CRITICAL_NAME_START];
389dd41b 5916 lock = build_call_expr_loc (loc, lock, 1, build_fold_addr_expr_loc (loc, decl));
1e8e9920 5917
1e8e9920 5918 unlock = built_in_decls[BUILT_IN_GOMP_CRITICAL_NAME_END];
389dd41b 5919 unlock = build_call_expr_loc (loc, unlock, 1,
5920 build_fold_addr_expr_loc (loc, decl));
1e8e9920 5921 }
5922 else
5923 {
5924 lock = built_in_decls[BUILT_IN_GOMP_CRITICAL_START];
389dd41b 5925 lock = build_call_expr_loc (loc, lock, 0);
1e8e9920 5926
5927 unlock = built_in_decls[BUILT_IN_GOMP_CRITICAL_END];
389dd41b 5928 unlock = build_call_expr_loc (loc, unlock, 0);
1e8e9920 5929 }
5930
dac18d1a 5931 push_gimplify_context (&gctx);
1e8e9920 5932
5933 block = make_node (BLOCK);
75a70cf9 5934 bind = gimple_build_bind (NULL, gimple_seq_alloc_with_stmt (stmt), block);
61e47ac8 5935
75a70cf9 5936 tbody = gimple_bind_body (bind);
5937 gimplify_and_add (lock, &tbody);
5938 gimple_bind_set_body (bind, tbody);
1e8e9920 5939
75a70cf9 5940 lower_omp (gimple_omp_body (stmt), ctx);
5941 gimple_omp_set_body (stmt, maybe_catch_exception (gimple_omp_body (stmt)));
5942 gimple_bind_add_seq (bind, gimple_omp_body (stmt));
5943 gimple_omp_set_body (stmt, NULL);
1e8e9920 5944
75a70cf9 5945 tbody = gimple_bind_body (bind);
5946 gimplify_and_add (unlock, &tbody);
5947 gimple_bind_set_body (bind, tbody);
61e47ac8 5948
75a70cf9 5949 gimple_bind_add_stmt (bind, gimple_build_omp_return (true));
1e8e9920 5950
5951 pop_gimplify_context (bind);
75a70cf9 5952 gimple_bind_append_vars (bind, ctx->block_vars);
5953 BLOCK_VARS (block) = gimple_bind_vars (bind);
5954 gsi_replace (gsi_p, bind, true);
773c5ba7 5955}
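
/* Illustrative note (editor's sketch): for "#pragma omp critical (foo)"
   the lock above is a TREE_PUBLIC, DECL_COMMON variable named
   ".gomp_critical_user_foo", so every translation unit that uses the
   same critical name links against one shared mutex; the unnamed form
   instead calls GOMP_critical_start/GOMP_critical_end on libgomp's
   single default mutex.  */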
5956
5957
5958/* A subroutine of lower_omp_for. Generate code to emit the predicate
5959 for a lastprivate clause. Given a loop control predicate of (V
5960 cond N2), we gate the clause on (!(V cond N2)). The lowered form
1e4afe3c 5961	 is appended to *DLIST; the iterator initialization is appended to
 5962	 *BODY_P.  */
773c5ba7 5963
5964static void
75a70cf9 5965lower_omp_for_lastprivate (struct omp_for_data *fd, gimple_seq *body_p,
5966 gimple_seq *dlist, struct omp_context *ctx)
773c5ba7 5967{
75a70cf9 5968 tree clauses, cond, vinit;
773c5ba7 5969 enum tree_code cond_code;
75a70cf9 5970 gimple_seq stmts;
48e1416a 5971
fd6481cf 5972 cond_code = fd->loop.cond_code;
773c5ba7 5973 cond_code = cond_code == LT_EXPR ? GE_EXPR : LE_EXPR;
5974
5975 /* When possible, use a strict equality expression. This can let VRP
5976 type optimizations deduce the value and remove a copy. */
fd6481cf 5977 if (host_integerp (fd->loop.step, 0))
773c5ba7 5978 {
fd6481cf 5979 HOST_WIDE_INT step = TREE_INT_CST_LOW (fd->loop.step);
773c5ba7 5980 if (step == 1 || step == -1)
5981 cond_code = EQ_EXPR;
5982 }
5983
fd6481cf 5984 cond = build2 (cond_code, boolean_type_node, fd->loop.v, fd->loop.n2);
773c5ba7 5985
75a70cf9 5986 clauses = gimple_omp_for_clauses (fd->for_stmt);
1e4afe3c 5987 stmts = NULL;
5988 lower_lastprivate_clauses (clauses, cond, &stmts, ctx);
75a70cf9 5989 if (!gimple_seq_empty_p (stmts))
1e4afe3c 5990 {
75a70cf9 5991 gimple_seq_add_seq (&stmts, *dlist);
fd6481cf 5992 *dlist = stmts;
1e4afe3c 5993
5994 /* Optimize: v = 0; is usually cheaper than v = some_other_constant. */
fd6481cf 5995 vinit = fd->loop.n1;
1e4afe3c 5996 if (cond_code == EQ_EXPR
fd6481cf 5997 && host_integerp (fd->loop.n2, 0)
5998 && ! integer_zerop (fd->loop.n2))
5999 vinit = build_int_cst (TREE_TYPE (fd->loop.v), 0);
1e4afe3c 6000
6001 /* Initialize the iterator variable, so that threads that don't execute
6002 any iterations don't execute the lastprivate clauses by accident. */
75a70cf9 6003 gimplify_assign (fd->loop.v, vinit, body_p);
1e4afe3c 6004 }
773c5ba7 6005}
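
/* Illustrative example (editor's sketch): for

     #pragma omp for lastprivate (x)
     for (i = 0; i < n; i++) { ... }

   the control predicate is (i < n), so the lastprivate copy-out is
   gated on (i >= n); because the step is the constant 1, this is
   strengthened to (i == n), which VRP can exploit.  The up-front
   "i = N1" (or "i = 0" for the equality form) initialization ensures a
   thread that executes zero iterations does not trip the gate by
   accident.  */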
6006
6007
6008/* Lower code for an OpenMP loop directive. */
6009
6010static void
75a70cf9 6011lower_omp_for (gimple_stmt_iterator *gsi_p, omp_context *ctx)
773c5ba7 6012{
75a70cf9 6013 tree *rhs_p, block;
773c5ba7 6014 struct omp_for_data fd;
75a70cf9 6015 gimple stmt = gsi_stmt (*gsi_p), new_stmt;
f018d957 6016 gimple_seq omp_for_body, body, dlist;
75a70cf9 6017 size_t i;
dac18d1a 6018 struct gimplify_ctx gctx;
773c5ba7 6019
dac18d1a 6020 push_gimplify_context (&gctx);
773c5ba7 6021
75a70cf9 6022 lower_omp (gimple_omp_for_pre_body (stmt), ctx);
6023 lower_omp (gimple_omp_body (stmt), ctx);
773c5ba7 6024
1d22f541 6025 block = make_node (BLOCK);
75a70cf9 6026 new_stmt = gimple_build_bind (NULL, NULL, block);
1d22f541 6027
773c5ba7 6028 /* Move declaration of temporaries in the loop body before we make
6029 it go away. */
75a70cf9 6030 omp_for_body = gimple_omp_body (stmt);
6031 if (!gimple_seq_empty_p (omp_for_body)
6032 && gimple_code (gimple_seq_first_stmt (omp_for_body)) == GIMPLE_BIND)
6033 {
6034 tree vars = gimple_bind_vars (gimple_seq_first_stmt (omp_for_body));
6035 gimple_bind_append_vars (new_stmt, vars);
6036 }
773c5ba7 6037
75a70cf9 6038 /* The pre-body and input clauses go before the lowered GIMPLE_OMP_FOR. */
773c5ba7 6039 dlist = NULL;
75a70cf9 6040 body = NULL;
6041 lower_rec_input_clauses (gimple_omp_for_clauses (stmt), &body, &dlist, ctx);
6042 gimple_seq_add_seq (&body, gimple_omp_for_pre_body (stmt));
773c5ba7 6043
6044 /* Lower the header expressions. At this point, we can assume that
6045 the header is of the form:
6046
6047 #pragma omp for (V = VAL1; V {<|>|<=|>=} VAL2; V = V [+-] VAL3)
6048
6049 We just need to make sure that VAL1, VAL2 and VAL3 are lowered
6050 using the .omp_data_s mapping, if needed. */
75a70cf9 6051 for (i = 0; i < gimple_omp_for_collapse (stmt); i++)
fd6481cf 6052 {
75a70cf9 6053 rhs_p = gimple_omp_for_initial_ptr (stmt, i);
fd6481cf 6054 if (!is_gimple_min_invariant (*rhs_p))
75a70cf9 6055 *rhs_p = get_formal_tmp_var (*rhs_p, &body);
fd6481cf 6056
75a70cf9 6057 rhs_p = gimple_omp_for_final_ptr (stmt, i);
fd6481cf 6058 if (!is_gimple_min_invariant (*rhs_p))
75a70cf9 6059 *rhs_p = get_formal_tmp_var (*rhs_p, &body);
fd6481cf 6060
75a70cf9 6061 rhs_p = &TREE_OPERAND (gimple_omp_for_incr (stmt, i), 1);
fd6481cf 6062 if (!is_gimple_min_invariant (*rhs_p))
75a70cf9 6063 *rhs_p = get_formal_tmp_var (*rhs_p, &body);
fd6481cf 6064 }
773c5ba7 6065
6066 /* Once lowered, extract the bounds and clauses. */
fd6481cf 6067 extract_omp_for_data (stmt, &fd, NULL);
773c5ba7 6068
75a70cf9 6069 lower_omp_for_lastprivate (&fd, &body, &dlist, ctx);
773c5ba7 6070
75a70cf9 6071 gimple_seq_add_stmt (&body, stmt);
6072 gimple_seq_add_seq (&body, gimple_omp_body (stmt));
61e47ac8 6073
75a70cf9 6074 gimple_seq_add_stmt (&body, gimple_build_omp_continue (fd.loop.v,
6075 fd.loop.v));
61e47ac8 6076
773c5ba7 6077 /* After the loop, add exit clauses. */
75a70cf9 6078 lower_reduction_clauses (gimple_omp_for_clauses (stmt), &body, ctx);
6079 gimple_seq_add_seq (&body, dlist);
773c5ba7 6080
75a70cf9 6081 body = maybe_catch_exception (body);
aade31a0 6082
61e47ac8 6083 /* Region exit marker goes at the end of the loop body. */
75a70cf9 6084 gimple_seq_add_stmt (&body, gimple_build_omp_return (fd.have_nowait));
773c5ba7 6085
1d22f541 6086 pop_gimplify_context (new_stmt);
75a70cf9 6087
6088 gimple_bind_append_vars (new_stmt, ctx->block_vars);
6089 BLOCK_VARS (block) = gimple_bind_vars (new_stmt);
1d22f541 6090 if (BLOCK_VARS (block))
6091 TREE_USED (block) = 1;
773c5ba7 6092
75a70cf9 6093 gimple_bind_set_body (new_stmt, body);
6094 gimple_omp_set_body (stmt, NULL);
6095 gimple_omp_for_set_pre_body (stmt, NULL);
6096 gsi_replace (gsi_p, new_stmt, true);
1e8e9920 6097}
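
/* Illustrative sketch of the overall result (editor's note):

     <clause setup>  <lowered pre-body>  <lastprivate guard init>
     GIMPLE_OMP_FOR (V = N1; V cond N2; V += STEP)
       <lowered loop body>
     GIMPLE_OMP_CONTINUE (V, V)
     <reductions>  <dlist: lastprivate/destructors>
     GIMPLE_OMP_RETURN (nowait if present)

   all wrapped in a new GIMPLE_BIND that also receives any temporaries
   hoisted out of the loop body.  */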
6098
48e1416a 6099/* Callback for walk_gimple_seq.  Check if the current statement only
75a70cf9 6100	 contains GIMPLE_OMP_FOR or GIMPLE_OMP_SECTIONS.  */
de7ef844 6101
6102static tree
75a70cf9 6103check_combined_parallel (gimple_stmt_iterator *gsi_p,
6104 bool *handled_ops_p,
6105 struct walk_stmt_info *wi)
de7ef844 6106{
4077bf7a 6107 int *info = (int *) wi->info;
75a70cf9 6108 gimple stmt = gsi_stmt (*gsi_p);
de7ef844 6109
75a70cf9 6110 *handled_ops_p = true;
6111 switch (gimple_code (stmt))
de7ef844 6112 {
75a70cf9 6113 WALK_SUBSTMTS;
6114
6115 case GIMPLE_OMP_FOR:
6116 case GIMPLE_OMP_SECTIONS:
de7ef844 6117 *info = *info == 0 ? 1 : -1;
6118 break;
6119 default:
6120 *info = -1;
6121 break;
6122 }
6123 return NULL;
6124}
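
/* Illustrative note (editor's sketch): walking the body of

     #pragma omp parallel
     #pragma omp for
     for (...) { ... }

   sees exactly one GIMPLE_OMP_FOR, leaving *info == 1, so the caller
   marks the parallel as combined.  Any other statement, or a second
   workshare construct, drives *info to -1 and the combined form is not
   used.  */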
773c5ba7 6125
fd6481cf 6126struct omp_taskcopy_context
6127{
6128 /* This field must be at the beginning, as we do "inheritance": Some
6129 callback functions for tree-inline.c (e.g., omp_copy_decl)
6130 receive a copy_body_data pointer that is up-casted to an
6131 omp_context pointer. */
6132 copy_body_data cb;
6133 omp_context *ctx;
6134};
6135
6136static tree
6137task_copyfn_copy_decl (tree var, copy_body_data *cb)
6138{
6139 struct omp_taskcopy_context *tcctx = (struct omp_taskcopy_context *) cb;
6140
6141 if (splay_tree_lookup (tcctx->ctx->sfield_map, (splay_tree_key) var))
6142 return create_tmp_var (TREE_TYPE (var), NULL);
6143
6144 return var;
6145}
6146
6147static tree
6148task_copyfn_remap_type (struct omp_taskcopy_context *tcctx, tree orig_type)
6149{
6150 tree name, new_fields = NULL, type, f;
6151
6152 type = lang_hooks.types.make_type (RECORD_TYPE);
6153 name = DECL_NAME (TYPE_NAME (orig_type));
e60a6f7b 6154 name = build_decl (gimple_location (tcctx->ctx->stmt),
6155 TYPE_DECL, name, type);
fd6481cf 6156 TYPE_NAME (type) = name;
6157
6158 for (f = TYPE_FIELDS (orig_type); f ; f = TREE_CHAIN (f))
6159 {
6160 tree new_f = copy_node (f);
6161 DECL_CONTEXT (new_f) = type;
6162 TREE_TYPE (new_f) = remap_type (TREE_TYPE (f), &tcctx->cb);
6163 TREE_CHAIN (new_f) = new_fields;
75a70cf9 6164 walk_tree (&DECL_SIZE (new_f), copy_tree_body_r, &tcctx->cb, NULL);
6165 walk_tree (&DECL_SIZE_UNIT (new_f), copy_tree_body_r, &tcctx->cb, NULL);
6166 walk_tree (&DECL_FIELD_OFFSET (new_f), copy_tree_body_r,
6167 &tcctx->cb, NULL);
fd6481cf 6168 new_fields = new_f;
6169 *pointer_map_insert (tcctx->cb.decl_map, f) = new_f;
6170 }
6171 TYPE_FIELDS (type) = nreverse (new_fields);
6172 layout_type (type);
6173 return type;
6174}
6175
6176/* Create task copyfn. */
6177
6178static void
75a70cf9 6179create_task_copyfn (gimple task_stmt, omp_context *ctx)
fd6481cf 6180{
6181 struct function *child_cfun;
6182 tree child_fn, t, c, src, dst, f, sf, arg, sarg, decl;
6183 tree record_type, srecord_type, bind, list;
6184 bool record_needs_remap = false, srecord_needs_remap = false;
6185 splay_tree_node n;
6186 struct omp_taskcopy_context tcctx;
dac18d1a 6187 struct gimplify_ctx gctx;
389dd41b 6188 location_t loc = gimple_location (task_stmt);
fd6481cf 6189
75a70cf9 6190 child_fn = gimple_omp_task_copy_fn (task_stmt);
fd6481cf 6191 child_cfun = DECL_STRUCT_FUNCTION (child_fn);
6192 gcc_assert (child_cfun->cfg == NULL);
6193 child_cfun->dont_save_pending_sizes_p = 1;
6194 DECL_SAVED_TREE (child_fn) = alloc_stmt_list ();
6195
6196 /* Reset DECL_CONTEXT on function arguments. */
6197 for (t = DECL_ARGUMENTS (child_fn); t; t = TREE_CHAIN (t))
6198 DECL_CONTEXT (t) = child_fn;
6199
6200 /* Populate the function. */
dac18d1a 6201 push_gimplify_context (&gctx);
fd6481cf 6202 current_function_decl = child_fn;
6203
6204 bind = build3 (BIND_EXPR, void_type_node, NULL, NULL, NULL);
6205 TREE_SIDE_EFFECTS (bind) = 1;
6206 list = NULL;
6207 DECL_SAVED_TREE (child_fn) = bind;
75a70cf9 6208 DECL_SOURCE_LOCATION (child_fn) = gimple_location (task_stmt);
fd6481cf 6209
6210 /* Remap src and dst argument types if needed. */
6211 record_type = ctx->record_type;
6212 srecord_type = ctx->srecord_type;
6213 for (f = TYPE_FIELDS (record_type); f ; f = TREE_CHAIN (f))
6214 if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
6215 {
6216 record_needs_remap = true;
6217 break;
6218 }
6219 for (f = TYPE_FIELDS (srecord_type); f ; f = TREE_CHAIN (f))
6220 if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
6221 {
6222 srecord_needs_remap = true;
6223 break;
6224 }
6225
6226 if (record_needs_remap || srecord_needs_remap)
6227 {
6228 memset (&tcctx, '\0', sizeof (tcctx));
6229 tcctx.cb.src_fn = ctx->cb.src_fn;
6230 tcctx.cb.dst_fn = child_fn;
6231 tcctx.cb.src_node = cgraph_node (tcctx.cb.src_fn);
6232 tcctx.cb.dst_node = tcctx.cb.src_node;
6233 tcctx.cb.src_cfun = ctx->cb.src_cfun;
6234 tcctx.cb.copy_decl = task_copyfn_copy_decl;
e38def9c 6235 tcctx.cb.eh_lp_nr = 0;
fd6481cf 6236 tcctx.cb.transform_call_graph_edges = CB_CGE_MOVE;
6237 tcctx.cb.decl_map = pointer_map_create ();
6238 tcctx.ctx = ctx;
6239
6240 if (record_needs_remap)
6241 record_type = task_copyfn_remap_type (&tcctx, record_type);
6242 if (srecord_needs_remap)
6243 srecord_type = task_copyfn_remap_type (&tcctx, srecord_type);
6244 }
6245 else
6246 tcctx.cb.decl_map = NULL;
6247
6248 push_cfun (child_cfun);
6249
6250 arg = DECL_ARGUMENTS (child_fn);
6251 TREE_TYPE (arg) = build_pointer_type (record_type);
6252 sarg = TREE_CHAIN (arg);
6253 TREE_TYPE (sarg) = build_pointer_type (srecord_type);
6254
6255 /* First pass: initialize temporaries used in record_type and srecord_type
6256 sizes and field offsets. */
6257 if (tcctx.cb.decl_map)
75a70cf9 6258 for (c = gimple_omp_task_clauses (task_stmt); c; c = OMP_CLAUSE_CHAIN (c))
fd6481cf 6259 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_FIRSTPRIVATE)
6260 {
6261 tree *p;
6262
6263 decl = OMP_CLAUSE_DECL (c);
6264 p = (tree *) pointer_map_contains (tcctx.cb.decl_map, decl);
6265 if (p == NULL)
6266 continue;
6267 n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
6268 sf = (tree) n->value;
6269 sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
389dd41b 6270 src = build_fold_indirect_ref_loc (loc, sarg);
fd6481cf 6271 src = build3 (COMPONENT_REF, TREE_TYPE (sf), src, sf, NULL);
75a70cf9 6272 t = build2 (MODIFY_EXPR, TREE_TYPE (*p), *p, src);
fd6481cf 6273 append_to_statement_list (t, &list);
6274 }
6275
6276 /* Second pass: copy shared var pointers and copy construct non-VLA
6277 firstprivate vars. */
75a70cf9 6278 for (c = gimple_omp_task_clauses (task_stmt); c; c = OMP_CLAUSE_CHAIN (c))
fd6481cf 6279 switch (OMP_CLAUSE_CODE (c))
6280 {
6281 case OMP_CLAUSE_SHARED:
6282 decl = OMP_CLAUSE_DECL (c);
6283 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
6284 if (n == NULL)
6285 break;
6286 f = (tree) n->value;
6287 if (tcctx.cb.decl_map)
6288 f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
6289 n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
6290 sf = (tree) n->value;
6291 if (tcctx.cb.decl_map)
6292 sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
389dd41b 6293 src = build_fold_indirect_ref_loc (loc, sarg);
fd6481cf 6294 src = build3 (COMPONENT_REF, TREE_TYPE (sf), src, sf, NULL);
389dd41b 6295 dst = build_fold_indirect_ref_loc (loc, arg);
fd6481cf 6296 dst = build3 (COMPONENT_REF, TREE_TYPE (f), dst, f, NULL);
75a70cf9 6297 t = build2 (MODIFY_EXPR, TREE_TYPE (dst), dst, src);
fd6481cf 6298 append_to_statement_list (t, &list);
6299 break;
6300 case OMP_CLAUSE_FIRSTPRIVATE:
6301 decl = OMP_CLAUSE_DECL (c);
6302 if (is_variable_sized (decl))
6303 break;
6304 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
6305 if (n == NULL)
6306 break;
6307 f = (tree) n->value;
6308 if (tcctx.cb.decl_map)
6309 f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
6310 n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
6311 if (n != NULL)
6312 {
6313 sf = (tree) n->value;
6314 if (tcctx.cb.decl_map)
6315 sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
389dd41b 6316 src = build_fold_indirect_ref_loc (loc, sarg);
fd6481cf 6317 src = build3 (COMPONENT_REF, TREE_TYPE (sf), src, sf, NULL);
6318 if (use_pointer_for_field (decl, NULL) || is_reference (decl))
389dd41b 6319 src = build_fold_indirect_ref_loc (loc, src);
fd6481cf 6320 }
6321 else
6322 src = decl;
389dd41b 6323 dst = build_fold_indirect_ref_loc (loc, arg);
fd6481cf 6324 dst = build3 (COMPONENT_REF, TREE_TYPE (f), dst, f, NULL);
6325 t = lang_hooks.decls.omp_clause_copy_ctor (c, dst, src);
6326 append_to_statement_list (t, &list);
6327 break;
6328 case OMP_CLAUSE_PRIVATE:
6329 if (! OMP_CLAUSE_PRIVATE_OUTER_REF (c))
6330 break;
6331 decl = OMP_CLAUSE_DECL (c);
6332 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
6333 f = (tree) n->value;
6334 if (tcctx.cb.decl_map)
6335 f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
6336 n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
6337 if (n != NULL)
6338 {
6339 sf = (tree) n->value;
6340 if (tcctx.cb.decl_map)
6341 sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
389dd41b 6342 src = build_fold_indirect_ref_loc (loc, sarg);
fd6481cf 6343 src = build3 (COMPONENT_REF, TREE_TYPE (sf), src, sf, NULL);
6344 if (use_pointer_for_field (decl, NULL))
389dd41b 6345 src = build_fold_indirect_ref_loc (loc, src);
fd6481cf 6346 }
6347 else
6348 src = decl;
389dd41b 6349 dst = build_fold_indirect_ref_loc (loc, arg);
fd6481cf 6350 dst = build3 (COMPONENT_REF, TREE_TYPE (f), dst, f, NULL);
75a70cf9 6351 t = build2 (MODIFY_EXPR, TREE_TYPE (dst), dst, src);
fd6481cf 6352 append_to_statement_list (t, &list);
6353 break;
6354 default:
6355 break;
6356 }
6357
6358 /* Last pass: handle VLA firstprivates. */
6359 if (tcctx.cb.decl_map)
75a70cf9 6360 for (c = gimple_omp_task_clauses (task_stmt); c; c = OMP_CLAUSE_CHAIN (c))
fd6481cf 6361 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_FIRSTPRIVATE)
6362 {
6363 tree ind, ptr, df;
6364
6365 decl = OMP_CLAUSE_DECL (c);
6366 if (!is_variable_sized (decl))
6367 continue;
6368 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
6369 if (n == NULL)
6370 continue;
6371 f = (tree) n->value;
6372 f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
6373 gcc_assert (DECL_HAS_VALUE_EXPR_P (decl));
6374 ind = DECL_VALUE_EXPR (decl);
6375 gcc_assert (TREE_CODE (ind) == INDIRECT_REF);
6376 gcc_assert (DECL_P (TREE_OPERAND (ind, 0)));
6377 n = splay_tree_lookup (ctx->sfield_map,
6378 (splay_tree_key) TREE_OPERAND (ind, 0));
6379 sf = (tree) n->value;
6380 sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
389dd41b 6381 src = build_fold_indirect_ref_loc (loc, sarg);
fd6481cf 6382 src = build3 (COMPONENT_REF, TREE_TYPE (sf), src, sf, NULL);
389dd41b 6383 src = build_fold_indirect_ref_loc (loc, src);
6384 dst = build_fold_indirect_ref_loc (loc, arg);
fd6481cf 6385 dst = build3 (COMPONENT_REF, TREE_TYPE (f), dst, f, NULL);
6386 t = lang_hooks.decls.omp_clause_copy_ctor (c, dst, src);
6387 append_to_statement_list (t, &list);
6388 n = splay_tree_lookup (ctx->field_map,
6389 (splay_tree_key) TREE_OPERAND (ind, 0));
6390 df = (tree) n->value;
6391 df = *(tree *) pointer_map_contains (tcctx.cb.decl_map, df);
389dd41b 6392 ptr = build_fold_indirect_ref_loc (loc, arg);
fd6481cf 6393 ptr = build3 (COMPONENT_REF, TREE_TYPE (df), ptr, df, NULL);
75a70cf9 6394 t = build2 (MODIFY_EXPR, TREE_TYPE (ptr), ptr,
389dd41b 6395 build_fold_addr_expr_loc (loc, dst));
fd6481cf 6396 append_to_statement_list (t, &list);
6397 }
6398
6399 t = build1 (RETURN_EXPR, void_type_node, NULL);
6400 append_to_statement_list (t, &list);
6401
6402 if (tcctx.cb.decl_map)
6403 pointer_map_destroy (tcctx.cb.decl_map);
6404 pop_gimplify_context (NULL);
6405 BIND_EXPR_BODY (bind) = list;
6406 pop_cfun ();
6407 current_function_decl = ctx->cb.src_fn;
6408}
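
/* Illustrative example (editor's sketch): for

     void f (int n)
     {
       int a[n];
     #pragma omp task firstprivate (a)
       use (a);     // use () is a placeholder
     }

   the copyfn built above receives the destination record allocated by
   GOMP_task plus the sender record.  The VLA pass at the end
   copy-constructs the array contents through the sender's pointer field
   and then stores the address of the destination copy back into the
   destination record, so the task body sees its own copy of A.  */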
6409
75a70cf9 6410/* Lower the OpenMP parallel or task directive in the statement at
 6411	 GSI_P.  CTX holds context information for the directive.  */
773c5ba7 6412
6413static void
75a70cf9 6414lower_omp_taskreg (gimple_stmt_iterator *gsi_p, omp_context *ctx)
773c5ba7 6415{
75a70cf9 6416 tree clauses;
6417 tree child_fn, t;
6418 gimple stmt = gsi_stmt (*gsi_p);
6419 gimple par_bind, bind;
6420 gimple_seq par_body, olist, ilist, par_olist, par_ilist, new_body;
dac18d1a 6421 struct gimplify_ctx gctx;
389dd41b 6422 location_t loc = gimple_location (stmt);
773c5ba7 6423
75a70cf9 6424 clauses = gimple_omp_taskreg_clauses (stmt);
6425 par_bind = gimple_seq_first_stmt (gimple_omp_body (stmt));
6426 par_body = gimple_bind_body (par_bind);
773c5ba7 6427 child_fn = ctx->cb.dst_fn;
75a70cf9 6428 if (gimple_code (stmt) == GIMPLE_OMP_PARALLEL
6429 && !gimple_omp_parallel_combined_p (stmt))
de7ef844 6430 {
6431 struct walk_stmt_info wi;
6432 int ws_num = 0;
6433
6434 memset (&wi, 0, sizeof (wi));
de7ef844 6435 wi.info = &ws_num;
6436 wi.val_only = true;
75a70cf9 6437 walk_gimple_seq (par_body, check_combined_parallel, NULL, &wi);
de7ef844 6438 if (ws_num == 1)
75a70cf9 6439 gimple_omp_parallel_set_combined_p (stmt, true);
de7ef844 6440 }
fd6481cf 6441 if (ctx->srecord_type)
6442 create_task_copyfn (stmt, ctx);
773c5ba7 6443
dac18d1a 6444 push_gimplify_context (&gctx);
773c5ba7 6445
75a70cf9 6446 par_olist = NULL;
6447 par_ilist = NULL;
773c5ba7 6448 lower_rec_input_clauses (clauses, &par_ilist, &par_olist, ctx);
75a70cf9 6449 lower_omp (par_body, ctx);
6450 if (gimple_code (stmt) == GIMPLE_OMP_PARALLEL)
fd6481cf 6451 lower_reduction_clauses (clauses, &par_olist, ctx);
773c5ba7 6452
6453 /* Declare all the variables created by mapping and the variables
6454 declared in the scope of the parallel body. */
6455 record_vars_into (ctx->block_vars, child_fn);
75a70cf9 6456 record_vars_into (gimple_bind_vars (par_bind), child_fn);
773c5ba7 6457
6458 if (ctx->record_type)
6459 {
fd6481cf 6460 ctx->sender_decl
6461 = create_tmp_var (ctx->srecord_type ? ctx->srecord_type
6462 : ctx->record_type, ".omp_data_o");
86f2ad37 6463 TREE_ADDRESSABLE (ctx->sender_decl) = 1;
75a70cf9 6464 gimple_omp_taskreg_set_data_arg (stmt, ctx->sender_decl);
773c5ba7 6465 }
6466
75a70cf9 6467 olist = NULL;
6468 ilist = NULL;
773c5ba7 6469 lower_send_clauses (clauses, &ilist, &olist, ctx);
6470 lower_send_shared_vars (&ilist, &olist, ctx);
6471
6472 /* Once all the expansions are done, sequence all the different
75a70cf9 6473 fragments inside gimple_omp_body. */
773c5ba7 6474
75a70cf9 6475 new_body = NULL;
773c5ba7 6476
6477 if (ctx->record_type)
6478 {
389dd41b 6479 t = build_fold_addr_expr_loc (loc, ctx->sender_decl);
cc6b725b 6480 /* fixup_child_record_type might have changed receiver_decl's type. */
389dd41b 6481 t = fold_convert_loc (loc, TREE_TYPE (ctx->receiver_decl), t);
75a70cf9 6482 gimple_seq_add_stmt (&new_body,
6483 gimple_build_assign (ctx->receiver_decl, t));
773c5ba7 6484 }
6485
75a70cf9 6486 gimple_seq_add_seq (&new_body, par_ilist);
6487 gimple_seq_add_seq (&new_body, par_body);
6488 gimple_seq_add_seq (&new_body, par_olist);
6489 new_body = maybe_catch_exception (new_body);
6490 gimple_seq_add_stmt (&new_body, gimple_build_omp_return (false));
6491 gimple_omp_set_body (stmt, new_body);
773c5ba7 6492
75a70cf9 6493 bind = gimple_build_bind (NULL, NULL, gimple_bind_block (par_bind));
6494 gimple_bind_add_stmt (bind, stmt);
1d22f541 6495 if (ilist || olist)
6496 {
75a70cf9 6497 gimple_seq_add_stmt (&ilist, bind);
6498 gimple_seq_add_seq (&ilist, olist);
6499 bind = gimple_build_bind (NULL, ilist, NULL);
1d22f541 6500 }
773c5ba7 6501
75a70cf9 6502 gsi_replace (gsi_p, bind, true);
773c5ba7 6503
75a70cf9 6504 pop_gimplify_context (NULL);
773c5ba7 6505}
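
/* Illustrative sketch of the result (editor's note): for a parallel with
   data-sharing clauses the lowered form is roughly

     .omp_data_o.x = x;                  // ilist: marshal inbound vars
     GIMPLE_OMP_PARALLEL <clauses, data_arg .omp_data_o>
       {
         .omp_data_i = &.omp_data_o;     // receiver setup
         <par_ilist> <body> <par_olist>
         GIMPLE_OMP_RETURN
       }
     x = .omp_data_o.x;                  // olist: copy back outbound vars

   pass_expand_omp later outlines the GIMPLE_OMP_PARALLEL body into
   child_fn and passes &.omp_data_o to GOMP_parallel_start.  */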
6506
a4890dc9 6507/* Callback for lower_omp_1. Return non-NULL if *tp needs to be
75a70cf9 6508 regimplified. If DATA is non-NULL, lower_omp_1 is outside
6509 of OpenMP context, but with task_shared_vars set. */
46515c92 6510
6511static tree
75a70cf9 6512lower_omp_regimplify_p (tree *tp, int *walk_subtrees,
6513 void *data)
46515c92 6514{
a4890dc9 6515 tree t = *tp;
46515c92 6516
a4890dc9 6517 /* Any variable with DECL_VALUE_EXPR needs to be regimplified. */
75a70cf9 6518 if (TREE_CODE (t) == VAR_DECL && data == NULL && DECL_HAS_VALUE_EXPR_P (t))
9f49e155 6519 return t;
6520
6521 if (task_shared_vars
6522 && DECL_P (t)
6523 && bitmap_bit_p (task_shared_vars, DECL_UID (t)))
a4890dc9 6524 return t;
46515c92 6525
a4890dc9 6526 /* If a global variable has been privatized, TREE_CONSTANT on
6527 ADDR_EXPR might be wrong. */
75a70cf9 6528 if (data == NULL && TREE_CODE (t) == ADDR_EXPR)
a4890dc9 6529 recompute_tree_invariant_for_addr_expr (t);
46515c92 6530
a4890dc9 6531 *walk_subtrees = !TYPE_P (t) && !DECL_P (t);
6532 return NULL_TREE;
46515c92 6533}

/* Lower the OpenMP directive statement pointed at by GSI_P in context
   CTX, dispatching on the GIMPLE statement code and regimplifying any
   operands that variable replacement may have invalidated.  */

static void
lower_omp_1 (gimple_stmt_iterator *gsi_p, omp_context *ctx)
{
  gimple stmt = gsi_stmt (*gsi_p);
  struct walk_stmt_info wi;

  if (gimple_has_location (stmt))
    input_location = gimple_location (stmt);

  if (task_shared_vars)
    memset (&wi, '\0', sizeof (wi));

  /* If we have issued syntax errors, avoid doing any heavy lifting.
     Just replace the OpenMP directives with a NOP to avoid
     confusing RTL expansion.  */
  if (seen_error () && is_gimple_omp (stmt))
    {
      gsi_replace (gsi_p, gimple_build_nop (), true);
      return;
    }

  switch (gimple_code (stmt))
    {
    case GIMPLE_COND:
      if ((ctx || task_shared_vars)
	  && (walk_tree (gimple_cond_lhs_ptr (stmt), lower_omp_regimplify_p,
			 ctx ? NULL : &wi, NULL)
	      || walk_tree (gimple_cond_rhs_ptr (stmt), lower_omp_regimplify_p,
			    ctx ? NULL : &wi, NULL)))
	gimple_regimplify_operands (stmt, gsi_p);
      break;
    case GIMPLE_CATCH:
      lower_omp (gimple_catch_handler (stmt), ctx);
      break;
    case GIMPLE_EH_FILTER:
      lower_omp (gimple_eh_filter_failure (stmt), ctx);
      break;
    case GIMPLE_TRY:
      lower_omp (gimple_try_eval (stmt), ctx);
      lower_omp (gimple_try_cleanup (stmt), ctx);
      break;
    case GIMPLE_BIND:
      lower_omp (gimple_bind_body (stmt), ctx);
      break;
    case GIMPLE_OMP_PARALLEL:
    case GIMPLE_OMP_TASK:
      ctx = maybe_lookup_ctx (stmt);
      lower_omp_taskreg (gsi_p, ctx);
      break;
    case GIMPLE_OMP_FOR:
      ctx = maybe_lookup_ctx (stmt);
      gcc_assert (ctx);
      lower_omp_for (gsi_p, ctx);
      break;
    case GIMPLE_OMP_SECTIONS:
      ctx = maybe_lookup_ctx (stmt);
      gcc_assert (ctx);
      lower_omp_sections (gsi_p, ctx);
      break;
    case GIMPLE_OMP_SINGLE:
      ctx = maybe_lookup_ctx (stmt);
      gcc_assert (ctx);
      lower_omp_single (gsi_p, ctx);
      break;
    case GIMPLE_OMP_MASTER:
      ctx = maybe_lookup_ctx (stmt);
      gcc_assert (ctx);
      lower_omp_master (gsi_p, ctx);
      break;
    case GIMPLE_OMP_ORDERED:
      ctx = maybe_lookup_ctx (stmt);
      gcc_assert (ctx);
      lower_omp_ordered (gsi_p, ctx);
      break;
    case GIMPLE_OMP_CRITICAL:
      ctx = maybe_lookup_ctx (stmt);
      gcc_assert (ctx);
      lower_omp_critical (gsi_p, ctx);
      break;
    case GIMPLE_OMP_ATOMIC_LOAD:
      if ((ctx || task_shared_vars)
	  && walk_tree (gimple_omp_atomic_load_rhs_ptr (stmt),
			lower_omp_regimplify_p, ctx ? NULL : &wi, NULL))
	gimple_regimplify_operands (stmt, gsi_p);
      break;
    default:
      if ((ctx || task_shared_vars)
	  && walk_gimple_op (stmt, lower_omp_regimplify_p,
			     ctx ? NULL : &wi))
	gimple_regimplify_operands (stmt, gsi_p);
      break;
    }
}

/* Lower all OpenMP directives found in the statement sequence BODY
   within context CTX.  */

static void
lower_omp (gimple_seq body, omp_context *ctx)
{
  location_t saved_location = input_location;
  gimple_stmt_iterator gsi;
  for (gsi = gsi_start (body); !gsi_end_p (gsi); gsi_next (&gsi))
    lower_omp_1 (&gsi, ctx);
  input_location = saved_location;
}
\f
/* Main entry point.  */

static unsigned int
execute_lower_omp (void)
{
  gimple_seq body;

  /* This pass always runs, to provide PROP_gimple_lomp.
     But there is nothing to do unless -fopenmp is given.  */
  if (flag_openmp == 0)
    return 0;

  all_contexts = splay_tree_new (splay_tree_compare_pointers, 0,
				 delete_omp_context);

  body = gimple_body (current_function_decl);
  scan_omp (body, NULL);
  gcc_assert (taskreg_nesting_level == 0);

  if (all_contexts->root)
    {
      struct gimplify_ctx gctx;

      if (task_shared_vars)
	push_gimplify_context (&gctx);
      lower_omp (body, NULL);
      if (task_shared_vars)
	pop_gimplify_context (NULL);
    }

  if (all_contexts)
    {
      splay_tree_delete (all_contexts);
      all_contexts = NULL;
    }
  BITMAP_FREE (task_shared_vars);
  return 0;
}

struct gimple_opt_pass pass_lower_omp =
{
 {
  GIMPLE_PASS,
  "omplower",				/* name */
  NULL,					/* gate */
  execute_lower_omp,			/* execute */
  NULL,					/* sub */
  NULL,					/* next */
  0,					/* static_pass_number */
  TV_NONE,				/* tv_id */
  PROP_gimple_any,			/* properties_required */
  PROP_gimple_lomp,			/* properties_provided */
  0,					/* properties_destroyed */
  0,					/* todo_flags_start */
  TODO_dump_func			/* todo_flags_finish */
 }
};
\f
/* The following is a utility to diagnose OpenMP structured block violations.
   It is not part of the "omplower" pass, as that's invoked too late.  It
   should be invoked by the respective front ends after gimplification.  */

static splay_tree all_labels;

/* Check for mismatched contexts and generate an error if needed.  Return
   true if an error is detected.  */

static bool
diagnose_sb_0 (gimple_stmt_iterator *gsi_p,
	       gimple branch_ctx, gimple label_ctx)
{
  if (label_ctx == branch_ctx)
    return false;

  /* Previously we kept track of the label's entire context in
     diagnose_sb_[12] so we could traverse it and issue a correct
     "exit" or "enter" error message upon a structured block violation.

     We built the context by building a list with tree_cons'ing, but
     there is no easy counterpart in gimple tuples.  It seems like far
     too much work for issuing exit/enter error messages.  If someone
     really misses the distinct error message... patches welcome.  */

#if 0
  /* Try to avoid confusing the user by producing an error message
     with correct "exit" or "enter" verbiage.  We prefer "exit"
     unless we can show that LABEL_CTX is nested within BRANCH_CTX.  */
  if (branch_ctx == NULL)
    exit_p = false;
  else
    {
      while (label_ctx)
	{
	  if (TREE_VALUE (label_ctx) == branch_ctx)
	    {
	      exit_p = false;
	      break;
	    }
	  label_ctx = TREE_CHAIN (label_ctx);
	}
    }

  if (exit_p)
    error ("invalid exit from OpenMP structured block");
  else
    error ("invalid entry to OpenMP structured block");
#endif

  /* If it's obvious we have an invalid entry, be specific about the error.  */
  if (branch_ctx == NULL)
    error ("invalid entry to OpenMP structured block");
  else
    /* Otherwise, be vague and lazy, but efficient.  */
    error ("invalid branch to/from an OpenMP structured block");

  gsi_replace (gsi_p, gimple_build_nop (), false);
  return true;
}
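
/* For example, the following (hypothetical) function branches from
   outside into an OpenMP structured block; the goto's context is NULL
   while the label's recorded context is the GIMPLE_OMP_PARALLEL
   statement, so diagnose_sb_0 reports "invalid entry to OpenMP
   structured block":

	void foo (void)
	{
	  goto lab;
	#pragma omp parallel
	  {
	  lab:
	    ;
	  }
	}
   */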

/* Pass 1: Create a minimal tree of OpenMP structured blocks, and record
   where each label is found.  */

static tree
diagnose_sb_1 (gimple_stmt_iterator *gsi_p, bool *handled_ops_p,
	       struct walk_stmt_info *wi)
{
  gimple context = (gimple) wi->info;
  gimple inner_context;
  gimple stmt = gsi_stmt (*gsi_p);

  *handled_ops_p = true;

  switch (gimple_code (stmt))
    {
    WALK_SUBSTMTS;

    case GIMPLE_OMP_PARALLEL:
    case GIMPLE_OMP_TASK:
    case GIMPLE_OMP_SECTIONS:
    case GIMPLE_OMP_SINGLE:
    case GIMPLE_OMP_SECTION:
    case GIMPLE_OMP_MASTER:
    case GIMPLE_OMP_ORDERED:
    case GIMPLE_OMP_CRITICAL:
      /* The minimal context here is just the current OMP construct.  */
      inner_context = stmt;
      wi->info = inner_context;
      walk_gimple_seq (gimple_omp_body (stmt), diagnose_sb_1, NULL, wi);
      wi->info = context;
      break;

    case GIMPLE_OMP_FOR:
      inner_context = stmt;
      wi->info = inner_context;
      /* gimple_omp_for_{index,initial,final} are all DECLs; no need to
	 walk them.  */
      walk_gimple_seq (gimple_omp_for_pre_body (stmt),
		       diagnose_sb_1, NULL, wi);
      walk_gimple_seq (gimple_omp_body (stmt), diagnose_sb_1, NULL, wi);
      wi->info = context;
      break;

    case GIMPLE_LABEL:
      splay_tree_insert (all_labels, (splay_tree_key) gimple_label_label (stmt),
			 (splay_tree_value) context);
      break;

    default:
      break;
    }

  return NULL_TREE;
}
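
/* As a sketch (with a hypothetical function bar): after pass 1 has
   walked

	void bar (void)
	{
	#pragma omp single
	  {
	  lab:
	    ;
	  }
	}

   all_labels maps the LABEL_DECL for "lab" to the enclosing
   GIMPLE_OMP_SINGLE statement, which pass 2 then compares against the
   context of every branch targeting "lab".  */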

/* Pass 2: Check each branch and see if its context differs from that of
   the destination label's context.  */

static tree
diagnose_sb_2 (gimple_stmt_iterator *gsi_p, bool *handled_ops_p,
	       struct walk_stmt_info *wi)
{
  gimple context = (gimple) wi->info;
  splay_tree_node n;
  gimple stmt = gsi_stmt (*gsi_p);

  *handled_ops_p = true;

  switch (gimple_code (stmt))
    {
    WALK_SUBSTMTS;

    case GIMPLE_OMP_PARALLEL:
    case GIMPLE_OMP_TASK:
    case GIMPLE_OMP_SECTIONS:
    case GIMPLE_OMP_SINGLE:
    case GIMPLE_OMP_SECTION:
    case GIMPLE_OMP_MASTER:
    case GIMPLE_OMP_ORDERED:
    case GIMPLE_OMP_CRITICAL:
      wi->info = stmt;
      walk_gimple_seq (gimple_omp_body (stmt), diagnose_sb_2, NULL, wi);
      wi->info = context;
      break;

    case GIMPLE_OMP_FOR:
      wi->info = stmt;
      /* gimple_omp_for_{index,initial,final} are all DECLs; no need to
	 walk them.  */
      walk_gimple_seq (gimple_omp_for_pre_body (stmt),
		       diagnose_sb_2, NULL, wi);
      walk_gimple_seq (gimple_omp_body (stmt), diagnose_sb_2, NULL, wi);
      wi->info = context;
      break;

    case GIMPLE_COND:
      {
	tree lab = gimple_cond_true_label (stmt);
	if (lab)
	  {
	    n = splay_tree_lookup (all_labels,
				   (splay_tree_key) lab);
	    diagnose_sb_0 (gsi_p, context,
			   n ? (gimple) n->value : NULL);
	  }
	lab = gimple_cond_false_label (stmt);
	if (lab)
	  {
	    n = splay_tree_lookup (all_labels,
				   (splay_tree_key) lab);
	    diagnose_sb_0 (gsi_p, context,
			   n ? (gimple) n->value : NULL);
	  }
      }
      break;

    case GIMPLE_GOTO:
      {
	tree lab = gimple_goto_dest (stmt);
	if (TREE_CODE (lab) != LABEL_DECL)
	  break;

	n = splay_tree_lookup (all_labels, (splay_tree_key) lab);
	diagnose_sb_0 (gsi_p, context, n ? (gimple) n->value : NULL);
      }
      break;

    case GIMPLE_SWITCH:
      {
	unsigned int i;
	for (i = 0; i < gimple_switch_num_labels (stmt); ++i)
	  {
	    tree lab = CASE_LABEL (gimple_switch_label (stmt, i));
	    n = splay_tree_lookup (all_labels, (splay_tree_key) lab);
	    if (n && diagnose_sb_0 (gsi_p, context, (gimple) n->value))
	      break;
	  }
      }
      break;

    case GIMPLE_RETURN:
      diagnose_sb_0 (gsi_p, context, NULL);
      break;

    default:
      break;
    }

  return NULL_TREE;
}
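
/* Note the GIMPLE_RETURN case above: a return statement nested inside
   an OpenMP construct always exits the structured block, so it is
   checked against a NULL label context.  E.g. the (hypothetical)

	int baz (void)
	{
	#pragma omp master
	  return 0;
	}

   is diagnosed as "invalid branch to/from an OpenMP structured
   block".  */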

/* Entry point for the structured block diagnostic: walk the current
   function's body twice, first recording each label's context, then
   checking every branch against the destination label's context.  */

static unsigned int
diagnose_omp_structured_block_errors (void)
{
  struct walk_stmt_info wi;
  gimple_seq body = gimple_body (current_function_decl);

  all_labels = splay_tree_new (splay_tree_compare_pointers, 0, 0);

  memset (&wi, 0, sizeof (wi));
  walk_gimple_seq (body, diagnose_sb_1, NULL, &wi);

  memset (&wi, 0, sizeof (wi));
  wi.want_locations = true;
  walk_gimple_seq (body, diagnose_sb_2, NULL, &wi);

  splay_tree_delete (all_labels);
  all_labels = NULL;

  return 0;
}

/* Gate: the diagnostic is only needed when OpenMP is enabled.  */

static bool
gate_diagnose_omp_blocks (void)
{
  return flag_openmp != 0;
}

struct gimple_opt_pass pass_diagnose_omp_blocks =
{
 {
  GIMPLE_PASS,
  "*diagnose_omp_blocks",		/* name */
  gate_diagnose_omp_blocks,		/* gate */
  diagnose_omp_structured_block_errors,	/* execute */
  NULL,					/* sub */
  NULL,					/* next */
  0,					/* static_pass_number */
  TV_NONE,				/* tv_id */
  PROP_gimple_any,			/* properties_required */
  0,					/* properties_provided */
  0,					/* properties_destroyed */
  0,					/* todo_flags_start */
  0,					/* todo_flags_finish */
 }
};

#include "gt-omp-low.h"