/* Lowering pass for OpenMP directives.  Converts OpenMP directives
   into explicit calls to the runtime library (libgomp) and data
   marshalling to implement data sharing and copying clauses.
   Contributed by Diego Novillo <dnovillo@redhat.com>

   Copyright (C) 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012
   Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
#include "rtl.h"
#include "gimple.h"
#include "tree-iterator.h"
#include "tree-inline.h"
#include "langhooks.h"
#include "diagnostic-core.h"
#include "tree-flow.h"
#include "timevar.h"
#include "flags.h"
#include "function.h"
#include "expr.h"
#include "tree-pass.h"
#include "ggc.h"
#include "except.h"
#include "splay-tree.h"
#include "optabs.h"
#include "cfgloop.h"


/* Lowering of OpenMP parallel and workshare constructs proceeds in two
   phases.  The first phase scans the function looking for OMP statements
   and then for variables that must be replaced to satisfy data sharing
   clauses.  The second phase expands code for the constructs, as well as
   re-gimplifying things when variables have been replaced with complex
   expressions.

   Final code generation is done by pass_expand_omp.  The flowgraph is
   scanned for parallel regions which are then moved to a new
   function, to be invoked by the thread library.  */
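/* Illustration of the overall scheme (a sketch, not generated verbatim):

       #pragma omp parallel shared (x)
         x++;

   is outlined into a child function and eventually expanded into roughly

       .omp_data_o.x = x;
       __builtin_GOMP_parallel_start (foo._omp_fn.0, &.omp_data_o, 0);
       foo._omp_fn.0 (&.omp_data_o);
       __builtin_GOMP_parallel_end ();
       x = .omp_data_o.x;

   where foo._omp_fn.0 reads and writes x through the .omp_data_s
   record (the name foo._omp_fn.0 is representative).  */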
/* Context structure.  Used to store information about each parallel
   directive in the code.  */

typedef struct omp_context
{
  /* This field must be at the beginning, as we do "inheritance":  Some
     callback functions for tree-inline.c (e.g., omp_copy_decl)
     receive a copy_body_data pointer that is up-casted to an
     omp_context pointer.  */
  copy_body_data cb;

  /* The tree of contexts corresponding to the encountered constructs.  */
  struct omp_context *outer;
  gimple stmt;

  /* Map variables to fields in a structure that allows communication
     between sending and receiving threads.  */
  splay_tree field_map;
  tree record_type;
  tree sender_decl;
  tree receiver_decl;

  /* These are used just by task contexts, if task firstprivate fn is
     needed.  srecord_type is used to communicate from the thread
     that encountered the task construct to task firstprivate fn,
     record_type is allocated by GOMP_task, initialized by task firstprivate
     fn and passed to the task body fn.  */
  splay_tree sfield_map;
  tree srecord_type;

  /* A chain of variables to add to the top-level block surrounding the
     construct.  In the case of a parallel, this is in the child function.  */
  tree block_vars;

  /* What to do with variables with implicitly determined sharing
     attributes.  */
  enum omp_clause_default_kind default_kind;

  /* Nesting depth of this context.  Used to beautify error messages
     about invalid gotos.  The outermost ctx is depth 1, with depth 0
     being reserved for the main body of the function.  */
  int depth;

  /* True if this parallel directive is nested within another.  */
  bool is_nested;
} omp_context;

struct omp_for_data_loop
{
  tree v, n1, n2, step;
  enum tree_code cond_code;
};

/* A structure describing the main elements of a parallel loop.  */

struct omp_for_data
{
  struct omp_for_data_loop loop;
  tree chunk_size;
  gimple for_stmt;
  tree pre, iter_type;
  int collapse;
  bool have_nowait, have_ordered;
  enum omp_clause_schedule_kind sched_kind;
  struct omp_for_data_loop *loops;
};
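/* For illustration: given

       #pragma omp for schedule (dynamic, 4) collapse (2)

   extract_omp_for_data below records sched_kind
   = OMP_CLAUSE_SCHEDULE_DYNAMIC, chunk_size = 4 and collapse = 2; the
   two normalized loops are stored in *loops and the collapsed
   iteration space in fd->loop.  */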

static splay_tree all_contexts;
static int taskreg_nesting_level;
struct omp_region *root_omp_region;
static bitmap task_shared_vars;

static void scan_omp (gimple_seq *, omp_context *);
static tree scan_omp_1_op (tree *, int *, void *);

#define WALK_SUBSTMTS  \
    case GIMPLE_BIND: \
    case GIMPLE_TRY: \
    case GIMPLE_CATCH: \
    case GIMPLE_EH_FILTER: \
    case GIMPLE_TRANSACTION: \
      /* The sub-statements for these should be walked.  */ \
      *handled_ops_p = false; \
      break;

/* Convenience function for calling scan_omp_1_op on tree operands.  */

static inline tree
scan_omp_op (tree *tp, omp_context *ctx)
{
  struct walk_stmt_info wi;

  memset (&wi, 0, sizeof (wi));
  wi.info = ctx;
  wi.want_locations = true;

  return walk_tree (tp, scan_omp_1_op, &wi, NULL);
}

static void lower_omp (gimple_seq *, omp_context *);
static tree lookup_decl_in_outer_ctx (tree, omp_context *);
static tree maybe_lookup_decl_in_outer_ctx (tree, omp_context *);

/* Find an OpenMP clause of type KIND within CLAUSES.  */

tree
find_omp_clause (tree clauses, enum omp_clause_code kind)
{
  for (; clauses ; clauses = OMP_CLAUSE_CHAIN (clauses))
    if (OMP_CLAUSE_CODE (clauses) == kind)
      return clauses;

  return NULL_TREE;
}

/* Return true if CTX is for an omp parallel.  */

static inline bool
is_parallel_ctx (omp_context *ctx)
{
  return gimple_code (ctx->stmt) == GIMPLE_OMP_PARALLEL;
}


/* Return true if CTX is for an omp task.  */

static inline bool
is_task_ctx (omp_context *ctx)
{
  return gimple_code (ctx->stmt) == GIMPLE_OMP_TASK;
}


/* Return true if CTX is for an omp parallel or omp task.  */

static inline bool
is_taskreg_ctx (omp_context *ctx)
{
  return gimple_code (ctx->stmt) == GIMPLE_OMP_PARALLEL
	 || gimple_code (ctx->stmt) == GIMPLE_OMP_TASK;
}


/* Return true if REGION is a combined parallel+workshare region.  */

static inline bool
is_combined_parallel (struct omp_region *region)
{
  return region->is_combined_parallel;
}


/* Extract the header elements of parallel loop FOR_STMT and store
   them into *FD.  */

static void
extract_omp_for_data (gimple for_stmt, struct omp_for_data *fd,
		      struct omp_for_data_loop *loops)
{
  tree t, var, *collapse_iter, *collapse_count;
  tree count = NULL_TREE, iter_type = long_integer_type_node;
  struct omp_for_data_loop *loop;
  int i;
  struct omp_for_data_loop dummy_loop;
  location_t loc = gimple_location (for_stmt);

  fd->for_stmt = for_stmt;
  fd->pre = NULL;
  fd->collapse = gimple_omp_for_collapse (for_stmt);
  if (fd->collapse > 1)
    fd->loops = loops;
  else
    fd->loops = &fd->loop;

  fd->have_nowait = fd->have_ordered = false;
  fd->sched_kind = OMP_CLAUSE_SCHEDULE_STATIC;
  fd->chunk_size = NULL_TREE;
  collapse_iter = NULL;
  collapse_count = NULL;

  for (t = gimple_omp_for_clauses (for_stmt); t ; t = OMP_CLAUSE_CHAIN (t))
    switch (OMP_CLAUSE_CODE (t))
      {
      case OMP_CLAUSE_NOWAIT:
	fd->have_nowait = true;
	break;
      case OMP_CLAUSE_ORDERED:
	fd->have_ordered = true;
	break;
      case OMP_CLAUSE_SCHEDULE:
	fd->sched_kind = OMP_CLAUSE_SCHEDULE_KIND (t);
	fd->chunk_size = OMP_CLAUSE_SCHEDULE_CHUNK_EXPR (t);
	break;
      case OMP_CLAUSE_COLLAPSE:
	if (fd->collapse > 1)
	  {
	    collapse_iter = &OMP_CLAUSE_COLLAPSE_ITERVAR (t);
	    collapse_count = &OMP_CLAUSE_COLLAPSE_COUNT (t);
	  }
      default:
	break;
      }

  /* FIXME: for now map schedule(auto) to schedule(static).
     There should be analysis to determine whether all iterations
     are approximately the same amount of work (then schedule(static)
     is best) or if it varies (then schedule(dynamic,N) is better).  */
  if (fd->sched_kind == OMP_CLAUSE_SCHEDULE_AUTO)
    {
      fd->sched_kind = OMP_CLAUSE_SCHEDULE_STATIC;
      gcc_assert (fd->chunk_size == NULL);
    }
  gcc_assert (fd->collapse == 1 || collapse_iter != NULL);
  if (fd->sched_kind == OMP_CLAUSE_SCHEDULE_RUNTIME)
    gcc_assert (fd->chunk_size == NULL);
  else if (fd->chunk_size == NULL)
    {
      /* We only need to compute a default chunk size for ordered
	 static loops and dynamic loops.  */
      if (fd->sched_kind != OMP_CLAUSE_SCHEDULE_STATIC
	  || fd->have_ordered
	  || fd->collapse > 1)
	fd->chunk_size = (fd->sched_kind == OMP_CLAUSE_SCHEDULE_STATIC)
			 ? integer_zero_node : integer_one_node;
    }

  for (i = 0; i < fd->collapse; i++)
    {
      if (fd->collapse == 1)
	loop = &fd->loop;
      else if (loops != NULL)
	loop = loops + i;
      else
	loop = &dummy_loop;

      loop->v = gimple_omp_for_index (for_stmt, i);
      gcc_assert (SSA_VAR_P (loop->v));
      gcc_assert (TREE_CODE (TREE_TYPE (loop->v)) == INTEGER_TYPE
		  || TREE_CODE (TREE_TYPE (loop->v)) == POINTER_TYPE);
      var = TREE_CODE (loop->v) == SSA_NAME ? SSA_NAME_VAR (loop->v) : loop->v;
      loop->n1 = gimple_omp_for_initial (for_stmt, i);

      loop->cond_code = gimple_omp_for_cond (for_stmt, i);
      loop->n2 = gimple_omp_for_final (for_stmt, i);
      switch (loop->cond_code)
	{
	case LT_EXPR:
	case GT_EXPR:
	  break;
	case LE_EXPR:
	  if (POINTER_TYPE_P (TREE_TYPE (loop->n2)))
	    loop->n2 = fold_build_pointer_plus_hwi_loc (loc, loop->n2, 1);
	  else
	    loop->n2 = fold_build2_loc (loc,
					PLUS_EXPR, TREE_TYPE (loop->n2), loop->n2,
					build_int_cst (TREE_TYPE (loop->n2), 1));
	  loop->cond_code = LT_EXPR;
	  break;
	case GE_EXPR:
	  if (POINTER_TYPE_P (TREE_TYPE (loop->n2)))
	    loop->n2 = fold_build_pointer_plus_hwi_loc (loc, loop->n2, -1);
	  else
	    loop->n2 = fold_build2_loc (loc,
					MINUS_EXPR, TREE_TYPE (loop->n2), loop->n2,
					build_int_cst (TREE_TYPE (loop->n2), 1));
	  loop->cond_code = GT_EXPR;
	  break;
	default:
	  gcc_unreachable ();
	}

      t = gimple_omp_for_incr (for_stmt, i);
      gcc_assert (TREE_OPERAND (t, 0) == var);
      switch (TREE_CODE (t))
	{
	case PLUS_EXPR:
	  loop->step = TREE_OPERAND (t, 1);
	  break;
	case POINTER_PLUS_EXPR:
	  loop->step = fold_convert (ssizetype, TREE_OPERAND (t, 1));
	  break;
	case MINUS_EXPR:
	  loop->step = TREE_OPERAND (t, 1);
	  loop->step = fold_build1_loc (loc,
					NEGATE_EXPR, TREE_TYPE (loop->step),
					loop->step);
	  break;
	default:
	  gcc_unreachable ();
	}

      if (iter_type != long_long_unsigned_type_node)
	{
	  if (POINTER_TYPE_P (TREE_TYPE (loop->v)))
	    iter_type = long_long_unsigned_type_node;
	  else if (TYPE_UNSIGNED (TREE_TYPE (loop->v))
		   && TYPE_PRECISION (TREE_TYPE (loop->v))
		      >= TYPE_PRECISION (iter_type))
	    {
	      tree n;

	      if (loop->cond_code == LT_EXPR)
		n = fold_build2_loc (loc,
				     PLUS_EXPR, TREE_TYPE (loop->v),
				     loop->n2, loop->step);
	      else
		n = loop->n1;
	      if (TREE_CODE (n) != INTEGER_CST
		  || tree_int_cst_lt (TYPE_MAX_VALUE (iter_type), n))
		iter_type = long_long_unsigned_type_node;
	    }
	  else if (TYPE_PRECISION (TREE_TYPE (loop->v))
		   > TYPE_PRECISION (iter_type))
	    {
	      tree n1, n2;

	      if (loop->cond_code == LT_EXPR)
		{
		  n1 = loop->n1;
		  n2 = fold_build2_loc (loc,
					PLUS_EXPR, TREE_TYPE (loop->v),
					loop->n2, loop->step);
		}
	      else
		{
		  n1 = fold_build2_loc (loc,
					MINUS_EXPR, TREE_TYPE (loop->v),
					loop->n2, loop->step);
		  n2 = loop->n1;
		}
	      if (TREE_CODE (n1) != INTEGER_CST
		  || TREE_CODE (n2) != INTEGER_CST
		  || !tree_int_cst_lt (TYPE_MIN_VALUE (iter_type), n1)
		  || !tree_int_cst_lt (n2, TYPE_MAX_VALUE (iter_type)))
		iter_type = long_long_unsigned_type_node;
	    }
	}

      if (collapse_count && *collapse_count == NULL)
	{
	  if ((i == 0 || count != NULL_TREE)
	      && TREE_CODE (TREE_TYPE (loop->v)) == INTEGER_TYPE
	      && TREE_CONSTANT (loop->n1)
	      && TREE_CONSTANT (loop->n2)
	      && TREE_CODE (loop->step) == INTEGER_CST)
	    {
	      tree itype = TREE_TYPE (loop->v);

	      if (POINTER_TYPE_P (itype))
		itype = signed_type_for (itype);
	      t = build_int_cst (itype, (loop->cond_code == LT_EXPR ? -1 : 1));
	      t = fold_build2_loc (loc,
				   PLUS_EXPR, itype,
				   fold_convert_loc (loc, itype, loop->step), t);
	      t = fold_build2_loc (loc, PLUS_EXPR, itype, t,
				   fold_convert_loc (loc, itype, loop->n2));
	      t = fold_build2_loc (loc, MINUS_EXPR, itype, t,
				   fold_convert_loc (loc, itype, loop->n1));
	      if (TYPE_UNSIGNED (itype) && loop->cond_code == GT_EXPR)
		t = fold_build2_loc (loc, TRUNC_DIV_EXPR, itype,
				     fold_build1_loc (loc, NEGATE_EXPR, itype, t),
				     fold_build1_loc (loc, NEGATE_EXPR, itype,
						      fold_convert_loc (loc, itype,
									loop->step)));
	      else
		t = fold_build2_loc (loc, TRUNC_DIV_EXPR, itype, t,
				     fold_convert_loc (loc, itype, loop->step));
	      t = fold_convert_loc (loc, long_long_unsigned_type_node, t);
	      if (count != NULL_TREE)
		count = fold_build2_loc (loc,
					 MULT_EXPR, long_long_unsigned_type_node,
					 count, t);
	      else
		count = t;
	      if (TREE_CODE (count) != INTEGER_CST)
		count = NULL_TREE;
	    }
	  else
	    count = NULL_TREE;
	}
    }

  if (count)
    {
      if (!tree_int_cst_lt (count, TYPE_MAX_VALUE (long_integer_type_node)))
	iter_type = long_long_unsigned_type_node;
      else
	iter_type = long_integer_type_node;
    }
  else if (collapse_iter && *collapse_iter != NULL)
    iter_type = TREE_TYPE (*collapse_iter);
  fd->iter_type = iter_type;
  if (collapse_iter && *collapse_iter == NULL)
    *collapse_iter = create_tmp_var (iter_type, ".iter");
  if (collapse_count && *collapse_count == NULL)
    {
      if (count)
	*collapse_count = fold_convert_loc (loc, iter_type, count);
      else
	*collapse_count = create_tmp_var (iter_type, ".count");
    }

  if (fd->collapse > 1)
    {
      fd->loop.v = *collapse_iter;
      fd->loop.n1 = build_int_cst (TREE_TYPE (fd->loop.v), 0);
      fd->loop.n2 = *collapse_count;
      fd->loop.step = build_int_cst (TREE_TYPE (fd->loop.v), 1);
      fd->loop.cond_code = LT_EXPR;
    }
}
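/* Worked example of the normalization above: the loop

       #pragma omp for
       for (i = 0; i <= N; i++)

   is recorded as n1 = 0, n2 = N + 1, cond_code = LT_EXPR, step = 1.
   For constant bounds each loop contributes

       (n2 - n1 + step + (cond_code == LT_EXPR ? -1 : 1)) / step

   iterations to COUNT, i.e. the distance divided by the step, rounded
   up.  */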


/* Given two blocks PAR_ENTRY_BB and WS_ENTRY_BB such that WS_ENTRY_BB
   is the immediate dominator of PAR_ENTRY_BB, return true if there
   are no data dependencies that would prevent expanding the parallel
   directive at PAR_ENTRY_BB as a combined parallel+workshare region.

   When expanding a combined parallel+workshare region, the call to
   the child function may need additional arguments in the case of
   GIMPLE_OMP_FOR regions.  In some cases, these arguments are
   computed out of variables passed in from the parent to the child
   via 'struct .omp_data_s'.  For instance:

	#pragma omp parallel for schedule (guided, i * 4)
	for (j ...)

   is lowered into:

	# BLOCK 2 (PAR_ENTRY_BB)
	.omp_data_o.i = i;
	#pragma omp parallel [child fn: bar.omp_fn.0 ( ..., D.1598)]

	# BLOCK 3 (WS_ENTRY_BB)
	.omp_data_i = &.omp_data_o;
	D.1667 = .omp_data_i->i;
	D.1598 = D.1667 * 4;
	#pragma omp for schedule (guided, D.1598)

   When we outline the parallel region, the call to the child function
   'bar.omp_fn.0' will need the value D.1598 in its argument list, but
   that value is computed *after* the call site.  So, in principle we
   cannot do the transformation.

   To see whether the code in WS_ENTRY_BB blocks the combined
   parallel+workshare call, we collect all the variables used in the
   GIMPLE_OMP_FOR header and check whether they appear on the LHS of
   any statement in WS_ENTRY_BB.  If so, then we cannot emit the
   combined call.

   FIXME.  If we had the SSA form built at this point, we could merely
   hoist the code in block 3 into block 2 and be done with it.  But at
   this point we don't have dataflow information and though we could
   hack something up here, it is really not worth the aggravation.  */

static bool
workshare_safe_to_combine_p (basic_block ws_entry_bb)
{
  struct omp_for_data fd;
  gimple ws_stmt = last_stmt (ws_entry_bb);

  if (gimple_code (ws_stmt) == GIMPLE_OMP_SECTIONS)
    return true;

  gcc_assert (gimple_code (ws_stmt) == GIMPLE_OMP_FOR);

  extract_omp_for_data (ws_stmt, &fd, NULL);

  if (fd.collapse > 1 && TREE_CODE (fd.loop.n2) != INTEGER_CST)
    return false;
  if (fd.iter_type != long_integer_type_node)
    return false;

  /* FIXME.  We give up too easily here.  If any of these arguments
     are not constants, they will likely involve variables that have
     been mapped into fields of .omp_data_s for sharing with the child
     function.  With appropriate data flow, it would be possible to
     see through this.  */
  if (!is_gimple_min_invariant (fd.loop.n1)
      || !is_gimple_min_invariant (fd.loop.n2)
      || !is_gimple_min_invariant (fd.loop.step)
      || (fd.chunk_size && !is_gimple_min_invariant (fd.chunk_size)))
    return false;

  return true;
}


/* Collect additional arguments needed to emit a combined
   parallel+workshare call.  WS_STMT is the workshare directive being
   expanded.  */

static VEC(tree,gc) *
get_ws_args_for (gimple ws_stmt)
{
  tree t;
  location_t loc = gimple_location (ws_stmt);
  VEC(tree,gc) *ws_args;

  if (gimple_code (ws_stmt) == GIMPLE_OMP_FOR)
    {
      struct omp_for_data fd;

      extract_omp_for_data (ws_stmt, &fd, NULL);

      ws_args = VEC_alloc (tree, gc, 3 + (fd.chunk_size != 0));

      t = fold_convert_loc (loc, long_integer_type_node, fd.loop.n1);
      VEC_quick_push (tree, ws_args, t);

      t = fold_convert_loc (loc, long_integer_type_node, fd.loop.n2);
      VEC_quick_push (tree, ws_args, t);

      t = fold_convert_loc (loc, long_integer_type_node, fd.loop.step);
      VEC_quick_push (tree, ws_args, t);

      if (fd.chunk_size)
	{
	  t = fold_convert_loc (loc, long_integer_type_node, fd.chunk_size);
	  VEC_quick_push (tree, ws_args, t);
	}

      return ws_args;
    }
  else if (gimple_code (ws_stmt) == GIMPLE_OMP_SECTIONS)
    {
      /* Number of sections is equal to the number of edges from the
	 GIMPLE_OMP_SECTIONS_SWITCH statement, except for the one to
	 the exit of the sections region.  */
      basic_block bb = single_succ (gimple_bb (ws_stmt));
      t = build_int_cst (unsigned_type_node, EDGE_COUNT (bb->succs) - 1);
      ws_args = VEC_alloc (tree, gc, 1);
      VEC_quick_push (tree, ws_args, t);
      return ws_args;
    }

  gcc_unreachable ();
}
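/* Illustration: for a combined

       #pragma omp parallel for schedule (dynamic, 4)

   the vector built above holds (n1, n2, step, 4); these values are
   later appended to the arguments of the combined library call
   (GOMP_parallel_loop_dynamic_start in this example) when the
   parallel region is expanded.  */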


/* Discover whether REGION is a combined parallel+workshare region.  */

static void
determine_parallel_type (struct omp_region *region)
{
  basic_block par_entry_bb, par_exit_bb;
  basic_block ws_entry_bb, ws_exit_bb;

  if (region == NULL || region->inner == NULL
      || region->exit == NULL || region->inner->exit == NULL
      || region->inner->cont == NULL)
    return;

  /* We only support parallel+for and parallel+sections.  */
  if (region->type != GIMPLE_OMP_PARALLEL
      || (region->inner->type != GIMPLE_OMP_FOR
	  && region->inner->type != GIMPLE_OMP_SECTIONS))
    return;

  /* Check for perfect nesting PAR_ENTRY_BB -> WS_ENTRY_BB and
     WS_EXIT_BB -> PAR_EXIT_BB.  */
  par_entry_bb = region->entry;
  par_exit_bb = region->exit;
  ws_entry_bb = region->inner->entry;
  ws_exit_bb = region->inner->exit;

  if (single_succ (par_entry_bb) == ws_entry_bb
      && single_succ (ws_exit_bb) == par_exit_bb
      && workshare_safe_to_combine_p (ws_entry_bb)
      && (gimple_omp_parallel_combined_p (last_stmt (par_entry_bb))
	  || (last_and_only_stmt (ws_entry_bb)
	      && last_and_only_stmt (par_exit_bb))))
    {
      gimple ws_stmt = last_stmt (ws_entry_bb);

      if (region->inner->type == GIMPLE_OMP_FOR)
	{
	  /* If this is a combined parallel loop, we need to determine
	     whether or not to use the combined library calls.  There
	     are two cases where we do not apply the transformation:
	     static loops and any kind of ordered loop.  In the first
	     case, we already open code the loop so there is no need
	     to do anything else.  In the latter case, the combined
	     parallel loop call would still need extra synchronization
	     to implement ordered semantics, so there would not be any
	     gain in using the combined call.  */
	  tree clauses = gimple_omp_for_clauses (ws_stmt);
	  tree c = find_omp_clause (clauses, OMP_CLAUSE_SCHEDULE);
	  if (c == NULL
	      || OMP_CLAUSE_SCHEDULE_KIND (c) == OMP_CLAUSE_SCHEDULE_STATIC
	      || find_omp_clause (clauses, OMP_CLAUSE_ORDERED))
	    {
	      region->is_combined_parallel = false;
	      region->inner->is_combined_parallel = false;
	      return;
	    }
	}

      region->is_combined_parallel = true;
      region->inner->is_combined_parallel = true;
      region->ws_args = get_ws_args_for (ws_stmt);
    }
}


/* Return true if EXPR is variable sized.  */

static inline bool
is_variable_sized (const_tree expr)
{
  return !TREE_CONSTANT (TYPE_SIZE_UNIT (TREE_TYPE (expr)));
}

/* Return true if DECL is a reference type.  */

static inline bool
is_reference (tree decl)
{
  return lang_hooks.decls.omp_privatize_by_reference (decl);
}

/* Lookup variables in the decl or field splay trees.  The "maybe" form
   allows for the variable form to not have been entered, otherwise we
   assert that the variable must have been entered.  */

static inline tree
lookup_decl (tree var, omp_context *ctx)
{
  tree *n;
  n = (tree *) pointer_map_contains (ctx->cb.decl_map, var);
  return *n;
}

static inline tree
maybe_lookup_decl (const_tree var, omp_context *ctx)
{
  tree *n;
  n = (tree *) pointer_map_contains (ctx->cb.decl_map, var);
  return n ? *n : NULL_TREE;
}

static inline tree
lookup_field (tree var, omp_context *ctx)
{
  splay_tree_node n;
  n = splay_tree_lookup (ctx->field_map, (splay_tree_key) var);
  return (tree) n->value;
}

static inline tree
lookup_sfield (tree var, omp_context *ctx)
{
  splay_tree_node n;
  n = splay_tree_lookup (ctx->sfield_map
			 ? ctx->sfield_map : ctx->field_map,
			 (splay_tree_key) var);
  return (tree) n->value;
}

static inline tree
maybe_lookup_field (tree var, omp_context *ctx)
{
  splay_tree_node n;
  n = splay_tree_lookup (ctx->field_map, (splay_tree_key) var);
  return n ? (tree) n->value : NULL_TREE;
}

/* Return true if DECL should be copied by pointer.  SHARED_CTX is
   the parallel context if DECL is to be shared.  */

static bool
use_pointer_for_field (tree decl, omp_context *shared_ctx)
{
  if (AGGREGATE_TYPE_P (TREE_TYPE (decl)))
    return true;

  /* We can only use copy-in/copy-out semantics for shared variables
     when we know the value is not accessible from an outer scope.  */
  if (shared_ctx)
    {
      /* ??? Trivially accessible from anywhere.  But why would we even
	 be passing an address in this case?  Should we simply assert
	 this to be false, or should we have a cleanup pass that removes
	 these from the list of mappings?  */
      if (TREE_STATIC (decl) || DECL_EXTERNAL (decl))
	return true;

      /* For variables with DECL_HAS_VALUE_EXPR_P set, we cannot tell
	 without analyzing the expression whether or not its location
	 is accessible to anyone else.  In the case of nested parallel
	 regions it certainly may be.  */
      if (TREE_CODE (decl) != RESULT_DECL && DECL_HAS_VALUE_EXPR_P (decl))
	return true;

      /* Do not use copy-in/copy-out for variables that have their
	 address taken.  */
      if (TREE_ADDRESSABLE (decl))
	return true;

      /* Disallow copy-in/out in nested parallel if
	 decl is shared in outer parallel, otherwise
	 each thread could store the shared variable
	 in its own copy-in location, making the
	 variable no longer really shared.  */
      if (!TREE_READONLY (decl) && shared_ctx->is_nested)
	{
	  omp_context *up;

	  for (up = shared_ctx->outer; up; up = up->outer)
	    if (is_taskreg_ctx (up) && maybe_lookup_decl (decl, up))
	      break;

	  if (up)
	    {
	      tree c;

	      for (c = gimple_omp_taskreg_clauses (up->stmt);
		   c; c = OMP_CLAUSE_CHAIN (c))
		if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_SHARED
		    && OMP_CLAUSE_DECL (c) == decl)
		  break;

	      if (c)
		goto maybe_mark_addressable_and_ret;
	    }
	}

      /* For tasks avoid using copy-in/out, unless they are readonly
	 (in which case just copy-in is used).  As tasks can be
	 deferred or executed in a different thread, when GOMP_task
	 returns, the task hasn't necessarily terminated.  */
      if (!TREE_READONLY (decl) && is_task_ctx (shared_ctx))
	{
	  tree outer;
	maybe_mark_addressable_and_ret:
	  outer = maybe_lookup_decl_in_outer_ctx (decl, shared_ctx);
	  if (is_gimple_reg (outer))
	    {
	      /* Taking address of OUTER in lower_send_shared_vars
		 might need regimplification of everything that uses the
		 variable.  */
	      if (!task_shared_vars)
		task_shared_vars = BITMAP_ALLOC (NULL);
	      bitmap_set_bit (task_shared_vars, DECL_UID (outer));
	      TREE_ADDRESSABLE (outer) = 1;
	    }
	  return true;
	}
    }

  return false;
}
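/* Illustration of the rules above, for hypothetical decls:

       int a;           a is copied in and out by value
       int b[10];       b is an aggregate, always passed by pointer
       int c, *p = &c;  c is TREE_ADDRESSABLE, passed by pointer

   and in a task context even a plain scalar like a is passed by
   pointer unless it is readonly, since the task may run after the
   spawning code has moved on.  */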

/* Create a new VAR_DECL and copy information from VAR to it.  */

tree
copy_var_decl (tree var, tree name, tree type)
{
  tree copy = build_decl (DECL_SOURCE_LOCATION (var), VAR_DECL, name, type);

  TREE_ADDRESSABLE (copy) = TREE_ADDRESSABLE (var);
  TREE_THIS_VOLATILE (copy) = TREE_THIS_VOLATILE (var);
  DECL_GIMPLE_REG_P (copy) = DECL_GIMPLE_REG_P (var);
  DECL_ARTIFICIAL (copy) = DECL_ARTIFICIAL (var);
  DECL_IGNORED_P (copy) = DECL_IGNORED_P (var);
  DECL_CONTEXT (copy) = DECL_CONTEXT (var);
  TREE_USED (copy) = 1;
  DECL_SEEN_IN_BIND_EXPR_P (copy) = 1;

  return copy;
}

/* Construct a new automatic decl similar to VAR.  */

static tree
omp_copy_decl_2 (tree var, tree name, tree type, omp_context *ctx)
{
  tree copy = copy_var_decl (var, name, type);

  DECL_CONTEXT (copy) = current_function_decl;
  DECL_CHAIN (copy) = ctx->block_vars;
  ctx->block_vars = copy;

  return copy;
}

static tree
omp_copy_decl_1 (tree var, omp_context *ctx)
{
  return omp_copy_decl_2 (var, DECL_NAME (var), TREE_TYPE (var), ctx);
}

/* Build COMPONENT_REF and set TREE_THIS_VOLATILE and TREE_READONLY on it
   as appropriate.  */

static tree
omp_build_component_ref (tree obj, tree field)
{
  tree ret = build3 (COMPONENT_REF, TREE_TYPE (field), obj, field, NULL);
  if (TREE_THIS_VOLATILE (field))
    TREE_THIS_VOLATILE (ret) |= 1;
  if (TREE_READONLY (field))
    TREE_READONLY (ret) |= 1;
  return ret;
}

/* Build tree nodes to access the field for VAR on the receiver side.  */

static tree
build_receiver_ref (tree var, bool by_ref, omp_context *ctx)
{
  tree x, field = lookup_field (var, ctx);

  /* If the receiver record type was remapped in the child function,
     remap the field into the new record type.  */
  x = maybe_lookup_field (field, ctx);
  if (x != NULL)
    field = x;

  x = build_simple_mem_ref (ctx->receiver_decl);
  x = omp_build_component_ref (x, field);
  if (by_ref)
    x = build_simple_mem_ref (x);

  return x;
}

/* Build tree nodes to access VAR in the scope outer to CTX.  In the case
   of a parallel, this is a component reference; for workshare constructs
   this is some variable.  */

static tree
build_outer_var_ref (tree var, omp_context *ctx)
{
  tree x;

  if (is_global_var (maybe_lookup_decl_in_outer_ctx (var, ctx)))
    x = var;
  else if (is_variable_sized (var))
    {
      x = TREE_OPERAND (DECL_VALUE_EXPR (var), 0);
      x = build_outer_var_ref (x, ctx);
      x = build_simple_mem_ref (x);
    }
  else if (is_taskreg_ctx (ctx))
    {
      bool by_ref = use_pointer_for_field (var, NULL);
      x = build_receiver_ref (var, by_ref, ctx);
    }
  else if (ctx->outer)
    x = lookup_decl (var, ctx->outer);
  else if (is_reference (var))
    /* This can happen with orphaned constructs.  If var is reference, it is
       possible it is shared and as such valid.  */
    x = var;
  else
    gcc_unreachable ();

  if (is_reference (var))
    x = build_simple_mem_ref (x);

  return x;
}

/* Build tree nodes to access the field for VAR on the sender side.  */

static tree
build_sender_ref (tree var, omp_context *ctx)
{
  tree field = lookup_sfield (var, ctx);
  return omp_build_component_ref (ctx->sender_decl, field);
}
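/* Illustration: for a shared variable x passed by reference, the
   sender side stores '.omp_data_o.x = &x' and the receiver side reads
   '*(.omp_data_i->x)'; when x is passed by value, the extra
   dereference built for BY_REF above is simply omitted.  */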

/* Add a new field for VAR inside the structure CTX->SENDER_DECL.  */
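/* MASK is a bit mask (as used by the code below): bit 0 requests a
   field in CTX->RECORD_TYPE and an entry in CTX->FIELD_MAP, bit 1 the
   same for the task SRECORD_TYPE and SFIELD_MAP; e.g. mask 3 installs
   the field in both records.  */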

static void
install_var_field (tree var, bool by_ref, int mask, omp_context *ctx)
{
  tree field, type, sfield = NULL_TREE;

  gcc_assert ((mask & 1) == 0
	      || !splay_tree_lookup (ctx->field_map, (splay_tree_key) var));
  gcc_assert ((mask & 2) == 0 || !ctx->sfield_map
	      || !splay_tree_lookup (ctx->sfield_map, (splay_tree_key) var));

  type = TREE_TYPE (var);
  if (by_ref)
    type = build_pointer_type (type);
  else if ((mask & 3) == 1 && is_reference (var))
    type = TREE_TYPE (type);

  field = build_decl (DECL_SOURCE_LOCATION (var),
		      FIELD_DECL, DECL_NAME (var), type);

  /* Remember what variable this field was created for.  This does have a
     side effect of making dwarf2out ignore this member, so for helpful
     debugging we clear it later in delete_omp_context.  */
  DECL_ABSTRACT_ORIGIN (field) = var;
  if (type == TREE_TYPE (var))
    {
      DECL_ALIGN (field) = DECL_ALIGN (var);
      DECL_USER_ALIGN (field) = DECL_USER_ALIGN (var);
      TREE_THIS_VOLATILE (field) = TREE_THIS_VOLATILE (var);
    }
  else
    DECL_ALIGN (field) = TYPE_ALIGN (type);

  if ((mask & 3) == 3)
    {
      insert_field_into_struct (ctx->record_type, field);
      if (ctx->srecord_type)
	{
	  sfield = build_decl (DECL_SOURCE_LOCATION (var),
			       FIELD_DECL, DECL_NAME (var), type);
	  DECL_ABSTRACT_ORIGIN (sfield) = var;
	  DECL_ALIGN (sfield) = DECL_ALIGN (field);
	  DECL_USER_ALIGN (sfield) = DECL_USER_ALIGN (field);
	  TREE_THIS_VOLATILE (sfield) = TREE_THIS_VOLATILE (field);
	  insert_field_into_struct (ctx->srecord_type, sfield);
	}
    }
  else
    {
      if (ctx->srecord_type == NULL_TREE)
	{
	  tree t;

	  ctx->srecord_type = lang_hooks.types.make_type (RECORD_TYPE);
	  ctx->sfield_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
	  for (t = TYPE_FIELDS (ctx->record_type); t ; t = TREE_CHAIN (t))
	    {
	      sfield = build_decl (DECL_SOURCE_LOCATION (var),
				   FIELD_DECL, DECL_NAME (t), TREE_TYPE (t));
	      DECL_ABSTRACT_ORIGIN (sfield) = DECL_ABSTRACT_ORIGIN (t);
	      insert_field_into_struct (ctx->srecord_type, sfield);
	      splay_tree_insert (ctx->sfield_map,
				 (splay_tree_key) DECL_ABSTRACT_ORIGIN (t),
				 (splay_tree_value) sfield);
	    }
	}
      sfield = field;
      insert_field_into_struct ((mask & 1) ? ctx->record_type
				: ctx->srecord_type, field);
    }

  if (mask & 1)
    splay_tree_insert (ctx->field_map, (splay_tree_key) var,
		       (splay_tree_value) field);
  if ((mask & 2) && ctx->sfield_map)
    splay_tree_insert (ctx->sfield_map, (splay_tree_key) var,
		       (splay_tree_value) sfield);
}

static tree
install_var_local (tree var, omp_context *ctx)
{
  tree new_var = omp_copy_decl_1 (var, ctx);
  insert_decl_map (&ctx->cb, var, new_var);
  return new_var;
}

/* Adjust the replacement for DECL in CTX for the new context.  This means
   copying the DECL_VALUE_EXPR, and fixing up the type.  */

static void
fixup_remapped_decl (tree decl, omp_context *ctx, bool private_debug)
{
  tree new_decl, size;

  new_decl = lookup_decl (decl, ctx);

  TREE_TYPE (new_decl) = remap_type (TREE_TYPE (decl), &ctx->cb);

  if ((!TREE_CONSTANT (DECL_SIZE (new_decl)) || private_debug)
      && DECL_HAS_VALUE_EXPR_P (decl))
    {
      tree ve = DECL_VALUE_EXPR (decl);
      walk_tree (&ve, copy_tree_body_r, &ctx->cb, NULL);
      SET_DECL_VALUE_EXPR (new_decl, ve);
      DECL_HAS_VALUE_EXPR_P (new_decl) = 1;
    }

  if (!TREE_CONSTANT (DECL_SIZE (new_decl)))
    {
      size = remap_decl (DECL_SIZE (decl), &ctx->cb);
      if (size == error_mark_node)
	size = TYPE_SIZE (TREE_TYPE (new_decl));
      DECL_SIZE (new_decl) = size;

      size = remap_decl (DECL_SIZE_UNIT (decl), &ctx->cb);
      if (size == error_mark_node)
	size = TYPE_SIZE_UNIT (TREE_TYPE (new_decl));
      DECL_SIZE_UNIT (new_decl) = size;
    }
}

/* The callback for remap_decl.  Search all containing contexts for a
   mapping of the variable; this avoids having to duplicate the splay
   tree ahead of time.  We know a mapping doesn't already exist in the
   given context.  Create new mappings to implement default semantics.  */

static tree
omp_copy_decl (tree var, copy_body_data *cb)
{
  omp_context *ctx = (omp_context *) cb;
  tree new_var;

  if (TREE_CODE (var) == LABEL_DECL)
    {
      new_var = create_artificial_label (DECL_SOURCE_LOCATION (var));
      DECL_CONTEXT (new_var) = current_function_decl;
      insert_decl_map (&ctx->cb, var, new_var);
      return new_var;
    }

  while (!is_taskreg_ctx (ctx))
    {
      ctx = ctx->outer;
      if (ctx == NULL)
	return var;
      new_var = maybe_lookup_decl (var, ctx);
      if (new_var)
	return new_var;
    }

  if (is_global_var (var) || decl_function_context (var) != ctx->cb.src_fn)
    return var;

  return error_mark_node;
}


/* Debugging dumps for parallel regions.  */
void dump_omp_region (FILE *, struct omp_region *, int);
void debug_omp_region (struct omp_region *);
void debug_all_omp_regions (void);

/* Dump the parallel region tree rooted at REGION.  */

void
dump_omp_region (FILE *file, struct omp_region *region, int indent)
{
  fprintf (file, "%*sbb %d: %s\n", indent, "", region->entry->index,
	   gimple_code_name[region->type]);

  if (region->inner)
    dump_omp_region (file, region->inner, indent + 4);

  if (region->cont)
    {
      fprintf (file, "%*sbb %d: GIMPLE_OMP_CONTINUE\n", indent, "",
	       region->cont->index);
    }

  if (region->exit)
    fprintf (file, "%*sbb %d: GIMPLE_OMP_RETURN\n", indent, "",
	     region->exit->index);
  else
    fprintf (file, "%*s[no exit marker]\n", indent, "");

  if (region->next)
    dump_omp_region (file, region->next, indent);
}

DEBUG_FUNCTION void
debug_omp_region (struct omp_region *region)
{
  dump_omp_region (stderr, region, 0);
}

DEBUG_FUNCTION void
debug_all_omp_regions (void)
{
  dump_omp_region (stderr, root_omp_region, 0);
}


/* Create a new parallel region starting at BB inside region PARENT.  */

struct omp_region *
new_omp_region (basic_block bb, enum gimple_code type,
		struct omp_region *parent)
{
  struct omp_region *region = XCNEW (struct omp_region);

  region->outer = parent;
  region->entry = bb;
  region->type = type;

  if (parent)
    {
      /* This is a nested region.  Add it to the list of inner
	 regions in PARENT.  */
      region->next = parent->inner;
      parent->inner = region;
    }
  else
    {
      /* This is a toplevel region.  Add it to the list of toplevel
	 regions in ROOT_OMP_REGION.  */
      region->next = root_omp_region;
      root_omp_region = region;
    }

  return region;
}

/* Release the memory associated with the region tree rooted at REGION.  */

static void
free_omp_region_1 (struct omp_region *region)
{
  struct omp_region *i, *n;

  for (i = region->inner; i ; i = n)
    {
      n = i->next;
      free_omp_region_1 (i);
    }

  free (region);
}

/* Release the memory for the entire omp region tree.  */

void
free_omp_regions (void)
{
  struct omp_region *r, *n;
  for (r = root_omp_region; r ; r = n)
    {
      n = r->next;
      free_omp_region_1 (r);
    }
  root_omp_region = NULL;
}
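/* Illustration: a function containing

       #pragma omp parallel
       #pragma omp for

   produces a region tree whose GIMPLE_OMP_PARALLEL root has the
   GIMPLE_OMP_FOR region as its ->inner child; determine_parallel_type
   inspects exactly this shape when looking for combined regions.  */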

/* Create a new context, with OUTER_CTX being the surrounding context.  */

static omp_context *
new_omp_context (gimple stmt, omp_context *outer_ctx)
{
  omp_context *ctx = XCNEW (omp_context);

  splay_tree_insert (all_contexts, (splay_tree_key) stmt,
		     (splay_tree_value) ctx);
  ctx->stmt = stmt;

  if (outer_ctx)
    {
      ctx->outer = outer_ctx;
      ctx->cb = outer_ctx->cb;
      ctx->cb.block = NULL;
      ctx->depth = outer_ctx->depth + 1;
    }
  else
    {
      ctx->cb.src_fn = current_function_decl;
      ctx->cb.dst_fn = current_function_decl;
      ctx->cb.src_node = cgraph_get_node (current_function_decl);
      gcc_checking_assert (ctx->cb.src_node);
      ctx->cb.dst_node = ctx->cb.src_node;
      ctx->cb.src_cfun = cfun;
      ctx->cb.copy_decl = omp_copy_decl;
      ctx->cb.eh_lp_nr = 0;
      ctx->cb.transform_call_graph_edges = CB_CGE_MOVE;
      ctx->depth = 1;
    }

  ctx->cb.decl_map = pointer_map_create ();

  return ctx;
}

static gimple_seq maybe_catch_exception (gimple_seq);

/* Finalize task copyfn.  */

static void
finalize_task_copyfn (gimple task_stmt)
{
  struct function *child_cfun;
  tree child_fn, old_fn;
  gimple_seq seq = NULL, new_seq;
  gimple bind;

  child_fn = gimple_omp_task_copy_fn (task_stmt);
  if (child_fn == NULL_TREE)
    return;

  child_cfun = DECL_STRUCT_FUNCTION (child_fn);

  /* Inform the callgraph about the new function.  */
  DECL_STRUCT_FUNCTION (child_fn)->curr_properties
    = cfun->curr_properties & ~PROP_loops;

  old_fn = current_function_decl;
  push_cfun (child_cfun);
  current_function_decl = child_fn;
  bind = gimplify_body (child_fn, false);
  gimple_seq_add_stmt (&seq, bind);
  new_seq = maybe_catch_exception (seq);
  if (new_seq != seq)
    {
      bind = gimple_build_bind (NULL, new_seq, NULL);
      seq = NULL;
      gimple_seq_add_stmt (&seq, bind);
    }
  gimple_set_body (child_fn, seq);
  pop_cfun ();
  current_function_decl = old_fn;

  cgraph_add_new_function (child_fn, false);
}

/* Destroy an omp_context data structure.  Called through the splay tree
   value delete callback.  */

static void
delete_omp_context (splay_tree_value value)
{
  omp_context *ctx = (omp_context *) value;

  pointer_map_destroy (ctx->cb.decl_map);

  if (ctx->field_map)
    splay_tree_delete (ctx->field_map);
  if (ctx->sfield_map)
    splay_tree_delete (ctx->sfield_map);

  /* We hijacked DECL_ABSTRACT_ORIGIN earlier.  We need to clear it before
     it produces corrupt debug information.  */
  if (ctx->record_type)
    {
      tree t;
      for (t = TYPE_FIELDS (ctx->record_type); t ; t = DECL_CHAIN (t))
	DECL_ABSTRACT_ORIGIN (t) = NULL;
    }
  if (ctx->srecord_type)
    {
      tree t;
      for (t = TYPE_FIELDS (ctx->srecord_type); t ; t = DECL_CHAIN (t))
	DECL_ABSTRACT_ORIGIN (t) = NULL;
    }

  if (is_task_ctx (ctx))
    finalize_task_copyfn (ctx->stmt);

  XDELETE (ctx);
}

/* Fix up RECEIVER_DECL with a type that has been remapped to the child
   context.  */

static void
fixup_child_record_type (omp_context *ctx)
{
  tree f, type = ctx->record_type;

  /* ??? It isn't sufficient to just call remap_type here, because
     variably_modified_type_p doesn't work the way we expect for
     record types.  Testing each field for whether it needs remapping
     and creating a new record by hand works, however.  */
  for (f = TYPE_FIELDS (type); f ; f = DECL_CHAIN (f))
    if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
      break;
  if (f)
    {
      tree name, new_fields = NULL;

      type = lang_hooks.types.make_type (RECORD_TYPE);
      name = DECL_NAME (TYPE_NAME (ctx->record_type));
      name = build_decl (DECL_SOURCE_LOCATION (ctx->receiver_decl),
			 TYPE_DECL, name, type);
      TYPE_NAME (type) = name;

      for (f = TYPE_FIELDS (ctx->record_type); f ; f = DECL_CHAIN (f))
	{
	  tree new_f = copy_node (f);
	  DECL_CONTEXT (new_f) = type;
	  TREE_TYPE (new_f) = remap_type (TREE_TYPE (f), &ctx->cb);
	  DECL_CHAIN (new_f) = new_fields;
	  walk_tree (&DECL_SIZE (new_f), copy_tree_body_r, &ctx->cb, NULL);
	  walk_tree (&DECL_SIZE_UNIT (new_f), copy_tree_body_r,
		     &ctx->cb, NULL);
	  walk_tree (&DECL_FIELD_OFFSET (new_f), copy_tree_body_r,
		     &ctx->cb, NULL);
	  new_fields = new_f;

	  /* Arrange to be able to look up the receiver field
	     given the sender field.  */
	  splay_tree_insert (ctx->field_map, (splay_tree_key) f,
			     (splay_tree_value) new_f);
	}
      TYPE_FIELDS (type) = nreverse (new_fields);
      layout_type (type);
    }

  TREE_TYPE (ctx->receiver_decl) = build_pointer_type (type);
}

/* Instantiate decls as necessary in CTX to satisfy the data sharing
   specified by CLAUSES.  */

static void
scan_sharing_clauses (tree clauses, omp_context *ctx)
{
  tree c, decl;
  bool scan_array_reductions = false;

  for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
    {
      bool by_ref;

      switch (OMP_CLAUSE_CODE (c))
	{
	case OMP_CLAUSE_PRIVATE:
	  decl = OMP_CLAUSE_DECL (c);
	  if (OMP_CLAUSE_PRIVATE_OUTER_REF (c))
	    goto do_private;
	  else if (!is_variable_sized (decl))
	    install_var_local (decl, ctx);
	  break;

	case OMP_CLAUSE_SHARED:
	  gcc_assert (is_taskreg_ctx (ctx));
	  decl = OMP_CLAUSE_DECL (c);
	  gcc_assert (!COMPLETE_TYPE_P (TREE_TYPE (decl))
		      || !is_variable_sized (decl));
	  /* Global variables don't need to be copied,
	     the receiver side will use them directly.  */
	  if (is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx)))
	    break;
	  by_ref = use_pointer_for_field (decl, ctx);
	  if (! TREE_READONLY (decl)
	      || TREE_ADDRESSABLE (decl)
	      || by_ref
	      || is_reference (decl))
	    {
	      install_var_field (decl, by_ref, 3, ctx);
	      install_var_local (decl, ctx);
	      break;
	    }
	  /* We don't need to copy const scalar vars back.  */
	  OMP_CLAUSE_SET_CODE (c, OMP_CLAUSE_FIRSTPRIVATE);
	  goto do_private;

	case OMP_CLAUSE_LASTPRIVATE:
	  /* Let the corresponding firstprivate clause create
	     the variable.  */
	  if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
	    break;
	  /* FALLTHRU */

	case OMP_CLAUSE_FIRSTPRIVATE:
	case OMP_CLAUSE_REDUCTION:
	  decl = OMP_CLAUSE_DECL (c);
	do_private:
	  if (is_variable_sized (decl))
	    {
	      if (is_task_ctx (ctx))
		install_var_field (decl, false, 1, ctx);
	      break;
	    }
	  else if (is_taskreg_ctx (ctx))
	    {
	      bool global
		= is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx));
	      by_ref = use_pointer_for_field (decl, NULL);

	      if (is_task_ctx (ctx)
		  && (global || by_ref || is_reference (decl)))
		{
		  install_var_field (decl, false, 1, ctx);
		  if (!global)
		    install_var_field (decl, by_ref, 2, ctx);
		}
	      else if (!global)
		install_var_field (decl, by_ref, 3, ctx);
	    }
	  install_var_local (decl, ctx);
	  break;

	case OMP_CLAUSE_COPYPRIVATE:
	case OMP_CLAUSE_COPYIN:
	  decl = OMP_CLAUSE_DECL (c);
	  by_ref = use_pointer_for_field (decl, NULL);
	  install_var_field (decl, by_ref, 3, ctx);
	  break;

	case OMP_CLAUSE_DEFAULT:
	  ctx->default_kind = OMP_CLAUSE_DEFAULT_KIND (c);
	  break;

	case OMP_CLAUSE_FINAL:
	case OMP_CLAUSE_IF:
	case OMP_CLAUSE_NUM_THREADS:
	case OMP_CLAUSE_SCHEDULE:
	  if (ctx->outer)
	    scan_omp_op (&OMP_CLAUSE_OPERAND (c, 0), ctx->outer);
	  break;

	case OMP_CLAUSE_NOWAIT:
	case OMP_CLAUSE_ORDERED:
	case OMP_CLAUSE_COLLAPSE:
	case OMP_CLAUSE_UNTIED:
	case OMP_CLAUSE_MERGEABLE:
	  break;

	default:
	  gcc_unreachable ();
	}
    }

  for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
    {
      switch (OMP_CLAUSE_CODE (c))
	{
	case OMP_CLAUSE_LASTPRIVATE:
	  /* Let the corresponding firstprivate clause create
	     the variable.  */
	  if (OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c))
	    scan_array_reductions = true;
	  if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
	    break;
	  /* FALLTHRU */

	case OMP_CLAUSE_PRIVATE:
	case OMP_CLAUSE_FIRSTPRIVATE:
	case OMP_CLAUSE_REDUCTION:
	  decl = OMP_CLAUSE_DECL (c);
	  if (is_variable_sized (decl))
	    install_var_local (decl, ctx);
	  fixup_remapped_decl (decl, ctx,
			       OMP_CLAUSE_CODE (c) == OMP_CLAUSE_PRIVATE
			       && OMP_CLAUSE_PRIVATE_DEBUG (c));
	  if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
	      && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
	    scan_array_reductions = true;
	  break;

	case OMP_CLAUSE_SHARED:
	  decl = OMP_CLAUSE_DECL (c);
	  if (! is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx)))
	    fixup_remapped_decl (decl, ctx, false);
	  break;

	case OMP_CLAUSE_COPYPRIVATE:
	case OMP_CLAUSE_COPYIN:
	case OMP_CLAUSE_DEFAULT:
	case OMP_CLAUSE_IF:
	case OMP_CLAUSE_NUM_THREADS:
	case OMP_CLAUSE_SCHEDULE:
	case OMP_CLAUSE_NOWAIT:
	case OMP_CLAUSE_ORDERED:
	case OMP_CLAUSE_COLLAPSE:
	case OMP_CLAUSE_UNTIED:
	case OMP_CLAUSE_FINAL:
	case OMP_CLAUSE_MERGEABLE:
	  break;

	default:
	  gcc_unreachable ();
	}
    }

  if (scan_array_reductions)
    for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
      if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
	  && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
	{
	  scan_omp (&OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c), ctx);
	  scan_omp (&OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c), ctx);
	}
      else if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE
	       && OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c))
	scan_omp (&OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c), ctx);
}

/* Create a new name for omp child function.  Returns an identifier.  */

static GTY(()) unsigned int tmp_ompfn_id_num;

static tree
create_omp_child_function_name (bool task_copy)
{
  return (clone_function_name (current_function_decl,
			       task_copy ? "_omp_cpyfn" : "_omp_fn"));
}
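/* Within a function foo this yields identifiers along the lines of
   foo._omp_fn.0 or foo._omp_cpyfn.1 (representative spellings; the
   numeric suffix comes from the counter kept by clone_function_name).  */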

/* Build a decl for the omp child function.  It'll not contain a body
   yet, just the bare decl.  */

static void
create_omp_child_function (omp_context *ctx, bool task_copy)
{
  tree decl, type, name, t;

  name = create_omp_child_function_name (task_copy);
  if (task_copy)
    type = build_function_type_list (void_type_node, ptr_type_node,
				     ptr_type_node, NULL_TREE);
  else
    type = build_function_type_list (void_type_node, ptr_type_node, NULL_TREE);

  decl = build_decl (gimple_location (ctx->stmt),
		     FUNCTION_DECL, name, type);

  if (!task_copy)
    ctx->cb.dst_fn = decl;
  else
    gimple_omp_task_set_copy_fn (ctx->stmt, decl);

  TREE_STATIC (decl) = 1;
  TREE_USED (decl) = 1;
  DECL_ARTIFICIAL (decl) = 1;
  DECL_NAMELESS (decl) = 1;
  DECL_IGNORED_P (decl) = 0;
  TREE_PUBLIC (decl) = 0;
  DECL_UNINLINABLE (decl) = 1;
  DECL_EXTERNAL (decl) = 0;
  DECL_CONTEXT (decl) = NULL_TREE;
  DECL_INITIAL (decl) = make_node (BLOCK);

  t = build_decl (DECL_SOURCE_LOCATION (decl),
		  RESULT_DECL, NULL_TREE, void_type_node);
  DECL_ARTIFICIAL (t) = 1;
  DECL_IGNORED_P (t) = 1;
  DECL_CONTEXT (t) = decl;
  DECL_RESULT (decl) = t;

  t = build_decl (DECL_SOURCE_LOCATION (decl),
		  PARM_DECL, get_identifier (".omp_data_i"), ptr_type_node);
  DECL_ARTIFICIAL (t) = 1;
  DECL_NAMELESS (t) = 1;
  DECL_ARG_TYPE (t) = ptr_type_node;
  DECL_CONTEXT (t) = current_function_decl;
  TREE_USED (t) = 1;
  DECL_ARGUMENTS (decl) = t;
  if (!task_copy)
    ctx->receiver_decl = t;
  else
    {
      t = build_decl (DECL_SOURCE_LOCATION (decl),
		      PARM_DECL, get_identifier (".omp_data_o"),
		      ptr_type_node);
      DECL_ARTIFICIAL (t) = 1;
      DECL_NAMELESS (t) = 1;
      DECL_ARG_TYPE (t) = ptr_type_node;
      DECL_CONTEXT (t) = current_function_decl;
      TREE_USED (t) = 1;
      TREE_ADDRESSABLE (t) = 1;
      DECL_CHAIN (t) = DECL_ARGUMENTS (decl);
      DECL_ARGUMENTS (decl) = t;
    }

  /* Allocate memory for the function structure.  The call to
     allocate_struct_function clobbers CFUN, so we need to restore
     it afterward.  */
  push_struct_function (decl);
  cfun->function_end_locus = gimple_location (ctx->stmt);
  pop_cfun ();
}


/* Scan an OpenMP parallel directive.  */

static void
scan_omp_parallel (gimple_stmt_iterator *gsi, omp_context *outer_ctx)
{
  omp_context *ctx;
  tree name;
  gimple stmt = gsi_stmt (*gsi);

  /* Ignore parallel directives with empty bodies, unless there
     are copyin clauses.  */
  if (optimize > 0
      && empty_body_p (gimple_omp_body (stmt))
      && find_omp_clause (gimple_omp_parallel_clauses (stmt),
			  OMP_CLAUSE_COPYIN) == NULL)
    {
      gsi_replace (gsi, gimple_build_nop (), false);
      return;
    }

  ctx = new_omp_context (stmt, outer_ctx);
  if (taskreg_nesting_level > 1)
    ctx->is_nested = true;
  ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
  ctx->default_kind = OMP_CLAUSE_DEFAULT_SHARED;
  ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
  name = create_tmp_var_name (".omp_data_s");
  name = build_decl (gimple_location (stmt),
		     TYPE_DECL, name, ctx->record_type);
  DECL_ARTIFICIAL (name) = 1;
  DECL_NAMELESS (name) = 1;
  TYPE_NAME (ctx->record_type) = name;
  create_omp_child_function (ctx, false);
  gimple_omp_parallel_set_child_fn (stmt, ctx->cb.dst_fn);

  scan_sharing_clauses (gimple_omp_parallel_clauses (stmt), ctx);
  scan_omp (gimple_omp_body_ptr (stmt), ctx);

  if (TYPE_FIELDS (ctx->record_type) == NULL)
    ctx->record_type = ctx->receiver_decl = NULL;
  else
    {
      layout_type (ctx->record_type);
      fixup_child_record_type (ctx);
    }
}

/* Scan an OpenMP task directive.  */

static void
scan_omp_task (gimple_stmt_iterator *gsi, omp_context *outer_ctx)
{
  omp_context *ctx;
  tree name, t;
  gimple stmt = gsi_stmt (*gsi);
  location_t loc = gimple_location (stmt);

  /* Ignore task directives with empty bodies.  */
  if (optimize > 0
      && empty_body_p (gimple_omp_body (stmt)))
    {
      gsi_replace (gsi, gimple_build_nop (), false);
      return;
    }

  ctx = new_omp_context (stmt, outer_ctx);
  if (taskreg_nesting_level > 1)
    ctx->is_nested = true;
  ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
  ctx->default_kind = OMP_CLAUSE_DEFAULT_SHARED;
  ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
  name = create_tmp_var_name (".omp_data_s");
  name = build_decl (gimple_location (stmt),
		     TYPE_DECL, name, ctx->record_type);
  DECL_ARTIFICIAL (name) = 1;
  DECL_NAMELESS (name) = 1;
  TYPE_NAME (ctx->record_type) = name;
  create_omp_child_function (ctx, false);
  gimple_omp_task_set_child_fn (stmt, ctx->cb.dst_fn);

  scan_sharing_clauses (gimple_omp_task_clauses (stmt), ctx);

  if (ctx->srecord_type)
    {
      name = create_tmp_var_name (".omp_data_a");
      name = build_decl (gimple_location (stmt),
			 TYPE_DECL, name, ctx->srecord_type);
      DECL_ARTIFICIAL (name) = 1;
      DECL_NAMELESS (name) = 1;
      TYPE_NAME (ctx->srecord_type) = name;
      create_omp_child_function (ctx, true);
    }

  scan_omp (gimple_omp_body_ptr (stmt), ctx);

  if (TYPE_FIELDS (ctx->record_type) == NULL)
    {
      ctx->record_type = ctx->receiver_decl = NULL;
      t = build_int_cst (long_integer_type_node, 0);
      gimple_omp_task_set_arg_size (stmt, t);
      t = build_int_cst (long_integer_type_node, 1);
      gimple_omp_task_set_arg_align (stmt, t);
    }
  else
    {
      tree *p, vla_fields = NULL_TREE, *q = &vla_fields;
      /* Move VLA fields to the end.  */
      p = &TYPE_FIELDS (ctx->record_type);
      while (*p)
	if (!TYPE_SIZE_UNIT (TREE_TYPE (*p))
	    || ! TREE_CONSTANT (TYPE_SIZE_UNIT (TREE_TYPE (*p))))
	  {
	    *q = *p;
	    *p = TREE_CHAIN (*p);
	    TREE_CHAIN (*q) = NULL_TREE;
	    q = &TREE_CHAIN (*q);
	  }
	else
	  p = &DECL_CHAIN (*p);
      *p = vla_fields;
      layout_type (ctx->record_type);
      fixup_child_record_type (ctx);
      if (ctx->srecord_type)
	layout_type (ctx->srecord_type);
      t = fold_convert_loc (loc, long_integer_type_node,
			    TYPE_SIZE_UNIT (ctx->record_type));
      gimple_omp_task_set_arg_size (stmt, t);
      t = build_int_cst (long_integer_type_node,
			 TYPE_ALIGN_UNIT (ctx->record_type));
      gimple_omp_task_set_arg_align (stmt, t);
    }
}
1762
1e8e9920 1763
773c5ba7 1764/* Scan an OpenMP loop directive. */
1e8e9920 1765
1766static void
75a70cf9 1767scan_omp_for (gimple stmt, omp_context *outer_ctx)
1e8e9920 1768{
773c5ba7 1769 omp_context *ctx;
75a70cf9 1770 size_t i;
1e8e9920 1771
773c5ba7 1772 ctx = new_omp_context (stmt, outer_ctx);
1e8e9920 1773
75a70cf9 1774 scan_sharing_clauses (gimple_omp_for_clauses (stmt), ctx);
1e8e9920 1775
ab129075 1776 scan_omp (gimple_omp_for_pre_body_ptr (stmt), ctx);
75a70cf9 1777 for (i = 0; i < gimple_omp_for_collapse (stmt); i++)
fd6481cf 1778 {
75a70cf9 1779 scan_omp_op (gimple_omp_for_index_ptr (stmt, i), ctx);
1780 scan_omp_op (gimple_omp_for_initial_ptr (stmt, i), ctx);
1781 scan_omp_op (gimple_omp_for_final_ptr (stmt, i), ctx);
1782 scan_omp_op (gimple_omp_for_incr_ptr (stmt, i), ctx);
fd6481cf 1783 }
ab129075 1784 scan_omp (gimple_omp_body_ptr (stmt), ctx);
1e8e9920 1785}
1786
1787/* Scan an OpenMP sections directive. */
1788
1789static void
75a70cf9 1790scan_omp_sections (gimple stmt, omp_context *outer_ctx)
1e8e9920 1791{
1e8e9920 1792 omp_context *ctx;
1793
1794 ctx = new_omp_context (stmt, outer_ctx);
75a70cf9 1795 scan_sharing_clauses (gimple_omp_sections_clauses (stmt), ctx);
ab129075 1796 scan_omp (gimple_omp_body_ptr (stmt), ctx);
1e8e9920 1797}
1798
1799/* Scan an OpenMP single directive. */
1800
1801static void
75a70cf9 1802scan_omp_single (gimple stmt, omp_context *outer_ctx)
1e8e9920 1803{
1e8e9920 1804 omp_context *ctx;
1805 tree name;
1806
1807 ctx = new_omp_context (stmt, outer_ctx);
1808 ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
1809 ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
1810 name = create_tmp_var_name (".omp_copy_s");
e60a6f7b 1811 name = build_decl (gimple_location (stmt),
1812 TYPE_DECL, name, ctx->record_type);
1e8e9920 1813 TYPE_NAME (ctx->record_type) = name;
1814
75a70cf9 1815 scan_sharing_clauses (gimple_omp_single_clauses (stmt), ctx);
ab129075 1816 scan_omp (gimple_omp_body_ptr (stmt), ctx);
1e8e9920 1817
1818 if (TYPE_FIELDS (ctx->record_type) == NULL)
1819 ctx->record_type = NULL;
1820 else
1821 layout_type (ctx->record_type);
1822}
1823
1e8e9920 1824
c1d127dd 1825/* Check OpenMP nesting restrictions. */
ab129075 1826static bool
1827check_omp_nesting_restrictions (gimple stmt, omp_context *ctx)
c1d127dd 1828{
75a70cf9 1829 switch (gimple_code (stmt))
c1d127dd 1830 {
75a70cf9 1831 case GIMPLE_OMP_FOR:
1832 case GIMPLE_OMP_SECTIONS:
1833 case GIMPLE_OMP_SINGLE:
1834 case GIMPLE_CALL:
c1d127dd 1835 for (; ctx != NULL; ctx = ctx->outer)
75a70cf9 1836 switch (gimple_code (ctx->stmt))
c1d127dd 1837 {
75a70cf9 1838 case GIMPLE_OMP_FOR:
1839 case GIMPLE_OMP_SECTIONS:
1840 case GIMPLE_OMP_SINGLE:
1841 case GIMPLE_OMP_ORDERED:
1842 case GIMPLE_OMP_MASTER:
1843 case GIMPLE_OMP_TASK:
1844 if (is_gimple_call (stmt))
fd6481cf 1845 {
ab129075 1846 error_at (gimple_location (stmt),
1847 "barrier region may not be closely nested inside "
1848 "of work-sharing, critical, ordered, master or "
1849 "explicit task region");
1850 return false;
fd6481cf 1851 }
ab129075 1852 error_at (gimple_location (stmt),
1853 "work-sharing region may not be closely nested inside "
1854 "of work-sharing, critical, ordered, master or explicit "
1855 "task region");
1856 return false;
75a70cf9 1857 case GIMPLE_OMP_PARALLEL:
ab129075 1858 return true;
c1d127dd 1859 default:
1860 break;
1861 }
1862 break;
75a70cf9 1863 case GIMPLE_OMP_MASTER:
c1d127dd 1864 for (; ctx != NULL; ctx = ctx->outer)
75a70cf9 1865 switch (gimple_code (ctx->stmt))
c1d127dd 1866 {
75a70cf9 1867 case GIMPLE_OMP_FOR:
1868 case GIMPLE_OMP_SECTIONS:
1869 case GIMPLE_OMP_SINGLE:
1870 case GIMPLE_OMP_TASK:
ab129075 1871 error_at (gimple_location (stmt),
1872 "master region may not be closely nested inside "
1873 "of work-sharing or explicit task region");
1874 return false;
75a70cf9 1875 case GIMPLE_OMP_PARALLEL:
ab129075 1876 return true;
c1d127dd 1877 default:
1878 break;
1879 }
1880 break;
75a70cf9 1881 case GIMPLE_OMP_ORDERED:
c1d127dd 1882 for (; ctx != NULL; ctx = ctx->outer)
75a70cf9 1883 switch (gimple_code (ctx->stmt))
c1d127dd 1884 {
75a70cf9 1885 case GIMPLE_OMP_CRITICAL:
1886 case GIMPLE_OMP_TASK:
ab129075 1887 error_at (gimple_location (stmt),
1888 "ordered region may not be closely nested inside "
1889 "of critical or explicit task region");
1890 return false;
75a70cf9 1891 case GIMPLE_OMP_FOR:
1892 if (find_omp_clause (gimple_omp_for_clauses (ctx->stmt),
c1d127dd 1893 OMP_CLAUSE_ORDERED) == NULL)
ab129075 1894 {
1895 error_at (gimple_location (stmt),
1896 "ordered region must be closely nested inside "
c1d127dd 1897 "a loop region with an ordered clause");
ab129075 1898 return false;
1899 }
1900 return true;
75a70cf9 1901 case GIMPLE_OMP_PARALLEL:
ab129075 1902 return true;
c1d127dd 1903 default:
1904 break;
1905 }
1906 break;
75a70cf9 1907 case GIMPLE_OMP_CRITICAL:
c1d127dd 1908 for (; ctx != NULL; ctx = ctx->outer)
75a70cf9 1909 if (gimple_code (ctx->stmt) == GIMPLE_OMP_CRITICAL
1910 && (gimple_omp_critical_name (stmt)
1911 == gimple_omp_critical_name (ctx->stmt)))
c1d127dd 1912 {
ab129075 1913 error_at (gimple_location (stmt),
1914 "critical region may not be nested inside a critical "
1915 "region with the same name");
1916 return false;
c1d127dd 1917 }
1918 break;
1919 default:
1920 break;
1921 }
ab129075 1922 return true;
c1d127dd 1923}
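/* An illustrative pair of nestings (hypothetical user code, not from
   this file) showing what the walk above diagnoses:

     #pragma omp critical
     {
       #pragma omp ordered    // rejected: ordered may not be closely
         ;                    // nested inside a critical region
     }

     #pragma omp for ordered
     for (i = 0; i < n; i++)
       {
         #pragma omp ordered  // accepted: closely nested in a loop
           ;                  // region with an ordered clause
       }
*/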
1924
1925
75a70cf9 1926/* Helper function for scan_omp.
 1927
 1928 Callback for walk_tree or operands in walk_gimple_stmt, used to
 1929 scan for OpenMP directives in TP. */
1e8e9920 1930
1931static tree
75a70cf9 1932scan_omp_1_op (tree *tp, int *walk_subtrees, void *data)
1e8e9920 1933{
4077bf7a 1934 struct walk_stmt_info *wi = (struct walk_stmt_info *) data;
1935 omp_context *ctx = (omp_context *) wi->info;
1e8e9920 1936 tree t = *tp;
1937
75a70cf9 1938 switch (TREE_CODE (t))
1939 {
1940 case VAR_DECL:
1941 case PARM_DECL:
1942 case LABEL_DECL:
1943 case RESULT_DECL:
1944 if (ctx)
1945 *tp = remap_decl (t, &ctx->cb);
1946 break;
1947
1948 default:
1949 if (ctx && TYPE_P (t))
1950 *tp = remap_type (t, &ctx->cb);
1951 else if (!DECL_P (t))
7cf869dd 1952 {
1953 *walk_subtrees = 1;
1954 if (ctx)
182cf5a9 1955 {
1956 tree tem = remap_type (TREE_TYPE (t), &ctx->cb);
1957 if (tem != TREE_TYPE (t))
1958 {
1959 if (TREE_CODE (t) == INTEGER_CST)
1960 *tp = build_int_cst_wide (tem,
1961 TREE_INT_CST_LOW (t),
1962 TREE_INT_CST_HIGH (t));
1963 else
1964 TREE_TYPE (t) = tem;
1965 }
1966 }
7cf869dd 1967 }
75a70cf9 1968 break;
1969 }
1970
1971 return NULL_TREE;
1972}
1973
1974
1975/* Helper function for scan_omp.
1976
1977 Callback for walk_gimple_stmt used to scan for OpenMP directives in
1978 the current statement in GSI. */
1979
1980static tree
1981scan_omp_1_stmt (gimple_stmt_iterator *gsi, bool *handled_ops_p,
1982 struct walk_stmt_info *wi)
1983{
1984 gimple stmt = gsi_stmt (*gsi);
1985 omp_context *ctx = (omp_context *) wi->info;
1986
1987 if (gimple_has_location (stmt))
1988 input_location = gimple_location (stmt);
1e8e9920 1989
c1d127dd 1990 /* Check the OpenMP nesting restrictions. */
fd6481cf 1991 if (ctx != NULL)
1992 {
ab129075 1993 bool remove = false;
75a70cf9 1994 if (is_gimple_omp (stmt))
ab129075 1995 remove = !check_omp_nesting_restrictions (stmt, ctx);
75a70cf9 1996 else if (is_gimple_call (stmt))
fd6481cf 1997 {
75a70cf9 1998 tree fndecl = gimple_call_fndecl (stmt);
fd6481cf 1999 if (fndecl && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL
2000 && DECL_FUNCTION_CODE (fndecl) == BUILT_IN_GOMP_BARRIER)
ab129075 2001 remove = !check_omp_nesting_restrictions (stmt, ctx);
2002 }
2003 if (remove)
2004 {
2005 stmt = gimple_build_nop ();
2006 gsi_replace (gsi, stmt, false);
fd6481cf 2007 }
2008 }
c1d127dd 2009
75a70cf9 2010 *handled_ops_p = true;
2011
2012 switch (gimple_code (stmt))
1e8e9920 2013 {
75a70cf9 2014 case GIMPLE_OMP_PARALLEL:
fd6481cf 2015 taskreg_nesting_level++;
75a70cf9 2016 scan_omp_parallel (gsi, ctx);
fd6481cf 2017 taskreg_nesting_level--;
2018 break;
2019
75a70cf9 2020 case GIMPLE_OMP_TASK:
fd6481cf 2021 taskreg_nesting_level++;
75a70cf9 2022 scan_omp_task (gsi, ctx);
fd6481cf 2023 taskreg_nesting_level--;
1e8e9920 2024 break;
2025
75a70cf9 2026 case GIMPLE_OMP_FOR:
2027 scan_omp_for (stmt, ctx);
1e8e9920 2028 break;
2029
75a70cf9 2030 case GIMPLE_OMP_SECTIONS:
2031 scan_omp_sections (stmt, ctx);
1e8e9920 2032 break;
2033
75a70cf9 2034 case GIMPLE_OMP_SINGLE:
2035 scan_omp_single (stmt, ctx);
1e8e9920 2036 break;
2037
75a70cf9 2038 case GIMPLE_OMP_SECTION:
2039 case GIMPLE_OMP_MASTER:
2040 case GIMPLE_OMP_ORDERED:
2041 case GIMPLE_OMP_CRITICAL:
2042 ctx = new_omp_context (stmt, ctx);
ab129075 2043 scan_omp (gimple_omp_body_ptr (stmt), ctx);
1e8e9920 2044 break;
2045
75a70cf9 2046 case GIMPLE_BIND:
1e8e9920 2047 {
2048 tree var;
1e8e9920 2049
75a70cf9 2050 *handled_ops_p = false;
2051 if (ctx)
1767a056 2052 for (var = gimple_bind_vars (stmt); var ; var = DECL_CHAIN (var))
75a70cf9 2053 insert_decl_map (&ctx->cb, var, var);
1e8e9920 2054 }
2055 break;
1e8e9920 2056 default:
75a70cf9 2057 *handled_ops_p = false;
1e8e9920 2058 break;
2059 }
2060
2061 return NULL_TREE;
2062}
2063
2064
75a70cf9 2065/* Scan all the statements starting at the current statement. CTX
2066 contains context information about the OpenMP directives and
2067 clauses found during the scan. */
1e8e9920 2068
2069static void
ab129075 2070scan_omp (gimple_seq *body_p, omp_context *ctx)
1e8e9920 2071{
2072 location_t saved_location;
2073 struct walk_stmt_info wi;
2074
2075 memset (&wi, 0, sizeof (wi));
1e8e9920 2076 wi.info = ctx;
1e8e9920 2077 wi.want_locations = true;
2078
2079 saved_location = input_location;
ab129075 2080 walk_gimple_seq_mod (body_p, scan_omp_1_stmt, scan_omp_1_op, &wi);
1e8e9920 2081 input_location = saved_location;
2082}
2083\f
2084/* Re-gimplification and code generation routines. */
2085
2086/* Build a call to GOMP_barrier. */
2087
79acaae1 2088static tree
2089build_omp_barrier (void)
1e8e9920 2090{
b9a16870 2091 return build_call_expr (builtin_decl_explicit (BUILT_IN_GOMP_BARRIER), 0);
1e8e9920 2092}
2093
2094/* If a context was created for STMT when it was scanned, return it. */
2095
2096static omp_context *
75a70cf9 2097maybe_lookup_ctx (gimple stmt)
1e8e9920 2098{
2099 splay_tree_node n;
2100 n = splay_tree_lookup (all_contexts, (splay_tree_key) stmt);
2101 return n ? (omp_context *) n->value : NULL;
2102}
2103
773c5ba7 2104
2105/* Find the mapping for DECL in CTX or the immediately enclosing
2106 context that has a mapping for DECL.
2107
2108 If CTX is a nested parallel directive, we may have to use the decl
2109 mappings created in CTX's parent context. Suppose that we have the
 2110 following parallel nesting (variable UIDs shown for clarity):
2111
2112 iD.1562 = 0;
2113 #omp parallel shared(iD.1562) -> outer parallel
2114 iD.1562 = iD.1562 + 1;
2115
2116 #omp parallel shared (iD.1562) -> inner parallel
2117 iD.1562 = iD.1562 - 1;
2118
2119 Each parallel structure will create a distinct .omp_data_s structure
2120 for copying iD.1562 in/out of the directive:
2121
2122 outer parallel .omp_data_s.1.i -> iD.1562
2123 inner parallel .omp_data_s.2.i -> iD.1562
2124
2125 A shared variable mapping will produce a copy-out operation before
2126 the parallel directive and a copy-in operation after it. So, in
2127 this case we would have:
2128
2129 iD.1562 = 0;
2130 .omp_data_o.1.i = iD.1562;
2131 #omp parallel shared(iD.1562) -> outer parallel
2132 .omp_data_i.1 = &.omp_data_o.1
2133 .omp_data_i.1->i = .omp_data_i.1->i + 1;
2134
2135 .omp_data_o.2.i = iD.1562; -> **
2136 #omp parallel shared(iD.1562) -> inner parallel
2137 .omp_data_i.2 = &.omp_data_o.2
2138 .omp_data_i.2->i = .omp_data_i.2->i - 1;
2139
2140
2141 ** This is a problem. The symbol iD.1562 cannot be referenced
2142 inside the body of the outer parallel region. But since we are
2143 emitting this copy operation while expanding the inner parallel
2144 directive, we need to access the CTX structure of the outer
2145 parallel directive to get the correct mapping:
2146
2147 .omp_data_o.2.i = .omp_data_i.1->i
2148
2149 Since there may be other workshare or parallel directives enclosing
2150 the parallel directive, it may be necessary to walk up the context
2151 parent chain. This is not a problem in general because nested
2152 parallelism happens only rarely. */
2153
2154static tree
2155lookup_decl_in_outer_ctx (tree decl, omp_context *ctx)
2156{
2157 tree t;
2158 omp_context *up;
2159
773c5ba7 2160 for (up = ctx->outer, t = NULL; up && t == NULL; up = up->outer)
2161 t = maybe_lookup_decl (decl, up);
2162
87b31375 2163 gcc_assert (!ctx->is_nested || t || is_global_var (decl));
773c5ba7 2164
c37594c7 2165 return t ? t : decl;
773c5ba7 2166}
2167
2168
f49d7bb5 2169/* Similar to lookup_decl_in_outer_ctx, but return DECL if not found
2170 in outer contexts. */
2171
2172static tree
2173maybe_lookup_decl_in_outer_ctx (tree decl, omp_context *ctx)
2174{
2175 tree t = NULL;
2176 omp_context *up;
2177
87b31375 2178 for (up = ctx->outer, t = NULL; up && t == NULL; up = up->outer)
2179 t = maybe_lookup_decl (decl, up);
f49d7bb5 2180
2181 return t ? t : decl;
2182}
2183
2184
1e8e9920 2185/* Construct the initialization value for reduction CLAUSE. */
2186
2187tree
2188omp_reduction_init (tree clause, tree type)
2189{
389dd41b 2190 location_t loc = OMP_CLAUSE_LOCATION (clause);
1e8e9920 2191 switch (OMP_CLAUSE_REDUCTION_CODE (clause))
2192 {
2193 case PLUS_EXPR:
2194 case MINUS_EXPR:
2195 case BIT_IOR_EXPR:
2196 case BIT_XOR_EXPR:
2197 case TRUTH_OR_EXPR:
2198 case TRUTH_ORIF_EXPR:
2199 case TRUTH_XOR_EXPR:
2200 case NE_EXPR:
385f3f36 2201 return build_zero_cst (type);
1e8e9920 2202
2203 case MULT_EXPR:
2204 case TRUTH_AND_EXPR:
2205 case TRUTH_ANDIF_EXPR:
2206 case EQ_EXPR:
389dd41b 2207 return fold_convert_loc (loc, type, integer_one_node);
1e8e9920 2208
2209 case BIT_AND_EXPR:
389dd41b 2210 return fold_convert_loc (loc, type, integer_minus_one_node);
1e8e9920 2211
2212 case MAX_EXPR:
2213 if (SCALAR_FLOAT_TYPE_P (type))
2214 {
2215 REAL_VALUE_TYPE max, min;
2216 if (HONOR_INFINITIES (TYPE_MODE (type)))
2217 {
2218 real_inf (&max);
2219 real_arithmetic (&min, NEGATE_EXPR, &max, NULL);
2220 }
2221 else
2222 real_maxval (&min, 1, TYPE_MODE (type));
2223 return build_real (type, min);
2224 }
2225 else
2226 {
2227 gcc_assert (INTEGRAL_TYPE_P (type));
2228 return TYPE_MIN_VALUE (type);
2229 }
2230
2231 case MIN_EXPR:
2232 if (SCALAR_FLOAT_TYPE_P (type))
2233 {
2234 REAL_VALUE_TYPE max;
2235 if (HONOR_INFINITIES (TYPE_MODE (type)))
2236 real_inf (&max);
2237 else
2238 real_maxval (&max, 0, TYPE_MODE (type));
2239 return build_real (type, max);
2240 }
2241 else
2242 {
2243 gcc_assert (INTEGRAL_TYPE_P (type));
2244 return TYPE_MAX_VALUE (type);
2245 }
2246
2247 default:
2248 gcc_unreachable ();
2249 }
2250}
2251
2252/* Generate code to implement the input clauses, FIRSTPRIVATE and COPYIN,
2253 from the receiver (aka child) side and initializers for REFERENCE_TYPE
2254 private variables. Initialization statements go in ILIST, while calls
2255 to destructors go in DLIST. */
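/* A minimal sketch of the effect (illustrative; the temporary name
   x.1 is hypothetical): for

     #pragma omp parallel firstprivate (x)

   the child-side code appended to ILIST is roughly

     x.1 = .omp_data_i->x;   // copy-construct the private copy

   and for a C++ type with a destructor, the matching destructor call
   is queued on DLIST.  */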
2256
2257static void
75a70cf9 2258lower_rec_input_clauses (tree clauses, gimple_seq *ilist, gimple_seq *dlist,
1e4afe3c 2259 omp_context *ctx)
1e8e9920 2260{
c2f47e15 2261 tree c, dtor, copyin_seq, x, ptr;
1e8e9920 2262 bool copyin_by_ref = false;
f49d7bb5 2263 bool lastprivate_firstprivate = false;
1e8e9920 2264 int pass;
2265
1e8e9920 2266 copyin_seq = NULL;
2267
2268 /* Do all the fixed sized types in the first pass, and the variable sized
2269 types in the second pass. This makes sure that the scalar arguments to
48e1416a 2270 the variable sized types are processed before we use them in the
1e8e9920 2271 variable sized operations. */
2272 for (pass = 0; pass < 2; ++pass)
2273 {
2274 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
2275 {
55d6e7cd 2276 enum omp_clause_code c_kind = OMP_CLAUSE_CODE (c);
1e8e9920 2277 tree var, new_var;
2278 bool by_ref;
389dd41b 2279 location_t clause_loc = OMP_CLAUSE_LOCATION (c);
1e8e9920 2280
2281 switch (c_kind)
2282 {
2283 case OMP_CLAUSE_PRIVATE:
2284 if (OMP_CLAUSE_PRIVATE_DEBUG (c))
2285 continue;
2286 break;
2287 case OMP_CLAUSE_SHARED:
f49d7bb5 2288 if (maybe_lookup_decl (OMP_CLAUSE_DECL (c), ctx) == NULL)
2289 {
2290 gcc_assert (is_global_var (OMP_CLAUSE_DECL (c)));
2291 continue;
2292 }
1e8e9920 2293 case OMP_CLAUSE_FIRSTPRIVATE:
1e8e9920 2294 case OMP_CLAUSE_COPYIN:
2295 case OMP_CLAUSE_REDUCTION:
2296 break;
df2c34fc 2297 case OMP_CLAUSE_LASTPRIVATE:
f49d7bb5 2298 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
2299 {
2300 lastprivate_firstprivate = true;
2301 if (pass != 0)
2302 continue;
2303 }
df2c34fc 2304 break;
1e8e9920 2305 default:
2306 continue;
2307 }
2308
2309 new_var = var = OMP_CLAUSE_DECL (c);
2310 if (c_kind != OMP_CLAUSE_COPYIN)
2311 new_var = lookup_decl (var, ctx);
2312
2313 if (c_kind == OMP_CLAUSE_SHARED || c_kind == OMP_CLAUSE_COPYIN)
2314 {
2315 if (pass != 0)
2316 continue;
2317 }
1e8e9920 2318 else if (is_variable_sized (var))
2319 {
773c5ba7 2320 /* For variable sized types, we need to allocate the
2321 actual storage here. Call alloca and store the
2322 result in the pointer decl that we created elsewhere. */
1e8e9920 2323 if (pass == 0)
2324 continue;
2325
fd6481cf 2326 if (c_kind != OMP_CLAUSE_FIRSTPRIVATE || !is_task_ctx (ctx))
2327 {
75a70cf9 2328 gimple stmt;
b9a16870 2329 tree tmp, atmp;
75a70cf9 2330
fd6481cf 2331 ptr = DECL_VALUE_EXPR (new_var);
2332 gcc_assert (TREE_CODE (ptr) == INDIRECT_REF);
2333 ptr = TREE_OPERAND (ptr, 0);
2334 gcc_assert (DECL_P (ptr));
2335 x = TYPE_SIZE_UNIT (TREE_TYPE (new_var));
75a70cf9 2336
2337 /* void *tmp = __builtin_alloca */
b9a16870 2338 atmp = builtin_decl_explicit (BUILT_IN_ALLOCA);
2339 stmt = gimple_build_call (atmp, 1, x);
75a70cf9 2340 tmp = create_tmp_var_raw (ptr_type_node, NULL);
2341 gimple_add_tmp_var (tmp);
2342 gimple_call_set_lhs (stmt, tmp);
2343
2344 gimple_seq_add_stmt (ilist, stmt);
2345
389dd41b 2346 x = fold_convert_loc (clause_loc, TREE_TYPE (ptr), tmp);
75a70cf9 2347 gimplify_assign (ptr, x, ilist);
fd6481cf 2348 }
1e8e9920 2349 }
1e8e9920 2350 else if (is_reference (var))
2351 {
773c5ba7 2352 /* For references that are being privatized for Fortran,
2353 allocate new backing storage for the new pointer
2354 variable. This allows us to avoid changing all the
2355 code that expects a pointer to something that expects
2356 a direct variable. Note that this doesn't apply to
2357 C++, since reference types are disallowed in data
df2c34fc 2358 sharing clauses there, except for NRV optimized
2359 return values. */
1e8e9920 2360 if (pass == 0)
2361 continue;
2362
2363 x = TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (new_var)));
fd6481cf 2364 if (c_kind == OMP_CLAUSE_FIRSTPRIVATE && is_task_ctx (ctx))
2365 {
2366 x = build_receiver_ref (var, false, ctx);
389dd41b 2367 x = build_fold_addr_expr_loc (clause_loc, x);
fd6481cf 2368 }
2369 else if (TREE_CONSTANT (x))
1e8e9920 2370 {
2371 const char *name = NULL;
2372 if (DECL_NAME (var))
2373 name = IDENTIFIER_POINTER (DECL_NAME (new_var));
2374
df2c34fc 2375 x = create_tmp_var_raw (TREE_TYPE (TREE_TYPE (new_var)),
2376 name);
2377 gimple_add_tmp_var (x);
86f2ad37 2378 TREE_ADDRESSABLE (x) = 1;
389dd41b 2379 x = build_fold_addr_expr_loc (clause_loc, x);
1e8e9920 2380 }
2381 else
2382 {
b9a16870 2383 tree atmp = builtin_decl_explicit (BUILT_IN_ALLOCA);
2384 x = build_call_expr_loc (clause_loc, atmp, 1, x);
1e8e9920 2385 }
2386
389dd41b 2387 x = fold_convert_loc (clause_loc, TREE_TYPE (new_var), x);
75a70cf9 2388 gimplify_assign (new_var, x, ilist);
1e8e9920 2389
182cf5a9 2390 new_var = build_simple_mem_ref_loc (clause_loc, new_var);
1e8e9920 2391 }
2392 else if (c_kind == OMP_CLAUSE_REDUCTION
2393 && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
2394 {
2395 if (pass == 0)
2396 continue;
2397 }
2398 else if (pass != 0)
2399 continue;
2400
55d6e7cd 2401 switch (OMP_CLAUSE_CODE (c))
1e8e9920 2402 {
2403 case OMP_CLAUSE_SHARED:
f49d7bb5 2404 /* Shared global vars are just accessed directly. */
2405 if (is_global_var (new_var))
2406 break;
1e8e9920 2407 /* Set up the DECL_VALUE_EXPR for shared variables now. This
2408 needs to be delayed until after fixup_child_record_type so
2409 that we get the correct type during the dereference. */
e8a588af 2410 by_ref = use_pointer_for_field (var, ctx);
1e8e9920 2411 x = build_receiver_ref (var, by_ref, ctx);
2412 SET_DECL_VALUE_EXPR (new_var, x);
2413 DECL_HAS_VALUE_EXPR_P (new_var) = 1;
2414
2415 /* ??? If VAR is not passed by reference, and the variable
2416 hasn't been initialized yet, then we'll get a warning for
2417 the store into the omp_data_s structure. Ideally, we'd be
48e1416a 2418 able to notice this and not store anything at all, but
1e8e9920 2419 we're generating code too early. Suppress the warning. */
2420 if (!by_ref)
2421 TREE_NO_WARNING (var) = 1;
2422 break;
2423
2424 case OMP_CLAUSE_LASTPRIVATE:
2425 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
2426 break;
2427 /* FALLTHRU */
2428
2429 case OMP_CLAUSE_PRIVATE:
fd6481cf 2430 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_PRIVATE)
2431 x = build_outer_var_ref (var, ctx);
2432 else if (OMP_CLAUSE_PRIVATE_OUTER_REF (c))
2433 {
2434 if (is_task_ctx (ctx))
2435 x = build_receiver_ref (var, false, ctx);
2436 else
2437 x = build_outer_var_ref (var, ctx);
2438 }
2439 else
2440 x = NULL;
2441 x = lang_hooks.decls.omp_clause_default_ctor (c, new_var, x);
1e8e9920 2442 if (x)
2443 gimplify_and_add (x, ilist);
2444 /* FALLTHRU */
2445
2446 do_dtor:
2447 x = lang_hooks.decls.omp_clause_dtor (c, new_var);
2448 if (x)
2449 {
75a70cf9 2450 gimple_seq tseq = NULL;
2451
1e8e9920 2452 dtor = x;
75a70cf9 2453 gimplify_stmt (&dtor, &tseq);
e3a19533 2454 gimple_seq_add_seq (dlist, tseq);
1e8e9920 2455 }
2456 break;
2457
2458 case OMP_CLAUSE_FIRSTPRIVATE:
fd6481cf 2459 if (is_task_ctx (ctx))
2460 {
2461 if (is_reference (var) || is_variable_sized (var))
2462 goto do_dtor;
2463 else if (is_global_var (maybe_lookup_decl_in_outer_ctx (var,
2464 ctx))
2465 || use_pointer_for_field (var, NULL))
2466 {
2467 x = build_receiver_ref (var, false, ctx);
2468 SET_DECL_VALUE_EXPR (new_var, x);
2469 DECL_HAS_VALUE_EXPR_P (new_var) = 1;
2470 goto do_dtor;
2471 }
2472 }
1e8e9920 2473 x = build_outer_var_ref (var, ctx);
2474 x = lang_hooks.decls.omp_clause_copy_ctor (c, new_var, x);
2475 gimplify_and_add (x, ilist);
2476 goto do_dtor;
2477 break;
2478
2479 case OMP_CLAUSE_COPYIN:
e8a588af 2480 by_ref = use_pointer_for_field (var, NULL);
1e8e9920 2481 x = build_receiver_ref (var, by_ref, ctx);
2482 x = lang_hooks.decls.omp_clause_assign_op (c, new_var, x);
2483 append_to_statement_list (x, &copyin_seq);
2484 copyin_by_ref |= by_ref;
2485 break;
2486
2487 case OMP_CLAUSE_REDUCTION:
2488 if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
2489 {
fd6481cf 2490 tree placeholder = OMP_CLAUSE_REDUCTION_PLACEHOLDER (c);
2491 x = build_outer_var_ref (var, ctx);
2492
2493 if (is_reference (var))
389dd41b 2494 x = build_fold_addr_expr_loc (clause_loc, x);
fd6481cf 2495 SET_DECL_VALUE_EXPR (placeholder, x);
2496 DECL_HAS_VALUE_EXPR_P (placeholder) = 1;
e3a19533 2497 lower_omp (&OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c), ctx);
75a70cf9 2498 gimple_seq_add_seq (ilist,
2499 OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c));
2500 OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c) = NULL;
fd6481cf 2501 DECL_HAS_VALUE_EXPR_P (placeholder) = 0;
1e8e9920 2502 }
2503 else
2504 {
2505 x = omp_reduction_init (c, TREE_TYPE (new_var));
2506 gcc_assert (TREE_CODE (TREE_TYPE (new_var)) != ARRAY_TYPE);
75a70cf9 2507 gimplify_assign (new_var, x, ilist);
1e8e9920 2508 }
2509 break;
2510
2511 default:
2512 gcc_unreachable ();
2513 }
2514 }
2515 }
2516
2517 /* The copyin sequence is not to be executed by the main thread, since
2518 that would result in self-copies. Perhaps not visible to scalars,
2519 but it certainly is to C++ operator=. */
2520 if (copyin_seq)
2521 {
b9a16870 2522 x = build_call_expr (builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM),
2523 0);
1e8e9920 2524 x = build2 (NE_EXPR, boolean_type_node, x,
2525 build_int_cst (TREE_TYPE (x), 0));
2526 x = build3 (COND_EXPR, void_type_node, x, copyin_seq, NULL);
2527 gimplify_and_add (x, ilist);
2528 }
2529
2530 /* If any copyin variable is passed by reference, we must ensure the
2531 master thread doesn't modify it before it is copied over in all
f49d7bb5 2532 threads. Similarly for variables in both firstprivate and
2533 lastprivate clauses we need to ensure the lastprivate copying
2534 happens after firstprivate copying in all threads. */
2535 if (copyin_by_ref || lastprivate_firstprivate)
79acaae1 2536 gimplify_and_add (build_omp_barrier (), ilist);
1e8e9920 2537}
2538
773c5ba7 2539
1e8e9920 2540/* Generate code to implement the LASTPRIVATE clauses. This is used for
2541 both parallel and workshare constructs. PREDICATE may be NULL if it's
2542 always true. */
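/* Conceptually (illustrative), for

     #pragma omp for lastprivate (x)

   the code appended to STMT_LIST guards the copy-out with PREDICATE:

     if (iteration-was-the-last-one)
       x = x.1;              // write the private copy back  */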
2543
2544static void
75a70cf9 2545lower_lastprivate_clauses (tree clauses, tree predicate, gimple_seq *stmt_list,
2546 omp_context *ctx)
1e8e9920 2547{
75a70cf9 2548 tree x, c, label = NULL;
fd6481cf 2549 bool par_clauses = false;
1e8e9920 2550
2551 /* Early exit if there are no lastprivate clauses. */
2552 clauses = find_omp_clause (clauses, OMP_CLAUSE_LASTPRIVATE);
2553 if (clauses == NULL)
2554 {
 2555 /* If this was a workshare construct, see if it had been combined
2556 with its parallel. In that case, look for the clauses on the
2557 parallel statement itself. */
2558 if (is_parallel_ctx (ctx))
2559 return;
2560
2561 ctx = ctx->outer;
2562 if (ctx == NULL || !is_parallel_ctx (ctx))
2563 return;
2564
75a70cf9 2565 clauses = find_omp_clause (gimple_omp_parallel_clauses (ctx->stmt),
1e8e9920 2566 OMP_CLAUSE_LASTPRIVATE);
2567 if (clauses == NULL)
2568 return;
fd6481cf 2569 par_clauses = true;
1e8e9920 2570 }
2571
75a70cf9 2572 if (predicate)
2573 {
2574 gimple stmt;
2575 tree label_true, arm1, arm2;
2576
e60a6f7b 2577 label = create_artificial_label (UNKNOWN_LOCATION);
2578 label_true = create_artificial_label (UNKNOWN_LOCATION);
75a70cf9 2579 arm1 = TREE_OPERAND (predicate, 0);
2580 arm2 = TREE_OPERAND (predicate, 1);
2581 gimplify_expr (&arm1, stmt_list, NULL, is_gimple_val, fb_rvalue);
2582 gimplify_expr (&arm2, stmt_list, NULL, is_gimple_val, fb_rvalue);
2583 stmt = gimple_build_cond (TREE_CODE (predicate), arm1, arm2,
2584 label_true, label);
2585 gimple_seq_add_stmt (stmt_list, stmt);
2586 gimple_seq_add_stmt (stmt_list, gimple_build_label (label_true));
2587 }
1e8e9920 2588
fd6481cf 2589 for (c = clauses; c ;)
1e8e9920 2590 {
2591 tree var, new_var;
389dd41b 2592 location_t clause_loc = OMP_CLAUSE_LOCATION (c);
1e8e9920 2593
fd6481cf 2594 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE)
2595 {
2596 var = OMP_CLAUSE_DECL (c);
2597 new_var = lookup_decl (var, ctx);
1e8e9920 2598
75a70cf9 2599 if (OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c))
2600 {
e3a19533 2601 lower_omp (&OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c), ctx);
75a70cf9 2602 gimple_seq_add_seq (stmt_list,
2603 OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c));
2604 }
2605 OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c) = NULL;
1e8e9920 2606
fd6481cf 2607 x = build_outer_var_ref (var, ctx);
2608 if (is_reference (var))
182cf5a9 2609 new_var = build_simple_mem_ref_loc (clause_loc, new_var);
fd6481cf 2610 x = lang_hooks.decls.omp_clause_assign_op (c, x, new_var);
75a70cf9 2611 gimplify_and_add (x, stmt_list);
fd6481cf 2612 }
2613 c = OMP_CLAUSE_CHAIN (c);
2614 if (c == NULL && !par_clauses)
2615 {
 2616 /* If this was a workshare construct, see if it had been combined
2617 with its parallel. In that case, continue looking for the
2618 clauses also on the parallel statement itself. */
2619 if (is_parallel_ctx (ctx))
2620 break;
2621
2622 ctx = ctx->outer;
2623 if (ctx == NULL || !is_parallel_ctx (ctx))
2624 break;
2625
75a70cf9 2626 c = find_omp_clause (gimple_omp_parallel_clauses (ctx->stmt),
fd6481cf 2627 OMP_CLAUSE_LASTPRIVATE);
2628 par_clauses = true;
2629 }
1e8e9920 2630 }
2631
75a70cf9 2632 if (label)
2633 gimple_seq_add_stmt (stmt_list, gimple_build_label (label));
1e8e9920 2634}
2635
773c5ba7 2636
1e8e9920 2637/* Generate code to implement the REDUCTION clauses. */
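/* A rough sketch of the two strategies used below (illustrative):
   with exactly one scalar reduction the merge becomes an atomic
   update,

     #pragma omp atomic
       x = x + x.1;

   otherwise all the merges are wrapped in a single lock:

     GOMP_atomic_start ();
     x = x + x.1;  y = y * y.1;  ...
     GOMP_atomic_end ();  */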
2638
2639static void
75a70cf9 2640lower_reduction_clauses (tree clauses, gimple_seq *stmt_seqp, omp_context *ctx)
1e8e9920 2641{
75a70cf9 2642 gimple_seq sub_seq = NULL;
2643 gimple stmt;
2644 tree x, c;
1e8e9920 2645 int count = 0;
2646
2647 /* First see if there is exactly one reduction clause. Use OMP_ATOMIC
2648 update in that case, otherwise use a lock. */
2649 for (c = clauses; c && count < 2; c = OMP_CLAUSE_CHAIN (c))
55d6e7cd 2650 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION)
1e8e9920 2651 {
2652 if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
2653 {
2654 /* Never use OMP_ATOMIC for array reductions. */
2655 count = -1;
2656 break;
2657 }
2658 count++;
2659 }
2660
2661 if (count == 0)
2662 return;
2663
2664 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
2665 {
2666 tree var, ref, new_var;
2667 enum tree_code code;
389dd41b 2668 location_t clause_loc = OMP_CLAUSE_LOCATION (c);
1e8e9920 2669
55d6e7cd 2670 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_REDUCTION)
1e8e9920 2671 continue;
2672
2673 var = OMP_CLAUSE_DECL (c);
2674 new_var = lookup_decl (var, ctx);
2675 if (is_reference (var))
182cf5a9 2676 new_var = build_simple_mem_ref_loc (clause_loc, new_var);
1e8e9920 2677 ref = build_outer_var_ref (var, ctx);
2678 code = OMP_CLAUSE_REDUCTION_CODE (c);
773c5ba7 2679
2680 /* reduction(-:var) sums up the partial results, so it acts
2681 identically to reduction(+:var). */
1e8e9920 2682 if (code == MINUS_EXPR)
2683 code = PLUS_EXPR;
2684
2685 if (count == 1)
2686 {
389dd41b 2687 tree addr = build_fold_addr_expr_loc (clause_loc, ref);
1e8e9920 2688
2689 addr = save_expr (addr);
2690 ref = build1 (INDIRECT_REF, TREE_TYPE (TREE_TYPE (addr)), addr);
389dd41b 2691 x = fold_build2_loc (clause_loc, code, TREE_TYPE (ref), ref, new_var);
1e8e9920 2692 x = build2 (OMP_ATOMIC, void_type_node, addr, x);
75a70cf9 2693 gimplify_and_add (x, stmt_seqp);
1e8e9920 2694 return;
2695 }
2696
2697 if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
2698 {
2699 tree placeholder = OMP_CLAUSE_REDUCTION_PLACEHOLDER (c);
2700
2701 if (is_reference (var))
389dd41b 2702 ref = build_fold_addr_expr_loc (clause_loc, ref);
1e8e9920 2703 SET_DECL_VALUE_EXPR (placeholder, ref);
2704 DECL_HAS_VALUE_EXPR_P (placeholder) = 1;
e3a19533 2705 lower_omp (&OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c), ctx);
75a70cf9 2706 gimple_seq_add_seq (&sub_seq, OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c));
2707 OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c) = NULL;
1e8e9920 2708 OMP_CLAUSE_REDUCTION_PLACEHOLDER (c) = NULL;
2709 }
2710 else
2711 {
2712 x = build2 (code, TREE_TYPE (ref), ref, new_var);
2713 ref = build_outer_var_ref (var, ctx);
75a70cf9 2714 gimplify_assign (ref, x, &sub_seq);
1e8e9920 2715 }
2716 }
2717
b9a16870 2718 stmt = gimple_build_call (builtin_decl_explicit (BUILT_IN_GOMP_ATOMIC_START),
2719 0);
75a70cf9 2720 gimple_seq_add_stmt (stmt_seqp, stmt);
1e8e9920 2721
75a70cf9 2722 gimple_seq_add_seq (stmt_seqp, sub_seq);
1e8e9920 2723
b9a16870 2724 stmt = gimple_build_call (builtin_decl_explicit (BUILT_IN_GOMP_ATOMIC_END),
2725 0);
75a70cf9 2726 gimple_seq_add_stmt (stmt_seqp, stmt);
1e8e9920 2727}
2728
773c5ba7 2729
1e8e9920 2730/* Generate code to implement the COPYPRIVATE clauses. */
2731
2732static void
75a70cf9 2733lower_copyprivate_clauses (tree clauses, gimple_seq *slist, gimple_seq *rlist,
1e8e9920 2734 omp_context *ctx)
2735{
2736 tree c;
2737
2738 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
2739 {
cb561506 2740 tree var, new_var, ref, x;
1e8e9920 2741 bool by_ref;
389dd41b 2742 location_t clause_loc = OMP_CLAUSE_LOCATION (c);
1e8e9920 2743
55d6e7cd 2744 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_COPYPRIVATE)
1e8e9920 2745 continue;
2746
2747 var = OMP_CLAUSE_DECL (c);
e8a588af 2748 by_ref = use_pointer_for_field (var, NULL);
1e8e9920 2749
2750 ref = build_sender_ref (var, ctx);
cb561506 2751 x = new_var = lookup_decl_in_outer_ctx (var, ctx);
2752 if (by_ref)
2753 {
2754 x = build_fold_addr_expr_loc (clause_loc, new_var);
2755 x = fold_convert_loc (clause_loc, TREE_TYPE (ref), x);
2756 }
75a70cf9 2757 gimplify_assign (ref, x, slist);
1e8e9920 2758
cb561506 2759 ref = build_receiver_ref (var, false, ctx);
2760 if (by_ref)
2761 {
2762 ref = fold_convert_loc (clause_loc,
2763 build_pointer_type (TREE_TYPE (new_var)),
2764 ref);
2765 ref = build_fold_indirect_ref_loc (clause_loc, ref);
2766 }
1e8e9920 2767 if (is_reference (var))
2768 {
cb561506 2769 ref = fold_convert_loc (clause_loc, TREE_TYPE (new_var), ref);
182cf5a9 2770 ref = build_simple_mem_ref_loc (clause_loc, ref);
2771 new_var = build_simple_mem_ref_loc (clause_loc, new_var);
1e8e9920 2772 }
cb561506 2773 x = lang_hooks.decls.omp_clause_assign_op (c, new_var, ref);
1e8e9920 2774 gimplify_and_add (x, rlist);
2775 }
2776}
2777
773c5ba7 2778
1e8e9920 2779/* Generate code to implement the clauses, FIRSTPRIVATE, COPYIN, LASTPRIVATE,
2780 and REDUCTION from the sender (aka parent) side. */
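/* Illustrative effect on the parent side: for

     #pragma omp parallel firstprivate (x) lastprivate (y)

   we emit, roughly,

     .omp_data_o.x = x;      // into ILIST, before the parallel call
     y = .omp_data_o.y;      // into OLIST, after the parallel call  */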
2781
2782static void
75a70cf9 2783lower_send_clauses (tree clauses, gimple_seq *ilist, gimple_seq *olist,
2784 omp_context *ctx)
1e8e9920 2785{
2786 tree c;
2787
2788 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
2789 {
773c5ba7 2790 tree val, ref, x, var;
1e8e9920 2791 bool by_ref, do_in = false, do_out = false;
389dd41b 2792 location_t clause_loc = OMP_CLAUSE_LOCATION (c);
1e8e9920 2793
55d6e7cd 2794 switch (OMP_CLAUSE_CODE (c))
1e8e9920 2795 {
fd6481cf 2796 case OMP_CLAUSE_PRIVATE:
2797 if (OMP_CLAUSE_PRIVATE_OUTER_REF (c))
2798 break;
2799 continue;
1e8e9920 2800 case OMP_CLAUSE_FIRSTPRIVATE:
2801 case OMP_CLAUSE_COPYIN:
2802 case OMP_CLAUSE_LASTPRIVATE:
2803 case OMP_CLAUSE_REDUCTION:
2804 break;
2805 default:
2806 continue;
2807 }
2808
87b31375 2809 val = OMP_CLAUSE_DECL (c);
2810 var = lookup_decl_in_outer_ctx (val, ctx);
773c5ba7 2811
f49d7bb5 2812 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_COPYIN
2813 && is_global_var (var))
2814 continue;
1e8e9920 2815 if (is_variable_sized (val))
2816 continue;
e8a588af 2817 by_ref = use_pointer_for_field (val, NULL);
1e8e9920 2818
55d6e7cd 2819 switch (OMP_CLAUSE_CODE (c))
1e8e9920 2820 {
fd6481cf 2821 case OMP_CLAUSE_PRIVATE:
1e8e9920 2822 case OMP_CLAUSE_FIRSTPRIVATE:
2823 case OMP_CLAUSE_COPYIN:
2824 do_in = true;
2825 break;
2826
2827 case OMP_CLAUSE_LASTPRIVATE:
2828 if (by_ref || is_reference (val))
2829 {
2830 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
2831 continue;
2832 do_in = true;
2833 }
2834 else
fd6481cf 2835 {
2836 do_out = true;
2837 if (lang_hooks.decls.omp_private_outer_ref (val))
2838 do_in = true;
2839 }
1e8e9920 2840 break;
2841
2842 case OMP_CLAUSE_REDUCTION:
2843 do_in = true;
2844 do_out = !(by_ref || is_reference (val));
2845 break;
2846
2847 default:
2848 gcc_unreachable ();
2849 }
2850
2851 if (do_in)
2852 {
2853 ref = build_sender_ref (val, ctx);
389dd41b 2854 x = by_ref ? build_fold_addr_expr_loc (clause_loc, var) : var;
75a70cf9 2855 gimplify_assign (ref, x, ilist);
fd6481cf 2856 if (is_task_ctx (ctx))
2857 DECL_ABSTRACT_ORIGIN (TREE_OPERAND (ref, 1)) = NULL;
1e8e9920 2858 }
773c5ba7 2859
1e8e9920 2860 if (do_out)
2861 {
2862 ref = build_sender_ref (val, ctx);
75a70cf9 2863 gimplify_assign (var, ref, olist);
1e8e9920 2864 }
2865 }
2866}
2867
75a70cf9 2868/* Generate code to implement SHARED from the sender (aka parent)
2869 side. This is trickier, since GIMPLE_OMP_PARALLEL_CLAUSES doesn't
2870 list things that got automatically shared. */
1e8e9920 2871
2872static void
75a70cf9 2873lower_send_shared_vars (gimple_seq *ilist, gimple_seq *olist, omp_context *ctx)
1e8e9920 2874{
fd6481cf 2875 tree var, ovar, nvar, f, x, record_type;
1e8e9920 2876
2877 if (ctx->record_type == NULL)
2878 return;
773c5ba7 2879
fd6481cf 2880 record_type = ctx->srecord_type ? ctx->srecord_type : ctx->record_type;
1767a056 2881 for (f = TYPE_FIELDS (record_type); f ; f = DECL_CHAIN (f))
1e8e9920 2882 {
2883 ovar = DECL_ABSTRACT_ORIGIN (f);
2884 nvar = maybe_lookup_decl (ovar, ctx);
2885 if (!nvar || !DECL_HAS_VALUE_EXPR_P (nvar))
2886 continue;
2887
773c5ba7 2888 /* If CTX is a nested parallel directive, find the immediately
2889 enclosing parallel or workshare construct that contains a
2890 mapping for OVAR. */
87b31375 2891 var = lookup_decl_in_outer_ctx (ovar, ctx);
773c5ba7 2892
e8a588af 2893 if (use_pointer_for_field (ovar, ctx))
1e8e9920 2894 {
2895 x = build_sender_ref (ovar, ctx);
773c5ba7 2896 var = build_fold_addr_expr (var);
75a70cf9 2897 gimplify_assign (x, var, ilist);
1e8e9920 2898 }
2899 else
2900 {
2901 x = build_sender_ref (ovar, ctx);
75a70cf9 2902 gimplify_assign (x, var, ilist);
1e8e9920 2903
d2263ebb 2904 if (!TREE_READONLY (var)
2905 /* We don't need to receive a new reference to a result
2906 or parm decl. In fact we may not store to it as we will
2907 invalidate any pending RSO and generate wrong gimple
2908 during inlining. */
2909 && !((TREE_CODE (var) == RESULT_DECL
2910 || TREE_CODE (var) == PARM_DECL)
2911 && DECL_BY_REFERENCE (var)))
fd6481cf 2912 {
2913 x = build_sender_ref (ovar, ctx);
75a70cf9 2914 gimplify_assign (var, x, olist);
fd6481cf 2915 }
1e8e9920 2916 }
2917 }
2918}
2919
75a70cf9 2920
2921/* A convenience function to build an empty GIMPLE_COND with just the
2922 condition. */
2923
2924static gimple
2925gimple_build_cond_empty (tree cond)
2926{
2927 enum tree_code pred_code;
2928 tree lhs, rhs;
2929
2930 gimple_cond_get_ops_from_tree (cond, &pred_code, &lhs, &rhs);
2931 return gimple_build_cond (pred_code, lhs, rhs, NULL_TREE, NULL_TREE);
2932}
2933
2934
48e1416a 2935/* Build the function calls to GOMP_parallel_start etc. to actually
773c5ba7 2936 generate the parallel operation. REGION is the parallel region
 2937 being expanded. BB is the block where the code is to be inserted.
 2938 WS_ARGS is set if this is a call to a combined parallel+workshare
 2939 construct; it contains the list of additional arguments needed by
 2940 the workshare construct. */
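/* Conceptually (illustrative; details vary with the clauses), for

     #pragma omp parallel num_threads (4)

   the code emitted into BB is

     GOMP_parallel_start (child_fn, &.omp_data_o, 4);
     child_fn (&.omp_data_o);
     GOMP_parallel_end ();

   A combined parallel+workshare instead calls one of the specialized
   GOMP_parallel_loop_*_start or GOMP_parallel_sections_start entry
   points, passing WS_ARGS along.  */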
1e8e9920 2941
2942static void
61e47ac8 2943expand_parallel_call (struct omp_region *region, basic_block bb,
414c3a2c 2944 gimple entry_stmt, VEC(tree,gc) *ws_args)
1e8e9920 2945{
79acaae1 2946 tree t, t1, t2, val, cond, c, clauses;
75a70cf9 2947 gimple_stmt_iterator gsi;
2948 gimple stmt;
b9a16870 2949 enum built_in_function start_ix;
2950 int start_ix2;
389dd41b 2951 location_t clause_loc;
414c3a2c 2952 VEC(tree,gc) *args;
773c5ba7 2953
75a70cf9 2954 clauses = gimple_omp_parallel_clauses (entry_stmt);
773c5ba7 2955
334ec2d8 2956 /* Determine what flavor of GOMP_parallel_start we will be
773c5ba7 2957 emitting. */
2958 start_ix = BUILT_IN_GOMP_PARALLEL_START;
2959 if (is_combined_parallel (region))
2960 {
61e47ac8 2961 switch (region->inner->type)
773c5ba7 2962 {
75a70cf9 2963 case GIMPLE_OMP_FOR:
fd6481cf 2964 gcc_assert (region->inner->sched_kind != OMP_CLAUSE_SCHEDULE_AUTO);
b9a16870 2965 start_ix2 = ((int)BUILT_IN_GOMP_PARALLEL_LOOP_STATIC_START
2966 + (region->inner->sched_kind
2967 == OMP_CLAUSE_SCHEDULE_RUNTIME
2968 ? 3 : region->inner->sched_kind));
2969 start_ix = (enum built_in_function)start_ix2;
61e47ac8 2970 break;
75a70cf9 2971 case GIMPLE_OMP_SECTIONS:
61e47ac8 2972 start_ix = BUILT_IN_GOMP_PARALLEL_SECTIONS_START;
2973 break;
2974 default:
2975 gcc_unreachable ();
773c5ba7 2976 }
773c5ba7 2977 }
1e8e9920 2978
2979 /* By default, the value of NUM_THREADS is zero (selected at run time)
2980 and there is no conditional. */
2981 cond = NULL_TREE;
2982 val = build_int_cst (unsigned_type_node, 0);
2983
2984 c = find_omp_clause (clauses, OMP_CLAUSE_IF);
2985 if (c)
2986 cond = OMP_CLAUSE_IF_EXPR (c);
2987
2988 c = find_omp_clause (clauses, OMP_CLAUSE_NUM_THREADS);
2989 if (c)
389dd41b 2990 {
2991 val = OMP_CLAUSE_NUM_THREADS_EXPR (c);
2992 clause_loc = OMP_CLAUSE_LOCATION (c);
2993 }
2994 else
2995 clause_loc = gimple_location (entry_stmt);
1e8e9920 2996
2997 /* Ensure 'val' is of the correct type. */
389dd41b 2998 val = fold_convert_loc (clause_loc, unsigned_type_node, val);
1e8e9920 2999
3000 /* If we found the clause 'if (cond)', build either
3001 (cond != 0) or (cond ? val : 1u). */
3002 if (cond)
3003 {
75a70cf9 3004 gimple_stmt_iterator gsi;
773c5ba7 3005
3006 cond = gimple_boolify (cond);
3007
1e8e9920 3008 if (integer_zerop (val))
389dd41b 3009 val = fold_build2_loc (clause_loc,
3010 EQ_EXPR, unsigned_type_node, cond,
79acaae1 3011 build_int_cst (TREE_TYPE (cond), 0));
1e8e9920 3012 else
773c5ba7 3013 {
3014 basic_block cond_bb, then_bb, else_bb;
79acaae1 3015 edge e, e_then, e_else;
75a70cf9 3016 tree tmp_then, tmp_else, tmp_join, tmp_var;
79acaae1 3017
3018 tmp_var = create_tmp_var (TREE_TYPE (val), NULL);
3019 if (gimple_in_ssa_p (cfun))
3020 {
75a70cf9 3021 tmp_then = make_ssa_name (tmp_var, NULL);
3022 tmp_else = make_ssa_name (tmp_var, NULL);
3023 tmp_join = make_ssa_name (tmp_var, NULL);
79acaae1 3024 }
3025 else
3026 {
3027 tmp_then = tmp_var;
3028 tmp_else = tmp_var;
3029 tmp_join = tmp_var;
3030 }
773c5ba7 3031
773c5ba7 3032 e = split_block (bb, NULL);
3033 cond_bb = e->src;
3034 bb = e->dest;
3035 remove_edge (e);
3036
3037 then_bb = create_empty_bb (cond_bb);
3038 else_bb = create_empty_bb (then_bb);
79acaae1 3039 set_immediate_dominator (CDI_DOMINATORS, then_bb, cond_bb);
3040 set_immediate_dominator (CDI_DOMINATORS, else_bb, cond_bb);
773c5ba7 3041
75a70cf9 3042 stmt = gimple_build_cond_empty (cond);
3043 gsi = gsi_start_bb (cond_bb);
3044 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
773c5ba7 3045
75a70cf9 3046 gsi = gsi_start_bb (then_bb);
3047 stmt = gimple_build_assign (tmp_then, val);
3048 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
773c5ba7 3049
75a70cf9 3050 gsi = gsi_start_bb (else_bb);
3051 stmt = gimple_build_assign
3052 (tmp_else, build_int_cst (unsigned_type_node, 1));
3053 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
773c5ba7 3054
3055 make_edge (cond_bb, then_bb, EDGE_TRUE_VALUE);
3056 make_edge (cond_bb, else_bb, EDGE_FALSE_VALUE);
79acaae1 3057 e_then = make_edge (then_bb, bb, EDGE_FALLTHRU);
3058 e_else = make_edge (else_bb, bb, EDGE_FALLTHRU);
773c5ba7 3059
79acaae1 3060 if (gimple_in_ssa_p (cfun))
3061 {
75a70cf9 3062 gimple phi = create_phi_node (tmp_join, bb);
79acaae1 3063 SSA_NAME_DEF_STMT (tmp_join) = phi;
b82a98ee 3064 add_phi_arg (phi, tmp_then, e_then, UNKNOWN_LOCATION, NULL);
3065 add_phi_arg (phi, tmp_else, e_else, UNKNOWN_LOCATION, NULL);
79acaae1 3066 }
3067
3068 val = tmp_join;
773c5ba7 3069 }
3070
75a70cf9 3071 gsi = gsi_start_bb (bb);
3072 val = force_gimple_operand_gsi (&gsi, val, true, NULL_TREE,
3073 false, GSI_CONTINUE_LINKING);
1e8e9920 3074 }
3075
75a70cf9 3076 gsi = gsi_last_bb (bb);
3077 t = gimple_omp_parallel_data_arg (entry_stmt);
1e8e9920 3078 if (t == NULL)
c2f47e15 3079 t1 = null_pointer_node;
1e8e9920 3080 else
c2f47e15 3081 t1 = build_fold_addr_expr (t);
75a70cf9 3082 t2 = build_fold_addr_expr (gimple_omp_parallel_child_fn (entry_stmt));
773c5ba7 3083
414c3a2c 3084 args = VEC_alloc (tree, gc, 3 + VEC_length (tree, ws_args));
3085 VEC_quick_push (tree, args, t2);
3086 VEC_quick_push (tree, args, t1);
3087 VEC_quick_push (tree, args, val);
3088 VEC_splice (tree, args, ws_args);
3089
3090 t = build_call_expr_loc_vec (UNKNOWN_LOCATION,
b9a16870 3091 builtin_decl_explicit (start_ix), args);
773c5ba7 3092
75a70cf9 3093 force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3094 false, GSI_CONTINUE_LINKING);
1e8e9920 3095
75a70cf9 3096 t = gimple_omp_parallel_data_arg (entry_stmt);
1e8e9920 3097 if (t == NULL)
3098 t = null_pointer_node;
3099 else
3100 t = build_fold_addr_expr (t);
389dd41b 3101 t = build_call_expr_loc (gimple_location (entry_stmt),
3102 gimple_omp_parallel_child_fn (entry_stmt), 1, t);
75a70cf9 3103 force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3104 false, GSI_CONTINUE_LINKING);
1e8e9920 3105
389dd41b 3106 t = build_call_expr_loc (gimple_location (entry_stmt),
b9a16870 3107 builtin_decl_explicit (BUILT_IN_GOMP_PARALLEL_END),
3108 0);
75a70cf9 3109 force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3110 false, GSI_CONTINUE_LINKING);
1e8e9920 3111}
3112
773c5ba7 3113
fd6481cf 3114/* Build the function call to GOMP_task to actually
 3115 generate the task operation. BB is the block where the code is to be inserted. */
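/* The emitted call has the shape (illustrative):

     GOMP_task (child_fn, &.omp_data_o, copy_fn,
                arg_size, arg_align, if_cond, flags);

   where FLAGS packs the clauses computed below: bit 0 = untied,
   bit 1 = final, bit 2 = mergeable.  */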
3116
3117static void
75a70cf9 3118expand_task_call (basic_block bb, gimple entry_stmt)
fd6481cf 3119{
2169f33b 3120 tree t, t1, t2, t3, flags, cond, c, c2, clauses;
75a70cf9 3121 gimple_stmt_iterator gsi;
389dd41b 3122 location_t loc = gimple_location (entry_stmt);
fd6481cf 3123
75a70cf9 3124 clauses = gimple_omp_task_clauses (entry_stmt);
fd6481cf 3125
fd6481cf 3126 c = find_omp_clause (clauses, OMP_CLAUSE_IF);
3127 if (c)
3128 cond = gimple_boolify (OMP_CLAUSE_IF_EXPR (c));
3129 else
3130 cond = boolean_true_node;
3131
3132 c = find_omp_clause (clauses, OMP_CLAUSE_UNTIED);
2169f33b 3133 c2 = find_omp_clause (clauses, OMP_CLAUSE_MERGEABLE);
3134 flags = build_int_cst (unsigned_type_node,
3135 (c ? 1 : 0) + (c2 ? 4 : 0));
3136
3137 c = find_omp_clause (clauses, OMP_CLAUSE_FINAL);
3138 if (c)
3139 {
3140 c = gimple_boolify (OMP_CLAUSE_FINAL_EXPR (c));
3141 c = fold_build3_loc (loc, COND_EXPR, unsigned_type_node, c,
3142 build_int_cst (unsigned_type_node, 2),
3143 build_int_cst (unsigned_type_node, 0));
3144 flags = fold_build2_loc (loc, PLUS_EXPR, unsigned_type_node, flags, c);
3145 }
fd6481cf 3146
75a70cf9 3147 gsi = gsi_last_bb (bb);
3148 t = gimple_omp_task_data_arg (entry_stmt);
fd6481cf 3149 if (t == NULL)
3150 t2 = null_pointer_node;
3151 else
389dd41b 3152 t2 = build_fold_addr_expr_loc (loc, t);
3153 t1 = build_fold_addr_expr_loc (loc, gimple_omp_task_child_fn (entry_stmt));
75a70cf9 3154 t = gimple_omp_task_copy_fn (entry_stmt);
fd6481cf 3155 if (t == NULL)
3156 t3 = null_pointer_node;
3157 else
389dd41b 3158 t3 = build_fold_addr_expr_loc (loc, t);
fd6481cf 3159
b9a16870 3160 t = build_call_expr (builtin_decl_explicit (BUILT_IN_GOMP_TASK),
3161 7, t1, t2, t3,
75a70cf9 3162 gimple_omp_task_arg_size (entry_stmt),
3163 gimple_omp_task_arg_align (entry_stmt), cond, flags);
fd6481cf 3164
75a70cf9 3165 force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3166 false, GSI_CONTINUE_LINKING);
fd6481cf 3167}
3168
3169
75a70cf9 3170/* If exceptions are enabled, wrap the statements in BODY in a MUST_NOT_THROW
3171 catch handler and return it. This prevents programs from violating the
3172 structured block semantics with throws. */
1e8e9920 3173
75a70cf9 3174static gimple_seq
3175maybe_catch_exception (gimple_seq body)
1e8e9920 3176{
e38def9c 3177 gimple g;
3178 tree decl;
1e8e9920 3179
3180 if (!flag_exceptions)
75a70cf9 3181 return body;
1e8e9920 3182
596981c8 3183 if (lang_hooks.eh_protect_cleanup_actions != NULL)
3184 decl = lang_hooks.eh_protect_cleanup_actions ();
1e8e9920 3185 else
b9a16870 3186 decl = builtin_decl_explicit (BUILT_IN_TRAP);
75a70cf9 3187
e38def9c 3188 g = gimple_build_eh_must_not_throw (decl);
3189 g = gimple_build_try (body, gimple_seq_alloc_with_stmt (g),
75a70cf9 3190 GIMPLE_TRY_CATCH);
1e8e9920 3191
e38def9c 3192 return gimple_seq_alloc_with_stmt (g);
1e8e9920 3193}
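/* In C++ terms the wrapper corresponds roughly to (illustrative)

     try { body } catch (...) { std::terminate (); }

   with the language's EH cleanup action (or __builtin_trap when there
   is none) serving as the handler, so a throw cannot escape the
   structured block.  */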
3194
773c5ba7 3195/* Chain all the DECLs in V by their DECL_CHAIN fields. */
1e8e9920 3196
773c5ba7 3197static tree
2ab2ce89 3198vec2chain (VEC(tree,gc) *v)
1e8e9920 3199{
2ab2ce89 3200 tree chain = NULL_TREE, t;
3201 unsigned ix;
1e8e9920 3202
2ab2ce89 3203 FOR_EACH_VEC_ELT_REVERSE (tree, v, ix, t)
773c5ba7 3204 {
1767a056 3205 DECL_CHAIN (t) = chain;
2ab2ce89 3206 chain = t;
773c5ba7 3207 }
1e8e9920 3208
2ab2ce89 3209 return chain;
773c5ba7 3210}
1e8e9920 3211
1e8e9920 3212
773c5ba7 3213/* Remove barriers in REGION->EXIT's block. Note that this is only
75a70cf9 3214 valid for GIMPLE_OMP_PARALLEL regions. Since the end of a parallel region
3215 is an implicit barrier, any workshare inside the GIMPLE_OMP_PARALLEL that
3216 left a barrier at the end of the GIMPLE_OMP_PARALLEL region can now be
773c5ba7 3217 removed. */
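/* For example (illustrative):

     #pragma omp parallel
     {
       #pragma omp for  // the implicit barrier at the end of the loop...
       for (...) ...
     }                  // ...is redundant with the parallel's own
                        // implicit barrier, so it can be dropped.  */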
1e8e9920 3218
773c5ba7 3219static void
3220remove_exit_barrier (struct omp_region *region)
3221{
75a70cf9 3222 gimple_stmt_iterator gsi;
773c5ba7 3223 basic_block exit_bb;
61e47ac8 3224 edge_iterator ei;
3225 edge e;
75a70cf9 3226 gimple stmt;
4a04f4b4 3227 int any_addressable_vars = -1;
1e8e9920 3228
61e47ac8 3229 exit_bb = region->exit;
1e8e9920 3230
5056ba1a 3231 /* If the parallel region doesn't return, we don't have REGION->EXIT
3232 block at all. */
3233 if (! exit_bb)
3234 return;
3235
75a70cf9 3236 /* The last insn in the block will be the parallel's GIMPLE_OMP_RETURN. The
3237 workshare's GIMPLE_OMP_RETURN will be in a preceding block. The kinds of
61e47ac8 3238 statements that can appear in between are extremely limited -- no
 3239 memory operations at all. Here, the only thing we allow to
75a70cf9 3240 precede this GIMPLE_OMP_RETURN is a label. */
3241 gsi = gsi_last_bb (exit_bb);
3242 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_RETURN);
3243 gsi_prev (&gsi);
3244 if (!gsi_end_p (gsi) && gimple_code (gsi_stmt (gsi)) != GIMPLE_LABEL)
773c5ba7 3245 return;
1e8e9920 3246
61e47ac8 3247 FOR_EACH_EDGE (e, ei, exit_bb->preds)
3248 {
75a70cf9 3249 gsi = gsi_last_bb (e->src);
3250 if (gsi_end_p (gsi))
61e47ac8 3251 continue;
75a70cf9 3252 stmt = gsi_stmt (gsi);
4a04f4b4 3253 if (gimple_code (stmt) == GIMPLE_OMP_RETURN
3254 && !gimple_omp_return_nowait_p (stmt))
3255 {
3256 /* OpenMP 3.0 tasks unfortunately prevent this optimization
3257 in many cases. If there could be tasks queued, the barrier
3258 might be needed to let the tasks run before some local
3259 variable of the parallel that the task uses as shared
3260 runs out of scope. The task can be spawned either
 3261 from within the current function (this would be easy to check)
3262 or from some function it calls and gets passed an address
3263 of such a variable. */
3264 if (any_addressable_vars < 0)
3265 {
3266 gimple parallel_stmt = last_stmt (region->entry);
3267 tree child_fun = gimple_omp_parallel_child_fn (parallel_stmt);
2ab2ce89 3268 tree local_decls, block, decl;
3269 unsigned ix;
4a04f4b4 3270
3271 any_addressable_vars = 0;
2ab2ce89 3272 FOR_EACH_LOCAL_DECL (DECL_STRUCT_FUNCTION (child_fun), ix, decl)
3273 if (TREE_ADDRESSABLE (decl))
4a04f4b4 3274 {
3275 any_addressable_vars = 1;
3276 break;
3277 }
3278 for (block = gimple_block (stmt);
3279 !any_addressable_vars
3280 && block
3281 && TREE_CODE (block) == BLOCK;
3282 block = BLOCK_SUPERCONTEXT (block))
3283 {
3284 for (local_decls = BLOCK_VARS (block);
3285 local_decls;
1767a056 3286 local_decls = DECL_CHAIN (local_decls))
4a04f4b4 3287 if (TREE_ADDRESSABLE (local_decls))
3288 {
3289 any_addressable_vars = 1;
3290 break;
3291 }
3292 if (block == gimple_block (parallel_stmt))
3293 break;
3294 }
3295 }
3296 if (!any_addressable_vars)
3297 gimple_omp_return_set_nowait (stmt);
3298 }
61e47ac8 3299 }
1e8e9920 3300}
3301
61e47ac8 3302static void
3303remove_exit_barriers (struct omp_region *region)
3304{
75a70cf9 3305 if (region->type == GIMPLE_OMP_PARALLEL)
61e47ac8 3306 remove_exit_barrier (region);
3307
3308 if (region->inner)
3309 {
3310 region = region->inner;
3311 remove_exit_barriers (region);
3312 while (region->next)
3313 {
3314 region = region->next;
3315 remove_exit_barriers (region);
3316 }
3317 }
3318}
773c5ba7 3319
658b4427 3320/* Optimize omp_get_thread_num () and omp_get_num_threads ()
3321 calls. These can't be declared as const functions, but
3322 within one parallel body they are constant, so they can be
3323 transformed there into __builtin_omp_get_{thread_num,num_threads} ()
fd6481cf 3324 which are declared const. Similarly for a task body, except
 3325 that in an untied task omp_get_thread_num () can change at any
 3326 task scheduling point. */
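/* E.g. inside a parallel body (illustrative):

     a = omp_get_num_threads ();
     ...
     b = omp_get_num_threads ();

   both calls are redirected to the const builtin
   __builtin_omp_get_num_threads (), allowing later passes to CSE
   them.  */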
658b4427 3327
3328static void
75a70cf9 3329optimize_omp_library_calls (gimple entry_stmt)
658b4427 3330{
3331 basic_block bb;
75a70cf9 3332 gimple_stmt_iterator gsi;
b9a16870 3333 tree thr_num_tree = builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM);
3334 tree thr_num_id = DECL_ASSEMBLER_NAME (thr_num_tree);
3335 tree num_thr_tree = builtin_decl_explicit (BUILT_IN_OMP_GET_NUM_THREADS);
3336 tree num_thr_id = DECL_ASSEMBLER_NAME (num_thr_tree);
75a70cf9 3337 bool untied_task = (gimple_code (entry_stmt) == GIMPLE_OMP_TASK
3338 && find_omp_clause (gimple_omp_task_clauses (entry_stmt),
fd6481cf 3339 OMP_CLAUSE_UNTIED) != NULL);
658b4427 3340
3341 FOR_EACH_BB (bb)
75a70cf9 3342 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
658b4427 3343 {
75a70cf9 3344 gimple call = gsi_stmt (gsi);
658b4427 3345 tree decl;
3346
75a70cf9 3347 if (is_gimple_call (call)
3348 && (decl = gimple_call_fndecl (call))
658b4427 3349 && DECL_EXTERNAL (decl)
3350 && TREE_PUBLIC (decl)
3351 && DECL_INITIAL (decl) == NULL)
3352 {
3353 tree built_in;
3354
3355 if (DECL_NAME (decl) == thr_num_id)
fd6481cf 3356 {
3357 /* In #pragma omp task untied omp_get_thread_num () can change
3358 during the execution of the task region. */
3359 if (untied_task)
3360 continue;
b9a16870 3361 built_in = builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM);
fd6481cf 3362 }
658b4427 3363 else if (DECL_NAME (decl) == num_thr_id)
b9a16870 3364 built_in = builtin_decl_explicit (BUILT_IN_OMP_GET_NUM_THREADS);
658b4427 3365 else
3366 continue;
3367
3368 if (DECL_ASSEMBLER_NAME (decl) != DECL_ASSEMBLER_NAME (built_in)
75a70cf9 3369 || gimple_call_num_args (call) != 0)
658b4427 3370 continue;
3371
3372 if (flag_exceptions && !TREE_NOTHROW (decl))
3373 continue;
3374
3375 if (TREE_CODE (TREE_TYPE (decl)) != FUNCTION_TYPE
1ea6a73c 3376 || !types_compatible_p (TREE_TYPE (TREE_TYPE (decl)),
3377 TREE_TYPE (TREE_TYPE (built_in))))
658b4427 3378 continue;
3379
0acacf9e 3380 gimple_call_set_fndecl (call, built_in);
658b4427 3381 }
3382 }
3383}
3384
fd6481cf 3385/* Expand the OpenMP parallel or task directive starting at REGION. */
1e8e9920 3386
3387static void
fd6481cf 3388expand_omp_taskreg (struct omp_region *region)
1e8e9920 3389{
773c5ba7 3390 basic_block entry_bb, exit_bb, new_bb;
87d4aa85 3391 struct function *child_cfun;
414c3a2c 3392 tree child_fn, block, t;
ba3a7ba0 3393 tree save_current;
75a70cf9 3394 gimple_stmt_iterator gsi;
3395 gimple entry_stmt, stmt;
773c5ba7 3396 edge e;
414c3a2c 3397 VEC(tree,gc) *ws_args;
773c5ba7 3398
61e47ac8 3399 entry_stmt = last_stmt (region->entry);
75a70cf9 3400 child_fn = gimple_omp_taskreg_child_fn (entry_stmt);
773c5ba7 3401 child_cfun = DECL_STRUCT_FUNCTION (child_fn);
b3a3ddec 3402 /* If this function has already been instrumented, make sure
3403 the child function isn't instrumented again. */
3404 child_cfun->after_tree_profile = cfun->after_tree_profile;
773c5ba7 3405
61e47ac8 3406 entry_bb = region->entry;
3407 exit_bb = region->exit;
773c5ba7 3408
773c5ba7 3409 if (is_combined_parallel (region))
61e47ac8 3410 ws_args = region->ws_args;
773c5ba7 3411 else
414c3a2c 3412 ws_args = NULL;
1e8e9920 3413
61e47ac8 3414 if (child_cfun->cfg)
1e8e9920 3415 {
773c5ba7 3416 /* Due to inlining, it may happen that we have already outlined
3417 the region, in which case all we need to do is make the
3418 sub-graph unreachable and emit the parallel call. */
3419 edge entry_succ_e, exit_succ_e;
75a70cf9 3420 gimple_stmt_iterator gsi;
773c5ba7 3421
3422 entry_succ_e = single_succ_edge (entry_bb);
773c5ba7 3423
75a70cf9 3424 gsi = gsi_last_bb (entry_bb);
3425 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_PARALLEL
3426 || gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_TASK);
3427 gsi_remove (&gsi, true);
773c5ba7 3428
3429 new_bb = entry_bb;
03ed154b 3430 if (exit_bb)
3431 {
3432 exit_succ_e = single_succ_edge (exit_bb);
3433 make_edge (new_bb, exit_succ_e->dest, EDGE_FALLTHRU);
3434 }
79acaae1 3435 remove_edge_and_dominated_blocks (entry_succ_e);
1e8e9920 3436 }
773c5ba7 3437 else
3438 {
501bdd19 3439 unsigned srcidx, dstidx, num;
2ab2ce89 3440
773c5ba7 3441 /* If the parallel region needs data sent from the parent
3480139d 3442 function, then the very first statement (except for possible
3443 tree profile counter updates) of the parallel body
773c5ba7 3444 is a copy assignment .OMP_DATA_I = &.OMP_DATA_O. Since
3445 &.OMP_DATA_O is passed as an argument to the child function,
3446 we need to replace it with the argument as seen by the child
3447 function.
3448
3449 In most cases, this will end up being the identity assignment
3450 .OMP_DATA_I = .OMP_DATA_I. However, if the parallel body had
3451 a function call that has been inlined, the original PARM_DECL
3452 .OMP_DATA_I may have been converted into a different local
3453 variable; in that case, we need to keep the assignment. */
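/* Illustrative sketch (not original text): the outlined body typically
   begins with

     .omp_data_i = &.omp_data_o;

   and since &.omp_data_o is exactly what the child receives as its
   argument, rewriting the RHS yields the removable identity copy
   .omp_data_i = .omp_data_i; only after inlining can a genuine copy
   remain.  */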
75a70cf9 3454 if (gimple_omp_taskreg_data_arg (entry_stmt))
773c5ba7 3455 {
3456 basic_block entry_succ_bb = single_succ (entry_bb);
75a70cf9 3457 gimple_stmt_iterator gsi;
3458 tree arg, narg;
3459 gimple parcopy_stmt = NULL;
1e8e9920 3460
75a70cf9 3461 for (gsi = gsi_start_bb (entry_succ_bb); ; gsi_next (&gsi))
3480139d 3462 {
75a70cf9 3463 gimple stmt;
3480139d 3464
75a70cf9 3465 gcc_assert (!gsi_end_p (gsi));
3466 stmt = gsi_stmt (gsi);
3467 if (gimple_code (stmt) != GIMPLE_ASSIGN)
cc6b725b 3468 continue;
3469
75a70cf9 3470 if (gimple_num_ops (stmt) == 2)
3480139d 3471 {
75a70cf9 3472 tree arg = gimple_assign_rhs1 (stmt);
3473
3474 /* We're ignoring the subcode because we're
3475 effectively doing a STRIP_NOPS. */
3476
3477 if (TREE_CODE (arg) == ADDR_EXPR
3478 && TREE_OPERAND (arg, 0)
3479 == gimple_omp_taskreg_data_arg (entry_stmt))
3480 {
3481 parcopy_stmt = stmt;
3482 break;
3483 }
3480139d 3484 }
3485 }
79acaae1 3486
75a70cf9 3487 gcc_assert (parcopy_stmt != NULL);
79acaae1 3488 arg = DECL_ARGUMENTS (child_fn);
3489
3490 if (!gimple_in_ssa_p (cfun))
3491 {
75a70cf9 3492 if (gimple_assign_lhs (parcopy_stmt) == arg)
3493 gsi_remove (&gsi, true);
79acaae1 3494 else
75a70cf9 3495 {
3496 /* ?? Is setting the subcode really necessary ?? */
3497 gimple_omp_set_subcode (parcopy_stmt, TREE_CODE (arg));
3498 gimple_assign_set_rhs1 (parcopy_stmt, arg);
3499 }
79acaae1 3500 }
3501 else
3502 {
3503 /* If we are in ssa form, we must load the value from the default
3504 definition of the argument. That default definition should not
3505 exist yet, since the argument is never used uninitialized. */
3506 gcc_assert (gimple_default_def (cfun, arg) == NULL);
75a70cf9 3507 narg = make_ssa_name (arg, gimple_build_nop ());
79acaae1 3508 set_default_def (arg, narg);
75a70cf9 3509 /* ?? Is setting the subcode really necessary ?? */
3510 gimple_omp_set_subcode (parcopy_stmt, TREE_CODE (narg));
3511 gimple_assign_set_rhs1 (parcopy_stmt, narg);
79acaae1 3512 update_stmt (parcopy_stmt);
3513 }
773c5ba7 3514 }
3515
3516 /* Declare local variables needed in CHILD_CFUN. */
3517 block = DECL_INITIAL (child_fn);
2ab2ce89 3518 BLOCK_VARS (block) = vec2chain (child_cfun->local_decls);
e1a7ccb9 3519 /* The gimplifier could record temporaries in the parallel/task block
3520 rather than in the containing function's local_decls chain,
3521 which would mean cgraph would miss finalizing them. Do it now. */
1767a056 3522 for (t = BLOCK_VARS (block); t; t = DECL_CHAIN (t))
e1a7ccb9 3523 if (TREE_CODE (t) == VAR_DECL
3524 && TREE_STATIC (t)
3525 && !DECL_EXTERNAL (t))
3526 varpool_finalize_decl (t);
75a70cf9 3527 DECL_SAVED_TREE (child_fn) = NULL;
e3a19533 3528 /* We'll create a CFG for child_fn, so no gimple body is needed. */
3529 gimple_set_body (child_fn, NULL);
1d22f541 3530 TREE_USED (block) = 1;
773c5ba7 3531
79acaae1 3532 /* Reset DECL_CONTEXT on function arguments. */
1767a056 3533 for (t = DECL_ARGUMENTS (child_fn); t; t = DECL_CHAIN (t))
773c5ba7 3534 DECL_CONTEXT (t) = child_fn;
3535
75a70cf9 3536 /* Split ENTRY_BB at GIMPLE_OMP_PARALLEL or GIMPLE_OMP_TASK,
3537 so that it can be moved to the child function. */
3538 gsi = gsi_last_bb (entry_bb);
3539 stmt = gsi_stmt (gsi);
3540 gcc_assert (stmt && (gimple_code (stmt) == GIMPLE_OMP_PARALLEL
3541 || gimple_code (stmt) == GIMPLE_OMP_TASK));
3542 gsi_remove (&gsi, true);
3543 e = split_block (entry_bb, stmt);
773c5ba7 3544 entry_bb = e->dest;
3545 single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
3546
75a70cf9 3547 /* Convert GIMPLE_OMP_RETURN into a RETURN_EXPR. */
5056ba1a 3548 if (exit_bb)
3549 {
75a70cf9 3550 gsi = gsi_last_bb (exit_bb);
3551 gcc_assert (!gsi_end_p (gsi)
3552 && gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_RETURN);
3553 stmt = gimple_build_return (NULL);
3554 gsi_insert_after (&gsi, stmt, GSI_SAME_STMT);
3555 gsi_remove (&gsi, true);
5056ba1a 3556 }
79acaae1 3557
3558 /* Move the parallel region into CHILD_CFUN. */
48e1416a 3559
79acaae1 3560 if (gimple_in_ssa_p (cfun))
3561 {
bcaa2770 3562 init_tree_ssa (child_cfun);
5084b2e4 3563 init_ssa_operands (child_cfun);
3564 child_cfun->gimple_df->in_ssa_p = true;
1d22f541 3565 block = NULL_TREE;
79acaae1 3566 }
1d22f541 3567 else
75a70cf9 3568 block = gimple_block (entry_stmt);
1d22f541 3569
3570 new_bb = move_sese_region_to_fn (child_cfun, entry_bb, exit_bb, block);
79acaae1 3571 if (exit_bb)
3572 single_succ_edge (new_bb)->flags = EDGE_FALLTHRU;
3573
1d22f541 3574 /* Remove non-local VAR_DECLs from child_cfun->local_decls list. */
501bdd19 3575 num = VEC_length (tree, child_cfun->local_decls);
3576 for (srcidx = 0, dstidx = 0; srcidx < num; srcidx++)
3577 {
3578 t = VEC_index (tree, child_cfun->local_decls, srcidx);
3579 if (DECL_CONTEXT (t) == cfun->decl)
3580 continue;
3581 if (srcidx != dstidx)
3582 VEC_replace (tree, child_cfun->local_decls, dstidx, t);
3583 dstidx++;
3584 }
3585 if (dstidx != num)
3586 VEC_truncate (tree, child_cfun->local_decls, dstidx);
1d22f541 3587
79acaae1 3588 /* Inform the callgraph about the new function. */
3589 DECL_STRUCT_FUNCTION (child_fn)->curr_properties
79f958cb 3590 = cfun->curr_properties & ~PROP_loops;
79acaae1 3591 cgraph_add_new_function (child_fn, true);
3592
3593 /* Fix the callgraph edges for child_cfun. Those for cfun will be
3594 fixed in a following pass. */
3595 push_cfun (child_cfun);
ba3a7ba0 3596 save_current = current_function_decl;
3597 current_function_decl = child_fn;
658b4427 3598 if (optimize)
fd6481cf 3599 optimize_omp_library_calls (entry_stmt);
79acaae1 3600 rebuild_cgraph_edges ();
fbe86b1b 3601
3602 /* Some EH regions might become dead, see PR34608. If
3603 pass_cleanup_cfg isn't the first pass to happen with the
3604 new child, these dead EH edges might cause problems.
3605 Clean them up now. */
3606 if (flag_exceptions)
3607 {
3608 basic_block bb;
fbe86b1b 3609 bool changed = false;
3610
fbe86b1b 3611 FOR_EACH_BB (bb)
75a70cf9 3612 changed |= gimple_purge_dead_eh_edges (bb);
fbe86b1b 3613 if (changed)
3614 cleanup_tree_cfg ();
fbe86b1b 3615 }
dd277d48 3616 if (gimple_in_ssa_p (cfun))
3617 update_ssa (TODO_update_ssa);
ba3a7ba0 3618 current_function_decl = save_current;
79acaae1 3619 pop_cfun ();
773c5ba7 3620 }
48e1416a 3621
773c5ba7 3622 /* Emit a library call to launch the children threads. */
75a70cf9 3623 if (gimple_code (entry_stmt) == GIMPLE_OMP_PARALLEL)
fd6481cf 3624 expand_parallel_call (region, new_bb, entry_stmt, ws_args);
3625 else
3626 expand_task_call (new_bb, entry_stmt);
083152fb 3627 if (gimple_in_ssa_p (cfun))
3628 update_ssa (TODO_update_ssa_only_virtuals);
1e8e9920 3629}
3630
773c5ba7 3631
3632/* A subroutine of expand_omp_for. Generate code for a parallel
1e8e9920 3633 loop with any schedule. Given parameters:
3634
3635 for (V = N1; V cond N2; V += STEP) BODY;
3636
3637 where COND is "<" or ">", we generate pseudocode
3638
3639 more = GOMP_loop_foo_start (N1, N2, STEP, CHUNK, &istart0, &iend0);
773c5ba7 3640 if (more) goto L0; else goto L3;
1e8e9920 3641 L0:
3642 V = istart0;
3643 iend = iend0;
3644 L1:
3645 BODY;
3646 V += STEP;
773c5ba7 3647 if (V cond iend) goto L1; else goto L2;
1e8e9920 3648 L2:
773c5ba7 3649 if (GOMP_loop_foo_next (&istart0, &iend0)) goto L0; else goto L3;
3650 L3:
1e8e9920 3651
773c5ba7 3652 If this is a combined omp parallel loop, instead of the call to
fd6481cf 3653 GOMP_loop_foo_start, we call GOMP_loop_foo_next.
3654
3655 For collapsed loops, given parameters:
3656 collapse(3)
3657 for (V1 = N11; V1 cond1 N12; V1 += STEP1)
3658 for (V2 = N21; V2 cond2 N22; V2 += STEP2)
3659 for (V3 = N31; V3 cond3 N32; V3 += STEP3)
3660 BODY;
3661
3662 we generate pseudocode
3663
3664 if (cond3 is <)
3665 adj = STEP3 - 1;
3666 else
3667 adj = STEP3 + 1;
3668 count3 = (adj + N32 - N31) / STEP3;
3669 if (cond2 is <)
3670 adj = STEP2 - 1;
3671 else
3672 adj = STEP2 + 1;
3673 count2 = (adj + N22 - N21) / STEP2;
3674 if (cond1 is <)
3675 adj = STEP1 - 1;
3676 else
3677 adj = STEP1 + 1;
3678 count1 = (adj + N12 - N11) / STEP1;
3679 count = count1 * count2 * count3;
3680 more = GOMP_loop_foo_start (0, count, 1, CHUNK, &istart0, &iend0);
3681 if (more) goto L0; else goto L3;
3682 L0:
3683 V = istart0;
3684 T = V;
3685 V3 = N31 + (T % count3) * STEP3;
3686 T = T / count3;
3687 V2 = N21 + (T % count2) * STEP2;
3688 T = T / count2;
3689 V1 = N11 + T * STEP1;
3690 iend = iend0;
3691 L1:
3692 BODY;
3693 V += 1;
3694 if (V < iend) goto L10; else goto L2;
3695 L10:
3696 V3 += STEP3;
3697 if (V3 cond3 N32) goto L1; else goto L11;
3698 L11:
3699 V3 = N31;
3700 V2 += STEP2;
3701 if (V2 cond2 N22) goto L1; else goto L12;
3702 L12:
3703 V2 = N21;
3704 V1 += STEP1;
3705 goto L1;
3706 L2:
3707 if (GOMP_loop_foo_next (&istart0, &iend0)) goto L0; else goto L3;
3708 L3:
3709
3710 */
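/* Worked example (an illustration added here, not original text):
   for collapse(2) with

     for (V1 = 0; V1 < 4; V1++)
       for (V2 = 0; V2 < 2; V2++)
	 BODY;

   we get count1 = 4, count2 = 2 and count = 8.  Decoding the logical
   iteration V = 5 gives V2 = 0 + (5 % 2) * 1 = 1, T = 5 / 2 = 2 and
   V1 = 0 + 2 * 1 = 2, i.e. (V1, V2) = (2, 1), the sixth iteration in
   source order.  */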
1e8e9920 3711
61e47ac8 3712static void
773c5ba7 3713expand_omp_for_generic (struct omp_region *region,
3714 struct omp_for_data *fd,
1e8e9920 3715 enum built_in_function start_fn,
3716 enum built_in_function next_fn)
3717{
75a70cf9 3718 tree type, istart0, iend0, iend;
fd6481cf 3719 tree t, vmain, vback, bias = NULL_TREE;
3720 basic_block entry_bb, cont_bb, exit_bb, l0_bb, l1_bb, collapse_bb;
03ed154b 3721 basic_block l2_bb = NULL, l3_bb = NULL;
75a70cf9 3722 gimple_stmt_iterator gsi;
3723 gimple stmt;
773c5ba7 3724 bool in_combined_parallel = is_combined_parallel (region);
ac6e3339 3725 bool broken_loop = region->cont == NULL;
79acaae1 3726 edge e, ne;
fd6481cf 3727 tree *counts = NULL;
3728 int i;
ac6e3339 3729
3730 gcc_assert (!broken_loop || !in_combined_parallel);
fd6481cf 3731 gcc_assert (fd->iter_type == long_integer_type_node
3732 || !in_combined_parallel);
1e8e9920 3733
fd6481cf 3734 type = TREE_TYPE (fd->loop.v);
3735 istart0 = create_tmp_var (fd->iter_type, ".istart0");
3736 iend0 = create_tmp_var (fd->iter_type, ".iend0");
6d63fc03 3737 TREE_ADDRESSABLE (istart0) = 1;
3738 TREE_ADDRESSABLE (iend0) = 1;
083152fb 3739 if (gimple_referenced_vars (cfun))
79acaae1 3740 {
3741 add_referenced_var (istart0);
3742 add_referenced_var (iend0);
3743 }
1e8e9920 3744
fd6481cf 3745 /* See if we need to bias by LLONG_MIN. */
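/* Illustrative example (not original text): for a signed long long loop
   for (V = -10; V < 10; V++) iterated with an unsigned long long
   iter_type, both bounds are shifted by LLONG_MIN so the runtime works
   on a nonnegative range in unsigned arithmetic; the bias is subtracted
   again below when V is reconstructed from istart0.  */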
3746 if (fd->iter_type == long_long_unsigned_type_node
3747 && TREE_CODE (type) == INTEGER_TYPE
3748 && !TYPE_UNSIGNED (type))
3749 {
3750 tree n1, n2;
3751
3752 if (fd->loop.cond_code == LT_EXPR)
3753 {
3754 n1 = fd->loop.n1;
3755 n2 = fold_build2 (PLUS_EXPR, type, fd->loop.n2, fd->loop.step);
3756 }
3757 else
3758 {
3759 n1 = fold_build2 (MINUS_EXPR, type, fd->loop.n2, fd->loop.step);
3760 n2 = fd->loop.n1;
3761 }
3762 if (TREE_CODE (n1) != INTEGER_CST
3763 || TREE_CODE (n2) != INTEGER_CST
3764 || ((tree_int_cst_sgn (n1) < 0) ^ (tree_int_cst_sgn (n2) < 0)))
3765 bias = fold_convert (fd->iter_type, TYPE_MIN_VALUE (type));
3766 }
3767
61e47ac8 3768 entry_bb = region->entry;
03ed154b 3769 cont_bb = region->cont;
fd6481cf 3770 collapse_bb = NULL;
ac6e3339 3771 gcc_assert (EDGE_COUNT (entry_bb->succs) == 2);
3772 gcc_assert (broken_loop
3773 || BRANCH_EDGE (entry_bb)->dest == FALLTHRU_EDGE (cont_bb)->dest);
3774 l0_bb = split_edge (FALLTHRU_EDGE (entry_bb));
3775 l1_bb = single_succ (l0_bb);
3776 if (!broken_loop)
03ed154b 3777 {
3778 l2_bb = create_empty_bb (cont_bb);
ac6e3339 3779 gcc_assert (BRANCH_EDGE (cont_bb)->dest == l1_bb);
3780 gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
03ed154b 3781 }
ac6e3339 3782 else
3783 l2_bb = NULL;
3784 l3_bb = BRANCH_EDGE (entry_bb)->dest;
3785 exit_bb = region->exit;
773c5ba7 3786
75a70cf9 3787 gsi = gsi_last_bb (entry_bb);
fd6481cf 3788
75a70cf9 3789 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
fd6481cf 3790 if (fd->collapse > 1)
3791 {
3792 /* Collapsed loops need work for expansion in SSA form. */
3793 gcc_assert (!gimple_in_ssa_p (cfun));
3794 counts = (tree *) alloca (fd->collapse * sizeof (tree));
3795 for (i = 0; i < fd->collapse; i++)
3796 {
3797 tree itype = TREE_TYPE (fd->loops[i].v);
3798
3799 if (POINTER_TYPE_P (itype))
3cea8318 3800 itype = signed_type_for (itype);
fd6481cf 3801 t = build_int_cst (itype, (fd->loops[i].cond_code == LT_EXPR
3802 ? -1 : 1));
3803 t = fold_build2 (PLUS_EXPR, itype,
3804 fold_convert (itype, fd->loops[i].step), t);
3805 t = fold_build2 (PLUS_EXPR, itype, t,
3806 fold_convert (itype, fd->loops[i].n2));
3807 t = fold_build2 (MINUS_EXPR, itype, t,
3808 fold_convert (itype, fd->loops[i].n1));
3809 if (TYPE_UNSIGNED (itype) && fd->loops[i].cond_code == GT_EXPR)
3810 t = fold_build2 (TRUNC_DIV_EXPR, itype,
3811 fold_build1 (NEGATE_EXPR, itype, t),
3812 fold_build1 (NEGATE_EXPR, itype,
3813 fold_convert (itype,
3814 fd->loops[i].step)));
3815 else
3816 t = fold_build2 (TRUNC_DIV_EXPR, itype, t,
3817 fold_convert (itype, fd->loops[i].step));
3818 t = fold_convert (type, t);
3819 if (TREE_CODE (t) == INTEGER_CST)
3820 counts[i] = t;
3821 else
3822 {
083152fb 3823 counts[i] = make_rename_temp (type, ".count");
75a70cf9 3824 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3825 true, GSI_SAME_STMT);
3826 stmt = gimple_build_assign (counts[i], t);
3827 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
fd6481cf 3828 }
3829 if (SSA_VAR_P (fd->loop.n2))
3830 {
3831 if (i == 0)
75a70cf9 3832 t = counts[0];
fd6481cf 3833 else
3834 {
3835 t = fold_build2 (MULT_EXPR, type, fd->loop.n2, counts[i]);
75a70cf9 3836 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3837 true, GSI_SAME_STMT);
fd6481cf 3838 }
75a70cf9 3839 stmt = gimple_build_assign (fd->loop.n2, t);
3840 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
fd6481cf 3841 }
3842 }
3843 }
79acaae1 3844 if (in_combined_parallel)
3845 {
3846 /* In a combined parallel loop, emit a call to
3847 GOMP_loop_foo_next. */
b9a16870 3848 t = build_call_expr (builtin_decl_explicit (next_fn), 2,
79acaae1 3849 build_fold_addr_expr (istart0),
3850 build_fold_addr_expr (iend0));
3851 }
3852 else
1e8e9920 3853 {
c2f47e15 3854 tree t0, t1, t2, t3, t4;
773c5ba7 3855 /* If this is not a combined parallel loop, emit a call to
3856 GOMP_loop_foo_start in ENTRY_BB. */
c2f47e15 3857 t4 = build_fold_addr_expr (iend0);
3858 t3 = build_fold_addr_expr (istart0);
fd6481cf 3859 t2 = fold_convert (fd->iter_type, fd->loop.step);
c799f233 3860 if (POINTER_TYPE_P (type)
3861 && TYPE_PRECISION (type) != TYPE_PRECISION (fd->iter_type))
3862 {
3863 /* Avoid casting pointers to an integer of a different size. */
3cea8318 3864 tree itype = signed_type_for (type);
c799f233 3865 t1 = fold_convert (fd->iter_type, fold_convert (itype, fd->loop.n2));
3866 t0 = fold_convert (fd->iter_type, fold_convert (itype, fd->loop.n1));
3867 }
3868 else
3869 {
3870 t1 = fold_convert (fd->iter_type, fd->loop.n2);
3871 t0 = fold_convert (fd->iter_type, fd->loop.n1);
3872 }
fd6481cf 3873 if (bias)
1e8e9920 3874 {
fd6481cf 3875 t1 = fold_build2 (PLUS_EXPR, fd->iter_type, t1, bias);
3876 t0 = fold_build2 (PLUS_EXPR, fd->iter_type, t0, bias);
3877 }
3878 if (fd->iter_type == long_integer_type_node)
3879 {
3880 if (fd->chunk_size)
3881 {
3882 t = fold_convert (fd->iter_type, fd->chunk_size);
b9a16870 3883 t = build_call_expr (builtin_decl_explicit (start_fn),
3884 6, t0, t1, t2, t, t3, t4);
fd6481cf 3885 }
3886 else
b9a16870 3887 t = build_call_expr (builtin_decl_explicit (start_fn),
3888 5, t0, t1, t2, t3, t4);
1e8e9920 3889 }
c2f47e15 3890 else
fd6481cf 3891 {
3892 tree t5;
3893 tree c_bool_type;
b9a16870 3894 tree bfn_decl;
fd6481cf 3895
3896 /* The GOMP_loop_ull_*start functions have an additional boolean
3897 argument, true for < loops and false for > loops.
3898 In Fortran, the C bool type can be different from
3899 boolean_type_node. */
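/* E.g. (an illustrative sketch): for a < loop the start call emitted
   below has the shape
     GOMP_loop_ull_dynamic_start (1, n1, n2, step, chunk,
				  &istart0, &iend0);
   with a leading 0 instead for a > loop.  */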
b9a16870 3900 bfn_decl = builtin_decl_explicit (start_fn);
3901 c_bool_type = TREE_TYPE (TREE_TYPE (bfn_decl));
fd6481cf 3902 t5 = build_int_cst (c_bool_type,
3903 fd->loop.cond_code == LT_EXPR ? 1 : 0);
3904 if (fd->chunk_size)
3905 {
fd6481cf 3907 t = fold_convert (fd->iter_type, fd->chunk_size);
b9a16870 3908 t = build_call_expr (bfn_decl, 7, t5, t0, t1, t2, t, t3, t4);
fd6481cf 3909 }
3910 else
b9a16870 3911 t = build_call_expr (builtin_decl_explicit (start_fn),
3912 6, t5, t0, t1, t2, t3, t4);
fd6481cf 3913 }
1e8e9920 3914 }
fd6481cf 3915 if (TREE_TYPE (t) != boolean_type_node)
3916 t = fold_build2 (NE_EXPR, boolean_type_node,
3917 t, build_int_cst (TREE_TYPE (t), 0));
75a70cf9 3918 t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3919 true, GSI_SAME_STMT);
3920 gsi_insert_after (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT);
79acaae1 3921
75a70cf9 3922 /* Remove the GIMPLE_OMP_FOR statement. */
3923 gsi_remove (&gsi, true);
1e8e9920 3924
773c5ba7 3925 /* Iteration setup for sequential loop goes in L0_BB. */
75a70cf9 3926 gsi = gsi_start_bb (l0_bb);
1efcacec 3927 t = istart0;
fd6481cf 3928 if (bias)
1efcacec 3929 t = fold_build2 (MINUS_EXPR, fd->iter_type, t, bias);
3930 if (POINTER_TYPE_P (type))
3cea8318 3931 t = fold_convert (signed_type_for (type), t);
1efcacec 3932 t = fold_convert (type, t);
75a70cf9 3933 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3934 false, GSI_CONTINUE_LINKING);
3935 stmt = gimple_build_assign (fd->loop.v, t);
3936 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
1e8e9920 3937
1efcacec 3938 t = iend0;
fd6481cf 3939 if (bias)
1efcacec 3940 t = fold_build2 (MINUS_EXPR, fd->iter_type, t, bias);
3941 if (POINTER_TYPE_P (type))
3cea8318 3942 t = fold_convert (signed_type_for (type), t);
1efcacec 3943 t = fold_convert (type, t);
75a70cf9 3944 iend = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3945 false, GSI_CONTINUE_LINKING);
fd6481cf 3946 if (fd->collapse > 1)
3947 {
083152fb 3948 tree tem = make_rename_temp (type, ".tem");
75a70cf9 3949 stmt = gimple_build_assign (tem, fd->loop.v);
3950 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
fd6481cf 3951 for (i = fd->collapse - 1; i >= 0; i--)
3952 {
3953 tree vtype = TREE_TYPE (fd->loops[i].v), itype;
3954 itype = vtype;
3955 if (POINTER_TYPE_P (vtype))
3cea8318 3956 itype = signed_type_for (vtype);
fd6481cf 3957 t = fold_build2 (TRUNC_MOD_EXPR, type, tem, counts[i]);
3958 t = fold_convert (itype, t);
c821ef7d 3959 t = fold_build2 (MULT_EXPR, itype, t,
3960 fold_convert (itype, fd->loops[i].step));
fd6481cf 3961 if (POINTER_TYPE_P (vtype))
2cc66f2a 3962 t = fold_build_pointer_plus (fd->loops[i].n1, t);
fd6481cf 3963 else
3964 t = fold_build2 (PLUS_EXPR, itype, fd->loops[i].n1, t);
75a70cf9 3965 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3966 false, GSI_CONTINUE_LINKING);
3967 stmt = gimple_build_assign (fd->loops[i].v, t);
3968 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
fd6481cf 3969 if (i != 0)
3970 {
3971 t = fold_build2 (TRUNC_DIV_EXPR, type, tem, counts[i]);
75a70cf9 3972 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3973 false, GSI_CONTINUE_LINKING);
3974 stmt = gimple_build_assign (tem, t);
3975 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
fd6481cf 3976 }
3977 }
3978 }
773c5ba7 3979
ac6e3339 3980 if (!broken_loop)
03ed154b 3981 {
ac6e3339 3982 /* Code to control the increment and predicate for the sequential
3983 loop goes in the CONT_BB. */
75a70cf9 3984 gsi = gsi_last_bb (cont_bb);
3985 stmt = gsi_stmt (gsi);
3986 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_CONTINUE);
3987 vmain = gimple_omp_continue_control_use (stmt);
3988 vback = gimple_omp_continue_control_def (stmt);
79acaae1 3989
fd6481cf 3990 if (POINTER_TYPE_P (type))
2cc66f2a 3991 t = fold_build_pointer_plus (vmain, fd->loop.step);
fd6481cf 3992 else
3993 t = fold_build2 (PLUS_EXPR, type, vmain, fd->loop.step);
75a70cf9 3994 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3995 true, GSI_SAME_STMT);
3996 stmt = gimple_build_assign (vback, t);
3997 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
3998
fd6481cf 3999 t = build2 (fd->loop.cond_code, boolean_type_node, vback, iend);
75a70cf9 4000 stmt = gimple_build_cond_empty (t);
4001 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
773c5ba7 4002
75a70cf9 4003 /* Remove GIMPLE_OMP_CONTINUE. */
4004 gsi_remove (&gsi, true);
773c5ba7 4005
fd6481cf 4006 if (fd->collapse > 1)
4007 {
4008 basic_block last_bb, bb;
4009
4010 last_bb = cont_bb;
4011 for (i = fd->collapse - 1; i >= 0; i--)
4012 {
4013 tree vtype = TREE_TYPE (fd->loops[i].v);
4014
4015 bb = create_empty_bb (last_bb);
75a70cf9 4016 gsi = gsi_start_bb (bb);
fd6481cf 4017
4018 if (i < fd->collapse - 1)
4019 {
4020 e = make_edge (last_bb, bb, EDGE_FALSE_VALUE);
4021 e->probability = REG_BR_PROB_BASE / 8;
4022
75a70cf9 4023 t = fd->loops[i + 1].n1;
4024 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
4025 false, GSI_CONTINUE_LINKING);
4026 stmt = gimple_build_assign (fd->loops[i + 1].v, t);
4027 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
fd6481cf 4028 }
4029 else
4030 collapse_bb = bb;
4031
4032 set_immediate_dominator (CDI_DOMINATORS, bb, last_bb);
4033
4034 if (POINTER_TYPE_P (vtype))
2cc66f2a 4035 t = fold_build_pointer_plus (fd->loops[i].v, fd->loops[i].step);
fd6481cf 4036 else
4037 t = fold_build2 (PLUS_EXPR, vtype, fd->loops[i].v,
4038 fd->loops[i].step);
75a70cf9 4039 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
4040 false, GSI_CONTINUE_LINKING);
4041 stmt = gimple_build_assign (fd->loops[i].v, t);
4042 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
fd6481cf 4043
4044 if (i > 0)
4045 {
75a70cf9 4046 t = fd->loops[i].n2;
4047 t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
4048 false, GSI_CONTINUE_LINKING);
fd6481cf 4049 t = fold_build2 (fd->loops[i].cond_code, boolean_type_node,
75a70cf9 4050 fd->loops[i].v, t);
4051 stmt = gimple_build_cond_empty (t);
4052 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
fd6481cf 4053 e = make_edge (bb, l1_bb, EDGE_TRUE_VALUE);
4054 e->probability = REG_BR_PROB_BASE * 7 / 8;
4055 }
4056 else
4057 make_edge (bb, l1_bb, EDGE_FALLTHRU);
4058 last_bb = bb;
4059 }
4060 }
4061
ac6e3339 4062 /* Emit code to get the next parallel iteration in L2_BB. */
75a70cf9 4063 gsi = gsi_start_bb (l2_bb);
773c5ba7 4064
b9a16870 4065 t = build_call_expr (builtin_decl_explicit (next_fn), 2,
ac6e3339 4066 build_fold_addr_expr (istart0),
4067 build_fold_addr_expr (iend0));
75a70cf9 4068 t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
4069 false, GSI_CONTINUE_LINKING);
fd6481cf 4070 if (TREE_TYPE (t) != boolean_type_node)
4071 t = fold_build2 (NE_EXPR, boolean_type_node,
4072 t, build_int_cst (TREE_TYPE (t), 0));
75a70cf9 4073 stmt = gimple_build_cond_empty (t);
4074 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
ac6e3339 4075 }
1e8e9920 4076
61e47ac8 4077 /* Add the loop cleanup function. */
75a70cf9 4078 gsi = gsi_last_bb (exit_bb);
4079 if (gimple_omp_return_nowait_p (gsi_stmt (gsi)))
b9a16870 4080 t = builtin_decl_explicit (BUILT_IN_GOMP_LOOP_END_NOWAIT);
61e47ac8 4081 else
b9a16870 4082 t = builtin_decl_explicit (BUILT_IN_GOMP_LOOP_END);
75a70cf9 4083 stmt = gimple_build_call (t, 0);
4084 gsi_insert_after (&gsi, stmt, GSI_SAME_STMT);
4085 gsi_remove (&gsi, true);
773c5ba7 4086
4087 /* Connect the new blocks. */
79acaae1 4088 find_edge (entry_bb, l0_bb)->flags = EDGE_TRUE_VALUE;
4089 find_edge (entry_bb, l3_bb)->flags = EDGE_FALSE_VALUE;
1e8e9920 4090
ac6e3339 4091 if (!broken_loop)
4092 {
75a70cf9 4093 gimple_seq phis;
4094
79acaae1 4095 e = find_edge (cont_bb, l3_bb);
4096 ne = make_edge (l2_bb, l3_bb, EDGE_FALSE_VALUE);
4097
75a70cf9 4098 phis = phi_nodes (l3_bb);
4099 for (gsi = gsi_start (phis); !gsi_end_p (gsi); gsi_next (&gsi))
4100 {
4101 gimple phi = gsi_stmt (gsi);
4102 SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, ne),
4103 PHI_ARG_DEF_FROM_EDGE (phi, e));
4104 }
79acaae1 4105 remove_edge (e);
4106
ac6e3339 4107 make_edge (cont_bb, l2_bb, EDGE_FALSE_VALUE);
fd6481cf 4108 if (fd->collapse > 1)
4109 {
4110 e = find_edge (cont_bb, l1_bb);
4111 remove_edge (e);
4112 e = make_edge (cont_bb, collapse_bb, EDGE_TRUE_VALUE);
4113 }
4114 else
4115 {
4116 e = find_edge (cont_bb, l1_bb);
4117 e->flags = EDGE_TRUE_VALUE;
4118 }
4119 e->probability = REG_BR_PROB_BASE * 7 / 8;
4120 find_edge (cont_bb, l2_bb)->probability = REG_BR_PROB_BASE / 8;
ac6e3339 4121 make_edge (l2_bb, l0_bb, EDGE_TRUE_VALUE);
79acaae1 4122
4123 set_immediate_dominator (CDI_DOMINATORS, l2_bb,
4124 recompute_dominator (CDI_DOMINATORS, l2_bb));
4125 set_immediate_dominator (CDI_DOMINATORS, l3_bb,
4126 recompute_dominator (CDI_DOMINATORS, l3_bb));
4127 set_immediate_dominator (CDI_DOMINATORS, l0_bb,
4128 recompute_dominator (CDI_DOMINATORS, l0_bb));
4129 set_immediate_dominator (CDI_DOMINATORS, l1_bb,
4130 recompute_dominator (CDI_DOMINATORS, l1_bb));
ac6e3339 4131 }
1e8e9920 4132}
4133
4134
773c5ba7 4135/* A subroutine of expand_omp_for. Generate code for a parallel
4136 loop with static schedule and no specified chunk size. Given
4137 parameters:
1e8e9920 4138
4139 for (V = N1; V cond N2; V += STEP) BODY;
4140
4141 where COND is "<" or ">", we generate pseudocode
4142
4143 if (cond is <)
4144 adj = STEP - 1;
4145 else
4146 adj = STEP + 1;
fd6481cf 4147 if ((__typeof (V)) -1 > 0 && cond is >)
4148 n = -(adj + N2 - N1) / -STEP;
4149 else
4150 n = (adj + N2 - N1) / STEP;
1e8e9920 4151 q = n / nthreads;
31712e83 4152 tt = n % nthreads;
4153 if (threadid < tt) goto L3; else goto L4;
4154 L3:
4155 tt = 0;
4156 q = q + 1;
4157 L4:
4158 s0 = q * threadid + tt;
4159 e0 = s0 + q;
79acaae1 4160 V = s0 * STEP + N1;
1e8e9920 4161 if (s0 >= e0) goto L2; else goto L0;
4162 L0:
1e8e9920 4163 e = e0 * STEP + N1;
4164 L1:
4165 BODY;
4166 V += STEP;
4167 if (V cond e) goto L1;
1e8e9920 4168 L2:
4169*/
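/* Worked example (an illustration added here, not original text):
   n = 10 iterations over nthreads = 4 gives q = 2, tt = 2.  Threads 0
   and 1 satisfy threadid < tt and take q = 3, tt = 0:
     thread 0: s0 = 0, e0 = 3
     thread 1: s0 = 3, e0 = 6
     thread 2: s0 = 2*2 + 2 = 6, e0 = 8
     thread 3: s0 = 2*3 + 2 = 8, e0 = 10
   so [0, 10) is covered exactly once, as evenly as possible.  */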
4170
61e47ac8 4171static void
773c5ba7 4172expand_omp_for_static_nochunk (struct omp_region *region,
4173 struct omp_for_data *fd)
1e8e9920 4174{
31712e83 4175 tree n, q, s0, e0, e, t, tt, nthreads, threadid;
fd6481cf 4176 tree type, itype, vmain, vback;
31712e83 4177 basic_block entry_bb, second_bb, third_bb, exit_bb, seq_start_bb;
4178 basic_block body_bb, cont_bb;
61e47ac8 4179 basic_block fin_bb;
75a70cf9 4180 gimple_stmt_iterator gsi;
4181 gimple stmt;
31712e83 4182 edge ep;
1e8e9920 4183
fd6481cf 4184 itype = type = TREE_TYPE (fd->loop.v);
4185 if (POINTER_TYPE_P (type))
3cea8318 4186 itype = signed_type_for (type);
1e8e9920 4187
61e47ac8 4188 entry_bb = region->entry;
61e47ac8 4189 cont_bb = region->cont;
ac6e3339 4190 gcc_assert (EDGE_COUNT (entry_bb->succs) == 2);
4191 gcc_assert (BRANCH_EDGE (entry_bb)->dest == FALLTHRU_EDGE (cont_bb)->dest);
4192 seq_start_bb = split_edge (FALLTHRU_EDGE (entry_bb));
4193 body_bb = single_succ (seq_start_bb);
4194 gcc_assert (BRANCH_EDGE (cont_bb)->dest == body_bb);
4195 gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
4196 fin_bb = FALLTHRU_EDGE (cont_bb)->dest;
61e47ac8 4197 exit_bb = region->exit;
4198
773c5ba7 4199 /* Iteration space partitioning goes in ENTRY_BB. */
75a70cf9 4200 gsi = gsi_last_bb (entry_bb);
4201 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
61e47ac8 4202
b9a16870 4203 t = build_call_expr (builtin_decl_explicit (BUILT_IN_OMP_GET_NUM_THREADS), 0);
fd6481cf 4204 t = fold_convert (itype, t);
75a70cf9 4205 nthreads = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
4206 true, GSI_SAME_STMT);
48e1416a 4207
b9a16870 4208 t = build_call_expr (builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM), 0);
fd6481cf 4209 t = fold_convert (itype, t);
75a70cf9 4210 threadid = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
4211 true, GSI_SAME_STMT);
1e8e9920 4212
fd6481cf 4213 fd->loop.n1
75a70cf9 4214 = force_gimple_operand_gsi (&gsi, fold_convert (type, fd->loop.n1),
4215 true, NULL_TREE, true, GSI_SAME_STMT);
fd6481cf 4216 fd->loop.n2
75a70cf9 4217 = force_gimple_operand_gsi (&gsi, fold_convert (itype, fd->loop.n2),
4218 true, NULL_TREE, true, GSI_SAME_STMT);
fd6481cf 4219 fd->loop.step
75a70cf9 4220 = force_gimple_operand_gsi (&gsi, fold_convert (itype, fd->loop.step),
4221 true, NULL_TREE, true, GSI_SAME_STMT);
fd6481cf 4222
4223 t = build_int_cst (itype, (fd->loop.cond_code == LT_EXPR ? -1 : 1));
4224 t = fold_build2 (PLUS_EXPR, itype, fd->loop.step, t);
4225 t = fold_build2 (PLUS_EXPR, itype, t, fd->loop.n2);
4226 t = fold_build2 (MINUS_EXPR, itype, t, fold_convert (itype, fd->loop.n1));
4227 if (TYPE_UNSIGNED (itype) && fd->loop.cond_code == GT_EXPR)
4228 t = fold_build2 (TRUNC_DIV_EXPR, itype,
4229 fold_build1 (NEGATE_EXPR, itype, t),
4230 fold_build1 (NEGATE_EXPR, itype, fd->loop.step));
4231 else
4232 t = fold_build2 (TRUNC_DIV_EXPR, itype, t, fd->loop.step);
4233 t = fold_convert (itype, t);
75a70cf9 4234 n = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);
1e8e9920 4235
083152fb 4236 q = make_rename_temp (itype, "q");
fd6481cf 4237 t = fold_build2 (TRUNC_DIV_EXPR, itype, n, nthreads);
31712e83 4238 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE, true, GSI_SAME_STMT);
4239 gsi_insert_before (&gsi, gimple_build_assign (q, t), GSI_SAME_STMT);
4240
083152fb 4241 tt = make_rename_temp (itype, "tt");
31712e83 4242 t = fold_build2 (TRUNC_MOD_EXPR, itype, n, nthreads);
4243 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE, true, GSI_SAME_STMT);
4244 gsi_insert_before (&gsi, gimple_build_assign (tt, t), GSI_SAME_STMT);
1e8e9920 4245
31712e83 4246 t = build2 (LT_EXPR, boolean_type_node, threadid, tt);
4247 stmt = gimple_build_cond_empty (t);
4248 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
4249
4250 second_bb = split_block (entry_bb, stmt)->dest;
4251 gsi = gsi_last_bb (second_bb);
4252 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
4253
4254 gsi_insert_before (&gsi, gimple_build_assign (tt, build_int_cst (itype, 0)),
4255 GSI_SAME_STMT);
4256 stmt = gimple_build_assign_with_ops (PLUS_EXPR, q, q,
4257 build_int_cst (itype, 1));
4258 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
4259
4260 third_bb = split_block (second_bb, stmt)->dest;
4261 gsi = gsi_last_bb (third_bb);
4262 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
1e8e9920 4263
fd6481cf 4264 t = build2 (MULT_EXPR, itype, q, threadid);
31712e83 4265 t = build2 (PLUS_EXPR, itype, t, tt);
75a70cf9 4266 s0 = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);
1e8e9920 4267
fd6481cf 4268 t = fold_build2 (PLUS_EXPR, itype, s0, q);
75a70cf9 4269 e0 = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);
79acaae1 4270
1e8e9920 4271 t = build2 (GE_EXPR, boolean_type_node, s0, e0);
75a70cf9 4272 gsi_insert_before (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT);
773c5ba7 4273
75a70cf9 4274 /* Remove the GIMPLE_OMP_FOR statement. */
4275 gsi_remove (&gsi, true);
773c5ba7 4276
4277 /* Setup code for sequential iteration goes in SEQ_START_BB. */
75a70cf9 4278 gsi = gsi_start_bb (seq_start_bb);
1e8e9920 4279
fd6481cf 4280 t = fold_convert (itype, s0);
4281 t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
4282 if (POINTER_TYPE_P (type))
2cc66f2a 4283 t = fold_build_pointer_plus (fd->loop.n1, t);
fd6481cf 4284 else
4285 t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
75a70cf9 4286 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
4287 false, GSI_CONTINUE_LINKING);
4288 stmt = gimple_build_assign (fd->loop.v, t);
4289 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
48e1416a 4290
fd6481cf 4291 t = fold_convert (itype, e0);
4292 t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
4293 if (POINTER_TYPE_P (type))
2cc66f2a 4294 t = fold_build_pointer_plus (fd->loop.n1, t);
fd6481cf 4295 else
4296 t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
75a70cf9 4297 e = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
4298 false, GSI_CONTINUE_LINKING);
1e8e9920 4299
75a70cf9 4300 /* The code controlling the sequential loop replaces the
4301 GIMPLE_OMP_CONTINUE. */
4302 gsi = gsi_last_bb (cont_bb);
4303 stmt = gsi_stmt (gsi);
4304 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_CONTINUE);
4305 vmain = gimple_omp_continue_control_use (stmt);
4306 vback = gimple_omp_continue_control_def (stmt);
79acaae1 4307
fd6481cf 4308 if (POINTER_TYPE_P (type))
2cc66f2a 4309 t = fold_build_pointer_plus (vmain, fd->loop.step);
fd6481cf 4310 else
4311 t = fold_build2 (PLUS_EXPR, type, vmain, fd->loop.step);
75a70cf9 4312 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
4313 true, GSI_SAME_STMT);
4314 stmt = gimple_build_assign (vback, t);
4315 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
79acaae1 4316
fd6481cf 4317 t = build2 (fd->loop.cond_code, boolean_type_node, vback, e);
75a70cf9 4318 gsi_insert_before (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT);
1e8e9920 4319
75a70cf9 4320 /* Remove the GIMPLE_OMP_CONTINUE statement. */
4321 gsi_remove (&gsi, true);
773c5ba7 4322
75a70cf9 4323 /* Replace the GIMPLE_OMP_RETURN with a barrier, or nothing. */
4324 gsi = gsi_last_bb (exit_bb);
4325 if (!gimple_omp_return_nowait_p (gsi_stmt (gsi)))
4326 force_gimple_operand_gsi (&gsi, build_omp_barrier (), false, NULL_TREE,
4327 false, GSI_SAME_STMT);
4328 gsi_remove (&gsi, true);
773c5ba7 4329
4330 /* Connect all the blocks. */
31712e83 4331 ep = make_edge (entry_bb, third_bb, EDGE_FALSE_VALUE);
4332 ep->probability = REG_BR_PROB_BASE / 4 * 3;
4333 ep = find_edge (entry_bb, second_bb);
4334 ep->flags = EDGE_TRUE_VALUE;
4335 ep->probability = REG_BR_PROB_BASE / 4;
4336 find_edge (third_bb, seq_start_bb)->flags = EDGE_FALSE_VALUE;
4337 find_edge (third_bb, fin_bb)->flags = EDGE_TRUE_VALUE;
79acaae1 4338
ac6e3339 4339 find_edge (cont_bb, body_bb)->flags = EDGE_TRUE_VALUE;
61e47ac8 4340 find_edge (cont_bb, fin_bb)->flags = EDGE_FALSE_VALUE;
48e1416a 4341
31712e83 4342 set_immediate_dominator (CDI_DOMINATORS, second_bb, entry_bb);
4343 set_immediate_dominator (CDI_DOMINATORS, third_bb, entry_bb);
4344 set_immediate_dominator (CDI_DOMINATORS, seq_start_bb, third_bb);
79acaae1 4345 set_immediate_dominator (CDI_DOMINATORS, body_bb,
4346 recompute_dominator (CDI_DOMINATORS, body_bb));
4347 set_immediate_dominator (CDI_DOMINATORS, fin_bb,
4348 recompute_dominator (CDI_DOMINATORS, fin_bb));
1e8e9920 4349}
4350
773c5ba7 4351
4352/* A subroutine of expand_omp_for. Generate code for a parallel
4353 loop with static schedule and a specified chunk size. Given
4354 parameters:
1e8e9920 4355
4356 for (V = N1; V cond N2; V += STEP) BODY;
4357
4358 where COND is "<" or ">", we generate pseudocode
4359
4360 if (cond is <)
4361 adj = STEP - 1;
4362 else
4363 adj = STEP + 1;
fd6481cf 4364 if ((__typeof (V)) -1 > 0 && cond is >)
4365 n = -(adj + N2 - N1) / -STEP;
4366 else
4367 n = (adj + N2 - N1) / STEP;
1e8e9920 4368 trip = 0;
79acaae1 4369 V = threadid * CHUNK * STEP + N1; -- this extra definition of V is
4370 here so that V is defined
4371 if the loop is not entered
1e8e9920 4372 L0:
4373 s0 = (trip * nthreads + threadid) * CHUNK;
4374 e0 = min(s0 + CHUNK, n);
4375 if (s0 < n) goto L1; else goto L4;
4376 L1:
4377 V = s0 * STEP + N1;
4378 e = e0 * STEP + N1;
4379 L2:
4380 BODY;
4381 V += STEP;
4382 if (V cond e) goto L2; else goto L3;
4383 L3:
4384 trip += 1;
4385 goto L0;
4386 L4:
1e8e9920 4387*/
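/* Worked example (an illustration added here, not original text):
   n = 10, nthreads = 2, CHUNK = 3.  On trip 0 thread 0 runs s0 = 0 to
   e0 = 3 and thread 1 runs 3 to 6; on trip 1 they run 6 to 9 and
   9 to 10; on trip 2 both compute s0 >= n and leave through L4.  */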
4388
61e47ac8 4389static void
75a70cf9 4390expand_omp_for_static_chunk (struct omp_region *region, struct omp_for_data *fd)
1e8e9920 4391{
75a70cf9 4392 tree n, s0, e0, e, t;
79acaae1 4393 tree trip_var, trip_init, trip_main, trip_back, nthreads, threadid;
75a70cf9 4394 tree type, itype, v_main, v_back, v_extra;
773c5ba7 4395 basic_block entry_bb, exit_bb, body_bb, seq_start_bb, iter_part_bb;
61e47ac8 4396 basic_block trip_update_bb, cont_bb, fin_bb;
75a70cf9 4397 gimple_stmt_iterator si;
4398 gimple stmt;
4399 edge se;
1e8e9920 4400
fd6481cf 4401 itype = type = TREE_TYPE (fd->loop.v);
4402 if (POINTER_TYPE_P (type))
3cea8318 4403 itype = signed_type_for (type);
1e8e9920 4404
61e47ac8 4405 entry_bb = region->entry;
ac6e3339 4406 se = split_block (entry_bb, last_stmt (entry_bb));
4407 entry_bb = se->src;
4408 iter_part_bb = se->dest;
61e47ac8 4409 cont_bb = region->cont;
ac6e3339 4410 gcc_assert (EDGE_COUNT (iter_part_bb->succs) == 2);
4411 gcc_assert (BRANCH_EDGE (iter_part_bb)->dest
4412 == FALLTHRU_EDGE (cont_bb)->dest);
4413 seq_start_bb = split_edge (FALLTHRU_EDGE (iter_part_bb));
4414 body_bb = single_succ (seq_start_bb);
4415 gcc_assert (BRANCH_EDGE (cont_bb)->dest == body_bb);
4416 gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
4417 fin_bb = FALLTHRU_EDGE (cont_bb)->dest;
4418 trip_update_bb = split_edge (FALLTHRU_EDGE (cont_bb));
61e47ac8 4419 exit_bb = region->exit;
773c5ba7 4420
773c5ba7 4421 /* Trip and adjustment setup goes in ENTRY_BB. */
75a70cf9 4422 si = gsi_last_bb (entry_bb);
4423 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_FOR);
773c5ba7 4424
b9a16870 4425 t = build_call_expr (builtin_decl_explicit (BUILT_IN_OMP_GET_NUM_THREADS), 0);
fd6481cf 4426 t = fold_convert (itype, t);
75a70cf9 4427 nthreads = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4428 true, GSI_SAME_STMT);
48e1416a 4429
b9a16870 4430 t = build_call_expr (builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM), 0);
fd6481cf 4431 t = fold_convert (itype, t);
75a70cf9 4432 threadid = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4433 true, GSI_SAME_STMT);
79acaae1 4434
fd6481cf 4435 fd->loop.n1
75a70cf9 4436 = force_gimple_operand_gsi (&si, fold_convert (type, fd->loop.n1),
4437 true, NULL_TREE, true, GSI_SAME_STMT);
fd6481cf 4438 fd->loop.n2
75a70cf9 4439 = force_gimple_operand_gsi (&si, fold_convert (itype, fd->loop.n2),
4440 true, NULL_TREE, true, GSI_SAME_STMT);
fd6481cf 4441 fd->loop.step
75a70cf9 4442 = force_gimple_operand_gsi (&si, fold_convert (itype, fd->loop.step),
4443 true, NULL_TREE, true, GSI_SAME_STMT);
79acaae1 4444 fd->chunk_size
75a70cf9 4445 = force_gimple_operand_gsi (&si, fold_convert (itype, fd->chunk_size),
4446 true, NULL_TREE, true, GSI_SAME_STMT);
fd6481cf 4447
4448 t = build_int_cst (itype, (fd->loop.cond_code == LT_EXPR ? -1 : 1));
4449 t = fold_build2 (PLUS_EXPR, itype, fd->loop.step, t);
4450 t = fold_build2 (PLUS_EXPR, itype, t, fd->loop.n2);
4451 t = fold_build2 (MINUS_EXPR, itype, t, fold_convert (itype, fd->loop.n1));
4452 if (TYPE_UNSIGNED (itype) && fd->loop.cond_code == GT_EXPR)
4453 t = fold_build2 (TRUNC_DIV_EXPR, itype,
4454 fold_build1 (NEGATE_EXPR, itype, t),
4455 fold_build1 (NEGATE_EXPR, itype, fd->loop.step));
4456 else
4457 t = fold_build2 (TRUNC_DIV_EXPR, itype, t, fd->loop.step);
4458 t = fold_convert (itype, t);
75a70cf9 4459 n = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4460 true, GSI_SAME_STMT);
79acaae1 4461
083152fb 4462 trip_var = create_tmp_reg (itype, ".trip");
79acaae1 4463 if (gimple_in_ssa_p (cfun))
4464 {
4465 add_referenced_var (trip_var);
75a70cf9 4466 trip_init = make_ssa_name (trip_var, NULL);
4467 trip_main = make_ssa_name (trip_var, NULL);
4468 trip_back = make_ssa_name (trip_var, NULL);
79acaae1 4469 }
1e8e9920 4470 else
79acaae1 4471 {
4472 trip_init = trip_var;
4473 trip_main = trip_var;
4474 trip_back = trip_var;
4475 }
1e8e9920 4476
75a70cf9 4477 stmt = gimple_build_assign (trip_init, build_int_cst (itype, 0));
4478 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
773c5ba7 4479
fd6481cf 4480 t = fold_build2 (MULT_EXPR, itype, threadid, fd->chunk_size);
4481 t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
4482 if (POINTER_TYPE_P (type))
2cc66f2a 4483 t = fold_build_pointer_plus (fd->loop.n1, t);
fd6481cf 4484 else
4485 t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
75a70cf9 4486 v_extra = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4487 true, GSI_SAME_STMT);
79acaae1 4488
75a70cf9 4489 /* Remove the GIMPLE_OMP_FOR. */
4490 gsi_remove (&si, true);
773c5ba7 4491
4492 /* Iteration space partitioning goes in ITER_PART_BB. */
75a70cf9 4493 si = gsi_last_bb (iter_part_bb);
1e8e9920 4494
fd6481cf 4495 t = fold_build2 (MULT_EXPR, itype, trip_main, nthreads);
4496 t = fold_build2 (PLUS_EXPR, itype, t, threadid);
4497 t = fold_build2 (MULT_EXPR, itype, t, fd->chunk_size);
75a70cf9 4498 s0 = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4499 false, GSI_CONTINUE_LINKING);
1e8e9920 4500
fd6481cf 4501 t = fold_build2 (PLUS_EXPR, itype, s0, fd->chunk_size);
4502 t = fold_build2 (MIN_EXPR, itype, t, n);
75a70cf9 4503 e0 = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4504 false, GSI_CONTINUE_LINKING);
1e8e9920 4505
4506 t = build2 (LT_EXPR, boolean_type_node, s0, n);
75a70cf9 4507 gsi_insert_after (&si, gimple_build_cond_empty (t), GSI_CONTINUE_LINKING);
773c5ba7 4508
4509 /* Setup code for sequential iteration goes in SEQ_START_BB. */
75a70cf9 4510 si = gsi_start_bb (seq_start_bb);
1e8e9920 4511
fd6481cf 4512 t = fold_convert (itype, s0);
4513 t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
4514 if (POINTER_TYPE_P (type))
2cc66f2a 4515 t = fold_build_pointer_plus (fd->loop.n1, t);
fd6481cf 4516 else
4517 t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
75a70cf9 4518 t = force_gimple_operand_gsi (&si, t, false, NULL_TREE,
4519 false, GSI_CONTINUE_LINKING);
4520 stmt = gimple_build_assign (fd->loop.v, t);
4521 gsi_insert_after (&si, stmt, GSI_CONTINUE_LINKING);
1e8e9920 4522
fd6481cf 4523 t = fold_convert (itype, e0);
4524 t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
4525 if (POINTER_TYPE_P (type))
2cc66f2a 4526 t = fold_build_pointer_plus (fd->loop.n1, t);
fd6481cf 4527 else
4528 t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
75a70cf9 4529 e = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4530 false, GSI_CONTINUE_LINKING);
1e8e9920 4531
61e47ac8 4532 /* The code controlling the sequential loop goes in CONT_BB,
75a70cf9 4533 replacing the GIMPLE_OMP_CONTINUE. */
4534 si = gsi_last_bb (cont_bb);
4535 stmt = gsi_stmt (si);
4536 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_CONTINUE);
4537 v_main = gimple_omp_continue_control_use (stmt);
4538 v_back = gimple_omp_continue_control_def (stmt);
79acaae1 4539
fd6481cf 4540 if (POINTER_TYPE_P (type))
2cc66f2a 4541 t = fold_build_pointer_plus (v_main, fd->loop.step);
fd6481cf 4542 else
75a70cf9 4543 t = fold_build2 (PLUS_EXPR, type, v_main, fd->loop.step);
4544 stmt = gimple_build_assign (v_back, t);
4545 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
79acaae1 4546
fd6481cf 4547 t = build2 (fd->loop.cond_code, boolean_type_node, v_back, e);
75a70cf9 4548 gsi_insert_before (&si, gimple_build_cond_empty (t), GSI_SAME_STMT);
48e1416a 4549
75a70cf9 4550 /* Remove GIMPLE_OMP_CONTINUE. */
4551 gsi_remove (&si, true);
773c5ba7 4552
4553 /* Trip update code goes into TRIP_UPDATE_BB. */
75a70cf9 4554 si = gsi_start_bb (trip_update_bb);
1e8e9920 4555
fd6481cf 4556 t = build_int_cst (itype, 1);
4557 t = build2 (PLUS_EXPR, itype, trip_main, t);
75a70cf9 4558 stmt = gimple_build_assign (trip_back, t);
4559 gsi_insert_after (&si, stmt, GSI_CONTINUE_LINKING);
1e8e9920 4560
75a70cf9 4561 /* Replace the GIMPLE_OMP_RETURN with a barrier, or nothing. */
4562 si = gsi_last_bb (exit_bb);
4563 if (!gimple_omp_return_nowait_p (gsi_stmt (si)))
4564 force_gimple_operand_gsi (&si, build_omp_barrier (), false, NULL_TREE,
4565 false, GSI_SAME_STMT);
4566 gsi_remove (&si, true);
1e8e9920 4567
773c5ba7 4568 /* Connect the new blocks. */
ac6e3339 4569 find_edge (iter_part_bb, seq_start_bb)->flags = EDGE_TRUE_VALUE;
4570 find_edge (iter_part_bb, fin_bb)->flags = EDGE_FALSE_VALUE;
79acaae1 4571
ac6e3339 4572 find_edge (cont_bb, body_bb)->flags = EDGE_TRUE_VALUE;
4573 find_edge (cont_bb, trip_update_bb)->flags = EDGE_FALSE_VALUE;
79acaae1 4574
ac6e3339 4575 redirect_edge_and_branch (single_succ_edge (trip_update_bb), iter_part_bb);
79acaae1 4576
4577 if (gimple_in_ssa_p (cfun))
4578 {
75a70cf9 4579 gimple_stmt_iterator psi;
4580 gimple phi;
4581 edge re, ene;
4582 edge_var_map_vector head;
4583 edge_var_map *vm;
4584 size_t i;
4585
79acaae1 4586 /* When we redirect the edge from trip_update_bb to iter_part_bb, we
4587 remove arguments of the phi nodes in fin_bb. We need to create
4588 appropriate phi nodes in iter_part_bb instead. */
4589 se = single_pred_edge (fin_bb);
4590 re = single_succ_edge (trip_update_bb);
75a70cf9 4591 head = redirect_edge_var_map_vector (re);
79acaae1 4592 ene = single_succ_edge (entry_bb);
4593
75a70cf9 4594 psi = gsi_start_phis (fin_bb);
4595 for (i = 0; !gsi_end_p (psi) && VEC_iterate (edge_var_map, head, i, vm);
4596 gsi_next (&psi), ++i)
79acaae1 4597 {
75a70cf9 4598 gimple nphi;
efbcb6de 4599 source_location locus;
b82a98ee 4600 tree block;
75a70cf9 4601
4602 phi = gsi_stmt (psi);
4603 t = gimple_phi_result (phi);
4604 gcc_assert (t == redirect_edge_var_map_result (vm));
79acaae1 4605 nphi = create_phi_node (t, iter_part_bb);
4606 SSA_NAME_DEF_STMT (t) = nphi;
4607
4608 t = PHI_ARG_DEF_FROM_EDGE (phi, se);
efbcb6de 4609 locus = gimple_phi_arg_location_from_edge (phi, se);
b82a98ee 4610 block = gimple_phi_arg_block_from_edge (phi, se);
efbcb6de 4611
fd6481cf 4612 /* A special case -- fd->loop.v is not yet computed in
4613 iter_part_bb; we need to use v_extra instead. */
4614 if (t == fd->loop.v)
79acaae1 4615 t = v_extra;
b82a98ee 4616 add_phi_arg (nphi, t, ene, locus, block);
efbcb6de 4617 locus = redirect_edge_var_map_location (vm);
b82a98ee 4618 block = redirect_edge_var_map_block (vm);
4619 add_phi_arg (nphi, redirect_edge_var_map_def (vm), re, locus, block);
75a70cf9 4620 }
4621 gcc_assert (!gsi_end_p (psi) && i == VEC_length (edge_var_map, head));
4622 redirect_edge_var_map_clear (re);
4623 while (1)
4624 {
4625 psi = gsi_start_phis (fin_bb);
4626 if (gsi_end_p (psi))
4627 break;
4628 remove_phi_node (&psi, false);
79acaae1 4629 }
79acaae1 4630
4631 /* Make phi node for trip. */
4632 phi = create_phi_node (trip_main, iter_part_bb);
4633 SSA_NAME_DEF_STMT (trip_main) = phi;
efbcb6de 4634 add_phi_arg (phi, trip_back, single_succ_edge (trip_update_bb),
b82a98ee 4635 UNKNOWN_LOCATION, NULL);
efbcb6de 4636 add_phi_arg (phi, trip_init, single_succ_edge (entry_bb),
b82a98ee 4637 UNKNOWN_LOCATION, NULL);
79acaae1 4638 }
4639
4640 set_immediate_dominator (CDI_DOMINATORS, trip_update_bb, cont_bb);
4641 set_immediate_dominator (CDI_DOMINATORS, iter_part_bb,
4642 recompute_dominator (CDI_DOMINATORS, iter_part_bb));
4643 set_immediate_dominator (CDI_DOMINATORS, fin_bb,
4644 recompute_dominator (CDI_DOMINATORS, fin_bb));
4645 set_immediate_dominator (CDI_DOMINATORS, seq_start_bb,
4646 recompute_dominator (CDI_DOMINATORS, seq_start_bb));
4647 set_immediate_dominator (CDI_DOMINATORS, body_bb,
4648 recompute_dominator (CDI_DOMINATORS, body_bb));
1e8e9920 4649}
4650
1e8e9920 4651
773c5ba7 4652/* Expand the OpenMP loop defined by REGION. */
1e8e9920 4653
773c5ba7 4654static void
4655expand_omp_for (struct omp_region *region)
4656{
4657 struct omp_for_data fd;
fd6481cf 4658 struct omp_for_data_loop *loops;
1e8e9920 4659
fd6481cf 4660 loops
4661 = (struct omp_for_data_loop *)
75a70cf9 4662 alloca (gimple_omp_for_collapse (last_stmt (region->entry))
fd6481cf 4663 * sizeof (struct omp_for_data_loop));
fd6481cf 4664 extract_omp_for_data (last_stmt (region->entry), &fd, loops);
f77459c5 4665 region->sched_kind = fd.sched_kind;
1e8e9920 4666
b3a3ddec 4667 gcc_assert (EDGE_COUNT (region->entry->succs) == 2);
4668 BRANCH_EDGE (region->entry)->flags &= ~EDGE_ABNORMAL;
4669 FALLTHRU_EDGE (region->entry)->flags &= ~EDGE_ABNORMAL;
4670 if (region->cont)
4671 {
4672 gcc_assert (EDGE_COUNT (region->cont->succs) == 2);
4673 BRANCH_EDGE (region->cont)->flags &= ~EDGE_ABNORMAL;
4674 FALLTHRU_EDGE (region->cont)->flags &= ~EDGE_ABNORMAL;
4675 }
4676
03ed154b 4677 if (fd.sched_kind == OMP_CLAUSE_SCHEDULE_STATIC
4678 && !fd.have_ordered
fd6481cf 4679 && fd.collapse == 1
ac6e3339 4680 && region->cont != NULL)
1e8e9920 4681 {
4682 if (fd.chunk_size == NULL)
61e47ac8 4683 expand_omp_for_static_nochunk (region, &fd);
1e8e9920 4684 else
61e47ac8 4685 expand_omp_for_static_chunk (region, &fd);
1e8e9920 4686 }
4687 else
4688 {
fd6481cf 4689 int fn_index, start_ix, next_ix;
4690
0416ca72 4691 if (fd.chunk_size == NULL
4692 && fd.sched_kind == OMP_CLAUSE_SCHEDULE_STATIC)
4693 fd.chunk_size = integer_zero_node;
fd6481cf 4694 gcc_assert (fd.sched_kind != OMP_CLAUSE_SCHEDULE_AUTO);
4695 fn_index = (fd.sched_kind == OMP_CLAUSE_SCHEDULE_RUNTIME)
75a70cf9 4696 ? 3 : fd.sched_kind;
fd6481cf 4697 fn_index += fd.have_ordered * 4;
b9a16870 4698 start_ix = ((int)BUILT_IN_GOMP_LOOP_STATIC_START) + fn_index;
4699 next_ix = ((int)BUILT_IN_GOMP_LOOP_STATIC_NEXT) + fn_index;
fd6481cf 4700 if (fd.iter_type == long_long_unsigned_type_node)
4701 {
b9a16870 4702 start_ix += ((int)BUILT_IN_GOMP_LOOP_ULL_STATIC_START
4703 - (int)BUILT_IN_GOMP_LOOP_STATIC_START);
4704 next_ix += ((int)BUILT_IN_GOMP_LOOP_ULL_STATIC_NEXT
4705 - (int)BUILT_IN_GOMP_LOOP_STATIC_NEXT);
fd6481cf 4706 }
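/* E.g. (illustrative): schedule(dynamic) yields fn_index 1, so
   start_ix/next_ix select GOMP_loop_dynamic_start/_next; an ordered
   clause shifts by 4 into the GOMP_loop_ordered_* entries, and a
   long long unsigned iterator moves into the GOMP_loop_ull_*
   family.  */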
b9c74b4d 4707 expand_omp_for_generic (region, &fd, (enum built_in_function) start_ix,
4708 (enum built_in_function) next_ix);
1e8e9920 4709 }
28c92cbb 4710
083152fb 4711 if (gimple_in_ssa_p (cfun))
4712 update_ssa (TODO_update_ssa_only_virtuals);
1e8e9920 4713}
4714
1e8e9920 4715
4716/* Expand code for an OpenMP sections directive. In pseudo code, we generate
4717
1e8e9920 4718 v = GOMP_sections_start (n);
4719 L0:
4720 switch (v)
4721 {
4722 case 0:
4723 goto L2;
4724 case 1:
4725 section 1;
4726 goto L1;
4727 case 2:
4728 ...
4729 case n:
4730 ...
1e8e9920 4731 default:
4732 abort ();
4733 }
4734 L1:
4735 v = GOMP_sections_next ();
4736 goto L0;
4737 L2:
4738 reduction;
4739
773c5ba7 4740 If this is a combined parallel sections, replace the call to
79acaae1 4741 GOMP_sections_start with call to GOMP_sections_next. */
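/* Illustrative example (not original text): for

     #pragma omp sections
     {
       #pragma omp section
	 a ();
       #pragma omp section
	 b ();
     }

   v = GOMP_sections_start (2) hands each thread a section number; the
   switch dispatches v == 1 to a (), v == 2 to b (), and v == 0 exits
   the construct once no work remains.  */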
1e8e9920 4742
4743static void
773c5ba7 4744expand_omp_sections (struct omp_region *region)
1e8e9920 4745{
f018d957 4746 tree t, u, vin = NULL, vmain, vnext, l2;
75a70cf9 4747 VEC (tree,heap) *label_vec;
4748 unsigned len;
ac6e3339 4749 basic_block entry_bb, l0_bb, l1_bb, l2_bb, default_bb;
75a70cf9 4750 gimple_stmt_iterator si, switch_si;
4751 gimple sections_stmt, stmt, cont;
9884aaf8 4752 edge_iterator ei;
4753 edge e;
61e47ac8 4754 struct omp_region *inner;
75a70cf9 4755 unsigned i, casei;
ac6e3339 4756 bool exit_reachable = region->cont != NULL;
1e8e9920 4757
ac6e3339 4758 gcc_assert (exit_reachable == (region->exit != NULL));
61e47ac8 4759 entry_bb = region->entry;
ac6e3339 4760 l0_bb = single_succ (entry_bb);
61e47ac8 4761 l1_bb = region->cont;
ac6e3339 4762 l2_bb = region->exit;
4763 if (exit_reachable)
03ed154b 4764 {
295e9e85 4765 if (single_pred_p (l2_bb) && single_pred (l2_bb) == l0_bb)
75a70cf9 4766 l2 = gimple_block_label (l2_bb);
9884aaf8 4767 else
4768 {
4769 /* This can happen if there are reductions. */
4770 len = EDGE_COUNT (l0_bb->succs);
4771 gcc_assert (len > 0);
4772 e = EDGE_SUCC (l0_bb, len - 1);
75a70cf9 4773 si = gsi_last_bb (e->dest);
6d5a0fbe 4774 l2 = NULL_TREE;
75a70cf9 4775 if (gsi_end_p (si)
4776 || gimple_code (gsi_stmt (si)) != GIMPLE_OMP_SECTION)
4777 l2 = gimple_block_label (e->dest);
9884aaf8 4778 else
4779 FOR_EACH_EDGE (e, ei, l0_bb->succs)
4780 {
75a70cf9 4781 si = gsi_last_bb (e->dest);
4782 if (gsi_end_p (si)
4783 || gimple_code (gsi_stmt (si)) != GIMPLE_OMP_SECTION)
9884aaf8 4784 {
75a70cf9 4785 l2 = gimple_block_label (e->dest);
9884aaf8 4786 break;
4787 }
4788 }
4789 }
03ed154b 4790 default_bb = create_empty_bb (l1_bb->prev_bb);
03ed154b 4791 }
4792 else
4793 {
ac6e3339 4794 default_bb = create_empty_bb (l0_bb);
75a70cf9 4795 l2 = gimple_block_label (default_bb);
03ed154b 4796 }
773c5ba7 4797
4798 /* We will build a switch() with enough cases for all the
75a70cf9 4799 GIMPLE_OMP_SECTION regions, a '0' case to handle when no more work remains,
773c5ba7 4800 and a default case to abort if something goes wrong. */
ac6e3339 4801 len = EDGE_COUNT (l0_bb->succs);
75a70cf9 4802
4803 /* Use VEC_quick_push on label_vec throughout, since we know the size
4804 in advance. */
4805 label_vec = VEC_alloc (tree, heap, len);
1e8e9920 4806
61e47ac8 4807 /* The call to GOMP_sections_start goes in ENTRY_BB, replacing the
75a70cf9 4808 GIMPLE_OMP_SECTIONS statement. */
4809 si = gsi_last_bb (entry_bb);
4810 sections_stmt = gsi_stmt (si);
4811 gcc_assert (gimple_code (sections_stmt) == GIMPLE_OMP_SECTIONS);
4812 vin = gimple_omp_sections_control (sections_stmt);
773c5ba7 4813 if (!is_combined_parallel (region))
1e8e9920 4814 {
773c5ba7 4815 /* If we are not inside a combined parallel+sections region,
4816 call GOMP_sections_start. */
ac6e3339 4817 t = build_int_cst (unsigned_type_node,
4818 exit_reachable ? len - 1 : len);
b9a16870 4819 u = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_START);
75a70cf9 4820 stmt = gimple_build_call (u, 1, t);
1e8e9920 4821 }
79acaae1 4822 else
4823 {
4824 /* Otherwise, call GOMP_sections_next. */
b9a16870 4825 u = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_NEXT);
75a70cf9 4826 stmt = gimple_build_call (u, 0);
79acaae1 4827 }
75a70cf9 4828 gimple_call_set_lhs (stmt, vin);
4829 gsi_insert_after (&si, stmt, GSI_SAME_STMT);
4830 gsi_remove (&si, true);
4831
4832 /* The switch() statement replacing GIMPLE_OMP_SECTIONS_SWITCH goes in
4833 L0_BB. */
4834 switch_si = gsi_last_bb (l0_bb);
4835 gcc_assert (gimple_code (gsi_stmt (switch_si)) == GIMPLE_OMP_SECTIONS_SWITCH);
79acaae1 4836 if (exit_reachable)
4837 {
4838 cont = last_stmt (l1_bb);
75a70cf9 4839 gcc_assert (gimple_code (cont) == GIMPLE_OMP_CONTINUE);
4840 vmain = gimple_omp_continue_control_use (cont);
4841 vnext = gimple_omp_continue_control_def (cont);
79acaae1 4842 }
4843 else
4844 {
4845 vmain = vin;
4846 vnext = NULL_TREE;
4847 }
1e8e9920 4848
ac6e3339 4849 i = 0;
4850 if (exit_reachable)
4851 {
b6e3dd65 4852 t = build_case_label (build_int_cst (unsigned_type_node, 0), NULL, l2);
75a70cf9 4853 VEC_quick_push (tree, label_vec, t);
ac6e3339 4854 i++;
4855 }
03ed154b 4856
75a70cf9 4857 /* Convert each GIMPLE_OMP_SECTION into a CASE_LABEL_EXPR. */
ac6e3339 4858 for (inner = region->inner, casei = 1;
4859 inner;
4860 inner = inner->next, i++, casei++)
1e8e9920 4861 {
773c5ba7 4862 basic_block s_entry_bb, s_exit_bb;
4863
9884aaf8 4864 /* Skip optional reduction region. */
75a70cf9 4865 if (inner->type == GIMPLE_OMP_ATOMIC_LOAD)
9884aaf8 4866 {
4867 --i;
4868 --casei;
4869 continue;
4870 }
4871
61e47ac8 4872 s_entry_bb = inner->entry;
4873 s_exit_bb = inner->exit;
1e8e9920 4874
75a70cf9 4875 t = gimple_block_label (s_entry_bb);
ac6e3339 4876 u = build_int_cst (unsigned_type_node, casei);
b6e3dd65 4877 u = build_case_label (u, NULL, t);
75a70cf9 4878 VEC_quick_push (tree, label_vec, u);
61e47ac8 4879
75a70cf9 4880 si = gsi_last_bb (s_entry_bb);
4881 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SECTION);
4882 gcc_assert (i < len || gimple_omp_section_last_p (gsi_stmt (si)));
4883 gsi_remove (&si, true);
61e47ac8 4884 single_succ_edge (s_entry_bb)->flags = EDGE_FALLTHRU;
03ed154b 4885
4886 if (s_exit_bb == NULL)
4887 continue;
4888
75a70cf9 4889 si = gsi_last_bb (s_exit_bb);
4890 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_RETURN);
4891 gsi_remove (&si, true);
03ed154b 4892
773c5ba7 4893 single_succ_edge (s_exit_bb)->flags = EDGE_FALLTHRU;
1e8e9920 4894 }
4895
773c5ba7 4896 /* Error handling code goes in DEFAULT_BB. */
75a70cf9 4897 t = gimple_block_label (default_bb);
b6e3dd65 4898 u = build_case_label (NULL, NULL, t);
61e47ac8 4899 make_edge (l0_bb, default_bb, 0);
1e8e9920 4900
75a70cf9 4901 stmt = gimple_build_switch_vec (vmain, u, label_vec);
4902 gsi_insert_after (&switch_si, stmt, GSI_SAME_STMT);
4903 gsi_remove (&switch_si, true);
4904 VEC_free (tree, heap, label_vec);
4905
4906 si = gsi_start_bb (default_bb);
b9a16870 4907 stmt = gimple_build_call (builtin_decl_explicit (BUILT_IN_TRAP), 0);
75a70cf9 4908 gsi_insert_after (&si, stmt, GSI_CONTINUE_LINKING);
773c5ba7 4909
ac6e3339 4910 if (exit_reachable)
03ed154b 4911 {
b9a16870 4912 tree bfn_decl;
4913
ac6e3339 4914 /* Code to get the next section goes in L1_BB. */
75a70cf9 4915 si = gsi_last_bb (l1_bb);
4916 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_CONTINUE);
1e8e9920 4917
b9a16870 4918 bfn_decl = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_NEXT);
4919 stmt = gimple_build_call (bfn_decl, 0);
75a70cf9 4920 gimple_call_set_lhs (stmt, vnext);
4921 gsi_insert_after (&si, stmt, GSI_SAME_STMT);
4922 gsi_remove (&si, true);
773c5ba7 4923
ac6e3339 4924 single_succ_edge (l1_bb)->flags = EDGE_FALLTHRU;
4925
75a70cf9 4926 /* Cleanup function replaces GIMPLE_OMP_RETURN in EXIT_BB. */
4927 si = gsi_last_bb (l2_bb);
4928 if (gimple_omp_return_nowait_p (gsi_stmt (si)))
b9a16870 4929 t = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_END_NOWAIT);
03ed154b 4930 else
b9a16870 4931 t = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_END);
75a70cf9 4932 stmt = gimple_build_call (t, 0);
4933 gsi_insert_after (&si, stmt, GSI_SAME_STMT);
4934 gsi_remove (&si, true);
03ed154b 4935 }
773c5ba7 4936
79acaae1 4937 set_immediate_dominator (CDI_DOMINATORS, default_bb, l0_bb);
773c5ba7 4938}
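
/* Illustrative sketch, not part of this pass (hypothetical names): the
   shape of the dispatch built above, written as plain C.  Each
   GIMPLE_OMP_SECTION becomes one case, case 0 branches to the exit
   label L2, and the default block traps because GOMP_sections_start /
   GOMP_sections_next never hand back an out-of-range index.  */

static void
sections_dispatch_sketch (unsigned casei)
{
  switch (casei)
    {
    case 0:
      return;			/* L2: no section left to run.  */
    case 1:
      /* Body of the first #pragma omp section.  */
      break;
    default:
      __builtin_trap ();	/* DEFAULT_BB: unreachable index.  */
    }
}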
1e8e9920 4939
1e8e9920 4940
61e47ac8 4941/* Expand code for an OpenMP single directive. We've already expanded
4942 much of the code; here we simply place the GOMP_barrier call. */
4943
4944static void
4945expand_omp_single (struct omp_region *region)
4946{
4947 basic_block entry_bb, exit_bb;
75a70cf9 4948 gimple_stmt_iterator si;
61e47ac8 4949 bool need_barrier = false;
4950
4951 entry_bb = region->entry;
4952 exit_bb = region->exit;
4953
75a70cf9 4954 si = gsi_last_bb (entry_bb);
61e47ac8 4955 /* The terminal barrier at the end of a GOMP_single_copy sequence cannot
4956 be removed. We need to ensure that the thread that entered the single
4957 does not exit before the data is copied out by the other threads. */
75a70cf9 4958 if (find_omp_clause (gimple_omp_single_clauses (gsi_stmt (si)),
61e47ac8 4959 OMP_CLAUSE_COPYPRIVATE))
4960 need_barrier = true;
75a70cf9 4961 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SINGLE);
4962 gsi_remove (&si, true);
61e47ac8 4963 single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
4964
75a70cf9 4965 si = gsi_last_bb (exit_bb);
4966 if (!gimple_omp_return_nowait_p (gsi_stmt (si)) || need_barrier)
4967 force_gimple_operand_gsi (&si, build_omp_barrier (), false, NULL_TREE,
4968 false, GSI_SAME_STMT);
4969 gsi_remove (&si, true);
61e47ac8 4970 single_succ_edge (exit_bb)->flags = EDGE_FALLTHRU;
4971}
4972
4973
4974/* Generic expansion for OpenMP synchronization directives: master,
4975 ordered and critical. All we need to do here is remove the entry
4976 and exit markers for REGION. */
773c5ba7 4977
4978static void
4979expand_omp_synch (struct omp_region *region)
4980{
4981 basic_block entry_bb, exit_bb;
75a70cf9 4982 gimple_stmt_iterator si;
773c5ba7 4983
61e47ac8 4984 entry_bb = region->entry;
4985 exit_bb = region->exit;
773c5ba7 4986
75a70cf9 4987 si = gsi_last_bb (entry_bb);
4988 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SINGLE
4989 || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_MASTER
4990 || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ORDERED
4991 || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_CRITICAL);
4992 gsi_remove (&si, true);
773c5ba7 4993 single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
4994
03ed154b 4995 if (exit_bb)
4996 {
75a70cf9 4997 si = gsi_last_bb (exit_bb);
4998 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_RETURN);
4999 gsi_remove (&si, true);
03ed154b 5000 single_succ_edge (exit_bb)->flags = EDGE_FALLTHRU;
5001 }
773c5ba7 5002}
1e8e9920 5003
2169f33b 5004/* A subroutine of expand_omp_atomic. Attempt to implement the atomic
5005 operation as a simple atomic load, via the __atomic_load_N builtins. */
5006
5007static bool
3ec11c49 5008expand_omp_atomic_load (basic_block load_bb, tree addr,
5009 tree loaded_val, int index)
2169f33b 5010{
3ec11c49 5011 enum built_in_function tmpbase;
5012 gimple_stmt_iterator gsi;
5013 basic_block store_bb;
5014 location_t loc;
5015 gimple stmt;
5016 tree decl, call, type, itype;
5017
5018 gsi = gsi_last_bb (load_bb);
5019 stmt = gsi_stmt (gsi);
5020 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_ATOMIC_LOAD);
5021 loc = gimple_location (stmt);
5022
5023 /* ??? If the target does not implement atomic_load_optab[mode], and mode
5024 is smaller than word size, then expand_atomic_load assumes that the load
5025 is atomic. We could avoid the builtin entirely in this case. */
5026
5027 tmpbase = (enum built_in_function) (BUILT_IN_ATOMIC_LOAD_N + index + 1);
5028 decl = builtin_decl_explicit (tmpbase);
5029 if (decl == NULL_TREE)
5030 return false;
5031
5032 type = TREE_TYPE (loaded_val);
5033 itype = TREE_TYPE (TREE_TYPE (decl));
5034
5035 call = build_call_expr_loc (loc, decl, 2, addr,
5036 build_int_cst (NULL, MEMMODEL_RELAXED));
5037 if (!useless_type_conversion_p (type, itype))
5038 call = fold_build1_loc (loc, VIEW_CONVERT_EXPR, type, call);
5039 call = build2_loc (loc, MODIFY_EXPR, void_type_node, loaded_val, call);
5040
5041 force_gimple_operand_gsi (&gsi, call, true, NULL_TREE, true, GSI_SAME_STMT);
5042 gsi_remove (&gsi, true);
5043
5044 store_bb = single_succ (load_bb);
5045 gsi = gsi_last_bb (store_bb);
5046 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_ATOMIC_STORE);
5047 gsi_remove (&gsi, true);
5048
5049 if (gimple_in_ssa_p (cfun))
5050 update_ssa (TODO_update_ssa_no_phi);
5051
5052 return true;
2169f33b 5053}
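
/* Illustrative sketch, not part of this pass (hypothetical names): the
   call the code above emits for "#pragma omp atomic read  v = x;" on a
   4-byte type, where index 2 selects the _4 member of the builtin
   family and MEMMODEL_RELAXED maps to __ATOMIC_RELAXED.  */

static int sketch_load_x;

static int
atomic_read_sketch (void)
{
  return __atomic_load_n (&sketch_load_x, __ATOMIC_RELAXED);
}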
5054
5055/* A subroutine of expand_omp_atomic. Attempt to implement the atomic
5056 operation as a simple atomic store (or exchange, when the old value is needed), via the __atomic builtins. */
5057
5058static bool
3ec11c49 5059expand_omp_atomic_store (basic_block load_bb, tree addr,
5060 tree loaded_val, tree stored_val, int index)
2169f33b 5061{
3ec11c49 5062 enum built_in_function tmpbase;
5063 gimple_stmt_iterator gsi;
5064 basic_block store_bb = single_succ (load_bb);
5065 location_t loc;
5066 gimple stmt;
5067 tree decl, call, type, itype;
5068 enum machine_mode imode;
5069 bool exchange;
5070
5071 gsi = gsi_last_bb (load_bb);
5072 stmt = gsi_stmt (gsi);
5073 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_ATOMIC_LOAD);
5074
5075 /* If the load value is needed, then this isn't a store but an exchange. */
5076 exchange = gimple_omp_atomic_need_value_p (stmt);
5077
5078 gsi = gsi_last_bb (store_bb);
5079 stmt = gsi_stmt (gsi);
5080 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_ATOMIC_STORE);
5081 loc = gimple_location (stmt);
5082
5083 /* ??? If the target does not implement atomic_store_optab[mode], and mode
5084 is smaller than word size, then expand_atomic_store assumes that the store
5085 is atomic. We could avoid the builtin entirely in this case. */
5086
5087 tmpbase = (exchange ? BUILT_IN_ATOMIC_EXCHANGE_N : BUILT_IN_ATOMIC_STORE_N);
5088 tmpbase = (enum built_in_function) ((int) tmpbase + index + 1);
5089 decl = builtin_decl_explicit (tmpbase);
5090 if (decl == NULL_TREE)
5091 return false;
5092
5093 type = TREE_TYPE (stored_val);
5094
5095 /* Dig out the type of the function's second argument. */
5096 itype = TREE_TYPE (decl);
5097 itype = TYPE_ARG_TYPES (itype);
5098 itype = TREE_CHAIN (itype);
5099 itype = TREE_VALUE (itype);
5100 imode = TYPE_MODE (itype);
5101
5102 if (exchange && !can_atomic_exchange_p (imode, true))
5103 return false;
5104
5105 if (!useless_type_conversion_p (itype, type))
5106 stored_val = fold_build1_loc (loc, VIEW_CONVERT_EXPR, itype, stored_val);
5107 call = build_call_expr_loc (loc, decl, 3, addr, stored_val,
5108 build_int_cst (NULL, MEMMODEL_RELAXED));
5109 if (exchange)
5110 {
5111 if (!useless_type_conversion_p (type, itype))
5112 call = build1_loc (loc, VIEW_CONVERT_EXPR, type, call);
5113 call = build2_loc (loc, MODIFY_EXPR, void_type_node, loaded_val, call);
5114 }
5115
5116 force_gimple_operand_gsi (&gsi, call, true, NULL_TREE, true, GSI_SAME_STMT);
5117 gsi_remove (&gsi, true);
5118
5119 /* Remove the GIMPLE_OMP_ATOMIC_LOAD that we verified above. */
5120 gsi = gsi_last_bb (load_bb);
5121 gsi_remove (&gsi, true);
5122
5123 if (gimple_in_ssa_p (cfun))
5124 update_ssa (TODO_update_ssa_no_phi);
5125
5126 return true;
2169f33b 5127}
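
/* Illustrative sketch (hypothetical names): the two outcomes of the
   store path above.  A plain write becomes an __atomic_store; when the
   previous value is also needed, the exchange flavour is used so the
   old contents land in loaded_val.  */

static int sketch_store_x;

static void
atomic_write_sketch (int v)
{
  __atomic_store_n (&sketch_store_x, v, __ATOMIC_RELAXED);
}

static int
atomic_capture_write_sketch (int v)
{
  return __atomic_exchange_n (&sketch_store_x, v, __ATOMIC_RELAXED);
}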
5128
cb7f680b 5129/* A subroutine of expand_omp_atomic. Attempt to implement the atomic
1cd6e20d 5130 operation as a __atomic_fetch_op builtin. INDEX is log2 of the
cb7f680b 5131 size of the data type, and thus usable to find the index of the builtin
5132 decl. Returns false if the expression is not of the proper form. */
5133
5134static bool
5135expand_omp_atomic_fetch_op (basic_block load_bb,
5136 tree addr, tree loaded_val,
5137 tree stored_val, int index)
5138{
b9a16870 5139 enum built_in_function oldbase, newbase, tmpbase;
cb7f680b 5140 tree decl, itype, call;
2169f33b 5141 tree lhs, rhs;
cb7f680b 5142 basic_block store_bb = single_succ (load_bb);
75a70cf9 5143 gimple_stmt_iterator gsi;
5144 gimple stmt;
389dd41b 5145 location_t loc;
1cd6e20d 5146 enum tree_code code;
2169f33b 5147 bool need_old, need_new;
1cd6e20d 5148 enum machine_mode imode;
cb7f680b 5149
5150 /* We expect to find the following sequences:
48e1416a 5151
cb7f680b 5152 load_bb:
75a70cf9 5153 GIMPLE_OMP_ATOMIC_LOAD (tmp, mem)
cb7f680b 5154
5155 store_bb:
5156 val = tmp OP something; (or: something OP tmp)
48e1416a 5157 GIMPLE_OMP_ATOMIC_STORE (val)
cb7f680b 5158
48e1416a 5159 ???FIXME: Allow a more flexible sequence.
cb7f680b 5160 Perhaps use data flow to pick the statements.
48e1416a 5161
cb7f680b 5162 */
5163
75a70cf9 5164 gsi = gsi_after_labels (store_bb);
5165 stmt = gsi_stmt (gsi);
389dd41b 5166 loc = gimple_location (stmt);
75a70cf9 5167 if (!is_gimple_assign (stmt))
cb7f680b 5168 return false;
75a70cf9 5169 gsi_next (&gsi);
5170 if (gimple_code (gsi_stmt (gsi)) != GIMPLE_OMP_ATOMIC_STORE)
cb7f680b 5171 return false;
2169f33b 5172 need_new = gimple_omp_atomic_need_value_p (gsi_stmt (gsi));
5173 need_old = gimple_omp_atomic_need_value_p (last_stmt (load_bb));
5174 gcc_checking_assert (!need_old || !need_new);
cb7f680b 5175
75a70cf9 5176 if (!operand_equal_p (gimple_assign_lhs (stmt), stored_val, 0))
cb7f680b 5177 return false;
5178
cb7f680b 5179 /* Check for one of the supported fetch-op operations. */
1cd6e20d 5180 code = gimple_assign_rhs_code (stmt);
5181 switch (code)
cb7f680b 5182 {
5183 case PLUS_EXPR:
5184 case POINTER_PLUS_EXPR:
1cd6e20d 5185 oldbase = BUILT_IN_ATOMIC_FETCH_ADD_N;
5186 newbase = BUILT_IN_ATOMIC_ADD_FETCH_N;
cb7f680b 5187 break;
5188 case MINUS_EXPR:
1cd6e20d 5189 oldbase = BUILT_IN_ATOMIC_FETCH_SUB_N;
5190 newbase = BUILT_IN_ATOMIC_SUB_FETCH_N;
cb7f680b 5191 break;
5192 case BIT_AND_EXPR:
1cd6e20d 5193 oldbase = BUILT_IN_ATOMIC_FETCH_AND_N;
5194 newbase = BUILT_IN_ATOMIC_AND_FETCH_N;
cb7f680b 5195 break;
5196 case BIT_IOR_EXPR:
1cd6e20d 5197 oldbase = BUILT_IN_ATOMIC_FETCH_OR_N;
5198 newbase = BUILT_IN_ATOMIC_OR_FETCH_N;
cb7f680b 5199 break;
5200 case BIT_XOR_EXPR:
1cd6e20d 5201 oldbase = BUILT_IN_ATOMIC_FETCH_XOR_N;
5202 newbase = BUILT_IN_ATOMIC_XOR_FETCH_N;
cb7f680b 5203 break;
5204 default:
5205 return false;
5206 }
1cd6e20d 5207
cb7f680b 5208 /* Make sure the expression is of the proper form. */
75a70cf9 5209 if (operand_equal_p (gimple_assign_rhs1 (stmt), loaded_val, 0))
5210 rhs = gimple_assign_rhs2 (stmt);
5211 else if (commutative_tree_code (gimple_assign_rhs_code (stmt))
5212 && operand_equal_p (gimple_assign_rhs2 (stmt), loaded_val, 0))
5213 rhs = gimple_assign_rhs1 (stmt);
cb7f680b 5214 else
5215 return false;
5216
b9a16870 5217 tmpbase = ((enum built_in_function)
5218 ((need_new ? newbase : oldbase) + index + 1));
5219 decl = builtin_decl_explicit (tmpbase);
0f94f46b 5220 if (decl == NULL_TREE)
5221 return false;
cb7f680b 5222 itype = TREE_TYPE (TREE_TYPE (decl));
1cd6e20d 5223 imode = TYPE_MODE (itype);
cb7f680b 5224
1cd6e20d 5225 /* We could test all of the various optabs involved, but the fact of the
5226 matter is that (with the exception of i486 vs i586 and xadd) all targets
5227 that support any atomic operation optab also implement compare-and-swap.
5228 Let optabs.c take care of expanding any compare-and-swap loop. */
29139cdc 5229 if (!can_compare_and_swap_p (imode, true))
cb7f680b 5230 return false;
5231
75a70cf9 5232 gsi = gsi_last_bb (load_bb);
5233 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_ATOMIC_LOAD);
1cd6e20d 5234
5235 /* OpenMP does not imply any barrier-like semantics on its atomic ops.
5236 It only requires that the operation happen atomically. Thus we can
5237 use the RELAXED memory model. */
5238 call = build_call_expr_loc (loc, decl, 3, addr,
5239 fold_convert_loc (loc, itype, rhs),
5240 build_int_cst (NULL, MEMMODEL_RELAXED));
5241
2169f33b 5242 if (need_old || need_new)
5243 {
5244 lhs = need_old ? loaded_val : stored_val;
5245 call = fold_convert_loc (loc, TREE_TYPE (lhs), call);
5246 call = build2_loc (loc, MODIFY_EXPR, void_type_node, lhs, call);
5247 }
5248 else
5249 call = fold_convert_loc (loc, void_type_node, call);
75a70cf9 5250 force_gimple_operand_gsi (&gsi, call, true, NULL_TREE, true, GSI_SAME_STMT);
5251 gsi_remove (&gsi, true);
cb7f680b 5252
75a70cf9 5253 gsi = gsi_last_bb (store_bb);
5254 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_ATOMIC_STORE);
5255 gsi_remove (&gsi, true);
5256 gsi = gsi_last_bb (store_bb);
5257 gsi_remove (&gsi, true);
cb7f680b 5258
5259 if (gimple_in_ssa_p (cfun))
5260 update_ssa (TODO_update_ssa_no_phi);
5261
5262 return true;
5263}
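
/* Illustrative sketch (hypothetical names): the fetch-op form for
   "#pragma omp atomic  x += n".  The statement form needs neither the
   old nor the new value, so the result is simply dropped; capturing the
   old value would select the FETCH_ADD flavour's result, capturing the
   new value the ADD_FETCH flavour's.  */

static int sketch_fetch_x;

static void
atomic_add_sketch (int n)
{
  __atomic_fetch_add (&sketch_fetch_x, n, __ATOMIC_RELAXED);
}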
5264
5265/* A subroutine of expand_omp_atomic. Implement the atomic operation as:
5266
5267 oldval = *addr;
5268 repeat:
5269 newval = rhs; // with oldval replacing *addr in rhs
 5270	 actual = __sync_val_compare_and_swap (addr, oldval, newval);
 5271	 if (actual != oldval)
 5272	 { oldval = actual; goto repeat; }
5273
5274 INDEX is log2 of the size of the data type, and thus usable to find the
5275 index of the builtin decl. */
5276
5277static bool
5278expand_omp_atomic_pipeline (basic_block load_bb, basic_block store_bb,
5279 tree addr, tree loaded_val, tree stored_val,
5280 int index)
5281{
790368c5 5282 tree loadedi, storedi, initial, new_storedi, old_vali;
cb7f680b 5283 tree type, itype, cmpxchg, iaddr;
75a70cf9 5284 gimple_stmt_iterator si;
cb7f680b 5285 basic_block loop_header = single_succ (load_bb);
75a70cf9 5286 gimple phi, stmt;
cb7f680b 5287 edge e;
b9a16870 5288 enum built_in_function fncode;
cb7f680b 5289
1cd6e20d 5290 /* ??? We need a non-pointer interface to __atomic_compare_exchange in
5291 order to use the RELAXED memory model effectively. */
b9a16870 5292 fncode = (enum built_in_function)((int)BUILT_IN_SYNC_VAL_COMPARE_AND_SWAP_N
5293 + index + 1);
5294 cmpxchg = builtin_decl_explicit (fncode);
0f94f46b 5295 if (cmpxchg == NULL_TREE)
5296 return false;
cb7f680b 5297 type = TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (addr)));
5298 itype = TREE_TYPE (TREE_TYPE (cmpxchg));
5299
29139cdc 5300 if (!can_compare_and_swap_p (TYPE_MODE (itype), true))
cb7f680b 5301 return false;
5302
75a70cf9 5303 /* Load the initial value, replacing the GIMPLE_OMP_ATOMIC_LOAD. */
5304 si = gsi_last_bb (load_bb);
5305 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_LOAD);
5306
790368c5 5307 /* For floating-point values, we'll need to view-convert them to integers
5308 so that we can perform the atomic compare and swap. Simplify the
5309 following code by always setting up the "i"ntegral variables. */
5310 if (!INTEGRAL_TYPE_P (type) && !POINTER_TYPE_P (type))
5311 {
75a70cf9 5312 tree iaddr_val;
5313
083152fb 5314 iaddr = make_rename_temp (build_pointer_type_for_mode (itype, ptr_mode,
5315 true), NULL);
75a70cf9 5316 iaddr_val
5317 = force_gimple_operand_gsi (&si,
5318 fold_convert (TREE_TYPE (iaddr), addr),
5319 false, NULL_TREE, true, GSI_SAME_STMT);
5320 stmt = gimple_build_assign (iaddr, iaddr_val);
5321 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
790368c5 5322 loadedi = create_tmp_var (itype, NULL);
5323 if (gimple_in_ssa_p (cfun))
5324 {
5325 add_referenced_var (iaddr);
5326 add_referenced_var (loadedi);
5327 loadedi = make_ssa_name (loadedi, NULL);
5328 }
5329 }
5330 else
5331 {
5332 iaddr = addr;
5333 loadedi = loaded_val;
5334 }
75a70cf9 5335
182cf5a9 5336 initial
5337 = force_gimple_operand_gsi (&si,
5338 build2 (MEM_REF, TREE_TYPE (TREE_TYPE (iaddr)),
5339 iaddr,
5340 build_int_cst (TREE_TYPE (iaddr), 0)),
5341 true, NULL_TREE, true, GSI_SAME_STMT);
790368c5 5342
5343 /* Move the value to the LOADEDI temporary. */
cb7f680b 5344 if (gimple_in_ssa_p (cfun))
5345 {
75a70cf9 5346 gcc_assert (gimple_seq_empty_p (phi_nodes (loop_header)));
790368c5 5347 phi = create_phi_node (loadedi, loop_header);
5348 SSA_NAME_DEF_STMT (loadedi) = phi;
cb7f680b 5349 SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, single_succ_edge (load_bb)),
5350 initial);
5351 }
5352 else
75a70cf9 5353 gsi_insert_before (&si,
5354 gimple_build_assign (loadedi, initial),
5355 GSI_SAME_STMT);
790368c5 5356 if (loadedi != loaded_val)
5357 {
75a70cf9 5358 gimple_stmt_iterator gsi2;
5359 tree x;
790368c5 5360
5361 x = build1 (VIEW_CONVERT_EXPR, type, loadedi);
75a70cf9 5362 gsi2 = gsi_start_bb (loop_header);
790368c5 5363 if (gimple_in_ssa_p (cfun))
5364 {
75a70cf9 5365 gimple stmt;
5366 x = force_gimple_operand_gsi (&gsi2, x, true, NULL_TREE,
5367 true, GSI_SAME_STMT);
5368 stmt = gimple_build_assign (loaded_val, x);
5369 gsi_insert_before (&gsi2, stmt, GSI_SAME_STMT);
790368c5 5370 }
5371 else
5372 {
75a70cf9 5373 x = build2 (MODIFY_EXPR, TREE_TYPE (loaded_val), loaded_val, x);
5374 force_gimple_operand_gsi (&gsi2, x, true, NULL_TREE,
5375 true, GSI_SAME_STMT);
790368c5 5376 }
5377 }
75a70cf9 5378 gsi_remove (&si, true);
cb7f680b 5379
75a70cf9 5380 si = gsi_last_bb (store_bb);
5381 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_STORE);
cb7f680b 5382
790368c5 5383 if (iaddr == addr)
5384 storedi = stored_val;
cb7f680b 5385 else
790368c5 5386 storedi =
75a70cf9 5387 force_gimple_operand_gsi (&si,
790368c5 5388 build1 (VIEW_CONVERT_EXPR, itype,
5389 stored_val), true, NULL_TREE, true,
75a70cf9 5390 GSI_SAME_STMT);
cb7f680b 5391
5392 /* Build the compare&swap statement. */
5393 new_storedi = build_call_expr (cmpxchg, 3, iaddr, loadedi, storedi);
75a70cf9 5394 new_storedi = force_gimple_operand_gsi (&si,
87f9ffa4 5395 fold_convert (TREE_TYPE (loadedi),
5396 new_storedi),
cb7f680b 5397 true, NULL_TREE,
75a70cf9 5398 true, GSI_SAME_STMT);
cb7f680b 5399
5400 if (gimple_in_ssa_p (cfun))
5401 old_vali = loadedi;
5402 else
5403 {
87f9ffa4 5404 old_vali = create_tmp_var (TREE_TYPE (loadedi), NULL);
790368c5 5405 if (gimple_in_ssa_p (cfun))
5406 add_referenced_var (old_vali);
75a70cf9 5407 stmt = gimple_build_assign (old_vali, loadedi);
5408 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
cb7f680b 5409
75a70cf9 5410 stmt = gimple_build_assign (loadedi, new_storedi);
5411 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
cb7f680b 5412 }
5413
5414 /* Note that we always perform the comparison as an integer, even for
48e1416a 5415 floating point. This allows the atomic operation to properly
cb7f680b 5416 succeed even with NaNs and -0.0. */
75a70cf9 5417 stmt = gimple_build_cond_empty
5418 (build2 (NE_EXPR, boolean_type_node,
5419 new_storedi, old_vali));
5420 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
cb7f680b 5421
5422 /* Update cfg. */
5423 e = single_succ_edge (store_bb);
5424 e->flags &= ~EDGE_FALLTHRU;
5425 e->flags |= EDGE_FALSE_VALUE;
5426
5427 e = make_edge (store_bb, loop_header, EDGE_TRUE_VALUE);
5428
790368c5 5429 /* Copy the new value to loadedi (we already did that before the condition
cb7f680b 5430 if we are not in SSA). */
5431 if (gimple_in_ssa_p (cfun))
5432 {
75a70cf9 5433 phi = gimple_seq_first_stmt (phi_nodes (loop_header));
790368c5 5434 SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, e), new_storedi);
cb7f680b 5435 }
5436
75a70cf9 5437 /* Remove GIMPLE_OMP_ATOMIC_STORE. */
5438 gsi_remove (&si, true);
cb7f680b 5439
5440 if (gimple_in_ssa_p (cfun))
5441 update_ssa (TODO_update_ssa_no_phi);
5442
5443 return true;
5444}
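
/* Illustrative sketch (hypothetical names): the loop above as plain C,
   for "#pragma omp atomic  x = x * 3".  For floating-point types the
   same loop runs on a view-converted integer image of the value, as the
   loadedi/storedi handling above arranges.  */

static int sketch_cas_x;

static void
atomic_mul3_sketch (void)
{
  int oldval = sketch_cas_x;
  for (;;)
    {
      int newval = oldval * 3;
      int actual = __sync_val_compare_and_swap (&sketch_cas_x,
						oldval, newval);
      if (actual == oldval)
	break;			/* The swap happened.  */
      oldval = actual;		/* Lost a race; retry with the fresh value.  */
    }
}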
5445
5446/* A subroutine of expand_omp_atomic. Implement the atomic operation as:
5447
5448 GOMP_atomic_start ();
5449 *addr = rhs;
5450 GOMP_atomic_end ();
5451
5452 The result is not globally atomic, but works so long as all parallel
5453 references are within #pragma omp atomic directives. According to
 5454 responses received from omp@openmp.org, this appears to be within
 5455 spec, which makes sense, since that's how several other compilers
48e1416a 5456 handle this situation as well.
75a70cf9 5457 LOADED_VAL and ADDR are the operands of GIMPLE_OMP_ATOMIC_LOAD we're
5458 expanding. STORED_VAL is the operand of the matching
5459 GIMPLE_OMP_ATOMIC_STORE.
cb7f680b 5460
48e1416a 5461 We replace
5462 GIMPLE_OMP_ATOMIC_LOAD (loaded_val, addr) with
cb7f680b 5463 loaded_val = *addr;
5464
5465 and replace
3ec11c49 5466 GIMPLE_OMP_ATOMIC_STORE (stored_val) with
48e1416a 5467 *addr = stored_val;
cb7f680b 5468*/
5469
5470static bool
5471expand_omp_atomic_mutex (basic_block load_bb, basic_block store_bb,
5472 tree addr, tree loaded_val, tree stored_val)
5473{
75a70cf9 5474 gimple_stmt_iterator si;
5475 gimple stmt;
cb7f680b 5476 tree t;
5477
75a70cf9 5478 si = gsi_last_bb (load_bb);
5479 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_LOAD);
cb7f680b 5480
b9a16870 5481 t = builtin_decl_explicit (BUILT_IN_GOMP_ATOMIC_START);
414c3a2c 5482 t = build_call_expr (t, 0);
75a70cf9 5483 force_gimple_operand_gsi (&si, t, true, NULL_TREE, true, GSI_SAME_STMT);
cb7f680b 5484
182cf5a9 5485 stmt = gimple_build_assign (loaded_val, build_simple_mem_ref (addr));
75a70cf9 5486 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
5487 gsi_remove (&si, true);
cb7f680b 5488
75a70cf9 5489 si = gsi_last_bb (store_bb);
5490 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_STORE);
cb7f680b 5491
182cf5a9 5492 stmt = gimple_build_assign (build_simple_mem_ref (unshare_expr (addr)),
5493 stored_val);
75a70cf9 5494 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
cb7f680b 5495
b9a16870 5496 t = builtin_decl_explicit (BUILT_IN_GOMP_ATOMIC_END);
414c3a2c 5497 t = build_call_expr (t, 0);
75a70cf9 5498 force_gimple_operand_gsi (&si, t, true, NULL_TREE, true, GSI_SAME_STMT);
5499 gsi_remove (&si, true);
cb7f680b 5500
5501 if (gimple_in_ssa_p (cfun))
5502 update_ssa (TODO_update_ssa_no_phi);
5503 return true;
5504}
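
/* Illustrative sketch (hypothetical wrapper name): the mutex fallback.
   GOMP_atomic_start and GOMP_atomic_end are the real libgomp entry
   points and guard a single global lock, which is why this form is only
   atomic with respect to other "#pragma omp atomic" regions.  */

extern void GOMP_atomic_start (void);
extern void GOMP_atomic_end (void);

static long double sketch_mutex_x;

static void
atomic_mutex_sketch (long double v)
{
  GOMP_atomic_start ();
  sketch_mutex_x += v;		/* *addr = rhs */
  GOMP_atomic_end ();
}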
5505
48e1416a 5506/* Expand a GIMPLE_OMP_ATOMIC statement.  We first try to expand it
 5507 using expand_omp_atomic_fetch_op.  If that fails, we try to
cb7f680b 5508 call expand_omp_atomic_pipeline, and if that fails too, the
5509 ultimate fallback is wrapping the operation in a mutex
48e1416a 5510 (expand_omp_atomic_mutex). REGION is the atomic region built
5511 by build_omp_regions_1(). */
cb7f680b 5512
5513static void
5514expand_omp_atomic (struct omp_region *region)
5515{
5516 basic_block load_bb = region->entry, store_bb = region->exit;
75a70cf9 5517 gimple load = last_stmt (load_bb), store = last_stmt (store_bb);
5518 tree loaded_val = gimple_omp_atomic_load_lhs (load);
5519 tree addr = gimple_omp_atomic_load_rhs (load);
5520 tree stored_val = gimple_omp_atomic_store_val (store);
cb7f680b 5521 tree type = TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (addr)));
5522 HOST_WIDE_INT index;
5523
5524 /* Make sure the type is one of the supported sizes. */
5525 index = tree_low_cst (TYPE_SIZE_UNIT (type), 1);
5526 index = exact_log2 (index);
5527 if (index >= 0 && index <= 4)
5528 {
5529 unsigned int align = TYPE_ALIGN_UNIT (type);
5530
5531 /* __sync builtins require strict data alignment. */
dcf7024c 5532 if (exact_log2 (align) >= index)
cb7f680b 5533 {
3ec11c49 5534 /* Atomic load. */
2169f33b 5535 if (loaded_val == stored_val
5536 && (GET_MODE_CLASS (TYPE_MODE (type)) == MODE_INT
5537 || GET_MODE_CLASS (TYPE_MODE (type)) == MODE_FLOAT)
5538 && GET_MODE_BITSIZE (TYPE_MODE (type)) <= BITS_PER_WORD
3ec11c49 5539 && expand_omp_atomic_load (load_bb, addr, loaded_val, index))
2169f33b 5540 return;
5541
3ec11c49 5542 /* Atomic store. */
2169f33b 5543 if ((GET_MODE_CLASS (TYPE_MODE (type)) == MODE_INT
5544 || GET_MODE_CLASS (TYPE_MODE (type)) == MODE_FLOAT)
5545 && GET_MODE_BITSIZE (TYPE_MODE (type)) <= BITS_PER_WORD
5546 && store_bb == single_succ (load_bb)
5547 && first_stmt (store_bb) == store
3ec11c49 5548 && expand_omp_atomic_store (load_bb, addr, loaded_val,
5549 stored_val, index))
2169f33b 5550 return;
5551
cb7f680b 5552 /* When possible, use specialized atomic update functions. */
5553 if ((INTEGRAL_TYPE_P (type) || POINTER_TYPE_P (type))
3ec11c49 5554 && store_bb == single_succ (load_bb)
5555 && expand_omp_atomic_fetch_op (load_bb, addr,
5556 loaded_val, stored_val, index))
5557 return;
cb7f680b 5558
5559 /* If we don't have specialized __sync builtins, try and implement
5560 as a compare and swap loop. */
5561 if (expand_omp_atomic_pipeline (load_bb, store_bb, addr,
5562 loaded_val, stored_val, index))
5563 return;
5564 }
5565 }
5566
5567 /* The ultimate fallback is wrapping the operation in a mutex. */
5568 expand_omp_atomic_mutex (load_bb, store_bb, addr, loaded_val, stored_val);
5569}
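
/* Illustrative sketch: how the dispatch above maps a type to a member
   of an __atomic builtin family.  exact_log2 of the byte size gives the
   INDEX passed to the helpers; sizeof (int) == 4 yields 2, and the
   "+ index + 1" in the helpers then lands on e.g.
   BUILT_IN_ATOMIC_FETCH_ADD_4, one past the typeless _N entry.  A
   hypothetical stand-in for exact_log2:  */

static int
exact_log2_sketch (unsigned long x)
{
  int log = 0;
  if (x == 0 || (x & (x - 1)) != 0)
    return -1;			/* Not a power of two.  */
  while (x >>= 1)
    log++;
  return log;			/* 1->0, 2->1, 4->2, 8->3, 16->4.  */
}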
5570
1e8e9920 5571
773c5ba7 5572/* Expand the parallel region tree rooted at REGION. Expansion
5573 proceeds in depth-first order. Innermost regions are expanded
5574 first. This way, parallel regions that require a new function to
75a70cf9 5575 be created (e.g., GIMPLE_OMP_PARALLEL) can be expanded without having any
773c5ba7 5576 internal dependencies in their body. */
5577
5578static void
5579expand_omp (struct omp_region *region)
5580{
5581 while (region)
5582 {
1d22f541 5583 location_t saved_location;
5584
d1d5b012 5585 /* First, determine whether this is a combined parallel+workshare
5586 region. */
75a70cf9 5587 if (region->type == GIMPLE_OMP_PARALLEL)
d1d5b012 5588 determine_parallel_type (region);
5589
773c5ba7 5590 if (region->inner)
5591 expand_omp (region->inner);
5592
1d22f541 5593 saved_location = input_location;
75a70cf9 5594 if (gimple_has_location (last_stmt (region->entry)))
5595 input_location = gimple_location (last_stmt (region->entry));
1d22f541 5596
61e47ac8 5597 switch (region->type)
773c5ba7 5598 {
75a70cf9 5599 case GIMPLE_OMP_PARALLEL:
5600 case GIMPLE_OMP_TASK:
fd6481cf 5601 expand_omp_taskreg (region);
5602 break;
5603
75a70cf9 5604 case GIMPLE_OMP_FOR:
61e47ac8 5605 expand_omp_for (region);
5606 break;
773c5ba7 5607
75a70cf9 5608 case GIMPLE_OMP_SECTIONS:
61e47ac8 5609 expand_omp_sections (region);
5610 break;
773c5ba7 5611
75a70cf9 5612 case GIMPLE_OMP_SECTION:
61e47ac8 5613 /* Individual omp sections are handled together with their
75a70cf9 5614 parent GIMPLE_OMP_SECTIONS region. */
61e47ac8 5615 break;
773c5ba7 5616
75a70cf9 5617 case GIMPLE_OMP_SINGLE:
61e47ac8 5618 expand_omp_single (region);
5619 break;
773c5ba7 5620
75a70cf9 5621 case GIMPLE_OMP_MASTER:
5622 case GIMPLE_OMP_ORDERED:
5623 case GIMPLE_OMP_CRITICAL:
61e47ac8 5624 expand_omp_synch (region);
5625 break;
773c5ba7 5626
75a70cf9 5627 case GIMPLE_OMP_ATOMIC_LOAD:
cb7f680b 5628 expand_omp_atomic (region);
5629 break;
5630
61e47ac8 5631 default:
5632 gcc_unreachable ();
5633 }
cc5982dc 5634
1d22f541 5635 input_location = saved_location;
773c5ba7 5636 region = region->next;
5637 }
5638}
5639
5640
5641/* Helper for build_omp_regions. Scan the dominator tree starting at
28c92cbb 5642 block BB. PARENT is the region that contains BB. If SINGLE_TREE is
 5643 true, the function ends once a single tree is built (otherwise, the
 5644 whole forest of OMP constructs may be built). */
773c5ba7 5645
5646static void
28c92cbb 5647build_omp_regions_1 (basic_block bb, struct omp_region *parent,
5648 bool single_tree)
773c5ba7 5649{
75a70cf9 5650 gimple_stmt_iterator gsi;
5651 gimple stmt;
773c5ba7 5652 basic_block son;
5653
75a70cf9 5654 gsi = gsi_last_bb (bb);
5655 if (!gsi_end_p (gsi) && is_gimple_omp (gsi_stmt (gsi)))
773c5ba7 5656 {
5657 struct omp_region *region;
75a70cf9 5658 enum gimple_code code;
773c5ba7 5659
75a70cf9 5660 stmt = gsi_stmt (gsi);
5661 code = gimple_code (stmt);
5662 if (code == GIMPLE_OMP_RETURN)
773c5ba7 5663 {
5664 /* STMT is the return point out of region PARENT. Mark it
5665 as the exit point and make PARENT the immediately
5666 enclosing region. */
5667 gcc_assert (parent);
5668 region = parent;
61e47ac8 5669 region->exit = bb;
773c5ba7 5670 parent = parent->outer;
773c5ba7 5671 }
75a70cf9 5672 else if (code == GIMPLE_OMP_ATOMIC_STORE)
cb7f680b 5673 {
75a70cf9 5674 /* GIMPLE_OMP_ATOMIC_STORE is analogous to
5675 GIMPLE_OMP_RETURN, but matches with
5676 GIMPLE_OMP_ATOMIC_LOAD. */
cb7f680b 5677 gcc_assert (parent);
75a70cf9 5678 gcc_assert (parent->type == GIMPLE_OMP_ATOMIC_LOAD);
cb7f680b 5679 region = parent;
5680 region->exit = bb;
5681 parent = parent->outer;
5682 }
5683
75a70cf9 5684 else if (code == GIMPLE_OMP_CONTINUE)
61e47ac8 5685 {
5686 gcc_assert (parent);
5687 parent->cont = bb;
5688 }
75a70cf9 5689 else if (code == GIMPLE_OMP_SECTIONS_SWITCH)
ac6e3339 5690 {
75a70cf9 5691 /* GIMPLE_OMP_SECTIONS_SWITCH is part of
5692 GIMPLE_OMP_SECTIONS, and we do nothing for it. */
5693 ;
ac6e3339 5694 }
773c5ba7 5695 else
5696 {
5697 /* Otherwise, this directive becomes the parent for a new
5698 region. */
61e47ac8 5699 region = new_omp_region (bb, code, parent);
773c5ba7 5700 parent = region;
5701 }
773c5ba7 5702 }
5703
28c92cbb 5704 if (single_tree && !parent)
5705 return;
5706
773c5ba7 5707 for (son = first_dom_son (CDI_DOMINATORS, bb);
5708 son;
5709 son = next_dom_son (CDI_DOMINATORS, son))
28c92cbb 5710 build_omp_regions_1 (son, parent, single_tree);
5711}
5712
5713/* Builds the tree of OMP regions rooted at ROOT, storing it in
5714 root_omp_region. */
5715
5716static void
5717build_omp_regions_root (basic_block root)
5718{
5719 gcc_assert (root_omp_region == NULL);
5720 build_omp_regions_1 (root, NULL, true);
5721 gcc_assert (root_omp_region != NULL);
773c5ba7 5722}
5723
28c92cbb 5724/* Expands the OMP construct (and its subconstructs) starting in HEAD. */
5725
5726void
5727omp_expand_local (basic_block head)
5728{
5729 build_omp_regions_root (head);
5730 if (dump_file && (dump_flags & TDF_DETAILS))
5731 {
5732 fprintf (dump_file, "\nOMP region tree\n\n");
5733 dump_omp_region (dump_file, root_omp_region, 0);
5734 fprintf (dump_file, "\n");
5735 }
5736
5737 remove_exit_barriers (root_omp_region);
5738 expand_omp (root_omp_region);
5739
5740 free_omp_regions ();
5741}
773c5ba7 5742
5743/* Scan the CFG and build a tree of OMP regions.  The root is stored
5744 in root_omp_region. */
5745
5746static void
5747build_omp_regions (void)
5748{
61e47ac8 5749 gcc_assert (root_omp_region == NULL);
773c5ba7 5750 calculate_dominance_info (CDI_DOMINATORS);
28c92cbb 5751 build_omp_regions_1 (ENTRY_BLOCK_PTR, NULL, false);
773c5ba7 5752}
5753
773c5ba7 5754/* Main entry point for expanding OMP-GIMPLE into runtime calls. */
5755
2a1990e9 5756static unsigned int
773c5ba7 5757execute_expand_omp (void)
5758{
5759 build_omp_regions ();
5760
61e47ac8 5761 if (!root_omp_region)
5762 return 0;
773c5ba7 5763
61e47ac8 5764 if (dump_file)
5765 {
5766 fprintf (dump_file, "\nOMP region tree\n\n");
5767 dump_omp_region (dump_file, root_omp_region, 0);
5768 fprintf (dump_file, "\n");
773c5ba7 5769 }
61e47ac8 5770
5771 remove_exit_barriers (root_omp_region);
5772
5773 expand_omp (root_omp_region);
5774
61e47ac8 5775 cleanup_tree_cfg ();
5776
5777 free_omp_regions ();
5778
2a1990e9 5779 return 0;
773c5ba7 5780}
5781
79acaae1 5782/* OMP expansion -- the default pass, run before creation of SSA form. */
5783
773c5ba7 5784static bool
5785gate_expand_omp (void)
5786{
852f689e 5787 return (flag_openmp != 0 && !seen_error ());
773c5ba7 5788}
5789
48e1416a 5790struct gimple_opt_pass pass_expand_omp =
773c5ba7 5791{
20099e35 5792 {
5793 GIMPLE_PASS,
773c5ba7 5794 "ompexp", /* name */
5795 gate_expand_omp, /* gate */
5796 execute_expand_omp, /* execute */
5797 NULL, /* sub */
5798 NULL, /* next */
5799 0, /* static_pass_number */
0b1615c1 5800 TV_NONE, /* tv_id */
773c5ba7 5801 PROP_gimple_any, /* properties_required */
41709826 5802 0, /* properties_provided */
773c5ba7 5803 0, /* properties_destroyed */
5804 0, /* todo_flags_start */
771e2890 5805 0 /* todo_flags_finish */
20099e35 5806 }
773c5ba7 5807};
5808\f
5809/* Routines to lower OpenMP directives into OMP-GIMPLE. */
5810
75a70cf9 5811/* Lower the OpenMP sections directive in the current statement in GSI_P.
5812 CTX is the enclosing OMP context for the current statement. */
773c5ba7 5813
5814static void
75a70cf9 5815lower_omp_sections (gimple_stmt_iterator *gsi_p, omp_context *ctx)
773c5ba7 5816{
75a70cf9 5817 tree block, control;
5818 gimple_stmt_iterator tgsi;
75a70cf9 5819 gimple stmt, new_stmt, bind, t;
e3a19533 5820 gimple_seq ilist, dlist, olist, new_body;
dac18d1a 5821 struct gimplify_ctx gctx;
773c5ba7 5822
75a70cf9 5823 stmt = gsi_stmt (*gsi_p);
773c5ba7 5824
dac18d1a 5825 push_gimplify_context (&gctx);
773c5ba7 5826
5827 dlist = NULL;
5828 ilist = NULL;
75a70cf9 5829 lower_rec_input_clauses (gimple_omp_sections_clauses (stmt),
5830 &ilist, &dlist, ctx);
773c5ba7 5831
e3a19533 5832 new_body = gimple_omp_body (stmt);
5833 gimple_omp_set_body (stmt, NULL);
5834 tgsi = gsi_start (new_body);
5835 for (; !gsi_end_p (tgsi); gsi_next (&tgsi))
773c5ba7 5836 {
5837 omp_context *sctx;
75a70cf9 5838 gimple sec_start;
773c5ba7 5839
75a70cf9 5840 sec_start = gsi_stmt (tgsi);
773c5ba7 5841 sctx = maybe_lookup_ctx (sec_start);
5842 gcc_assert (sctx);
5843
e3a19533 5844 lower_omp (gimple_omp_body_ptr (sec_start), sctx);
5845 gsi_insert_seq_after (&tgsi, gimple_omp_body (sec_start),
5846 GSI_CONTINUE_LINKING);
75a70cf9 5847 gimple_omp_set_body (sec_start, NULL);
773c5ba7 5848
e3a19533 5849 if (gsi_one_before_end_p (tgsi))
773c5ba7 5850 {
75a70cf9 5851 gimple_seq l = NULL;
5852 lower_lastprivate_clauses (gimple_omp_sections_clauses (stmt), NULL,
773c5ba7 5853 &l, ctx);
e3a19533 5854 gsi_insert_seq_after (&tgsi, l, GSI_CONTINUE_LINKING);
75a70cf9 5855 gimple_omp_section_set_last (sec_start);
773c5ba7 5856 }
48e1416a 5857
e3a19533 5858 gsi_insert_after (&tgsi, gimple_build_omp_return (false),
5859 GSI_CONTINUE_LINKING);
773c5ba7 5860 }
1e8e9920 5861
5862 block = make_node (BLOCK);
e3a19533 5863 bind = gimple_build_bind (NULL, new_body, block);
1e8e9920 5864
75a70cf9 5865 olist = NULL;
5866 lower_reduction_clauses (gimple_omp_sections_clauses (stmt), &olist, ctx);
773c5ba7 5867
1d22f541 5868 block = make_node (BLOCK);
75a70cf9 5869 new_stmt = gimple_build_bind (NULL, NULL, block);
e3a19533 5870 gsi_replace (gsi_p, new_stmt, true);
773c5ba7 5871
1d22f541 5872 pop_gimplify_context (new_stmt);
75a70cf9 5873 gimple_bind_append_vars (new_stmt, ctx->block_vars);
5874 BLOCK_VARS (block) = gimple_bind_vars (bind);
1d22f541 5875 if (BLOCK_VARS (block))
5876 TREE_USED (block) = 1;
5877
75a70cf9 5878 new_body = NULL;
5879 gimple_seq_add_seq (&new_body, ilist);
5880 gimple_seq_add_stmt (&new_body, stmt);
5881 gimple_seq_add_stmt (&new_body, gimple_build_omp_sections_switch ());
5882 gimple_seq_add_stmt (&new_body, bind);
61e47ac8 5883
ac6e3339 5884 control = create_tmp_var (unsigned_type_node, ".section");
75a70cf9 5885 t = gimple_build_omp_continue (control, control);
5886 gimple_omp_sections_set_control (stmt, control);
5887 gimple_seq_add_stmt (&new_body, t);
61e47ac8 5888
75a70cf9 5889 gimple_seq_add_seq (&new_body, olist);
5890 gimple_seq_add_seq (&new_body, dlist);
773c5ba7 5891
75a70cf9 5892 new_body = maybe_catch_exception (new_body);
aade31a0 5893
75a70cf9 5894 t = gimple_build_omp_return
5895 (!!find_omp_clause (gimple_omp_sections_clauses (stmt),
5896 OMP_CLAUSE_NOWAIT));
5897 gimple_seq_add_stmt (&new_body, t);
61e47ac8 5898
75a70cf9 5899 gimple_bind_set_body (new_stmt, new_body);
1e8e9920 5900}
5901
5902
773c5ba7 5903/* A subroutine of lower_omp_single. Expand the simple form of
75a70cf9 5904 a GIMPLE_OMP_SINGLE, without a copyprivate clause:
1e8e9920 5905
5906 if (GOMP_single_start ())
5907 BODY;
5908 [ GOMP_barrier (); ] -> unless 'nowait' is present.
773c5ba7 5909
5910 FIXME. It may be better to delay expanding the logic of this until
5911 pass_expand_omp. The expanded logic may make the job more difficult
5912 for a synchronization analysis pass. */
1e8e9920 5913
5914static void
75a70cf9 5915lower_omp_single_simple (gimple single_stmt, gimple_seq *pre_p)
1e8e9920 5916{
e60a6f7b 5917 location_t loc = gimple_location (single_stmt);
5918 tree tlabel = create_artificial_label (loc);
5919 tree flabel = create_artificial_label (loc);
75a70cf9 5920 gimple call, cond;
5921 tree lhs, decl;
5922
b9a16870 5923 decl = builtin_decl_explicit (BUILT_IN_GOMP_SINGLE_START);
75a70cf9 5924 lhs = create_tmp_var (TREE_TYPE (TREE_TYPE (decl)), NULL);
5925 call = gimple_build_call (decl, 0);
5926 gimple_call_set_lhs (call, lhs);
5927 gimple_seq_add_stmt (pre_p, call);
5928
5929 cond = gimple_build_cond (EQ_EXPR, lhs,
389dd41b 5930 fold_convert_loc (loc, TREE_TYPE (lhs),
5931 boolean_true_node),
75a70cf9 5932 tlabel, flabel);
5933 gimple_seq_add_stmt (pre_p, cond);
5934 gimple_seq_add_stmt (pre_p, gimple_build_label (tlabel));
5935 gimple_seq_add_seq (pre_p, gimple_omp_body (single_stmt));
5936 gimple_seq_add_stmt (pre_p, gimple_build_label (flabel));
1e8e9920 5937}
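
/* Illustrative sketch (hypothetical wrapper name): the lowered form
   above as plain C.  GOMP_single_start is the real libgomp entry point
   and returns true in exactly one of the arriving threads; the barrier
   is placed later, by expand_omp_single, unless nowait was given.  */

extern _Bool GOMP_single_start (void);
extern void GOMP_barrier (void);

static void
single_region_sketch (void (*body) (void))
{
  if (GOMP_single_start ())
    body ();
  GOMP_barrier ();
}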
5938
773c5ba7 5939
5940/* A subroutine of lower_omp_single. Expand the copyprivate form of
75a70cf9 5941 a GIMPLE_OMP_SINGLE, with a copyprivate clause:
1e8e9920 5942
5943 #pragma omp single copyprivate (a, b, c)
5944
5945 Create a new structure to hold copies of 'a', 'b' and 'c' and emit:
5946
5947 {
5948 if ((copyout_p = GOMP_single_copy_start ()) == NULL)
5949 {
5950 BODY;
5951 copyout.a = a;
5952 copyout.b = b;
5953 copyout.c = c;
5954 GOMP_single_copy_end (&copyout);
5955 }
5956 else
5957 {
5958 a = copyout_p->a;
5959 b = copyout_p->b;
5960 c = copyout_p->c;
5961 }
5962 GOMP_barrier ();
5963 }
773c5ba7 5964
5965 FIXME. It may be better to delay expanding the logic of this until
5966 pass_expand_omp. The expanded logic may make the job more difficult
5967 for a synchronization analysis pass. */
1e8e9920 5968
5969static void
75a70cf9 5970lower_omp_single_copy (gimple single_stmt, gimple_seq *pre_p, omp_context *ctx)
1e8e9920 5971{
b9a16870 5972 tree ptr_type, t, l0, l1, l2, bfn_decl;
75a70cf9 5973 gimple_seq copyin_seq;
e60a6f7b 5974 location_t loc = gimple_location (single_stmt);
1e8e9920 5975
5976 ctx->sender_decl = create_tmp_var (ctx->record_type, ".omp_copy_o");
5977
5978 ptr_type = build_pointer_type (ctx->record_type);
5979 ctx->receiver_decl = create_tmp_var (ptr_type, ".omp_copy_i");
5980
e60a6f7b 5981 l0 = create_artificial_label (loc);
5982 l1 = create_artificial_label (loc);
5983 l2 = create_artificial_label (loc);
1e8e9920 5984
b9a16870 5985 bfn_decl = builtin_decl_explicit (BUILT_IN_GOMP_SINGLE_COPY_START);
5986 t = build_call_expr_loc (loc, bfn_decl, 0);
389dd41b 5987 t = fold_convert_loc (loc, ptr_type, t);
75a70cf9 5988 gimplify_assign (ctx->receiver_decl, t, pre_p);
1e8e9920 5989
5990 t = build2 (EQ_EXPR, boolean_type_node, ctx->receiver_decl,
5991 build_int_cst (ptr_type, 0));
5992 t = build3 (COND_EXPR, void_type_node, t,
5993 build_and_jump (&l0), build_and_jump (&l1));
5994 gimplify_and_add (t, pre_p);
5995
75a70cf9 5996 gimple_seq_add_stmt (pre_p, gimple_build_label (l0));
1e8e9920 5997
75a70cf9 5998 gimple_seq_add_seq (pre_p, gimple_omp_body (single_stmt));
1e8e9920 5999
6000 copyin_seq = NULL;
75a70cf9 6001 lower_copyprivate_clauses (gimple_omp_single_clauses (single_stmt), pre_p,
1e8e9920 6002 &copyin_seq, ctx);
6003
389dd41b 6004 t = build_fold_addr_expr_loc (loc, ctx->sender_decl);
b9a16870 6005 bfn_decl = builtin_decl_explicit (BUILT_IN_GOMP_SINGLE_COPY_END);
6006 t = build_call_expr_loc (loc, bfn_decl, 1, t);
1e8e9920 6007 gimplify_and_add (t, pre_p);
6008
6009 t = build_and_jump (&l2);
6010 gimplify_and_add (t, pre_p);
6011
75a70cf9 6012 gimple_seq_add_stmt (pre_p, gimple_build_label (l1));
1e8e9920 6013
75a70cf9 6014 gimple_seq_add_seq (pre_p, copyin_seq);
1e8e9920 6015
75a70cf9 6016 gimple_seq_add_stmt (pre_p, gimple_build_label (l2));
1e8e9920 6017}
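
/* Illustrative sketch (hypothetical names): the copyprivate protocol
   above, as plain C for "#pragma omp single copyprivate (a)".
   GOMP_single_copy_start and GOMP_single_copy_end are the real libgomp
   entry points.  The terminal barrier (placed during expansion) keeps
   the copyout record alive until every thread has copied from it.  */

extern void *GOMP_single_copy_start (void);
extern void GOMP_single_copy_end (void *);

static void
single_copyprivate_sketch (int *a)
{
  struct { int a; } copyout, *copyout_p;

  copyout_p = GOMP_single_copy_start ();
  if (copyout_p == NULL)
    {
      *a = 42;			/* BODY, run by the winning thread.  */
      copyout.a = *a;
      GOMP_single_copy_end (&copyout);
    }
  else
    *a = copyout_p->a;
}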
6018
773c5ba7 6019
1e8e9920 6020/* Lower code for an OpenMP single directive. */
6021
6022static void
75a70cf9 6023lower_omp_single (gimple_stmt_iterator *gsi_p, omp_context *ctx)
1e8e9920 6024{
75a70cf9 6025 tree block;
6026 gimple t, bind, single_stmt = gsi_stmt (*gsi_p);
6027 gimple_seq bind_body, dlist;
dac18d1a 6028 struct gimplify_ctx gctx;
1e8e9920 6029
dac18d1a 6030 push_gimplify_context (&gctx);
1e8e9920 6031
e3a19533 6032 block = make_node (BLOCK);
6033 bind = gimple_build_bind (NULL, NULL, block);
6034 gsi_replace (gsi_p, bind, true);
75a70cf9 6035 bind_body = NULL;
e3a19533 6036 dlist = NULL;
75a70cf9 6037 lower_rec_input_clauses (gimple_omp_single_clauses (single_stmt),
6038 &bind_body, &dlist, ctx);
e3a19533 6039 lower_omp (gimple_omp_body_ptr (single_stmt), ctx);
1e8e9920 6040
75a70cf9 6041 gimple_seq_add_stmt (&bind_body, single_stmt);
1e8e9920 6042
6043 if (ctx->record_type)
75a70cf9 6044 lower_omp_single_copy (single_stmt, &bind_body, ctx);
1e8e9920 6045 else
75a70cf9 6046 lower_omp_single_simple (single_stmt, &bind_body);
6047
6048 gimple_omp_set_body (single_stmt, NULL);
1e8e9920 6049
75a70cf9 6050 gimple_seq_add_seq (&bind_body, dlist);
61e47ac8 6051
75a70cf9 6052 bind_body = maybe_catch_exception (bind_body);
61e47ac8 6053
48e1416a 6054 t = gimple_build_omp_return
75a70cf9 6055 (!!find_omp_clause (gimple_omp_single_clauses (single_stmt),
6056 OMP_CLAUSE_NOWAIT));
6057 gimple_seq_add_stmt (&bind_body, t);
e3a19533 6058 gimple_bind_set_body (bind, bind_body);
61e47ac8 6059
1e8e9920 6060 pop_gimplify_context (bind);
773c5ba7 6061
75a70cf9 6062 gimple_bind_append_vars (bind, ctx->block_vars);
6063 BLOCK_VARS (block) = ctx->block_vars;
1d22f541 6064 if (BLOCK_VARS (block))
6065 TREE_USED (block) = 1;
1e8e9920 6066}
6067
773c5ba7 6068
1e8e9920 6069/* Lower code for an OpenMP master directive. */
6070
6071static void
75a70cf9 6072lower_omp_master (gimple_stmt_iterator *gsi_p, omp_context *ctx)
1e8e9920 6073{
b9a16870 6074 tree block, lab = NULL, x, bfn_decl;
75a70cf9 6075 gimple stmt = gsi_stmt (*gsi_p), bind;
389dd41b 6076 location_t loc = gimple_location (stmt);
75a70cf9 6077 gimple_seq tseq;
dac18d1a 6078 struct gimplify_ctx gctx;
1e8e9920 6079
dac18d1a 6080 push_gimplify_context (&gctx);
1e8e9920 6081
6082 block = make_node (BLOCK);
e3a19533 6083 bind = gimple_build_bind (NULL, NULL, block);
6084 gsi_replace (gsi_p, bind, true);
6085 gimple_bind_add_stmt (bind, stmt);
61e47ac8 6086
b9a16870 6087 bfn_decl = builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM);
6088 x = build_call_expr_loc (loc, bfn_decl, 0);
1e8e9920 6089 x = build2 (EQ_EXPR, boolean_type_node, x, integer_zero_node);
6090 x = build3 (COND_EXPR, void_type_node, x, NULL, build_and_jump (&lab));
75a70cf9 6091 tseq = NULL;
6092 gimplify_and_add (x, &tseq);
6093 gimple_bind_add_seq (bind, tseq);
1e8e9920 6094
e3a19533 6095 lower_omp (gimple_omp_body_ptr (stmt), ctx);
75a70cf9 6096 gimple_omp_set_body (stmt, maybe_catch_exception (gimple_omp_body (stmt)));
6097 gimple_bind_add_seq (bind, gimple_omp_body (stmt));
6098 gimple_omp_set_body (stmt, NULL);
1e8e9920 6099
75a70cf9 6100 gimple_bind_add_stmt (bind, gimple_build_label (lab));
61e47ac8 6101
75a70cf9 6102 gimple_bind_add_stmt (bind, gimple_build_omp_return (true));
61e47ac8 6103
1e8e9920 6104 pop_gimplify_context (bind);
773c5ba7 6105
75a70cf9 6106 gimple_bind_append_vars (bind, ctx->block_vars);
6107 BLOCK_VARS (block) = ctx->block_vars;
1e8e9920 6108}
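
/* Illustrative sketch (hypothetical wrapper name): lower_omp_master
   gates BODY on the thread number; the region return built above is a
   nowait one, so no barrier is implied at either end.  */

extern int omp_get_thread_num (void);

static void
master_region_sketch (void (*body) (void))
{
  if (omp_get_thread_num () == 0)
    body ();
}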
6109
773c5ba7 6110
1e8e9920 6111/* Lower code for an OpenMP ordered directive. */
6112
6113static void
75a70cf9 6114lower_omp_ordered (gimple_stmt_iterator *gsi_p, omp_context *ctx)
1e8e9920 6115{
75a70cf9 6116 tree block;
6117 gimple stmt = gsi_stmt (*gsi_p), bind, x;
dac18d1a 6118 struct gimplify_ctx gctx;
1e8e9920 6119
dac18d1a 6120 push_gimplify_context (&gctx);
1e8e9920 6121
6122 block = make_node (BLOCK);
e3a19533 6123 bind = gimple_build_bind (NULL, NULL, block);
6124 gsi_replace (gsi_p, bind, true);
6125 gimple_bind_add_stmt (bind, stmt);
61e47ac8 6126
b9a16870 6127 x = gimple_build_call (builtin_decl_explicit (BUILT_IN_GOMP_ORDERED_START),
6128 0);
75a70cf9 6129 gimple_bind_add_stmt (bind, x);
1e8e9920 6130
e3a19533 6131 lower_omp (gimple_omp_body_ptr (stmt), ctx);
75a70cf9 6132 gimple_omp_set_body (stmt, maybe_catch_exception (gimple_omp_body (stmt)));
6133 gimple_bind_add_seq (bind, gimple_omp_body (stmt));
6134 gimple_omp_set_body (stmt, NULL);
1e8e9920 6135
b9a16870 6136 x = gimple_build_call (builtin_decl_explicit (BUILT_IN_GOMP_ORDERED_END), 0);
75a70cf9 6137 gimple_bind_add_stmt (bind, x);
61e47ac8 6138
75a70cf9 6139 gimple_bind_add_stmt (bind, gimple_build_omp_return (true));
61e47ac8 6140
1e8e9920 6141 pop_gimplify_context (bind);
773c5ba7 6142
75a70cf9 6143 gimple_bind_append_vars (bind, ctx->block_vars);
6144 BLOCK_VARS (block) = gimple_bind_vars (bind);
1e8e9920 6145}
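
/* Illustrative sketch (hypothetical wrapper name): the ordered region
   simply brackets BODY with the libgomp ordering entry points.  */

extern void GOMP_ordered_start (void);
extern void GOMP_ordered_end (void);

static void
ordered_region_sketch (void (*body) (void))
{
  GOMP_ordered_start ();
  body ();
  GOMP_ordered_end ();
}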
6146
1e8e9920 6147
75a70cf9 6148/* Lower a GIMPLE_OMP_CRITICAL statement.  This is a relatively simple
1e8e9920 6149 substitution of a couple of function calls.  But the NAMED case
 6150 requires that languages coordinate a symbol name. It is therefore
6151 best put here in common code. */
6152
6153static GTY((param1_is (tree), param2_is (tree)))
6154 splay_tree critical_name_mutexes;
6155
6156static void
75a70cf9 6157lower_omp_critical (gimple_stmt_iterator *gsi_p, omp_context *ctx)
1e8e9920 6158{
75a70cf9 6159 tree block;
6160 tree name, lock, unlock;
6161 gimple stmt = gsi_stmt (*gsi_p), bind;
389dd41b 6162 location_t loc = gimple_location (stmt);
75a70cf9 6163 gimple_seq tbody;
dac18d1a 6164 struct gimplify_ctx gctx;
1e8e9920 6165
75a70cf9 6166 name = gimple_omp_critical_name (stmt);
1e8e9920 6167 if (name)
6168 {
c2f47e15 6169 tree decl;
1e8e9920 6170 splay_tree_node n;
6171
6172 if (!critical_name_mutexes)
6173 critical_name_mutexes
ba72912a 6174 = splay_tree_new_ggc (splay_tree_compare_pointers,
6175 ggc_alloc_splay_tree_tree_node_tree_node_splay_tree_s,
6176 ggc_alloc_splay_tree_tree_node_tree_node_splay_tree_node_s);
1e8e9920 6177
6178 n = splay_tree_lookup (critical_name_mutexes, (splay_tree_key) name);
6179 if (n == NULL)
6180 {
6181 char *new_str;
6182
6183 decl = create_tmp_var_raw (ptr_type_node, NULL);
6184
6185 new_str = ACONCAT ((".gomp_critical_user_",
6186 IDENTIFIER_POINTER (name), NULL));
6187 DECL_NAME (decl) = get_identifier (new_str);
6188 TREE_PUBLIC (decl) = 1;
6189 TREE_STATIC (decl) = 1;
6190 DECL_COMMON (decl) = 1;
6191 DECL_ARTIFICIAL (decl) = 1;
6192 DECL_IGNORED_P (decl) = 1;
1d416bd7 6193 varpool_finalize_decl (decl);
1e8e9920 6194
6195 splay_tree_insert (critical_name_mutexes, (splay_tree_key) name,
6196 (splay_tree_value) decl);
6197 }
6198 else
6199 decl = (tree) n->value;
6200
b9a16870 6201 lock = builtin_decl_explicit (BUILT_IN_GOMP_CRITICAL_NAME_START);
389dd41b 6202 lock = build_call_expr_loc (loc, lock, 1, build_fold_addr_expr_loc (loc, decl));
1e8e9920 6203
b9a16870 6204 unlock = builtin_decl_explicit (BUILT_IN_GOMP_CRITICAL_NAME_END);
389dd41b 6205 unlock = build_call_expr_loc (loc, unlock, 1,
6206 build_fold_addr_expr_loc (loc, decl));
1e8e9920 6207 }
6208 else
6209 {
b9a16870 6210 lock = builtin_decl_explicit (BUILT_IN_GOMP_CRITICAL_START);
389dd41b 6211 lock = build_call_expr_loc (loc, lock, 0);
1e8e9920 6212
b9a16870 6213 unlock = builtin_decl_explicit (BUILT_IN_GOMP_CRITICAL_END);
389dd41b 6214 unlock = build_call_expr_loc (loc, unlock, 0);
1e8e9920 6215 }
6216
dac18d1a 6217 push_gimplify_context (&gctx);
1e8e9920 6218
6219 block = make_node (BLOCK);
e3a19533 6220 bind = gimple_build_bind (NULL, NULL, block);
6221 gsi_replace (gsi_p, bind, true);
6222 gimple_bind_add_stmt (bind, stmt);
61e47ac8 6223
75a70cf9 6224 tbody = gimple_bind_body (bind);
6225 gimplify_and_add (lock, &tbody);
6226 gimple_bind_set_body (bind, tbody);
1e8e9920 6227
e3a19533 6228 lower_omp (gimple_omp_body_ptr (stmt), ctx);
75a70cf9 6229 gimple_omp_set_body (stmt, maybe_catch_exception (gimple_omp_body (stmt)));
6230 gimple_bind_add_seq (bind, gimple_omp_body (stmt));
6231 gimple_omp_set_body (stmt, NULL);
1e8e9920 6232
75a70cf9 6233 tbody = gimple_bind_body (bind);
6234 gimplify_and_add (unlock, &tbody);
6235 gimple_bind_set_body (bind, tbody);
61e47ac8 6236
75a70cf9 6237 gimple_bind_add_stmt (bind, gimple_build_omp_return (true));
1e8e9920 6238
6239 pop_gimplify_context (bind);
75a70cf9 6240 gimple_bind_append_vars (bind, ctx->block_vars);
6241 BLOCK_VARS (block) = gimple_bind_vars (bind);
773c5ba7 6242}
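
/* Illustrative sketch (hypothetical names): the named shape produced
   above for "#pragma omp critical (foo)".  The lock cell is a common
   symbol named ".gomp_critical_user_foo", so every translation unit
   that uses the name agrees on one mutex; an unnamed critical calls the
   argumentless GOMP_critical_start/GOMP_critical_end pair instead.  */

extern void GOMP_critical_name_start (void **);
extern void GOMP_critical_name_end (void **);

static void *gomp_critical_user_foo_sketch;	/* Stand-in for the common decl.  */

static void
critical_region_sketch (void (*body) (void))
{
  GOMP_critical_name_start (&gomp_critical_user_foo_sketch);
  body ();
  GOMP_critical_name_end (&gomp_critical_user_foo_sketch);
}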
6243
6244
6245/* A subroutine of lower_omp_for. Generate code to emit the predicate
6246 for a lastprivate clause. Given a loop control predicate of (V
6247 cond N2), we gate the clause on (!(V cond N2)). The lowered form
1e4afe3c 6248 is appended to *DLIST; iterator initialization is appended to
6249 *BODY_P. */
773c5ba7 6250
6251static void
75a70cf9 6252lower_omp_for_lastprivate (struct omp_for_data *fd, gimple_seq *body_p,
6253 gimple_seq *dlist, struct omp_context *ctx)
773c5ba7 6254{
75a70cf9 6255 tree clauses, cond, vinit;
773c5ba7 6256 enum tree_code cond_code;
75a70cf9 6257 gimple_seq stmts;
48e1416a 6258
fd6481cf 6259 cond_code = fd->loop.cond_code;
773c5ba7 6260 cond_code = cond_code == LT_EXPR ? GE_EXPR : LE_EXPR;
6261
6262 /* When possible, use a strict equality expression. This can let VRP
6263 type optimizations deduce the value and remove a copy. */
fd6481cf 6264 if (host_integerp (fd->loop.step, 0))
773c5ba7 6265 {
fd6481cf 6266 HOST_WIDE_INT step = TREE_INT_CST_LOW (fd->loop.step);
773c5ba7 6267 if (step == 1 || step == -1)
6268 cond_code = EQ_EXPR;
6269 }
6270
fd6481cf 6271 cond = build2 (cond_code, boolean_type_node, fd->loop.v, fd->loop.n2);
773c5ba7 6272
75a70cf9 6273 clauses = gimple_omp_for_clauses (fd->for_stmt);
1e4afe3c 6274 stmts = NULL;
6275 lower_lastprivate_clauses (clauses, cond, &stmts, ctx);
75a70cf9 6276 if (!gimple_seq_empty_p (stmts))
1e4afe3c 6277 {
75a70cf9 6278 gimple_seq_add_seq (&stmts, *dlist);
fd6481cf 6279 *dlist = stmts;
1e4afe3c 6280
6281 /* Optimize: v = 0; is usually cheaper than v = some_other_constant. */
fd6481cf 6282 vinit = fd->loop.n1;
1e4afe3c 6283 if (cond_code == EQ_EXPR
fd6481cf 6284 && host_integerp (fd->loop.n2, 0)
6285 && ! integer_zerop (fd->loop.n2))
6286 vinit = build_int_cst (TREE_TYPE (fd->loop.v), 0);
1e4afe3c 6287
6288 /* Initialize the iterator variable, so that threads that don't execute
6289 any iterations don't execute the lastprivate clauses by accident. */
75a70cf9 6290 gimplify_assign (fd->loop.v, vinit, body_p);
1e4afe3c 6291 }
773c5ba7 6292}
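
/* Illustrative sketch (hypothetical names): the gate for a loop
   "for (v = n1; v < n2; v += 1)" with "lastprivate (x)".  Inverting
   the control predicate gives "v >= n2", and with a unit step that is
   narrowed to the strict equality chosen above, which value-range
   propagation can often fold.  */

static void
lastprivate_gate_sketch (int v, int n2, int *x, int x_priv)
{
  if (v == n2)			/* i.e. !(v < n2), after the last chunk.  */
    *x = x_priv;		/* Copy the private value out.  */
}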
6293
6294
6295/* Lower code for an OpenMP loop directive. */
6296
6297static void
75a70cf9 6298lower_omp_for (gimple_stmt_iterator *gsi_p, omp_context *ctx)
773c5ba7 6299{
75a70cf9 6300 tree *rhs_p, block;
773c5ba7 6301 struct omp_for_data fd;
75a70cf9 6302 gimple stmt = gsi_stmt (*gsi_p), new_stmt;
f018d957 6303 gimple_seq omp_for_body, body, dlist;
75a70cf9 6304 size_t i;
dac18d1a 6305 struct gimplify_ctx gctx;
773c5ba7 6306
dac18d1a 6307 push_gimplify_context (&gctx);
773c5ba7 6308
e3a19533 6309 lower_omp (gimple_omp_for_pre_body_ptr (stmt), ctx);
6310 lower_omp (gimple_omp_body_ptr (stmt), ctx);
773c5ba7 6311
1d22f541 6312 block = make_node (BLOCK);
75a70cf9 6313 new_stmt = gimple_build_bind (NULL, NULL, block);
e3a19533 6314 /* Replace at gsi_p right away, so that 'stmt' is no longer a member
 6315 of a sequence, as we're going to add it to a different
 6316 one below. */
6317 gsi_replace (gsi_p, new_stmt, true);
1d22f541 6318
773c5ba7 6319 /* Move declaration of temporaries in the loop body before we make
6320 it go away. */
75a70cf9 6321 omp_for_body = gimple_omp_body (stmt);
6322 if (!gimple_seq_empty_p (omp_for_body)
6323 && gimple_code (gimple_seq_first_stmt (omp_for_body)) == GIMPLE_BIND)
6324 {
6325 tree vars = gimple_bind_vars (gimple_seq_first_stmt (omp_for_body));
6326 gimple_bind_append_vars (new_stmt, vars);
6327 }
773c5ba7 6328
75a70cf9 6329 /* The pre-body and input clauses go before the lowered GIMPLE_OMP_FOR. */
773c5ba7 6330 dlist = NULL;
75a70cf9 6331 body = NULL;
6332 lower_rec_input_clauses (gimple_omp_for_clauses (stmt), &body, &dlist, ctx);
6333 gimple_seq_add_seq (&body, gimple_omp_for_pre_body (stmt));
773c5ba7 6334
6335 /* Lower the header expressions. At this point, we can assume that
6336 the header is of the form:
6337
6338 #pragma omp for (V = VAL1; V {<|>|<=|>=} VAL2; V = V [+-] VAL3)
6339
6340 We just need to make sure that VAL1, VAL2 and VAL3 are lowered
6341 using the .omp_data_s mapping, if needed. */
75a70cf9 6342 for (i = 0; i < gimple_omp_for_collapse (stmt); i++)
fd6481cf 6343 {
75a70cf9 6344 rhs_p = gimple_omp_for_initial_ptr (stmt, i);
fd6481cf 6345 if (!is_gimple_min_invariant (*rhs_p))
75a70cf9 6346 *rhs_p = get_formal_tmp_var (*rhs_p, &body);
fd6481cf 6347
75a70cf9 6348 rhs_p = gimple_omp_for_final_ptr (stmt, i);
fd6481cf 6349 if (!is_gimple_min_invariant (*rhs_p))
75a70cf9 6350 *rhs_p = get_formal_tmp_var (*rhs_p, &body);
fd6481cf 6351
75a70cf9 6352 rhs_p = &TREE_OPERAND (gimple_omp_for_incr (stmt, i), 1);
fd6481cf 6353 if (!is_gimple_min_invariant (*rhs_p))
75a70cf9 6354 *rhs_p = get_formal_tmp_var (*rhs_p, &body);
fd6481cf 6355 }
773c5ba7 6356
6357 /* Once lowered, extract the bounds and clauses. */
fd6481cf 6358 extract_omp_for_data (stmt, &fd, NULL);
773c5ba7 6359
75a70cf9 6360 lower_omp_for_lastprivate (&fd, &body, &dlist, ctx);
773c5ba7 6361
75a70cf9 6362 gimple_seq_add_stmt (&body, stmt);
6363 gimple_seq_add_seq (&body, gimple_omp_body (stmt));
61e47ac8 6364
75a70cf9 6365 gimple_seq_add_stmt (&body, gimple_build_omp_continue (fd.loop.v,
6366 fd.loop.v));
61e47ac8 6367
773c5ba7 6368 /* After the loop, add exit clauses. */
75a70cf9 6369 lower_reduction_clauses (gimple_omp_for_clauses (stmt), &body, ctx);
6370 gimple_seq_add_seq (&body, dlist);
773c5ba7 6371
75a70cf9 6372 body = maybe_catch_exception (body);
aade31a0 6373
61e47ac8 6374 /* Region exit marker goes at the end of the loop body. */
75a70cf9 6375 gimple_seq_add_stmt (&body, gimple_build_omp_return (fd.have_nowait));
773c5ba7 6376
1d22f541 6377 pop_gimplify_context (new_stmt);
75a70cf9 6378
6379 gimple_bind_append_vars (new_stmt, ctx->block_vars);
6380 BLOCK_VARS (block) = gimple_bind_vars (new_stmt);
1d22f541 6381 if (BLOCK_VARS (block))
6382 TREE_USED (block) = 1;
773c5ba7 6383
75a70cf9 6384 gimple_bind_set_body (new_stmt, body);
6385 gimple_omp_set_body (stmt, NULL);
6386 gimple_omp_for_set_pre_body (stmt, NULL);
1e8e9920 6387}
6388
48e1416a 6389/* Callback for walk_stmts. Check if the current statement only contains
75a70cf9 6390 GIMPLE_OMP_FOR or GIMPLE_OMP_SECTIONS. */
de7ef844 6391
6392static tree
75a70cf9 6393check_combined_parallel (gimple_stmt_iterator *gsi_p,
6394 bool *handled_ops_p,
6395 struct walk_stmt_info *wi)
de7ef844 6396{
4077bf7a 6397 int *info = (int *) wi->info;
75a70cf9 6398 gimple stmt = gsi_stmt (*gsi_p);
de7ef844 6399
75a70cf9 6400 *handled_ops_p = true;
6401 switch (gimple_code (stmt))
de7ef844 6402 {
75a70cf9 6403 WALK_SUBSTMTS;
6404
6405 case GIMPLE_OMP_FOR:
6406 case GIMPLE_OMP_SECTIONS:
de7ef844 6407 *info = *info == 0 ? 1 : -1;
6408 break;
6409 default:
6410 *info = -1;
6411 break;
6412 }
6413 return NULL;
6414}
773c5ba7 6415
fd6481cf 6416struct omp_taskcopy_context
6417{
6418 /* This field must be at the beginning, as we do "inheritance": Some
6419 callback functions for tree-inline.c (e.g., omp_copy_decl)
6420 receive a copy_body_data pointer that is up-casted to an
6421 omp_context pointer. */
6422 copy_body_data cb;
6423 omp_context *ctx;
6424};
6425
6426static tree
6427task_copyfn_copy_decl (tree var, copy_body_data *cb)
6428{
6429 struct omp_taskcopy_context *tcctx = (struct omp_taskcopy_context *) cb;
6430
6431 if (splay_tree_lookup (tcctx->ctx->sfield_map, (splay_tree_key) var))
6432 return create_tmp_var (TREE_TYPE (var), NULL);
6433
6434 return var;
6435}
6436
6437static tree
6438task_copyfn_remap_type (struct omp_taskcopy_context *tcctx, tree orig_type)
6439{
6440 tree name, new_fields = NULL, type, f;
6441
6442 type = lang_hooks.types.make_type (RECORD_TYPE);
6443 name = DECL_NAME (TYPE_NAME (orig_type));
e60a6f7b 6444 name = build_decl (gimple_location (tcctx->ctx->stmt),
6445 TYPE_DECL, name, type);
fd6481cf 6446 TYPE_NAME (type) = name;
6447
6448 for (f = TYPE_FIELDS (orig_type); f ; f = TREE_CHAIN (f))
6449 {
6450 tree new_f = copy_node (f);
6451 DECL_CONTEXT (new_f) = type;
6452 TREE_TYPE (new_f) = remap_type (TREE_TYPE (f), &tcctx->cb);
6453 TREE_CHAIN (new_f) = new_fields;
75a70cf9 6454 walk_tree (&DECL_SIZE (new_f), copy_tree_body_r, &tcctx->cb, NULL);
6455 walk_tree (&DECL_SIZE_UNIT (new_f), copy_tree_body_r, &tcctx->cb, NULL);
6456 walk_tree (&DECL_FIELD_OFFSET (new_f), copy_tree_body_r,
6457 &tcctx->cb, NULL);
fd6481cf 6458 new_fields = new_f;
6459 *pointer_map_insert (tcctx->cb.decl_map, f) = new_f;
6460 }
6461 TYPE_FIELDS (type) = nreverse (new_fields);
6462 layout_type (type);
6463 return type;
6464}
6465
6466/* Create task copyfn. */
6467
6468static void
75a70cf9 6469create_task_copyfn (gimple task_stmt, omp_context *ctx)
fd6481cf 6470{
6471 struct function *child_cfun;
6472 tree child_fn, t, c, src, dst, f, sf, arg, sarg, decl;
6473 tree record_type, srecord_type, bind, list;
6474 bool record_needs_remap = false, srecord_needs_remap = false;
6475 splay_tree_node n;
6476 struct omp_taskcopy_context tcctx;
dac18d1a 6477 struct gimplify_ctx gctx;
389dd41b 6478 location_t loc = gimple_location (task_stmt);
fd6481cf 6479
75a70cf9 6480 child_fn = gimple_omp_task_copy_fn (task_stmt);
fd6481cf 6481 child_cfun = DECL_STRUCT_FUNCTION (child_fn);
6482 gcc_assert (child_cfun->cfg == NULL);
fd6481cf 6483 DECL_SAVED_TREE (child_fn) = alloc_stmt_list ();
6484
6485 /* Reset DECL_CONTEXT on function arguments. */
1767a056 6486 for (t = DECL_ARGUMENTS (child_fn); t; t = DECL_CHAIN (t))
fd6481cf 6487 DECL_CONTEXT (t) = child_fn;
6488
6489 /* Populate the function. */
dac18d1a 6490 push_gimplify_context (&gctx);
fd6481cf 6491 current_function_decl = child_fn;
6492
6493 bind = build3 (BIND_EXPR, void_type_node, NULL, NULL, NULL);
6494 TREE_SIDE_EFFECTS (bind) = 1;
6495 list = NULL;
6496 DECL_SAVED_TREE (child_fn) = bind;
75a70cf9 6497 DECL_SOURCE_LOCATION (child_fn) = gimple_location (task_stmt);
fd6481cf 6498
6499 /* Remap src and dst argument types if needed. */
6500 record_type = ctx->record_type;
6501 srecord_type = ctx->srecord_type;
1767a056 6502 for (f = TYPE_FIELDS (record_type); f ; f = DECL_CHAIN (f))
fd6481cf 6503 if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
6504 {
6505 record_needs_remap = true;
6506 break;
6507 }
1767a056 6508 for (f = TYPE_FIELDS (srecord_type); f ; f = DECL_CHAIN (f))
fd6481cf 6509 if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
6510 {
6511 srecord_needs_remap = true;
6512 break;
6513 }
6514
6515 if (record_needs_remap || srecord_needs_remap)
6516 {
6517 memset (&tcctx, '\0', sizeof (tcctx));
6518 tcctx.cb.src_fn = ctx->cb.src_fn;
6519 tcctx.cb.dst_fn = child_fn;
53f79206 6520 tcctx.cb.src_node = cgraph_get_node (tcctx.cb.src_fn);
6521 gcc_checking_assert (tcctx.cb.src_node);
fd6481cf 6522 tcctx.cb.dst_node = tcctx.cb.src_node;
6523 tcctx.cb.src_cfun = ctx->cb.src_cfun;
6524 tcctx.cb.copy_decl = task_copyfn_copy_decl;
e38def9c 6525 tcctx.cb.eh_lp_nr = 0;
fd6481cf 6526 tcctx.cb.transform_call_graph_edges = CB_CGE_MOVE;
6527 tcctx.cb.decl_map = pointer_map_create ();
6528 tcctx.ctx = ctx;
6529
6530 if (record_needs_remap)
6531 record_type = task_copyfn_remap_type (&tcctx, record_type);
6532 if (srecord_needs_remap)
6533 srecord_type = task_copyfn_remap_type (&tcctx, srecord_type);
6534 }
6535 else
6536 tcctx.cb.decl_map = NULL;
6537
6538 push_cfun (child_cfun);
6539
6540 arg = DECL_ARGUMENTS (child_fn);
6541 TREE_TYPE (arg) = build_pointer_type (record_type);
1767a056 6542 sarg = DECL_CHAIN (arg);
fd6481cf 6543 TREE_TYPE (sarg) = build_pointer_type (srecord_type);
6544
 6545  /* First pass: initialize the temporaries referenced by the sizes
 6546     and field offsets of record_type and srecord_type.  */
6547 if (tcctx.cb.decl_map)
75a70cf9 6548 for (c = gimple_omp_task_clauses (task_stmt); c; c = OMP_CLAUSE_CHAIN (c))
fd6481cf 6549 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_FIRSTPRIVATE)
6550 {
6551 tree *p;
6552
6553 decl = OMP_CLAUSE_DECL (c);
6554 p = (tree *) pointer_map_contains (tcctx.cb.decl_map, decl);
6555 if (p == NULL)
6556 continue;
6557 n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
6558 sf = (tree) n->value;
6559 sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
182cf5a9 6560 src = build_simple_mem_ref_loc (loc, sarg);
445d06b6 6561 src = omp_build_component_ref (src, sf);
75a70cf9 6562 t = build2 (MODIFY_EXPR, TREE_TYPE (*p), *p, src);
fd6481cf 6563 append_to_statement_list (t, &list);
6564 }
6565
 6566  /* Second pass: copy shared variable pointers and copy-construct
 6567     non-VLA firstprivate variables.  */
75a70cf9 6568 for (c = gimple_omp_task_clauses (task_stmt); c; c = OMP_CLAUSE_CHAIN (c))
fd6481cf 6569 switch (OMP_CLAUSE_CODE (c))
6570 {
6571 case OMP_CLAUSE_SHARED:
6572 decl = OMP_CLAUSE_DECL (c);
6573 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
6574 if (n == NULL)
6575 break;
6576 f = (tree) n->value;
6577 if (tcctx.cb.decl_map)
6578 f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
6579 n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
6580 sf = (tree) n->value;
6581 if (tcctx.cb.decl_map)
6582 sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
182cf5a9 6583 src = build_simple_mem_ref_loc (loc, sarg);
445d06b6 6584 src = omp_build_component_ref (src, sf);
182cf5a9 6585 dst = build_simple_mem_ref_loc (loc, arg);
445d06b6 6586 dst = omp_build_component_ref (dst, f);
75a70cf9 6587 t = build2 (MODIFY_EXPR, TREE_TYPE (dst), dst, src);
fd6481cf 6588 append_to_statement_list (t, &list);
6589 break;
6590 case OMP_CLAUSE_FIRSTPRIVATE:
6591 decl = OMP_CLAUSE_DECL (c);
6592 if (is_variable_sized (decl))
6593 break;
6594 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
6595 if (n == NULL)
6596 break;
6597 f = (tree) n->value;
6598 if (tcctx.cb.decl_map)
6599 f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
6600 n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
6601 if (n != NULL)
6602 {
6603 sf = (tree) n->value;
6604 if (tcctx.cb.decl_map)
6605 sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
182cf5a9 6606 src = build_simple_mem_ref_loc (loc, sarg);
445d06b6 6607 src = omp_build_component_ref (src, sf);
fd6481cf 6608 if (use_pointer_for_field (decl, NULL) || is_reference (decl))
182cf5a9 6609 src = build_simple_mem_ref_loc (loc, src);
fd6481cf 6610 }
6611 else
6612 src = decl;
182cf5a9 6613 dst = build_simple_mem_ref_loc (loc, arg);
445d06b6 6614 dst = omp_build_component_ref (dst, f);
fd6481cf 6615 t = lang_hooks.decls.omp_clause_copy_ctor (c, dst, src);
6616 append_to_statement_list (t, &list);
6617 break;
6618 case OMP_CLAUSE_PRIVATE:
6619 if (! OMP_CLAUSE_PRIVATE_OUTER_REF (c))
6620 break;
6621 decl = OMP_CLAUSE_DECL (c);
6622 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
6623 f = (tree) n->value;
6624 if (tcctx.cb.decl_map)
6625 f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
6626 n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
6627 if (n != NULL)
6628 {
6629 sf = (tree) n->value;
6630 if (tcctx.cb.decl_map)
6631 sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
182cf5a9 6632 src = build_simple_mem_ref_loc (loc, sarg);
445d06b6 6633 src = omp_build_component_ref (src, sf);
fd6481cf 6634 if (use_pointer_for_field (decl, NULL))
182cf5a9 6635 src = build_simple_mem_ref_loc (loc, src);
fd6481cf 6636 }
6637 else
6638 src = decl;
182cf5a9 6639 dst = build_simple_mem_ref_loc (loc, arg);
445d06b6 6640 dst = omp_build_component_ref (dst, f);
75a70cf9 6641 t = build2 (MODIFY_EXPR, TREE_TYPE (dst), dst, src);
fd6481cf 6642 append_to_statement_list (t, &list);
6643 break;
6644 default:
6645 break;
6646 }
6647
6648 /* Last pass: handle VLA firstprivates. */
6649 if (tcctx.cb.decl_map)
75a70cf9 6650 for (c = gimple_omp_task_clauses (task_stmt); c; c = OMP_CLAUSE_CHAIN (c))
fd6481cf 6651 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_FIRSTPRIVATE)
6652 {
6653 tree ind, ptr, df;
6654
6655 decl = OMP_CLAUSE_DECL (c);
6656 if (!is_variable_sized (decl))
6657 continue;
6658 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
6659 if (n == NULL)
6660 continue;
6661 f = (tree) n->value;
6662 f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
6663 gcc_assert (DECL_HAS_VALUE_EXPR_P (decl));
6664 ind = DECL_VALUE_EXPR (decl);
6665 gcc_assert (TREE_CODE (ind) == INDIRECT_REF);
6666 gcc_assert (DECL_P (TREE_OPERAND (ind, 0)));
6667 n = splay_tree_lookup (ctx->sfield_map,
6668 (splay_tree_key) TREE_OPERAND (ind, 0));
6669 sf = (tree) n->value;
6670 sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
182cf5a9 6671 src = build_simple_mem_ref_loc (loc, sarg);
445d06b6 6672 src = omp_build_component_ref (src, sf);
182cf5a9 6673 src = build_simple_mem_ref_loc (loc, src);
6674 dst = build_simple_mem_ref_loc (loc, arg);
445d06b6 6675 dst = omp_build_component_ref (dst, f);
fd6481cf 6676 t = lang_hooks.decls.omp_clause_copy_ctor (c, dst, src);
6677 append_to_statement_list (t, &list);
6678 n = splay_tree_lookup (ctx->field_map,
6679 (splay_tree_key) TREE_OPERAND (ind, 0));
6680 df = (tree) n->value;
6681 df = *(tree *) pointer_map_contains (tcctx.cb.decl_map, df);
182cf5a9 6682 ptr = build_simple_mem_ref_loc (loc, arg);
445d06b6 6683 ptr = omp_build_component_ref (ptr, df);
75a70cf9 6684 t = build2 (MODIFY_EXPR, TREE_TYPE (ptr), ptr,
389dd41b 6685 build_fold_addr_expr_loc (loc, dst));
fd6481cf 6686 append_to_statement_list (t, &list);
6687 }
6688
6689 t = build1 (RETURN_EXPR, void_type_node, NULL);
6690 append_to_statement_list (t, &list);
6691
6692 if (tcctx.cb.decl_map)
6693 pointer_map_destroy (tcctx.cb.decl_map);
6694 pop_gimplify_context (NULL);
6695 BIND_EXPR_BODY (bind) = list;
6696 pop_cfun ();
6697 current_function_decl = ctx->cb.src_fn;
6698}
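
/* Continuing the BUF sketch above, the copyfn body built here amounts
   to (field names illustrative; ARG points to the task's record,
   SARG to the sending thread's):

       arg->x = sarg->x;                <- shared: forward the pointer
       arg->scalar = sarg->scalar;      <- non-VLA firstprivate copy
       copy-construct arg->buf_data from *sarg->buf;  <- VLA data
       arg->buf_ptr = &arg->buf_data;   <- repoint DECL_VALUE_EXPR

   with the temporaries used in VLA sizes initialized by the first
   pass before any of the field references above.  */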
6699
75a70cf9 6700/* Lower the OpenMP parallel or task directive pointed to by GSI_P.
 6701   CTX holds context information for the directive.  */
773c5ba7 6702
6703static void
75a70cf9 6704lower_omp_taskreg (gimple_stmt_iterator *gsi_p, omp_context *ctx)
773c5ba7 6705{
75a70cf9 6706 tree clauses;
6707 tree child_fn, t;
6708 gimple stmt = gsi_stmt (*gsi_p);
6709 gimple par_bind, bind;
6710 gimple_seq par_body, olist, ilist, par_olist, par_ilist, new_body;
dac18d1a 6711 struct gimplify_ctx gctx;
389dd41b 6712 location_t loc = gimple_location (stmt);
773c5ba7 6713
75a70cf9 6714 clauses = gimple_omp_taskreg_clauses (stmt);
6715 par_bind = gimple_seq_first_stmt (gimple_omp_body (stmt));
6716 par_body = gimple_bind_body (par_bind);
773c5ba7 6717 child_fn = ctx->cb.dst_fn;
75a70cf9 6718 if (gimple_code (stmt) == GIMPLE_OMP_PARALLEL
6719 && !gimple_omp_parallel_combined_p (stmt))
de7ef844 6720 {
6721 struct walk_stmt_info wi;
6722 int ws_num = 0;
6723
6724 memset (&wi, 0, sizeof (wi));
de7ef844 6725 wi.info = &ws_num;
6726 wi.val_only = true;
75a70cf9 6727 walk_gimple_seq (par_body, check_combined_parallel, NULL, &wi);
de7ef844 6728 if (ws_num == 1)
75a70cf9 6729 gimple_omp_parallel_set_combined_p (stmt, true);
de7ef844 6730 }
fd6481cf 6731 if (ctx->srecord_type)
6732 create_task_copyfn (stmt, ctx);
773c5ba7 6733
dac18d1a 6734 push_gimplify_context (&gctx);
773c5ba7 6735
75a70cf9 6736 par_olist = NULL;
6737 par_ilist = NULL;
773c5ba7 6738 lower_rec_input_clauses (clauses, &par_ilist, &par_olist, ctx);
e3a19533 6739 lower_omp (&par_body, ctx);
75a70cf9 6740 if (gimple_code (stmt) == GIMPLE_OMP_PARALLEL)
fd6481cf 6741 lower_reduction_clauses (clauses, &par_olist, ctx);
773c5ba7 6742
6743 /* Declare all the variables created by mapping and the variables
6744 declared in the scope of the parallel body. */
6745 record_vars_into (ctx->block_vars, child_fn);
75a70cf9 6746 record_vars_into (gimple_bind_vars (par_bind), child_fn);
773c5ba7 6747
6748 if (ctx->record_type)
6749 {
fd6481cf 6750 ctx->sender_decl
6751 = create_tmp_var (ctx->srecord_type ? ctx->srecord_type
6752 : ctx->record_type, ".omp_data_o");
84bfaaeb 6753 DECL_NAMELESS (ctx->sender_decl) = 1;
86f2ad37 6754 TREE_ADDRESSABLE (ctx->sender_decl) = 1;
75a70cf9 6755 gimple_omp_taskreg_set_data_arg (stmt, ctx->sender_decl);
773c5ba7 6756 }
6757
75a70cf9 6758 olist = NULL;
6759 ilist = NULL;
773c5ba7 6760 lower_send_clauses (clauses, &ilist, &olist, ctx);
6761 lower_send_shared_vars (&ilist, &olist, ctx);
6762
6763 /* Once all the expansions are done, sequence all the different
75a70cf9 6764 fragments inside gimple_omp_body. */
773c5ba7 6765
75a70cf9 6766 new_body = NULL;
773c5ba7 6767
6768 if (ctx->record_type)
6769 {
389dd41b 6770 t = build_fold_addr_expr_loc (loc, ctx->sender_decl);
cc6b725b 6771 /* fixup_child_record_type might have changed receiver_decl's type. */
389dd41b 6772 t = fold_convert_loc (loc, TREE_TYPE (ctx->receiver_decl), t);
75a70cf9 6773 gimple_seq_add_stmt (&new_body,
6774 gimple_build_assign (ctx->receiver_decl, t));
773c5ba7 6775 }
6776
75a70cf9 6777 gimple_seq_add_seq (&new_body, par_ilist);
6778 gimple_seq_add_seq (&new_body, par_body);
6779 gimple_seq_add_seq (&new_body, par_olist);
6780 new_body = maybe_catch_exception (new_body);
6781 gimple_seq_add_stmt (&new_body, gimple_build_omp_return (false));
6782 gimple_omp_set_body (stmt, new_body);
773c5ba7 6783
75a70cf9 6784 bind = gimple_build_bind (NULL, NULL, gimple_bind_block (par_bind));
75a70cf9 6785 gsi_replace (gsi_p, bind, true);
e3a19533 6786 gimple_bind_add_seq (bind, ilist);
6787 gimple_bind_add_stmt (bind, stmt);
6788 gimple_bind_add_seq (bind, olist);
773c5ba7 6789
75a70cf9 6790 pop_gimplify_context (NULL);
773c5ba7 6791}
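
/* A sketch of the result for "#pragma omp parallel shared(x)", in
   gimple dump syntax (data block naming as used by this pass):

       .omp_data_o.x = x;   <- ilist; &x instead if use_pointer_for_field
       #pragma omp parallel [child fn] [.omp_data_o]
         {
           .omp_data_i = &.omp_data_o;   <- receiver setup (new_body)
           ... lowered parallel body ...
           #pragma omp return
         }
       x = .omp_data_o.x;   <- olist copy-back, by-value case only

   all wrapped in the new bind that replaces the original statement.  */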
6792
a4890dc9 6793/* Callback for lower_omp_1. Return non-NULL if *tp needs to be
75a70cf9 6794 regimplified. If DATA is non-NULL, lower_omp_1 is outside
 6795   of any OpenMP context, but with task_shared_vars set.  */
46515c92 6796
6797static tree
75a70cf9 6798lower_omp_regimplify_p (tree *tp, int *walk_subtrees,
6799 void *data)
46515c92 6800{
a4890dc9 6801 tree t = *tp;
46515c92 6802
a4890dc9 6803 /* Any variable with DECL_VALUE_EXPR needs to be regimplified. */
75a70cf9 6804 if (TREE_CODE (t) == VAR_DECL && data == NULL && DECL_HAS_VALUE_EXPR_P (t))
9f49e155 6805 return t;
6806
6807 if (task_shared_vars
6808 && DECL_P (t)
6809 && bitmap_bit_p (task_shared_vars, DECL_UID (t)))
a4890dc9 6810 return t;
46515c92 6811
a4890dc9 6812 /* If a global variable has been privatized, TREE_CONSTANT on
6813 ADDR_EXPR might be wrong. */
75a70cf9 6814 if (data == NULL && TREE_CODE (t) == ADDR_EXPR)
a4890dc9 6815 recompute_tree_invariant_for_addr_expr (t);
46515c92 6816
a4890dc9 6817 *walk_subtrees = !TYPE_P (t) && !DECL_P (t);
6818 return NULL_TREE;
46515c92 6819}
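
/* For example, after privatization a VLA is still referenced through
   its original VAR_DECL, but with DECL_VALUE_EXPR set to a dereference
   of a synthesized pointer; once that value expression is substituted,
   the containing statement may no longer be valid gimple, so callers
   walk operands with this predicate and regimplify on a match.  */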
773c5ba7 6820
a4890dc9 6821static void
75a70cf9 6822lower_omp_1 (gimple_stmt_iterator *gsi_p, omp_context *ctx)
1e8e9920 6823{
75a70cf9 6824 gimple stmt = gsi_stmt (*gsi_p);
6825 struct walk_stmt_info wi;
1e8e9920 6826
75a70cf9 6827 if (gimple_has_location (stmt))
6828 input_location = gimple_location (stmt);
a4890dc9 6829
75a70cf9 6830 if (task_shared_vars)
6831 memset (&wi, '\0', sizeof (wi));
a4890dc9 6832
773c5ba7 6833 /* If we have issued syntax errors, avoid doing any heavy lifting.
6834 Just replace the OpenMP directives with a NOP to avoid
6835 confusing RTL expansion. */
852f689e 6836 if (seen_error () && is_gimple_omp (stmt))
773c5ba7 6837 {
75a70cf9 6838 gsi_replace (gsi_p, gimple_build_nop (), true);
a4890dc9 6839 return;
773c5ba7 6840 }
6841
75a70cf9 6842 switch (gimple_code (stmt))
1e8e9920 6843 {
75a70cf9 6844 case GIMPLE_COND:
fd6481cf 6845 if ((ctx || task_shared_vars)
75a70cf9 6846 && (walk_tree (gimple_cond_lhs_ptr (stmt), lower_omp_regimplify_p,
6847 ctx ? NULL : &wi, NULL)
6848 || walk_tree (gimple_cond_rhs_ptr (stmt), lower_omp_regimplify_p,
6849 ctx ? NULL : &wi, NULL)))
6850 gimple_regimplify_operands (stmt, gsi_p);
a4890dc9 6851 break;
75a70cf9 6852 case GIMPLE_CATCH:
e3a19533 6853 lower_omp (gimple_catch_handler_ptr (stmt), ctx);
a4890dc9 6854 break;
75a70cf9 6855 case GIMPLE_EH_FILTER:
e3a19533 6856 lower_omp (gimple_eh_filter_failure_ptr (stmt), ctx);
a4890dc9 6857 break;
75a70cf9 6858 case GIMPLE_TRY:
e3a19533 6859 lower_omp (gimple_try_eval_ptr (stmt), ctx);
6860 lower_omp (gimple_try_cleanup_ptr (stmt), ctx);
a4890dc9 6861 break;
75a70cf9 6862 case GIMPLE_BIND:
e3a19533 6863 lower_omp (gimple_bind_body_ptr (stmt), ctx);
a4890dc9 6864 break;
75a70cf9 6865 case GIMPLE_OMP_PARALLEL:
6866 case GIMPLE_OMP_TASK:
6867 ctx = maybe_lookup_ctx (stmt);
6868 lower_omp_taskreg (gsi_p, ctx);
a4890dc9 6869 break;
75a70cf9 6870 case GIMPLE_OMP_FOR:
6871 ctx = maybe_lookup_ctx (stmt);
1e8e9920 6872 gcc_assert (ctx);
75a70cf9 6873 lower_omp_for (gsi_p, ctx);
1e8e9920 6874 break;
75a70cf9 6875 case GIMPLE_OMP_SECTIONS:
6876 ctx = maybe_lookup_ctx (stmt);
1e8e9920 6877 gcc_assert (ctx);
75a70cf9 6878 lower_omp_sections (gsi_p, ctx);
1e8e9920 6879 break;
75a70cf9 6880 case GIMPLE_OMP_SINGLE:
6881 ctx = maybe_lookup_ctx (stmt);
1e8e9920 6882 gcc_assert (ctx);
75a70cf9 6883 lower_omp_single (gsi_p, ctx);
1e8e9920 6884 break;
75a70cf9 6885 case GIMPLE_OMP_MASTER:
6886 ctx = maybe_lookup_ctx (stmt);
1e8e9920 6887 gcc_assert (ctx);
75a70cf9 6888 lower_omp_master (gsi_p, ctx);
1e8e9920 6889 break;
75a70cf9 6890 case GIMPLE_OMP_ORDERED:
6891 ctx = maybe_lookup_ctx (stmt);
1e8e9920 6892 gcc_assert (ctx);
75a70cf9 6893 lower_omp_ordered (gsi_p, ctx);
1e8e9920 6894 break;
75a70cf9 6895 case GIMPLE_OMP_CRITICAL:
6896 ctx = maybe_lookup_ctx (stmt);
1e8e9920 6897 gcc_assert (ctx);
75a70cf9 6898 lower_omp_critical (gsi_p, ctx);
6899 break;
6900 case GIMPLE_OMP_ATOMIC_LOAD:
6901 if ((ctx || task_shared_vars)
6902 && walk_tree (gimple_omp_atomic_load_rhs_ptr (stmt),
6903 lower_omp_regimplify_p, ctx ? NULL : &wi, NULL))
6904 gimple_regimplify_operands (stmt, gsi_p);
1e8e9920 6905 break;
a4890dc9 6906 default:
fd6481cf 6907 if ((ctx || task_shared_vars)
75a70cf9 6908 && walk_gimple_op (stmt, lower_omp_regimplify_p,
6909 ctx ? NULL : &wi))
6910 gimple_regimplify_operands (stmt, gsi_p);
1e8e9920 6911 break;
1e8e9920 6912 }
1e8e9920 6913}
6914
6915static void
e3a19533 6916lower_omp (gimple_seq *body, omp_context *ctx)
1e8e9920 6917{
1d22f541 6918 location_t saved_location = input_location;
e3a19533 6919 gimple_stmt_iterator gsi;
6920 for (gsi = gsi_start (*body); !gsi_end_p (gsi); gsi_next (&gsi))
75a70cf9 6921 lower_omp_1 (&gsi, ctx);
1d22f541 6922 input_location = saved_location;
1e8e9920 6923}
6924\f
6925/* Main entry point. */
6926
2a1990e9 6927static unsigned int
1e8e9920 6928execute_lower_omp (void)
6929{
75a70cf9 6930 gimple_seq body;
6931
41709826 6932 /* This pass always runs, to provide PROP_gimple_lomp.
6933 But there is nothing to do unless -fopenmp is given. */
6934 if (flag_openmp == 0)
6935 return 0;
6936
1e8e9920 6937 all_contexts = splay_tree_new (splay_tree_compare_pointers, 0,
6938 delete_omp_context);
6939
75a70cf9 6940 body = gimple_body (current_function_decl);
ab129075 6941 scan_omp (&body, NULL);
fd6481cf 6942 gcc_assert (taskreg_nesting_level == 0);
1e8e9920 6943
6944 if (all_contexts->root)
fd6481cf 6945 {
dac18d1a 6946 struct gimplify_ctx gctx;
6947
fd6481cf 6948 if (task_shared_vars)
dac18d1a 6949 push_gimplify_context (&gctx);
e3a19533 6950 lower_omp (&body, NULL);
fd6481cf 6951 if (task_shared_vars)
6952 pop_gimplify_context (NULL);
6953 }
1e8e9920 6954
773c5ba7 6955 if (all_contexts)
6956 {
6957 splay_tree_delete (all_contexts);
6958 all_contexts = NULL;
6959 }
fd6481cf 6960 BITMAP_FREE (task_shared_vars);
2a1990e9 6961 return 0;
1e8e9920 6962}
6963
48e1416a 6964struct gimple_opt_pass pass_lower_omp =
1e8e9920 6965{
20099e35 6966 {
6967 GIMPLE_PASS,
1e8e9920 6968 "omplower", /* name */
41709826 6969 NULL, /* gate */
1e8e9920 6970 execute_lower_omp, /* execute */
6971 NULL, /* sub */
6972 NULL, /* next */
6973 0, /* static_pass_number */
0b1615c1 6974 TV_NONE, /* tv_id */
1e8e9920 6975 PROP_gimple_any, /* properties_required */
6976 PROP_gimple_lomp, /* properties_provided */
6977 0, /* properties_destroyed */
6978 0, /* todo_flags_start */
771e2890 6979 0 /* todo_flags_finish */
20099e35 6980 }
1e8e9920 6981};
1e8e9920 6982\f
6983/* The following is a utility to diagnose OpenMP structured block violations.
61e47ac8 6984 It is not part of the "omplower" pass, as that's invoked too late. It
6985 should be invoked by the respective front ends after gimplification. */
1e8e9920 6986
6987static splay_tree all_labels;
6988
6989/* Check for mismatched contexts and generate an error if needed. Return
6990 true if an error is detected. */
6991
6992static bool
75a70cf9 6993diagnose_sb_0 (gimple_stmt_iterator *gsi_p,
6994 gimple branch_ctx, gimple label_ctx)
1e8e9920 6995{
75a70cf9 6996 if (label_ctx == branch_ctx)
1e8e9920 6997 return false;
6998
48e1416a 6999
75a70cf9 7000 /*
7001 Previously we kept track of the label's entire context in diagnose_sb_[12]
7002 so we could traverse it and issue a correct "exit" or "enter" error
7003 message upon a structured block violation.
7004
7005 We built the context by building a list with tree_cons'ing, but there is
7006 no easy counterpart in gimple tuples. It seems like far too much work
7007 for issuing exit/enter error messages. If someone really misses the
7008 distinct error message... patches welcome.
7009 */
48e1416a 7010
75a70cf9 7011#if 0
1e8e9920 7012  /* Try to avoid confusing the user by producing an error message
f0b5f617 7013 with correct "exit" or "enter" verbiage. We prefer "exit"
1e8e9920 7014 unless we can show that LABEL_CTX is nested within BRANCH_CTX. */
7015 if (branch_ctx == NULL)
7016 exit_p = false;
7017 else
7018 {
7019 while (label_ctx)
7020 {
7021 if (TREE_VALUE (label_ctx) == branch_ctx)
7022 {
7023 exit_p = false;
7024 break;
7025 }
7026 label_ctx = TREE_CHAIN (label_ctx);
7027 }
7028 }
7029
7030 if (exit_p)
7031 error ("invalid exit from OpenMP structured block");
7032 else
7033 error ("invalid entry to OpenMP structured block");
75a70cf9 7034#endif
1e8e9920 7035
75a70cf9 7036 /* If it's obvious we have an invalid entry, be specific about the error. */
7037 if (branch_ctx == NULL)
7038 error ("invalid entry to OpenMP structured block");
7039 else
7040 /* Otherwise, be vague and lazy, but efficient. */
7041 error ("invalid branch to/from an OpenMP structured block");
7042
7043 gsi_replace (gsi_p, gimple_build_nop (), false);
1e8e9920 7044 return true;
7045}
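
/* For instance, both gotos below are diagnosed here, the first as an
   invalid entry (BRANCH_CTX is NULL), the second with the vaguer
   message (BRANCH_CTX is the critical construct):

       goto inside;
       #pragma omp critical
       {
       inside:
         goto outside;
       }
       outside:;

   The offending statement is then replaced by a GIMPLE_NOP so later
   passes never see a control transfer crossing the construct.  */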
7046
7047/* Pass 1: Create a minimal tree of OpenMP structured blocks, and record
75a70cf9 7048 where each label is found. */
1e8e9920 7049
7050static tree
75a70cf9 7051diagnose_sb_1 (gimple_stmt_iterator *gsi_p, bool *handled_ops_p,
7052 struct walk_stmt_info *wi)
1e8e9920 7053{
75a70cf9 7054 gimple context = (gimple) wi->info;
7055 gimple inner_context;
7056 gimple stmt = gsi_stmt (*gsi_p);
1e8e9920 7057
75a70cf9 7058 *handled_ops_p = true;
7059
7060 switch (gimple_code (stmt))
1e8e9920 7061 {
75a70cf9 7062 WALK_SUBSTMTS;
48e1416a 7063
75a70cf9 7064 case GIMPLE_OMP_PARALLEL:
7065 case GIMPLE_OMP_TASK:
7066 case GIMPLE_OMP_SECTIONS:
7067 case GIMPLE_OMP_SINGLE:
7068 case GIMPLE_OMP_SECTION:
7069 case GIMPLE_OMP_MASTER:
7070 case GIMPLE_OMP_ORDERED:
7071 case GIMPLE_OMP_CRITICAL:
7072 /* The minimal context here is just the current OMP construct. */
7073 inner_context = stmt;
1e8e9920 7074 wi->info = inner_context;
75a70cf9 7075 walk_gimple_seq (gimple_omp_body (stmt), diagnose_sb_1, NULL, wi);
1e8e9920 7076 wi->info = context;
7077 break;
7078
75a70cf9 7079 case GIMPLE_OMP_FOR:
7080 inner_context = stmt;
1e8e9920 7081 wi->info = inner_context;
75a70cf9 7082 /* gimple_omp_for_{index,initial,final} are all DECLs; no need to
7083 walk them. */
7084 walk_gimple_seq (gimple_omp_for_pre_body (stmt),
7085 diagnose_sb_1, NULL, wi);
7086 walk_gimple_seq (gimple_omp_body (stmt), diagnose_sb_1, NULL, wi);
1e8e9920 7087 wi->info = context;
7088 break;
7089
75a70cf9 7090 case GIMPLE_LABEL:
7091 splay_tree_insert (all_labels, (splay_tree_key) gimple_label_label (stmt),
1e8e9920 7092 (splay_tree_value) context);
7093 break;
7094
7095 default:
7096 break;
7097 }
7098
7099 return NULL_TREE;
7100}
7101
7102/* Pass 2: Check each branch and see if its context differs from that of
 7103   the destination label.  */
7104
7105static tree
75a70cf9 7106diagnose_sb_2 (gimple_stmt_iterator *gsi_p, bool *handled_ops_p,
7107 struct walk_stmt_info *wi)
1e8e9920 7108{
75a70cf9 7109 gimple context = (gimple) wi->info;
1e8e9920 7110 splay_tree_node n;
75a70cf9 7111 gimple stmt = gsi_stmt (*gsi_p);
1e8e9920 7112
75a70cf9 7113 *handled_ops_p = true;
7114
7115 switch (gimple_code (stmt))
1e8e9920 7116 {
75a70cf9 7117 WALK_SUBSTMTS;
7118
7119 case GIMPLE_OMP_PARALLEL:
7120 case GIMPLE_OMP_TASK:
7121 case GIMPLE_OMP_SECTIONS:
7122 case GIMPLE_OMP_SINGLE:
7123 case GIMPLE_OMP_SECTION:
7124 case GIMPLE_OMP_MASTER:
7125 case GIMPLE_OMP_ORDERED:
7126 case GIMPLE_OMP_CRITICAL:
7127 wi->info = stmt;
e3a19533 7128 walk_gimple_seq_mod (gimple_omp_body_ptr (stmt), diagnose_sb_2, NULL, wi);
1e8e9920 7129 wi->info = context;
7130 break;
7131
75a70cf9 7132 case GIMPLE_OMP_FOR:
7133 wi->info = stmt;
7134 /* gimple_omp_for_{index,initial,final} are all DECLs; no need to
7135 walk them. */
e3a19533 7136 walk_gimple_seq_mod (gimple_omp_for_pre_body_ptr (stmt),
7137 diagnose_sb_2, NULL, wi);
7138 walk_gimple_seq_mod (gimple_omp_body_ptr (stmt), diagnose_sb_2, NULL, wi);
1e8e9920 7139 wi->info = context;
7140 break;
7141
0e1818e7 7142 case GIMPLE_COND:
7143 {
7144 tree lab = gimple_cond_true_label (stmt);
7145 if (lab)
7146 {
7147 n = splay_tree_lookup (all_labels,
7148 (splay_tree_key) lab);
7149 diagnose_sb_0 (gsi_p, context,
7150 n ? (gimple) n->value : NULL);
7151 }
7152 lab = gimple_cond_false_label (stmt);
7153 if (lab)
7154 {
7155 n = splay_tree_lookup (all_labels,
7156 (splay_tree_key) lab);
7157 diagnose_sb_0 (gsi_p, context,
7158 n ? (gimple) n->value : NULL);
7159 }
7160 }
7161 break;
7162
75a70cf9 7163 case GIMPLE_GOTO:
1e8e9920 7164 {
75a70cf9 7165 tree lab = gimple_goto_dest (stmt);
1e8e9920 7166 if (TREE_CODE (lab) != LABEL_DECL)
7167 break;
7168
7169 n = splay_tree_lookup (all_labels, (splay_tree_key) lab);
75a70cf9 7170 diagnose_sb_0 (gsi_p, context, n ? (gimple) n->value : NULL);
1e8e9920 7171 }
7172 break;
7173
75a70cf9 7174 case GIMPLE_SWITCH:
1e8e9920 7175 {
75a70cf9 7176 unsigned int i;
7177 for (i = 0; i < gimple_switch_num_labels (stmt); ++i)
1e8e9920 7178 {
75a70cf9 7179 tree lab = CASE_LABEL (gimple_switch_label (stmt, i));
1e8e9920 7180 n = splay_tree_lookup (all_labels, (splay_tree_key) lab);
75a70cf9 7181 if (n && diagnose_sb_0 (gsi_p, context, (gimple) n->value))
1e8e9920 7182 break;
7183 }
7184 }
7185 break;
7186
75a70cf9 7187 case GIMPLE_RETURN:
7188 diagnose_sb_0 (gsi_p, context, NULL);
1e8e9920 7189 break;
7190
7191 default:
7192 break;
7193 }
7194
7195 return NULL_TREE;
7196}
7197
bfec3452 7198static unsigned int
7199diagnose_omp_structured_block_errors (void)
1e8e9920 7200{
1e8e9920 7201 struct walk_stmt_info wi;
bfec3452 7202 gimple_seq body = gimple_body (current_function_decl);
1e8e9920 7203
7204 all_labels = splay_tree_new (splay_tree_compare_pointers, 0, 0);
7205
7206 memset (&wi, 0, sizeof (wi));
75a70cf9 7207 walk_gimple_seq (body, diagnose_sb_1, NULL, &wi);
1e8e9920 7208
7209 memset (&wi, 0, sizeof (wi));
1e8e9920 7210 wi.want_locations = true;
e3a19533 7211 walk_gimple_seq_mod (&body, diagnose_sb_2, NULL, &wi);
7212
7213 gimple_set_body (current_function_decl, body);
1e8e9920 7214
7215 splay_tree_delete (all_labels);
7216 all_labels = NULL;
7217
bfec3452 7218 return 0;
1e8e9920 7219}
7220
bfec3452 7221static bool
7222gate_diagnose_omp_blocks (void)
7223{
7224 return flag_openmp != 0;
7225}
7226
7227struct gimple_opt_pass pass_diagnose_omp_blocks =
7228{
7229 {
7230 GIMPLE_PASS,
53b5ae07 7231 "*diagnose_omp_blocks", /* name */
bfec3452 7232 gate_diagnose_omp_blocks, /* gate */
7233 diagnose_omp_structured_block_errors, /* execute */
7234 NULL, /* sub */
7235 NULL, /* next */
7236 0, /* static_pass_number */
7237 TV_NONE, /* tv_id */
7238 PROP_gimple_any, /* properties_required */
7239 0, /* properties_provided */
7240 0, /* properties_destroyed */
7241 0, /* todo_flags_start */
7242 0, /* todo_flags_finish */
7243 }
7244};
7245
1e8e9920 7246#include "gt-omp-low.h"