/* Lowering pass for OpenMP directives.  Converts OpenMP directives
   into explicit calls to the runtime library (libgomp) and data
   marshalling to implement data sharing and copying clauses.
   Contributed by Diego Novillo <dnovillo@redhat.com>

   Copyright (C) 2005, 2006, 2007, 2008, 2009, 2010, 2011
   Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
#include "rtl.h"
#include "gimple.h"
#include "tree-iterator.h"
#include "tree-inline.h"
#include "langhooks.h"
#include "diagnostic-core.h"
#include "tree-flow.h"
#include "timevar.h"
#include "flags.h"
#include "function.h"
#include "expr.h"
#include "tree-pass.h"
#include "ggc.h"
#include "except.h"
#include "splay-tree.h"
#include "optabs.h"
#include "cfgloop.h"


/* Lowering of OpenMP parallel and workshare constructs proceeds in two
   phases.  The first phase scans the function looking for OMP statements
   and then for variables that must be replaced to satisfy data sharing
   clauses.  The second phase expands code for the constructs, as well as
   re-gimplifying things when variables have been replaced with complex
   expressions.

   Final code generation is done by pass_expand_omp.  The flowgraph is
   scanned for parallel regions which are then moved to a new
   function, to be invoked by the thread library.  */
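
/* As an illustrative sketch only (not verbatim compiler output), a
   simple

	#pragma omp parallel shared (x)
	  x++;

   ends up roughly as

	.omp_data_o.x = &x;
	__builtin_GOMP_parallel_start (foo._omp_fn.0, &.omp_data_o, 0);
	foo._omp_fn.0 (&.omp_data_o);
	__builtin_GOMP_parallel_end ();

   with the body outlined into foo._omp_fn.0, which receives a pointer
   to the .omp_data_s record and accesses x through it.  The exact
   calls and record layout depend on the clauses involved.  */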

/* Context structure.  Used to store information about each parallel
   directive in the code.  */

typedef struct omp_context
{
  /* This field must be at the beginning, as we do "inheritance": Some
     callback functions for tree-inline.c (e.g., omp_copy_decl)
     receive a copy_body_data pointer that is up-casted to an
     omp_context pointer.  */
  copy_body_data cb;

  /* The tree of contexts corresponding to the encountered constructs.  */
  struct omp_context *outer;
  gimple stmt;

  /* Map variables to fields in a structure that allows communication
     between sending and receiving threads.  */
  splay_tree field_map;
  tree record_type;
  tree sender_decl;
  tree receiver_decl;

  /* These are used just by task contexts, if task firstprivate fn is
     needed.  srecord_type is used to communicate from the thread
     that encountered the task construct to task firstprivate fn,
     record_type is allocated by GOMP_task, initialized by task firstprivate
     fn and passed to the task body fn.  */
  splay_tree sfield_map;
  tree srecord_type;

  /* A chain of variables to add to the top-level block surrounding the
     construct.  In the case of a parallel, this is in the child function.  */
  tree block_vars;

  /* What to do with variables with implicitly determined sharing
     attributes.  */
  enum omp_clause_default_kind default_kind;

  /* Nesting depth of this context.  Used to beautify error messages re
     invalid gotos.  The outermost ctx is depth 1, with depth 0 being
     reserved for the main body of the function.  */
  int depth;

  /* True if this parallel directive is nested within another.  */
  bool is_nested;
} omp_context;


struct omp_for_data_loop
{
  tree v, n1, n2, step;
  enum tree_code cond_code;
};

/* A structure describing the main elements of a parallel loop.  */

struct omp_for_data
{
  struct omp_for_data_loop loop;
  tree chunk_size;
  gimple for_stmt;
  tree pre, iter_type;
  int collapse;
  bool have_nowait, have_ordered;
  enum omp_clause_schedule_kind sched_kind;
  struct omp_for_data_loop *loops;
};
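
/* As an illustrative example (the exact trees depend on
   gimplification), for

	#pragma omp for schedule (dynamic, 4)
	for (i = 0; i < n; i++)

   extract_omp_for_data below fills in roughly: loop.v = i, loop.n1 = 0,
   loop.n2 = n, loop.step = 1, loop.cond_code = LT_EXPR, collapse = 1,
   sched_kind = OMP_CLAUSE_SCHEDULE_DYNAMIC, chunk_size = 4, and
   have_nowait = have_ordered = false.  */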


static splay_tree all_contexts;
static int taskreg_nesting_level;
struct omp_region *root_omp_region;
static bitmap task_shared_vars;

static void scan_omp (gimple_seq, omp_context *);
static tree scan_omp_1_op (tree *, int *, void *);

#define WALK_SUBSTMTS \
  case GIMPLE_BIND: \
  case GIMPLE_TRY: \
  case GIMPLE_CATCH: \
  case GIMPLE_EH_FILTER: \
  case GIMPLE_TRANSACTION: \
    /* The sub-statements for these should be walked.  */ \
    *handled_ops_p = false; \
    break;

/* Convenience function for calling scan_omp_1_op on tree operands.  */

static inline tree
scan_omp_op (tree *tp, omp_context *ctx)
{
  struct walk_stmt_info wi;

  memset (&wi, 0, sizeof (wi));
  wi.info = ctx;
  wi.want_locations = true;

  return walk_tree (tp, scan_omp_1_op, &wi, NULL);
}

static void lower_omp (gimple_seq, omp_context *);
static tree lookup_decl_in_outer_ctx (tree, omp_context *);
static tree maybe_lookup_decl_in_outer_ctx (tree, omp_context *);

/* Find an OpenMP clause of type KIND within CLAUSES.  */

tree
find_omp_clause (tree clauses, enum omp_clause_code kind)
{
  for (; clauses ; clauses = OMP_CLAUSE_CHAIN (clauses))
    if (OMP_CLAUSE_CODE (clauses) == kind)
      return clauses;

  return NULL_TREE;
}

/* Return true if CTX is for an omp parallel.  */

static inline bool
is_parallel_ctx (omp_context *ctx)
{
  return gimple_code (ctx->stmt) == GIMPLE_OMP_PARALLEL;
}


/* Return true if CTX is for an omp task.  */

static inline bool
is_task_ctx (omp_context *ctx)
{
  return gimple_code (ctx->stmt) == GIMPLE_OMP_TASK;
}


/* Return true if CTX is for an omp parallel or omp task.  */

static inline bool
is_taskreg_ctx (omp_context *ctx)
{
  return gimple_code (ctx->stmt) == GIMPLE_OMP_PARALLEL
	 || gimple_code (ctx->stmt) == GIMPLE_OMP_TASK;
}


/* Return true if REGION is a combined parallel+workshare region.  */

static inline bool
is_combined_parallel (struct omp_region *region)
{
  return region->is_combined_parallel;
}

/* Extract the header elements of parallel loop FOR_STMT and store
   them into *FD.  */

static void
extract_omp_for_data (gimple for_stmt, struct omp_for_data *fd,
		      struct omp_for_data_loop *loops)
{
  tree t, var, *collapse_iter, *collapse_count;
  tree count = NULL_TREE, iter_type = long_integer_type_node;
  struct omp_for_data_loop *loop;
  int i;
  struct omp_for_data_loop dummy_loop;
  location_t loc = gimple_location (for_stmt);

  fd->for_stmt = for_stmt;
  fd->pre = NULL;
  fd->collapse = gimple_omp_for_collapse (for_stmt);
  if (fd->collapse > 1)
    fd->loops = loops;
  else
    fd->loops = &fd->loop;

  fd->have_nowait = fd->have_ordered = false;
  fd->sched_kind = OMP_CLAUSE_SCHEDULE_STATIC;
  fd->chunk_size = NULL_TREE;
  collapse_iter = NULL;
  collapse_count = NULL;

  for (t = gimple_omp_for_clauses (for_stmt); t ; t = OMP_CLAUSE_CHAIN (t))
    switch (OMP_CLAUSE_CODE (t))
      {
      case OMP_CLAUSE_NOWAIT:
	fd->have_nowait = true;
	break;
      case OMP_CLAUSE_ORDERED:
	fd->have_ordered = true;
	break;
      case OMP_CLAUSE_SCHEDULE:
	fd->sched_kind = OMP_CLAUSE_SCHEDULE_KIND (t);
	fd->chunk_size = OMP_CLAUSE_SCHEDULE_CHUNK_EXPR (t);
	break;
      case OMP_CLAUSE_COLLAPSE:
	if (fd->collapse > 1)
	  {
	    collapse_iter = &OMP_CLAUSE_COLLAPSE_ITERVAR (t);
	    collapse_count = &OMP_CLAUSE_COLLAPSE_COUNT (t);
	  }
      default:
	break;
      }

  /* FIXME: for now map schedule(auto) to schedule(static).
     There should be analysis to determine whether all iterations
     are approximately the same amount of work (then schedule(static)
     is best) or if it varies (then schedule(dynamic,N) is better).  */
  if (fd->sched_kind == OMP_CLAUSE_SCHEDULE_AUTO)
    {
      fd->sched_kind = OMP_CLAUSE_SCHEDULE_STATIC;
      gcc_assert (fd->chunk_size == NULL);
    }
  gcc_assert (fd->collapse == 1 || collapse_iter != NULL);
  if (fd->sched_kind == OMP_CLAUSE_SCHEDULE_RUNTIME)
    gcc_assert (fd->chunk_size == NULL);
  else if (fd->chunk_size == NULL)
    {
      /* We only need to compute a default chunk size for ordered
	 static loops and dynamic loops.  */
      if (fd->sched_kind != OMP_CLAUSE_SCHEDULE_STATIC
	  || fd->have_ordered
	  || fd->collapse > 1)
	fd->chunk_size = (fd->sched_kind == OMP_CLAUSE_SCHEDULE_STATIC)
			 ? integer_zero_node : integer_one_node;
    }

  for (i = 0; i < fd->collapse; i++)
    {
      if (fd->collapse == 1)
	loop = &fd->loop;
      else if (loops != NULL)
	loop = loops + i;
      else
	loop = &dummy_loop;

      loop->v = gimple_omp_for_index (for_stmt, i);
      gcc_assert (SSA_VAR_P (loop->v));
      gcc_assert (TREE_CODE (TREE_TYPE (loop->v)) == INTEGER_TYPE
		  || TREE_CODE (TREE_TYPE (loop->v)) == POINTER_TYPE);
      var = TREE_CODE (loop->v) == SSA_NAME ? SSA_NAME_VAR (loop->v) : loop->v;
      loop->n1 = gimple_omp_for_initial (for_stmt, i);

      loop->cond_code = gimple_omp_for_cond (for_stmt, i);
      loop->n2 = gimple_omp_for_final (for_stmt, i);
      switch (loop->cond_code)
	{
	case LT_EXPR:
	case GT_EXPR:
	  break;
	case LE_EXPR:
	  if (POINTER_TYPE_P (TREE_TYPE (loop->n2)))
	    loop->n2 = fold_build_pointer_plus_hwi_loc (loc, loop->n2, 1);
	  else
	    loop->n2 = fold_build2_loc (loc,
					PLUS_EXPR, TREE_TYPE (loop->n2),
					loop->n2,
					build_int_cst (TREE_TYPE (loop->n2), 1));
	  loop->cond_code = LT_EXPR;
	  break;
	case GE_EXPR:
	  if (POINTER_TYPE_P (TREE_TYPE (loop->n2)))
	    loop->n2 = fold_build_pointer_plus_hwi_loc (loc, loop->n2, -1);
	  else
	    loop->n2 = fold_build2_loc (loc,
					MINUS_EXPR, TREE_TYPE (loop->n2),
					loop->n2,
					build_int_cst (TREE_TYPE (loop->n2), 1));
	  loop->cond_code = GT_EXPR;
	  break;
	default:
	  gcc_unreachable ();
	}

      t = gimple_omp_for_incr (for_stmt, i);
      gcc_assert (TREE_OPERAND (t, 0) == var);
      switch (TREE_CODE (t))
	{
	case PLUS_EXPR:
	case POINTER_PLUS_EXPR:
	  loop->step = TREE_OPERAND (t, 1);
	  break;
	case MINUS_EXPR:
	  loop->step = TREE_OPERAND (t, 1);
	  loop->step = fold_build1_loc (loc,
					NEGATE_EXPR, TREE_TYPE (loop->step),
					loop->step);
	  break;
	default:
	  gcc_unreachable ();
	}

      if (iter_type != long_long_unsigned_type_node)
	{
	  if (POINTER_TYPE_P (TREE_TYPE (loop->v)))
	    iter_type = long_long_unsigned_type_node;
	  else if (TYPE_UNSIGNED (TREE_TYPE (loop->v))
		   && TYPE_PRECISION (TREE_TYPE (loop->v))
		      >= TYPE_PRECISION (iter_type))
	    {
	      tree n;

	      if (loop->cond_code == LT_EXPR)
		n = fold_build2_loc (loc,
				     PLUS_EXPR, TREE_TYPE (loop->v),
				     loop->n2, loop->step);
	      else
		n = loop->n1;
	      if (TREE_CODE (n) != INTEGER_CST
		  || tree_int_cst_lt (TYPE_MAX_VALUE (iter_type), n))
		iter_type = long_long_unsigned_type_node;
	    }
	  else if (TYPE_PRECISION (TREE_TYPE (loop->v))
		   > TYPE_PRECISION (iter_type))
	    {
	      tree n1, n2;

	      if (loop->cond_code == LT_EXPR)
		{
		  n1 = loop->n1;
		  n2 = fold_build2_loc (loc,
					PLUS_EXPR, TREE_TYPE (loop->v),
					loop->n2, loop->step);
		}
	      else
		{
		  n1 = fold_build2_loc (loc,
					MINUS_EXPR, TREE_TYPE (loop->v),
					loop->n2, loop->step);
		  n2 = loop->n1;
		}
	      if (TREE_CODE (n1) != INTEGER_CST
		  || TREE_CODE (n2) != INTEGER_CST
		  || !tree_int_cst_lt (TYPE_MIN_VALUE (iter_type), n1)
		  || !tree_int_cst_lt (n2, TYPE_MAX_VALUE (iter_type)))
		iter_type = long_long_unsigned_type_node;
	    }
	}

      if (collapse_count && *collapse_count == NULL)
	{
	  if ((i == 0 || count != NULL_TREE)
	      && TREE_CODE (TREE_TYPE (loop->v)) == INTEGER_TYPE
	      && TREE_CONSTANT (loop->n1)
	      && TREE_CONSTANT (loop->n2)
	      && TREE_CODE (loop->step) == INTEGER_CST)
	    {
	      tree itype = TREE_TYPE (loop->v);

	      if (POINTER_TYPE_P (itype))
		itype
		  = lang_hooks.types.type_for_size (TYPE_PRECISION (itype), 0);
	      t = build_int_cst (itype, (loop->cond_code == LT_EXPR ? -1 : 1));
	      t = fold_build2_loc (loc,
				   PLUS_EXPR, itype,
				   fold_convert_loc (loc, itype, loop->step), t);
	      t = fold_build2_loc (loc, PLUS_EXPR, itype, t,
				   fold_convert_loc (loc, itype, loop->n2));
	      t = fold_build2_loc (loc, MINUS_EXPR, itype, t,
				   fold_convert_loc (loc, itype, loop->n1));
	      if (TYPE_UNSIGNED (itype) && loop->cond_code == GT_EXPR)
		t = fold_build2_loc (loc, TRUNC_DIV_EXPR, itype,
				     fold_build1_loc (loc, NEGATE_EXPR, itype, t),
				     fold_build1_loc (loc, NEGATE_EXPR, itype,
						      fold_convert_loc (loc, itype,
									loop->step)));
	      else
		t = fold_build2_loc (loc, TRUNC_DIV_EXPR, itype, t,
				     fold_convert_loc (loc, itype, loop->step));
	      t = fold_convert_loc (loc, long_long_unsigned_type_node, t);
	      if (count != NULL_TREE)
		count = fold_build2_loc (loc,
					 MULT_EXPR, long_long_unsigned_type_node,
					 count, t);
	      else
		count = t;
	      if (TREE_CODE (count) != INTEGER_CST)
		count = NULL_TREE;
	    }
	  else
	    count = NULL_TREE;
	}
    }

  if (count)
    {
      if (!tree_int_cst_lt (count, TYPE_MAX_VALUE (long_integer_type_node)))
	iter_type = long_long_unsigned_type_node;
      else
	iter_type = long_integer_type_node;
    }
  else if (collapse_iter && *collapse_iter != NULL)
    iter_type = TREE_TYPE (*collapse_iter);
  fd->iter_type = iter_type;
  if (collapse_iter && *collapse_iter == NULL)
    *collapse_iter = create_tmp_var (iter_type, ".iter");
  if (collapse_count && *collapse_count == NULL)
    {
      if (count)
	*collapse_count = fold_convert_loc (loc, iter_type, count);
      else
	*collapse_count = create_tmp_var (iter_type, ".count");
    }

  if (fd->collapse > 1)
    {
      fd->loop.v = *collapse_iter;
      fd->loop.n1 = build_int_cst (TREE_TYPE (fd->loop.v), 0);
      fd->loop.n2 = *collapse_count;
      fd->loop.step = build_int_cst (TREE_TYPE (fd->loop.v), 1);
      fd->loop.cond_code = LT_EXPR;
    }
}
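
/* To illustrate the canonicalization above with a sketch: a source
   loop written as

	for (i = 0; i <= N; i++)

   arrives with cond_code LE_EXPR and leaves with n2 = N + 1 and
   cond_code LT_EXPR; likewise a GE_EXPR test becomes GT_EXPR with n2
   decremented by one, and a MINUS_EXPR increment is folded into a
   negated step, so later phases only ever see LT_EXPR/GT_EXPR loops
   with additive steps.  */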


/* Given two blocks PAR_ENTRY_BB and WS_ENTRY_BB such that WS_ENTRY_BB
   is the immediate dominator of PAR_ENTRY_BB, return true if there
   are no data dependencies that would prevent expanding the parallel
   directive at PAR_ENTRY_BB as a combined parallel+workshare region.

   When expanding a combined parallel+workshare region, the call to
   the child function may need additional arguments in the case of
   GIMPLE_OMP_FOR regions.  In some cases, these arguments are
   computed out of variables passed in from the parent to the child
   via 'struct .omp_data_s'.  For instance:

	#pragma omp parallel for schedule (guided, i * 4)
	for (j ...)

   Is lowered into:

	# BLOCK 2 (PAR_ENTRY_BB)
	.omp_data_o.i = i;
	#pragma omp parallel [child fn: bar.omp_fn.0 ( ..., D.1598)

	# BLOCK 3 (WS_ENTRY_BB)
	.omp_data_i = &.omp_data_o;
	D.1667 = .omp_data_i->i;
	D.1598 = D.1667 * 4;
	#pragma omp for schedule (guided, D.1598)

   When we outline the parallel region, the call to the child function
   'bar.omp_fn.0' will need the value D.1598 in its argument list, but
   that value is computed *after* the call site.  So, in principle we
   cannot do the transformation.

   To see whether the code in WS_ENTRY_BB blocks the combined
   parallel+workshare call, we collect all the variables used in the
   GIMPLE_OMP_FOR header and check whether they appear on the LHS of
   any statement in WS_ENTRY_BB.  If so, then we cannot emit the
   combined call.

   FIXME.  If we had the SSA form built at this point, we could merely
   hoist the code in block 3 into block 2 and be done with it.  But at
   this point we don't have dataflow information and though we could
   hack something up here, it is really not worth the aggravation.  */

static bool
workshare_safe_to_combine_p (basic_block ws_entry_bb)
{
  struct omp_for_data fd;
  gimple ws_stmt = last_stmt (ws_entry_bb);

  if (gimple_code (ws_stmt) == GIMPLE_OMP_SECTIONS)
    return true;

  gcc_assert (gimple_code (ws_stmt) == GIMPLE_OMP_FOR);

  extract_omp_for_data (ws_stmt, &fd, NULL);

  if (fd.collapse > 1 && TREE_CODE (fd.loop.n2) != INTEGER_CST)
    return false;
  if (fd.iter_type != long_integer_type_node)
    return false;

  /* FIXME.  We give up too easily here.  If any of these arguments
     are not constants, they will likely involve variables that have
     been mapped into fields of .omp_data_s for sharing with the child
     function.  With appropriate data flow, it would be possible to
     see through this.  */
  if (!is_gimple_min_invariant (fd.loop.n1)
      || !is_gimple_min_invariant (fd.loop.n2)
      || !is_gimple_min_invariant (fd.loop.step)
      || (fd.chunk_size && !is_gimple_min_invariant (fd.chunk_size)))
    return false;

  return true;
}


/* Collect additional arguments needed to emit a combined
   parallel+workshare call.  WS_STMT is the workshare directive being
   expanded.  */

static VEC(tree,gc) *
get_ws_args_for (gimple ws_stmt)
{
  tree t;
  location_t loc = gimple_location (ws_stmt);
  VEC(tree,gc) *ws_args;

  if (gimple_code (ws_stmt) == GIMPLE_OMP_FOR)
    {
      struct omp_for_data fd;

      extract_omp_for_data (ws_stmt, &fd, NULL);

      ws_args = VEC_alloc (tree, gc, 3 + (fd.chunk_size != 0));

      t = fold_convert_loc (loc, long_integer_type_node, fd.loop.n1);
      VEC_quick_push (tree, ws_args, t);

      t = fold_convert_loc (loc, long_integer_type_node, fd.loop.n2);
      VEC_quick_push (tree, ws_args, t);

      t = fold_convert_loc (loc, long_integer_type_node, fd.loop.step);
      VEC_quick_push (tree, ws_args, t);

      if (fd.chunk_size)
	{
	  t = fold_convert_loc (loc, long_integer_type_node, fd.chunk_size);
	  VEC_quick_push (tree, ws_args, t);
	}

      return ws_args;
    }
  else if (gimple_code (ws_stmt) == GIMPLE_OMP_SECTIONS)
    {
      /* Number of sections is equal to the number of edges from the
	 GIMPLE_OMP_SECTIONS_SWITCH statement, except for the one to
	 the exit of the sections region.  */
      basic_block bb = single_succ (gimple_bb (ws_stmt));
      t = build_int_cst (unsigned_type_node, EDGE_COUNT (bb->succs) - 1);
      ws_args = VEC_alloc (tree, gc, 1);
      VEC_quick_push (tree, ws_args, t);
      return ws_args;
    }

  gcc_unreachable ();
}
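
/* Continuing the schedule (guided, D.1598) example from above (again
   only a sketch), the vector built here would be { (long) n1, (long) n2,
   (long) step, (long) D.1598 }, i.e. the extra trailing arguments of
   the combined GOMP_parallel_loop_* entry points in libgomp; for a
   sections region it is just { number of sections }.  */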


/* Discover whether REGION is a combined parallel+workshare region.  */

static void
determine_parallel_type (struct omp_region *region)
{
  basic_block par_entry_bb, par_exit_bb;
  basic_block ws_entry_bb, ws_exit_bb;

  if (region == NULL || region->inner == NULL
      || region->exit == NULL || region->inner->exit == NULL
      || region->inner->cont == NULL)
    return;

  /* We only support parallel+for and parallel+sections.  */
  if (region->type != GIMPLE_OMP_PARALLEL
      || (region->inner->type != GIMPLE_OMP_FOR
	  && region->inner->type != GIMPLE_OMP_SECTIONS))
    return;

  /* Check for perfect nesting PAR_ENTRY_BB -> WS_ENTRY_BB and
     WS_EXIT_BB -> PAR_EXIT_BB.  */
  par_entry_bb = region->entry;
  par_exit_bb = region->exit;
  ws_entry_bb = region->inner->entry;
  ws_exit_bb = region->inner->exit;

  if (single_succ (par_entry_bb) == ws_entry_bb
      && single_succ (ws_exit_bb) == par_exit_bb
      && workshare_safe_to_combine_p (ws_entry_bb)
      && (gimple_omp_parallel_combined_p (last_stmt (par_entry_bb))
	  || (last_and_only_stmt (ws_entry_bb)
	      && last_and_only_stmt (par_exit_bb))))
    {
      gimple ws_stmt = last_stmt (ws_entry_bb);

      if (region->inner->type == GIMPLE_OMP_FOR)
	{
	  /* If this is a combined parallel loop, we need to determine
	     whether or not to use the combined library calls.  There
	     are two cases where we do not apply the transformation:
	     static loops and any kind of ordered loop.  In the first
	     case, we already open code the loop so there is no need
	     to do anything else.  In the latter case, the combined
	     parallel loop call would still need extra synchronization
	     to implement ordered semantics, so there would not be any
	     gain in using the combined call.  */
	  tree clauses = gimple_omp_for_clauses (ws_stmt);
	  tree c = find_omp_clause (clauses, OMP_CLAUSE_SCHEDULE);
	  if (c == NULL
	      || OMP_CLAUSE_SCHEDULE_KIND (c) == OMP_CLAUSE_SCHEDULE_STATIC
	      || find_omp_clause (clauses, OMP_CLAUSE_ORDERED))
	    {
	      region->is_combined_parallel = false;
	      region->inner->is_combined_parallel = false;
	      return;
	    }
	}

      region->is_combined_parallel = true;
      region->inner->is_combined_parallel = true;
      region->ws_args = get_ws_args_for (ws_stmt);
    }
}


/* Return true if EXPR is variable sized.  */

static inline bool
is_variable_sized (const_tree expr)
{
  return !TREE_CONSTANT (TYPE_SIZE_UNIT (TREE_TYPE (expr)));
}

/* Return true if DECL is a reference type.  */

static inline bool
is_reference (tree decl)
{
  return lang_hooks.decls.omp_privatize_by_reference (decl);
}

/* Look up variables in the decl or field splay trees.  The "maybe" form
   allows the variable not to have been entered; otherwise we assert
   that the variable must have been entered.  */

static inline tree
lookup_decl (tree var, omp_context *ctx)
{
  tree *n;
  n = (tree *) pointer_map_contains (ctx->cb.decl_map, var);
  return *n;
}

static inline tree
maybe_lookup_decl (const_tree var, omp_context *ctx)
{
  tree *n;
  n = (tree *) pointer_map_contains (ctx->cb.decl_map, var);
  return n ? *n : NULL_TREE;
}

static inline tree
lookup_field (tree var, omp_context *ctx)
{
  splay_tree_node n;
  n = splay_tree_lookup (ctx->field_map, (splay_tree_key) var);
  return (tree) n->value;
}

static inline tree
lookup_sfield (tree var, omp_context *ctx)
{
  splay_tree_node n;
  n = splay_tree_lookup (ctx->sfield_map
			 ? ctx->sfield_map : ctx->field_map,
			 (splay_tree_key) var);
  return (tree) n->value;
}

static inline tree
maybe_lookup_field (tree var, omp_context *ctx)
{
  splay_tree_node n;
  n = splay_tree_lookup (ctx->field_map, (splay_tree_key) var);
  return n ? (tree) n->value : NULL_TREE;
}

/* Return true if DECL should be copied by pointer.  SHARED_CTX is
   the parallel context if DECL is to be shared.  */

static bool
use_pointer_for_field (tree decl, omp_context *shared_ctx)
{
  if (AGGREGATE_TYPE_P (TREE_TYPE (decl)))
    return true;

  /* We can only use copy-in/copy-out semantics for shared variables
     when we know the value is not accessible from an outer scope.  */
  if (shared_ctx)
    {
      /* ??? Trivially accessible from anywhere.  But why would we even
	 be passing an address in this case?  Should we simply assert
	 this to be false, or should we have a cleanup pass that removes
	 these from the list of mappings?  */
      if (TREE_STATIC (decl) || DECL_EXTERNAL (decl))
	return true;

      /* For variables with DECL_HAS_VALUE_EXPR_P set, we cannot tell
	 without analyzing the expression whether or not its location
	 is accessible to anyone else.  In the case of nested parallel
	 regions it certainly may be.  */
      if (TREE_CODE (decl) != RESULT_DECL && DECL_HAS_VALUE_EXPR_P (decl))
	return true;

      /* Do not use copy-in/copy-out for variables that have their
	 address taken.  */
      if (TREE_ADDRESSABLE (decl))
	return true;

      /* Disallow copy-in/out in nested parallel if
	 decl is shared in outer parallel, otherwise
	 each thread could store the shared variable
	 in its own copy-in location, making the
	 variable no longer really shared.  */
      if (!TREE_READONLY (decl) && shared_ctx->is_nested)
	{
	  omp_context *up;

	  for (up = shared_ctx->outer; up; up = up->outer)
	    if (is_taskreg_ctx (up) && maybe_lookup_decl (decl, up))
	      break;

	  if (up)
	    {
	      tree c;

	      for (c = gimple_omp_taskreg_clauses (up->stmt);
		   c; c = OMP_CLAUSE_CHAIN (c))
		if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_SHARED
		    && OMP_CLAUSE_DECL (c) == decl)
		  break;

	      if (c)
		goto maybe_mark_addressable_and_ret;
	    }
	}

      /* For tasks avoid using copy-in/out, unless they are readonly
	 (in which case just copy-in is used).  As tasks can be
	 deferred or executed in a different thread, when GOMP_task
	 returns, the task hasn't necessarily terminated.  */
      if (!TREE_READONLY (decl) && is_task_ctx (shared_ctx))
	{
	  tree outer;
	maybe_mark_addressable_and_ret:
	  outer = maybe_lookup_decl_in_outer_ctx (decl, shared_ctx);
	  if (is_gimple_reg (outer))
	    {
	      /* Taking address of OUTER in lower_send_shared_vars
		 might need regimplification of everything that uses the
		 variable.  */
	      if (!task_shared_vars)
		task_shared_vars = BITMAP_ALLOC (NULL);
	      bitmap_set_bit (task_shared_vars, DECL_UID (outer));
	      TREE_ADDRESSABLE (outer) = 1;
	    }
	  return true;
	}
    }

  return false;
}
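
/* A rough guide to the cases above, not an exhaustive rule: a plain
   shared "int x" whose address is never taken can use copy-in/copy-out
   through the .omp_data_s record, while aggregates such as "int a[10]",
   addressable variables, variables shared by an enclosing parallel, and
   most non-readonly task variables are passed by pointer instead.  */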

/* Create a new VAR_DECL and copy information from VAR to it.  */

tree
copy_var_decl (tree var, tree name, tree type)
{
  tree copy = build_decl (DECL_SOURCE_LOCATION (var), VAR_DECL, name, type);

  TREE_ADDRESSABLE (copy) = TREE_ADDRESSABLE (var);
  TREE_THIS_VOLATILE (copy) = TREE_THIS_VOLATILE (var);
  DECL_GIMPLE_REG_P (copy) = DECL_GIMPLE_REG_P (var);
  DECL_ARTIFICIAL (copy) = DECL_ARTIFICIAL (var);
  DECL_IGNORED_P (copy) = DECL_IGNORED_P (var);
  DECL_CONTEXT (copy) = DECL_CONTEXT (var);
  TREE_USED (copy) = 1;
  DECL_SEEN_IN_BIND_EXPR_P (copy) = 1;

  return copy;
}

/* Construct a new automatic decl similar to VAR.  */

static tree
omp_copy_decl_2 (tree var, tree name, tree type, omp_context *ctx)
{
  tree copy = copy_var_decl (var, name, type);

  DECL_CONTEXT (copy) = current_function_decl;
  DECL_CHAIN (copy) = ctx->block_vars;
  ctx->block_vars = copy;

  return copy;
}

static tree
omp_copy_decl_1 (tree var, omp_context *ctx)
{
  return omp_copy_decl_2 (var, DECL_NAME (var), TREE_TYPE (var), ctx);
}

/* Build tree nodes to access the field for VAR on the receiver side.  */

static tree
build_receiver_ref (tree var, bool by_ref, omp_context *ctx)
{
  tree x, field = lookup_field (var, ctx);

  /* If the receiver record type was remapped in the child function,
     remap the field into the new record type.  */
  x = maybe_lookup_field (field, ctx);
  if (x != NULL)
    field = x;

  x = build_simple_mem_ref (ctx->receiver_decl);
  x = build3 (COMPONENT_REF, TREE_TYPE (field), x, field, NULL);
  if (by_ref)
    x = build_simple_mem_ref (x);

  return x;
}

/* Build tree nodes to access VAR in the scope outer to CTX.  In the case
   of a parallel, this is a component reference; for workshare constructs
   this is some variable.  */

static tree
build_outer_var_ref (tree var, omp_context *ctx)
{
  tree x;

  if (is_global_var (maybe_lookup_decl_in_outer_ctx (var, ctx)))
    x = var;
  else if (is_variable_sized (var))
    {
      x = TREE_OPERAND (DECL_VALUE_EXPR (var), 0);
      x = build_outer_var_ref (x, ctx);
      x = build_simple_mem_ref (x);
    }
  else if (is_taskreg_ctx (ctx))
    {
      bool by_ref = use_pointer_for_field (var, NULL);
      x = build_receiver_ref (var, by_ref, ctx);
    }
  else if (ctx->outer)
    x = lookup_decl (var, ctx->outer);
  else if (is_reference (var))
    /* This can happen with orphaned constructs.  If var is reference, it is
       possible it is shared and as such valid.  */
    x = var;
  else
    gcc_unreachable ();

  if (is_reference (var))
    x = build_simple_mem_ref (x);

  return x;
}

/* Build tree nodes to access the field for VAR on the sender side.  */

static tree
build_sender_ref (tree var, omp_context *ctx)
{
  tree field = lookup_sfield (var, ctx);
  return build3 (COMPONENT_REF, TREE_TYPE (field),
		 ctx->sender_decl, field, NULL);
}

/* Add a new field for VAR inside the structure CTX->SENDER_DECL.  */

static void
install_var_field (tree var, bool by_ref, int mask, omp_context *ctx)
{
  tree field, type, sfield = NULL_TREE;

  gcc_assert ((mask & 1) == 0
	      || !splay_tree_lookup (ctx->field_map, (splay_tree_key) var));
  gcc_assert ((mask & 2) == 0 || !ctx->sfield_map
	      || !splay_tree_lookup (ctx->sfield_map, (splay_tree_key) var));

  type = TREE_TYPE (var);
  if (by_ref)
    type = build_pointer_type (type);
  else if ((mask & 3) == 1 && is_reference (var))
    type = TREE_TYPE (type);

  field = build_decl (DECL_SOURCE_LOCATION (var),
		      FIELD_DECL, DECL_NAME (var), type);

  /* Remember what variable this field was created for.  This does have a
     side effect of making dwarf2out ignore this member, so for helpful
     debugging we clear it later in delete_omp_context.  */
  DECL_ABSTRACT_ORIGIN (field) = var;
  if (type == TREE_TYPE (var))
    {
      DECL_ALIGN (field) = DECL_ALIGN (var);
      DECL_USER_ALIGN (field) = DECL_USER_ALIGN (var);
      TREE_THIS_VOLATILE (field) = TREE_THIS_VOLATILE (var);
    }
  else
    DECL_ALIGN (field) = TYPE_ALIGN (type);

  if ((mask & 3) == 3)
    {
      insert_field_into_struct (ctx->record_type, field);
      if (ctx->srecord_type)
	{
	  sfield = build_decl (DECL_SOURCE_LOCATION (var),
			       FIELD_DECL, DECL_NAME (var), type);
	  DECL_ABSTRACT_ORIGIN (sfield) = var;
	  DECL_ALIGN (sfield) = DECL_ALIGN (field);
	  DECL_USER_ALIGN (sfield) = DECL_USER_ALIGN (field);
	  TREE_THIS_VOLATILE (sfield) = TREE_THIS_VOLATILE (field);
	  insert_field_into_struct (ctx->srecord_type, sfield);
	}
    }
  else
    {
      if (ctx->srecord_type == NULL_TREE)
	{
	  tree t;

	  ctx->srecord_type = lang_hooks.types.make_type (RECORD_TYPE);
	  ctx->sfield_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
	  for (t = TYPE_FIELDS (ctx->record_type); t ; t = TREE_CHAIN (t))
	    {
	      sfield = build_decl (DECL_SOURCE_LOCATION (var),
				   FIELD_DECL, DECL_NAME (t), TREE_TYPE (t));
	      DECL_ABSTRACT_ORIGIN (sfield) = DECL_ABSTRACT_ORIGIN (t);
	      insert_field_into_struct (ctx->srecord_type, sfield);
	      splay_tree_insert (ctx->sfield_map,
				 (splay_tree_key) DECL_ABSTRACT_ORIGIN (t),
				 (splay_tree_value) sfield);
	    }
	}
      sfield = field;
      insert_field_into_struct ((mask & 1) ? ctx->record_type
				: ctx->srecord_type, field);
    }

  if (mask & 1)
    splay_tree_insert (ctx->field_map, (splay_tree_key) var,
		       (splay_tree_value) field);
  if ((mask & 2) && ctx->sfield_map)
    splay_tree_insert (ctx->sfield_map, (splay_tree_key) var,
		       (splay_tree_value) sfield);
}
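
/* To summarize the MASK bits above (a sketch of the convention, derived
   from the code): bit 1 requests a field in ctx->record_type (what the
   child function receives), bit 2 a field in ctx->srecord_type (what the
   encountering thread sends for tasks), and mask == 3 requests both.
   E.g. install_var_field (decl, by_ref, 3, ctx) is the common case for
   parallel regions; when no separate srecord_type exists, lookup_sfield
   falls back to field_map, so a single FIELD_DECL serves both sides.  */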

static tree
install_var_local (tree var, omp_context *ctx)
{
  tree new_var = omp_copy_decl_1 (var, ctx);
  insert_decl_map (&ctx->cb, var, new_var);
  return new_var;
}

/* Adjust the replacement for DECL in CTX for the new context.  This means
   copying the DECL_VALUE_EXPR, and fixing up the type.  */

static void
fixup_remapped_decl (tree decl, omp_context *ctx, bool private_debug)
{
  tree new_decl, size;

  new_decl = lookup_decl (decl, ctx);

  TREE_TYPE (new_decl) = remap_type (TREE_TYPE (decl), &ctx->cb);

  if ((!TREE_CONSTANT (DECL_SIZE (new_decl)) || private_debug)
      && DECL_HAS_VALUE_EXPR_P (decl))
    {
      tree ve = DECL_VALUE_EXPR (decl);
      walk_tree (&ve, copy_tree_body_r, &ctx->cb, NULL);
      SET_DECL_VALUE_EXPR (new_decl, ve);
      DECL_HAS_VALUE_EXPR_P (new_decl) = 1;
    }

  if (!TREE_CONSTANT (DECL_SIZE (new_decl)))
    {
      size = remap_decl (DECL_SIZE (decl), &ctx->cb);
      if (size == error_mark_node)
	size = TYPE_SIZE (TREE_TYPE (new_decl));
      DECL_SIZE (new_decl) = size;

      size = remap_decl (DECL_SIZE_UNIT (decl), &ctx->cb);
      if (size == error_mark_node)
	size = TYPE_SIZE_UNIT (TREE_TYPE (new_decl));
      DECL_SIZE_UNIT (new_decl) = size;
    }
}

/* The callback for remap_decl.  Search all containing contexts for a
   mapping of the variable; this avoids having to duplicate the splay
   tree ahead of time.  We know a mapping doesn't already exist in the
   given context.  Create new mappings to implement default semantics.  */

static tree
omp_copy_decl (tree var, copy_body_data *cb)
{
  omp_context *ctx = (omp_context *) cb;
  tree new_var;

  if (TREE_CODE (var) == LABEL_DECL)
    {
      new_var = create_artificial_label (DECL_SOURCE_LOCATION (var));
      DECL_CONTEXT (new_var) = current_function_decl;
      insert_decl_map (&ctx->cb, var, new_var);
      return new_var;
    }

  while (!is_taskreg_ctx (ctx))
    {
      ctx = ctx->outer;
      if (ctx == NULL)
	return var;
      new_var = maybe_lookup_decl (var, ctx);
      if (new_var)
	return new_var;
    }

  if (is_global_var (var) || decl_function_context (var) != ctx->cb.src_fn)
    return var;

  return error_mark_node;
}


/* Debugging dumps for parallel regions.  */
void dump_omp_region (FILE *, struct omp_region *, int);
void debug_omp_region (struct omp_region *);
void debug_all_omp_regions (void);

/* Dump the parallel region tree rooted at REGION.  */

void
dump_omp_region (FILE *file, struct omp_region *region, int indent)
{
  fprintf (file, "%*sbb %d: %s\n", indent, "", region->entry->index,
	   gimple_code_name[region->type]);

  if (region->inner)
    dump_omp_region (file, region->inner, indent + 4);

  if (region->cont)
    {
      fprintf (file, "%*sbb %d: GIMPLE_OMP_CONTINUE\n", indent, "",
	       region->cont->index);
    }

  if (region->exit)
    fprintf (file, "%*sbb %d: GIMPLE_OMP_RETURN\n", indent, "",
	     region->exit->index);
  else
    fprintf (file, "%*s[no exit marker]\n", indent, "");

  if (region->next)
    dump_omp_region (file, region->next, indent);
}

DEBUG_FUNCTION void
debug_omp_region (struct omp_region *region)
{
  dump_omp_region (stderr, region, 0);
}

DEBUG_FUNCTION void
debug_all_omp_regions (void)
{
  dump_omp_region (stderr, root_omp_region, 0);
}


/* Create a new parallel region starting at STMT inside region PARENT.  */

struct omp_region *
new_omp_region (basic_block bb, enum gimple_code type,
		struct omp_region *parent)
{
  struct omp_region *region = XCNEW (struct omp_region);

  region->outer = parent;
  region->entry = bb;
  region->type = type;

  if (parent)
    {
      /* This is a nested region.  Add it to the list of inner
	 regions in PARENT.  */
      region->next = parent->inner;
      parent->inner = region;
    }
  else
    {
      /* This is a toplevel region.  Add it to the list of toplevel
	 regions in ROOT_OMP_REGION.  */
      region->next = root_omp_region;
      root_omp_region = region;
    }

  return region;
}

/* Release the memory associated with the region tree rooted at REGION.  */

static void
free_omp_region_1 (struct omp_region *region)
{
  struct omp_region *i, *n;

  for (i = region->inner; i ; i = n)
    {
      n = i->next;
      free_omp_region_1 (i);
    }

  free (region);
}

/* Release the memory for the entire omp region tree.  */

void
free_omp_regions (void)
{
  struct omp_region *r, *n;
  for (r = root_omp_region; r ; r = n)
    {
      n = r->next;
      free_omp_region_1 (r);
    }
  root_omp_region = NULL;
}


/* Create a new context, with OUTER_CTX being the surrounding context.  */

static omp_context *
new_omp_context (gimple stmt, omp_context *outer_ctx)
{
  omp_context *ctx = XCNEW (omp_context);

  splay_tree_insert (all_contexts, (splay_tree_key) stmt,
		     (splay_tree_value) ctx);
  ctx->stmt = stmt;

  if (outer_ctx)
    {
      ctx->outer = outer_ctx;
      ctx->cb = outer_ctx->cb;
      ctx->cb.block = NULL;
      ctx->depth = outer_ctx->depth + 1;
    }
  else
    {
      ctx->cb.src_fn = current_function_decl;
      ctx->cb.dst_fn = current_function_decl;
      ctx->cb.src_node = cgraph_get_node (current_function_decl);
      gcc_checking_assert (ctx->cb.src_node);
      ctx->cb.dst_node = ctx->cb.src_node;
      ctx->cb.src_cfun = cfun;
      ctx->cb.copy_decl = omp_copy_decl;
      ctx->cb.eh_lp_nr = 0;
      ctx->cb.transform_call_graph_edges = CB_CGE_MOVE;
      ctx->depth = 1;
    }

  ctx->cb.decl_map = pointer_map_create ();

  return ctx;
}

static gimple_seq maybe_catch_exception (gimple_seq);

/* Finalize task copyfn.  */

static void
finalize_task_copyfn (gimple task_stmt)
{
  struct function *child_cfun;
  tree child_fn, old_fn;
  gimple_seq seq, new_seq;
  gimple bind;

  child_fn = gimple_omp_task_copy_fn (task_stmt);
  if (child_fn == NULL_TREE)
    return;

  child_cfun = DECL_STRUCT_FUNCTION (child_fn);

  /* Inform the callgraph about the new function.  */
  DECL_STRUCT_FUNCTION (child_fn)->curr_properties
    = cfun->curr_properties;

  old_fn = current_function_decl;
  push_cfun (child_cfun);
  current_function_decl = child_fn;
  bind = gimplify_body (&DECL_SAVED_TREE (child_fn), child_fn, false);
  seq = gimple_seq_alloc ();
  gimple_seq_add_stmt (&seq, bind);
  new_seq = maybe_catch_exception (seq);
  if (new_seq != seq)
    {
      bind = gimple_build_bind (NULL, new_seq, NULL);
      seq = gimple_seq_alloc ();
      gimple_seq_add_stmt (&seq, bind);
    }
  gimple_set_body (child_fn, seq);
  pop_cfun ();
  current_function_decl = old_fn;

  cgraph_add_new_function (child_fn, false);
}

/* Destroy an omp_context data structure.  Called through the splay tree
   value delete callback.  */

static void
delete_omp_context (splay_tree_value value)
{
  omp_context *ctx = (omp_context *) value;

  pointer_map_destroy (ctx->cb.decl_map);

  if (ctx->field_map)
    splay_tree_delete (ctx->field_map);
  if (ctx->sfield_map)
    splay_tree_delete (ctx->sfield_map);

  /* We hijacked DECL_ABSTRACT_ORIGIN earlier.  We need to clear it before
     it produces corrupt debug information.  */
  if (ctx->record_type)
    {
      tree t;
      for (t = TYPE_FIELDS (ctx->record_type); t ; t = DECL_CHAIN (t))
	DECL_ABSTRACT_ORIGIN (t) = NULL;
    }
  if (ctx->srecord_type)
    {
      tree t;
      for (t = TYPE_FIELDS (ctx->srecord_type); t ; t = DECL_CHAIN (t))
	DECL_ABSTRACT_ORIGIN (t) = NULL;
    }

  if (is_task_ctx (ctx))
    finalize_task_copyfn (ctx->stmt);

  XDELETE (ctx);
}

/* Fix up RECEIVER_DECL with a type that has been remapped to the child
   context.  */

static void
fixup_child_record_type (omp_context *ctx)
{
  tree f, type = ctx->record_type;

  /* ??? It isn't sufficient to just call remap_type here, because
     variably_modified_type_p doesn't work the way we expect for
     record types.  Testing each field for whether it needs remapping
     and creating a new record by hand works, however.  */
  for (f = TYPE_FIELDS (type); f ; f = DECL_CHAIN (f))
    if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
      break;
  if (f)
    {
      tree name, new_fields = NULL;

      type = lang_hooks.types.make_type (RECORD_TYPE);
      name = DECL_NAME (TYPE_NAME (ctx->record_type));
      name = build_decl (DECL_SOURCE_LOCATION (ctx->receiver_decl),
			 TYPE_DECL, name, type);
      TYPE_NAME (type) = name;

      for (f = TYPE_FIELDS (ctx->record_type); f ; f = DECL_CHAIN (f))
	{
	  tree new_f = copy_node (f);
	  DECL_CONTEXT (new_f) = type;
	  TREE_TYPE (new_f) = remap_type (TREE_TYPE (f), &ctx->cb);
	  DECL_CHAIN (new_f) = new_fields;
	  walk_tree (&DECL_SIZE (new_f), copy_tree_body_r, &ctx->cb, NULL);
	  walk_tree (&DECL_SIZE_UNIT (new_f), copy_tree_body_r,
		     &ctx->cb, NULL);
	  walk_tree (&DECL_FIELD_OFFSET (new_f), copy_tree_body_r,
		     &ctx->cb, NULL);
	  new_fields = new_f;

	  /* Arrange to be able to look up the receiver field
	     given the sender field.  */
	  splay_tree_insert (ctx->field_map, (splay_tree_key) f,
			     (splay_tree_value) new_f);
	}
      TYPE_FIELDS (type) = nreverse (new_fields);
      layout_type (type);
    }

  TREE_TYPE (ctx->receiver_decl) = build_pointer_type (type);
}

/* Instantiate decls as necessary in CTX to satisfy the data sharing
   specified by CLAUSES.  */

static void
scan_sharing_clauses (tree clauses, omp_context *ctx)
{
  tree c, decl;
  bool scan_array_reductions = false;

  for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
    {
      bool by_ref;

      switch (OMP_CLAUSE_CODE (c))
	{
	case OMP_CLAUSE_PRIVATE:
	  decl = OMP_CLAUSE_DECL (c);
	  if (OMP_CLAUSE_PRIVATE_OUTER_REF (c))
	    goto do_private;
	  else if (!is_variable_sized (decl))
	    install_var_local (decl, ctx);
	  break;

	case OMP_CLAUSE_SHARED:
	  gcc_assert (is_taskreg_ctx (ctx));
	  decl = OMP_CLAUSE_DECL (c);
	  gcc_assert (!COMPLETE_TYPE_P (TREE_TYPE (decl))
		      || !is_variable_sized (decl));
	  /* Global variables don't need to be copied,
	     the receiver side will use them directly.  */
	  if (is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx)))
	    break;
	  by_ref = use_pointer_for_field (decl, ctx);
	  if (! TREE_READONLY (decl)
	      || TREE_ADDRESSABLE (decl)
	      || by_ref
	      || is_reference (decl))
	    {
	      install_var_field (decl, by_ref, 3, ctx);
	      install_var_local (decl, ctx);
	      break;
	    }
	  /* We don't need to copy const scalar vars back.  */
	  OMP_CLAUSE_SET_CODE (c, OMP_CLAUSE_FIRSTPRIVATE);
	  goto do_private;

	case OMP_CLAUSE_LASTPRIVATE:
	  /* Let the corresponding firstprivate clause create
	     the variable.  */
	  if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
	    break;
	  /* FALLTHRU */

	case OMP_CLAUSE_FIRSTPRIVATE:
	case OMP_CLAUSE_REDUCTION:
	  decl = OMP_CLAUSE_DECL (c);
	do_private:
	  if (is_variable_sized (decl))
	    {
	      if (is_task_ctx (ctx))
		install_var_field (decl, false, 1, ctx);
	      break;
	    }
	  else if (is_taskreg_ctx (ctx))
	    {
	      bool global
		= is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx));
	      by_ref = use_pointer_for_field (decl, NULL);

	      if (is_task_ctx (ctx)
		  && (global || by_ref || is_reference (decl)))
		{
		  install_var_field (decl, false, 1, ctx);
		  if (!global)
		    install_var_field (decl, by_ref, 2, ctx);
		}
	      else if (!global)
		install_var_field (decl, by_ref, 3, ctx);
	    }
	  install_var_local (decl, ctx);
	  break;

	case OMP_CLAUSE_COPYPRIVATE:
	case OMP_CLAUSE_COPYIN:
	  decl = OMP_CLAUSE_DECL (c);
	  by_ref = use_pointer_for_field (decl, NULL);
	  install_var_field (decl, by_ref, 3, ctx);
	  break;

	case OMP_CLAUSE_DEFAULT:
	  ctx->default_kind = OMP_CLAUSE_DEFAULT_KIND (c);
	  break;

	case OMP_CLAUSE_FINAL:
	case OMP_CLAUSE_IF:
	case OMP_CLAUSE_NUM_THREADS:
	case OMP_CLAUSE_SCHEDULE:
	  if (ctx->outer)
	    scan_omp_op (&OMP_CLAUSE_OPERAND (c, 0), ctx->outer);
	  break;

	case OMP_CLAUSE_NOWAIT:
	case OMP_CLAUSE_ORDERED:
	case OMP_CLAUSE_COLLAPSE:
	case OMP_CLAUSE_UNTIED:
	case OMP_CLAUSE_MERGEABLE:
	  break;

	default:
	  gcc_unreachable ();
	}
    }

  for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
    {
      switch (OMP_CLAUSE_CODE (c))
	{
	case OMP_CLAUSE_LASTPRIVATE:
	  /* Let the corresponding firstprivate clause create
	     the variable.  */
	  if (OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c))
	    scan_array_reductions = true;
	  if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
	    break;
	  /* FALLTHRU */

	case OMP_CLAUSE_PRIVATE:
	case OMP_CLAUSE_FIRSTPRIVATE:
	case OMP_CLAUSE_REDUCTION:
	  decl = OMP_CLAUSE_DECL (c);
	  if (is_variable_sized (decl))
	    install_var_local (decl, ctx);
	  fixup_remapped_decl (decl, ctx,
			       OMP_CLAUSE_CODE (c) == OMP_CLAUSE_PRIVATE
			       && OMP_CLAUSE_PRIVATE_DEBUG (c));
	  if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
	      && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
	    scan_array_reductions = true;
	  break;

	case OMP_CLAUSE_SHARED:
	  decl = OMP_CLAUSE_DECL (c);
	  if (! is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx)))
	    fixup_remapped_decl (decl, ctx, false);
	  break;

	case OMP_CLAUSE_COPYPRIVATE:
	case OMP_CLAUSE_COPYIN:
	case OMP_CLAUSE_DEFAULT:
	case OMP_CLAUSE_IF:
	case OMP_CLAUSE_NUM_THREADS:
	case OMP_CLAUSE_SCHEDULE:
	case OMP_CLAUSE_NOWAIT:
	case OMP_CLAUSE_ORDERED:
	case OMP_CLAUSE_COLLAPSE:
	case OMP_CLAUSE_UNTIED:
	case OMP_CLAUSE_FINAL:
	case OMP_CLAUSE_MERGEABLE:
	  break;

	default:
	  gcc_unreachable ();
	}
    }

  if (scan_array_reductions)
    for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
      if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
	  && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
	{
	  scan_omp (OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c), ctx);
	  scan_omp (OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c), ctx);
	}
      else if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE
	       && OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c))
	scan_omp (OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c), ctx);
}

/* Create a new name for omp child function.  Returns an identifier.  */

static GTY(()) unsigned int tmp_ompfn_id_num;

static tree
create_omp_child_function_name (bool task_copy)
{
  return (clone_function_name (current_function_decl,
			       task_copy ? "_omp_cpyfn" : "_omp_fn"));
}

/* Build a decl for the omp child function.  It'll not contain a body
   yet, just the bare decl.  */

static void
create_omp_child_function (omp_context *ctx, bool task_copy)
{
  tree decl, type, name, t;

  name = create_omp_child_function_name (task_copy);
  if (task_copy)
    type = build_function_type_list (void_type_node, ptr_type_node,
				     ptr_type_node, NULL_TREE);
  else
    type = build_function_type_list (void_type_node, ptr_type_node, NULL_TREE);

  decl = build_decl (gimple_location (ctx->stmt),
		     FUNCTION_DECL, name, type);

  if (!task_copy)
    ctx->cb.dst_fn = decl;
  else
    gimple_omp_task_set_copy_fn (ctx->stmt, decl);

  TREE_STATIC (decl) = 1;
  TREE_USED (decl) = 1;
  DECL_ARTIFICIAL (decl) = 1;
  DECL_NAMELESS (decl) = 1;
  DECL_IGNORED_P (decl) = 0;
  TREE_PUBLIC (decl) = 0;
  DECL_UNINLINABLE (decl) = 1;
  DECL_EXTERNAL (decl) = 0;
  DECL_CONTEXT (decl) = NULL_TREE;
  DECL_INITIAL (decl) = make_node (BLOCK);

  t = build_decl (DECL_SOURCE_LOCATION (decl),
		  RESULT_DECL, NULL_TREE, void_type_node);
  DECL_ARTIFICIAL (t) = 1;
  DECL_IGNORED_P (t) = 1;
  DECL_CONTEXT (t) = decl;
  DECL_RESULT (decl) = t;

  t = build_decl (DECL_SOURCE_LOCATION (decl),
		  PARM_DECL, get_identifier (".omp_data_i"), ptr_type_node);
  DECL_ARTIFICIAL (t) = 1;
  DECL_NAMELESS (t) = 1;
  DECL_ARG_TYPE (t) = ptr_type_node;
  DECL_CONTEXT (t) = current_function_decl;
  TREE_USED (t) = 1;
  DECL_ARGUMENTS (decl) = t;
  if (!task_copy)
    ctx->receiver_decl = t;
  else
    {
      t = build_decl (DECL_SOURCE_LOCATION (decl),
		      PARM_DECL, get_identifier (".omp_data_o"),
		      ptr_type_node);
      DECL_ARTIFICIAL (t) = 1;
      DECL_NAMELESS (t) = 1;
      DECL_ARG_TYPE (t) = ptr_type_node;
      DECL_CONTEXT (t) = current_function_decl;
      TREE_USED (t) = 1;
      TREE_ADDRESSABLE (t) = 1;
      DECL_CHAIN (t) = DECL_ARGUMENTS (decl);
      DECL_ARGUMENTS (decl) = t;
    }

  /* Allocate memory for the function structure.  The call to
     allocate_struct_function clobbers CFUN, so we need to restore
     it afterward.  */
  push_struct_function (decl);
  cfun->function_end_locus = gimple_location (ctx->stmt);
  pop_cfun ();
}
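
/* The decl built above corresponds roughly to (an illustrative sketch,
   using the naming produced by clone_function_name):

	static void foo._omp_fn.0 (void *.omp_data_i);

   or, for a task copy function,

	static void foo._omp_cpyfn.1 (void *.omp_data_o, void *.omp_data_i);

   The arguments are declared with ptr_type_node here; the precise
   record pointer type is only installed later by
   fixup_child_record_type.  */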


/* Scan an OpenMP parallel directive.  */

static void
scan_omp_parallel (gimple_stmt_iterator *gsi, omp_context *outer_ctx)
{
  omp_context *ctx;
  tree name;
  gimple stmt = gsi_stmt (*gsi);

  /* Ignore parallel directives with empty bodies, unless there
     are copyin clauses.  */
  if (optimize > 0
      && empty_body_p (gimple_omp_body (stmt))
      && find_omp_clause (gimple_omp_parallel_clauses (stmt),
			  OMP_CLAUSE_COPYIN) == NULL)
    {
      gsi_replace (gsi, gimple_build_nop (), false);
      return;
    }

  ctx = new_omp_context (stmt, outer_ctx);
  if (taskreg_nesting_level > 1)
    ctx->is_nested = true;
  ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
  ctx->default_kind = OMP_CLAUSE_DEFAULT_SHARED;
  ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
  name = create_tmp_var_name (".omp_data_s");
  name = build_decl (gimple_location (stmt),
		     TYPE_DECL, name, ctx->record_type);
  DECL_ARTIFICIAL (name) = 1;
  DECL_NAMELESS (name) = 1;
  TYPE_NAME (ctx->record_type) = name;
  create_omp_child_function (ctx, false);
  gimple_omp_parallel_set_child_fn (stmt, ctx->cb.dst_fn);

  scan_sharing_clauses (gimple_omp_parallel_clauses (stmt), ctx);
  scan_omp (gimple_omp_body (stmt), ctx);

  if (TYPE_FIELDS (ctx->record_type) == NULL)
    ctx->record_type = ctx->receiver_decl = NULL;
  else
    {
      layout_type (ctx->record_type);
      fixup_child_record_type (ctx);
    }
}

/* Scan an OpenMP task directive.  */

static void
scan_omp_task (gimple_stmt_iterator *gsi, omp_context *outer_ctx)
{
  omp_context *ctx;
  tree name, t;
  gimple stmt = gsi_stmt (*gsi);
  location_t loc = gimple_location (stmt);

  /* Ignore task directives with empty bodies.  */
  if (optimize > 0
      && empty_body_p (gimple_omp_body (stmt)))
    {
      gsi_replace (gsi, gimple_build_nop (), false);
      return;
    }

  ctx = new_omp_context (stmt, outer_ctx);
  if (taskreg_nesting_level > 1)
    ctx->is_nested = true;
  ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
  ctx->default_kind = OMP_CLAUSE_DEFAULT_SHARED;
  ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
  name = create_tmp_var_name (".omp_data_s");
  name = build_decl (gimple_location (stmt),
		     TYPE_DECL, name, ctx->record_type);
  DECL_ARTIFICIAL (name) = 1;
  DECL_NAMELESS (name) = 1;
  TYPE_NAME (ctx->record_type) = name;
  create_omp_child_function (ctx, false);
  gimple_omp_task_set_child_fn (stmt, ctx->cb.dst_fn);

  scan_sharing_clauses (gimple_omp_task_clauses (stmt), ctx);

  if (ctx->srecord_type)
    {
      name = create_tmp_var_name (".omp_data_a");
      name = build_decl (gimple_location (stmt),
			 TYPE_DECL, name, ctx->srecord_type);
      DECL_ARTIFICIAL (name) = 1;
      DECL_NAMELESS (name) = 1;
      TYPE_NAME (ctx->srecord_type) = name;
      create_omp_child_function (ctx, true);
    }

  scan_omp (gimple_omp_body (stmt), ctx);

  if (TYPE_FIELDS (ctx->record_type) == NULL)
    {
      ctx->record_type = ctx->receiver_decl = NULL;
      t = build_int_cst (long_integer_type_node, 0);
      gimple_omp_task_set_arg_size (stmt, t);
      t = build_int_cst (long_integer_type_node, 1);
      gimple_omp_task_set_arg_align (stmt, t);
    }
  else
    {
      tree *p, vla_fields = NULL_TREE, *q = &vla_fields;
      /* Move VLA fields to the end.  */
      p = &TYPE_FIELDS (ctx->record_type);
      while (*p)
	if (!TYPE_SIZE_UNIT (TREE_TYPE (*p))
	    || ! TREE_CONSTANT (TYPE_SIZE_UNIT (TREE_TYPE (*p))))
	  {
	    *q = *p;
	    *p = TREE_CHAIN (*p);
	    TREE_CHAIN (*q) = NULL_TREE;
	    q = &TREE_CHAIN (*q);
	  }
	else
	  p = &DECL_CHAIN (*p);
      *p = vla_fields;
      layout_type (ctx->record_type);
      fixup_child_record_type (ctx);
      if (ctx->srecord_type)
	layout_type (ctx->srecord_type);
      t = fold_convert_loc (loc, long_integer_type_node,
			    TYPE_SIZE_UNIT (ctx->record_type));
      gimple_omp_task_set_arg_size (stmt, t);
      t = build_int_cst (long_integer_type_node,
			 TYPE_ALIGN_UNIT (ctx->record_type));
      gimple_omp_task_set_arg_align (stmt, t);
    }
}

1e8e9920 1751
773c5ba7 1752/* Scan an OpenMP loop directive. */
1e8e9920 1753
1754static void
75a70cf9 1755scan_omp_for (gimple stmt, omp_context *outer_ctx)
1e8e9920 1756{
773c5ba7 1757 omp_context *ctx;
75a70cf9 1758 size_t i;
1e8e9920 1759
773c5ba7 1760 ctx = new_omp_context (stmt, outer_ctx);
1e8e9920 1761
75a70cf9 1762 scan_sharing_clauses (gimple_omp_for_clauses (stmt), ctx);
1e8e9920 1763
75a70cf9 1764 scan_omp (gimple_omp_for_pre_body (stmt), ctx);
1765 for (i = 0; i < gimple_omp_for_collapse (stmt); i++)
fd6481cf 1766 {
75a70cf9 1767 scan_omp_op (gimple_omp_for_index_ptr (stmt, i), ctx);
1768 scan_omp_op (gimple_omp_for_initial_ptr (stmt, i), ctx);
1769 scan_omp_op (gimple_omp_for_final_ptr (stmt, i), ctx);
1770 scan_omp_op (gimple_omp_for_incr_ptr (stmt, i), ctx);
fd6481cf 1771 }
75a70cf9 1772 scan_omp (gimple_omp_body (stmt), ctx);
1e8e9920 1773}
1774
1775/* Scan an OpenMP sections directive. */
1776
1777static void
75a70cf9 1778scan_omp_sections (gimple stmt, omp_context *outer_ctx)
1e8e9920 1779{
1e8e9920 1780 omp_context *ctx;
1781
1782 ctx = new_omp_context (stmt, outer_ctx);
75a70cf9 1783 scan_sharing_clauses (gimple_omp_sections_clauses (stmt), ctx);
1784 scan_omp (gimple_omp_body (stmt), ctx);
1e8e9920 1785}
1786
1787/* Scan an OpenMP single directive. */
1788
1789static void
75a70cf9 1790scan_omp_single (gimple stmt, omp_context *outer_ctx)
1e8e9920 1791{
1e8e9920 1792 omp_context *ctx;
1793 tree name;
1794
1795 ctx = new_omp_context (stmt, outer_ctx);
1796 ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
1797 ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
1798 name = create_tmp_var_name (".omp_copy_s");
e60a6f7b 1799 name = build_decl (gimple_location (stmt),
1800 TYPE_DECL, name, ctx->record_type);
1e8e9920 1801 TYPE_NAME (ctx->record_type) = name;
1802
75a70cf9 1803 scan_sharing_clauses (gimple_omp_single_clauses (stmt), ctx);
1804 scan_omp (gimple_omp_body (stmt), ctx);
1e8e9920 1805
1806 if (TYPE_FIELDS (ctx->record_type) == NULL)
1807 ctx->record_type = NULL;
1808 else
1809 layout_type (ctx->record_type);
1810}
1811
1e8e9920 1812
c1d127dd 1813/* Check OpenMP nesting restrictions. */
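/* An illustrative sketch (not from the original sources): given

	#pragma omp for
	for (i = 0; i < n; i++)
	  {
	    #pragma omp single	<-- work-sharing inside work-sharing
	    f (i);
	  }

   the context walk below reaches the enclosing GIMPLE_OMP_FOR and
   warns, whereas hitting an intervening GIMPLE_OMP_PARALLEL stops the
   walk silently, since a new parallel region resets the nesting.  */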
static void
check_omp_nesting_restrictions (gimple stmt, omp_context *ctx)
{
  switch (gimple_code (stmt))
    {
    case GIMPLE_OMP_FOR:
    case GIMPLE_OMP_SECTIONS:
    case GIMPLE_OMP_SINGLE:
    case GIMPLE_CALL:
      for (; ctx != NULL; ctx = ctx->outer)
        switch (gimple_code (ctx->stmt))
          {
          case GIMPLE_OMP_FOR:
          case GIMPLE_OMP_SECTIONS:
          case GIMPLE_OMP_SINGLE:
          case GIMPLE_OMP_ORDERED:
          case GIMPLE_OMP_MASTER:
          case GIMPLE_OMP_TASK:
            if (is_gimple_call (stmt))
              {
                warning (0, "barrier region may not be closely nested inside "
                            "of work-sharing, critical, ordered, master or "
                            "explicit task region");
                return;
              }
            warning (0, "work-sharing region may not be closely nested inside "
                        "of work-sharing, critical, ordered, master or explicit "
                        "task region");
            return;
          case GIMPLE_OMP_PARALLEL:
            return;
          default:
            break;
          }
      break;
    case GIMPLE_OMP_MASTER:
      for (; ctx != NULL; ctx = ctx->outer)
        switch (gimple_code (ctx->stmt))
          {
          case GIMPLE_OMP_FOR:
          case GIMPLE_OMP_SECTIONS:
          case GIMPLE_OMP_SINGLE:
          case GIMPLE_OMP_TASK:
            warning (0, "master region may not be closely nested inside "
                        "of work-sharing or explicit task region");
            return;
          case GIMPLE_OMP_PARALLEL:
            return;
          default:
            break;
          }
      break;
    case GIMPLE_OMP_ORDERED:
      for (; ctx != NULL; ctx = ctx->outer)
        switch (gimple_code (ctx->stmt))
          {
          case GIMPLE_OMP_CRITICAL:
          case GIMPLE_OMP_TASK:
            warning (0, "ordered region may not be closely nested inside "
                        "of critical or explicit task region");
            return;
          case GIMPLE_OMP_FOR:
            if (find_omp_clause (gimple_omp_for_clauses (ctx->stmt),
                                 OMP_CLAUSE_ORDERED) == NULL)
              warning (0, "ordered region must be closely nested inside "
                          "a loop region with an ordered clause");
            return;
          case GIMPLE_OMP_PARALLEL:
            return;
          default:
            break;
          }
      break;
    case GIMPLE_OMP_CRITICAL:
      for (; ctx != NULL; ctx = ctx->outer)
        if (gimple_code (ctx->stmt) == GIMPLE_OMP_CRITICAL
            && (gimple_omp_critical_name (stmt)
                == gimple_omp_critical_name (ctx->stmt)))
          {
            warning (0, "critical region may not be nested inside a critical "
                        "region with the same name");
            return;
          }
      break;
    default:
      break;
    }
}

/* Helper function for scan_omp.

   Callback for walk_tree or operators in walk_gimple_stmt used to
   scan for OpenMP directives in TP.  */

static tree
scan_omp_1_op (tree *tp, int *walk_subtrees, void *data)
{
  struct walk_stmt_info *wi = (struct walk_stmt_info *) data;
  omp_context *ctx = (omp_context *) wi->info;
  tree t = *tp;

  switch (TREE_CODE (t))
    {
    case VAR_DECL:
    case PARM_DECL:
    case LABEL_DECL:
    case RESULT_DECL:
      if (ctx)
        *tp = remap_decl (t, &ctx->cb);
      break;

    default:
      if (ctx && TYPE_P (t))
        *tp = remap_type (t, &ctx->cb);
      else if (!DECL_P (t))
        {
          *walk_subtrees = 1;
          if (ctx)
            {
              tree tem = remap_type (TREE_TYPE (t), &ctx->cb);
              if (tem != TREE_TYPE (t))
                {
                  if (TREE_CODE (t) == INTEGER_CST)
                    *tp = build_int_cst_wide (tem,
                                              TREE_INT_CST_LOW (t),
                                              TREE_INT_CST_HIGH (t));
                  else
                    TREE_TYPE (t) = tem;
                }
            }
        }
      break;
    }

  return NULL_TREE;
}


/* Helper function for scan_omp.

   Callback for walk_gimple_stmt used to scan for OpenMP directives in
   the current statement in GSI.  */

static tree
scan_omp_1_stmt (gimple_stmt_iterator *gsi, bool *handled_ops_p,
                 struct walk_stmt_info *wi)
{
  gimple stmt = gsi_stmt (*gsi);
  omp_context *ctx = (omp_context *) wi->info;

  if (gimple_has_location (stmt))
    input_location = gimple_location (stmt);

  /* Check the OpenMP nesting restrictions.  */
  if (ctx != NULL)
    {
      if (is_gimple_omp (stmt))
        check_omp_nesting_restrictions (stmt, ctx);
      else if (is_gimple_call (stmt))
        {
          tree fndecl = gimple_call_fndecl (stmt);
          if (fndecl && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL
              && DECL_FUNCTION_CODE (fndecl) == BUILT_IN_GOMP_BARRIER)
            check_omp_nesting_restrictions (stmt, ctx);
        }
    }

  *handled_ops_p = true;

  switch (gimple_code (stmt))
    {
    case GIMPLE_OMP_PARALLEL:
      taskreg_nesting_level++;
      scan_omp_parallel (gsi, ctx);
      taskreg_nesting_level--;
      break;

    case GIMPLE_OMP_TASK:
      taskreg_nesting_level++;
      scan_omp_task (gsi, ctx);
      taskreg_nesting_level--;
      break;

    case GIMPLE_OMP_FOR:
      scan_omp_for (stmt, ctx);
      break;

    case GIMPLE_OMP_SECTIONS:
      scan_omp_sections (stmt, ctx);
      break;

    case GIMPLE_OMP_SINGLE:
      scan_omp_single (stmt, ctx);
      break;

    case GIMPLE_OMP_SECTION:
    case GIMPLE_OMP_MASTER:
    case GIMPLE_OMP_ORDERED:
    case GIMPLE_OMP_CRITICAL:
      ctx = new_omp_context (stmt, ctx);
      scan_omp (gimple_omp_body (stmt), ctx);
      break;

    case GIMPLE_BIND:
      {
        tree var;

        *handled_ops_p = false;
        if (ctx)
          for (var = gimple_bind_vars (stmt); var ; var = DECL_CHAIN (var))
            insert_decl_map (&ctx->cb, var, var);
      }
      break;
    default:
      *handled_ops_p = false;
      break;
    }

  return NULL_TREE;
}


/* Scan all the statements starting at the current statement.  CTX
   contains context information about the OpenMP directives and
   clauses found during the scan.  */

static void
scan_omp (gimple_seq body, omp_context *ctx)
{
  location_t saved_location;
  struct walk_stmt_info wi;

  memset (&wi, 0, sizeof (wi));
  wi.info = ctx;
  wi.want_locations = true;

  saved_location = input_location;
  walk_gimple_seq (body, scan_omp_1_stmt, scan_omp_1_op, &wi);
  input_location = saved_location;
}

/* Re-gimplification and code generation routines.  */

/* Build a call to GOMP_barrier.  */

static tree
build_omp_barrier (void)
{
  return build_call_expr (builtin_decl_explicit (BUILT_IN_GOMP_BARRIER), 0);
}

/* If a context was created for STMT when it was scanned, return it.  */

static omp_context *
maybe_lookup_ctx (gimple stmt)
{
  splay_tree_node n;
  n = splay_tree_lookup (all_contexts, (splay_tree_key) stmt);
  return n ? (omp_context *) n->value : NULL;
}


/* Find the mapping for DECL in CTX or the immediately enclosing
   context that has a mapping for DECL.

   If CTX is a nested parallel directive, we may have to use the decl
   mappings created in CTX's parent context.  Suppose that we have the
   following parallel nesting (variable UIDs showed for clarity):

	iD.1562 = 0;
	#omp parallel shared(iD.1562)		-> outer parallel
	  iD.1562 = iD.1562 + 1;

	  #omp parallel shared (iD.1562)	-> inner parallel
	     iD.1562 = iD.1562 - 1;

   Each parallel structure will create a distinct .omp_data_s structure
   for copying iD.1562 in/out of the directive:

	outer parallel		.omp_data_s.1.i -> iD.1562
	inner parallel		.omp_data_s.2.i -> iD.1562

   A shared variable mapping will produce a copy-out operation before
   the parallel directive and a copy-in operation after it.  So, in
   this case we would have:

	iD.1562 = 0;
	.omp_data_o.1.i = iD.1562;
	#omp parallel shared(iD.1562)		-> outer parallel
	  .omp_data_i.1 = &.omp_data_o.1
	  .omp_data_i.1->i = .omp_data_i.1->i + 1;

	  .omp_data_o.2.i = iD.1562;		-> **
	  #omp parallel shared(iD.1562)		-> inner parallel
	    .omp_data_i.2 = &.omp_data_o.2
	    .omp_data_i.2->i = .omp_data_i.2->i - 1;


   ** This is a problem.  The symbol iD.1562 cannot be referenced
   inside the body of the outer parallel region.  But since we are
   emitting this copy operation while expanding the inner parallel
   directive, we need to access the CTX structure of the outer
   parallel directive to get the correct mapping:

	.omp_data_o.2.i = .omp_data_i.1->i

   Since there may be other workshare or parallel directives enclosing
   the parallel directive, it may be necessary to walk up the context
   parent chain.  This is not a problem in general because nested
   parallelism happens only rarely.  */

static tree
lookup_decl_in_outer_ctx (tree decl, omp_context *ctx)
{
  tree t;
  omp_context *up;

  for (up = ctx->outer, t = NULL; up && t == NULL; up = up->outer)
    t = maybe_lookup_decl (decl, up);

  gcc_assert (!ctx->is_nested || t || is_global_var (decl));

  return t ? t : decl;
}


/* Similar to lookup_decl_in_outer_ctx, but return DECL if not found
   in outer contexts.  */

static tree
maybe_lookup_decl_in_outer_ctx (tree decl, omp_context *ctx)
{
  tree t = NULL;
  omp_context *up;

  for (up = ctx->outer, t = NULL; up && t == NULL; up = up->outer)
    t = maybe_lookup_decl (decl, up);

  return t ? t : decl;
}


/* Construct the initialization value for reduction CLAUSE.  */
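/* For instance (illustrative), reduction(+:x), (|:x), (^:x) and (||:x)
   start from 0; reduction(*:x) and (&&:x) start from 1; reduction(&:x)
   starts from ~0; and reduction(max:x) starts from the smallest value
   of the type (-INF for floats honoring infinities), mirroring the
   switch below.  */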

tree
omp_reduction_init (tree clause, tree type)
{
  location_t loc = OMP_CLAUSE_LOCATION (clause);
  switch (OMP_CLAUSE_REDUCTION_CODE (clause))
    {
    case PLUS_EXPR:
    case MINUS_EXPR:
    case BIT_IOR_EXPR:
    case BIT_XOR_EXPR:
    case TRUTH_OR_EXPR:
    case TRUTH_ORIF_EXPR:
    case TRUTH_XOR_EXPR:
    case NE_EXPR:
      return build_zero_cst (type);

    case MULT_EXPR:
    case TRUTH_AND_EXPR:
    case TRUTH_ANDIF_EXPR:
    case EQ_EXPR:
      return fold_convert_loc (loc, type, integer_one_node);

    case BIT_AND_EXPR:
      return fold_convert_loc (loc, type, integer_minus_one_node);

    case MAX_EXPR:
      if (SCALAR_FLOAT_TYPE_P (type))
        {
          REAL_VALUE_TYPE max, min;
          if (HONOR_INFINITIES (TYPE_MODE (type)))
            {
              real_inf (&max);
              real_arithmetic (&min, NEGATE_EXPR, &max, NULL);
            }
          else
            real_maxval (&min, 1, TYPE_MODE (type));
          return build_real (type, min);
        }
      else
        {
          gcc_assert (INTEGRAL_TYPE_P (type));
          return TYPE_MIN_VALUE (type);
        }

    case MIN_EXPR:
      if (SCALAR_FLOAT_TYPE_P (type))
        {
          REAL_VALUE_TYPE max;
          if (HONOR_INFINITIES (TYPE_MODE (type)))
            real_inf (&max);
          else
            real_maxval (&max, 0, TYPE_MODE (type));
          return build_real (type, max);
        }
      else
        {
          gcc_assert (INTEGRAL_TYPE_P (type));
          return TYPE_MAX_VALUE (type);
        }

    default:
      gcc_unreachable ();
    }
}

/* Generate code to implement the input clauses, FIRSTPRIVATE and COPYIN,
   from the receiver (aka child) side and initializers for REFERENCE_TYPE
   private variables.  Initialization statements go in ILIST, while calls
   to destructors go in DLIST.  */
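/* A sketch of the effect (variable names invented for illustration):
   for

	int x = 1;
	#pragma omp parallel firstprivate (x)

   ILIST receives, on the child side, roughly

	x.priv = .omp_data_i->x;

   and for types with nontrivial destructors the matching destructor
   call is queued in DLIST.  */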

static void
lower_rec_input_clauses (tree clauses, gimple_seq *ilist, gimple_seq *dlist,
                         omp_context *ctx)
{
  gimple_stmt_iterator diter;
  tree c, dtor, copyin_seq, x, ptr;
  bool copyin_by_ref = false;
  bool lastprivate_firstprivate = false;
  int pass;

  *dlist = gimple_seq_alloc ();
  diter = gsi_start (*dlist);
  copyin_seq = NULL;

  /* Do all the fixed sized types in the first pass, and the variable sized
     types in the second pass.  This makes sure that the scalar arguments to
     the variable sized types are processed before we use them in the
     variable sized operations.  */
  for (pass = 0; pass < 2; ++pass)
    {
      for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
        {
          enum omp_clause_code c_kind = OMP_CLAUSE_CODE (c);
          tree var, new_var;
          bool by_ref;
          location_t clause_loc = OMP_CLAUSE_LOCATION (c);

          switch (c_kind)
            {
            case OMP_CLAUSE_PRIVATE:
              if (OMP_CLAUSE_PRIVATE_DEBUG (c))
                continue;
              break;
            case OMP_CLAUSE_SHARED:
              if (maybe_lookup_decl (OMP_CLAUSE_DECL (c), ctx) == NULL)
                {
                  gcc_assert (is_global_var (OMP_CLAUSE_DECL (c)));
                  continue;
                }
            case OMP_CLAUSE_FIRSTPRIVATE:
            case OMP_CLAUSE_COPYIN:
            case OMP_CLAUSE_REDUCTION:
              break;
            case OMP_CLAUSE_LASTPRIVATE:
              if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
                {
                  lastprivate_firstprivate = true;
                  if (pass != 0)
                    continue;
                }
              break;
            default:
              continue;
            }

          new_var = var = OMP_CLAUSE_DECL (c);
          if (c_kind != OMP_CLAUSE_COPYIN)
            new_var = lookup_decl (var, ctx);

          if (c_kind == OMP_CLAUSE_SHARED || c_kind == OMP_CLAUSE_COPYIN)
            {
              if (pass != 0)
                continue;
            }
          else if (is_variable_sized (var))
            {
              /* For variable sized types, we need to allocate the
                 actual storage here.  Call alloca and store the
                 result in the pointer decl that we created elsewhere.  */
              if (pass == 0)
                continue;

              if (c_kind != OMP_CLAUSE_FIRSTPRIVATE || !is_task_ctx (ctx))
                {
                  gimple stmt;
                  tree tmp, atmp;

                  ptr = DECL_VALUE_EXPR (new_var);
                  gcc_assert (TREE_CODE (ptr) == INDIRECT_REF);
                  ptr = TREE_OPERAND (ptr, 0);
                  gcc_assert (DECL_P (ptr));
                  x = TYPE_SIZE_UNIT (TREE_TYPE (new_var));

                  /* void *tmp = __builtin_alloca */
                  atmp = builtin_decl_explicit (BUILT_IN_ALLOCA);
                  stmt = gimple_build_call (atmp, 1, x);
                  tmp = create_tmp_var_raw (ptr_type_node, NULL);
                  gimple_add_tmp_var (tmp);
                  gimple_call_set_lhs (stmt, tmp);

                  gimple_seq_add_stmt (ilist, stmt);

                  x = fold_convert_loc (clause_loc, TREE_TYPE (ptr), tmp);
                  gimplify_assign (ptr, x, ilist);
                }
            }
          else if (is_reference (var))
            {
              /* For references that are being privatized for Fortran,
                 allocate new backing storage for the new pointer
                 variable.  This allows us to avoid changing all the
                 code that expects a pointer to something that expects
                 a direct variable.  Note that this doesn't apply to
                 C++, since reference types are disallowed in data
                 sharing clauses there, except for NRV optimized
                 return values.  */
              if (pass == 0)
                continue;

              x = TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (new_var)));
              if (c_kind == OMP_CLAUSE_FIRSTPRIVATE && is_task_ctx (ctx))
                {
                  x = build_receiver_ref (var, false, ctx);
                  x = build_fold_addr_expr_loc (clause_loc, x);
                }
              else if (TREE_CONSTANT (x))
                {
                  const char *name = NULL;
                  if (DECL_NAME (var))
                    name = IDENTIFIER_POINTER (DECL_NAME (new_var));

                  x = create_tmp_var_raw (TREE_TYPE (TREE_TYPE (new_var)),
                                          name);
                  gimple_add_tmp_var (x);
                  TREE_ADDRESSABLE (x) = 1;
                  x = build_fold_addr_expr_loc (clause_loc, x);
                }
              else
                {
                  tree atmp = builtin_decl_explicit (BUILT_IN_ALLOCA);
                  x = build_call_expr_loc (clause_loc, atmp, 1, x);
                }

              x = fold_convert_loc (clause_loc, TREE_TYPE (new_var), x);
              gimplify_assign (new_var, x, ilist);

              new_var = build_simple_mem_ref_loc (clause_loc, new_var);
            }
          else if (c_kind == OMP_CLAUSE_REDUCTION
                   && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
            {
              if (pass == 0)
                continue;
            }
          else if (pass != 0)
            continue;

          switch (OMP_CLAUSE_CODE (c))
            {
            case OMP_CLAUSE_SHARED:
              /* Shared global vars are just accessed directly.  */
              if (is_global_var (new_var))
                break;
              /* Set up the DECL_VALUE_EXPR for shared variables now.  This
                 needs to be delayed until after fixup_child_record_type so
                 that we get the correct type during the dereference.  */
              by_ref = use_pointer_for_field (var, ctx);
              x = build_receiver_ref (var, by_ref, ctx);
              SET_DECL_VALUE_EXPR (new_var, x);
              DECL_HAS_VALUE_EXPR_P (new_var) = 1;

              /* ??? If VAR is not passed by reference, and the variable
                 hasn't been initialized yet, then we'll get a warning for
                 the store into the omp_data_s structure.  Ideally, we'd be
                 able to notice this and not store anything at all, but
                 we're generating code too early.  Suppress the warning.  */
              if (!by_ref)
                TREE_NO_WARNING (var) = 1;
              break;

            case OMP_CLAUSE_LASTPRIVATE:
              if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
                break;
              /* FALLTHRU */

            case OMP_CLAUSE_PRIVATE:
              if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_PRIVATE)
                x = build_outer_var_ref (var, ctx);
              else if (OMP_CLAUSE_PRIVATE_OUTER_REF (c))
                {
                  if (is_task_ctx (ctx))
                    x = build_receiver_ref (var, false, ctx);
                  else
                    x = build_outer_var_ref (var, ctx);
                }
              else
                x = NULL;
              x = lang_hooks.decls.omp_clause_default_ctor (c, new_var, x);
              if (x)
                gimplify_and_add (x, ilist);
              /* FALLTHRU */

            do_dtor:
              x = lang_hooks.decls.omp_clause_dtor (c, new_var);
              if (x)
                {
                  gimple_seq tseq = NULL;

                  dtor = x;
                  gimplify_stmt (&dtor, &tseq);
                  gsi_insert_seq_before (&diter, tseq, GSI_SAME_STMT);
                }
              break;

            case OMP_CLAUSE_FIRSTPRIVATE:
              if (is_task_ctx (ctx))
                {
                  if (is_reference (var) || is_variable_sized (var))
                    goto do_dtor;
                  else if (is_global_var (maybe_lookup_decl_in_outer_ctx (var,
                                                                          ctx))
                           || use_pointer_for_field (var, NULL))
                    {
                      x = build_receiver_ref (var, false, ctx);
                      SET_DECL_VALUE_EXPR (new_var, x);
                      DECL_HAS_VALUE_EXPR_P (new_var) = 1;
                      goto do_dtor;
                    }
                }
              x = build_outer_var_ref (var, ctx);
              x = lang_hooks.decls.omp_clause_copy_ctor (c, new_var, x);
              gimplify_and_add (x, ilist);
              goto do_dtor;
              break;

            case OMP_CLAUSE_COPYIN:
              by_ref = use_pointer_for_field (var, NULL);
              x = build_receiver_ref (var, by_ref, ctx);
              x = lang_hooks.decls.omp_clause_assign_op (c, new_var, x);
              append_to_statement_list (x, &copyin_seq);
              copyin_by_ref |= by_ref;
              break;

            case OMP_CLAUSE_REDUCTION:
              if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
                {
                  tree placeholder = OMP_CLAUSE_REDUCTION_PLACEHOLDER (c);
                  x = build_outer_var_ref (var, ctx);

                  if (is_reference (var))
                    x = build_fold_addr_expr_loc (clause_loc, x);
                  SET_DECL_VALUE_EXPR (placeholder, x);
                  DECL_HAS_VALUE_EXPR_P (placeholder) = 1;
                  lower_omp (OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c), ctx);
                  gimple_seq_add_seq (ilist,
                                      OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c));
                  OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c) = NULL;
                  DECL_HAS_VALUE_EXPR_P (placeholder) = 0;
                }
              else
                {
                  x = omp_reduction_init (c, TREE_TYPE (new_var));
                  gcc_assert (TREE_CODE (TREE_TYPE (new_var)) != ARRAY_TYPE);
                  gimplify_assign (new_var, x, ilist);
                }
              break;

            default:
              gcc_unreachable ();
            }
        }
    }

  /* The copyin sequence is not to be executed by the main thread, since
     that would result in self-copies.  Perhaps not visible to scalars,
     but it certainly is to C++ operator=.  */
  if (copyin_seq)
    {
      x = build_call_expr (builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM),
                           0);
      x = build2 (NE_EXPR, boolean_type_node, x,
                  build_int_cst (TREE_TYPE (x), 0));
      x = build3 (COND_EXPR, void_type_node, x, copyin_seq, NULL);
      gimplify_and_add (x, ilist);
    }

  /* If any copyin variable is passed by reference, we must ensure the
     master thread doesn't modify it before it is copied over in all
     threads.  Similarly for variables in both firstprivate and
     lastprivate clauses we need to ensure the lastprivate copying
     happens after firstprivate copying in all threads.  */
  if (copyin_by_ref || lastprivate_firstprivate)
    gimplify_and_add (build_omp_barrier (), ilist);
}


/* Generate code to implement the LASTPRIVATE clauses.  This is used for
   both parallel and workshare constructs.  PREDICATE may be NULL if it's
   always true.  */
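/* Schematically (an illustrative sketch, names invented): for

	#pragma omp for lastprivate (x)

   the copy-back emitted here is guarded by PREDICATE,

	if (i == last_iter)	<-- PREDICATE, when not NULL
	  x = x.priv;

   so only the thread that ran the sequentially last iteration stores
   back to the original list item.  */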

static void
lower_lastprivate_clauses (tree clauses, tree predicate, gimple_seq *stmt_list,
                           omp_context *ctx)
{
  tree x, c, label = NULL;
  bool par_clauses = false;

  /* Early exit if there are no lastprivate clauses.  */
  clauses = find_omp_clause (clauses, OMP_CLAUSE_LASTPRIVATE);
  if (clauses == NULL)
    {
      /* If this was a workshare clause, see if it had been combined
         with its parallel.  In that case, look for the clauses on the
         parallel statement itself.  */
      if (is_parallel_ctx (ctx))
        return;

      ctx = ctx->outer;
      if (ctx == NULL || !is_parallel_ctx (ctx))
        return;

      clauses = find_omp_clause (gimple_omp_parallel_clauses (ctx->stmt),
                                 OMP_CLAUSE_LASTPRIVATE);
      if (clauses == NULL)
        return;
      par_clauses = true;
    }

  if (predicate)
    {
      gimple stmt;
      tree label_true, arm1, arm2;

      label = create_artificial_label (UNKNOWN_LOCATION);
      label_true = create_artificial_label (UNKNOWN_LOCATION);
      arm1 = TREE_OPERAND (predicate, 0);
      arm2 = TREE_OPERAND (predicate, 1);
      gimplify_expr (&arm1, stmt_list, NULL, is_gimple_val, fb_rvalue);
      gimplify_expr (&arm2, stmt_list, NULL, is_gimple_val, fb_rvalue);
      stmt = gimple_build_cond (TREE_CODE (predicate), arm1, arm2,
                                label_true, label);
      gimple_seq_add_stmt (stmt_list, stmt);
      gimple_seq_add_stmt (stmt_list, gimple_build_label (label_true));
    }

  for (c = clauses; c ;)
    {
      tree var, new_var;
      location_t clause_loc = OMP_CLAUSE_LOCATION (c);

      if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE)
        {
          var = OMP_CLAUSE_DECL (c);
          new_var = lookup_decl (var, ctx);

          if (OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c))
            {
              lower_omp (OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c), ctx);
              gimple_seq_add_seq (stmt_list,
                                  OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c));
            }
          OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c) = NULL;

          x = build_outer_var_ref (var, ctx);
          if (is_reference (var))
            new_var = build_simple_mem_ref_loc (clause_loc, new_var);
          x = lang_hooks.decls.omp_clause_assign_op (c, x, new_var);
          gimplify_and_add (x, stmt_list);
        }
      c = OMP_CLAUSE_CHAIN (c);
      if (c == NULL && !par_clauses)
        {
          /* If this was a workshare clause, see if it had been combined
             with its parallel.  In that case, continue looking for the
             clauses also on the parallel statement itself.  */
          if (is_parallel_ctx (ctx))
            break;

          ctx = ctx->outer;
          if (ctx == NULL || !is_parallel_ctx (ctx))
            break;

          c = find_omp_clause (gimple_omp_parallel_clauses (ctx->stmt),
                               OMP_CLAUSE_LASTPRIVATE);
          par_clauses = true;
        }
    }

  if (label)
    gimple_seq_add_stmt (stmt_list, gimple_build_label (label));
}


/* Generate code to implement the REDUCTION clauses.  */
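/* With exactly one scalar reduction clause the merge below is emitted
   as a single atomic update, roughly (sketch)

	#pragma omp atomic
	*&x += x.priv;

   otherwise all the partial-result merges are bracketed by calls to
   GOMP_atomic_start () and GOMP_atomic_end (), built at the bottom of
   this function.  */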

static void
lower_reduction_clauses (tree clauses, gimple_seq *stmt_seqp, omp_context *ctx)
{
  gimple_seq sub_seq = NULL;
  gimple stmt;
  tree x, c;
  int count = 0;

  /* First see if there is exactly one reduction clause.  Use OMP_ATOMIC
     update in that case, otherwise use a lock.  */
  for (c = clauses; c && count < 2; c = OMP_CLAUSE_CHAIN (c))
    if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION)
      {
        if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
          {
            /* Never use OMP_ATOMIC for array reductions.  */
            count = -1;
            break;
          }
        count++;
      }

  if (count == 0)
    return;

  for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
    {
      tree var, ref, new_var;
      enum tree_code code;
      location_t clause_loc = OMP_CLAUSE_LOCATION (c);

      if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_REDUCTION)
        continue;

      var = OMP_CLAUSE_DECL (c);
      new_var = lookup_decl (var, ctx);
      if (is_reference (var))
        new_var = build_simple_mem_ref_loc (clause_loc, new_var);
      ref = build_outer_var_ref (var, ctx);
      code = OMP_CLAUSE_REDUCTION_CODE (c);

      /* reduction(-:var) sums up the partial results, so it acts
         identically to reduction(+:var).  */
      if (code == MINUS_EXPR)
        code = PLUS_EXPR;

      if (count == 1)
        {
          tree addr = build_fold_addr_expr_loc (clause_loc, ref);

          addr = save_expr (addr);
          ref = build1 (INDIRECT_REF, TREE_TYPE (TREE_TYPE (addr)), addr);
          x = fold_build2_loc (clause_loc, code, TREE_TYPE (ref), ref, new_var);
          x = build2 (OMP_ATOMIC, void_type_node, addr, x);
          gimplify_and_add (x, stmt_seqp);
          return;
        }

      if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
        {
          tree placeholder = OMP_CLAUSE_REDUCTION_PLACEHOLDER (c);

          if (is_reference (var))
            ref = build_fold_addr_expr_loc (clause_loc, ref);
          SET_DECL_VALUE_EXPR (placeholder, ref);
          DECL_HAS_VALUE_EXPR_P (placeholder) = 1;
          lower_omp (OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c), ctx);
          gimple_seq_add_seq (&sub_seq, OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c));
          OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c) = NULL;
          OMP_CLAUSE_REDUCTION_PLACEHOLDER (c) = NULL;
        }
      else
        {
          x = build2 (code, TREE_TYPE (ref), ref, new_var);
          ref = build_outer_var_ref (var, ctx);
          gimplify_assign (ref, x, &sub_seq);
        }
    }

  stmt = gimple_build_call (builtin_decl_explicit (BUILT_IN_GOMP_ATOMIC_START),
                            0);
  gimple_seq_add_stmt (stmt_seqp, stmt);

  gimple_seq_add_seq (stmt_seqp, sub_seq);

  stmt = gimple_build_call (builtin_decl_explicit (BUILT_IN_GOMP_ATOMIC_END),
                            0);
  gimple_seq_add_stmt (stmt_seqp, stmt);
}


/* Generate code to implement the COPYPRIVATE clauses.  */
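/* Sketch (names invented): for #pragma omp single copyprivate (x), the
   thread that executed the single region stores x (or &x when passed
   by reference) into the copy-out record via SLIST, and the other
   threads read it back out of that record via RLIST.  */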

static void
lower_copyprivate_clauses (tree clauses, gimple_seq *slist, gimple_seq *rlist,
                           omp_context *ctx)
{
  tree c;

  for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
    {
      tree var, new_var, ref, x;
      bool by_ref;
      location_t clause_loc = OMP_CLAUSE_LOCATION (c);

      if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_COPYPRIVATE)
        continue;

      var = OMP_CLAUSE_DECL (c);
      by_ref = use_pointer_for_field (var, NULL);

      ref = build_sender_ref (var, ctx);
      x = new_var = lookup_decl_in_outer_ctx (var, ctx);
      if (by_ref)
        {
          x = build_fold_addr_expr_loc (clause_loc, new_var);
          x = fold_convert_loc (clause_loc, TREE_TYPE (ref), x);
        }
      gimplify_assign (ref, x, slist);

      ref = build_receiver_ref (var, false, ctx);
      if (by_ref)
        {
          ref = fold_convert_loc (clause_loc,
                                  build_pointer_type (TREE_TYPE (new_var)),
                                  ref);
          ref = build_fold_indirect_ref_loc (clause_loc, ref);
        }
      if (is_reference (var))
        {
          ref = fold_convert_loc (clause_loc, TREE_TYPE (new_var), ref);
          ref = build_simple_mem_ref_loc (clause_loc, ref);
          new_var = build_simple_mem_ref_loc (clause_loc, new_var);
        }
      x = lang_hooks.decls.omp_clause_assign_op (c, new_var, ref);
      gimplify_and_add (x, rlist);
    }
}


/* Generate code to implement the clauses, FIRSTPRIVATE, COPYIN, LASTPRIVATE,
   and REDUCTION from the sender (aka parent) side.  */
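/* E.g. (illustrative), for firstprivate (x) the parent emits roughly

	.omp_data_o.x = x;		<-- ILIST, before the region

   and for a lastprivate scalar the matching copy-back

	x = .omp_data_o.x;		<-- OLIST, after the region

   through the sender record built during scanning.  */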

static void
lower_send_clauses (tree clauses, gimple_seq *ilist, gimple_seq *olist,
                    omp_context *ctx)
{
  tree c;

  for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
    {
      tree val, ref, x, var;
      bool by_ref, do_in = false, do_out = false;
      location_t clause_loc = OMP_CLAUSE_LOCATION (c);

      switch (OMP_CLAUSE_CODE (c))
        {
        case OMP_CLAUSE_PRIVATE:
          if (OMP_CLAUSE_PRIVATE_OUTER_REF (c))
            break;
          continue;
        case OMP_CLAUSE_FIRSTPRIVATE:
        case OMP_CLAUSE_COPYIN:
        case OMP_CLAUSE_LASTPRIVATE:
        case OMP_CLAUSE_REDUCTION:
          break;
        default:
          continue;
        }

      val = OMP_CLAUSE_DECL (c);
      var = lookup_decl_in_outer_ctx (val, ctx);

      if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_COPYIN
          && is_global_var (var))
        continue;
      if (is_variable_sized (val))
        continue;
      by_ref = use_pointer_for_field (val, NULL);

      switch (OMP_CLAUSE_CODE (c))
        {
        case OMP_CLAUSE_PRIVATE:
        case OMP_CLAUSE_FIRSTPRIVATE:
        case OMP_CLAUSE_COPYIN:
          do_in = true;
          break;

        case OMP_CLAUSE_LASTPRIVATE:
          if (by_ref || is_reference (val))
            {
              if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
                continue;
              do_in = true;
            }
          else
            {
              do_out = true;
              if (lang_hooks.decls.omp_private_outer_ref (val))
                do_in = true;
            }
          break;

        case OMP_CLAUSE_REDUCTION:
          do_in = true;
          do_out = !(by_ref || is_reference (val));
          break;

        default:
          gcc_unreachable ();
        }

      if (do_in)
        {
          ref = build_sender_ref (val, ctx);
          x = by_ref ? build_fold_addr_expr_loc (clause_loc, var) : var;
          gimplify_assign (ref, x, ilist);
          if (is_task_ctx (ctx))
            DECL_ABSTRACT_ORIGIN (TREE_OPERAND (ref, 1)) = NULL;
        }

      if (do_out)
        {
          ref = build_sender_ref (val, ctx);
          gimplify_assign (var, ref, olist);
        }
    }
}

/* Generate code to implement SHARED from the sender (aka parent)
   side.  This is trickier, since GIMPLE_OMP_PARALLEL_CLAUSES doesn't
   list things that got automatically shared.  */

static void
lower_send_shared_vars (gimple_seq *ilist, gimple_seq *olist, omp_context *ctx)
{
  tree var, ovar, nvar, f, x, record_type;

  if (ctx->record_type == NULL)
    return;

  record_type = ctx->srecord_type ? ctx->srecord_type : ctx->record_type;
  for (f = TYPE_FIELDS (record_type); f ; f = DECL_CHAIN (f))
    {
      ovar = DECL_ABSTRACT_ORIGIN (f);
      nvar = maybe_lookup_decl (ovar, ctx);
      if (!nvar || !DECL_HAS_VALUE_EXPR_P (nvar))
        continue;

      /* If CTX is a nested parallel directive, find the immediately
         enclosing parallel or workshare construct that contains a
         mapping for OVAR.  */
      var = lookup_decl_in_outer_ctx (ovar, ctx);

      if (use_pointer_for_field (ovar, ctx))
        {
          x = build_sender_ref (ovar, ctx);
          var = build_fold_addr_expr (var);
          gimplify_assign (x, var, ilist);
        }
      else
        {
          x = build_sender_ref (ovar, ctx);
          gimplify_assign (x, var, ilist);

          if (!TREE_READONLY (var)
              /* We don't need to receive a new reference to a result
                 or parm decl.  In fact we may not store to it as we will
                 invalidate any pending RSO and generate wrong gimple
                 during inlining.  */
              && !((TREE_CODE (var) == RESULT_DECL
                    || TREE_CODE (var) == PARM_DECL)
                   && DECL_BY_REFERENCE (var)))
            {
              x = build_sender_ref (ovar, ctx);
              gimplify_assign (var, x, olist);
            }
        }
    }
}


/* A convenience function to build an empty GIMPLE_COND with just the
   condition.  */

static gimple
gimple_build_cond_empty (tree cond)
{
  enum tree_code pred_code;
  tree lhs, rhs;

  gimple_cond_get_ops_from_tree (cond, &pred_code, &lhs, &rhs);
  return gimple_build_cond (pred_code, lhs, rhs, NULL_TREE, NULL_TREE);
}

/* Build the function calls to GOMP_parallel_start etc. to actually
   generate the parallel operation.  REGION is the parallel region
   being expanded.  BB is the block where the code should be inserted.
   WS_ARGS will be set if this is a call to a combined parallel+workshare
   construct; it contains the list of additional arguments needed by
   the workshare construct.  */
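/* Schematically, the emitted sequence is (a sketch; the start builtin
   varies with the combined workshare kind):

	GOMP_parallel_start (child_fn, &.omp_data_o, num_threads);
	child_fn (&.omp_data_o);
	GOMP_parallel_end ();

   where NUM_THREADS is derived from the num_threads and if clauses
   handled below.  */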

static void
expand_parallel_call (struct omp_region *region, basic_block bb,
                      gimple entry_stmt, VEC(tree,gc) *ws_args)
{
  tree t, t1, t2, val, cond, c, clauses;
  gimple_stmt_iterator gsi;
  gimple stmt;
  enum built_in_function start_ix;
  int start_ix2;
  location_t clause_loc;
  VEC(tree,gc) *args;

  clauses = gimple_omp_parallel_clauses (entry_stmt);

  /* Determine what flavor of GOMP_parallel_start we will be
     emitting.  */
  start_ix = BUILT_IN_GOMP_PARALLEL_START;
  if (is_combined_parallel (region))
    {
      switch (region->inner->type)
        {
        case GIMPLE_OMP_FOR:
          gcc_assert (region->inner->sched_kind != OMP_CLAUSE_SCHEDULE_AUTO);
          start_ix2 = ((int)BUILT_IN_GOMP_PARALLEL_LOOP_STATIC_START
                       + (region->inner->sched_kind
                          == OMP_CLAUSE_SCHEDULE_RUNTIME
                          ? 3 : region->inner->sched_kind));
          start_ix = (enum built_in_function)start_ix2;
          break;
        case GIMPLE_OMP_SECTIONS:
          start_ix = BUILT_IN_GOMP_PARALLEL_SECTIONS_START;
          break;
        default:
          gcc_unreachable ();
        }
    }

  /* By default, the value of NUM_THREADS is zero (selected at run time)
     and there is no conditional.  */
  cond = NULL_TREE;
  val = build_int_cst (unsigned_type_node, 0);

  c = find_omp_clause (clauses, OMP_CLAUSE_IF);
  if (c)
    cond = OMP_CLAUSE_IF_EXPR (c);

  c = find_omp_clause (clauses, OMP_CLAUSE_NUM_THREADS);
  if (c)
    {
      val = OMP_CLAUSE_NUM_THREADS_EXPR (c);
      clause_loc = OMP_CLAUSE_LOCATION (c);
    }
  else
    clause_loc = gimple_location (entry_stmt);

  /* Ensure 'val' is of the correct type.  */
  val = fold_convert_loc (clause_loc, unsigned_type_node, val);

  /* If we found the clause 'if (cond)', build either
     (cond != 0) or (cond ? val : 1u).  */
  if (cond)
    {
      gimple_stmt_iterator gsi;

      cond = gimple_boolify (cond);

      if (integer_zerop (val))
        val = fold_build2_loc (clause_loc,
                               EQ_EXPR, unsigned_type_node, cond,
                               build_int_cst (TREE_TYPE (cond), 0));
      else
        {
          basic_block cond_bb, then_bb, else_bb;
          edge e, e_then, e_else;
          tree tmp_then, tmp_else, tmp_join, tmp_var;

          tmp_var = create_tmp_var (TREE_TYPE (val), NULL);
          if (gimple_in_ssa_p (cfun))
            {
              tmp_then = make_ssa_name (tmp_var, NULL);
              tmp_else = make_ssa_name (tmp_var, NULL);
              tmp_join = make_ssa_name (tmp_var, NULL);
            }
          else
            {
              tmp_then = tmp_var;
              tmp_else = tmp_var;
              tmp_join = tmp_var;
            }

          e = split_block (bb, NULL);
          cond_bb = e->src;
          bb = e->dest;
          remove_edge (e);

          then_bb = create_empty_bb (cond_bb);
          else_bb = create_empty_bb (then_bb);
          set_immediate_dominator (CDI_DOMINATORS, then_bb, cond_bb);
          set_immediate_dominator (CDI_DOMINATORS, else_bb, cond_bb);

          stmt = gimple_build_cond_empty (cond);
          gsi = gsi_start_bb (cond_bb);
          gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);

          gsi = gsi_start_bb (then_bb);
          stmt = gimple_build_assign (tmp_then, val);
          gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);

          gsi = gsi_start_bb (else_bb);
          stmt = gimple_build_assign
                   (tmp_else, build_int_cst (unsigned_type_node, 1));
          gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);

          make_edge (cond_bb, then_bb, EDGE_TRUE_VALUE);
          make_edge (cond_bb, else_bb, EDGE_FALSE_VALUE);
          e_then = make_edge (then_bb, bb, EDGE_FALLTHRU);
          e_else = make_edge (else_bb, bb, EDGE_FALLTHRU);

          if (gimple_in_ssa_p (cfun))
            {
              gimple phi = create_phi_node (tmp_join, bb);
              SSA_NAME_DEF_STMT (tmp_join) = phi;
              add_phi_arg (phi, tmp_then, e_then, UNKNOWN_LOCATION);
              add_phi_arg (phi, tmp_else, e_else, UNKNOWN_LOCATION);
            }

          val = tmp_join;
        }

      gsi = gsi_start_bb (bb);
      val = force_gimple_operand_gsi (&gsi, val, true, NULL_TREE,
                                      false, GSI_CONTINUE_LINKING);
    }

  gsi = gsi_last_bb (bb);
  t = gimple_omp_parallel_data_arg (entry_stmt);
  if (t == NULL)
    t1 = null_pointer_node;
  else
    t1 = build_fold_addr_expr (t);
  t2 = build_fold_addr_expr (gimple_omp_parallel_child_fn (entry_stmt));

  args = VEC_alloc (tree, gc, 3 + VEC_length (tree, ws_args));
  VEC_quick_push (tree, args, t2);
  VEC_quick_push (tree, args, t1);
  VEC_quick_push (tree, args, val);
  VEC_splice (tree, args, ws_args);

  t = build_call_expr_loc_vec (UNKNOWN_LOCATION,
                               builtin_decl_explicit (start_ix), args);

  force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
                            false, GSI_CONTINUE_LINKING);

  t = gimple_omp_parallel_data_arg (entry_stmt);
  if (t == NULL)
    t = null_pointer_node;
  else
    t = build_fold_addr_expr (t);
  t = build_call_expr_loc (gimple_location (entry_stmt),
                           gimple_omp_parallel_child_fn (entry_stmt), 1, t);
  force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
                            false, GSI_CONTINUE_LINKING);

  t = build_call_expr_loc (gimple_location (entry_stmt),
                           builtin_decl_explicit (BUILT_IN_GOMP_PARALLEL_END),
                           0);
  force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
                            false, GSI_CONTINUE_LINKING);
}

/* Build the function call to GOMP_task to actually
   generate the task operation.  BB is the block where the code
   should be inserted.  */
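/* The emitted call has the shape (sketch):

	GOMP_task (child_fn, &.omp_data_o, cpyfn,
		   arg_size, arg_align, if_cond, flags);

   with FLAGS encoding untied (1), final (2) and mergeable (4) as
   computed below.  */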

static void
expand_task_call (basic_block bb, gimple entry_stmt)
{
  tree t, t1, t2, t3, flags, cond, c, c2, clauses;
  gimple_stmt_iterator gsi;
  location_t loc = gimple_location (entry_stmt);

  clauses = gimple_omp_task_clauses (entry_stmt);

  c = find_omp_clause (clauses, OMP_CLAUSE_IF);
  if (c)
    cond = gimple_boolify (OMP_CLAUSE_IF_EXPR (c));
  else
    cond = boolean_true_node;

  c = find_omp_clause (clauses, OMP_CLAUSE_UNTIED);
  c2 = find_omp_clause (clauses, OMP_CLAUSE_MERGEABLE);
  flags = build_int_cst (unsigned_type_node,
                         (c ? 1 : 0) + (c2 ? 4 : 0));

  c = find_omp_clause (clauses, OMP_CLAUSE_FINAL);
  if (c)
    {
      c = gimple_boolify (OMP_CLAUSE_FINAL_EXPR (c));
      c = fold_build3_loc (loc, COND_EXPR, unsigned_type_node, c,
                           build_int_cst (unsigned_type_node, 2),
                           build_int_cst (unsigned_type_node, 0));
      flags = fold_build2_loc (loc, PLUS_EXPR, unsigned_type_node, flags, c);
    }

  gsi = gsi_last_bb (bb);
  t = gimple_omp_task_data_arg (entry_stmt);
  if (t == NULL)
    t2 = null_pointer_node;
  else
    t2 = build_fold_addr_expr_loc (loc, t);
  t1 = build_fold_addr_expr_loc (loc, gimple_omp_task_child_fn (entry_stmt));
  t = gimple_omp_task_copy_fn (entry_stmt);
  if (t == NULL)
    t3 = null_pointer_node;
  else
    t3 = build_fold_addr_expr_loc (loc, t);

  t = build_call_expr (builtin_decl_explicit (BUILT_IN_GOMP_TASK),
                       7, t1, t2, t3,
                       gimple_omp_task_arg_size (entry_stmt),
                       gimple_omp_task_arg_align (entry_stmt), cond, flags);

  force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
                            false, GSI_CONTINUE_LINKING);
}


/* If exceptions are enabled, wrap the statements in BODY in a MUST_NOT_THROW
   catch handler and return it.  This prevents programs from violating the
   structured block semantics with throws.  */
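/* In effect (sketch), BODY becomes

	try { BODY } catch (...) { <must-not-throw handler> }

   where the handler is the language's eh_protect_cleanup_actions hook
   when one exists, and __builtin_trap otherwise.  */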

static gimple_seq
maybe_catch_exception (gimple_seq body)
{
  gimple g;
  tree decl;

  if (!flag_exceptions)
    return body;

  if (lang_hooks.eh_protect_cleanup_actions != NULL)
    decl = lang_hooks.eh_protect_cleanup_actions ();
  else
    decl = builtin_decl_explicit (BUILT_IN_TRAP);

  g = gimple_build_eh_must_not_throw (decl);
  g = gimple_build_try (body, gimple_seq_alloc_with_stmt (g),
                        GIMPLE_TRY_CATCH);

  return gimple_seq_alloc_with_stmt (g);
}

/* Chain all the DECLs in V by their DECL_CHAIN fields.  */

static tree
vec2chain (VEC(tree,gc) *v)
{
  tree chain = NULL_TREE, t;
  unsigned ix;

  FOR_EACH_VEC_ELT_REVERSE (tree, v, ix, t)
    {
      DECL_CHAIN (t) = chain;
      chain = t;
    }

  return chain;
}


/* Remove barriers in REGION->EXIT's block.  Note that this is only
   valid for GIMPLE_OMP_PARALLEL regions.  Since the end of a parallel region
   is an implicit barrier, any workshare inside the GIMPLE_OMP_PARALLEL that
   left a barrier at the end of the GIMPLE_OMP_PARALLEL region can now be
   removed.  */
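/* For example (illustrative), in

	#pragma omp parallel
	  {
	    #pragma omp for		<-- implicit barrier at its end
	    for (...) ...
	  }				<-- implicit barrier of the parallel

   the workshare's trailing barrier is redundant with the parallel's
   own, so its GIMPLE_OMP_RETURN can be marked nowait, modulo the
   queued-task caveat handled below.  */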

static void
remove_exit_barrier (struct omp_region *region)
{
  gimple_stmt_iterator gsi;
  basic_block exit_bb;
  edge_iterator ei;
  edge e;
  gimple stmt;
  int any_addressable_vars = -1;

  exit_bb = region->exit;

  /* If the parallel region doesn't return, we don't have REGION->EXIT
     block at all.  */
  if (! exit_bb)
    return;

  /* The last insn in the block will be the parallel's GIMPLE_OMP_RETURN.  The
     workshare's GIMPLE_OMP_RETURN will be in a preceding block.  The kinds of
     statements that can appear in between are extremely limited -- no
     memory operations at all.  Here, we allow nothing at all, so the
     only thing we allow to precede this GIMPLE_OMP_RETURN is a label.  */
  gsi = gsi_last_bb (exit_bb);
  gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_RETURN);
  gsi_prev (&gsi);
  if (!gsi_end_p (gsi) && gimple_code (gsi_stmt (gsi)) != GIMPLE_LABEL)
    return;

  FOR_EACH_EDGE (e, ei, exit_bb->preds)
    {
      gsi = gsi_last_bb (e->src);
      if (gsi_end_p (gsi))
        continue;
      stmt = gsi_stmt (gsi);
      if (gimple_code (stmt) == GIMPLE_OMP_RETURN
          && !gimple_omp_return_nowait_p (stmt))
        {
          /* OpenMP 3.0 tasks unfortunately prevent this optimization
             in many cases.  If there could be tasks queued, the barrier
             might be needed to let the tasks run before some local
             variable of the parallel that the task uses as shared
             runs out of scope.  The task can be spawned either
             from within the current function (this would be easy to check)
             or from some function it calls and gets passed an address
             of such a variable.  */
          if (any_addressable_vars < 0)
            {
              gimple parallel_stmt = last_stmt (region->entry);
              tree child_fun = gimple_omp_parallel_child_fn (parallel_stmt);
              tree local_decls, block, decl;
              unsigned ix;

              any_addressable_vars = 0;
              FOR_EACH_LOCAL_DECL (DECL_STRUCT_FUNCTION (child_fun), ix, decl)
                if (TREE_ADDRESSABLE (decl))
                  {
                    any_addressable_vars = 1;
                    break;
                  }
              for (block = gimple_block (stmt);
                   !any_addressable_vars
                   && block
                   && TREE_CODE (block) == BLOCK;
                   block = BLOCK_SUPERCONTEXT (block))
                {
                  for (local_decls = BLOCK_VARS (block);
                       local_decls;
                       local_decls = DECL_CHAIN (local_decls))
                    if (TREE_ADDRESSABLE (local_decls))
                      {
                        any_addressable_vars = 1;
                        break;
                      }
                  if (block == gimple_block (parallel_stmt))
                    break;
                }
            }
          if (!any_addressable_vars)
            gimple_omp_return_set_nowait (stmt);
        }
    }
}

static void
remove_exit_barriers (struct omp_region *region)
{
  if (region->type == GIMPLE_OMP_PARALLEL)
    remove_exit_barrier (region);

  if (region->inner)
    {
      region = region->inner;
      remove_exit_barriers (region);
      while (region->next)
        {
          region = region->next;
          remove_exit_barriers (region);
        }
    }
}

/* Optimize omp_get_thread_num () and omp_get_num_threads ()
   calls.  These can't be declared as const functions, but
   within one parallel body they are constant, so they can be
   transformed there into __builtin_omp_get_{thread_num,num_threads} ()
   which are declared const.  Similarly for the task body, except
   that in an untied task omp_get_thread_num () can change at any task
   scheduling point.  */
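/* E.g. (sketch), inside a parallel body a call to the external

	omp_get_num_threads ()

   is redirected to the const __builtin_omp_get_num_threads (), which
   later optimizations can then CSE within the region.  */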

static void
optimize_omp_library_calls (gimple entry_stmt)
{
  basic_block bb;
  gimple_stmt_iterator gsi;
  tree thr_num_tree = builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM);
  tree thr_num_id = DECL_ASSEMBLER_NAME (thr_num_tree);
  tree num_thr_tree = builtin_decl_explicit (BUILT_IN_OMP_GET_NUM_THREADS);
  tree num_thr_id = DECL_ASSEMBLER_NAME (num_thr_tree);
  bool untied_task = (gimple_code (entry_stmt) == GIMPLE_OMP_TASK
                      && find_omp_clause (gimple_omp_task_clauses (entry_stmt),
                                          OMP_CLAUSE_UNTIED) != NULL);

  FOR_EACH_BB (bb)
    for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
      {
        gimple call = gsi_stmt (gsi);
        tree decl;

        if (is_gimple_call (call)
            && (decl = gimple_call_fndecl (call))
            && DECL_EXTERNAL (decl)
            && TREE_PUBLIC (decl)
            && DECL_INITIAL (decl) == NULL)
          {
            tree built_in;

            if (DECL_NAME (decl) == thr_num_id)
              {
                /* In #pragma omp task untied omp_get_thread_num () can change
                   during the execution of the task region.  */
                if (untied_task)
                  continue;
                built_in = builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM);
              }
            else if (DECL_NAME (decl) == num_thr_id)
              built_in = builtin_decl_explicit (BUILT_IN_OMP_GET_NUM_THREADS);
            else
              continue;

            if (DECL_ASSEMBLER_NAME (decl) != DECL_ASSEMBLER_NAME (built_in)
                || gimple_call_num_args (call) != 0)
              continue;

            if (flag_exceptions && !TREE_NOTHROW (decl))
              continue;

            if (TREE_CODE (TREE_TYPE (decl)) != FUNCTION_TYPE
                || !types_compatible_p (TREE_TYPE (TREE_TYPE (decl)),
                                        TREE_TYPE (TREE_TYPE (built_in))))
              continue;

            gimple_call_set_fndecl (call, built_in);
          }
      }
}

/* Expand the OpenMP parallel or task directive starting at REGION.  */

static void
expand_omp_taskreg (struct omp_region *region)
{
  basic_block entry_bb, exit_bb, new_bb;
  struct function *child_cfun;
  tree child_fn, block, t;
  tree save_current;
  gimple_stmt_iterator gsi;
  gimple entry_stmt, stmt;
  edge e;
  VEC(tree,gc) *ws_args;

  entry_stmt = last_stmt (region->entry);
  child_fn = gimple_omp_taskreg_child_fn (entry_stmt);
  child_cfun = DECL_STRUCT_FUNCTION (child_fn);
  /* If this function has already been instrumented, make sure
     the child function isn't instrumented again.  */
  child_cfun->after_tree_profile = cfun->after_tree_profile;

  entry_bb = region->entry;
  exit_bb = region->exit;

  if (is_combined_parallel (region))
    ws_args = region->ws_args;
  else
    ws_args = NULL;

  if (child_cfun->cfg)
    {
      /* Due to inlining, it may happen that we have already outlined
         the region, in which case all we need to do is make the
         sub-graph unreachable and emit the parallel call.  */
      edge entry_succ_e, exit_succ_e;
      gimple_stmt_iterator gsi;

      entry_succ_e = single_succ_edge (entry_bb);

      gsi = gsi_last_bb (entry_bb);
      gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_PARALLEL
                  || gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_TASK);
      gsi_remove (&gsi, true);

      new_bb = entry_bb;
      if (exit_bb)
        {
          exit_succ_e = single_succ_edge (exit_bb);
          make_edge (new_bb, exit_succ_e->dest, EDGE_FALLTHRU);
        }
      remove_edge_and_dominated_blocks (entry_succ_e);
    }
  else
    {
      unsigned srcidx, dstidx, num;

773c5ba7 3416 /* If the parallel region needs data sent from the parent
3480139d 3417 function, then the very first statement (except possible
3418 tree profile counter updates) of the parallel body
773c5ba7 3419 is a copy assignment .OMP_DATA_I = &.OMP_DATA_O. Since
3420 &.OMP_DATA_O is passed as an argument to the child function,
3421 we need to replace it with the argument as seen by the child
3422 function.
3423
3424 In most cases, this will end up being the identity assignment
3425 .OMP_DATA_I = .OMP_DATA_I. However, if the parallel body had
3426 a function call that has been inlined, the original PARM_DECL
3427 .OMP_DATA_I may have been converted into a different local
3428 variable, in which case we need to keep the assignment. */
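	 /* Illustrative shape of the statement being patched: the child
	    body typically starts with
	      .omp_data_i = &.omp_data_o;
	    and below we either rewrite its RHS to the child's incoming
	    PARM_DECL or remove it when it becomes an identity copy.  */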
75a70cf9 3429 if (gimple_omp_taskreg_data_arg (entry_stmt))
773c5ba7 3430 {
3431 basic_block entry_succ_bb = single_succ (entry_bb);
75a70cf9 3432 gimple_stmt_iterator gsi;
3433 tree arg, narg;
3434 gimple parcopy_stmt = NULL;
1e8e9920 3435
75a70cf9 3436 for (gsi = gsi_start_bb (entry_succ_bb); ; gsi_next (&gsi))
3480139d 3437 {
75a70cf9 3438 gimple stmt;
3480139d 3439
75a70cf9 3440 gcc_assert (!gsi_end_p (gsi));
3441 stmt = gsi_stmt (gsi);
3442 if (gimple_code (stmt) != GIMPLE_ASSIGN)
cc6b725b 3443 continue;
3444
75a70cf9 3445 if (gimple_num_ops (stmt) == 2)
3480139d 3446 {
75a70cf9 3447 tree arg = gimple_assign_rhs1 (stmt);
3448
3449 /* We're ignoring the subcode because we're
3450 effectively doing a STRIP_NOPS. */
3451
3452 if (TREE_CODE (arg) == ADDR_EXPR
3453 && TREE_OPERAND (arg, 0)
3454 == gimple_omp_taskreg_data_arg (entry_stmt))
3455 {
3456 parcopy_stmt = stmt;
3457 break;
3458 }
3480139d 3459 }
3460 }
79acaae1 3461
75a70cf9 3462 gcc_assert (parcopy_stmt != NULL);
79acaae1 3463 arg = DECL_ARGUMENTS (child_fn);
3464
3465 if (!gimple_in_ssa_p (cfun))
3466 {
75a70cf9 3467 if (gimple_assign_lhs (parcopy_stmt) == arg)
3468 gsi_remove (&gsi, true);
79acaae1 3469 else
75a70cf9 3470 {
3471 /* ?? Is setting the subcode really necessary ?? */
3472 gimple_omp_set_subcode (parcopy_stmt, TREE_CODE (arg));
3473 gimple_assign_set_rhs1 (parcopy_stmt, arg);
3474 }
79acaae1 3475 }
3476 else
3477 {
3478 /* If we are in ssa form, we must load the value from the default
3479 definition of the argument. That should not be defined now,
3480 since the argument is not used uninitialized. */
3481 gcc_assert (gimple_default_def (cfun, arg) == NULL);
75a70cf9 3482 narg = make_ssa_name (arg, gimple_build_nop ());
79acaae1 3483 set_default_def (arg, narg);
75a70cf9 3484 /* ?? Is setting the subcode really necessary ?? */
3485 gimple_omp_set_subcode (parcopy_stmt, TREE_CODE (narg));
3486 gimple_assign_set_rhs1 (parcopy_stmt, narg);
79acaae1 3487 update_stmt (parcopy_stmt);
3488 }
773c5ba7 3489 }
3490
3491 /* Declare local variables needed in CHILD_CFUN. */
3492 block = DECL_INITIAL (child_fn);
2ab2ce89 3493 BLOCK_VARS (block) = vec2chain (child_cfun->local_decls);
e1a7ccb9 3494 /* The gimplifier could record temporaries in the parallel/task block
3495 rather than in the containing function's local_decls chain,
3496 which would mean cgraph missed finalizing them. Do it now. */
1767a056 3497 for (t = BLOCK_VARS (block); t; t = DECL_CHAIN (t))
e1a7ccb9 3498 if (TREE_CODE (t) == VAR_DECL
3499 && TREE_STATIC (t)
3500 && !DECL_EXTERNAL (t))
3501 varpool_finalize_decl (t);
75a70cf9 3502 DECL_SAVED_TREE (child_fn) = NULL;
3503 gimple_set_body (child_fn, bb_seq (single_succ (entry_bb)));
1d22f541 3504 TREE_USED (block) = 1;
773c5ba7 3505
79acaae1 3506 /* Reset DECL_CONTEXT on function arguments. */
1767a056 3507 for (t = DECL_ARGUMENTS (child_fn); t; t = DECL_CHAIN (t))
773c5ba7 3508 DECL_CONTEXT (t) = child_fn;
3509
75a70cf9 3510 /* Split ENTRY_BB at GIMPLE_OMP_PARALLEL or GIMPLE_OMP_TASK,
3511 so that it can be moved to the child function. */
3512 gsi = gsi_last_bb (entry_bb);
3513 stmt = gsi_stmt (gsi);
3514 gcc_assert (stmt && (gimple_code (stmt) == GIMPLE_OMP_PARALLEL
3515 || gimple_code (stmt) == GIMPLE_OMP_TASK));
3516 gsi_remove (&gsi, true);
3517 e = split_block (entry_bb, stmt);
773c5ba7 3518 entry_bb = e->dest;
3519 single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
3520
75a70cf9 3521 /* Convert GIMPLE_OMP_RETURN into a RETURN_EXPR. */
5056ba1a 3522 if (exit_bb)
3523 {
75a70cf9 3524 gsi = gsi_last_bb (exit_bb);
3525 gcc_assert (!gsi_end_p (gsi)
3526 && gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_RETURN);
3527 stmt = gimple_build_return (NULL);
3528 gsi_insert_after (&gsi, stmt, GSI_SAME_STMT);
3529 gsi_remove (&gsi, true);
5056ba1a 3530 }
79acaae1 3531
3532 /* Move the parallel region into CHILD_CFUN. */
48e1416a 3533
79acaae1 3534 if (gimple_in_ssa_p (cfun))
3535 {
3536 push_cfun (child_cfun);
bcaa2770 3537 init_tree_ssa (child_cfun);
79acaae1 3538 init_ssa_operands ();
3539 cfun->gimple_df->in_ssa_p = true;
3540 pop_cfun ();
1d22f541 3541 block = NULL_TREE;
79acaae1 3542 }
1d22f541 3543 else
75a70cf9 3544 block = gimple_block (entry_stmt);
1d22f541 3545
3546 new_bb = move_sese_region_to_fn (child_cfun, entry_bb, exit_bb, block);
79acaae1 3547 if (exit_bb)
3548 single_succ_edge (new_bb)->flags = EDGE_FALLTHRU;
3549
1d22f541 3550 /* Remove non-local VAR_DECLs from child_cfun->local_decls list. */
501bdd19 3551 num = VEC_length (tree, child_cfun->local_decls);
3552 for (srcidx = 0, dstidx = 0; srcidx < num; srcidx++)
3553 {
3554 t = VEC_index (tree, child_cfun->local_decls, srcidx);
3555 if (DECL_CONTEXT (t) == cfun->decl)
3556 continue;
3557 if (srcidx != dstidx)
3558 VEC_replace (tree, child_cfun->local_decls, dstidx, t);
3559 dstidx++;
3560 }
3561 if (dstidx != num)
3562 VEC_truncate (tree, child_cfun->local_decls, dstidx);
1d22f541 3563
79acaae1 3564 /* Inform the callgraph about the new function. */
3565 DECL_STRUCT_FUNCTION (child_fn)->curr_properties
3566 = cfun->curr_properties;
3567 cgraph_add_new_function (child_fn, true);
3568
3569 /* Fix the callgraph edges for child_cfun. Those for cfun will be
3570 fixed in a following pass. */
3571 push_cfun (child_cfun);
ba3a7ba0 3572 save_current = current_function_decl;
3573 current_function_decl = child_fn;
658b4427 3574 if (optimize)
fd6481cf 3575 optimize_omp_library_calls (entry_stmt);
79acaae1 3576 rebuild_cgraph_edges ();
fbe86b1b 3577
3578 /* Some EH regions might become dead, see PR34608. If
3579 pass_cleanup_cfg isn't the first pass to happen with the
3580 new child, these dead EH edges might cause problems.
3581 Clean them up now. */
3582 if (flag_exceptions)
3583 {
3584 basic_block bb;
fbe86b1b 3585 bool changed = false;
3586
fbe86b1b 3587 FOR_EACH_BB (bb)
75a70cf9 3588 changed |= gimple_purge_dead_eh_edges (bb);
fbe86b1b 3589 if (changed)
3590 cleanup_tree_cfg ();
fbe86b1b 3591 }
dd277d48 3592 if (gimple_in_ssa_p (cfun))
3593 update_ssa (TODO_update_ssa);
ba3a7ba0 3594 current_function_decl = save_current;
79acaae1 3595 pop_cfun ();
773c5ba7 3596 }
48e1416a 3597
773c5ba7 3598 /* Emit a library call to launch the children threads. */
75a70cf9 3599 if (gimple_code (entry_stmt) == GIMPLE_OMP_PARALLEL)
fd6481cf 3600 expand_parallel_call (region, new_bb, entry_stmt, ws_args);
3601 else
3602 expand_task_call (new_bb, entry_stmt);
28c92cbb 3603 update_ssa (TODO_update_ssa_only_virtuals);
1e8e9920 3604}
3605
773c5ba7 3606
3607/* A subroutine of expand_omp_for. Generate code for a parallel
1e8e9920 3608 loop with any schedule. Given parameters:
3609
3610 for (V = N1; V cond N2; V += STEP) BODY;
3611
3612 where COND is "<" or ">", we generate pseudocode
3613
3614 more = GOMP_loop_foo_start (N1, N2, STEP, CHUNK, &istart0, &iend0);
773c5ba7 3615 if (more) goto L0; else goto L3;
1e8e9920 3616 L0:
3617 V = istart0;
3618 iend = iend0;
3619 L1:
3620 BODY;
3621 V += STEP;
773c5ba7 3622 if (V cond iend) goto L1; else goto L2;
1e8e9920 3623 L2:
773c5ba7 3624 if (GOMP_loop_foo_next (&istart0, &iend0)) goto L0; else goto L3;
3625 L3:
1e8e9920 3626
773c5ba7 3627 If this is a combined omp parallel loop, instead of the call to
fd6481cf 3628 GOMP_loop_foo_start, we call GOMP_loop_foo_next.
3629
3630 For collapsed loops, given parameters:
3631 collapse(3)
3632 for (V1 = N11; V1 cond1 N12; V1 += STEP1)
3633 for (V2 = N21; V2 cond2 N22; V2 += STEP2)
3634 for (V3 = N31; V3 cond3 N32; V3 += STEP3)
3635 BODY;
3636
3637 we generate pseudocode
3638
3639 if (cond3 is <)
3640 adj = STEP3 - 1;
3641 else
3642 adj = STEP3 + 1;
3643 count3 = (adj + N32 - N31) / STEP3;
3644 if (cond2 is <)
3645 adj = STEP2 - 1;
3646 else
3647 adj = STEP2 + 1;
3648 count2 = (adj + N22 - N21) / STEP2;
3649 if (cond1 is <)
3650 adj = STEP1 - 1;
3651 else
3652 adj = STEP1 + 1;
3653 count1 = (adj + N12 - N11) / STEP1;
3654 count = count1 * count2 * count3;
3655 more = GOMP_loop_foo_start (0, count, 1, CHUNK, &istart0, &iend0);
3656 if (more) goto L0; else goto L3;
3657 L0:
3658 V = istart0;
3659 T = V;
3660 V3 = N31 + (T % count3) * STEP3;
3661 T = T / count3;
3662 V2 = N21 + (T % count2) * STEP2;
3663 T = T / count2;
3664 V1 = N11 + T * STEP1;
3665 iend = iend0;
3666 L1:
3667 BODY;
3668 V += 1;
3669 if (V < iend) goto L10; else goto L2;
3670 L10:
3671 V3 += STEP3;
3672 if (V3 cond3 N32) goto L1; else goto L11;
3673 L11:
3674 V3 = N31;
3675 V2 += STEP2;
3676 if (V2 cond2 N22) goto L1; else goto L12;
3677 L12:
3678 V2 = N21;
3679 V1 += STEP1;
3680 goto L1;
3681 L2:
3682 if (GOMP_loop_foo_next (&istart0, &iend0)) goto L0; else goto L3;
3683 L3:
3684
3685 */
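
/* Worked example for the collapsed case (illustrative, not from the
   testsuite): with

     collapse(2)
     for (i = 0; i < 4; i++)
       for (j = 0; j < 3; j++)
	 BODY;

   we get count2 = 3, count1 = 4 and count = 12; each logical iteration
   number T in [istart0, iend0) is decoded innermost-first as
   j = 0 + (T % 3) * 1; T = T / 3; i = 0 + T * 1.  */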
1e8e9920 3686
61e47ac8 3687static void
773c5ba7 3688expand_omp_for_generic (struct omp_region *region,
3689 struct omp_for_data *fd,
1e8e9920 3690 enum built_in_function start_fn,
3691 enum built_in_function next_fn)
3692{
75a70cf9 3693 tree type, istart0, iend0, iend;
fd6481cf 3694 tree t, vmain, vback, bias = NULL_TREE;
3695 basic_block entry_bb, cont_bb, exit_bb, l0_bb, l1_bb, collapse_bb;
03ed154b 3696 basic_block l2_bb = NULL, l3_bb = NULL;
75a70cf9 3697 gimple_stmt_iterator gsi;
3698 gimple stmt;
773c5ba7 3699 bool in_combined_parallel = is_combined_parallel (region);
ac6e3339 3700 bool broken_loop = region->cont == NULL;
79acaae1 3701 edge e, ne;
fd6481cf 3702 tree *counts = NULL;
3703 int i;
ac6e3339 3704
3705 gcc_assert (!broken_loop || !in_combined_parallel);
fd6481cf 3706 gcc_assert (fd->iter_type == long_integer_type_node
3707 || !in_combined_parallel);
1e8e9920 3708
fd6481cf 3709 type = TREE_TYPE (fd->loop.v);
3710 istart0 = create_tmp_var (fd->iter_type, ".istart0");
3711 iend0 = create_tmp_var (fd->iter_type, ".iend0");
6d63fc03 3712 TREE_ADDRESSABLE (istart0) = 1;
3713 TREE_ADDRESSABLE (iend0) = 1;
79acaae1 3714 if (gimple_in_ssa_p (cfun))
3715 {
3716 add_referenced_var (istart0);
3717 add_referenced_var (iend0);
3718 }
1e8e9920 3719
fd6481cf 3720 /* See if we need to bias by LLONG_MIN. */
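  /* (Explanatory note, an assumption about the intent: the
     GOMP_loop_ull_* interface compares iteration values as unsigned
     numbers, so when a signed range may span negative and non-negative
     values we add TYPE_MIN_VALUE as an unsigned bias, mapping the
     signed ordering monotonically onto the unsigned one; the bias is
     subtracted again below when assigning to the loop variable.)  */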
3721 if (fd->iter_type == long_long_unsigned_type_node
3722 && TREE_CODE (type) == INTEGER_TYPE
3723 && !TYPE_UNSIGNED (type))
3724 {
3725 tree n1, n2;
3726
3727 if (fd->loop.cond_code == LT_EXPR)
3728 {
3729 n1 = fd->loop.n1;
3730 n2 = fold_build2 (PLUS_EXPR, type, fd->loop.n2, fd->loop.step);
3731 }
3732 else
3733 {
3734 n1 = fold_build2 (MINUS_EXPR, type, fd->loop.n2, fd->loop.step);
3735 n2 = fd->loop.n1;
3736 }
3737 if (TREE_CODE (n1) != INTEGER_CST
3738 || TREE_CODE (n2) != INTEGER_CST
3739 || ((tree_int_cst_sgn (n1) < 0) ^ (tree_int_cst_sgn (n2) < 0)))
3740 bias = fold_convert (fd->iter_type, TYPE_MIN_VALUE (type));
3741 }
3742
61e47ac8 3743 entry_bb = region->entry;
03ed154b 3744 cont_bb = region->cont;
fd6481cf 3745 collapse_bb = NULL;
ac6e3339 3746 gcc_assert (EDGE_COUNT (entry_bb->succs) == 2);
3747 gcc_assert (broken_loop
3748 || BRANCH_EDGE (entry_bb)->dest == FALLTHRU_EDGE (cont_bb)->dest);
3749 l0_bb = split_edge (FALLTHRU_EDGE (entry_bb));
3750 l1_bb = single_succ (l0_bb);
3751 if (!broken_loop)
03ed154b 3752 {
3753 l2_bb = create_empty_bb (cont_bb);
ac6e3339 3754 gcc_assert (BRANCH_EDGE (cont_bb)->dest == l1_bb);
3755 gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
03ed154b 3756 }
ac6e3339 3757 else
3758 l2_bb = NULL;
3759 l3_bb = BRANCH_EDGE (entry_bb)->dest;
3760 exit_bb = region->exit;
773c5ba7 3761
75a70cf9 3762 gsi = gsi_last_bb (entry_bb);
fd6481cf 3763
75a70cf9 3764 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
fd6481cf 3765 if (fd->collapse > 1)
3766 {
3767 /* Collapsed loops need work for expansion in SSA form. */
3768 gcc_assert (!gimple_in_ssa_p (cfun));
3769 counts = (tree *) alloca (fd->collapse * sizeof (tree));
3770 for (i = 0; i < fd->collapse; i++)
3771 {
3772 tree itype = TREE_TYPE (fd->loops[i].v);
3773
3774 if (POINTER_TYPE_P (itype))
3775 itype = lang_hooks.types.type_for_size (TYPE_PRECISION (itype), 0);
3776 t = build_int_cst (itype, (fd->loops[i].cond_code == LT_EXPR
3777 ? -1 : 1));
3778 t = fold_build2 (PLUS_EXPR, itype,
3779 fold_convert (itype, fd->loops[i].step), t);
3780 t = fold_build2 (PLUS_EXPR, itype, t,
3781 fold_convert (itype, fd->loops[i].n2));
3782 t = fold_build2 (MINUS_EXPR, itype, t,
3783 fold_convert (itype, fd->loops[i].n1));
3784 if (TYPE_UNSIGNED (itype) && fd->loops[i].cond_code == GT_EXPR)
3785 t = fold_build2 (TRUNC_DIV_EXPR, itype,
3786 fold_build1 (NEGATE_EXPR, itype, t),
3787 fold_build1 (NEGATE_EXPR, itype,
3788 fold_convert (itype,
3789 fd->loops[i].step)));
3790 else
3791 t = fold_build2 (TRUNC_DIV_EXPR, itype, t,
3792 fold_convert (itype, fd->loops[i].step));
3793 t = fold_convert (type, t);
3794 if (TREE_CODE (t) == INTEGER_CST)
3795 counts[i] = t;
3796 else
3797 {
3798 counts[i] = create_tmp_var (type, ".count");
75a70cf9 3799 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3800 true, GSI_SAME_STMT);
3801 stmt = gimple_build_assign (counts[i], t);
3802 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
fd6481cf 3803 }
3804 if (SSA_VAR_P (fd->loop.n2))
3805 {
3806 if (i == 0)
75a70cf9 3807 t = counts[0];
fd6481cf 3808 else
3809 {
3810 t = fold_build2 (MULT_EXPR, type, fd->loop.n2, counts[i]);
75a70cf9 3811 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3812 true, GSI_SAME_STMT);
fd6481cf 3813 }
75a70cf9 3814 stmt = gimple_build_assign (fd->loop.n2, t);
3815 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
fd6481cf 3816 }
3817 }
3818 }
79acaae1 3819 if (in_combined_parallel)
3820 {
3821 /* In a combined parallel loop, emit a call to
3822 GOMP_loop_foo_next. */
b9a16870 3823 t = build_call_expr (builtin_decl_explicit (next_fn), 2,
79acaae1 3824 build_fold_addr_expr (istart0),
3825 build_fold_addr_expr (iend0));
3826 }
3827 else
1e8e9920 3828 {
c2f47e15 3829 tree t0, t1, t2, t3, t4;
773c5ba7 3830 /* If this is not a combined parallel loop, emit a call to
3831 GOMP_loop_foo_start in ENTRY_BB. */
c2f47e15 3832 t4 = build_fold_addr_expr (iend0);
3833 t3 = build_fold_addr_expr (istart0);
fd6481cf 3834 t2 = fold_convert (fd->iter_type, fd->loop.step);
c799f233 3835 if (POINTER_TYPE_P (type)
3836 && TYPE_PRECISION (type) != TYPE_PRECISION (fd->iter_type))
3837 {
3838 /* Avoid casting pointers to an integer of a different size. */
3839 tree itype
3840 = lang_hooks.types.type_for_size (TYPE_PRECISION (type), 0);
3841 t1 = fold_convert (fd->iter_type, fold_convert (itype, fd->loop.n2));
3842 t0 = fold_convert (fd->iter_type, fold_convert (itype, fd->loop.n1));
3843 }
3844 else
3845 {
3846 t1 = fold_convert (fd->iter_type, fd->loop.n2);
3847 t0 = fold_convert (fd->iter_type, fd->loop.n1);
3848 }
fd6481cf 3849 if (bias)
1e8e9920 3850 {
fd6481cf 3851 t1 = fold_build2 (PLUS_EXPR, fd->iter_type, t1, bias);
3852 t0 = fold_build2 (PLUS_EXPR, fd->iter_type, t0, bias);
3853 }
3854 if (fd->iter_type == long_integer_type_node)
3855 {
3856 if (fd->chunk_size)
3857 {
3858 t = fold_convert (fd->iter_type, fd->chunk_size);
b9a16870 3859 t = build_call_expr (builtin_decl_explicit (start_fn),
3860 6, t0, t1, t2, t, t3, t4);
fd6481cf 3861 }
3862 else
b9a16870 3863 t = build_call_expr (builtin_decl_explicit (start_fn),
3864 5, t0, t1, t2, t3, t4);
1e8e9920 3865 }
c2f47e15 3866 else
fd6481cf 3867 {
3868 tree t5;
3869 tree c_bool_type;
b9a16870 3870 tree bfn_decl;
fd6481cf 3871
3872 /* The GOMP_loop_ull_*start functions have an additional boolean
3873 argument, true for < loops and false for > loops.
3874 In Fortran, the C bool type can be different from
3875 boolean_type_node. */
b9a16870 3876 bfn_decl = builtin_decl_explicit (start_fn);
3877 c_bool_type = TREE_TYPE (TREE_TYPE (bfn_decl));
fd6481cf 3878 t5 = build_int_cst (c_bool_type,
3879 fd->loop.cond_code == LT_EXPR ? 1 : 0);
3880 if (fd->chunk_size)
3881 {
fd6481cf 3883 t = fold_convert (fd->iter_type, fd->chunk_size);
b9a16870 3884 t = build_call_expr (bfn_decl, 7, t5, t0, t1, t2, t, t3, t4);
fd6481cf 3885 }
3886 else
b9a16870 3887 t = build_call_expr (builtin_decl_explicit (start_fn),
3888 6, t5, t0, t1, t2, t3, t4);
fd6481cf 3889 }
1e8e9920 3890 }
fd6481cf 3891 if (TREE_TYPE (t) != boolean_type_node)
3892 t = fold_build2 (NE_EXPR, boolean_type_node,
3893 t, build_int_cst (TREE_TYPE (t), 0));
75a70cf9 3894 t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3895 true, GSI_SAME_STMT);
3896 gsi_insert_after (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT);
79acaae1 3897
75a70cf9 3898 /* Remove the GIMPLE_OMP_FOR statement. */
3899 gsi_remove (&gsi, true);
1e8e9920 3900
773c5ba7 3901 /* Iteration setup for sequential loop goes in L0_BB. */
75a70cf9 3902 gsi = gsi_start_bb (l0_bb);
1efcacec 3903 t = istart0;
fd6481cf 3904 if (bias)
1efcacec 3905 t = fold_build2 (MINUS_EXPR, fd->iter_type, t, bias);
3906 if (POINTER_TYPE_P (type))
3907 t = fold_convert (lang_hooks.types.type_for_size (TYPE_PRECISION (type),
3908 0), t);
3909 t = fold_convert (type, t);
75a70cf9 3910 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3911 false, GSI_CONTINUE_LINKING);
3912 stmt = gimple_build_assign (fd->loop.v, t);
3913 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
1e8e9920 3914
1efcacec 3915 t = iend0;
fd6481cf 3916 if (bias)
1efcacec 3917 t = fold_build2 (MINUS_EXPR, fd->iter_type, t, bias);
3918 if (POINTER_TYPE_P (type))
3919 t = fold_convert (lang_hooks.types.type_for_size (TYPE_PRECISION (type),
3920 0), t);
3921 t = fold_convert (type, t);
75a70cf9 3922 iend = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3923 false, GSI_CONTINUE_LINKING);
fd6481cf 3924 if (fd->collapse > 1)
3925 {
3926 tree tem = create_tmp_var (type, ".tem");
3927
75a70cf9 3928 stmt = gimple_build_assign (tem, fd->loop.v);
3929 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
fd6481cf 3930 for (i = fd->collapse - 1; i >= 0; i--)
3931 {
3932 tree vtype = TREE_TYPE (fd->loops[i].v), itype;
3933 itype = vtype;
3934 if (POINTER_TYPE_P (vtype))
3935 itype = lang_hooks.types.type_for_size (TYPE_PRECISION (vtype), 0);
3936 t = fold_build2 (TRUNC_MOD_EXPR, type, tem, counts[i]);
3937 t = fold_convert (itype, t);
c821ef7d 3938 t = fold_build2 (MULT_EXPR, itype, t,
3939 fold_convert (itype, fd->loops[i].step));
fd6481cf 3940 if (POINTER_TYPE_P (vtype))
2cc66f2a 3941 t = fold_build_pointer_plus (fd->loops[i].n1, t);
fd6481cf 3942 else
3943 t = fold_build2 (PLUS_EXPR, itype, fd->loops[i].n1, t);
75a70cf9 3944 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3945 false, GSI_CONTINUE_LINKING);
3946 stmt = gimple_build_assign (fd->loops[i].v, t);
3947 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
fd6481cf 3948 if (i != 0)
3949 {
3950 t = fold_build2 (TRUNC_DIV_EXPR, type, tem, counts[i]);
75a70cf9 3951 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3952 false, GSI_CONTINUE_LINKING);
3953 stmt = gimple_build_assign (tem, t);
3954 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
fd6481cf 3955 }
3956 }
3957 }
773c5ba7 3958
ac6e3339 3959 if (!broken_loop)
03ed154b 3960 {
ac6e3339 3961 /* Code to control the increment and predicate for the sequential
3962 loop goes in the CONT_BB. */
75a70cf9 3963 gsi = gsi_last_bb (cont_bb);
3964 stmt = gsi_stmt (gsi);
3965 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_CONTINUE);
3966 vmain = gimple_omp_continue_control_use (stmt);
3967 vback = gimple_omp_continue_control_def (stmt);
79acaae1 3968
fd6481cf 3969 if (POINTER_TYPE_P (type))
2cc66f2a 3970 t = fold_build_pointer_plus (vmain, fd->loop.step);
fd6481cf 3971 else
3972 t = fold_build2 (PLUS_EXPR, type, vmain, fd->loop.step);
75a70cf9 3973 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3974 true, GSI_SAME_STMT);
3975 stmt = gimple_build_assign (vback, t);
3976 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
3977
fd6481cf 3978 t = build2 (fd->loop.cond_code, boolean_type_node, vback, iend);
75a70cf9 3979 stmt = gimple_build_cond_empty (t);
3980 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
773c5ba7 3981
75a70cf9 3982 /* Remove GIMPLE_OMP_CONTINUE. */
3983 gsi_remove (&gsi, true);
773c5ba7 3984
fd6481cf 3985 if (fd->collapse > 1)
3986 {
3987 basic_block last_bb, bb;
3988
3989 last_bb = cont_bb;
3990 for (i = fd->collapse - 1; i >= 0; i--)
3991 {
3992 tree vtype = TREE_TYPE (fd->loops[i].v);
3993
3994 bb = create_empty_bb (last_bb);
75a70cf9 3995 gsi = gsi_start_bb (bb);
fd6481cf 3996
3997 if (i < fd->collapse - 1)
3998 {
3999 e = make_edge (last_bb, bb, EDGE_FALSE_VALUE);
4000 e->probability = REG_BR_PROB_BASE / 8;
4001
75a70cf9 4002 t = fd->loops[i + 1].n1;
4003 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
4004 false, GSI_CONTINUE_LINKING);
4005 stmt = gimple_build_assign (fd->loops[i + 1].v, t);
4006 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
fd6481cf 4007 }
4008 else
4009 collapse_bb = bb;
4010
4011 set_immediate_dominator (CDI_DOMINATORS, bb, last_bb);
4012
4013 if (POINTER_TYPE_P (vtype))
2cc66f2a 4014 t = fold_build_pointer_plus (fd->loops[i].v, fd->loops[i].step);
fd6481cf 4015 else
4016 t = fold_build2 (PLUS_EXPR, vtype, fd->loops[i].v,
4017 fd->loops[i].step);
75a70cf9 4018 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
4019 false, GSI_CONTINUE_LINKING);
4020 stmt = gimple_build_assign (fd->loops[i].v, t);
4021 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
fd6481cf 4022
4023 if (i > 0)
4024 {
75a70cf9 4025 t = fd->loops[i].n2;
4026 t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
4027 false, GSI_CONTINUE_LINKING);
fd6481cf 4028 t = fold_build2 (fd->loops[i].cond_code, boolean_type_node,
75a70cf9 4029 fd->loops[i].v, t);
4030 stmt = gimple_build_cond_empty (t);
4031 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
fd6481cf 4032 e = make_edge (bb, l1_bb, EDGE_TRUE_VALUE);
4033 e->probability = REG_BR_PROB_BASE * 7 / 8;
4034 }
4035 else
4036 make_edge (bb, l1_bb, EDGE_FALLTHRU);
4037 last_bb = bb;
4038 }
4039 }
4040
ac6e3339 4041 /* Emit code to get the next parallel iteration in L2_BB. */
75a70cf9 4042 gsi = gsi_start_bb (l2_bb);
773c5ba7 4043
b9a16870 4044 t = build_call_expr (builtin_decl_explicit (next_fn), 2,
ac6e3339 4045 build_fold_addr_expr (istart0),
4046 build_fold_addr_expr (iend0));
75a70cf9 4047 t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
4048 false, GSI_CONTINUE_LINKING);
fd6481cf 4049 if (TREE_TYPE (t) != boolean_type_node)
4050 t = fold_build2 (NE_EXPR, boolean_type_node,
4051 t, build_int_cst (TREE_TYPE (t), 0));
75a70cf9 4052 stmt = gimple_build_cond_empty (t);
4053 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
ac6e3339 4054 }
1e8e9920 4055
61e47ac8 4056 /* Add the loop cleanup function. */
75a70cf9 4057 gsi = gsi_last_bb (exit_bb);
4058 if (gimple_omp_return_nowait_p (gsi_stmt (gsi)))
b9a16870 4059 t = builtin_decl_explicit (BUILT_IN_GOMP_LOOP_END_NOWAIT);
61e47ac8 4060 else
b9a16870 4061 t = builtin_decl_explicit (BUILT_IN_GOMP_LOOP_END);
75a70cf9 4062 stmt = gimple_build_call (t, 0);
4063 gsi_insert_after (&gsi, stmt, GSI_SAME_STMT);
4064 gsi_remove (&gsi, true);
773c5ba7 4065
4066 /* Connect the new blocks. */
79acaae1 4067 find_edge (entry_bb, l0_bb)->flags = EDGE_TRUE_VALUE;
4068 find_edge (entry_bb, l3_bb)->flags = EDGE_FALSE_VALUE;
1e8e9920 4069
ac6e3339 4070 if (!broken_loop)
4071 {
75a70cf9 4072 gimple_seq phis;
4073
79acaae1 4074 e = find_edge (cont_bb, l3_bb);
4075 ne = make_edge (l2_bb, l3_bb, EDGE_FALSE_VALUE);
4076
75a70cf9 4077 phis = phi_nodes (l3_bb);
4078 for (gsi = gsi_start (phis); !gsi_end_p (gsi); gsi_next (&gsi))
4079 {
4080 gimple phi = gsi_stmt (gsi);
4081 SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, ne),
4082 PHI_ARG_DEF_FROM_EDGE (phi, e));
4083 }
79acaae1 4084 remove_edge (e);
4085
ac6e3339 4086 make_edge (cont_bb, l2_bb, EDGE_FALSE_VALUE);
fd6481cf 4087 if (fd->collapse > 1)
4088 {
4089 e = find_edge (cont_bb, l1_bb);
4090 remove_edge (e);
4091 e = make_edge (cont_bb, collapse_bb, EDGE_TRUE_VALUE);
4092 }
4093 else
4094 {
4095 e = find_edge (cont_bb, l1_bb);
4096 e->flags = EDGE_TRUE_VALUE;
4097 }
4098 e->probability = REG_BR_PROB_BASE * 7 / 8;
4099 find_edge (cont_bb, l2_bb)->probability = REG_BR_PROB_BASE / 8;
ac6e3339 4100 make_edge (l2_bb, l0_bb, EDGE_TRUE_VALUE);
79acaae1 4101
4102 set_immediate_dominator (CDI_DOMINATORS, l2_bb,
4103 recompute_dominator (CDI_DOMINATORS, l2_bb));
4104 set_immediate_dominator (CDI_DOMINATORS, l3_bb,
4105 recompute_dominator (CDI_DOMINATORS, l3_bb));
4106 set_immediate_dominator (CDI_DOMINATORS, l0_bb,
4107 recompute_dominator (CDI_DOMINATORS, l0_bb));
4108 set_immediate_dominator (CDI_DOMINATORS, l1_bb,
4109 recompute_dominator (CDI_DOMINATORS, l1_bb));
ac6e3339 4110 }
1e8e9920 4111}
4112
4113
773c5ba7 4114/* A subroutine of expand_omp_for. Generate code for a parallel
4115 loop with static schedule and no specified chunk size. Given
4116 parameters:
1e8e9920 4117
4118 for (V = N1; V cond N2; V += STEP) BODY;
4119
4120 where COND is "<" or ">", we generate pseudocode
4121
4122 if (cond is <)
4123 adj = STEP - 1;
4124 else
4125 adj = STEP + 1;
fd6481cf 4126 if ((__typeof (V)) -1 > 0 && cond is >)
4127 n = -(adj + N2 - N1) / -STEP;
4128 else
4129 n = (adj + N2 - N1) / STEP;
1e8e9920 4130 q = n / nthreads;
31712e83 4131 tt = n % nthreads;
4132 if (threadid < tt) goto L3; else goto L4;
4133 L3:
4134 tt = 0;
4135 q = q + 1;
4136 L4:
4137 s0 = q * threadid + tt;
4138 e0 = s0 + q;
79acaae1 4139 V = s0 * STEP + N1;
1e8e9920 4140 if (s0 >= e0) goto L2; else goto L0;
4141 L0:
1e8e9920 4142 e = e0 * STEP + N1;
4143 L1:
4144 BODY;
4145 V += STEP;
4146 if (V cond e) goto L1;
1e8e9920 4147 L2:
4148*/
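
/* Worked example (illustrative numbers): with n = 10 and nthreads = 4,
   q = 2 and tt = 2, so threads 0 and 1 take q + 1 = 3 iterations each
   ([0,3) and [3,6)) while threads 2 and 3 take q = 2 each ([6,8) and
   [8,10)).  */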
4149
61e47ac8 4150static void
773c5ba7 4151expand_omp_for_static_nochunk (struct omp_region *region,
4152 struct omp_for_data *fd)
1e8e9920 4153{
31712e83 4154 tree n, q, s0, e0, e, t, tt, nthreads, threadid;
fd6481cf 4155 tree type, itype, vmain, vback;
31712e83 4156 basic_block entry_bb, second_bb, third_bb, exit_bb, seq_start_bb;
4157 basic_block body_bb, cont_bb;
61e47ac8 4158 basic_block fin_bb;
75a70cf9 4159 gimple_stmt_iterator gsi;
4160 gimple stmt;
31712e83 4161 edge ep;
1e8e9920 4162
fd6481cf 4163 itype = type = TREE_TYPE (fd->loop.v);
4164 if (POINTER_TYPE_P (type))
4165 itype = lang_hooks.types.type_for_size (TYPE_PRECISION (type), 0);
1e8e9920 4166
61e47ac8 4167 entry_bb = region->entry;
61e47ac8 4168 cont_bb = region->cont;
ac6e3339 4169 gcc_assert (EDGE_COUNT (entry_bb->succs) == 2);
4170 gcc_assert (BRANCH_EDGE (entry_bb)->dest == FALLTHRU_EDGE (cont_bb)->dest);
4171 seq_start_bb = split_edge (FALLTHRU_EDGE (entry_bb));
4172 body_bb = single_succ (seq_start_bb);
4173 gcc_assert (BRANCH_EDGE (cont_bb)->dest == body_bb);
4174 gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
4175 fin_bb = FALLTHRU_EDGE (cont_bb)->dest;
61e47ac8 4176 exit_bb = region->exit;
4177
773c5ba7 4178 /* Iteration space partitioning goes in ENTRY_BB. */
75a70cf9 4179 gsi = gsi_last_bb (entry_bb);
4180 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
61e47ac8 4181
b9a16870 4182 t = build_call_expr (builtin_decl_explicit (BUILT_IN_OMP_GET_NUM_THREADS), 0);
fd6481cf 4183 t = fold_convert (itype, t);
75a70cf9 4184 nthreads = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
4185 true, GSI_SAME_STMT);
48e1416a 4186
b9a16870 4187 t = build_call_expr (builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM), 0);
fd6481cf 4188 t = fold_convert (itype, t);
75a70cf9 4189 threadid = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
4190 true, GSI_SAME_STMT);
1e8e9920 4191
fd6481cf 4192 fd->loop.n1
75a70cf9 4193 = force_gimple_operand_gsi (&gsi, fold_convert (type, fd->loop.n1),
4194 true, NULL_TREE, true, GSI_SAME_STMT);
fd6481cf 4195 fd->loop.n2
75a70cf9 4196 = force_gimple_operand_gsi (&gsi, fold_convert (itype, fd->loop.n2),
4197 true, NULL_TREE, true, GSI_SAME_STMT);
fd6481cf 4198 fd->loop.step
75a70cf9 4199 = force_gimple_operand_gsi (&gsi, fold_convert (itype, fd->loop.step),
4200 true, NULL_TREE, true, GSI_SAME_STMT);
fd6481cf 4201
4202 t = build_int_cst (itype, (fd->loop.cond_code == LT_EXPR ? -1 : 1));
4203 t = fold_build2 (PLUS_EXPR, itype, fd->loop.step, t);
4204 t = fold_build2 (PLUS_EXPR, itype, t, fd->loop.n2);
4205 t = fold_build2 (MINUS_EXPR, itype, t, fold_convert (itype, fd->loop.n1));
4206 if (TYPE_UNSIGNED (itype) && fd->loop.cond_code == GT_EXPR)
4207 t = fold_build2 (TRUNC_DIV_EXPR, itype,
4208 fold_build1 (NEGATE_EXPR, itype, t),
4209 fold_build1 (NEGATE_EXPR, itype, fd->loop.step));
4210 else
4211 t = fold_build2 (TRUNC_DIV_EXPR, itype, t, fd->loop.step);
4212 t = fold_convert (itype, t);
75a70cf9 4213 n = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);
1e8e9920 4214
31712e83 4215 q = create_tmp_var (itype, "q");
fd6481cf 4216 t = fold_build2 (TRUNC_DIV_EXPR, itype, n, nthreads);
31712e83 4217 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE, true, GSI_SAME_STMT);
4218 gsi_insert_before (&gsi, gimple_build_assign (q, t), GSI_SAME_STMT);
4219
4220 tt = create_tmp_var (itype, "tt");
4221 t = fold_build2 (TRUNC_MOD_EXPR, itype, n, nthreads);
4222 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE, true, GSI_SAME_STMT);
4223 gsi_insert_before (&gsi, gimple_build_assign (tt, t), GSI_SAME_STMT);
1e8e9920 4224
31712e83 4225 t = build2 (LT_EXPR, boolean_type_node, threadid, tt);
4226 stmt = gimple_build_cond_empty (t);
4227 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
4228
4229 second_bb = split_block (entry_bb, stmt)->dest;
4230 gsi = gsi_last_bb (second_bb);
4231 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
4232
4233 gsi_insert_before (&gsi, gimple_build_assign (tt, build_int_cst (itype, 0)),
4234 GSI_SAME_STMT);
4235 stmt = gimple_build_assign_with_ops (PLUS_EXPR, q, q,
4236 build_int_cst (itype, 1));
4237 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
4238
4239 third_bb = split_block (second_bb, stmt)->dest;
4240 gsi = gsi_last_bb (third_bb);
4241 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
1e8e9920 4242
fd6481cf 4243 t = build2 (MULT_EXPR, itype, q, threadid);
31712e83 4244 t = build2 (PLUS_EXPR, itype, t, tt);
75a70cf9 4245 s0 = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);
1e8e9920 4246
fd6481cf 4247 t = fold_build2 (PLUS_EXPR, itype, s0, q);
75a70cf9 4248 e0 = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);
79acaae1 4249
1e8e9920 4250 t = build2 (GE_EXPR, boolean_type_node, s0, e0);
75a70cf9 4251 gsi_insert_before (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT);
773c5ba7 4252
75a70cf9 4253 /* Remove the GIMPLE_OMP_FOR statement. */
4254 gsi_remove (&gsi, true);
773c5ba7 4255
4256 /* Setup code for sequential iteration goes in SEQ_START_BB. */
75a70cf9 4257 gsi = gsi_start_bb (seq_start_bb);
1e8e9920 4258
fd6481cf 4259 t = fold_convert (itype, s0);
4260 t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
4261 if (POINTER_TYPE_P (type))
2cc66f2a 4262 t = fold_build_pointer_plus (fd->loop.n1, t);
fd6481cf 4263 else
4264 t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
75a70cf9 4265 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
4266 false, GSI_CONTINUE_LINKING);
4267 stmt = gimple_build_assign (fd->loop.v, t);
4268 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
48e1416a 4269
fd6481cf 4270 t = fold_convert (itype, e0);
4271 t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
4272 if (POINTER_TYPE_P (type))
2cc66f2a 4273 t = fold_build_pointer_plus (fd->loop.n1, t);
fd6481cf 4274 else
4275 t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
75a70cf9 4276 e = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
4277 false, GSI_CONTINUE_LINKING);
1e8e9920 4278
75a70cf9 4279 /* The code controlling the sequential loop replaces the
4280 GIMPLE_OMP_CONTINUE. */
4281 gsi = gsi_last_bb (cont_bb);
4282 stmt = gsi_stmt (gsi);
4283 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_CONTINUE);
4284 vmain = gimple_omp_continue_control_use (stmt);
4285 vback = gimple_omp_continue_control_def (stmt);
79acaae1 4286
fd6481cf 4287 if (POINTER_TYPE_P (type))
2cc66f2a 4288 t = fold_build_pointer_plus (vmain, fd->loop.step);
fd6481cf 4289 else
4290 t = fold_build2 (PLUS_EXPR, type, vmain, fd->loop.step);
75a70cf9 4291 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
4292 true, GSI_SAME_STMT);
4293 stmt = gimple_build_assign (vback, t);
4294 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
79acaae1 4295
fd6481cf 4296 t = build2 (fd->loop.cond_code, boolean_type_node, vback, e);
75a70cf9 4297 gsi_insert_before (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT);
1e8e9920 4298
75a70cf9 4299 /* Remove the GIMPLE_OMP_CONTINUE statement. */
4300 gsi_remove (&gsi, true);
773c5ba7 4301
75a70cf9 4302 /* Replace the GIMPLE_OMP_RETURN with a barrier, or nothing. */
4303 gsi = gsi_last_bb (exit_bb);
4304 if (!gimple_omp_return_nowait_p (gsi_stmt (gsi)))
4305 force_gimple_operand_gsi (&gsi, build_omp_barrier (), false, NULL_TREE,
4306 false, GSI_SAME_STMT);
4307 gsi_remove (&gsi, true);
773c5ba7 4308
4309 /* Connect all the blocks. */
31712e83 4310 ep = make_edge (entry_bb, third_bb, EDGE_FALSE_VALUE);
4311 ep->probability = REG_BR_PROB_BASE / 4 * 3;
4312 ep = find_edge (entry_bb, second_bb);
4313 ep->flags = EDGE_TRUE_VALUE;
4314 ep->probability = REG_BR_PROB_BASE / 4;
4315 find_edge (third_bb, seq_start_bb)->flags = EDGE_FALSE_VALUE;
4316 find_edge (third_bb, fin_bb)->flags = EDGE_TRUE_VALUE;
79acaae1 4317
ac6e3339 4318 find_edge (cont_bb, body_bb)->flags = EDGE_TRUE_VALUE;
61e47ac8 4319 find_edge (cont_bb, fin_bb)->flags = EDGE_FALSE_VALUE;
48e1416a 4320
31712e83 4321 set_immediate_dominator (CDI_DOMINATORS, second_bb, entry_bb);
4322 set_immediate_dominator (CDI_DOMINATORS, third_bb, entry_bb);
4323 set_immediate_dominator (CDI_DOMINATORS, seq_start_bb, third_bb);
79acaae1 4324 set_immediate_dominator (CDI_DOMINATORS, body_bb,
4325 recompute_dominator (CDI_DOMINATORS, body_bb));
4326 set_immediate_dominator (CDI_DOMINATORS, fin_bb,
4327 recompute_dominator (CDI_DOMINATORS, fin_bb));
1e8e9920 4328}
4329
773c5ba7 4330
4331/* A subroutine of expand_omp_for. Generate code for a parallel
4332 loop with static schedule and a specified chunk size. Given
4333 parameters:
1e8e9920 4334
4335 for (V = N1; V cond N2; V += STEP) BODY;
4336
4337 where COND is "<" or ">", we generate pseudocode
4338
4339 if (cond is <)
4340 adj = STEP - 1;
4341 else
4342 adj = STEP + 1;
fd6481cf 4343 if ((__typeof (V)) -1 > 0 && cond is >)
4344 n = -(adj + N2 - N1) / -STEP;
4345 else
4346 n = (adj + N2 - N1) / STEP;
1e8e9920 4347 trip = 0;
79acaae1 4348 V = threadid * CHUNK * STEP + N1; -- this extra definition of V is
4349 here so that V is defined
4350 if the loop is not entered
1e8e9920 4351 L0:
4352 s0 = (trip * nthreads + threadid) * CHUNK;
4353 e0 = min(s0 + CHUNK, n);
4354 if (s0 < n) goto L1; else goto L4;
4355 L1:
4356 V = s0 * STEP + N1;
4357 e = e0 * STEP + N1;
4358 L2:
4359 BODY;
4360 V += STEP;
4361 if (V cond e) goto L2; else goto L3;
4362 L3:
4363 trip += 1;
4364 goto L0;
4365 L4:
1e8e9920 4366*/
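
/* Worked example (illustrative numbers): with n = 7, nthreads = 2 and
   CHUNK = 2, thread 0 runs chunks [0,2) and [4,6) on trips 0 and 1,
   and thread 1 runs [2,4) and the final partial chunk [6,7).  */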
4367
61e47ac8 4368static void
75a70cf9 4369expand_omp_for_static_chunk (struct omp_region *region, struct omp_for_data *fd)
1e8e9920 4370{
75a70cf9 4371 tree n, s0, e0, e, t;
79acaae1 4372 tree trip_var, trip_init, trip_main, trip_back, nthreads, threadid;
75a70cf9 4373 tree type, itype, v_main, v_back, v_extra;
773c5ba7 4374 basic_block entry_bb, exit_bb, body_bb, seq_start_bb, iter_part_bb;
61e47ac8 4375 basic_block trip_update_bb, cont_bb, fin_bb;
75a70cf9 4376 gimple_stmt_iterator si;
4377 gimple stmt;
4378 edge se;
1e8e9920 4379
fd6481cf 4380 itype = type = TREE_TYPE (fd->loop.v);
4381 if (POINTER_TYPE_P (type))
4382 itype = lang_hooks.types.type_for_size (TYPE_PRECISION (type), 0);
1e8e9920 4383
61e47ac8 4384 entry_bb = region->entry;
ac6e3339 4385 se = split_block (entry_bb, last_stmt (entry_bb));
4386 entry_bb = se->src;
4387 iter_part_bb = se->dest;
61e47ac8 4388 cont_bb = region->cont;
ac6e3339 4389 gcc_assert (EDGE_COUNT (iter_part_bb->succs) == 2);
4390 gcc_assert (BRANCH_EDGE (iter_part_bb)->dest
4391 == FALLTHRU_EDGE (cont_bb)->dest);
4392 seq_start_bb = split_edge (FALLTHRU_EDGE (iter_part_bb));
4393 body_bb = single_succ (seq_start_bb);
4394 gcc_assert (BRANCH_EDGE (cont_bb)->dest == body_bb);
4395 gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
4396 fin_bb = FALLTHRU_EDGE (cont_bb)->dest;
4397 trip_update_bb = split_edge (FALLTHRU_EDGE (cont_bb));
61e47ac8 4398 exit_bb = region->exit;
773c5ba7 4399
773c5ba7 4400 /* Trip and adjustment setup goes in ENTRY_BB. */
75a70cf9 4401 si = gsi_last_bb (entry_bb);
4402 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_FOR);
773c5ba7 4403
b9a16870 4404 t = build_call_expr (builtin_decl_explicit (BUILT_IN_OMP_GET_NUM_THREADS), 0);
fd6481cf 4405 t = fold_convert (itype, t);
75a70cf9 4406 nthreads = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4407 true, GSI_SAME_STMT);
48e1416a 4408
b9a16870 4409 t = build_call_expr (builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM), 0);
fd6481cf 4410 t = fold_convert (itype, t);
75a70cf9 4411 threadid = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4412 true, GSI_SAME_STMT);
79acaae1 4413
fd6481cf 4414 fd->loop.n1
75a70cf9 4415 = force_gimple_operand_gsi (&si, fold_convert (type, fd->loop.n1),
4416 true, NULL_TREE, true, GSI_SAME_STMT);
fd6481cf 4417 fd->loop.n2
75a70cf9 4418 = force_gimple_operand_gsi (&si, fold_convert (itype, fd->loop.n2),
4419 true, NULL_TREE, true, GSI_SAME_STMT);
fd6481cf 4420 fd->loop.step
75a70cf9 4421 = force_gimple_operand_gsi (&si, fold_convert (itype, fd->loop.step),
4422 true, NULL_TREE, true, GSI_SAME_STMT);
79acaae1 4423 fd->chunk_size
75a70cf9 4424 = force_gimple_operand_gsi (&si, fold_convert (itype, fd->chunk_size),
4425 true, NULL_TREE, true, GSI_SAME_STMT);
fd6481cf 4426
4427 t = build_int_cst (itype, (fd->loop.cond_code == LT_EXPR ? -1 : 1));
4428 t = fold_build2 (PLUS_EXPR, itype, fd->loop.step, t);
4429 t = fold_build2 (PLUS_EXPR, itype, t, fd->loop.n2);
4430 t = fold_build2 (MINUS_EXPR, itype, t, fold_convert (itype, fd->loop.n1));
4431 if (TYPE_UNSIGNED (itype) && fd->loop.cond_code == GT_EXPR)
4432 t = fold_build2 (TRUNC_DIV_EXPR, itype,
4433 fold_build1 (NEGATE_EXPR, itype, t),
4434 fold_build1 (NEGATE_EXPR, itype, fd->loop.step));
4435 else
4436 t = fold_build2 (TRUNC_DIV_EXPR, itype, t, fd->loop.step);
4437 t = fold_convert (itype, t);
75a70cf9 4438 n = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4439 true, GSI_SAME_STMT);
79acaae1 4440
fd6481cf 4441 trip_var = create_tmp_var (itype, ".trip");
79acaae1 4442 if (gimple_in_ssa_p (cfun))
4443 {
4444 add_referenced_var (trip_var);
75a70cf9 4445 trip_init = make_ssa_name (trip_var, NULL);
4446 trip_main = make_ssa_name (trip_var, NULL);
4447 trip_back = make_ssa_name (trip_var, NULL);
79acaae1 4448 }
1e8e9920 4449 else
79acaae1 4450 {
4451 trip_init = trip_var;
4452 trip_main = trip_var;
4453 trip_back = trip_var;
4454 }
1e8e9920 4455
75a70cf9 4456 stmt = gimple_build_assign (trip_init, build_int_cst (itype, 0));
4457 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
773c5ba7 4458
fd6481cf 4459 t = fold_build2 (MULT_EXPR, itype, threadid, fd->chunk_size);
4460 t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
4461 if (POINTER_TYPE_P (type))
2cc66f2a 4462 t = fold_build_pointer_plus (fd->loop.n1, t);
fd6481cf 4463 else
4464 t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
75a70cf9 4465 v_extra = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4466 true, GSI_SAME_STMT);
79acaae1 4467
75a70cf9 4468 /* Remove the GIMPLE_OMP_FOR. */
4469 gsi_remove (&si, true);
773c5ba7 4470
4471 /* Iteration space partitioning goes in ITER_PART_BB. */
75a70cf9 4472 si = gsi_last_bb (iter_part_bb);
1e8e9920 4473
fd6481cf 4474 t = fold_build2 (MULT_EXPR, itype, trip_main, nthreads);
4475 t = fold_build2 (PLUS_EXPR, itype, t, threadid);
4476 t = fold_build2 (MULT_EXPR, itype, t, fd->chunk_size);
75a70cf9 4477 s0 = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4478 false, GSI_CONTINUE_LINKING);
1e8e9920 4479
fd6481cf 4480 t = fold_build2 (PLUS_EXPR, itype, s0, fd->chunk_size);
4481 t = fold_build2 (MIN_EXPR, itype, t, n);
75a70cf9 4482 e0 = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4483 false, GSI_CONTINUE_LINKING);
1e8e9920 4484
4485 t = build2 (LT_EXPR, boolean_type_node, s0, n);
75a70cf9 4486 gsi_insert_after (&si, gimple_build_cond_empty (t), GSI_CONTINUE_LINKING);
773c5ba7 4487
4488 /* Setup code for sequential iteration goes in SEQ_START_BB. */
75a70cf9 4489 si = gsi_start_bb (seq_start_bb);
1e8e9920 4490
fd6481cf 4491 t = fold_convert (itype, s0);
4492 t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
4493 if (POINTER_TYPE_P (type))
2cc66f2a 4494 t = fold_build_pointer_plus (fd->loop.n1, t);
fd6481cf 4495 else
4496 t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
75a70cf9 4497 t = force_gimple_operand_gsi (&si, t, false, NULL_TREE,
4498 false, GSI_CONTINUE_LINKING);
4499 stmt = gimple_build_assign (fd->loop.v, t);
4500 gsi_insert_after (&si, stmt, GSI_CONTINUE_LINKING);
1e8e9920 4501
fd6481cf 4502 t = fold_convert (itype, e0);
4503 t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
4504 if (POINTER_TYPE_P (type))
2cc66f2a 4505 t = fold_build_pointer_plus (fd->loop.n1, t);
fd6481cf 4506 else
4507 t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
75a70cf9 4508 e = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4509 false, GSI_CONTINUE_LINKING);
1e8e9920 4510
61e47ac8 4511 /* The code controlling the sequential loop goes in CONT_BB,
75a70cf9 4512 replacing the GIMPLE_OMP_CONTINUE. */
4513 si = gsi_last_bb (cont_bb);
4514 stmt = gsi_stmt (si);
4515 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_CONTINUE);
4516 v_main = gimple_omp_continue_control_use (stmt);
4517 v_back = gimple_omp_continue_control_def (stmt);
79acaae1 4518
fd6481cf 4519 if (POINTER_TYPE_P (type))
2cc66f2a 4520 t = fold_build_pointer_plus (v_main, fd->loop.step);
fd6481cf 4521 else
75a70cf9 4522 t = fold_build2 (PLUS_EXPR, type, v_main, fd->loop.step);
4523 stmt = gimple_build_assign (v_back, t);
4524 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
79acaae1 4525
fd6481cf 4526 t = build2 (fd->loop.cond_code, boolean_type_node, v_back, e);
75a70cf9 4527 gsi_insert_before (&si, gimple_build_cond_empty (t), GSI_SAME_STMT);
48e1416a 4528
75a70cf9 4529 /* Remove GIMPLE_OMP_CONTINUE. */
4530 gsi_remove (&si, true);
773c5ba7 4531
4532 /* Trip update code goes into TRIP_UPDATE_BB. */
75a70cf9 4533 si = gsi_start_bb (trip_update_bb);
1e8e9920 4534
fd6481cf 4535 t = build_int_cst (itype, 1);
4536 t = build2 (PLUS_EXPR, itype, trip_main, t);
75a70cf9 4537 stmt = gimple_build_assign (trip_back, t);
4538 gsi_insert_after (&si, stmt, GSI_CONTINUE_LINKING);
1e8e9920 4539
75a70cf9 4540 /* Replace the GIMPLE_OMP_RETURN with a barrier, or nothing. */
4541 si = gsi_last_bb (exit_bb);
4542 if (!gimple_omp_return_nowait_p (gsi_stmt (si)))
4543 force_gimple_operand_gsi (&si, build_omp_barrier (), false, NULL_TREE,
4544 false, GSI_SAME_STMT);
4545 gsi_remove (&si, true);
1e8e9920 4546
773c5ba7 4547 /* Connect the new blocks. */
ac6e3339 4548 find_edge (iter_part_bb, seq_start_bb)->flags = EDGE_TRUE_VALUE;
4549 find_edge (iter_part_bb, fin_bb)->flags = EDGE_FALSE_VALUE;
79acaae1 4550
ac6e3339 4551 find_edge (cont_bb, body_bb)->flags = EDGE_TRUE_VALUE;
4552 find_edge (cont_bb, trip_update_bb)->flags = EDGE_FALSE_VALUE;
79acaae1 4553
ac6e3339 4554 redirect_edge_and_branch (single_succ_edge (trip_update_bb), iter_part_bb);
79acaae1 4555
4556 if (gimple_in_ssa_p (cfun))
4557 {
75a70cf9 4558 gimple_stmt_iterator psi;
4559 gimple phi;
4560 edge re, ene;
4561 edge_var_map_vector head;
4562 edge_var_map *vm;
4563 size_t i;
4564
79acaae1 4565 /* When we redirect the edge from trip_update_bb to iter_part_bb, we
4566 remove arguments of the phi nodes in fin_bb. We need to create
4567 appropriate phi nodes in iter_part_bb instead. */
4568 se = single_pred_edge (fin_bb);
4569 re = single_succ_edge (trip_update_bb);
75a70cf9 4570 head = redirect_edge_var_map_vector (re);
79acaae1 4571 ene = single_succ_edge (entry_bb);
4572
75a70cf9 4573 psi = gsi_start_phis (fin_bb);
4574 for (i = 0; !gsi_end_p (psi) && VEC_iterate (edge_var_map, head, i, vm);
4575 gsi_next (&psi), ++i)
79acaae1 4576 {
75a70cf9 4577 gimple nphi;
efbcb6de 4578 source_location locus;
75a70cf9 4579
4580 phi = gsi_stmt (psi);
4581 t = gimple_phi_result (phi);
4582 gcc_assert (t == redirect_edge_var_map_result (vm));
79acaae1 4583 nphi = create_phi_node (t, iter_part_bb);
4584 SSA_NAME_DEF_STMT (t) = nphi;
4585
4586 t = PHI_ARG_DEF_FROM_EDGE (phi, se);
efbcb6de 4587 locus = gimple_phi_arg_location_from_edge (phi, se);
4588
fd6481cf 4589 /* A special case -- fd->loop.v is not yet computed in
4590 iter_part_bb, we need to use v_extra instead. */
4591 if (t == fd->loop.v)
79acaae1 4592 t = v_extra;
efbcb6de 4593 add_phi_arg (nphi, t, ene, locus);
4594 locus = redirect_edge_var_map_location (vm);
4595 add_phi_arg (nphi, redirect_edge_var_map_def (vm), re, locus);
75a70cf9 4596 }
4597 gcc_assert (!gsi_end_p (psi) && i == VEC_length (edge_var_map, head));
4598 redirect_edge_var_map_clear (re);
4599 while (1)
4600 {
4601 psi = gsi_start_phis (fin_bb);
4602 if (gsi_end_p (psi))
4603 break;
4604 remove_phi_node (&psi, false);
79acaae1 4605 }
79acaae1 4606
4607 /* Make phi node for trip. */
4608 phi = create_phi_node (trip_main, iter_part_bb);
4609 SSA_NAME_DEF_STMT (trip_main) = phi;
efbcb6de 4610 add_phi_arg (phi, trip_back, single_succ_edge (trip_update_bb),
4611 UNKNOWN_LOCATION);
4612 add_phi_arg (phi, trip_init, single_succ_edge (entry_bb),
4613 UNKNOWN_LOCATION);
79acaae1 4614 }
4615
4616 set_immediate_dominator (CDI_DOMINATORS, trip_update_bb, cont_bb);
4617 set_immediate_dominator (CDI_DOMINATORS, iter_part_bb,
4618 recompute_dominator (CDI_DOMINATORS, iter_part_bb));
4619 set_immediate_dominator (CDI_DOMINATORS, fin_bb,
4620 recompute_dominator (CDI_DOMINATORS, fin_bb));
4621 set_immediate_dominator (CDI_DOMINATORS, seq_start_bb,
4622 recompute_dominator (CDI_DOMINATORS, seq_start_bb));
4623 set_immediate_dominator (CDI_DOMINATORS, body_bb,
4624 recompute_dominator (CDI_DOMINATORS, body_bb));
1e8e9920 4625}
4626
1e8e9920 4627
773c5ba7 4628/* Expand the OpenMP loop defined by REGION. */
1e8e9920 4629
773c5ba7 4630static void
4631expand_omp_for (struct omp_region *region)
4632{
4633 struct omp_for_data fd;
fd6481cf 4634 struct omp_for_data_loop *loops;
1e8e9920 4635
fd6481cf 4636 loops
4637 = (struct omp_for_data_loop *)
75a70cf9 4638 alloca (gimple_omp_for_collapse (last_stmt (region->entry))
fd6481cf 4639 * sizeof (struct omp_for_data_loop));
fd6481cf 4640 extract_omp_for_data (last_stmt (region->entry), &fd, loops);
f77459c5 4641 region->sched_kind = fd.sched_kind;
1e8e9920 4642
b3a3ddec 4643 gcc_assert (EDGE_COUNT (region->entry->succs) == 2);
4644 BRANCH_EDGE (region->entry)->flags &= ~EDGE_ABNORMAL;
4645 FALLTHRU_EDGE (region->entry)->flags &= ~EDGE_ABNORMAL;
4646 if (region->cont)
4647 {
4648 gcc_assert (EDGE_COUNT (region->cont->succs) == 2);
4649 BRANCH_EDGE (region->cont)->flags &= ~EDGE_ABNORMAL;
4650 FALLTHRU_EDGE (region->cont)->flags &= ~EDGE_ABNORMAL;
4651 }
4652
03ed154b 4653 if (fd.sched_kind == OMP_CLAUSE_SCHEDULE_STATIC
4654 && !fd.have_ordered
fd6481cf 4655 && fd.collapse == 1
ac6e3339 4656 && region->cont != NULL)
1e8e9920 4657 {
4658 if (fd.chunk_size == NULL)
61e47ac8 4659 expand_omp_for_static_nochunk (region, &fd);
1e8e9920 4660 else
61e47ac8 4661 expand_omp_for_static_chunk (region, &fd);
1e8e9920 4662 }
4663 else
4664 {
fd6481cf 4665 int fn_index, start_ix, next_ix;
4666
4667 gcc_assert (fd.sched_kind != OMP_CLAUSE_SCHEDULE_AUTO);
4668 fn_index = (fd.sched_kind == OMP_CLAUSE_SCHEDULE_RUNTIME)
75a70cf9 4669 ? 3 : fd.sched_kind;
fd6481cf 4670 fn_index += fd.have_ordered * 4;
b9a16870 4671 start_ix = ((int)BUILT_IN_GOMP_LOOP_STATIC_START) + fn_index;
4672 next_ix = ((int)BUILT_IN_GOMP_LOOP_STATIC_NEXT) + fn_index;
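	  /* E.g. (illustrative): schedule(dynamic) without ordered gives
	     fn_index == 1, selecting GOMP_loop_dynamic_start and
	     GOMP_loop_dynamic_next; have_ordered shifts the pair into
	     the GOMP_loop_ordered_* block of builtins.  */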
fd6481cf 4673 if (fd.iter_type == long_long_unsigned_type_node)
4674 {
b9a16870 4675 start_ix += ((int)BUILT_IN_GOMP_LOOP_ULL_STATIC_START
4676 - (int)BUILT_IN_GOMP_LOOP_STATIC_START);
4677 next_ix += ((int)BUILT_IN_GOMP_LOOP_ULL_STATIC_NEXT
4678 - (int)BUILT_IN_GOMP_LOOP_STATIC_NEXT);
fd6481cf 4679 }
b9c74b4d 4680 expand_omp_for_generic (region, &fd, (enum built_in_function) start_ix,
4681 (enum built_in_function) next_ix);
1e8e9920 4682 }
28c92cbb 4683
4684 update_ssa (TODO_update_ssa_only_virtuals);
1e8e9920 4685}
4686
1e8e9920 4687
4688/* Expand code for an OpenMP sections directive. In pseudo code, we generate
4689
1e8e9920 4690 v = GOMP_sections_start (n);
4691 L0:
4692 switch (v)
4693 {
4694 case 0:
4695 goto L2;
4696 case 1:
4697 section 1;
4698 goto L1;
4699 case 2:
4700 ...
4701 case n:
4702 ...
1e8e9920 4703 default:
4704 abort ();
4705 }
4706 L1:
4707 v = GOMP_sections_next ();
4708 goto L0;
4709 L2:
4710 reduction;
4711
773c5ba7 4712 If this is a combined parallel sections region, replace the call to
79acaae1 4713 GOMP_sections_start with a call to GOMP_sections_next. */
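
/* Worked example (illustrative):

     #pragma omp sections
     {
       #pragma omp section
	 A ();
       #pragma omp section
	 B ();
     }

   calls GOMP_sections_start (2); the switch then dispatches v == 1 to
   A, v == 2 to B, and v == 0 to the exit label L2.  */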
1e8e9920 4714
4715static void
773c5ba7 4716expand_omp_sections (struct omp_region *region)
1e8e9920 4717{
f018d957 4718 tree t, u, vin = NULL, vmain, vnext, l2;
75a70cf9 4719 VEC (tree,heap) *label_vec;
4720 unsigned len;
ac6e3339 4721 basic_block entry_bb, l0_bb, l1_bb, l2_bb, default_bb;
75a70cf9 4722 gimple_stmt_iterator si, switch_si;
4723 gimple sections_stmt, stmt, cont;
9884aaf8 4724 edge_iterator ei;
4725 edge e;
61e47ac8 4726 struct omp_region *inner;
75a70cf9 4727 unsigned i, casei;
ac6e3339 4728 bool exit_reachable = region->cont != NULL;
1e8e9920 4729
ac6e3339 4730 gcc_assert (exit_reachable == (region->exit != NULL));
61e47ac8 4731 entry_bb = region->entry;
ac6e3339 4732 l0_bb = single_succ (entry_bb);
61e47ac8 4733 l1_bb = region->cont;
ac6e3339 4734 l2_bb = region->exit;
4735 if (exit_reachable)
03ed154b 4736 {
295e9e85 4737 if (single_pred_p (l2_bb) && single_pred (l2_bb) == l0_bb)
75a70cf9 4738 l2 = gimple_block_label (l2_bb);
9884aaf8 4739 else
4740 {
4741 /* This can happen if there are reductions. */
4742 len = EDGE_COUNT (l0_bb->succs);
4743 gcc_assert (len > 0);
4744 e = EDGE_SUCC (l0_bb, len - 1);
75a70cf9 4745 si = gsi_last_bb (e->dest);
6d5a0fbe 4746 l2 = NULL_TREE;
75a70cf9 4747 if (gsi_end_p (si)
4748 || gimple_code (gsi_stmt (si)) != GIMPLE_OMP_SECTION)
4749 l2 = gimple_block_label (e->dest);
9884aaf8 4750 else
4751 FOR_EACH_EDGE (e, ei, l0_bb->succs)
4752 {
75a70cf9 4753 si = gsi_last_bb (e->dest);
4754 if (gsi_end_p (si)
4755 || gimple_code (gsi_stmt (si)) != GIMPLE_OMP_SECTION)
9884aaf8 4756 {
75a70cf9 4757 l2 = gimple_block_label (e->dest);
9884aaf8 4758 break;
4759 }
4760 }
4761 }
03ed154b 4762 default_bb = create_empty_bb (l1_bb->prev_bb);
03ed154b 4763 }
4764 else
4765 {
ac6e3339 4766 default_bb = create_empty_bb (l0_bb);
75a70cf9 4767 l2 = gimple_block_label (default_bb);
03ed154b 4768 }
773c5ba7 4769
4770 /* We will build a switch() with enough cases for all the
75a70cf9 4771 GIMPLE_OMP_SECTION regions, a '0' case signalling that no more work
773c5ba7 4772 remains, and a default case to abort if something goes wrong. */
ac6e3339 4773 len = EDGE_COUNT (l0_bb->succs);
75a70cf9 4774
4775 /* Use VEC_quick_push on label_vec throughout, since we know the size
4776 in advance. */
4777 label_vec = VEC_alloc (tree, heap, len);
1e8e9920 4778
61e47ac8 4779 /* The call to GOMP_sections_start goes in ENTRY_BB, replacing the
75a70cf9 4780 GIMPLE_OMP_SECTIONS statement. */
4781 si = gsi_last_bb (entry_bb);
4782 sections_stmt = gsi_stmt (si);
4783 gcc_assert (gimple_code (sections_stmt) == GIMPLE_OMP_SECTIONS);
4784 vin = gimple_omp_sections_control (sections_stmt);
773c5ba7 4785 if (!is_combined_parallel (region))
1e8e9920 4786 {
773c5ba7 4787 /* If we are not inside a combined parallel+sections region,
4788 call GOMP_sections_start. */
ac6e3339 4789 t = build_int_cst (unsigned_type_node,
4790 exit_reachable ? len - 1 : len);
b9a16870 4791 u = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_START);
75a70cf9 4792 stmt = gimple_build_call (u, 1, t);
1e8e9920 4793 }
79acaae1 4794 else
4795 {
4796 /* Otherwise, call GOMP_sections_next. */
b9a16870 4797 u = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_NEXT);
75a70cf9 4798 stmt = gimple_build_call (u, 0);
79acaae1 4799 }
75a70cf9 4800 gimple_call_set_lhs (stmt, vin);
4801 gsi_insert_after (&si, stmt, GSI_SAME_STMT);
4802 gsi_remove (&si, true);
4803
4804 /* The switch() statement replacing GIMPLE_OMP_SECTIONS_SWITCH goes in
4805 L0_BB. */
4806 switch_si = gsi_last_bb (l0_bb);
4807 gcc_assert (gimple_code (gsi_stmt (switch_si)) == GIMPLE_OMP_SECTIONS_SWITCH);
79acaae1 4808 if (exit_reachable)
4809 {
4810 cont = last_stmt (l1_bb);
75a70cf9 4811 gcc_assert (gimple_code (cont) == GIMPLE_OMP_CONTINUE);
4812 vmain = gimple_omp_continue_control_use (cont);
4813 vnext = gimple_omp_continue_control_def (cont);
79acaae1 4814 }
4815 else
4816 {
4817 vmain = vin;
4818 vnext = NULL_TREE;
4819 }
1e8e9920 4820
ac6e3339 4821 i = 0;
4822 if (exit_reachable)
4823 {
b6e3dd65 4824 t = build_case_label (build_int_cst (unsigned_type_node, 0), NULL, l2);
75a70cf9 4825 VEC_quick_push (tree, label_vec, t);
ac6e3339 4826 i++;
4827 }
03ed154b 4828
75a70cf9 4829 /* Convert each GIMPLE_OMP_SECTION into a CASE_LABEL_EXPR. */
ac6e3339 4830 for (inner = region->inner, casei = 1;
4831 inner;
4832 inner = inner->next, i++, casei++)
1e8e9920 4833 {
773c5ba7 4834 basic_block s_entry_bb, s_exit_bb;
4835
9884aaf8 4836 /* Skip optional reduction region. */
75a70cf9 4837 if (inner->type == GIMPLE_OMP_ATOMIC_LOAD)
9884aaf8 4838 {
4839 --i;
4840 --casei;
4841 continue;
4842 }
4843
61e47ac8 4844 s_entry_bb = inner->entry;
4845 s_exit_bb = inner->exit;
1e8e9920 4846
75a70cf9 4847 t = gimple_block_label (s_entry_bb);
ac6e3339 4848 u = build_int_cst (unsigned_type_node, casei);
b6e3dd65 4849 u = build_case_label (u, NULL, t);
75a70cf9 4850 VEC_quick_push (tree, label_vec, u);
61e47ac8 4851
75a70cf9 4852 si = gsi_last_bb (s_entry_bb);
4853 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SECTION);
4854 gcc_assert (i < len || gimple_omp_section_last_p (gsi_stmt (si)));
4855 gsi_remove (&si, true);
61e47ac8 4856 single_succ_edge (s_entry_bb)->flags = EDGE_FALLTHRU;
03ed154b 4857
4858 if (s_exit_bb == NULL)
4859 continue;
4860
75a70cf9 4861 si = gsi_last_bb (s_exit_bb);
4862 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_RETURN);
4863 gsi_remove (&si, true);
03ed154b 4864
773c5ba7 4865 single_succ_edge (s_exit_bb)->flags = EDGE_FALLTHRU;
1e8e9920 4866 }
4867
773c5ba7 4868 /* Error handling code goes in DEFAULT_BB. */
75a70cf9 4869 t = gimple_block_label (default_bb);
b6e3dd65 4870 u = build_case_label (NULL, NULL, t);
61e47ac8 4871 make_edge (l0_bb, default_bb, 0);
1e8e9920 4872
75a70cf9 4873 stmt = gimple_build_switch_vec (vmain, u, label_vec);
4874 gsi_insert_after (&switch_si, stmt, GSI_SAME_STMT);
4875 gsi_remove (&switch_si, true);
4876 VEC_free (tree, heap, label_vec);
4877
4878 si = gsi_start_bb (default_bb);
b9a16870 4879 stmt = gimple_build_call (builtin_decl_explicit (BUILT_IN_TRAP), 0);
75a70cf9 4880 gsi_insert_after (&si, stmt, GSI_CONTINUE_LINKING);
773c5ba7 4881
ac6e3339 4882 if (exit_reachable)
03ed154b 4883 {
b9a16870 4884 tree bfn_decl;
4885
ac6e3339 4886 /* Code to get the next section goes in L1_BB. */
75a70cf9 4887 si = gsi_last_bb (l1_bb);
4888 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_CONTINUE);
1e8e9920 4889
b9a16870 4890 bfn_decl = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_NEXT);
4891 stmt = gimple_build_call (bfn_decl, 0);
75a70cf9 4892 gimple_call_set_lhs (stmt, vnext);
4893 gsi_insert_after (&si, stmt, GSI_SAME_STMT);
4894 gsi_remove (&si, true);
773c5ba7 4895
ac6e3339 4896 single_succ_edge (l1_bb)->flags = EDGE_FALLTHRU;
4897
75a70cf9 4898 /* Cleanup function replaces GIMPLE_OMP_RETURN in EXIT_BB. */
4899 si = gsi_last_bb (l2_bb);
4900 if (gimple_omp_return_nowait_p (gsi_stmt (si)))
b9a16870 4901 t = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_END_NOWAIT);
03ed154b 4902 else
b9a16870 4903 t = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_END);
75a70cf9 4904 stmt = gimple_build_call (t, 0);
4905 gsi_insert_after (&si, stmt, GSI_SAME_STMT);
4906 gsi_remove (&si, true);
03ed154b 4907 }
773c5ba7 4908
79acaae1 4909 set_immediate_dominator (CDI_DOMINATORS, default_bb, l0_bb);
773c5ba7 4910}
1e8e9920 4911
1e8e9920 4912
61e47ac8 4913/* Expand code for an OpenMP single directive. We've already expanded
4914 much of the code; here we simply place the GOMP_barrier call. */
4915
4916static void
4917expand_omp_single (struct omp_region *region)
4918{
4919 basic_block entry_bb, exit_bb;
75a70cf9 4920 gimple_stmt_iterator si;
61e47ac8 4921 bool need_barrier = false;
4922
4923 entry_bb = region->entry;
4924 exit_bb = region->exit;
4925
75a70cf9 4926 si = gsi_last_bb (entry_bb);
61e47ac8 4927 /* The terminal barrier at the end of a GOMP_single_copy sequence cannot
4928 be removed. We need to ensure that the thread that entered the single
4929 does not exit before the data is copied out by the other threads. */
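  /* (Illustrative case, assuming the usual libgomp protocol: for
     #pragma omp single copyprivate (x) the thread that executed the
     single region publishes x via GOMP_single_copy_start/end, and the
     barrier keeps it from leaving before every other thread has copied
     the value in.)  */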
75a70cf9 4930 if (find_omp_clause (gimple_omp_single_clauses (gsi_stmt (si)),
61e47ac8 4931 OMP_CLAUSE_COPYPRIVATE))
4932 need_barrier = true;
75a70cf9 4933 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SINGLE);
4934 gsi_remove (&si, true);
61e47ac8 4935 single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
4936
75a70cf9 4937 si = gsi_last_bb (exit_bb);
4938 if (!gimple_omp_return_nowait_p (gsi_stmt (si)) || need_barrier)
4939 force_gimple_operand_gsi (&si, build_omp_barrier (), false, NULL_TREE,
4940 false, GSI_SAME_STMT);
4941 gsi_remove (&si, true);
61e47ac8 4942 single_succ_edge (exit_bb)->flags = EDGE_FALLTHRU;
4943}
4944
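/* Concrete example of the constraint above (an assumed user program,
   for illustration only):

     #pragma omp single copyprivate (x)
     x = compute ();

   Every thread reaches the GOMP_barrier emitted in EXIT_BB, so the
   thread that executed the single body cannot leave before the other
   threads have copied X out of the GOMP_single_copy buffer.  Only a
   nowait single without copyprivate may omit the barrier.  */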
4945
4946/* Generic expansion for OpenMP synchronization directives: master,
4947 ordered and critical. All we need to do here is remove the entry
4948 and exit markers for REGION. */
773c5ba7 4949
4950static void
4951expand_omp_synch (struct omp_region *region)
4952{
4953 basic_block entry_bb, exit_bb;
75a70cf9 4954 gimple_stmt_iterator si;
773c5ba7 4955
61e47ac8 4956 entry_bb = region->entry;
4957 exit_bb = region->exit;
773c5ba7 4958
75a70cf9 4959 si = gsi_last_bb (entry_bb);
4960 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SINGLE
4961 || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_MASTER
4962 || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ORDERED
4963 || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_CRITICAL);
4964 gsi_remove (&si, true);
773c5ba7 4965 single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
4966
03ed154b 4967 if (exit_bb)
4968 {
75a70cf9 4969 si = gsi_last_bb (exit_bb);
4970 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_RETURN);
4971 gsi_remove (&si, true);
03ed154b 4972 single_succ_edge (exit_bb)->flags = EDGE_FALLTHRU;
4973 }
773c5ba7 4974}
1e8e9920 4975
2169f33b 4976/* A subroutine of expand_omp_atomic. Attempt to implement the atomic
4977 operation as a normal volatile load. */
4978
4979static bool
4980expand_omp_atomic_load (basic_block load_bb, tree addr, tree loaded_val)
4981{
4982 /* FIXME */
4983 (void) load_bb;
4984 (void) addr;
4985 (void) loaded_val;
4986 return false;
4987}
4988
4989/* A subroutine of expand_omp_atomic. Attempt to implement the atomic
4990 operation as a normal volatile store. */
4991
4992static bool
4993expand_omp_atomic_store (basic_block load_bb, tree addr)
4994{
4995 /* FIXME */
4996 (void) load_bb;
4997 (void) addr;
4998 return false;
4999}
5000
cb7f680b 5001/* A subroutine of expand_omp_atomic. Attempt to implement the atomic
1cd6e20d 5002 operation as a __atomic_fetch_op builtin. INDEX is log2 of the
cb7f680b 5003 size of the data type, and thus usable to find the index of the builtin
5004 decl. Returns false if the expression is not of the proper form. */
5005
5006static bool
5007expand_omp_atomic_fetch_op (basic_block load_bb,
5008 tree addr, tree loaded_val,
5009 tree stored_val, int index)
5010{
b9a16870 5011 enum built_in_function oldbase, newbase, tmpbase;
cb7f680b 5012 tree decl, itype, call;
2169f33b 5013 tree lhs, rhs;
cb7f680b 5014 basic_block store_bb = single_succ (load_bb);
75a70cf9 5015 gimple_stmt_iterator gsi;
5016 gimple stmt;
389dd41b 5017 location_t loc;
1cd6e20d 5018 enum tree_code code;
2169f33b 5019 bool need_old, need_new;
1cd6e20d 5020 enum machine_mode imode;
cb7f680b 5021
5022 /* We expect to find the following sequence:
48e1416a 5023
cb7f680b 5024 load_bb:
75a70cf9 5025 GIMPLE_OMP_ATOMIC_LOAD (tmp, mem)
cb7f680b 5026
5027 store_bb:
5028 val = tmp OP something; (or: something OP tmp)
48e1416a 5029 GIMPLE_OMP_ATOMIC_STORE (val)
cb7f680b 5030
48e1416a 5031 ??? FIXME: Allow a more flexible sequence.
cb7f680b 5032 Perhaps use data flow to pick the statements.
48e1416a 5033
cb7f680b 5034 */
5035
75a70cf9 5036 gsi = gsi_after_labels (store_bb);
5037 stmt = gsi_stmt (gsi);
389dd41b 5038 loc = gimple_location (stmt);
75a70cf9 5039 if (!is_gimple_assign (stmt))
cb7f680b 5040 return false;
75a70cf9 5041 gsi_next (&gsi);
5042 if (gimple_code (gsi_stmt (gsi)) != GIMPLE_OMP_ATOMIC_STORE)
cb7f680b 5043 return false;
2169f33b 5044 need_new = gimple_omp_atomic_need_value_p (gsi_stmt (gsi));
5045 need_old = gimple_omp_atomic_need_value_p (last_stmt (load_bb));
5046 gcc_checking_assert (!need_old || !need_new);
cb7f680b 5047
75a70cf9 5048 if (!operand_equal_p (gimple_assign_lhs (stmt), stored_val, 0))
cb7f680b 5049 return false;
5050
cb7f680b 5051 /* Check for one of the supported fetch-op operations. */
1cd6e20d 5052 code = gimple_assign_rhs_code (stmt);
5053 switch (code)
cb7f680b 5054 {
5055 case PLUS_EXPR:
5056 case POINTER_PLUS_EXPR:
1cd6e20d 5057 oldbase = BUILT_IN_ATOMIC_FETCH_ADD_N;
5058 newbase = BUILT_IN_ATOMIC_ADD_FETCH_N;
cb7f680b 5059 break;
5060 case MINUS_EXPR:
1cd6e20d 5061 oldbase = BUILT_IN_ATOMIC_FETCH_SUB_N;
5062 newbase = BUILT_IN_ATOMIC_SUB_FETCH_N;
cb7f680b 5063 break;
5064 case BIT_AND_EXPR:
1cd6e20d 5065 oldbase = BUILT_IN_ATOMIC_FETCH_AND_N;
5066 newbase = BUILT_IN_ATOMIC_AND_FETCH_N;
cb7f680b 5067 break;
5068 case BIT_IOR_EXPR:
1cd6e20d 5069 oldbase = BUILT_IN_ATOMIC_FETCH_OR_N;
5070 newbase = BUILT_IN_ATOMIC_OR_FETCH_N;
cb7f680b 5071 break;
5072 case BIT_XOR_EXPR:
1cd6e20d 5073 oldbase = BUILT_IN_ATOMIC_FETCH_XOR_N;
5074 newbase = BUILT_IN_ATOMIC_XOR_FETCH_N;
cb7f680b 5075 break;
5076 default:
5077 return false;
5078 }
1cd6e20d 5079
cb7f680b 5080 /* Make sure the expression is of the proper form. */
75a70cf9 5081 if (operand_equal_p (gimple_assign_rhs1 (stmt), loaded_val, 0))
5082 rhs = gimple_assign_rhs2 (stmt);
5083 else if (commutative_tree_code (gimple_assign_rhs_code (stmt))
5084 && operand_equal_p (gimple_assign_rhs2 (stmt), loaded_val, 0))
5085 rhs = gimple_assign_rhs1 (stmt);
cb7f680b 5086 else
5087 return false;
5088
b9a16870 5089 tmpbase = ((enum built_in_function)
5090 ((need_new ? newbase : oldbase) + index + 1));
5091 decl = builtin_decl_explicit (tmpbase);
0f94f46b 5092 if (decl == NULL_TREE)
5093 return false;
cb7f680b 5094 itype = TREE_TYPE (TREE_TYPE (decl));
1cd6e20d 5095 imode = TYPE_MODE (itype);
cb7f680b 5096
1cd6e20d 5097 /* We could test all of the various optabs involved, but the fact of the
5098 matter is that (with the exception of i486 vs i586 and xadd) all targets
5099 that support any atomic operation optab also implement compare-and-swap.
5100 Let optabs.c take care of expanding any compare-and-swap loop. */
29139cdc 5101 if (!can_compare_and_swap_p (imode, true))
cb7f680b 5102 return false;
5103
75a70cf9 5104 gsi = gsi_last_bb (load_bb);
5105 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_ATOMIC_LOAD);
1cd6e20d 5106
5107 /* OpenMP does not imply any barrier-like semantics on its atomic ops.
5108 It only requires that the operation happen atomically. Thus we can
5109 use the RELAXED memory model. */
5110 call = build_call_expr_loc (loc, decl, 3, addr,
5111 fold_convert_loc (loc, itype, rhs),
5112 build_int_cst (NULL, MEMMODEL_RELAXED));
5113
2169f33b 5114 if (need_old || need_new)
5115 {
5116 lhs = need_old ? loaded_val : stored_val;
5117 call = fold_convert_loc (loc, TREE_TYPE (lhs), call);
5118 call = build2_loc (loc, MODIFY_EXPR, void_type_node, lhs, call);
5119 }
5120 else
5121 call = fold_convert_loc (loc, void_type_node, call);
75a70cf9 5122 force_gimple_operand_gsi (&gsi, call, true, NULL_TREE, true, GSI_SAME_STMT);
5123 gsi_remove (&gsi, true);
cb7f680b 5124
75a70cf9 5125 gsi = gsi_last_bb (store_bb);
5126 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_ATOMIC_STORE);
5127 gsi_remove (&gsi, true);
5128 gsi = gsi_last_bb (store_bb);
5129 gsi_remove (&gsi, true);
cb7f680b 5130
5131 if (gimple_in_ssa_p (cfun))
5132 update_ssa (TODO_update_ssa_no_phi);
5133
5134 return true;
5135}
5136
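/* Worked example (a sketch; assumes a 4-byte int X on a target whose
   __atomic builtins are available):

     #pragma omp atomic
     x += 3;

   arrives here as a GIMPLE_OMP_ATOMIC_LOAD/STORE pair separated by a
   single PLUS_EXPR statement, and leaves as the call

     __atomic_fetch_add_4 (&x, 3, MEMMODEL_RELAXED);

   INDEX is 2 (log2 of sizeof (int)), so oldbase + index + 1 selects
   the _4 variant of BUILT_IN_ATOMIC_FETCH_ADD_N.  */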
5137/* A subroutine of expand_omp_atomic. Implement the atomic operation as:
5138
5139 oldval = *addr;
5140 repeat:
5141 newval = rhs; // with oldval replacing *addr in rhs
5142 prev = __sync_val_compare_and_swap (addr, oldval, newval);
5143 if (prev != oldval)
5144 { oldval = prev; goto repeat; }
5145
5146 INDEX is log2 of the size of the data type, and thus usable to find the
5147 index of the builtin decl. */
5148
5149static bool
5150expand_omp_atomic_pipeline (basic_block load_bb, basic_block store_bb,
5151 tree addr, tree loaded_val, tree stored_val,
5152 int index)
5153{
790368c5 5154 tree loadedi, storedi, initial, new_storedi, old_vali;
cb7f680b 5155 tree type, itype, cmpxchg, iaddr;
75a70cf9 5156 gimple_stmt_iterator si;
cb7f680b 5157 basic_block loop_header = single_succ (load_bb);
75a70cf9 5158 gimple phi, stmt;
cb7f680b 5159 edge e;
b9a16870 5160 enum built_in_function fncode;
cb7f680b 5161
1cd6e20d 5162 /* ??? We need a non-pointer interface to __atomic_compare_exchange in
5163 order to use the RELAXED memory model effectively. */
b9a16870 5164 fncode = (enum built_in_function)((int)BUILT_IN_SYNC_VAL_COMPARE_AND_SWAP_N
5165 + index + 1);
5166 cmpxchg = builtin_decl_explicit (fncode);
0f94f46b 5167 if (cmpxchg == NULL_TREE)
5168 return false;
cb7f680b 5169 type = TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (addr)));
5170 itype = TREE_TYPE (TREE_TYPE (cmpxchg));
5171
29139cdc 5172 if (!can_compare_and_swap_p (TYPE_MODE (itype), true))
cb7f680b 5173 return false;
5174
75a70cf9 5175 /* Load the initial value, replacing the GIMPLE_OMP_ATOMIC_LOAD. */
5176 si = gsi_last_bb (load_bb);
5177 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_LOAD);
5178
790368c5 5179 /* For floating-point values, we'll need to view-convert them to integers
5180 so that we can perform the atomic compare and swap. Simplify the
5181 following code by always setting up the "i"ntegral variables. */
5182 if (!INTEGRAL_TYPE_P (type) && !POINTER_TYPE_P (type))
5183 {
75a70cf9 5184 tree iaddr_val;
5185
8115f0af 5186 iaddr = create_tmp_var (build_pointer_type_for_mode (itype, ptr_mode,
5187 true), NULL);
75a70cf9 5188 iaddr_val
5189 = force_gimple_operand_gsi (&si,
5190 fold_convert (TREE_TYPE (iaddr), addr),
5191 false, NULL_TREE, true, GSI_SAME_STMT);
5192 stmt = gimple_build_assign (iaddr, iaddr_val);
5193 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
790368c5 5194 loadedi = create_tmp_var (itype, NULL);
5195 if (gimple_in_ssa_p (cfun))
5196 {
5197 add_referenced_var (iaddr);
5198 add_referenced_var (loadedi);
5199 loadedi = make_ssa_name (loadedi, NULL);
5200 }
5201 }
5202 else
5203 {
5204 iaddr = addr;
5205 loadedi = loaded_val;
5206 }
75a70cf9 5207
182cf5a9 5208 initial
5209 = force_gimple_operand_gsi (&si,
5210 build2 (MEM_REF, TREE_TYPE (TREE_TYPE (iaddr)),
5211 iaddr,
5212 build_int_cst (TREE_TYPE (iaddr), 0)),
5213 true, NULL_TREE, true, GSI_SAME_STMT);
790368c5 5214
5215 /* Move the value to the LOADEDI temporary. */
cb7f680b 5216 if (gimple_in_ssa_p (cfun))
5217 {
75a70cf9 5218 gcc_assert (gimple_seq_empty_p (phi_nodes (loop_header)));
790368c5 5219 phi = create_phi_node (loadedi, loop_header);
5220 SSA_NAME_DEF_STMT (loadedi) = phi;
cb7f680b 5221 SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, single_succ_edge (load_bb)),
5222 initial);
5223 }
5224 else
75a70cf9 5225 gsi_insert_before (&si,
5226 gimple_build_assign (loadedi, initial),
5227 GSI_SAME_STMT);
790368c5 5228 if (loadedi != loaded_val)
5229 {
75a70cf9 5230 gimple_stmt_iterator gsi2;
5231 tree x;
790368c5 5232
5233 x = build1 (VIEW_CONVERT_EXPR, type, loadedi);
75a70cf9 5234 gsi2 = gsi_start_bb (loop_header);
790368c5 5235 if (gimple_in_ssa_p (cfun))
5236 {
75a70cf9 5237 gimple stmt;
5238 x = force_gimple_operand_gsi (&gsi2, x, true, NULL_TREE,
5239 true, GSI_SAME_STMT);
5240 stmt = gimple_build_assign (loaded_val, x);
5241 gsi_insert_before (&gsi2, stmt, GSI_SAME_STMT);
790368c5 5242 }
5243 else
5244 {
75a70cf9 5245 x = build2 (MODIFY_EXPR, TREE_TYPE (loaded_val), loaded_val, x);
5246 force_gimple_operand_gsi (&gsi2, x, true, NULL_TREE,
5247 true, GSI_SAME_STMT);
790368c5 5248 }
5249 }
75a70cf9 5250 gsi_remove (&si, true);
cb7f680b 5251
75a70cf9 5252 si = gsi_last_bb (store_bb);
5253 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_STORE);
cb7f680b 5254
790368c5 5255 if (iaddr == addr)
5256 storedi = stored_val;
cb7f680b 5257 else
790368c5 5258 storedi =
75a70cf9 5259 force_gimple_operand_gsi (&si,
790368c5 5260 build1 (VIEW_CONVERT_EXPR, itype,
5261 stored_val), true, NULL_TREE, true,
75a70cf9 5262 GSI_SAME_STMT);
cb7f680b 5263
5264 /* Build the compare&swap statement. */
5265 new_storedi = build_call_expr (cmpxchg, 3, iaddr, loadedi, storedi);
75a70cf9 5266 new_storedi = force_gimple_operand_gsi (&si,
87f9ffa4 5267 fold_convert (TREE_TYPE (loadedi),
5268 new_storedi),
cb7f680b 5269 true, NULL_TREE,
75a70cf9 5270 true, GSI_SAME_STMT);
cb7f680b 5271
5272 if (gimple_in_ssa_p (cfun))
5273 old_vali = loadedi;
5274 else
5275 {
87f9ffa4 5276 old_vali = create_tmp_var (TREE_TYPE (loadedi), NULL);
790368c5 5277 if (gimple_in_ssa_p (cfun))
5278 add_referenced_var (old_vali);
75a70cf9 5279 stmt = gimple_build_assign (old_vali, loadedi);
5280 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
cb7f680b 5281
75a70cf9 5282 stmt = gimple_build_assign (loadedi, new_storedi);
5283 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
cb7f680b 5284 }
5285
5286 /* Note that we always perform the comparison as an integer, even for
48e1416a 5287 floating point. This allows the atomic operation to properly
cb7f680b 5288 succeed even with NaNs and -0.0. */
75a70cf9 5289 stmt = gimple_build_cond_empty
5290 (build2 (NE_EXPR, boolean_type_node,
5291 new_storedi, old_vali));
5292 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
cb7f680b 5293
5294 /* Update cfg. */
5295 e = single_succ_edge (store_bb);
5296 e->flags &= ~EDGE_FALLTHRU;
5297 e->flags |= EDGE_FALSE_VALUE;
5298
5299 e = make_edge (store_bb, loop_header, EDGE_TRUE_VALUE);
5300
790368c5 5301 /* Copy the new value to loadedi (we already did that before the condition
cb7f680b 5302 if we are not in SSA). */
5303 if (gimple_in_ssa_p (cfun))
5304 {
75a70cf9 5305 phi = gimple_seq_first_stmt (phi_nodes (loop_header));
790368c5 5306 SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, e), new_storedi);
cb7f680b 5307 }
5308
75a70cf9 5309 /* Remove GIMPLE_OMP_ATOMIC_STORE. */
5310 gsi_remove (&si, true);
cb7f680b 5311
5312 if (gimple_in_ssa_p (cfun))
5313 update_ssa (TODO_update_ssa_no_phi);
5314
5315 return true;
5316}
5317
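/* Worked example of the loop above (a sketch; assumes a 4-byte float F
   on a target that has __sync_val_compare_and_swap_4 but no float
   fetch-op builtin):

     #pragma omp atomic
     f *= 2.0f;

   becomes, roughly,

     iaddr = (unsigned int *) &f;
     loadedi = *iaddr;
   repeat:
     newf = VIEW_CONVERT_EXPR<float> (loadedi) * 2.0f;
     storedi = VIEW_CONVERT_EXPR<unsigned int> (newf);
     new_storedi = __sync_val_compare_and_swap_4 (iaddr, loadedi, storedi);
     if (new_storedi != loadedi)
       { loadedi = new_storedi; goto repeat; }

   Comparing the integer images rather than the float values is what
   lets the loop terminate for NaNs and -0.0, as noted above.  */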
5318/* A subroutine of expand_omp_atomic. Implement the atomic operation as:
5319
5320 GOMP_atomic_start ();
5321 *addr = rhs;
5322 GOMP_atomic_end ();
5323
5324 The result is not globally atomic, but works so long as all parallel
5325 references are within #pragma omp atomic directives. According to
5326 responses received from omp@openmp.org, this appears to be within spec,
5327 which makes sense, since that's how several other compilers handle
48e1416a 5328 this situation as well.
75a70cf9 5329 LOADED_VAL and ADDR are the operands of GIMPLE_OMP_ATOMIC_LOAD we're
5330 expanding. STORED_VAL is the operand of the matching
5331 GIMPLE_OMP_ATOMIC_STORE.
cb7f680b 5332
48e1416a 5333 We replace
5334 GIMPLE_OMP_ATOMIC_LOAD (loaded_val, addr) with
cb7f680b 5335 loaded_val = *addr;
5336
5337 and replace
75a70cf9 5338 GIMPLE_OMP_ATOMIC_STORE (stored_val) with
48e1416a 5339 *addr = stored_val;
cb7f680b 5340*/
5341
5342static bool
5343expand_omp_atomic_mutex (basic_block load_bb, basic_block store_bb,
5344 tree addr, tree loaded_val, tree stored_val)
5345{
75a70cf9 5346 gimple_stmt_iterator si;
5347 gimple stmt;
cb7f680b 5348 tree t;
5349
75a70cf9 5350 si = gsi_last_bb (load_bb);
5351 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_LOAD);
cb7f680b 5352
b9a16870 5353 t = builtin_decl_explicit (BUILT_IN_GOMP_ATOMIC_START);
414c3a2c 5354 t = build_call_expr (t, 0);
75a70cf9 5355 force_gimple_operand_gsi (&si, t, true, NULL_TREE, true, GSI_SAME_STMT);
cb7f680b 5356
182cf5a9 5357 stmt = gimple_build_assign (loaded_val, build_simple_mem_ref (addr));
75a70cf9 5358 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
5359 gsi_remove (&si, true);
cb7f680b 5360
75a70cf9 5361 si = gsi_last_bb (store_bb);
5362 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_STORE);
cb7f680b 5363
182cf5a9 5364 stmt = gimple_build_assign (build_simple_mem_ref (unshare_expr (addr)),
5365 stored_val);
75a70cf9 5366 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
cb7f680b 5367
b9a16870 5368 t = builtin_decl_explicit (BUILT_IN_GOMP_ATOMIC_END);
414c3a2c 5369 t = build_call_expr (t, 0);
75a70cf9 5370 force_gimple_operand_gsi (&si, t, true, NULL_TREE, true, GSI_SAME_STMT);
5371 gsi_remove (&si, true);
cb7f680b 5372
5373 if (gimple_in_ssa_p (cfun))
5374 update_ssa (TODO_update_ssa_no_phi);
5375 return true;
5376}
5377
48e1416a 5378/* Expand a GIMPLE_OMP_ATOMIC statement. We try to expand
5379 using expand_omp_atomic_fetch_op. If that fails, we try to
cb7f680b 5380 call expand_omp_atomic_pipeline, and if it fails too, the
5381 ultimate fallback is wrapping the operation in a mutex
48e1416a 5382 (expand_omp_atomic_mutex). REGION is the atomic region built
5383 by build_omp_regions_1(). */
cb7f680b 5384
5385static void
5386expand_omp_atomic (struct omp_region *region)
5387{
5388 basic_block load_bb = region->entry, store_bb = region->exit;
75a70cf9 5389 gimple load = last_stmt (load_bb), store = last_stmt (store_bb);
5390 tree loaded_val = gimple_omp_atomic_load_lhs (load);
5391 tree addr = gimple_omp_atomic_load_rhs (load);
5392 tree stored_val = gimple_omp_atomic_store_val (store);
cb7f680b 5393 tree type = TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (addr)));
5394 HOST_WIDE_INT index;
5395
5396 /* Make sure the type is one of the supported sizes. */
5397 index = tree_low_cst (TYPE_SIZE_UNIT (type), 1);
5398 index = exact_log2 (index);
5399 if (index >= 0 && index <= 4)
5400 {
5401 unsigned int align = TYPE_ALIGN_UNIT (type);
5402
5403 /* __sync builtins require strict data alignment. */
5404 if (exact_log2 (align) >= index)
5405 {
2169f33b 5406 /* Atomic load. FIXME: have some target hook signal which loads
5407 are actually atomic? */
5408 if (loaded_val == stored_val
5409 && (GET_MODE_CLASS (TYPE_MODE (type)) == MODE_INT
5410 || GET_MODE_CLASS (TYPE_MODE (type)) == MODE_FLOAT)
5411 && GET_MODE_BITSIZE (TYPE_MODE (type)) <= BITS_PER_WORD
5412 && expand_omp_atomic_load (load_bb, addr, loaded_val))
5413 return;
5414
5415 /* Atomic store. FIXME: have some target hook signal which
5416 stores are actually atomic? */
5417 if ((GET_MODE_CLASS (TYPE_MODE (type)) == MODE_INT
5418 || GET_MODE_CLASS (TYPE_MODE (type)) == MODE_FLOAT)
5419 && GET_MODE_BITSIZE (TYPE_MODE (type)) <= BITS_PER_WORD
5420 && store_bb == single_succ (load_bb)
5421 && first_stmt (store_bb) == store
5422 && expand_omp_atomic_store (load_bb, addr))
5423 return;
5424
cb7f680b 5425 /* When possible, use specialized atomic update functions. */
5426 if ((INTEGRAL_TYPE_P (type) || POINTER_TYPE_P (type))
5427 && store_bb == single_succ (load_bb))
5428 {
5429 if (expand_omp_atomic_fetch_op (load_bb, addr,
5430 loaded_val, stored_val, index))
5431 return;
5432 }
5433
5434 /* If we don't have specialized __sync builtins, try to implement
5435 the operation as a compare-and-swap loop. */
5436 if (expand_omp_atomic_pipeline (load_bb, store_bb, addr,
5437 loaded_val, stored_val, index))
5438 return;
5439 }
5440 }
5441
5442 /* The ultimate fallback is wrapping the operation in a mutex. */
5443 expand_omp_atomic_mutex (load_bb, store_bb, addr, loaded_val, stored_val);
5444}
5445
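/* Dispatch summary for the strategies above (hedged; the sizes are
   assumptions for a typical 64-bit target): a 4-byte int yields
   INDEX == 2 and is offered, in order, to expand_omp_atomic_load or
   expand_omp_atomic_store, then the fetch-op builtins, then the
   compare-and-swap pipeline.  A 16-byte type yields INDEX == 4; if the
   target lacks a 16-byte compare-and-swap, or if the operand is
   under-aligned (exact_log2 (align) < INDEX), control falls through to
   the GOMP_atomic_start/end mutex fallback.  */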
1e8e9920 5446
773c5ba7 5447/* Expand the parallel region tree rooted at REGION. Expansion
5448 proceeds in depth-first order. Innermost regions are expanded
5449 first. This way, parallel regions that require a new function to
75a70cf9 5450 be created (e.g., GIMPLE_OMP_PARALLEL) can be expanded without having any
773c5ba7 5451 internal dependencies in their body. */
5452
5453static void
5454expand_omp (struct omp_region *region)
5455{
5456 while (region)
5457 {
1d22f541 5458 location_t saved_location;
5459
d1d5b012 5460 /* First, determine whether this is a combined parallel+workshare
5461 region. */
75a70cf9 5462 if (region->type == GIMPLE_OMP_PARALLEL)
d1d5b012 5463 determine_parallel_type (region);
5464
773c5ba7 5465 if (region->inner)
5466 expand_omp (region->inner);
5467
1d22f541 5468 saved_location = input_location;
75a70cf9 5469 if (gimple_has_location (last_stmt (region->entry)))
5470 input_location = gimple_location (last_stmt (region->entry));
1d22f541 5471
61e47ac8 5472 switch (region->type)
773c5ba7 5473 {
75a70cf9 5474 case GIMPLE_OMP_PARALLEL:
5475 case GIMPLE_OMP_TASK:
fd6481cf 5476 expand_omp_taskreg (region);
5477 break;
5478
75a70cf9 5479 case GIMPLE_OMP_FOR:
61e47ac8 5480 expand_omp_for (region);
5481 break;
773c5ba7 5482
75a70cf9 5483 case GIMPLE_OMP_SECTIONS:
61e47ac8 5484 expand_omp_sections (region);
5485 break;
773c5ba7 5486
75a70cf9 5487 case GIMPLE_OMP_SECTION:
61e47ac8 5488 /* Individual omp sections are handled together with their
75a70cf9 5489 parent GIMPLE_OMP_SECTIONS region. */
61e47ac8 5490 break;
773c5ba7 5491
75a70cf9 5492 case GIMPLE_OMP_SINGLE:
61e47ac8 5493 expand_omp_single (region);
5494 break;
773c5ba7 5495
75a70cf9 5496 case GIMPLE_OMP_MASTER:
5497 case GIMPLE_OMP_ORDERED:
5498 case GIMPLE_OMP_CRITICAL:
61e47ac8 5499 expand_omp_synch (region);
5500 break;
773c5ba7 5501
75a70cf9 5502 case GIMPLE_OMP_ATOMIC_LOAD:
cb7f680b 5503 expand_omp_atomic (region);
5504 break;
5505
61e47ac8 5506 default:
5507 gcc_unreachable ();
5508 }
cc5982dc 5509
1d22f541 5510 input_location = saved_location;
773c5ba7 5511 region = region->next;
5512 }
5513}
5514
5515
5516/* Helper for build_omp_regions. Scan the dominator tree starting at
28c92cbb 5517 block BB. PARENT is the region that contains BB. If SINGLE_TREE is
5518 true, the function ends once a single tree is built (otherwise, the whole
5519 forest of OMP constructs may be built). */
773c5ba7 5520
5521static void
28c92cbb 5522build_omp_regions_1 (basic_block bb, struct omp_region *parent,
5523 bool single_tree)
773c5ba7 5524{
75a70cf9 5525 gimple_stmt_iterator gsi;
5526 gimple stmt;
773c5ba7 5527 basic_block son;
5528
75a70cf9 5529 gsi = gsi_last_bb (bb);
5530 if (!gsi_end_p (gsi) && is_gimple_omp (gsi_stmt (gsi)))
773c5ba7 5531 {
5532 struct omp_region *region;
75a70cf9 5533 enum gimple_code code;
773c5ba7 5534
75a70cf9 5535 stmt = gsi_stmt (gsi);
5536 code = gimple_code (stmt);
5537 if (code == GIMPLE_OMP_RETURN)
773c5ba7 5538 {
5539 /* STMT is the return point out of region PARENT. Mark it
5540 as the exit point and make PARENT the immediately
5541 enclosing region. */
5542 gcc_assert (parent);
5543 region = parent;
61e47ac8 5544 region->exit = bb;
773c5ba7 5545 parent = parent->outer;
773c5ba7 5546 }
75a70cf9 5547 else if (code == GIMPLE_OMP_ATOMIC_STORE)
cb7f680b 5548 {
75a70cf9 5549 /* GIMPLE_OMP_ATOMIC_STORE is analogous to
5550 GIMPLE_OMP_RETURN, but matches with
5551 GIMPLE_OMP_ATOMIC_LOAD. */
cb7f680b 5552 gcc_assert (parent);
75a70cf9 5553 gcc_assert (parent->type == GIMPLE_OMP_ATOMIC_LOAD);
cb7f680b 5554 region = parent;
5555 region->exit = bb;
5556 parent = parent->outer;
5557 }
5558
75a70cf9 5559 else if (code == GIMPLE_OMP_CONTINUE)
61e47ac8 5560 {
5561 gcc_assert (parent);
5562 parent->cont = bb;
5563 }
75a70cf9 5564 else if (code == GIMPLE_OMP_SECTIONS_SWITCH)
ac6e3339 5565 {
75a70cf9 5566 /* GIMPLE_OMP_SECTIONS_SWITCH is part of
5567 GIMPLE_OMP_SECTIONS, and we do nothing for it. */
5568 ;
ac6e3339 5569 }
773c5ba7 5570 else
5571 {
5572 /* Otherwise, this directive becomes the parent for a new
5573 region. */
61e47ac8 5574 region = new_omp_region (bb, code, parent);
773c5ba7 5575 parent = region;
5576 }
773c5ba7 5577 }
5578
28c92cbb 5579 if (single_tree && !parent)
5580 return;
5581
773c5ba7 5582 for (son = first_dom_son (CDI_DOMINATORS, bb);
5583 son;
5584 son = next_dom_son (CDI_DOMINATORS, son))
28c92cbb 5585 build_omp_regions_1 (son, parent, single_tree);
5586}
5587
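/* Shape example (an assumed program, for illustration): for

     #pragma omp parallel
     {
       #pragma omp for
       for (i = 0; i < n; i++)
         body (i);
     }

   the dominator walk builds a GIMPLE_OMP_PARALLEL region whose ->inner
   field holds the GIMPLE_OMP_FOR region; each region's ->exit is set
   when the matching GIMPLE_OMP_RETURN is reached, and ->cont when the
   loop's GIMPLE_OMP_CONTINUE is seen.  */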
5588/* Build the tree of OMP regions rooted at ROOT, storing it in
5589 root_omp_region. */
5590
5591static void
5592build_omp_regions_root (basic_block root)
5593{
5594 gcc_assert (root_omp_region == NULL);
5595 build_omp_regions_1 (root, NULL, true);
5596 gcc_assert (root_omp_region != NULL);
773c5ba7 5597}
5598
28c92cbb 5599/* Expand the OMP construct (and its subconstructs) starting in HEAD. */
5600
5601void
5602omp_expand_local (basic_block head)
5603{
5604 build_omp_regions_root (head);
5605 if (dump_file && (dump_flags & TDF_DETAILS))
5606 {
5607 fprintf (dump_file, "\nOMP region tree\n\n");
5608 dump_omp_region (dump_file, root_omp_region, 0);
5609 fprintf (dump_file, "\n");
5610 }
5611
5612 remove_exit_barriers (root_omp_region);
5613 expand_omp (root_omp_region);
5614
5615 free_omp_regions ();
5616}
773c5ba7 5617
5618/* Scan the CFG and build a tree of OMP regions, storing the root in
5619 root_omp_region. */
5620
5621static void
5622build_omp_regions (void)
5623{
61e47ac8 5624 gcc_assert (root_omp_region == NULL);
773c5ba7 5625 calculate_dominance_info (CDI_DOMINATORS);
28c92cbb 5626 build_omp_regions_1 (ENTRY_BLOCK_PTR, NULL, false);
773c5ba7 5627}
5628
773c5ba7 5629/* Main entry point for expanding OMP-GIMPLE into runtime calls. */
5630
2a1990e9 5631static unsigned int
773c5ba7 5632execute_expand_omp (void)
5633{
5634 build_omp_regions ();
5635
61e47ac8 5636 if (!root_omp_region)
5637 return 0;
773c5ba7 5638
61e47ac8 5639 if (dump_file)
5640 {
5641 fprintf (dump_file, "\nOMP region tree\n\n");
5642 dump_omp_region (dump_file, root_omp_region, 0);
5643 fprintf (dump_file, "\n");
773c5ba7 5644 }
61e47ac8 5645
5646 remove_exit_barriers (root_omp_region);
5647
5648 expand_omp (root_omp_region);
5649
61e47ac8 5650 cleanup_tree_cfg ();
5651
5652 free_omp_regions ();
5653
2a1990e9 5654 return 0;
773c5ba7 5655}
5656
79acaae1 5657/* OMP expansion -- the default pass, run before creation of SSA form. */
5658
773c5ba7 5659static bool
5660gate_expand_omp (void)
5661{
852f689e 5662 return (flag_openmp != 0 && !seen_error ());
773c5ba7 5663}
5664
48e1416a 5665struct gimple_opt_pass pass_expand_omp =
773c5ba7 5666{
20099e35 5667 {
5668 GIMPLE_PASS,
773c5ba7 5669 "ompexp", /* name */
5670 gate_expand_omp, /* gate */
5671 execute_expand_omp, /* execute */
5672 NULL, /* sub */
5673 NULL, /* next */
5674 0, /* static_pass_number */
0b1615c1 5675 TV_NONE, /* tv_id */
773c5ba7 5676 PROP_gimple_any, /* properties_required */
41709826 5677 0, /* properties_provided */
773c5ba7 5678 0, /* properties_destroyed */
5679 0, /* todo_flags_start */
771e2890 5680 0 /* todo_flags_finish */
20099e35 5681 }
773c5ba7 5682};
5683\f
5684/* Routines to lower OpenMP directives into OMP-GIMPLE. */
5685
75a70cf9 5686/* Lower the OpenMP sections directive in the current statement in GSI_P.
5687 CTX is the enclosing OMP context for the current statement. */
773c5ba7 5688
5689static void
75a70cf9 5690lower_omp_sections (gimple_stmt_iterator *gsi_p, omp_context *ctx)
773c5ba7 5691{
75a70cf9 5692 tree block, control;
5693 gimple_stmt_iterator tgsi;
773c5ba7 5694 unsigned i, len;
75a70cf9 5695 gimple stmt, new_stmt, bind, t;
5696 gimple_seq ilist, dlist, olist, new_body, body;
dac18d1a 5697 struct gimplify_ctx gctx;
773c5ba7 5698
75a70cf9 5699 stmt = gsi_stmt (*gsi_p);
773c5ba7 5700
dac18d1a 5701 push_gimplify_context (&gctx);
773c5ba7 5702
5703 dlist = NULL;
5704 ilist = NULL;
75a70cf9 5705 lower_rec_input_clauses (gimple_omp_sections_clauses (stmt),
5706 &ilist, &dlist, ctx);
773c5ba7 5707
75a70cf9 5708 tgsi = gsi_start (gimple_omp_body (stmt));
5709 for (len = 0; !gsi_end_p (tgsi); len++, gsi_next (&tgsi))
773c5ba7 5710 continue;
5711
75a70cf9 5712 tgsi = gsi_start (gimple_omp_body (stmt));
5713 body = NULL;
5714 for (i = 0; i < len; i++, gsi_next (&tgsi))
773c5ba7 5715 {
5716 omp_context *sctx;
75a70cf9 5717 gimple sec_start;
773c5ba7 5718
75a70cf9 5719 sec_start = gsi_stmt (tgsi);
773c5ba7 5720 sctx = maybe_lookup_ctx (sec_start);
5721 gcc_assert (sctx);
5722
75a70cf9 5723 gimple_seq_add_stmt (&body, sec_start);
61e47ac8 5724
75a70cf9 5725 lower_omp (gimple_omp_body (sec_start), sctx);
5726 gimple_seq_add_seq (&body, gimple_omp_body (sec_start));
5727 gimple_omp_set_body (sec_start, NULL);
773c5ba7 5728
5729 if (i == len - 1)
5730 {
75a70cf9 5731 gimple_seq l = NULL;
5732 lower_lastprivate_clauses (gimple_omp_sections_clauses (stmt), NULL,
773c5ba7 5733 &l, ctx);
75a70cf9 5734 gimple_seq_add_seq (&body, l);
5735 gimple_omp_section_set_last (sec_start);
773c5ba7 5736 }
48e1416a 5737
75a70cf9 5738 gimple_seq_add_stmt (&body, gimple_build_omp_return (false));
773c5ba7 5739 }
1e8e9920 5740
5741 block = make_node (BLOCK);
75a70cf9 5742 bind = gimple_build_bind (NULL, body, block);
1e8e9920 5743
75a70cf9 5744 olist = NULL;
5745 lower_reduction_clauses (gimple_omp_sections_clauses (stmt), &olist, ctx);
773c5ba7 5746
1d22f541 5747 block = make_node (BLOCK);
75a70cf9 5748 new_stmt = gimple_build_bind (NULL, NULL, block);
773c5ba7 5749
1d22f541 5750 pop_gimplify_context (new_stmt);
75a70cf9 5751 gimple_bind_append_vars (new_stmt, ctx->block_vars);
5752 BLOCK_VARS (block) = gimple_bind_vars (bind);
1d22f541 5753 if (BLOCK_VARS (block))
5754 TREE_USED (block) = 1;
5755
75a70cf9 5756 new_body = NULL;
5757 gimple_seq_add_seq (&new_body, ilist);
5758 gimple_seq_add_stmt (&new_body, stmt);
5759 gimple_seq_add_stmt (&new_body, gimple_build_omp_sections_switch ());
5760 gimple_seq_add_stmt (&new_body, bind);
61e47ac8 5761
ac6e3339 5762 control = create_tmp_var (unsigned_type_node, ".section");
75a70cf9 5763 t = gimple_build_omp_continue (control, control);
5764 gimple_omp_sections_set_control (stmt, control);
5765 gimple_seq_add_stmt (&new_body, t);
61e47ac8 5766
75a70cf9 5767 gimple_seq_add_seq (&new_body, olist);
5768 gimple_seq_add_seq (&new_body, dlist);
773c5ba7 5769
75a70cf9 5770 new_body = maybe_catch_exception (new_body);
aade31a0 5771
75a70cf9 5772 t = gimple_build_omp_return
5773 (!!find_omp_clause (gimple_omp_sections_clauses (stmt),
5774 OMP_CLAUSE_NOWAIT));
5775 gimple_seq_add_stmt (&new_body, t);
61e47ac8 5776
75a70cf9 5777 gimple_bind_set_body (new_stmt, new_body);
5778 gimple_omp_set_body (stmt, NULL);
773c5ba7 5779
75a70cf9 5780 gsi_replace (gsi_p, new_stmt, true);
1e8e9920 5781}
5782
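/* Resulting layout, informally (a hedged sketch of NEW_BODY as
   assembled above):

     <ilist: private/firstprivate setup>
     GIMPLE_OMP_SECTIONS <clauses, .section control var>
     GIMPLE_OMP_SECTIONS_SWITCH
     bind { <section 1> ... <section N, with lastprivate copy-out> }
     GIMPLE_OMP_CONTINUE (.section, .section)
     <olist: reductions>  <dlist: destructors>
     GIMPLE_OMP_RETURN (nowait?)

   pass_expand_omp later turns this into the GOMP_sections_start /
   GOMP_sections_next driven switch shown earlier in this file.  */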
5783
773c5ba7 5784/* A subroutine of lower_omp_single. Expand the simple form of
75a70cf9 5785 a GIMPLE_OMP_SINGLE, without a copyprivate clause:
1e8e9920 5786
5787 if (GOMP_single_start ())
5788 BODY;
5789 [ GOMP_barrier (); ] -> unless 'nowait' is present.
773c5ba7 5790
5791 FIXME. It may be better to delay expanding the logic of this until
5792 pass_expand_omp. The expanded logic may make the job more difficult
5793 for a synchronization analysis pass. */
1e8e9920 5794
5795static void
75a70cf9 5796lower_omp_single_simple (gimple single_stmt, gimple_seq *pre_p)
1e8e9920 5797{
e60a6f7b 5798 location_t loc = gimple_location (single_stmt);
5799 tree tlabel = create_artificial_label (loc);
5800 tree flabel = create_artificial_label (loc);
75a70cf9 5801 gimple call, cond;
5802 tree lhs, decl;
5803
b9a16870 5804 decl = builtin_decl_explicit (BUILT_IN_GOMP_SINGLE_START);
75a70cf9 5805 lhs = create_tmp_var (TREE_TYPE (TREE_TYPE (decl)), NULL);
5806 call = gimple_build_call (decl, 0);
5807 gimple_call_set_lhs (call, lhs);
5808 gimple_seq_add_stmt (pre_p, call);
5809
5810 cond = gimple_build_cond (EQ_EXPR, lhs,
389dd41b 5811 fold_convert_loc (loc, TREE_TYPE (lhs),
5812 boolean_true_node),
75a70cf9 5813 tlabel, flabel);
5814 gimple_seq_add_stmt (pre_p, cond);
5815 gimple_seq_add_stmt (pre_p, gimple_build_label (tlabel));
5816 gimple_seq_add_seq (pre_p, gimple_omp_body (single_stmt));
5817 gimple_seq_add_stmt (pre_p, gimple_build_label (flabel));
1e8e9920 5818}
5819
773c5ba7 5820
5821/* A subroutine of lower_omp_single. Expand the form of
75a70cf9 5822 a GIMPLE_OMP_SINGLE that has a copyprivate clause:
1e8e9920 5823
5824 #pragma omp single copyprivate (a, b, c)
5825
5826 Create a new structure to hold copies of 'a', 'b' and 'c' and emit:
5827
5828 {
5829 if ((copyout_p = GOMP_single_copy_start ()) == NULL)
5830 {
5831 BODY;
5832 copyout.a = a;
5833 copyout.b = b;
5834 copyout.c = c;
5835 GOMP_single_copy_end (&copyout);
5836 }
5837 else
5838 {
5839 a = copyout_p->a;
5840 b = copyout_p->b;
5841 c = copyout_p->c;
5842 }
5843 GOMP_barrier ();
5844 }
773c5ba7 5845
5846 FIXME. It may be better to delay expanding the logic of this until
5847 pass_expand_omp. The expanded logic may make the job more difficult
5848 for a synchronization analysis pass. */
1e8e9920 5849
5850static void
75a70cf9 5851lower_omp_single_copy (gimple single_stmt, gimple_seq *pre_p, omp_context *ctx)
1e8e9920 5852{
b9a16870 5853 tree ptr_type, t, l0, l1, l2, bfn_decl;
75a70cf9 5854 gimple_seq copyin_seq;
e60a6f7b 5855 location_t loc = gimple_location (single_stmt);
1e8e9920 5856
5857 ctx->sender_decl = create_tmp_var (ctx->record_type, ".omp_copy_o");
5858
5859 ptr_type = build_pointer_type (ctx->record_type);
5860 ctx->receiver_decl = create_tmp_var (ptr_type, ".omp_copy_i");
5861
e60a6f7b 5862 l0 = create_artificial_label (loc);
5863 l1 = create_artificial_label (loc);
5864 l2 = create_artificial_label (loc);
1e8e9920 5865
b9a16870 5866 bfn_decl = builtin_decl_explicit (BUILT_IN_GOMP_SINGLE_COPY_START);
5867 t = build_call_expr_loc (loc, bfn_decl, 0);
389dd41b 5868 t = fold_convert_loc (loc, ptr_type, t);
75a70cf9 5869 gimplify_assign (ctx->receiver_decl, t, pre_p);
1e8e9920 5870
5871 t = build2 (EQ_EXPR, boolean_type_node, ctx->receiver_decl,
5872 build_int_cst (ptr_type, 0));
5873 t = build3 (COND_EXPR, void_type_node, t,
5874 build_and_jump (&l0), build_and_jump (&l1));
5875 gimplify_and_add (t, pre_p);
5876
75a70cf9 5877 gimple_seq_add_stmt (pre_p, gimple_build_label (l0));
1e8e9920 5878
75a70cf9 5879 gimple_seq_add_seq (pre_p, gimple_omp_body (single_stmt));
1e8e9920 5880
5881 copyin_seq = NULL;
75a70cf9 5882 lower_copyprivate_clauses (gimple_omp_single_clauses (single_stmt), pre_p,
1e8e9920 5883 &copyin_seq, ctx);
5884
389dd41b 5885 t = build_fold_addr_expr_loc (loc, ctx->sender_decl);
b9a16870 5886 bfn_decl = builtin_decl_explicit (BUILT_IN_GOMP_SINGLE_COPY_END);
5887 t = build_call_expr_loc (loc, bfn_decl, 1, t);
1e8e9920 5888 gimplify_and_add (t, pre_p);
5889
5890 t = build_and_jump (&l2);
5891 gimplify_and_add (t, pre_p);
5892
75a70cf9 5893 gimple_seq_add_stmt (pre_p, gimple_build_label (l1));
1e8e9920 5894
75a70cf9 5895 gimple_seq_add_seq (pre_p, copyin_seq);
1e8e9920 5896
75a70cf9 5897 gimple_seq_add_stmt (pre_p, gimple_build_label (l2));
1e8e9920 5898}
5899
773c5ba7 5900
1e8e9920 5901/* Lower code for an OpenMP single directive. */
5902
5903static void
75a70cf9 5904lower_omp_single (gimple_stmt_iterator *gsi_p, omp_context *ctx)
1e8e9920 5905{
75a70cf9 5906 tree block;
5907 gimple t, bind, single_stmt = gsi_stmt (*gsi_p);
5908 gimple_seq bind_body, dlist;
dac18d1a 5909 struct gimplify_ctx gctx;
1e8e9920 5910
dac18d1a 5911 push_gimplify_context (&gctx);
1e8e9920 5912
75a70cf9 5913 bind_body = NULL;
5914 lower_rec_input_clauses (gimple_omp_single_clauses (single_stmt),
5915 &bind_body, &dlist, ctx);
5916 lower_omp (gimple_omp_body (single_stmt), ctx);
1e8e9920 5917
75a70cf9 5918 gimple_seq_add_stmt (&bind_body, single_stmt);
1e8e9920 5919
5920 if (ctx->record_type)
75a70cf9 5921 lower_omp_single_copy (single_stmt, &bind_body, ctx);
1e8e9920 5922 else
75a70cf9 5923 lower_omp_single_simple (single_stmt, &bind_body);
5924
5925 gimple_omp_set_body (single_stmt, NULL);
1e8e9920 5926
75a70cf9 5927 gimple_seq_add_seq (&bind_body, dlist);
61e47ac8 5928
75a70cf9 5929 bind_body = maybe_catch_exception (bind_body);
61e47ac8 5930
48e1416a 5931 t = gimple_build_omp_return
75a70cf9 5932 (!!find_omp_clause (gimple_omp_single_clauses (single_stmt),
5933 OMP_CLAUSE_NOWAIT));
5934 gimple_seq_add_stmt (&bind_body, t);
aade31a0 5935
75a70cf9 5936 block = make_node (BLOCK);
5937 bind = gimple_build_bind (NULL, bind_body, block);
61e47ac8 5938
1e8e9920 5939 pop_gimplify_context (bind);
773c5ba7 5940
75a70cf9 5941 gimple_bind_append_vars (bind, ctx->block_vars);
5942 BLOCK_VARS (block) = ctx->block_vars;
5943 gsi_replace (gsi_p, bind, true);
1d22f541 5944 if (BLOCK_VARS (block))
5945 TREE_USED (block) = 1;
1e8e9920 5946}
5947
773c5ba7 5948
1e8e9920 5949/* Lower code for an OpenMP master directive. */
5950
5951static void
75a70cf9 5952lower_omp_master (gimple_stmt_iterator *gsi_p, omp_context *ctx)
1e8e9920 5953{
b9a16870 5954 tree block, lab = NULL, x, bfn_decl;
75a70cf9 5955 gimple stmt = gsi_stmt (*gsi_p), bind;
389dd41b 5956 location_t loc = gimple_location (stmt);
75a70cf9 5957 gimple_seq tseq;
dac18d1a 5958 struct gimplify_ctx gctx;
1e8e9920 5959
dac18d1a 5960 push_gimplify_context (&gctx);
1e8e9920 5961
5962 block = make_node (BLOCK);
75a70cf9 5963 bind = gimple_build_bind (NULL, gimple_seq_alloc_with_stmt (stmt),
5964 block);
61e47ac8 5965
b9a16870 5966 bfn_decl = builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM);
5967 x = build_call_expr_loc (loc, bfn_decl, 0);
1e8e9920 5968 x = build2 (EQ_EXPR, boolean_type_node, x, integer_zero_node);
5969 x = build3 (COND_EXPR, void_type_node, x, NULL, build_and_jump (&lab));
75a70cf9 5970 tseq = NULL;
5971 gimplify_and_add (x, &tseq);
5972 gimple_bind_add_seq (bind, tseq);
1e8e9920 5973
75a70cf9 5974 lower_omp (gimple_omp_body (stmt), ctx);
5975 gimple_omp_set_body (stmt, maybe_catch_exception (gimple_omp_body (stmt)));
5976 gimple_bind_add_seq (bind, gimple_omp_body (stmt));
5977 gimple_omp_set_body (stmt, NULL);
1e8e9920 5978
75a70cf9 5979 gimple_bind_add_stmt (bind, gimple_build_label (lab));
61e47ac8 5980
75a70cf9 5981 gimple_bind_add_stmt (bind, gimple_build_omp_return (true));
61e47ac8 5982
1e8e9920 5983 pop_gimplify_context (bind);
773c5ba7 5984
75a70cf9 5985 gimple_bind_append_vars (bind, ctx->block_vars);
5986 BLOCK_VARS (block) = ctx->block_vars;
5987 gsi_replace (gsi_p, bind, true);
1e8e9920 5988}
5989
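/* Net effect, informally (a sketch; LAB is the label created above):

     if (omp_get_thread_num () != 0) goto lab;
     <master body>
   lab:
     GIMPLE_OMP_RETURN (nowait)

   Non-master threads simply skip the body, and no barrier is implied,
   matching the OpenMP master semantics.  */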
773c5ba7 5990
1e8e9920 5991/* Lower code for an OpenMP ordered directive. */
5992
5993static void
75a70cf9 5994lower_omp_ordered (gimple_stmt_iterator *gsi_p, omp_context *ctx)
1e8e9920 5995{
75a70cf9 5996 tree block;
5997 gimple stmt = gsi_stmt (*gsi_p), bind, x;
dac18d1a 5998 struct gimplify_ctx gctx;
1e8e9920 5999
dac18d1a 6000 push_gimplify_context (&gctx);
1e8e9920 6001
6002 block = make_node (BLOCK);
75a70cf9 6003 bind = gimple_build_bind (NULL, gimple_seq_alloc_with_stmt (stmt),
6004 block);
61e47ac8 6005
b9a16870 6006 x = gimple_build_call (builtin_decl_explicit (BUILT_IN_GOMP_ORDERED_START),
6007 0);
75a70cf9 6008 gimple_bind_add_stmt (bind, x);
1e8e9920 6009
75a70cf9 6010 lower_omp (gimple_omp_body (stmt), ctx);
6011 gimple_omp_set_body (stmt, maybe_catch_exception (gimple_omp_body (stmt)));
6012 gimple_bind_add_seq (bind, gimple_omp_body (stmt));
6013 gimple_omp_set_body (stmt, NULL);
1e8e9920 6014
b9a16870 6015 x = gimple_build_call (builtin_decl_explicit (BUILT_IN_GOMP_ORDERED_END), 0);
75a70cf9 6016 gimple_bind_add_stmt (bind, x);
61e47ac8 6017
75a70cf9 6018 gimple_bind_add_stmt (bind, gimple_build_omp_return (true));
61e47ac8 6019
1e8e9920 6020 pop_gimplify_context (bind);
773c5ba7 6021
75a70cf9 6022 gimple_bind_append_vars (bind, ctx->block_vars);
6023 BLOCK_VARS (block) = gimple_bind_vars (bind);
6024 gsi_replace (gsi_p, bind, true);
1e8e9920 6025}
6026
1e8e9920 6027
75a70cf9 6028/* Gimplify a GIMPLE_OMP_CRITICAL statement. This is a relatively simple
1e8e9920 6029 substitution of a couple of function calls. But in the NAMED case,
6030 it requires that languages coordinate a symbol name. It is therefore
6031 best put here in common code. */
6032
6033static GTY((param1_is (tree), param2_is (tree)))
6034 splay_tree critical_name_mutexes;
6035
6036static void
75a70cf9 6037lower_omp_critical (gimple_stmt_iterator *gsi_p, omp_context *ctx)
1e8e9920 6038{
75a70cf9 6039 tree block;
6040 tree name, lock, unlock;
6041 gimple stmt = gsi_stmt (*gsi_p), bind;
389dd41b 6042 location_t loc = gimple_location (stmt);
75a70cf9 6043 gimple_seq tbody;
dac18d1a 6044 struct gimplify_ctx gctx;
1e8e9920 6045
75a70cf9 6046 name = gimple_omp_critical_name (stmt);
1e8e9920 6047 if (name)
6048 {
c2f47e15 6049 tree decl;
1e8e9920 6050 splay_tree_node n;
6051
6052 if (!critical_name_mutexes)
6053 critical_name_mutexes
ba72912a 6054 = splay_tree_new_ggc (splay_tree_compare_pointers,
6055 ggc_alloc_splay_tree_tree_node_tree_node_splay_tree_s,
6056 ggc_alloc_splay_tree_tree_node_tree_node_splay_tree_node_s);
1e8e9920 6057
6058 n = splay_tree_lookup (critical_name_mutexes, (splay_tree_key) name);
6059 if (n == NULL)
6060 {
6061 char *new_str;
6062
6063 decl = create_tmp_var_raw (ptr_type_node, NULL);
6064
6065 new_str = ACONCAT ((".gomp_critical_user_",
6066 IDENTIFIER_POINTER (name), NULL));
6067 DECL_NAME (decl) = get_identifier (new_str);
6068 TREE_PUBLIC (decl) = 1;
6069 TREE_STATIC (decl) = 1;
6070 DECL_COMMON (decl) = 1;
6071 DECL_ARTIFICIAL (decl) = 1;
6072 DECL_IGNORED_P (decl) = 1;
1d416bd7 6073 varpool_finalize_decl (decl);
1e8e9920 6074
6075 splay_tree_insert (critical_name_mutexes, (splay_tree_key) name,
6076 (splay_tree_value) decl);
6077 }
6078 else
6079 decl = (tree) n->value;
6080
b9a16870 6081 lock = builtin_decl_explicit (BUILT_IN_GOMP_CRITICAL_NAME_START);
389dd41b 6082 lock = build_call_expr_loc (loc, lock, 1, build_fold_addr_expr_loc (loc, decl));
1e8e9920 6083
b9a16870 6084 unlock = builtin_decl_explicit (BUILT_IN_GOMP_CRITICAL_NAME_END);
389dd41b 6085 unlock = build_call_expr_loc (loc, unlock, 1,
6086 build_fold_addr_expr_loc (loc, decl));
1e8e9920 6087 }
6088 else
6089 {
b9a16870 6090 lock = builtin_decl_explicit (BUILT_IN_GOMP_CRITICAL_START);
389dd41b 6091 lock = build_call_expr_loc (loc, lock, 0);
1e8e9920 6092
b9a16870 6093 unlock = builtin_decl_explicit (BUILT_IN_GOMP_CRITICAL_END);
389dd41b 6094 unlock = build_call_expr_loc (loc, unlock, 0);
1e8e9920 6095 }
6096
dac18d1a 6097 push_gimplify_context (&gctx);
1e8e9920 6098
6099 block = make_node (BLOCK);
75a70cf9 6100 bind = gimple_build_bind (NULL, gimple_seq_alloc_with_stmt (stmt), block);
61e47ac8 6101
75a70cf9 6102 tbody = gimple_bind_body (bind);
6103 gimplify_and_add (lock, &tbody);
6104 gimple_bind_set_body (bind, tbody);
1e8e9920 6105
75a70cf9 6106 lower_omp (gimple_omp_body (stmt), ctx);
6107 gimple_omp_set_body (stmt, maybe_catch_exception (gimple_omp_body (stmt)));
6108 gimple_bind_add_seq (bind, gimple_omp_body (stmt));
6109 gimple_omp_set_body (stmt, NULL);
1e8e9920 6110
75a70cf9 6111 tbody = gimple_bind_body (bind);
6112 gimplify_and_add (unlock, &tbody);
6113 gimple_bind_set_body (bind, tbody);
61e47ac8 6114
75a70cf9 6115 gimple_bind_add_stmt (bind, gimple_build_omp_return (true));
1e8e9920 6116
6117 pop_gimplify_context (bind);
75a70cf9 6118 gimple_bind_append_vars (bind, ctx->block_vars);
6119 BLOCK_VARS (block) = gimple_bind_vars (bind);
6120 gsi_replace (gsi_p, bind, true);
773c5ba7 6121}
6122
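/* Naming example (hedged; the pragma is an assumed input):

     #pragma omp critical (io)
     fputs ("...", stderr);

   makes the code above create the common symbol
   `.gomp_critical_user_io', so every translation unit that names the
   same critical region shares one mutex, and bracket the body with
   GOMP_critical_name_start/end on the symbol's address.  The unnamed
   form uses the single global GOMP_critical_start/end pair instead.  */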
6123
6124/* A subroutine of lower_omp_for. Generate code to emit the predicate
6125 for a lastprivate clause. Given a loop control predicate of (V
6126 cond N2), we gate the clause on (!(V cond N2)). The lowered form
1e4afe3c 6127 is appended to *DLIST; iterator initialization is appended to
6128 *BODY_P. */
773c5ba7 6129
6130static void
75a70cf9 6131lower_omp_for_lastprivate (struct omp_for_data *fd, gimple_seq *body_p,
6132 gimple_seq *dlist, struct omp_context *ctx)
773c5ba7 6133{
75a70cf9 6134 tree clauses, cond, vinit;
773c5ba7 6135 enum tree_code cond_code;
75a70cf9 6136 gimple_seq stmts;
48e1416a 6137
fd6481cf 6138 cond_code = fd->loop.cond_code;
773c5ba7 6139 cond_code = cond_code == LT_EXPR ? GE_EXPR : LE_EXPR;
6140
6141 /* When possible, use a strict equality expression. This can let VRP
6142 type optimizations deduce the value and remove a copy. */
fd6481cf 6143 if (host_integerp (fd->loop.step, 0))
773c5ba7 6144 {
fd6481cf 6145 HOST_WIDE_INT step = TREE_INT_CST_LOW (fd->loop.step);
773c5ba7 6146 if (step == 1 || step == -1)
6147 cond_code = EQ_EXPR;
6148 }
6149
fd6481cf 6150 cond = build2 (cond_code, boolean_type_node, fd->loop.v, fd->loop.n2);
773c5ba7 6151
75a70cf9 6152 clauses = gimple_omp_for_clauses (fd->for_stmt);
1e4afe3c 6153 stmts = NULL;
6154 lower_lastprivate_clauses (clauses, cond, &stmts, ctx);
75a70cf9 6155 if (!gimple_seq_empty_p (stmts))
1e4afe3c 6156 {
75a70cf9 6157 gimple_seq_add_seq (&stmts, *dlist);
fd6481cf 6158 *dlist = stmts;
1e4afe3c 6159
6160 /* Optimize: v = 0; is usually cheaper than v = some_other_constant. */
fd6481cf 6161 vinit = fd->loop.n1;
1e4afe3c 6162 if (cond_code == EQ_EXPR
fd6481cf 6163 && host_integerp (fd->loop.n2, 0)
6164 && ! integer_zerop (fd->loop.n2))
6165 vinit = build_int_cst (TREE_TYPE (fd->loop.v), 0);
1e4afe3c 6166
6167 /* Initialize the iterator variable, so that threads that don't execute
6168 any iterations don't execute the lastprivate clauses by accident. */
75a70cf9 6169 gimplify_assign (fd->loop.v, vinit, body_p);
1e4afe3c 6170 }
773c5ba7 6171}
6172
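/* Worked example of the strict-equality trick (an assumed loop, for
   illustration):

     #pragma omp for lastprivate (v)
     for (v = 0; v < n; v++)
       ...

   The step is +1, so instead of gating the lastprivate copy-out on
   !(v < n), i.e. (v >= n), it is gated on (v == n); value-range
   propagation can then prove v == n on that path and forward the
   constant.  */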
6173
6174/* Lower code for an OpenMP loop directive. */
6175
6176static void
75a70cf9 6177lower_omp_for (gimple_stmt_iterator *gsi_p, omp_context *ctx)
773c5ba7 6178{
75a70cf9 6179 tree *rhs_p, block;
773c5ba7 6180 struct omp_for_data fd;
75a70cf9 6181 gimple stmt = gsi_stmt (*gsi_p), new_stmt;
f018d957 6182 gimple_seq omp_for_body, body, dlist;
75a70cf9 6183 size_t i;
dac18d1a 6184 struct gimplify_ctx gctx;
773c5ba7 6185
dac18d1a 6186 push_gimplify_context (&gctx);
773c5ba7 6187
75a70cf9 6188 lower_omp (gimple_omp_for_pre_body (stmt), ctx);
6189 lower_omp (gimple_omp_body (stmt), ctx);
773c5ba7 6190
1d22f541 6191 block = make_node (BLOCK);
75a70cf9 6192 new_stmt = gimple_build_bind (NULL, NULL, block);
1d22f541 6193
773c5ba7 6194 /* Move declaration of temporaries in the loop body before we make
6195 it go away. */
75a70cf9 6196 omp_for_body = gimple_omp_body (stmt);
6197 if (!gimple_seq_empty_p (omp_for_body)
6198 && gimple_code (gimple_seq_first_stmt (omp_for_body)) == GIMPLE_BIND)
6199 {
6200 tree vars = gimple_bind_vars (gimple_seq_first_stmt (omp_for_body));
6201 gimple_bind_append_vars (new_stmt, vars);
6202 }
773c5ba7 6203
75a70cf9 6204 /* The pre-body and input clauses go before the lowered GIMPLE_OMP_FOR. */
773c5ba7 6205 dlist = NULL;
75a70cf9 6206 body = NULL;
6207 lower_rec_input_clauses (gimple_omp_for_clauses (stmt), &body, &dlist, ctx);
6208 gimple_seq_add_seq (&body, gimple_omp_for_pre_body (stmt));
773c5ba7 6209
6210 /* Lower the header expressions. At this point, we can assume that
6211 the header is of the form:
6212
6213 #pragma omp for (V = VAL1; V {<|>|<=|>=} VAL2; V = V [+-] VAL3)
6214
6215 We just need to make sure that VAL1, VAL2 and VAL3 are lowered
6216 using the .omp_data_s mapping, if needed. */
75a70cf9 6217 for (i = 0; i < gimple_omp_for_collapse (stmt); i++)
fd6481cf 6218 {
75a70cf9 6219 rhs_p = gimple_omp_for_initial_ptr (stmt, i);
fd6481cf 6220 if (!is_gimple_min_invariant (*rhs_p))
75a70cf9 6221 *rhs_p = get_formal_tmp_var (*rhs_p, &body);
fd6481cf 6222
75a70cf9 6223 rhs_p = gimple_omp_for_final_ptr (stmt, i);
fd6481cf 6224 if (!is_gimple_min_invariant (*rhs_p))
75a70cf9 6225 *rhs_p = get_formal_tmp_var (*rhs_p, &body);
fd6481cf 6226
75a70cf9 6227 rhs_p = &TREE_OPERAND (gimple_omp_for_incr (stmt, i), 1);
fd6481cf 6228 if (!is_gimple_min_invariant (*rhs_p))
75a70cf9 6229 *rhs_p = get_formal_tmp_var (*rhs_p, &body);
fd6481cf 6230 }
773c5ba7 6231
6232 /* Once lowered, extract the bounds and clauses. */
fd6481cf 6233 extract_omp_for_data (stmt, &fd, NULL);
773c5ba7 6234
75a70cf9 6235 lower_omp_for_lastprivate (&fd, &body, &dlist, ctx);
773c5ba7 6236
75a70cf9 6237 gimple_seq_add_stmt (&body, stmt);
6238 gimple_seq_add_seq (&body, gimple_omp_body (stmt));
61e47ac8 6239
75a70cf9 6240 gimple_seq_add_stmt (&body, gimple_build_omp_continue (fd.loop.v,
6241 fd.loop.v));
61e47ac8 6242
773c5ba7 6243 /* After the loop, add exit clauses. */
75a70cf9 6244 lower_reduction_clauses (gimple_omp_for_clauses (stmt), &body, ctx);
6245 gimple_seq_add_seq (&body, dlist);
773c5ba7 6246
75a70cf9 6247 body = maybe_catch_exception (body);
aade31a0 6248
61e47ac8 6249 /* Region exit marker goes at the end of the loop body. */
75a70cf9 6250 gimple_seq_add_stmt (&body, gimple_build_omp_return (fd.have_nowait));
773c5ba7 6251
1d22f541 6252 pop_gimplify_context (new_stmt);
75a70cf9 6253
6254 gimple_bind_append_vars (new_stmt, ctx->block_vars);
6255 BLOCK_VARS (block) = gimple_bind_vars (new_stmt);
1d22f541 6256 if (BLOCK_VARS (block))
6257 TREE_USED (block) = 1;
773c5ba7 6258
75a70cf9 6259 gimple_bind_set_body (new_stmt, body);
6260 gimple_omp_set_body (stmt, NULL);
6261 gimple_omp_for_set_pre_body (stmt, NULL);
6262 gsi_replace (gsi_p, new_stmt, true);
1e8e9920 6263}
6264
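/* Header example (an assumed source fragment, for illustration): in

     #pragma omp for
     for (i = f1 (); i < f2 (); i += f3 ())
       ...

   the expressions f1 (), f2 () and f3 () are not gimple minimal
   invariants, so the loop above replaces each one with a formal
   temporary computed in BODY ahead of the GIMPLE_OMP_FOR, leaving a
   header of the canonical (i = D.1; i < D.2; i += D.3) form for
   extract_omp_for_data.  */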
48e1416a 6265/* Callback for walk_stmts. Check if the current statement only contains
75a70cf9 6266 GIMPLE_OMP_FOR or GIMPLE_OMP_SECTIONS. */
de7ef844 6267
6268static tree
75a70cf9 6269check_combined_parallel (gimple_stmt_iterator *gsi_p,
6270 bool *handled_ops_p,
6271 struct walk_stmt_info *wi)
de7ef844 6272{
4077bf7a 6273 int *info = (int *) wi->info;
75a70cf9 6274 gimple stmt = gsi_stmt (*gsi_p);
de7ef844 6275
75a70cf9 6276 *handled_ops_p = true;
6277 switch (gimple_code (stmt))
de7ef844 6278 {
75a70cf9 6279 WALK_SUBSTMTS;
6280
6281 case GIMPLE_OMP_FOR:
6282 case GIMPLE_OMP_SECTIONS:
de7ef844 6283 *info = *info == 0 ? 1 : -1;
6284 break;
6285 default:
6286 *info = -1;
6287 break;
6288 }
6289 return NULL;
6290}
773c5ba7 6291
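/* Effect of the tri-state *INFO (a hedged example): a parallel body
   consisting solely of

     #pragma omp for
     for (...)
       ...

   drives *info from 0 to 1, marking a combined parallel+workshare
   candidate, whereas any additional statement, or a second
   work-sharing construct, forces *info to -1 and disables the
   combination.  */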
fd6481cf 6292struct omp_taskcopy_context
6293{
6294 /* This field must be at the beginning, as we do "inheritance": Some
6295 callback functions for tree-inline.c (e.g., omp_copy_decl)
6296 receive a copy_body_data pointer that is up-casted to an
6297 omp_context pointer. */
6298 copy_body_data cb;
6299 omp_context *ctx;
6300};
6301
6302static tree
6303task_copyfn_copy_decl (tree var, copy_body_data *cb)
6304{
6305 struct omp_taskcopy_context *tcctx = (struct omp_taskcopy_context *) cb;
6306
6307 if (splay_tree_lookup (tcctx->ctx->sfield_map, (splay_tree_key) var))
6308 return create_tmp_var (TREE_TYPE (var), NULL);
6309
6310 return var;
6311}
6312
6313static tree
6314task_copyfn_remap_type (struct omp_taskcopy_context *tcctx, tree orig_type)
6315{
6316 tree name, new_fields = NULL, type, f;
6317
6318 type = lang_hooks.types.make_type (RECORD_TYPE);
6319 name = DECL_NAME (TYPE_NAME (orig_type));
e60a6f7b 6320 name = build_decl (gimple_location (tcctx->ctx->stmt),
6321 TYPE_DECL, name, type);
fd6481cf 6322 TYPE_NAME (type) = name;
6323
6324 for (f = TYPE_FIELDS (orig_type); f ; f = TREE_CHAIN (f))
6325 {
6326 tree new_f = copy_node (f);
6327 DECL_CONTEXT (new_f) = type;
6328 TREE_TYPE (new_f) = remap_type (TREE_TYPE (f), &tcctx->cb);
6329 TREE_CHAIN (new_f) = new_fields;
75a70cf9 6330 walk_tree (&DECL_SIZE (new_f), copy_tree_body_r, &tcctx->cb, NULL);
6331 walk_tree (&DECL_SIZE_UNIT (new_f), copy_tree_body_r, &tcctx->cb, NULL);
6332 walk_tree (&DECL_FIELD_OFFSET (new_f), copy_tree_body_r,
6333 &tcctx->cb, NULL);
fd6481cf 6334 new_fields = new_f;
6335 *pointer_map_insert (tcctx->cb.decl_map, f) = new_f;
6336 }
6337 TYPE_FIELDS (type) = nreverse (new_fields);
6338 layout_type (type);
6339 return type;
6340}
6341
6342/* Create task copyfn. */
6343
6344static void
75a70cf9 6345create_task_copyfn (gimple task_stmt, omp_context *ctx)
fd6481cf 6346{
6347 struct function *child_cfun;
6348 tree child_fn, t, c, src, dst, f, sf, arg, sarg, decl;
6349 tree record_type, srecord_type, bind, list;
6350 bool record_needs_remap = false, srecord_needs_remap = false;
6351 splay_tree_node n;
6352 struct omp_taskcopy_context tcctx;
dac18d1a 6353 struct gimplify_ctx gctx;
389dd41b 6354 location_t loc = gimple_location (task_stmt);
fd6481cf 6355
75a70cf9 6356 child_fn = gimple_omp_task_copy_fn (task_stmt);
fd6481cf 6357 child_cfun = DECL_STRUCT_FUNCTION (child_fn);
6358 gcc_assert (child_cfun->cfg == NULL);
fd6481cf 6359 DECL_SAVED_TREE (child_fn) = alloc_stmt_list ();
6360
6361 /* Reset DECL_CONTEXT on function arguments. */
1767a056 6362 for (t = DECL_ARGUMENTS (child_fn); t; t = DECL_CHAIN (t))
fd6481cf 6363 DECL_CONTEXT (t) = child_fn;
6364
6365 /* Populate the function. */
dac18d1a 6366 push_gimplify_context (&gctx);
fd6481cf 6367 current_function_decl = child_fn;
6368
6369 bind = build3 (BIND_EXPR, void_type_node, NULL, NULL, NULL);
6370 TREE_SIDE_EFFECTS (bind) = 1;
6371 list = NULL;
6372 DECL_SAVED_TREE (child_fn) = bind;
75a70cf9 6373 DECL_SOURCE_LOCATION (child_fn) = gimple_location (task_stmt);
fd6481cf 6374
6375 /* Remap src and dst argument types if needed. */
6376 record_type = ctx->record_type;
6377 srecord_type = ctx->srecord_type;
1767a056 6378 for (f = TYPE_FIELDS (record_type); f ; f = DECL_CHAIN (f))
fd6481cf 6379 if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
6380 {
6381 record_needs_remap = true;
6382 break;
6383 }
1767a056 6384 for (f = TYPE_FIELDS (srecord_type); f ; f = DECL_CHAIN (f))
fd6481cf 6385 if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
6386 {
6387 srecord_needs_remap = true;
6388 break;
6389 }
6390
6391 if (record_needs_remap || srecord_needs_remap)
6392 {
6393 memset (&tcctx, '\0', sizeof (tcctx));
6394 tcctx.cb.src_fn = ctx->cb.src_fn;
6395 tcctx.cb.dst_fn = child_fn;
53f79206 6396 tcctx.cb.src_node = cgraph_get_node (tcctx.cb.src_fn);
6397 gcc_checking_assert (tcctx.cb.src_node);
fd6481cf 6398 tcctx.cb.dst_node = tcctx.cb.src_node;
6399 tcctx.cb.src_cfun = ctx->cb.src_cfun;
6400 tcctx.cb.copy_decl = task_copyfn_copy_decl;
e38def9c 6401 tcctx.cb.eh_lp_nr = 0;
fd6481cf 6402 tcctx.cb.transform_call_graph_edges = CB_CGE_MOVE;
6403 tcctx.cb.decl_map = pointer_map_create ();
6404 tcctx.ctx = ctx;
6405
6406 if (record_needs_remap)
6407 record_type = task_copyfn_remap_type (&tcctx, record_type);
6408 if (srecord_needs_remap)
6409 srecord_type = task_copyfn_remap_type (&tcctx, srecord_type);
6410 }
6411 else
6412 tcctx.cb.decl_map = NULL;
6413
6414 push_cfun (child_cfun);
6415
6416 arg = DECL_ARGUMENTS (child_fn);
6417 TREE_TYPE (arg) = build_pointer_type (record_type);
1767a056 6418 sarg = DECL_CHAIN (arg);
fd6481cf 6419 TREE_TYPE (sarg) = build_pointer_type (srecord_type);
6420
6421 /* First pass: initialize temporaries used in record_type and srecord_type
6422 sizes and field offsets. */
6423 if (tcctx.cb.decl_map)
75a70cf9 6424 for (c = gimple_omp_task_clauses (task_stmt); c; c = OMP_CLAUSE_CHAIN (c))
fd6481cf 6425 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_FIRSTPRIVATE)
6426 {
6427 tree *p;
6428
6429 decl = OMP_CLAUSE_DECL (c);
6430 p = (tree *) pointer_map_contains (tcctx.cb.decl_map, decl);
6431 if (p == NULL)
6432 continue;
6433 n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
6434 sf = (tree) n->value;
6435 sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
182cf5a9 6436 src = build_simple_mem_ref_loc (loc, sarg);
fd6481cf 6437 src = build3 (COMPONENT_REF, TREE_TYPE (sf), src, sf, NULL);
75a70cf9 6438 t = build2 (MODIFY_EXPR, TREE_TYPE (*p), *p, src);
fd6481cf 6439 append_to_statement_list (t, &list);
6440 }
6441
6442 /* Second pass: copy shared var pointers and copy construct non-VLA
6443 firstprivate vars. */
75a70cf9 6444 for (c = gimple_omp_task_clauses (task_stmt); c; c = OMP_CLAUSE_CHAIN (c))
fd6481cf 6445 switch (OMP_CLAUSE_CODE (c))
6446 {
6447 case OMP_CLAUSE_SHARED:
6448 decl = OMP_CLAUSE_DECL (c);
6449 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
6450 if (n == NULL)
6451 break;
6452 f = (tree) n->value;
6453 if (tcctx.cb.decl_map)
6454 f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
6455 n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
6456 sf = (tree) n->value;
6457 if (tcctx.cb.decl_map)
6458 sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
182cf5a9 6459 src = build_simple_mem_ref_loc (loc, sarg);
fd6481cf 6460 src = build3 (COMPONENT_REF, TREE_TYPE (sf), src, sf, NULL);
182cf5a9 6461 dst = build_simple_mem_ref_loc (loc, arg);
fd6481cf 6462 dst = build3 (COMPONENT_REF, TREE_TYPE (f), dst, f, NULL);
75a70cf9 6463 t = build2 (MODIFY_EXPR, TREE_TYPE (dst), dst, src);
fd6481cf 6464 append_to_statement_list (t, &list);
6465 break;
6466 case OMP_CLAUSE_FIRSTPRIVATE:
6467 decl = OMP_CLAUSE_DECL (c);
6468 if (is_variable_sized (decl))
6469 break;
6470 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
6471 if (n == NULL)
6472 break;
6473 f = (tree) n->value;
6474 if (tcctx.cb.decl_map)
6475 f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
6476 n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
6477 if (n != NULL)
6478 {
6479 sf = (tree) n->value;
6480 if (tcctx.cb.decl_map)
6481 sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
182cf5a9 6482 src = build_simple_mem_ref_loc (loc, sarg);
fd6481cf 6483 src = build3 (COMPONENT_REF, TREE_TYPE (sf), src, sf, NULL);
6484 if (use_pointer_for_field (decl, NULL) || is_reference (decl))
182cf5a9 6485 src = build_simple_mem_ref_loc (loc, src);
fd6481cf 6486 }
6487 else
6488 src = decl;
182cf5a9 6489 dst = build_simple_mem_ref_loc (loc, arg);
fd6481cf 6490 dst = build3 (COMPONENT_REF, TREE_TYPE (f), dst, f, NULL);
6491 t = lang_hooks.decls.omp_clause_copy_ctor (c, dst, src);
6492 append_to_statement_list (t, &list);
6493 break;
6494 case OMP_CLAUSE_PRIVATE:
6495 if (! OMP_CLAUSE_PRIVATE_OUTER_REF (c))
6496 break;
6497 decl = OMP_CLAUSE_DECL (c);
6498 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
6499 f = (tree) n->value;
6500 if (tcctx.cb.decl_map)
6501 f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
6502 n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
6503 if (n != NULL)
6504 {
6505 sf = (tree) n->value;
6506 if (tcctx.cb.decl_map)
6507 sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
182cf5a9 6508 src = build_simple_mem_ref_loc (loc, sarg);
fd6481cf 6509 src = build3 (COMPONENT_REF, TREE_TYPE (sf), src, sf, NULL);
6510 if (use_pointer_for_field (decl, NULL))
182cf5a9 6511 src = build_simple_mem_ref_loc (loc, src);
fd6481cf 6512 }
6513 else
6514 src = decl;
182cf5a9 6515 dst = build_simple_mem_ref_loc (loc, arg);
fd6481cf 6516 dst = build3 (COMPONENT_REF, TREE_TYPE (f), dst, f, NULL);
75a70cf9 6517 t = build2 (MODIFY_EXPR, TREE_TYPE (dst), dst, src);
fd6481cf 6518 append_to_statement_list (t, &list);
6519 break;
6520 default:
6521 break;
6522 }
6523
6524 /* Last pass: handle VLA firstprivates. */
6525 if (tcctx.cb.decl_map)
75a70cf9 6526 for (c = gimple_omp_task_clauses (task_stmt); c; c = OMP_CLAUSE_CHAIN (c))
fd6481cf 6527 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_FIRSTPRIVATE)
6528 {
6529 tree ind, ptr, df;
6530
6531 decl = OMP_CLAUSE_DECL (c);
6532 if (!is_variable_sized (decl))
6533 continue;
6534 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
6535 if (n == NULL)
6536 continue;
6537 f = (tree) n->value;
6538 f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
6539 gcc_assert (DECL_HAS_VALUE_EXPR_P (decl));
6540 ind = DECL_VALUE_EXPR (decl);
6541 gcc_assert (TREE_CODE (ind) == INDIRECT_REF);
6542 gcc_assert (DECL_P (TREE_OPERAND (ind, 0)));
6543 n = splay_tree_lookup (ctx->sfield_map,
6544 (splay_tree_key) TREE_OPERAND (ind, 0));
6545 sf = (tree) n->value;
6546 sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
182cf5a9 6547 src = build_simple_mem_ref_loc (loc, sarg);
fd6481cf 6548 src = build3 (COMPONENT_REF, TREE_TYPE (sf), src, sf, NULL);
182cf5a9 6549 src = build_simple_mem_ref_loc (loc, src);
6550 dst = build_simple_mem_ref_loc (loc, arg);
fd6481cf 6551 dst = build3 (COMPONENT_REF, TREE_TYPE (f), dst, f, NULL);
6552 t = lang_hooks.decls.omp_clause_copy_ctor (c, dst, src);
6553 append_to_statement_list (t, &list);
6554 n = splay_tree_lookup (ctx->field_map,
6555 (splay_tree_key) TREE_OPERAND (ind, 0));
6556 df = (tree) n->value;
6557 df = *(tree *) pointer_map_contains (tcctx.cb.decl_map, df);
182cf5a9 6558 ptr = build_simple_mem_ref_loc (loc, arg);
fd6481cf 6559 ptr = build3 (COMPONENT_REF, TREE_TYPE (df), ptr, df, NULL);
75a70cf9 6560 t = build2 (MODIFY_EXPR, TREE_TYPE (ptr), ptr,
389dd41b 6561 build_fold_addr_expr_loc (loc, dst));
fd6481cf 6562 append_to_statement_list (t, &list);
6563 }
6564
6565 t = build1 (RETURN_EXPR, void_type_node, NULL);
6566 append_to_statement_list (t, &list);
6567
6568 if (tcctx.cb.decl_map)
6569 pointer_map_destroy (tcctx.cb.decl_map);
6570 pop_gimplify_context (NULL);
6571 BIND_EXPR_BODY (bind) = list;
6572 pop_cfun ();
6573 current_function_decl = ctx->cb.src_fn;
6574}
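
/* Illustrative sketch only; the struct and function names below are
   invented for the example, nothing here is emitted verbatim.  For a
   task such as

     int x = 42;
     #pragma omp task firstprivate(x)
       foo (x);

   the copy function assembled above conceptually reduces to

     void task_copyfn (struct dst_rec *dst, struct src_rec *src)
     {
       dst->x = src->x;
     }

   with the three passes handling, in order: temporaries used in the
   record sizes and field offsets, shared-variable pointers plus
   non-VLA firstprivate copies (via omp_clause_copy_ctor), and finally
   VLA firstprivates, whose payload is copy-constructed and whose
   pointer field in the destination record is redirected to the new
   copy.  */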
6575
75a70cf9 6576/* Lower the OpenMP parallel or task directive in the current statement
6577 in GSI_P. CTX holds context information for the directive. */
773c5ba7 6578
6579static void
75a70cf9 6580lower_omp_taskreg (gimple_stmt_iterator *gsi_p, omp_context *ctx)
773c5ba7 6581{
75a70cf9 6582 tree clauses;
6583 tree child_fn, t;
6584 gimple stmt = gsi_stmt (*gsi_p);
6585 gimple par_bind, bind;
6586 gimple_seq par_body, olist, ilist, par_olist, par_ilist, new_body;
dac18d1a 6587 struct gimplify_ctx gctx;
389dd41b 6588 location_t loc = gimple_location (stmt);
773c5ba7 6589
75a70cf9 6590 clauses = gimple_omp_taskreg_clauses (stmt);
6591 par_bind = gimple_seq_first_stmt (gimple_omp_body (stmt));
6592 par_body = gimple_bind_body (par_bind);
773c5ba7 6593 child_fn = ctx->cb.dst_fn;
75a70cf9 6594 if (gimple_code (stmt) == GIMPLE_OMP_PARALLEL
6595 && !gimple_omp_parallel_combined_p (stmt))
de7ef844 6596 {
6597 struct walk_stmt_info wi;
6598 int ws_num = 0;
6599
6600 memset (&wi, 0, sizeof (wi));
de7ef844 6601 wi.info = &ws_num;
6602 wi.val_only = true;
75a70cf9 6603 walk_gimple_seq (par_body, check_combined_parallel, NULL, &wi);
de7ef844 6604 if (ws_num == 1)
75a70cf9 6605 gimple_omp_parallel_set_combined_p (stmt, true);
de7ef844 6606 }
fd6481cf 6607 if (ctx->srecord_type)
6608 create_task_copyfn (stmt, ctx);
773c5ba7 6609
dac18d1a 6610 push_gimplify_context (&gctx);
773c5ba7 6611
75a70cf9 6612 par_olist = NULL;
6613 par_ilist = NULL;
773c5ba7 6614 lower_rec_input_clauses (clauses, &par_ilist, &par_olist, ctx);
75a70cf9 6615 lower_omp (par_body, ctx);
6616 if (gimple_code (stmt) == GIMPLE_OMP_PARALLEL)
fd6481cf 6617 lower_reduction_clauses (clauses, &par_olist, ctx);
773c5ba7 6618
6619 /* Declare all the variables created by mapping and the variables
6620 declared in the scope of the parallel body. */
6621 record_vars_into (ctx->block_vars, child_fn);
75a70cf9 6622 record_vars_into (gimple_bind_vars (par_bind), child_fn);
773c5ba7 6623
6624 if (ctx->record_type)
6625 {
fd6481cf 6626 ctx->sender_decl
6627 = create_tmp_var (ctx->srecord_type ? ctx->srecord_type
6628 : ctx->record_type, ".omp_data_o");
84bfaaeb 6629 DECL_NAMELESS (ctx->sender_decl) = 1;
86f2ad37 6630 TREE_ADDRESSABLE (ctx->sender_decl) = 1;
75a70cf9 6631 gimple_omp_taskreg_set_data_arg (stmt, ctx->sender_decl);
773c5ba7 6632 }
6633
75a70cf9 6634 olist = NULL;
6635 ilist = NULL;
773c5ba7 6636 lower_send_clauses (clauses, &ilist, &olist, ctx);
6637 lower_send_shared_vars (&ilist, &olist, ctx);
6638
6639 /* Once all the expansions are done, sequence all the different
75a70cf9 6640 fragments inside gimple_omp_body. */
773c5ba7 6641
75a70cf9 6642 new_body = NULL;
773c5ba7 6643
6644 if (ctx->record_type)
6645 {
389dd41b 6646 t = build_fold_addr_expr_loc (loc, ctx->sender_decl);
cc6b725b 6647 /* fixup_child_record_type might have changed receiver_decl's type. */
389dd41b 6648 t = fold_convert_loc (loc, TREE_TYPE (ctx->receiver_decl), t);
75a70cf9 6649 gimple_seq_add_stmt (&new_body,
6650 gimple_build_assign (ctx->receiver_decl, t));
773c5ba7 6651 }
6652
75a70cf9 6653 gimple_seq_add_seq (&new_body, par_ilist);
6654 gimple_seq_add_seq (&new_body, par_body);
6655 gimple_seq_add_seq (&new_body, par_olist);
6656 new_body = maybe_catch_exception (new_body);
6657 gimple_seq_add_stmt (&new_body, gimple_build_omp_return (false));
6658 gimple_omp_set_body (stmt, new_body);
773c5ba7 6659
75a70cf9 6660 bind = gimple_build_bind (NULL, NULL, gimple_bind_block (par_bind));
6661 gimple_bind_add_stmt (bind, stmt);
1d22f541 6662 if (ilist || olist)
6663 {
75a70cf9 6664 gimple_seq_add_stmt (&ilist, bind);
6665 gimple_seq_add_seq (&ilist, olist);
6666 bind = gimple_build_bind (NULL, ilist, NULL);
1d22f541 6667 }
773c5ba7 6668
75a70cf9 6669 gsi_replace (gsi_p, bind, true);
773c5ba7 6670
75a70cf9 6671 pop_gimplify_context (NULL);
773c5ba7 6672}
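
/* Illustrative sketch only, not literal GIMPLE.  After this function
   runs, a directive such as

     #pragma omp parallel shared(a) firstprivate(b)
       body;

   has conceptually become

     struct .omp_data_s { int *a; int b; } .omp_data_o;
     .omp_data_o.a = &a;
     .omp_data_o.b = b;
     GIMPLE_OMP_PARALLEL <child fn, .omp_data_o>
       {
         .omp_data_i = &.omp_data_o;
         ... body rewritten in terms of .omp_data_i->a, .omp_data_i->b ...
         GIMPLE_OMP_RETURN
       }

   The sender record is the ".omp_data_o" temporary created above; the
   receiver ".omp_data_i" is initialized from its address at the start
   of the new body.  Outlining the body into the child function and
   emitting the libgomp calls happen later, in pass_expand_omp.  */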
6673
a4890dc9 6674/* Callback for lower_omp_1. Return non-NULL if *tp needs to be
 75a70cf9 6675 regimplified. If DATA is non-NULL, the walk is happening outside
 6676 of any OpenMP context, but with task_shared_vars set. */
46515c92 6677
6678static tree
75a70cf9 6679lower_omp_regimplify_p (tree *tp, int *walk_subtrees,
6680 void *data)
46515c92 6681{
a4890dc9 6682 tree t = *tp;
46515c92 6683
a4890dc9 6684 /* Any variable with DECL_VALUE_EXPR needs to be regimplified. */
75a70cf9 6685 if (TREE_CODE (t) == VAR_DECL && data == NULL && DECL_HAS_VALUE_EXPR_P (t))
9f49e155 6686 return t;
6687
6688 if (task_shared_vars
6689 && DECL_P (t)
6690 && bitmap_bit_p (task_shared_vars, DECL_UID (t)))
a4890dc9 6691 return t;
46515c92 6692
a4890dc9 6693 /* If a global variable has been privatized, TREE_CONSTANT on
6694 ADDR_EXPR might be wrong. */
75a70cf9 6695 if (data == NULL && TREE_CODE (t) == ADDR_EXPR)
a4890dc9 6696 recompute_tree_invariant_for_addr_expr (t);
46515c92 6697
a4890dc9 6698 *walk_subtrees = !TYPE_P (t) && !DECL_P (t);
6699 return NULL_TREE;
46515c92 6700}
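
/* Example of the DECL_VALUE_EXPR case above (illustrative):

     void f (int n)
     {
       int vla[n];
       #pragma omp parallel firstprivate(vla)
         vla[0] = 0;
     }

   Here `vla' is accessed through a DECL_VALUE_EXPR of the form *vla.N
   (compare the INDIRECT_REF handling in create_task_copyfn), so
   statements mentioning it must be regimplified once the privatized
   replacement is installed; likewise an ADDR_EXPR of a privatized
   global may need its TREE_CONSTANT flag recomputed.  The name vla.N
   is made up for the example.  */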
773c5ba7 6701
a4890dc9 6702static void
75a70cf9 6703lower_omp_1 (gimple_stmt_iterator *gsi_p, omp_context *ctx)
1e8e9920 6704{
75a70cf9 6705 gimple stmt = gsi_stmt (*gsi_p);
6706 struct walk_stmt_info wi;
1e8e9920 6707
75a70cf9 6708 if (gimple_has_location (stmt))
6709 input_location = gimple_location (stmt);
a4890dc9 6710
75a70cf9 6711 if (task_shared_vars)
 6712 memset (&wi, 0, sizeof (wi));
a4890dc9 6713
773c5ba7 6714 /* If we have issued syntax errors, avoid doing any heavy lifting.
6715 Just replace the OpenMP directives with a NOP to avoid
6716 confusing RTL expansion. */
852f689e 6717 if (seen_error () && is_gimple_omp (stmt))
773c5ba7 6718 {
75a70cf9 6719 gsi_replace (gsi_p, gimple_build_nop (), true);
a4890dc9 6720 return;
773c5ba7 6721 }
6722
75a70cf9 6723 switch (gimple_code (stmt))
1e8e9920 6724 {
75a70cf9 6725 case GIMPLE_COND:
fd6481cf 6726 if ((ctx || task_shared_vars)
75a70cf9 6727 && (walk_tree (gimple_cond_lhs_ptr (stmt), lower_omp_regimplify_p,
6728 ctx ? NULL : &wi, NULL)
6729 || walk_tree (gimple_cond_rhs_ptr (stmt), lower_omp_regimplify_p,
6730 ctx ? NULL : &wi, NULL)))
6731 gimple_regimplify_operands (stmt, gsi_p);
a4890dc9 6732 break;
75a70cf9 6733 case GIMPLE_CATCH:
6734 lower_omp (gimple_catch_handler (stmt), ctx);
a4890dc9 6735 break;
75a70cf9 6736 case GIMPLE_EH_FILTER:
6737 lower_omp (gimple_eh_filter_failure (stmt), ctx);
a4890dc9 6738 break;
75a70cf9 6739 case GIMPLE_TRY:
6740 lower_omp (gimple_try_eval (stmt), ctx);
6741 lower_omp (gimple_try_cleanup (stmt), ctx);
a4890dc9 6742 break;
75a70cf9 6743 case GIMPLE_BIND:
6744 lower_omp (gimple_bind_body (stmt), ctx);
a4890dc9 6745 break;
75a70cf9 6746 case GIMPLE_OMP_PARALLEL:
6747 case GIMPLE_OMP_TASK:
6748 ctx = maybe_lookup_ctx (stmt);
6749 lower_omp_taskreg (gsi_p, ctx);
a4890dc9 6750 break;
75a70cf9 6751 case GIMPLE_OMP_FOR:
6752 ctx = maybe_lookup_ctx (stmt);
1e8e9920 6753 gcc_assert (ctx);
75a70cf9 6754 lower_omp_for (gsi_p, ctx);
1e8e9920 6755 break;
75a70cf9 6756 case GIMPLE_OMP_SECTIONS:
6757 ctx = maybe_lookup_ctx (stmt);
1e8e9920 6758 gcc_assert (ctx);
75a70cf9 6759 lower_omp_sections (gsi_p, ctx);
1e8e9920 6760 break;
75a70cf9 6761 case GIMPLE_OMP_SINGLE:
6762 ctx = maybe_lookup_ctx (stmt);
1e8e9920 6763 gcc_assert (ctx);
75a70cf9 6764 lower_omp_single (gsi_p, ctx);
1e8e9920 6765 break;
75a70cf9 6766 case GIMPLE_OMP_MASTER:
6767 ctx = maybe_lookup_ctx (stmt);
1e8e9920 6768 gcc_assert (ctx);
75a70cf9 6769 lower_omp_master (gsi_p, ctx);
1e8e9920 6770 break;
75a70cf9 6771 case GIMPLE_OMP_ORDERED:
6772 ctx = maybe_lookup_ctx (stmt);
1e8e9920 6773 gcc_assert (ctx);
75a70cf9 6774 lower_omp_ordered (gsi_p, ctx);
1e8e9920 6775 break;
75a70cf9 6776 case GIMPLE_OMP_CRITICAL:
6777 ctx = maybe_lookup_ctx (stmt);
1e8e9920 6778 gcc_assert (ctx);
75a70cf9 6779 lower_omp_critical (gsi_p, ctx);
6780 break;
6781 case GIMPLE_OMP_ATOMIC_LOAD:
6782 if ((ctx || task_shared_vars)
6783 && walk_tree (gimple_omp_atomic_load_rhs_ptr (stmt),
6784 lower_omp_regimplify_p, ctx ? NULL : &wi, NULL))
6785 gimple_regimplify_operands (stmt, gsi_p);
1e8e9920 6786 break;
a4890dc9 6787 default:
fd6481cf 6788 if ((ctx || task_shared_vars)
75a70cf9 6789 && walk_gimple_op (stmt, lower_omp_regimplify_p,
6790 ctx ? NULL : &wi))
6791 gimple_regimplify_operands (stmt, gsi_p);
1e8e9920 6792 break;
1e8e9920 6793 }
1e8e9920 6794}
6795
6796static void
75a70cf9 6797lower_omp (gimple_seq body, omp_context *ctx)
1e8e9920 6798{
1d22f541 6799 location_t saved_location = input_location;
 75a70cf9 6800 gimple_stmt_iterator gsi;
 6801 for (gsi = gsi_start (body); !gsi_end_p (gsi); gsi_next (&gsi))
6802 lower_omp_1 (&gsi, ctx);
1d22f541 6803 input_location = saved_location;
1e8e9920 6804}
6805\f
6806/* Main entry point. */
6807
2a1990e9 6808static unsigned int
1e8e9920 6809execute_lower_omp (void)
6810{
75a70cf9 6811 gimple_seq body;
6812
41709826 6813 /* This pass always runs, to provide PROP_gimple_lomp.
6814 But there is nothing to do unless -fopenmp is given. */
6815 if (flag_openmp == 0)
6816 return 0;
6817
1e8e9920 6818 all_contexts = splay_tree_new (splay_tree_compare_pointers, 0,
6819 delete_omp_context);
6820
75a70cf9 6821 body = gimple_body (current_function_decl);
6822 scan_omp (body, NULL);
fd6481cf 6823 gcc_assert (taskreg_nesting_level == 0);
1e8e9920 6824
6825 if (all_contexts->root)
fd6481cf 6826 {
dac18d1a 6827 struct gimplify_ctx gctx;
6828
fd6481cf 6829 if (task_shared_vars)
dac18d1a 6830 push_gimplify_context (&gctx);
75a70cf9 6831 lower_omp (body, NULL);
fd6481cf 6832 if (task_shared_vars)
6833 pop_gimplify_context (NULL);
6834 }
1e8e9920 6835
773c5ba7 6836 if (all_contexts)
6837 {
6838 splay_tree_delete (all_contexts);
6839 all_contexts = NULL;
6840 }
fd6481cf 6841 BITMAP_FREE (task_shared_vars);
2a1990e9 6842 return 0;
1e8e9920 6843}
6844
48e1416a 6845struct gimple_opt_pass pass_lower_omp =
1e8e9920 6846{
20099e35 6847 {
6848 GIMPLE_PASS,
1e8e9920 6849 "omplower", /* name */
41709826 6850 NULL, /* gate */
1e8e9920 6851 execute_lower_omp, /* execute */
6852 NULL, /* sub */
6853 NULL, /* next */
6854 0, /* static_pass_number */
0b1615c1 6855 TV_NONE, /* tv_id */
1e8e9920 6856 PROP_gimple_any, /* properties_required */
6857 PROP_gimple_lomp, /* properties_provided */
6858 0, /* properties_destroyed */
6859 0, /* todo_flags_start */
771e2890 6860 0 /* todo_flags_finish */
20099e35 6861 }
1e8e9920 6862};
1e8e9920 6863\f
6864/* The following is a utility to diagnose OpenMP structured block violations.
 61e47ac8 6865 It is not part of the "omplower" pass, as that's invoked too late. It
 6866 runs shortly after gimplification, as the "*diagnose_omp_blocks" pass below. */
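
/* For example (illustrative):

     #pragma omp parallel
     {
       goto out;
     }
     out:;

   Here the goto leaves the structured block, so pass 2 below reports
   "invalid branch to/from an OpenMP structured block"; a branch from
   outside onto a label inside a construct is reported as "invalid
   entry to OpenMP structured block".  The offending branch is then
   replaced by a nop so later passes see consistent code.  */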
1e8e9920 6867
6868static splay_tree all_labels;
6869
6870/* Check for mismatched contexts and generate an error if needed. Return
6871 true if an error is detected. */
6872
6873static bool
75a70cf9 6874diagnose_sb_0 (gimple_stmt_iterator *gsi_p,
6875 gimple branch_ctx, gimple label_ctx)
1e8e9920 6876{
75a70cf9 6877 if (label_ctx == branch_ctx)
1e8e9920 6878 return false;
6879
48e1416a 6880
 75a70cf9 6881 /* Previously we kept track of the label's entire context in
 6882 diagnose_sb_[12] so we could traverse it and issue a correct
 6883 "exit" or "enter" error message upon a structured block violation.
 6884
 6885 We built the context by building a list with tree_cons'ing, but
 6886 there is no easy counterpart in gimple tuples. It seems like far
 6887 too much work for issuing exit/enter error messages. If someone
 6888 really misses the distinct error message... patches welcome. */
48e1416a 6891
75a70cf9 6892#if 0
 1e8e9920 6893 /* Try to avoid confusing the user by producing an error message
f0b5f617 6894 with correct "exit" or "enter" verbiage. We prefer "exit"
1e8e9920 6895 unless we can show that LABEL_CTX is nested within BRANCH_CTX. */
6896 if (branch_ctx == NULL)
6897 exit_p = false;
6898 else
6899 {
6900 while (label_ctx)
6901 {
6902 if (TREE_VALUE (label_ctx) == branch_ctx)
6903 {
6904 exit_p = false;
6905 break;
6906 }
6907 label_ctx = TREE_CHAIN (label_ctx);
6908 }
6909 }
6910
6911 if (exit_p)
6912 error ("invalid exit from OpenMP structured block");
6913 else
6914 error ("invalid entry to OpenMP structured block");
75a70cf9 6915#endif
1e8e9920 6916
75a70cf9 6917 /* If it's obvious we have an invalid entry, be specific about the error. */
6918 if (branch_ctx == NULL)
6919 error ("invalid entry to OpenMP structured block");
6920 else
6921 /* Otherwise, be vague and lazy, but efficient. */
6922 error ("invalid branch to/from an OpenMP structured block");
6923
6924 gsi_replace (gsi_p, gimple_build_nop (), false);
1e8e9920 6925 return true;
6926}
6927
6928/* Pass 1: Create a minimal tree of OpenMP structured blocks, and record
75a70cf9 6929 where each label is found. */
1e8e9920 6930
6931static tree
75a70cf9 6932diagnose_sb_1 (gimple_stmt_iterator *gsi_p, bool *handled_ops_p,
6933 struct walk_stmt_info *wi)
1e8e9920 6934{
75a70cf9 6935 gimple context = (gimple) wi->info;
6936 gimple inner_context;
6937 gimple stmt = gsi_stmt (*gsi_p);
1e8e9920 6938
75a70cf9 6939 *handled_ops_p = true;
6940
6941 switch (gimple_code (stmt))
1e8e9920 6942 {
75a70cf9 6943 WALK_SUBSTMTS;
48e1416a 6944
75a70cf9 6945 case GIMPLE_OMP_PARALLEL:
6946 case GIMPLE_OMP_TASK:
6947 case GIMPLE_OMP_SECTIONS:
6948 case GIMPLE_OMP_SINGLE:
6949 case GIMPLE_OMP_SECTION:
6950 case GIMPLE_OMP_MASTER:
6951 case GIMPLE_OMP_ORDERED:
6952 case GIMPLE_OMP_CRITICAL:
6953 /* The minimal context here is just the current OMP construct. */
6954 inner_context = stmt;
1e8e9920 6955 wi->info = inner_context;
75a70cf9 6956 walk_gimple_seq (gimple_omp_body (stmt), diagnose_sb_1, NULL, wi);
1e8e9920 6957 wi->info = context;
6958 break;
6959
75a70cf9 6960 case GIMPLE_OMP_FOR:
6961 inner_context = stmt;
1e8e9920 6962 wi->info = inner_context;
75a70cf9 6963 /* gimple_omp_for_{index,initial,final} are all DECLs; no need to
6964 walk them. */
6965 walk_gimple_seq (gimple_omp_for_pre_body (stmt),
6966 diagnose_sb_1, NULL, wi);
6967 walk_gimple_seq (gimple_omp_body (stmt), diagnose_sb_1, NULL, wi);
1e8e9920 6968 wi->info = context;
6969 break;
6970
75a70cf9 6971 case GIMPLE_LABEL:
6972 splay_tree_insert (all_labels, (splay_tree_key) gimple_label_label (stmt),
1e8e9920 6973 (splay_tree_value) context);
6974 break;
6975
6976 default:
6977 break;
6978 }
6979
6980 return NULL_TREE;
6981}
6982
6983/* Pass 2: Check each branch and see if its context differs from that of
6984 the destination label's context. */
6985
6986static tree
75a70cf9 6987diagnose_sb_2 (gimple_stmt_iterator *gsi_p, bool *handled_ops_p,
6988 struct walk_stmt_info *wi)
1e8e9920 6989{
75a70cf9 6990 gimple context = (gimple) wi->info;
1e8e9920 6991 splay_tree_node n;
75a70cf9 6992 gimple stmt = gsi_stmt (*gsi_p);
1e8e9920 6993
75a70cf9 6994 *handled_ops_p = true;
6995
6996 switch (gimple_code (stmt))
1e8e9920 6997 {
75a70cf9 6998 WALK_SUBSTMTS;
6999
7000 case GIMPLE_OMP_PARALLEL:
7001 case GIMPLE_OMP_TASK:
7002 case GIMPLE_OMP_SECTIONS:
7003 case GIMPLE_OMP_SINGLE:
7004 case GIMPLE_OMP_SECTION:
7005 case GIMPLE_OMP_MASTER:
7006 case GIMPLE_OMP_ORDERED:
7007 case GIMPLE_OMP_CRITICAL:
7008 wi->info = stmt;
7009 walk_gimple_seq (gimple_omp_body (stmt), diagnose_sb_2, NULL, wi);
1e8e9920 7010 wi->info = context;
7011 break;
7012
75a70cf9 7013 case GIMPLE_OMP_FOR:
7014 wi->info = stmt;
7015 /* gimple_omp_for_{index,initial,final} are all DECLs; no need to
7016 walk them. */
7017 walk_gimple_seq (gimple_omp_for_pre_body (stmt),
7018 diagnose_sb_2, NULL, wi);
7019 walk_gimple_seq (gimple_omp_body (stmt), diagnose_sb_2, NULL, wi);
1e8e9920 7020 wi->info = context;
7021 break;
7022
0e1818e7 7023 case GIMPLE_COND:
7024 {
7025 tree lab = gimple_cond_true_label (stmt);
7026 if (lab)
7027 {
7028 n = splay_tree_lookup (all_labels,
7029 (splay_tree_key) lab);
7030 diagnose_sb_0 (gsi_p, context,
7031 n ? (gimple) n->value : NULL);
7032 }
7033 lab = gimple_cond_false_label (stmt);
7034 if (lab)
7035 {
7036 n = splay_tree_lookup (all_labels,
7037 (splay_tree_key) lab);
7038 diagnose_sb_0 (gsi_p, context,
7039 n ? (gimple) n->value : NULL);
7040 }
7041 }
7042 break;
7043
75a70cf9 7044 case GIMPLE_GOTO:
1e8e9920 7045 {
75a70cf9 7046 tree lab = gimple_goto_dest (stmt);
1e8e9920 7047 if (TREE_CODE (lab) != LABEL_DECL)
7048 break;
7049
7050 n = splay_tree_lookup (all_labels, (splay_tree_key) lab);
75a70cf9 7051 diagnose_sb_0 (gsi_p, context, n ? (gimple) n->value : NULL);
1e8e9920 7052 }
7053 break;
7054
75a70cf9 7055 case GIMPLE_SWITCH:
1e8e9920 7056 {
75a70cf9 7057 unsigned int i;
7058 for (i = 0; i < gimple_switch_num_labels (stmt); ++i)
1e8e9920 7059 {
75a70cf9 7060 tree lab = CASE_LABEL (gimple_switch_label (stmt, i));
1e8e9920 7061 n = splay_tree_lookup (all_labels, (splay_tree_key) lab);
75a70cf9 7062 if (n && diagnose_sb_0 (gsi_p, context, (gimple) n->value))
1e8e9920 7063 break;
7064 }
7065 }
7066 break;
7067
75a70cf9 7068 case GIMPLE_RETURN:
7069 diagnose_sb_0 (gsi_p, context, NULL);
1e8e9920 7070 break;
7071
7072 default:
7073 break;
7074 }
7075
7076 return NULL_TREE;
7077}
7078
bfec3452 7079static unsigned int
7080diagnose_omp_structured_block_errors (void)
1e8e9920 7081{
1e8e9920 7082 struct walk_stmt_info wi;
bfec3452 7083 gimple_seq body = gimple_body (current_function_decl);
1e8e9920 7084
7085 all_labels = splay_tree_new (splay_tree_compare_pointers, 0, 0);
7086
7087 memset (&wi, 0, sizeof (wi));
75a70cf9 7088 walk_gimple_seq (body, diagnose_sb_1, NULL, &wi);
1e8e9920 7089
7090 memset (&wi, 0, sizeof (wi));
1e8e9920 7091 wi.want_locations = true;
75a70cf9 7092 walk_gimple_seq (body, diagnose_sb_2, NULL, &wi);
1e8e9920 7093
7094 splay_tree_delete (all_labels);
7095 all_labels = NULL;
7096
bfec3452 7097 return 0;
1e8e9920 7098}
7099
bfec3452 7100static bool
7101gate_diagnose_omp_blocks (void)
7102{
7103 return flag_openmp != 0;
7104}
7105
7106struct gimple_opt_pass pass_diagnose_omp_blocks =
7107{
7108 {
7109 GIMPLE_PASS,
53b5ae07 7110 "*diagnose_omp_blocks", /* name */
bfec3452 7111 gate_diagnose_omp_blocks, /* gate */
7112 diagnose_omp_structured_block_errors, /* execute */
7113 NULL, /* sub */
7114 NULL, /* next */
7115 0, /* static_pass_number */
7116 TV_NONE, /* tv_id */
7117 PROP_gimple_any, /* properties_required */
7118 0, /* properties_provided */
7119 0, /* properties_destroyed */
7120 0, /* todo_flags_start */
7121 0, /* todo_flags_finish */
7122 }
7123};
7124
1e8e9920 7125#include "gt-omp-low.h"