/* Lowering pass for OpenMP directives.  Converts OpenMP directives
   into explicit calls to the runtime library (libgomp) and data
   marshalling to implement data sharing and copying clauses.
   Contributed by Diego Novillo <dnovillo@redhat.com>

   Copyright (C) 2005, 2006, 2007, 2008, 2009, 2010
   Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
#include "rtl.h"
#include "gimple.h"
#include "tree-iterator.h"
#include "tree-inline.h"
#include "langhooks.h"
#include "diagnostic-core.h"
#include "tree-flow.h"
#include "timevar.h"
#include "flags.h"
#include "function.h"
#include "expr.h"
#include "toplev.h"
#include "tree-pass.h"
#include "ggc.h"
#include "except.h"
#include "splay-tree.h"
#include "optabs.h"
#include "cfgloop.h"


/* Lowering of OpenMP parallel and workshare constructs proceeds in two
   phases.  The first phase scans the function looking for OMP statements
   and then for variables that must be replaced to satisfy data sharing
   clauses.  The second phase expands code for the constructs, as well as
   re-gimplifying things when variables have been replaced with complex
   expressions.

   Final code generation is done by pass_expand_omp.  The flowgraph is
   scanned for parallel regions which are then moved to a new
   function, to be invoked by the thread library.  */

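/* Illustrative sketch, not part of the original sources: after both
   phases run, a directive such as

       #pragma omp parallel shared (x)
       body;

   has BODY outlined into an artificial child function, and the spawn
   point is expressed as calls into libgomp, roughly:

       .omp_data_o.x = x;
       __builtin_GOMP_parallel_start (foo._omp_fn.0, &.omp_data_o, 0);
       foo._omp_fn.0 (&.omp_data_o);
       __builtin_GOMP_parallel_end ();

   where the child reads X back through the .omp_data_i pointer it
   receives.  The actual call sequence is emitted later, by
   pass_expand_omp.  */
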
/* Context structure.  Used to store information about each parallel
   directive in the code.  */

typedef struct omp_context
{
  /* This field must be at the beginning, as we do "inheritance": Some
     callback functions for tree-inline.c (e.g., omp_copy_decl)
     receive a copy_body_data pointer that is up-casted to an
     omp_context pointer.  */
  copy_body_data cb;

  /* The tree of contexts corresponding to the encountered constructs.  */
  struct omp_context *outer;
  gimple stmt;

  /* Map variables to fields in a structure that allows communication
     between sending and receiving threads.  */
  splay_tree field_map;
  tree record_type;
  tree sender_decl;
  tree receiver_decl;

  /* These are used just by task contexts, if task firstprivate fn is
     needed.  srecord_type is used to communicate from the thread
     that encountered the task construct to task firstprivate fn,
     record_type is allocated by GOMP_task, initialized by task firstprivate
     fn and passed to the task body fn.  */
  splay_tree sfield_map;
  tree srecord_type;

  /* A chain of variables to add to the top-level block surrounding the
     construct.  In the case of a parallel, this is in the child function.  */
  tree block_vars;

  /* What to do with variables with implicitly determined sharing
     attributes.  */
  enum omp_clause_default_kind default_kind;

  /* Nesting depth of this context.  Used to beautify error messages re
     invalid gotos.  The outermost ctx is depth 1, with depth 0 being
     reserved for the main body of the function.  */
  int depth;

  /* True if this parallel directive is nested within another.  */
  bool is_nested;
} omp_context;

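/* Illustrative sketch, not part of the original sources: for a parallel
   that shares an address-taken int I and firstprivatizes a float F,
   RECORD_TYPE would describe something like

       struct .omp_data_s { int *i; float f; };

   SENDER_DECL is the parent-side instance (.omp_data_o) whose fields
   are filled in before the spawn; RECEIVER_DECL is the child-side
   pointer (.omp_data_i) through which the fields are read back; and
   FIELD_MAP maps each original VAR_DECL to its FIELD_DECL.  */
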
struct omp_for_data_loop
{
  tree v, n1, n2, step;
  enum tree_code cond_code;
};

/* A structure describing the main elements of a parallel loop.  */

struct omp_for_data
{
  struct omp_for_data_loop loop;
  tree chunk_size;
  gimple for_stmt;
  tree pre, iter_type;
  int collapse;
  bool have_nowait, have_ordered;
  enum omp_clause_schedule_kind sched_kind;
  struct omp_for_data_loop *loops;
};

static splay_tree all_contexts;
static int taskreg_nesting_level;
struct omp_region *root_omp_region;
static bitmap task_shared_vars;

static void scan_omp (gimple_seq, omp_context *);
static tree scan_omp_1_op (tree *, int *, void *);

#define WALK_SUBSTMTS  \
    case GIMPLE_BIND: \
    case GIMPLE_TRY: \
    case GIMPLE_CATCH: \
    case GIMPLE_EH_FILTER: \
      /* The sub-statements for these should be walked.  */ \
      *handled_ops_p = false; \
      break;

/* Convenience function for calling scan_omp_1_op on tree operands.  */

static inline tree
scan_omp_op (tree *tp, omp_context *ctx)
{
  struct walk_stmt_info wi;

  memset (&wi, 0, sizeof (wi));
  wi.info = ctx;
  wi.want_locations = true;

  return walk_tree (tp, scan_omp_1_op, &wi, NULL);
}

static void lower_omp (gimple_seq, omp_context *);
static tree lookup_decl_in_outer_ctx (tree, omp_context *);
static tree maybe_lookup_decl_in_outer_ctx (tree, omp_context *);

/* Find an OpenMP clause of type KIND within CLAUSES.  */

tree
find_omp_clause (tree clauses, enum omp_clause_code kind)
{
  for (; clauses ; clauses = OMP_CLAUSE_CHAIN (clauses))
    if (OMP_CLAUSE_CODE (clauses) == kind)
      return clauses;

  return NULL_TREE;
}

/* Return true if CTX is for an omp parallel.  */

static inline bool
is_parallel_ctx (omp_context *ctx)
{
  return gimple_code (ctx->stmt) == GIMPLE_OMP_PARALLEL;
}


/* Return true if CTX is for an omp task.  */

static inline bool
is_task_ctx (omp_context *ctx)
{
  return gimple_code (ctx->stmt) == GIMPLE_OMP_TASK;
}


/* Return true if CTX is for an omp parallel or omp task.  */

static inline bool
is_taskreg_ctx (omp_context *ctx)
{
  return gimple_code (ctx->stmt) == GIMPLE_OMP_PARALLEL
         || gimple_code (ctx->stmt) == GIMPLE_OMP_TASK;
}


/* Return true if REGION is a combined parallel+workshare region.  */

static inline bool
is_combined_parallel (struct omp_region *region)
{
  return region->is_combined_parallel;
}

/* Extract the header elements of parallel loop FOR_STMT and store
   them into *FD.  */

static void
extract_omp_for_data (gimple for_stmt, struct omp_for_data *fd,
                      struct omp_for_data_loop *loops)
{
  tree t, var, *collapse_iter, *collapse_count;
  tree count = NULL_TREE, iter_type = long_integer_type_node;
  struct omp_for_data_loop *loop;
  int i;
  struct omp_for_data_loop dummy_loop;
  location_t loc = gimple_location (for_stmt);

  fd->for_stmt = for_stmt;
  fd->pre = NULL;
  fd->collapse = gimple_omp_for_collapse (for_stmt);
  if (fd->collapse > 1)
    fd->loops = loops;
  else
    fd->loops = &fd->loop;

  fd->have_nowait = fd->have_ordered = false;
  fd->sched_kind = OMP_CLAUSE_SCHEDULE_STATIC;
  fd->chunk_size = NULL_TREE;
  collapse_iter = NULL;
  collapse_count = NULL;

  for (t = gimple_omp_for_clauses (for_stmt); t ; t = OMP_CLAUSE_CHAIN (t))
    switch (OMP_CLAUSE_CODE (t))
      {
      case OMP_CLAUSE_NOWAIT:
        fd->have_nowait = true;
        break;
      case OMP_CLAUSE_ORDERED:
        fd->have_ordered = true;
        break;
      case OMP_CLAUSE_SCHEDULE:
        fd->sched_kind = OMP_CLAUSE_SCHEDULE_KIND (t);
        fd->chunk_size = OMP_CLAUSE_SCHEDULE_CHUNK_EXPR (t);
        break;
      case OMP_CLAUSE_COLLAPSE:
        if (fd->collapse > 1)
          {
            collapse_iter = &OMP_CLAUSE_COLLAPSE_ITERVAR (t);
            collapse_count = &OMP_CLAUSE_COLLAPSE_COUNT (t);
          }
      default:
        break;
      }

  /* FIXME: for now map schedule(auto) to schedule(static).
     There should be analysis to determine whether all iterations
     are approximately the same amount of work (then schedule(static)
     is best) or if it varies (then schedule(dynamic,N) is better).  */
  if (fd->sched_kind == OMP_CLAUSE_SCHEDULE_AUTO)
    {
      fd->sched_kind = OMP_CLAUSE_SCHEDULE_STATIC;
      gcc_assert (fd->chunk_size == NULL);
    }
  gcc_assert (fd->collapse == 1 || collapse_iter != NULL);
  if (fd->sched_kind == OMP_CLAUSE_SCHEDULE_RUNTIME)
    gcc_assert (fd->chunk_size == NULL);
  else if (fd->chunk_size == NULL)
    {
      /* We only need to compute a default chunk size for ordered
         static loops and dynamic loops.  */
      if (fd->sched_kind != OMP_CLAUSE_SCHEDULE_STATIC
          || fd->have_ordered
          || fd->collapse > 1)
        fd->chunk_size = (fd->sched_kind == OMP_CLAUSE_SCHEDULE_STATIC)
                         ? integer_zero_node : integer_one_node;
    }

  for (i = 0; i < fd->collapse; i++)
    {
      if (fd->collapse == 1)
        loop = &fd->loop;
      else if (loops != NULL)
        loop = loops + i;
      else
        loop = &dummy_loop;

      loop->v = gimple_omp_for_index (for_stmt, i);
      gcc_assert (SSA_VAR_P (loop->v));
      gcc_assert (TREE_CODE (TREE_TYPE (loop->v)) == INTEGER_TYPE
                  || TREE_CODE (TREE_TYPE (loop->v)) == POINTER_TYPE);
      var = TREE_CODE (loop->v) == SSA_NAME ? SSA_NAME_VAR (loop->v) : loop->v;
      loop->n1 = gimple_omp_for_initial (for_stmt, i);

      loop->cond_code = gimple_omp_for_cond (for_stmt, i);
      loop->n2 = gimple_omp_for_final (for_stmt, i);
      switch (loop->cond_code)
        {
        case LT_EXPR:
        case GT_EXPR:
          break;
        case LE_EXPR:
          if (POINTER_TYPE_P (TREE_TYPE (loop->n2)))
            loop->n2 = fold_build2_loc (loc,
                                POINTER_PLUS_EXPR, TREE_TYPE (loop->n2),
                                loop->n2, size_one_node);
          else
            loop->n2 = fold_build2_loc (loc,
                                PLUS_EXPR, TREE_TYPE (loop->n2), loop->n2,
                                build_int_cst (TREE_TYPE (loop->n2), 1));
          loop->cond_code = LT_EXPR;
          break;
        case GE_EXPR:
          if (POINTER_TYPE_P (TREE_TYPE (loop->n2)))
            loop->n2 = fold_build2_loc (loc,
                                POINTER_PLUS_EXPR, TREE_TYPE (loop->n2),
                                loop->n2, size_int (-1));
          else
            loop->n2 = fold_build2_loc (loc,
                                MINUS_EXPR, TREE_TYPE (loop->n2), loop->n2,
                                build_int_cst (TREE_TYPE (loop->n2), 1));
          loop->cond_code = GT_EXPR;
          break;
        default:
          gcc_unreachable ();
        }

      t = gimple_omp_for_incr (for_stmt, i);
      gcc_assert (TREE_OPERAND (t, 0) == var);
      switch (TREE_CODE (t))
        {
        case PLUS_EXPR:
        case POINTER_PLUS_EXPR:
          loop->step = TREE_OPERAND (t, 1);
          break;
        case MINUS_EXPR:
          loop->step = TREE_OPERAND (t, 1);
          loop->step = fold_build1_loc (loc,
                                NEGATE_EXPR, TREE_TYPE (loop->step),
                                loop->step);
          break;
        default:
          gcc_unreachable ();
        }

      if (iter_type != long_long_unsigned_type_node)
        {
          if (POINTER_TYPE_P (TREE_TYPE (loop->v)))
            iter_type = long_long_unsigned_type_node;
          else if (TYPE_UNSIGNED (TREE_TYPE (loop->v))
                   && TYPE_PRECISION (TREE_TYPE (loop->v))
                      >= TYPE_PRECISION (iter_type))
            {
              tree n;

              if (loop->cond_code == LT_EXPR)
                n = fold_build2_loc (loc,
                                     PLUS_EXPR, TREE_TYPE (loop->v),
                                     loop->n2, loop->step);
              else
                n = loop->n1;
              if (TREE_CODE (n) != INTEGER_CST
                  || tree_int_cst_lt (TYPE_MAX_VALUE (iter_type), n))
                iter_type = long_long_unsigned_type_node;
            }
          else if (TYPE_PRECISION (TREE_TYPE (loop->v))
                   > TYPE_PRECISION (iter_type))
            {
              tree n1, n2;

              if (loop->cond_code == LT_EXPR)
                {
                  n1 = loop->n1;
                  n2 = fold_build2_loc (loc,
                                        PLUS_EXPR, TREE_TYPE (loop->v),
                                        loop->n2, loop->step);
                }
              else
                {
                  n1 = fold_build2_loc (loc,
                                        MINUS_EXPR, TREE_TYPE (loop->v),
                                        loop->n2, loop->step);
                  n2 = loop->n1;
                }
              if (TREE_CODE (n1) != INTEGER_CST
                  || TREE_CODE (n2) != INTEGER_CST
                  || !tree_int_cst_lt (TYPE_MIN_VALUE (iter_type), n1)
                  || !tree_int_cst_lt (n2, TYPE_MAX_VALUE (iter_type)))
                iter_type = long_long_unsigned_type_node;
            }
        }

      if (collapse_count && *collapse_count == NULL)
        {
          if ((i == 0 || count != NULL_TREE)
              && TREE_CODE (TREE_TYPE (loop->v)) == INTEGER_TYPE
              && TREE_CONSTANT (loop->n1)
              && TREE_CONSTANT (loop->n2)
              && TREE_CODE (loop->step) == INTEGER_CST)
            {
              tree itype = TREE_TYPE (loop->v);

              if (POINTER_TYPE_P (itype))
                itype
                  = lang_hooks.types.type_for_size (TYPE_PRECISION (itype), 0);
              t = build_int_cst (itype, (loop->cond_code == LT_EXPR ? -1 : 1));
              t = fold_build2_loc (loc,
                                   PLUS_EXPR, itype,
                                   fold_convert_loc (loc, itype, loop->step), t);
              t = fold_build2_loc (loc, PLUS_EXPR, itype, t,
                                   fold_convert_loc (loc, itype, loop->n2));
              t = fold_build2_loc (loc, MINUS_EXPR, itype, t,
                                   fold_convert_loc (loc, itype, loop->n1));
              if (TYPE_UNSIGNED (itype) && loop->cond_code == GT_EXPR)
                t = fold_build2_loc (loc, TRUNC_DIV_EXPR, itype,
                                     fold_build1_loc (loc, NEGATE_EXPR, itype, t),
                                     fold_build1_loc (loc, NEGATE_EXPR, itype,
                                                      fold_convert_loc (loc, itype,
                                                                        loop->step)));
              else
                t = fold_build2_loc (loc, TRUNC_DIV_EXPR, itype, t,
                                     fold_convert_loc (loc, itype, loop->step));
              t = fold_convert_loc (loc, long_long_unsigned_type_node, t);
              if (count != NULL_TREE)
                count = fold_build2_loc (loc,
                                         MULT_EXPR, long_long_unsigned_type_node,
                                         count, t);
              else
                count = t;
              if (TREE_CODE (count) != INTEGER_CST)
                count = NULL_TREE;
            }
          else
            count = NULL_TREE;
        }
    }

  if (count)
    {
      if (!tree_int_cst_lt (count, TYPE_MAX_VALUE (long_integer_type_node)))
        iter_type = long_long_unsigned_type_node;
      else
        iter_type = long_integer_type_node;
    }
  else if (collapse_iter && *collapse_iter != NULL)
    iter_type = TREE_TYPE (*collapse_iter);
  fd->iter_type = iter_type;
  if (collapse_iter && *collapse_iter == NULL)
    *collapse_iter = create_tmp_var (iter_type, ".iter");
  if (collapse_count && *collapse_count == NULL)
    {
      if (count)
        *collapse_count = fold_convert_loc (loc, iter_type, count);
      else
        *collapse_count = create_tmp_var (iter_type, ".count");
    }

  if (fd->collapse > 1)
    {
      fd->loop.v = *collapse_iter;
      fd->loop.n1 = build_int_cst (TREE_TYPE (fd->loop.v), 0);
      fd->loop.n2 = *collapse_count;
      fd->loop.step = build_int_cst (TREE_TYPE (fd->loop.v), 1);
      fd->loop.cond_code = LT_EXPR;
    }
}

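/* Worked example for the normalization above, not part of the original
   sources: given

       #pragma omp for collapse (2)
       for (i = 0; i <= N; i++)
         for (j = 0; j < 8; j++)

   the LE_EXPR condition is rewritten as i < N + 1, a MINUS_EXPR
   increment would have its step negated into a PLUS_EXPR step, and,
   when all bounds are constant, the collapsed iteration count is the
   product (N + 1) * 8.  The collapsed nest is then driven by a single
   synthetic iterator .iter running from 0 up to .count.  */
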
/* Given two blocks PAR_ENTRY_BB and WS_ENTRY_BB such that WS_ENTRY_BB
   is the immediate dominator of PAR_ENTRY_BB, return true if there
   are no data dependencies that would prevent expanding the parallel
   directive at PAR_ENTRY_BB as a combined parallel+workshare region.

   When expanding a combined parallel+workshare region, the call to
   the child function may need additional arguments in the case of
   GIMPLE_OMP_FOR regions.  In some cases, these arguments are
   computed out of variables passed in from the parent to the child
   via 'struct .omp_data_s'.  For instance:

        #pragma omp parallel for schedule (guided, i * 4)
        for (j ...)

   Is lowered into:

        # BLOCK 2 (PAR_ENTRY_BB)
        .omp_data_o.i = i;
        #pragma omp parallel [child fn: bar.omp_fn.0 ( ..., D.1598)

        # BLOCK 3 (WS_ENTRY_BB)
        .omp_data_i = &.omp_data_o;
        D.1667 = .omp_data_i->i;
        D.1598 = D.1667 * 4;
        #pragma omp for schedule (guided, D.1598)

   When we outline the parallel region, the call to the child function
   'bar.omp_fn.0' will need the value D.1598 in its argument list, but
   that value is computed *after* the call site.  So, in principle we
   cannot do the transformation.

   To see whether the code in WS_ENTRY_BB blocks the combined
   parallel+workshare call, we collect all the variables used in the
   GIMPLE_OMP_FOR header and check whether they appear on the LHS of
   any statement in WS_ENTRY_BB.  If so, then we cannot emit the
   combined call.

   FIXME.  If we had the SSA form built at this point, we could merely
   hoist the code in block 3 into block 2 and be done with it.  But at
   this point we don't have dataflow information and though we could
   hack something up here, it is really not worth the aggravation.  */

static bool
workshare_safe_to_combine_p (basic_block ws_entry_bb)
{
  struct omp_for_data fd;
  gimple ws_stmt = last_stmt (ws_entry_bb);

  if (gimple_code (ws_stmt) == GIMPLE_OMP_SECTIONS)
    return true;

  gcc_assert (gimple_code (ws_stmt) == GIMPLE_OMP_FOR);

  extract_omp_for_data (ws_stmt, &fd, NULL);

  if (fd.collapse > 1 && TREE_CODE (fd.loop.n2) != INTEGER_CST)
    return false;
  if (fd.iter_type != long_integer_type_node)
    return false;

  /* FIXME.  We give up too easily here.  If any of these arguments
     are not constants, they will likely involve variables that have
     been mapped into fields of .omp_data_s for sharing with the child
     function.  With appropriate data flow, it would be possible to
     see through this.  */
  if (!is_gimple_min_invariant (fd.loop.n1)
      || !is_gimple_min_invariant (fd.loop.n2)
      || !is_gimple_min_invariant (fd.loop.step)
      || (fd.chunk_size && !is_gimple_min_invariant (fd.chunk_size)))
    return false;

  return true;
}

/* Collect additional arguments needed to emit a combined
   parallel+workshare call.  WS_STMT is the workshare directive being
   expanded.  */

static VEC(tree,gc) *
get_ws_args_for (gimple ws_stmt)
{
  tree t;
  location_t loc = gimple_location (ws_stmt);
  VEC(tree,gc) *ws_args;

  if (gimple_code (ws_stmt) == GIMPLE_OMP_FOR)
    {
      struct omp_for_data fd;

      extract_omp_for_data (ws_stmt, &fd, NULL);

      ws_args = VEC_alloc (tree, gc, 3 + (fd.chunk_size != 0));

      t = fold_convert_loc (loc, long_integer_type_node, fd.loop.n1);
      VEC_quick_push (tree, ws_args, t);

      t = fold_convert_loc (loc, long_integer_type_node, fd.loop.n2);
      VEC_quick_push (tree, ws_args, t);

      t = fold_convert_loc (loc, long_integer_type_node, fd.loop.step);
      VEC_quick_push (tree, ws_args, t);

      if (fd.chunk_size)
        {
          t = fold_convert_loc (loc, long_integer_type_node, fd.chunk_size);
          VEC_quick_push (tree, ws_args, t);
        }

      return ws_args;
    }
  else if (gimple_code (ws_stmt) == GIMPLE_OMP_SECTIONS)
    {
      /* Number of sections is equal to the number of edges from the
         GIMPLE_OMP_SECTIONS_SWITCH statement, except for the one to
         the exit of the sections region.  */
      basic_block bb = single_succ (gimple_bb (ws_stmt));
      t = build_int_cst (unsigned_type_node, EDGE_COUNT (bb->succs) - 1);
      ws_args = VEC_alloc (tree, gc, 1);
      VEC_quick_push (tree, ws_args, t);
      return ws_args;
    }

  gcc_unreachable ();
}

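/* Illustrative sketch, not part of the original sources: for a combined

       #pragma omp parallel for schedule (dynamic, 4)
       for (i = 0; i < n; i++)

   the vector built above holds { (long) 0, (long) n, (long) 1,
   (long) 4 }: the loop bounds, step and chunk size that a combined
   libgomp entry point such as GOMP_parallel_loop_dynamic_start
   expects in addition to the child function and data pointer.  */
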
/* Discover whether REGION is a combined parallel+workshare region.  */

static void
determine_parallel_type (struct omp_region *region)
{
  basic_block par_entry_bb, par_exit_bb;
  basic_block ws_entry_bb, ws_exit_bb;

  if (region == NULL || region->inner == NULL
      || region->exit == NULL || region->inner->exit == NULL
      || region->inner->cont == NULL)
    return;

  /* We only support parallel+for and parallel+sections.  */
  if (region->type != GIMPLE_OMP_PARALLEL
      || (region->inner->type != GIMPLE_OMP_FOR
          && region->inner->type != GIMPLE_OMP_SECTIONS))
    return;

  /* Check for perfect nesting PAR_ENTRY_BB -> WS_ENTRY_BB and
     WS_EXIT_BB -> PAR_EXIT_BB.  */
  par_entry_bb = region->entry;
  par_exit_bb = region->exit;
  ws_entry_bb = region->inner->entry;
  ws_exit_bb = region->inner->exit;

  if (single_succ (par_entry_bb) == ws_entry_bb
      && single_succ (ws_exit_bb) == par_exit_bb
      && workshare_safe_to_combine_p (ws_entry_bb)
      && (gimple_omp_parallel_combined_p (last_stmt (par_entry_bb))
          || (last_and_only_stmt (ws_entry_bb)
              && last_and_only_stmt (par_exit_bb))))
    {
      gimple ws_stmt = last_stmt (ws_entry_bb);

      if (region->inner->type == GIMPLE_OMP_FOR)
        {
          /* If this is a combined parallel loop, we need to determine
             whether or not to use the combined library calls.  There
             are two cases where we do not apply the transformation:
             static loops and any kind of ordered loop.  In the first
             case, we already open code the loop so there is no need
             to do anything else.  In the latter case, the combined
             parallel loop call would still need extra synchronization
             to implement ordered semantics, so there would not be any
             gain in using the combined call.  */
          tree clauses = gimple_omp_for_clauses (ws_stmt);
          tree c = find_omp_clause (clauses, OMP_CLAUSE_SCHEDULE);
          if (c == NULL
              || OMP_CLAUSE_SCHEDULE_KIND (c) == OMP_CLAUSE_SCHEDULE_STATIC
              || find_omp_clause (clauses, OMP_CLAUSE_ORDERED))
            {
              region->is_combined_parallel = false;
              region->inner->is_combined_parallel = false;
              return;
            }
        }

      region->is_combined_parallel = true;
      region->inner->is_combined_parallel = true;
      region->ws_args = get_ws_args_for (ws_stmt);
    }
}

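/* Illustrative summary, not part of the original sources: a perfectly
   nested

       #pragma omp parallel
       #pragma omp for schedule (dynamic)

   is marked combined here and later expanded through a single
   combined library call, while the same nest with schedule (static)
   or an ordered clause keeps the separate parallel and workshare
   expansions, for the reasons given in the comment above.  */
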
/* Return true if EXPR is variable sized.  */

static inline bool
is_variable_sized (const_tree expr)
{
  return !TREE_CONSTANT (TYPE_SIZE_UNIT (TREE_TYPE (expr)));
}

/* Return true if DECL is a reference type.  */

static inline bool
is_reference (tree decl)
{
  return lang_hooks.decls.omp_privatize_by_reference (decl);
}

/* Lookup variables in the decl or field splay trees.  The "maybe" form
   allows for the variable form to not have been entered, otherwise we
   assert that the variable must have been entered.  */

static inline tree
lookup_decl (tree var, omp_context *ctx)
{
  tree *n;
  n = (tree *) pointer_map_contains (ctx->cb.decl_map, var);
  return *n;
}

static inline tree
maybe_lookup_decl (const_tree var, omp_context *ctx)
{
  tree *n;
  n = (tree *) pointer_map_contains (ctx->cb.decl_map, var);
  return n ? *n : NULL_TREE;
}

static inline tree
lookup_field (tree var, omp_context *ctx)
{
  splay_tree_node n;
  n = splay_tree_lookup (ctx->field_map, (splay_tree_key) var);
  return (tree) n->value;
}

static inline tree
lookup_sfield (tree var, omp_context *ctx)
{
  splay_tree_node n;
  n = splay_tree_lookup (ctx->sfield_map
                         ? ctx->sfield_map : ctx->field_map,
                         (splay_tree_key) var);
  return (tree) n->value;
}

static inline tree
maybe_lookup_field (tree var, omp_context *ctx)
{
  splay_tree_node n;
  n = splay_tree_lookup (ctx->field_map, (splay_tree_key) var);
  return n ? (tree) n->value : NULL_TREE;
}

/* Return true if DECL should be copied by pointer.  SHARED_CTX is
   the parallel context if DECL is to be shared.  */

static bool
use_pointer_for_field (tree decl, omp_context *shared_ctx)
{
  if (AGGREGATE_TYPE_P (TREE_TYPE (decl)))
    return true;

  /* We can only use copy-in/copy-out semantics for shared variables
     when we know the value is not accessible from an outer scope.  */
  if (shared_ctx)
    {
      /* ??? Trivially accessible from anywhere.  But why would we even
         be passing an address in this case?  Should we simply assert
         this to be false, or should we have a cleanup pass that removes
         these from the list of mappings?  */
      if (TREE_STATIC (decl) || DECL_EXTERNAL (decl))
        return true;

      /* For variables with DECL_HAS_VALUE_EXPR_P set, we cannot tell
         without analyzing the expression whether or not its location
         is accessible to anyone else.  In the case of nested parallel
         regions it certainly may be.  */
      if (TREE_CODE (decl) != RESULT_DECL && DECL_HAS_VALUE_EXPR_P (decl))
        return true;

      /* Do not use copy-in/copy-out for variables that have their
         address taken.  */
      if (TREE_ADDRESSABLE (decl))
        return true;

      /* Disallow copy-in/out in nested parallel if
         decl is shared in outer parallel, otherwise
         each thread could store the shared variable
         in its own copy-in location, making the
         variable no longer really shared.  */
      if (!TREE_READONLY (decl) && shared_ctx->is_nested)
        {
          omp_context *up;

          for (up = shared_ctx->outer; up; up = up->outer)
            if (is_taskreg_ctx (up) && maybe_lookup_decl (decl, up))
              break;

          if (up)
            {
              tree c;

              for (c = gimple_omp_taskreg_clauses (up->stmt);
                   c; c = OMP_CLAUSE_CHAIN (c))
                if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_SHARED
                    && OMP_CLAUSE_DECL (c) == decl)
                  break;

              if (c)
                return true;
            }
        }

      /* For tasks avoid using copy-in/out, unless they are readonly
         (in which case just copy-in is used).  As tasks can be
         deferred or executed in a different thread, when GOMP_task
         returns, the task hasn't necessarily terminated.  */
      if (!TREE_READONLY (decl) && is_task_ctx (shared_ctx))
        {
          tree outer = maybe_lookup_decl_in_outer_ctx (decl, shared_ctx);
          if (is_gimple_reg (outer))
            {
              /* Taking address of OUTER in lower_send_shared_vars
                 might need regimplification of everything that uses the
                 variable.  */
              if (!task_shared_vars)
                task_shared_vars = BITMAP_ALLOC (NULL);
              bitmap_set_bit (task_shared_vars, DECL_UID (outer));
              TREE_ADDRESSABLE (outer) = 1;
            }
          return true;
        }
    }

  return false;
}

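/* Worked example, not part of the original sources: given

       int x;
       #pragma omp parallel shared (x)

   where X is TREE_ADDRESSABLE (its address is taken somewhere), the
   function returns true, so the communication record gets an int *
   field and the child accesses X through that pointer.  A
   non-addressable automatic scalar can instead use copy-in/copy-out:
   its value is copied into the record before the spawn and, when it
   is writable and shared, copied back out afterwards.  */
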
/* Create a new VAR_DECL and copy information from VAR to it.  */

tree
copy_var_decl (tree var, tree name, tree type)
{
  tree copy = build_decl (DECL_SOURCE_LOCATION (var), VAR_DECL, name, type);

  TREE_ADDRESSABLE (copy) = TREE_ADDRESSABLE (var);
  TREE_THIS_VOLATILE (copy) = TREE_THIS_VOLATILE (var);
  DECL_GIMPLE_REG_P (copy) = DECL_GIMPLE_REG_P (var);
  DECL_ARTIFICIAL (copy) = DECL_ARTIFICIAL (var);
  DECL_IGNORED_P (copy) = DECL_IGNORED_P (var);
  DECL_CONTEXT (copy) = DECL_CONTEXT (var);
  TREE_USED (copy) = 1;
  DECL_SEEN_IN_BIND_EXPR_P (copy) = 1;

  return copy;
}

/* Construct a new automatic decl similar to VAR.  */

static tree
omp_copy_decl_2 (tree var, tree name, tree type, omp_context *ctx)
{
  tree copy = copy_var_decl (var, name, type);

  DECL_CONTEXT (copy) = current_function_decl;
  TREE_CHAIN (copy) = ctx->block_vars;
  ctx->block_vars = copy;

  return copy;
}

static tree
omp_copy_decl_1 (tree var, omp_context *ctx)
{
  return omp_copy_decl_2 (var, DECL_NAME (var), TREE_TYPE (var), ctx);
}

/* Build tree nodes to access the field for VAR on the receiver side.  */

static tree
build_receiver_ref (tree var, bool by_ref, omp_context *ctx)
{
  tree x, field = lookup_field (var, ctx);

  /* If the receiver record type was remapped in the child function,
     remap the field into the new record type.  */
  x = maybe_lookup_field (field, ctx);
  if (x != NULL)
    field = x;

  x = build_simple_mem_ref (ctx->receiver_decl);
  x = build3 (COMPONENT_REF, TREE_TYPE (field), x, field, NULL);
  if (by_ref)
    x = build_simple_mem_ref (x);

  return x;
}

/* Build tree nodes to access VAR in the scope outer to CTX.  In the case
   of a parallel, this is a component reference; for workshare constructs
   this is some variable.  */

static tree
build_outer_var_ref (tree var, omp_context *ctx)
{
  tree x;

  if (is_global_var (maybe_lookup_decl_in_outer_ctx (var, ctx)))
    x = var;
  else if (is_variable_sized (var))
    {
      x = TREE_OPERAND (DECL_VALUE_EXPR (var), 0);
      x = build_outer_var_ref (x, ctx);
      x = build_simple_mem_ref (x);
    }
  else if (is_taskreg_ctx (ctx))
    {
      bool by_ref = use_pointer_for_field (var, NULL);
      x = build_receiver_ref (var, by_ref, ctx);
    }
  else if (ctx->outer)
    x = lookup_decl (var, ctx->outer);
  else if (is_reference (var))
    /* This can happen with orphaned constructs.  If var is reference, it is
       possible it is shared and as such valid.  */
    x = var;
  else
    gcc_unreachable ();

  if (is_reference (var))
    x = build_simple_mem_ref (x);

  return x;
}

/* Build tree nodes to access the field for VAR on the sender side.  */

static tree
build_sender_ref (tree var, omp_context *ctx)
{
  tree field = lookup_sfield (var, ctx);
  return build3 (COMPONENT_REF, TREE_TYPE (field),
                 ctx->sender_decl, field, NULL);
}

/* Add a new field for VAR inside the structure CTX->SENDER_DECL.  */

static void
install_var_field (tree var, bool by_ref, int mask, omp_context *ctx)
{
  tree field, type, sfield = NULL_TREE;

  gcc_assert ((mask & 1) == 0
              || !splay_tree_lookup (ctx->field_map, (splay_tree_key) var));
  gcc_assert ((mask & 2) == 0 || !ctx->sfield_map
              || !splay_tree_lookup (ctx->sfield_map, (splay_tree_key) var));

  type = TREE_TYPE (var);
  if (by_ref)
    type = build_pointer_type (type);
  else if ((mask & 3) == 1 && is_reference (var))
    type = TREE_TYPE (type);

  field = build_decl (DECL_SOURCE_LOCATION (var),
                      FIELD_DECL, DECL_NAME (var), type);

  /* Remember what variable this field was created for.  This does have a
     side effect of making dwarf2out ignore this member, so for helpful
     debugging we clear it later in delete_omp_context.  */
  DECL_ABSTRACT_ORIGIN (field) = var;
  if (type == TREE_TYPE (var))
    {
      DECL_ALIGN (field) = DECL_ALIGN (var);
      DECL_USER_ALIGN (field) = DECL_USER_ALIGN (var);
      TREE_THIS_VOLATILE (field) = TREE_THIS_VOLATILE (var);
    }
  else
    DECL_ALIGN (field) = TYPE_ALIGN (type);

  if ((mask & 3) == 3)
    {
      insert_field_into_struct (ctx->record_type, field);
      if (ctx->srecord_type)
        {
          sfield = build_decl (DECL_SOURCE_LOCATION (var),
                               FIELD_DECL, DECL_NAME (var), type);
          DECL_ABSTRACT_ORIGIN (sfield) = var;
          DECL_ALIGN (sfield) = DECL_ALIGN (field);
          DECL_USER_ALIGN (sfield) = DECL_USER_ALIGN (field);
          TREE_THIS_VOLATILE (sfield) = TREE_THIS_VOLATILE (field);
          insert_field_into_struct (ctx->srecord_type, sfield);
        }
    }
  else
    {
      if (ctx->srecord_type == NULL_TREE)
        {
          tree t;

          ctx->srecord_type = lang_hooks.types.make_type (RECORD_TYPE);
          ctx->sfield_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
          for (t = TYPE_FIELDS (ctx->record_type); t ; t = TREE_CHAIN (t))
            {
              sfield = build_decl (DECL_SOURCE_LOCATION (var),
                                   FIELD_DECL, DECL_NAME (t), TREE_TYPE (t));
              DECL_ABSTRACT_ORIGIN (sfield) = DECL_ABSTRACT_ORIGIN (t);
              insert_field_into_struct (ctx->srecord_type, sfield);
              splay_tree_insert (ctx->sfield_map,
                                 (splay_tree_key) DECL_ABSTRACT_ORIGIN (t),
                                 (splay_tree_value) sfield);
            }
        }
      sfield = field;
      insert_field_into_struct ((mask & 1) ? ctx->record_type
                                : ctx->srecord_type, field);
    }

  if (mask & 1)
    splay_tree_insert (ctx->field_map, (splay_tree_key) var,
                       (splay_tree_value) field);
  if ((mask & 2) && ctx->sfield_map)
    splay_tree_insert (ctx->sfield_map, (splay_tree_key) var,
                       (splay_tree_value) sfield);
}

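/* Note on the MASK argument above, inferred from the code: bit 0
   requests a field in RECORD_TYPE (the block seen by the child or
   task body) and bit 1 a field in SRECORD_TYPE (the sender-side block
   used with the task firstprivate copy function).  So the common
   parallel case install_var_field (decl, by_ref, 3, ctx) installs the
   field in both records, while task contexts split the two sides
   using masks 1 and 2.  */
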
static tree
install_var_local (tree var, omp_context *ctx)
{
  tree new_var = omp_copy_decl_1 (var, ctx);
  insert_decl_map (&ctx->cb, var, new_var);
  return new_var;
}

/* Adjust the replacement for DECL in CTX for the new context.  This means
   copying the DECL_VALUE_EXPR, and fixing up the type.  */

static void
fixup_remapped_decl (tree decl, omp_context *ctx, bool private_debug)
{
  tree new_decl, size;

  new_decl = lookup_decl (decl, ctx);

  TREE_TYPE (new_decl) = remap_type (TREE_TYPE (decl), &ctx->cb);

  if ((!TREE_CONSTANT (DECL_SIZE (new_decl)) || private_debug)
      && DECL_HAS_VALUE_EXPR_P (decl))
    {
      tree ve = DECL_VALUE_EXPR (decl);
      walk_tree (&ve, copy_tree_body_r, &ctx->cb, NULL);
      SET_DECL_VALUE_EXPR (new_decl, ve);
      DECL_HAS_VALUE_EXPR_P (new_decl) = 1;
    }

  if (!TREE_CONSTANT (DECL_SIZE (new_decl)))
    {
      size = remap_decl (DECL_SIZE (decl), &ctx->cb);
      if (size == error_mark_node)
        size = TYPE_SIZE (TREE_TYPE (new_decl));
      DECL_SIZE (new_decl) = size;

      size = remap_decl (DECL_SIZE_UNIT (decl), &ctx->cb);
      if (size == error_mark_node)
        size = TYPE_SIZE_UNIT (TREE_TYPE (new_decl));
      DECL_SIZE_UNIT (new_decl) = size;
    }
}

/* The callback for remap_decl.  Search all containing contexts for a
   mapping of the variable; this avoids having to duplicate the splay
   tree ahead of time.  We know a mapping doesn't already exist in the
   given context.  Create new mappings to implement default semantics.  */

static tree
omp_copy_decl (tree var, copy_body_data *cb)
{
  omp_context *ctx = (omp_context *) cb;
  tree new_var;

  if (TREE_CODE (var) == LABEL_DECL)
    {
      new_var = create_artificial_label (DECL_SOURCE_LOCATION (var));
      DECL_CONTEXT (new_var) = current_function_decl;
      insert_decl_map (&ctx->cb, var, new_var);
      return new_var;
    }

  while (!is_taskreg_ctx (ctx))
    {
      ctx = ctx->outer;
      if (ctx == NULL)
        return var;
      new_var = maybe_lookup_decl (var, ctx);
      if (new_var)
        return new_var;
    }

  if (is_global_var (var) || decl_function_context (var) != ctx->cb.src_fn)
    return var;

  return error_mark_node;
}

/* Return the parallel region associated with STMT.  */

/* Debugging dumps for parallel regions.  */
void dump_omp_region (FILE *, struct omp_region *, int);
void debug_omp_region (struct omp_region *);
void debug_all_omp_regions (void);

/* Dump the parallel region tree rooted at REGION.  */

void
dump_omp_region (FILE *file, struct omp_region *region, int indent)
{
  fprintf (file, "%*sbb %d: %s\n", indent, "", region->entry->index,
           gimple_code_name[region->type]);

  if (region->inner)
    dump_omp_region (file, region->inner, indent + 4);

  if (region->cont)
    {
      fprintf (file, "%*sbb %d: GIMPLE_OMP_CONTINUE\n", indent, "",
               region->cont->index);
    }

  if (region->exit)
    fprintf (file, "%*sbb %d: GIMPLE_OMP_RETURN\n", indent, "",
             region->exit->index);
  else
    fprintf (file, "%*s[no exit marker]\n", indent, "");

  if (region->next)
    dump_omp_region (file, region->next, indent);
}

DEBUG_FUNCTION void
debug_omp_region (struct omp_region *region)
{
  dump_omp_region (stderr, region, 0);
}

DEBUG_FUNCTION void
debug_all_omp_regions (void)
{
  dump_omp_region (stderr, root_omp_region, 0);
}


/* Create a new parallel region starting at STMT inside region PARENT.  */

struct omp_region *
new_omp_region (basic_block bb, enum gimple_code type,
                struct omp_region *parent)
{
  struct omp_region *region = XCNEW (struct omp_region);

  region->outer = parent;
  region->entry = bb;
  region->type = type;

  if (parent)
    {
      /* This is a nested region.  Add it to the list of inner
         regions in PARENT.  */
      region->next = parent->inner;
      parent->inner = region;
    }
  else
    {
      /* This is a toplevel region.  Add it to the list of toplevel
         regions in ROOT_OMP_REGION.  */
      region->next = root_omp_region;
      root_omp_region = region;
    }

  return region;
}

/* Release the memory associated with the region tree rooted at REGION.  */

static void
free_omp_region_1 (struct omp_region *region)
{
  struct omp_region *i, *n;

  for (i = region->inner; i ; i = n)
    {
      n = i->next;
      free_omp_region_1 (i);
    }

  free (region);
}

/* Release the memory for the entire omp region tree.  */

void
free_omp_regions (void)
{
  struct omp_region *r, *n;
  for (r = root_omp_region; r ; r = n)
    {
      n = r->next;
      free_omp_region_1 (r);
    }
  root_omp_region = NULL;
}

/* Create a new context, with OUTER_CTX being the surrounding context.  */

static omp_context *
new_omp_context (gimple stmt, omp_context *outer_ctx)
{
  omp_context *ctx = XCNEW (omp_context);

  splay_tree_insert (all_contexts, (splay_tree_key) stmt,
                     (splay_tree_value) ctx);
  ctx->stmt = stmt;

  if (outer_ctx)
    {
      ctx->outer = outer_ctx;
      ctx->cb = outer_ctx->cb;
      ctx->cb.block = NULL;
      ctx->depth = outer_ctx->depth + 1;
    }
  else
    {
      ctx->cb.src_fn = current_function_decl;
      ctx->cb.dst_fn = current_function_decl;
      ctx->cb.src_node = cgraph_node (current_function_decl);
      ctx->cb.dst_node = ctx->cb.src_node;
      ctx->cb.src_cfun = cfun;
      ctx->cb.copy_decl = omp_copy_decl;
      ctx->cb.eh_lp_nr = 0;
      ctx->cb.transform_call_graph_edges = CB_CGE_MOVE;
      ctx->depth = 1;
    }

  ctx->cb.decl_map = pointer_map_create ();

  return ctx;
}

static gimple_seq maybe_catch_exception (gimple_seq);

/* Finalize task copyfn.  */

static void
finalize_task_copyfn (gimple task_stmt)
{
  struct function *child_cfun;
  tree child_fn, old_fn;
  gimple_seq seq, new_seq;
  gimple bind;

  child_fn = gimple_omp_task_copy_fn (task_stmt);
  if (child_fn == NULL_TREE)
    return;

  child_cfun = DECL_STRUCT_FUNCTION (child_fn);

  /* Inform the callgraph about the new function.  */
  DECL_STRUCT_FUNCTION (child_fn)->curr_properties
    = cfun->curr_properties;

  old_fn = current_function_decl;
  push_cfun (child_cfun);
  current_function_decl = child_fn;
  bind = gimplify_body (&DECL_SAVED_TREE (child_fn), child_fn, false);
  seq = gimple_seq_alloc ();
  gimple_seq_add_stmt (&seq, bind);
  new_seq = maybe_catch_exception (seq);
  if (new_seq != seq)
    {
      bind = gimple_build_bind (NULL, new_seq, NULL);
      seq = gimple_seq_alloc ();
      gimple_seq_add_stmt (&seq, bind);
    }
  gimple_set_body (child_fn, seq);
  pop_cfun ();
  current_function_decl = old_fn;

  cgraph_add_new_function (child_fn, false);
}

/* Destroy an omp_context data structure.  Called through the splay tree
   value delete callback.  */

static void
delete_omp_context (splay_tree_value value)
{
  omp_context *ctx = (omp_context *) value;

  pointer_map_destroy (ctx->cb.decl_map);

  if (ctx->field_map)
    splay_tree_delete (ctx->field_map);
  if (ctx->sfield_map)
    splay_tree_delete (ctx->sfield_map);

  /* We hijacked DECL_ABSTRACT_ORIGIN earlier.  We need to clear it before
     it produces corrupt debug information.  */
  if (ctx->record_type)
    {
      tree t;
      for (t = TYPE_FIELDS (ctx->record_type); t ; t = TREE_CHAIN (t))
        DECL_ABSTRACT_ORIGIN (t) = NULL;
    }
  if (ctx->srecord_type)
    {
      tree t;
      for (t = TYPE_FIELDS (ctx->srecord_type); t ; t = TREE_CHAIN (t))
        DECL_ABSTRACT_ORIGIN (t) = NULL;
    }

  if (is_task_ctx (ctx))
    finalize_task_copyfn (ctx->stmt);

  XDELETE (ctx);
}

/* Fix up RECEIVER_DECL with a type that has been remapped to the child
   context.  */

static void
fixup_child_record_type (omp_context *ctx)
{
  tree f, type = ctx->record_type;

  /* ??? It isn't sufficient to just call remap_type here, because
     variably_modified_type_p doesn't work the way we expect for
     record types.  Testing each field for whether it needs remapping
     and creating a new record by hand works, however.  */
  for (f = TYPE_FIELDS (type); f ; f = TREE_CHAIN (f))
    if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
      break;
  if (f)
    {
      tree name, new_fields = NULL;

      type = lang_hooks.types.make_type (RECORD_TYPE);
      name = DECL_NAME (TYPE_NAME (ctx->record_type));
      name = build_decl (DECL_SOURCE_LOCATION (ctx->receiver_decl),
                         TYPE_DECL, name, type);
      TYPE_NAME (type) = name;

      for (f = TYPE_FIELDS (ctx->record_type); f ; f = TREE_CHAIN (f))
        {
          tree new_f = copy_node (f);
          DECL_CONTEXT (new_f) = type;
          TREE_TYPE (new_f) = remap_type (TREE_TYPE (f), &ctx->cb);
          TREE_CHAIN (new_f) = new_fields;
          walk_tree (&DECL_SIZE (new_f), copy_tree_body_r, &ctx->cb, NULL);
          walk_tree (&DECL_SIZE_UNIT (new_f), copy_tree_body_r,
                     &ctx->cb, NULL);
          walk_tree (&DECL_FIELD_OFFSET (new_f), copy_tree_body_r,
                     &ctx->cb, NULL);
          new_fields = new_f;

          /* Arrange to be able to look up the receiver field
             given the sender field.  */
          splay_tree_insert (ctx->field_map, (splay_tree_key) f,
                             (splay_tree_value) new_f);
        }
      TYPE_FIELDS (type) = nreverse (new_fields);
      layout_type (type);
    }

  TREE_TYPE (ctx->receiver_decl) = build_pointer_type (type);
}

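/* Illustrative example, not part of the original sources: if the
   record contains a field whose type involves a VLA bound, e.g.
   something like

       struct .omp_data_s { int n; int (*a)[n]; };

   the array type refers to the parent's N, so the loop above builds a
   fresh record whose field types, sizes and offsets are remapped to
   the child's own decls before RECEIVER_DECL's pointer type is
   rebuilt.  */
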
/* Instantiate decls as necessary in CTX to satisfy the data sharing
   specified by CLAUSES.  */

static void
scan_sharing_clauses (tree clauses, omp_context *ctx)
{
  tree c, decl;
  bool scan_array_reductions = false;

  for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
    {
      bool by_ref;

      switch (OMP_CLAUSE_CODE (c))
        {
        case OMP_CLAUSE_PRIVATE:
          decl = OMP_CLAUSE_DECL (c);
          if (OMP_CLAUSE_PRIVATE_OUTER_REF (c))
            goto do_private;
          else if (!is_variable_sized (decl))
            install_var_local (decl, ctx);
          break;

        case OMP_CLAUSE_SHARED:
          gcc_assert (is_taskreg_ctx (ctx));
          decl = OMP_CLAUSE_DECL (c);
          gcc_assert (!COMPLETE_TYPE_P (TREE_TYPE (decl))
                      || !is_variable_sized (decl));
          /* Global variables don't need to be copied,
             the receiver side will use them directly.  */
          if (is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx)))
            break;
          by_ref = use_pointer_for_field (decl, ctx);
          if (! TREE_READONLY (decl)
              || TREE_ADDRESSABLE (decl)
              || by_ref
              || is_reference (decl))
            {
              install_var_field (decl, by_ref, 3, ctx);
              install_var_local (decl, ctx);
              break;
            }
          /* We don't need to copy const scalar vars back.  */
          OMP_CLAUSE_SET_CODE (c, OMP_CLAUSE_FIRSTPRIVATE);
          goto do_private;

        case OMP_CLAUSE_LASTPRIVATE:
          /* Let the corresponding firstprivate clause create
             the variable.  */
          if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
            break;
          /* FALLTHRU */

        case OMP_CLAUSE_FIRSTPRIVATE:
        case OMP_CLAUSE_REDUCTION:
          decl = OMP_CLAUSE_DECL (c);
        do_private:
          if (is_variable_sized (decl))
            {
              if (is_task_ctx (ctx))
                install_var_field (decl, false, 1, ctx);
              break;
            }
          else if (is_taskreg_ctx (ctx))
            {
              bool global
                = is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx));
              by_ref = use_pointer_for_field (decl, NULL);

              if (is_task_ctx (ctx)
                  && (global || by_ref || is_reference (decl)))
                {
                  install_var_field (decl, false, 1, ctx);
                  if (!global)
                    install_var_field (decl, by_ref, 2, ctx);
                }
              else if (!global)
                install_var_field (decl, by_ref, 3, ctx);
            }
          install_var_local (decl, ctx);
          break;

        case OMP_CLAUSE_COPYPRIVATE:
        case OMP_CLAUSE_COPYIN:
          decl = OMP_CLAUSE_DECL (c);
          by_ref = use_pointer_for_field (decl, NULL);
          install_var_field (decl, by_ref, 3, ctx);
          break;

        case OMP_CLAUSE_DEFAULT:
          ctx->default_kind = OMP_CLAUSE_DEFAULT_KIND (c);
          break;

        case OMP_CLAUSE_IF:
        case OMP_CLAUSE_NUM_THREADS:
        case OMP_CLAUSE_SCHEDULE:
          if (ctx->outer)
            scan_omp_op (&OMP_CLAUSE_OPERAND (c, 0), ctx->outer);
          break;

        case OMP_CLAUSE_NOWAIT:
        case OMP_CLAUSE_ORDERED:
        case OMP_CLAUSE_COLLAPSE:
        case OMP_CLAUSE_UNTIED:
          break;

        default:
          gcc_unreachable ();
        }
    }

  for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
    {
      switch (OMP_CLAUSE_CODE (c))
        {
        case OMP_CLAUSE_LASTPRIVATE:
          /* Let the corresponding firstprivate clause create
             the variable.  */
          if (OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c))
            scan_array_reductions = true;
          if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
            break;
          /* FALLTHRU */

        case OMP_CLAUSE_PRIVATE:
        case OMP_CLAUSE_FIRSTPRIVATE:
        case OMP_CLAUSE_REDUCTION:
          decl = OMP_CLAUSE_DECL (c);
          if (is_variable_sized (decl))
            install_var_local (decl, ctx);
          fixup_remapped_decl (decl, ctx,
                               OMP_CLAUSE_CODE (c) == OMP_CLAUSE_PRIVATE
                               && OMP_CLAUSE_PRIVATE_DEBUG (c));
          if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
              && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
            scan_array_reductions = true;
          break;

        case OMP_CLAUSE_SHARED:
          decl = OMP_CLAUSE_DECL (c);
          if (! is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx)))
            fixup_remapped_decl (decl, ctx, false);
          break;

        case OMP_CLAUSE_COPYPRIVATE:
        case OMP_CLAUSE_COPYIN:
        case OMP_CLAUSE_DEFAULT:
        case OMP_CLAUSE_IF:
        case OMP_CLAUSE_NUM_THREADS:
        case OMP_CLAUSE_SCHEDULE:
        case OMP_CLAUSE_NOWAIT:
        case OMP_CLAUSE_ORDERED:
        case OMP_CLAUSE_COLLAPSE:
        case OMP_CLAUSE_UNTIED:
          break;

        default:
          gcc_unreachable ();
        }
    }

  if (scan_array_reductions)
    for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
      if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
          && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
        {
          scan_omp (OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c), ctx);
          scan_omp (OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c), ctx);
        }
      else if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE
               && OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c))
        scan_omp (OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c), ctx);
}

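/* Summary of the two passes above, inferred from the code: the first
   pass installs communication fields and local replacement decls, so
   e.g. "#pragma omp parallel shared (a) firstprivate (b)" gets fields
   for both A and B plus child-side locals; the second pass then fixes
   up the remapped decls (types, sizes, value exprs) only once every
   mapping exists, which matters for variable-sized decls whose size
   expressions refer to other remapped variables.  */
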
/* Create a new name for omp child function.  Returns an identifier.  */

static GTY(()) unsigned int tmp_ompfn_id_num;

static tree
create_omp_child_function_name (bool task_copy)
{
  return (clone_function_name (current_function_decl,
                               task_copy ? "_omp_cpyfn" : "_omp_fn"));
}

/* Build a decl for the omp child function.  It'll not contain a body
   yet, just the bare decl.  */

static void
create_omp_child_function (omp_context *ctx, bool task_copy)
{
  tree decl, type, name, t;

  name = create_omp_child_function_name (task_copy);
  if (task_copy)
    type = build_function_type_list (void_type_node, ptr_type_node,
                                     ptr_type_node, NULL_TREE);
  else
    type = build_function_type_list (void_type_node, ptr_type_node, NULL_TREE);

  decl = build_decl (gimple_location (ctx->stmt),
                     FUNCTION_DECL, name, type);

  if (!task_copy)
    ctx->cb.dst_fn = decl;
  else
    gimple_omp_task_set_copy_fn (ctx->stmt, decl);

  TREE_STATIC (decl) = 1;
  TREE_USED (decl) = 1;
  DECL_ARTIFICIAL (decl) = 1;
  DECL_IGNORED_P (decl) = 0;
  TREE_PUBLIC (decl) = 0;
  DECL_UNINLINABLE (decl) = 1;
  DECL_EXTERNAL (decl) = 0;
  DECL_CONTEXT (decl) = NULL_TREE;
  DECL_INITIAL (decl) = make_node (BLOCK);

  t = build_decl (DECL_SOURCE_LOCATION (decl),
                  RESULT_DECL, NULL_TREE, void_type_node);
  DECL_ARTIFICIAL (t) = 1;
  DECL_IGNORED_P (t) = 1;
  DECL_CONTEXT (t) = decl;
  DECL_RESULT (decl) = t;

  t = build_decl (DECL_SOURCE_LOCATION (decl),
                  PARM_DECL, get_identifier (".omp_data_i"), ptr_type_node);
  DECL_ARTIFICIAL (t) = 1;
  DECL_ARG_TYPE (t) = ptr_type_node;
  DECL_CONTEXT (t) = current_function_decl;
  TREE_USED (t) = 1;
  DECL_ARGUMENTS (decl) = t;
  if (!task_copy)
    ctx->receiver_decl = t;
  else
    {
      t = build_decl (DECL_SOURCE_LOCATION (decl),
                      PARM_DECL, get_identifier (".omp_data_o"),
                      ptr_type_node);
      DECL_ARTIFICIAL (t) = 1;
      DECL_ARG_TYPE (t) = ptr_type_node;
      DECL_CONTEXT (t) = current_function_decl;
      TREE_USED (t) = 1;
      TREE_ADDRESSABLE (t) = 1;
      TREE_CHAIN (t) = DECL_ARGUMENTS (decl);
      DECL_ARGUMENTS (decl) = t;
    }

  /* Allocate memory for the function structure.  The call to
     allocate_struct_function clobbers CFUN, so we need to restore
     it afterward.  */
  push_struct_function (decl);
  cfun->function_end_locus = gimple_location (ctx->stmt);
  pop_cfun ();
}

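/* Shape of the decls built above in C-like pseudo-source, illustrative
   only (the parameters start out as plain void * and the receiver's
   type is narrowed to a pointer to the shared record later, by
   fixup_child_record_type):

       static void foo._omp_fn.0 (void *.omp_data_i);
       static void foo._omp_cpyfn.1 (void *.omp_data_o, void *.omp_data_i);

   The names come from clone_function_name, so the suffix number is
   just a per-translation-unit counter.  */
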
/* Scan an OpenMP parallel directive.  */

static void
scan_omp_parallel (gimple_stmt_iterator *gsi, omp_context *outer_ctx)
{
  omp_context *ctx;
  tree name;
  gimple stmt = gsi_stmt (*gsi);

  /* Ignore parallel directives with empty bodies, unless there
     are copyin clauses.  */
  if (optimize > 0
      && empty_body_p (gimple_omp_body (stmt))
      && find_omp_clause (gimple_omp_parallel_clauses (stmt),
                          OMP_CLAUSE_COPYIN) == NULL)
    {
      gsi_replace (gsi, gimple_build_nop (), false);
      return;
    }

  ctx = new_omp_context (stmt, outer_ctx);
  if (taskreg_nesting_level > 1)
    ctx->is_nested = true;
  ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
  ctx->default_kind = OMP_CLAUSE_DEFAULT_SHARED;
  ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
  name = create_tmp_var_name (".omp_data_s");
  name = build_decl (gimple_location (stmt),
                     TYPE_DECL, name, ctx->record_type);
  TYPE_NAME (ctx->record_type) = name;
  create_omp_child_function (ctx, false);
  gimple_omp_parallel_set_child_fn (stmt, ctx->cb.dst_fn);

  scan_sharing_clauses (gimple_omp_parallel_clauses (stmt), ctx);
  scan_omp (gimple_omp_body (stmt), ctx);

  if (TYPE_FIELDS (ctx->record_type) == NULL)
    ctx->record_type = ctx->receiver_decl = NULL;
  else
    {
      layout_type (ctx->record_type);
      fixup_child_record_type (ctx);
    }
}


/* Scan an OpenMP task directive.  */

static void
scan_omp_task (gimple_stmt_iterator *gsi, omp_context *outer_ctx)
{
  omp_context *ctx;
  tree name, t;
  gimple stmt = gsi_stmt (*gsi);
  location_t loc = gimple_location (stmt);

  /* Ignore task directives with empty bodies.  */
  if (optimize > 0
      && empty_body_p (gimple_omp_body (stmt)))
    {
      gsi_replace (gsi, gimple_build_nop (), false);
      return;
    }

  ctx = new_omp_context (stmt, outer_ctx);
  if (taskreg_nesting_level > 1)
    ctx->is_nested = true;
  ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
  ctx->default_kind = OMP_CLAUSE_DEFAULT_SHARED;
  ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
  name = create_tmp_var_name (".omp_data_s");
  name = build_decl (gimple_location (stmt),
                     TYPE_DECL, name, ctx->record_type);
  TYPE_NAME (ctx->record_type) = name;
  create_omp_child_function (ctx, false);
  gimple_omp_task_set_child_fn (stmt, ctx->cb.dst_fn);

  scan_sharing_clauses (gimple_omp_task_clauses (stmt), ctx);

  if (ctx->srecord_type)
    {
      name = create_tmp_var_name (".omp_data_a");
      name = build_decl (gimple_location (stmt),
                         TYPE_DECL, name, ctx->srecord_type);
      TYPE_NAME (ctx->srecord_type) = name;
      create_omp_child_function (ctx, true);
    }

  scan_omp (gimple_omp_body (stmt), ctx);

  if (TYPE_FIELDS (ctx->record_type) == NULL)
    {
      ctx->record_type = ctx->receiver_decl = NULL;
      t = build_int_cst (long_integer_type_node, 0);
      gimple_omp_task_set_arg_size (stmt, t);
      t = build_int_cst (long_integer_type_node, 1);
      gimple_omp_task_set_arg_align (stmt, t);
    }
  else
    {
      tree *p, vla_fields = NULL_TREE, *q = &vla_fields;
      /* Move VLA fields to the end.  */
      p = &TYPE_FIELDS (ctx->record_type);
      while (*p)
        if (!TYPE_SIZE_UNIT (TREE_TYPE (*p))
            || ! TREE_CONSTANT (TYPE_SIZE_UNIT (TREE_TYPE (*p))))
          {
            *q = *p;
            *p = TREE_CHAIN (*p);
            TREE_CHAIN (*q) = NULL_TREE;
            q = &TREE_CHAIN (*q);
          }
        else
          p = &TREE_CHAIN (*p);
      *p = vla_fields;
      layout_type (ctx->record_type);
      fixup_child_record_type (ctx);
      if (ctx->srecord_type)
        layout_type (ctx->srecord_type);
      t = fold_convert_loc (loc, long_integer_type_node,
                            TYPE_SIZE_UNIT (ctx->record_type));
      gimple_omp_task_set_arg_size (stmt, t);
      t = build_int_cst (long_integer_type_node,
                         TYPE_ALIGN_UNIT (ctx->record_type));
      gimple_omp_task_set_arg_align (stmt, t);
    }
}
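
/* Layout note (illustrative): pushing variable-sized fields to the end
   keeps every fixed-size field at a constant offset.  E.g. with a task
   body referencing both an int and a VLA, the record becomes
   { <fixed-size fields>; <VLA fields> }, and GOMP_task receives the
   record's size and alignment as computed above.  */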


/* Scan an OpenMP loop directive.  */

static void
scan_omp_for (gimple stmt, omp_context *outer_ctx)
{
  omp_context *ctx;
  size_t i;

  ctx = new_omp_context (stmt, outer_ctx);

  scan_sharing_clauses (gimple_omp_for_clauses (stmt), ctx);

  scan_omp (gimple_omp_for_pre_body (stmt), ctx);
  for (i = 0; i < gimple_omp_for_collapse (stmt); i++)
    {
      scan_omp_op (gimple_omp_for_index_ptr (stmt, i), ctx);
      scan_omp_op (gimple_omp_for_initial_ptr (stmt, i), ctx);
      scan_omp_op (gimple_omp_for_final_ptr (stmt, i), ctx);
      scan_omp_op (gimple_omp_for_incr_ptr (stmt, i), ctx);
    }
  scan_omp (gimple_omp_body (stmt), ctx);
}

/* Scan an OpenMP sections directive.  */

static void
scan_omp_sections (gimple stmt, omp_context *outer_ctx)
{
  omp_context *ctx;

  ctx = new_omp_context (stmt, outer_ctx);
  scan_sharing_clauses (gimple_omp_sections_clauses (stmt), ctx);
  scan_omp (gimple_omp_body (stmt), ctx);
}

/* Scan an OpenMP single directive.  */

static void
scan_omp_single (gimple stmt, omp_context *outer_ctx)
{
  omp_context *ctx;
  tree name;

  ctx = new_omp_context (stmt, outer_ctx);
  ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
  ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
  name = create_tmp_var_name (".omp_copy_s");
  name = build_decl (gimple_location (stmt),
                     TYPE_DECL, name, ctx->record_type);
  TYPE_NAME (ctx->record_type) = name;

  scan_sharing_clauses (gimple_omp_single_clauses (stmt), ctx);
  scan_omp (gimple_omp_body (stmt), ctx);

  if (TYPE_FIELDS (ctx->record_type) == NULL)
    ctx->record_type = NULL;
  else
    layout_type (ctx->record_type);
}


/* Check OpenMP nesting restrictions.  */
static void
check_omp_nesting_restrictions (gimple stmt, omp_context *ctx)
{
  switch (gimple_code (stmt))
    {
    case GIMPLE_OMP_FOR:
    case GIMPLE_OMP_SECTIONS:
    case GIMPLE_OMP_SINGLE:
    case GIMPLE_CALL:
      for (; ctx != NULL; ctx = ctx->outer)
        switch (gimple_code (ctx->stmt))
          {
          case GIMPLE_OMP_FOR:
          case GIMPLE_OMP_SECTIONS:
          case GIMPLE_OMP_SINGLE:
          case GIMPLE_OMP_ORDERED:
          case GIMPLE_OMP_MASTER:
          case GIMPLE_OMP_TASK:
            if (is_gimple_call (stmt))
              {
                warning (0, "barrier region may not be closely nested inside "
                            "of work-sharing, critical, ordered, master or "
                            "explicit task region");
                return;
              }
            warning (0, "work-sharing region may not be closely nested inside "
                        "of work-sharing, critical, ordered, master or explicit "
                        "task region");
            return;
          case GIMPLE_OMP_PARALLEL:
            return;
          default:
            break;
          }
      break;
    case GIMPLE_OMP_MASTER:
      for (; ctx != NULL; ctx = ctx->outer)
        switch (gimple_code (ctx->stmt))
          {
          case GIMPLE_OMP_FOR:
          case GIMPLE_OMP_SECTIONS:
          case GIMPLE_OMP_SINGLE:
          case GIMPLE_OMP_TASK:
            warning (0, "master region may not be closely nested inside "
                        "of work-sharing or explicit task region");
            return;
          case GIMPLE_OMP_PARALLEL:
            return;
          default:
            break;
          }
      break;
    case GIMPLE_OMP_ORDERED:
      for (; ctx != NULL; ctx = ctx->outer)
        switch (gimple_code (ctx->stmt))
          {
          case GIMPLE_OMP_CRITICAL:
          case GIMPLE_OMP_TASK:
            warning (0, "ordered region may not be closely nested inside "
                        "of critical or explicit task region");
            return;
          case GIMPLE_OMP_FOR:
            if (find_omp_clause (gimple_omp_for_clauses (ctx->stmt),
                                 OMP_CLAUSE_ORDERED) == NULL)
              warning (0, "ordered region must be closely nested inside "
                          "a loop region with an ordered clause");
            return;
          case GIMPLE_OMP_PARALLEL:
            return;
          default:
            break;
          }
      break;
    case GIMPLE_OMP_CRITICAL:
      for (; ctx != NULL; ctx = ctx->outer)
        if (gimple_code (ctx->stmt) == GIMPLE_OMP_CRITICAL
            && (gimple_omp_critical_name (stmt)
                == gimple_omp_critical_name (ctx->stmt)))
          {
            warning (0, "critical region may not be nested inside a critical "
                        "region with the same name");
            return;
          }
      break;
    default:
      break;
    }
}
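
/* Example of a nesting the checks above reject (illustrative):

        #pragma omp parallel
        #pragma omp single
          {
            #pragma omp for      <- work-sharing closely nested in
            ...                     another work-sharing region
          }

   Walking CTX outwards hits the GIMPLE_OMP_SINGLE before any
   GIMPLE_OMP_PARALLEL, so the work-sharing warning is issued.  */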


/* Helper function for scan_omp.

   Callback for walk_tree or operators in walk_gimple_stmt used to
   scan for OpenMP directives in TP.  */

static tree
scan_omp_1_op (tree *tp, int *walk_subtrees, void *data)
{
  struct walk_stmt_info *wi = (struct walk_stmt_info *) data;
  omp_context *ctx = (omp_context *) wi->info;
  tree t = *tp;

  switch (TREE_CODE (t))
    {
    case VAR_DECL:
    case PARM_DECL:
    case LABEL_DECL:
    case RESULT_DECL:
      if (ctx)
        *tp = remap_decl (t, &ctx->cb);
      break;

    default:
      if (ctx && TYPE_P (t))
        *tp = remap_type (t, &ctx->cb);
      else if (!DECL_P (t))
        {
          *walk_subtrees = 1;
          if (ctx)
            {
              tree tem = remap_type (TREE_TYPE (t), &ctx->cb);
              if (tem != TREE_TYPE (t))
                {
                  if (TREE_CODE (t) == INTEGER_CST)
                    *tp = build_int_cst_wide (tem,
                                              TREE_INT_CST_LOW (t),
                                              TREE_INT_CST_HIGH (t));
                  else
                    TREE_TYPE (t) = tem;
                }
            }
        }
      break;
    }

  return NULL_TREE;
}


/* Helper function for scan_omp.

   Callback for walk_gimple_stmt used to scan for OpenMP directives in
   the current statement in GSI.  */

static tree
scan_omp_1_stmt (gimple_stmt_iterator *gsi, bool *handled_ops_p,
                 struct walk_stmt_info *wi)
{
  gimple stmt = gsi_stmt (*gsi);
  omp_context *ctx = (omp_context *) wi->info;

  if (gimple_has_location (stmt))
    input_location = gimple_location (stmt);

  /* Check the OpenMP nesting restrictions.  */
  if (ctx != NULL)
    {
      if (is_gimple_omp (stmt))
        check_omp_nesting_restrictions (stmt, ctx);
      else if (is_gimple_call (stmt))
        {
          tree fndecl = gimple_call_fndecl (stmt);
          if (fndecl && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL
              && DECL_FUNCTION_CODE (fndecl) == BUILT_IN_GOMP_BARRIER)
            check_omp_nesting_restrictions (stmt, ctx);
        }
    }

  *handled_ops_p = true;

  switch (gimple_code (stmt))
    {
    case GIMPLE_OMP_PARALLEL:
      taskreg_nesting_level++;
      scan_omp_parallel (gsi, ctx);
      taskreg_nesting_level--;
      break;

    case GIMPLE_OMP_TASK:
      taskreg_nesting_level++;
      scan_omp_task (gsi, ctx);
      taskreg_nesting_level--;
      break;

    case GIMPLE_OMP_FOR:
      scan_omp_for (stmt, ctx);
      break;

    case GIMPLE_OMP_SECTIONS:
      scan_omp_sections (stmt, ctx);
      break;

    case GIMPLE_OMP_SINGLE:
      scan_omp_single (stmt, ctx);
      break;

    case GIMPLE_OMP_SECTION:
    case GIMPLE_OMP_MASTER:
    case GIMPLE_OMP_ORDERED:
    case GIMPLE_OMP_CRITICAL:
      ctx = new_omp_context (stmt, ctx);
      scan_omp (gimple_omp_body (stmt), ctx);
      break;

    case GIMPLE_BIND:
      {
        tree var;

        *handled_ops_p = false;
        if (ctx)
          for (var = gimple_bind_vars (stmt); var ; var = TREE_CHAIN (var))
            insert_decl_map (&ctx->cb, var, var);
      }
      break;
    default:
      *handled_ops_p = false;
      break;
    }

  return NULL_TREE;
}


/* Scan all the statements starting at the current statement.  CTX
   contains context information about the OpenMP directives and
   clauses found during the scan.  */

static void
scan_omp (gimple_seq body, omp_context *ctx)
{
  location_t saved_location;
  struct walk_stmt_info wi;

  memset (&wi, 0, sizeof (wi));
  wi.info = ctx;
  wi.want_locations = true;

  saved_location = input_location;
  walk_gimple_seq (body, scan_omp_1_stmt, scan_omp_1_op, &wi);
  input_location = saved_location;
}

/* Re-gimplification and code generation routines.  */

/* Build a call to GOMP_barrier.  */

static tree
build_omp_barrier (void)
{
  return build_call_expr (built_in_decls[BUILT_IN_GOMP_BARRIER], 0);
}

/* If a context was created for STMT when it was scanned, return it.  */

static omp_context *
maybe_lookup_ctx (gimple stmt)
{
  splay_tree_node n;
  n = splay_tree_lookup (all_contexts, (splay_tree_key) stmt);
  return n ? (omp_context *) n->value : NULL;
}


/* Find the mapping for DECL in CTX or the immediately enclosing
   context that has a mapping for DECL.

   If CTX is a nested parallel directive, we may have to use the decl
   mappings created in CTX's parent context.  Suppose that we have the
   following parallel nesting (variable UIDs shown for clarity):

        iD.1562 = 0;
        #omp parallel shared(iD.1562)           -> outer parallel
          iD.1562 = iD.1562 + 1;

          #omp parallel shared (iD.1562)        -> inner parallel
             iD.1562 = iD.1562 - 1;

   Each parallel structure will create a distinct .omp_data_s structure
   for copying iD.1562 in/out of the directive:

        outer parallel          .omp_data_s.1.i -> iD.1562
        inner parallel          .omp_data_s.2.i -> iD.1562

   A shared variable mapping will produce a copy-out operation before
   the parallel directive and a copy-in operation after it.  So, in
   this case we would have:

        iD.1562 = 0;
        .omp_data_o.1.i = iD.1562;
        #omp parallel shared(iD.1562)           -> outer parallel
          .omp_data_i.1 = &.omp_data_o.1
          .omp_data_i.1->i = .omp_data_i.1->i + 1;

          .omp_data_o.2.i = iD.1562;            -> **
          #omp parallel shared(iD.1562)         -> inner parallel
            .omp_data_i.2 = &.omp_data_o.2
            .omp_data_i.2->i = .omp_data_i.2->i - 1;

            ** This is a problem.  The symbol iD.1562 cannot be referenced
               inside the body of the outer parallel region.  But since we
               are emitting this copy operation while expanding the inner
               parallel directive, we need to access the CTX structure of
               the outer parallel directive to get the correct mapping:

                .omp_data_o.2.i = .omp_data_i.1->i

   Since there may be other workshare or parallel directives enclosing
   the parallel directive, it may be necessary to walk up the context
   parent chain.  This is not a problem in general because nested
   parallelism happens only rarely.  */

static tree
lookup_decl_in_outer_ctx (tree decl, omp_context *ctx)
{
  tree t;
  omp_context *up;

  for (up = ctx->outer, t = NULL; up && t == NULL; up = up->outer)
    t = maybe_lookup_decl (decl, up);

  gcc_assert (!ctx->is_nested || t || is_global_var (decl));

  return t ? t : decl;
}


/* Similar to lookup_decl_in_outer_ctx, but return DECL if not found
   in outer contexts.  */

static tree
maybe_lookup_decl_in_outer_ctx (tree decl, omp_context *ctx)
{
  tree t = NULL;
  omp_context *up;

  for (up = ctx->outer, t = NULL; up && t == NULL; up = up->outer)
    t = maybe_lookup_decl (decl, up);

  return t ? t : decl;
}


/* Construct the initialization value for reduction CLAUSE.  */
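
/* The mapping implemented below follows the usual reduction identity
   values; for instance (illustrative summary, the switch itself is
   authoritative):

        reduction (+:x)  ->  0          reduction (*:x)  ->  1
        reduction (|:x)  ->  0          reduction (&:x)  ->  ~0
        reduction (max:x) -> least value of the type
        reduction (min:x) -> greatest value of the type  */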

tree
omp_reduction_init (tree clause, tree type)
{
  location_t loc = OMP_CLAUSE_LOCATION (clause);
  switch (OMP_CLAUSE_REDUCTION_CODE (clause))
    {
    case PLUS_EXPR:
    case MINUS_EXPR:
    case BIT_IOR_EXPR:
    case BIT_XOR_EXPR:
    case TRUTH_OR_EXPR:
    case TRUTH_ORIF_EXPR:
    case TRUTH_XOR_EXPR:
    case NE_EXPR:
      return fold_convert_loc (loc, type, integer_zero_node);

    case MULT_EXPR:
    case TRUTH_AND_EXPR:
    case TRUTH_ANDIF_EXPR:
    case EQ_EXPR:
      return fold_convert_loc (loc, type, integer_one_node);

    case BIT_AND_EXPR:
      return fold_convert_loc (loc, type, integer_minus_one_node);

    case MAX_EXPR:
      if (SCALAR_FLOAT_TYPE_P (type))
        {
          REAL_VALUE_TYPE max, min;
          if (HONOR_INFINITIES (TYPE_MODE (type)))
            {
              real_inf (&max);
              real_arithmetic (&min, NEGATE_EXPR, &max, NULL);
            }
          else
            real_maxval (&min, 1, TYPE_MODE (type));
          return build_real (type, min);
        }
      else
        {
          gcc_assert (INTEGRAL_TYPE_P (type));
          return TYPE_MIN_VALUE (type);
        }

    case MIN_EXPR:
      if (SCALAR_FLOAT_TYPE_P (type))
        {
          REAL_VALUE_TYPE max;
          if (HONOR_INFINITIES (TYPE_MODE (type)))
            real_inf (&max);
          else
            real_maxval (&max, 0, TYPE_MODE (type));
          return build_real (type, max);
        }
      else
        {
          gcc_assert (INTEGRAL_TYPE_P (type));
          return TYPE_MAX_VALUE (type);
        }

    default:
      gcc_unreachable ();
    }
}

/* Generate code to implement the input clauses, FIRSTPRIVATE and COPYIN,
   from the receiver (aka child) side and initializers for REFERENCE_TYPE
   private variables.  Initialization statements go in ILIST, while calls
   to destructors go in DLIST.  */

static void
lower_rec_input_clauses (tree clauses, gimple_seq *ilist, gimple_seq *dlist,
                         omp_context *ctx)
{
  gimple_stmt_iterator diter;
  tree c, dtor, copyin_seq, x, ptr;
  bool copyin_by_ref = false;
  bool lastprivate_firstprivate = false;
  int pass;

  *dlist = gimple_seq_alloc ();
  diter = gsi_start (*dlist);
  copyin_seq = NULL;

  /* Do all the fixed sized types in the first pass, and the variable sized
     types in the second pass.  This makes sure that the scalar arguments to
     the variable sized types are processed before we use them in the
     variable sized operations.  */
  for (pass = 0; pass < 2; ++pass)
    {
      for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
        {
          enum omp_clause_code c_kind = OMP_CLAUSE_CODE (c);
          tree var, new_var;
          bool by_ref;
          location_t clause_loc = OMP_CLAUSE_LOCATION (c);

          switch (c_kind)
            {
            case OMP_CLAUSE_PRIVATE:
              if (OMP_CLAUSE_PRIVATE_DEBUG (c))
                continue;
              break;
            case OMP_CLAUSE_SHARED:
              if (maybe_lookup_decl (OMP_CLAUSE_DECL (c), ctx) == NULL)
                {
                  gcc_assert (is_global_var (OMP_CLAUSE_DECL (c)));
                  continue;
                }
            case OMP_CLAUSE_FIRSTPRIVATE:
            case OMP_CLAUSE_COPYIN:
            case OMP_CLAUSE_REDUCTION:
              break;
            case OMP_CLAUSE_LASTPRIVATE:
              if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
                {
                  lastprivate_firstprivate = true;
                  if (pass != 0)
                    continue;
                }
              break;
            default:
              continue;
            }

          new_var = var = OMP_CLAUSE_DECL (c);
          if (c_kind != OMP_CLAUSE_COPYIN)
            new_var = lookup_decl (var, ctx);

          if (c_kind == OMP_CLAUSE_SHARED || c_kind == OMP_CLAUSE_COPYIN)
            {
              if (pass != 0)
                continue;
            }
          else if (is_variable_sized (var))
            {
              /* For variable sized types, we need to allocate the
                 actual storage here.  Call alloca and store the
                 result in the pointer decl that we created elsewhere.  */
              if (pass == 0)
                continue;

              if (c_kind != OMP_CLAUSE_FIRSTPRIVATE || !is_task_ctx (ctx))
                {
                  gimple stmt;
                  tree tmp;

                  ptr = DECL_VALUE_EXPR (new_var);
                  gcc_assert (TREE_CODE (ptr) == INDIRECT_REF);
                  ptr = TREE_OPERAND (ptr, 0);
                  gcc_assert (DECL_P (ptr));
                  x = TYPE_SIZE_UNIT (TREE_TYPE (new_var));

                  /* void *tmp = __builtin_alloca */
                  stmt
                    = gimple_build_call (built_in_decls[BUILT_IN_ALLOCA], 1, x);
                  tmp = create_tmp_var_raw (ptr_type_node, NULL);
                  gimple_add_tmp_var (tmp);
                  gimple_call_set_lhs (stmt, tmp);

                  gimple_seq_add_stmt (ilist, stmt);

                  x = fold_convert_loc (clause_loc, TREE_TYPE (ptr), tmp);
                  gimplify_assign (ptr, x, ilist);
                }
            }
          else if (is_reference (var))
            {
              /* For references that are being privatized for Fortran,
                 allocate new backing storage for the new pointer
                 variable.  This allows us to avoid changing all the
                 code that expects a pointer to something that expects
                 a direct variable.  Note that this doesn't apply to
                 C++, since reference types are disallowed in data
                 sharing clauses there, except for NRV optimized
                 return values.  */
              if (pass == 0)
                continue;

              x = TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (new_var)));
              if (c_kind == OMP_CLAUSE_FIRSTPRIVATE && is_task_ctx (ctx))
                {
                  x = build_receiver_ref (var, false, ctx);
                  x = build_fold_addr_expr_loc (clause_loc, x);
                }
              else if (TREE_CONSTANT (x))
                {
                  const char *name = NULL;
                  if (DECL_NAME (var))
                    name = IDENTIFIER_POINTER (DECL_NAME (new_var));

                  x = create_tmp_var_raw (TREE_TYPE (TREE_TYPE (new_var)),
                                          name);
                  gimple_add_tmp_var (x);
                  TREE_ADDRESSABLE (x) = 1;
                  x = build_fold_addr_expr_loc (clause_loc, x);
                }
              else
                {
                  x = build_call_expr_loc (clause_loc,
                                           built_in_decls[BUILT_IN_ALLOCA],
                                           1, x);
                }

              x = fold_convert_loc (clause_loc, TREE_TYPE (new_var), x);
              gimplify_assign (new_var, x, ilist);

              new_var = build_simple_mem_ref_loc (clause_loc, new_var);
            }
          else if (c_kind == OMP_CLAUSE_REDUCTION
                   && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
            {
              if (pass == 0)
                continue;
            }
          else if (pass != 0)
            continue;

          switch (OMP_CLAUSE_CODE (c))
            {
            case OMP_CLAUSE_SHARED:
              /* Shared global vars are just accessed directly.  */
              if (is_global_var (new_var))
                break;
              /* Set up the DECL_VALUE_EXPR for shared variables now.  This
                 needs to be delayed until after fixup_child_record_type so
                 that we get the correct type during the dereference.  */
              by_ref = use_pointer_for_field (var, ctx);
              x = build_receiver_ref (var, by_ref, ctx);
              SET_DECL_VALUE_EXPR (new_var, x);
              DECL_HAS_VALUE_EXPR_P (new_var) = 1;

              /* ??? If VAR is not passed by reference, and the variable
                 hasn't been initialized yet, then we'll get a warning for
                 the store into the omp_data_s structure.  Ideally, we'd be
                 able to notice this and not store anything at all, but
                 we're generating code too early.  Suppress the warning.  */
              if (!by_ref)
                TREE_NO_WARNING (var) = 1;
              break;

            case OMP_CLAUSE_LASTPRIVATE:
              if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
                break;
              /* FALLTHRU */

            case OMP_CLAUSE_PRIVATE:
              if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_PRIVATE)
                x = build_outer_var_ref (var, ctx);
              else if (OMP_CLAUSE_PRIVATE_OUTER_REF (c))
                {
                  if (is_task_ctx (ctx))
                    x = build_receiver_ref (var, false, ctx);
                  else
                    x = build_outer_var_ref (var, ctx);
                }
              else
                x = NULL;
              x = lang_hooks.decls.omp_clause_default_ctor (c, new_var, x);
              if (x)
                gimplify_and_add (x, ilist);
              /* FALLTHRU */

            do_dtor:
              x = lang_hooks.decls.omp_clause_dtor (c, new_var);
              if (x)
                {
                  gimple_seq tseq = NULL;

                  dtor = x;
                  gimplify_stmt (&dtor, &tseq);
                  gsi_insert_seq_before (&diter, tseq, GSI_SAME_STMT);
                }
              break;

            case OMP_CLAUSE_FIRSTPRIVATE:
              if (is_task_ctx (ctx))
                {
                  if (is_reference (var) || is_variable_sized (var))
                    goto do_dtor;
                  else if (is_global_var (maybe_lookup_decl_in_outer_ctx (var,
                                                                          ctx))
                           || use_pointer_for_field (var, NULL))
                    {
                      x = build_receiver_ref (var, false, ctx);
                      SET_DECL_VALUE_EXPR (new_var, x);
                      DECL_HAS_VALUE_EXPR_P (new_var) = 1;
                      goto do_dtor;
                    }
                }
              x = build_outer_var_ref (var, ctx);
              x = lang_hooks.decls.omp_clause_copy_ctor (c, new_var, x);
              gimplify_and_add (x, ilist);
              goto do_dtor;
              break;

            case OMP_CLAUSE_COPYIN:
              by_ref = use_pointer_for_field (var, NULL);
              x = build_receiver_ref (var, by_ref, ctx);
              x = lang_hooks.decls.omp_clause_assign_op (c, new_var, x);
              append_to_statement_list (x, &copyin_seq);
              copyin_by_ref |= by_ref;
              break;

            case OMP_CLAUSE_REDUCTION:
              if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
                {
                  tree placeholder = OMP_CLAUSE_REDUCTION_PLACEHOLDER (c);
                  x = build_outer_var_ref (var, ctx);

                  if (is_reference (var))
                    x = build_fold_addr_expr_loc (clause_loc, x);
                  SET_DECL_VALUE_EXPR (placeholder, x);
                  DECL_HAS_VALUE_EXPR_P (placeholder) = 1;
                  lower_omp (OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c), ctx);
                  gimple_seq_add_seq (ilist,
                                      OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c));
                  OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c) = NULL;
                  DECL_HAS_VALUE_EXPR_P (placeholder) = 0;
                }
              else
                {
                  x = omp_reduction_init (c, TREE_TYPE (new_var));
                  gcc_assert (TREE_CODE (TREE_TYPE (new_var)) != ARRAY_TYPE);
                  gimplify_assign (new_var, x, ilist);
                }
              break;

            default:
              gcc_unreachable ();
            }
        }
    }

  /* The copyin sequence is not to be executed by the main thread, since
     that would result in self-copies.  Perhaps not visible to scalars,
     but it certainly is to C++ operator=.  */
  if (copyin_seq)
    {
      x = build_call_expr (built_in_decls[BUILT_IN_OMP_GET_THREAD_NUM], 0);
      x = build2 (NE_EXPR, boolean_type_node, x,
                  build_int_cst (TREE_TYPE (x), 0));
      x = build3 (COND_EXPR, void_type_node, x, copyin_seq, NULL);
      gimplify_and_add (x, ilist);
    }

  /* If any copyin variable is passed by reference, we must ensure the
     master thread doesn't modify it before it is copied over in all
     threads.  Similarly for variables in both firstprivate and
     lastprivate clauses we need to ensure the lastprivate copying
     happens after firstprivate copying in all threads.  */
  if (copyin_by_ref || lastprivate_firstprivate)
    gimplify_and_add (build_omp_barrier (), ilist);
}
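
/* Receiver-side sketch (illustrative, names invented) of what the
   above emits for "#pragma omp parallel firstprivate (a) copyin (t)":

        a.1 = .omp_data_i->a;                      <- ILIST
        if (__builtin_omp_get_thread_num () != 0)
          t = <master's t via .omp_data_i>;        <- guarded copyin
        ...                                        <- dtors go to DLIST

   plus a trailing GOMP_barrier () when copyin is by reference or a
   variable is both firstprivate and lastprivate.  */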

/* Generate code to implement the LASTPRIVATE clauses.  This is used for
   both parallel and workshare constructs.  PREDICATE may be NULL if it's
   always true.  */

static void
lower_lastprivate_clauses (tree clauses, tree predicate, gimple_seq *stmt_list,
                           omp_context *ctx)
{
  tree x, c, label = NULL;
  bool par_clauses = false;

  /* Early exit if there are no lastprivate clauses.  */
  clauses = find_omp_clause (clauses, OMP_CLAUSE_LASTPRIVATE);
  if (clauses == NULL)
    {
      /* If this was a workshare clause, see if it had been combined
         with its parallel.  In that case, look for the clauses on the
         parallel statement itself.  */
      if (is_parallel_ctx (ctx))
        return;

      ctx = ctx->outer;
      if (ctx == NULL || !is_parallel_ctx (ctx))
        return;

      clauses = find_omp_clause (gimple_omp_parallel_clauses (ctx->stmt),
                                 OMP_CLAUSE_LASTPRIVATE);
      if (clauses == NULL)
        return;
      par_clauses = true;
    }

  if (predicate)
    {
      gimple stmt;
      tree label_true, arm1, arm2;

      label = create_artificial_label (UNKNOWN_LOCATION);
      label_true = create_artificial_label (UNKNOWN_LOCATION);
      arm1 = TREE_OPERAND (predicate, 0);
      arm2 = TREE_OPERAND (predicate, 1);
      gimplify_expr (&arm1, stmt_list, NULL, is_gimple_val, fb_rvalue);
      gimplify_expr (&arm2, stmt_list, NULL, is_gimple_val, fb_rvalue);
      stmt = gimple_build_cond (TREE_CODE (predicate), arm1, arm2,
                                label_true, label);
      gimple_seq_add_stmt (stmt_list, stmt);
      gimple_seq_add_stmt (stmt_list, gimple_build_label (label_true));
    }

  for (c = clauses; c ;)
    {
      tree var, new_var;
      location_t clause_loc = OMP_CLAUSE_LOCATION (c);

      if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE)
        {
          var = OMP_CLAUSE_DECL (c);
          new_var = lookup_decl (var, ctx);

          if (OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c))
            {
              lower_omp (OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c), ctx);
              gimple_seq_add_seq (stmt_list,
                                  OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c));
            }
          OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c) = NULL;

          x = build_outer_var_ref (var, ctx);
          if (is_reference (var))
            new_var = build_simple_mem_ref_loc (clause_loc, new_var);
          x = lang_hooks.decls.omp_clause_assign_op (c, x, new_var);
          gimplify_and_add (x, stmt_list);
        }
      c = OMP_CLAUSE_CHAIN (c);
      if (c == NULL && !par_clauses)
        {
          /* If this was a workshare clause, see if it had been combined
             with its parallel.  In that case, continue looking for the
             clauses also on the parallel statement itself.  */
          if (is_parallel_ctx (ctx))
            break;

          ctx = ctx->outer;
          if (ctx == NULL || !is_parallel_ctx (ctx))
            break;

          c = find_omp_clause (gimple_omp_parallel_clauses (ctx->stmt),
                               OMP_CLAUSE_LASTPRIVATE);
          par_clauses = true;
        }
    }

  if (label)
    gimple_seq_add_stmt (stmt_list, gimple_build_label (label));
}
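
/* Sketch for "#pragma omp for lastprivate (x)" (illustrative): the
   PREDICATE handed in by the loop expansion tests whether this thread
   ran the sequentially last iteration, so the emitted shape is

        if (<this thread ran the last iteration>) goto label_true;
        goto label;
      label_true:
        x = x.private;
      label:

   and only one thread writes the final value back to X.  */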

/* Generate code to implement the REDUCTION clauses.  */

static void
lower_reduction_clauses (tree clauses, gimple_seq *stmt_seqp, omp_context *ctx)
{
  gimple_seq sub_seq = NULL;
  gimple stmt;
  tree x, c;
  int count = 0;

  /* First see if there is exactly one reduction clause.  Use OMP_ATOMIC
     update in that case, otherwise use a lock.  */
  for (c = clauses; c && count < 2; c = OMP_CLAUSE_CHAIN (c))
    if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION)
      {
        if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
          {
            /* Never use OMP_ATOMIC for array reductions.  */
            count = -1;
            break;
          }
        count++;
      }

  if (count == 0)
    return;

  for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
    {
      tree var, ref, new_var;
      enum tree_code code;
      location_t clause_loc = OMP_CLAUSE_LOCATION (c);

      if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_REDUCTION)
        continue;

      var = OMP_CLAUSE_DECL (c);
      new_var = lookup_decl (var, ctx);
      if (is_reference (var))
        new_var = build_simple_mem_ref_loc (clause_loc, new_var);
      ref = build_outer_var_ref (var, ctx);
      code = OMP_CLAUSE_REDUCTION_CODE (c);

      /* reduction(-:var) sums up the partial results, so it acts
         identically to reduction(+:var).  */
      if (code == MINUS_EXPR)
        code = PLUS_EXPR;

      if (count == 1)
        {
          tree addr = build_fold_addr_expr_loc (clause_loc, ref);

          addr = save_expr (addr);
          ref = build1 (INDIRECT_REF, TREE_TYPE (TREE_TYPE (addr)), addr);
          x = fold_build2_loc (clause_loc, code, TREE_TYPE (ref), ref,
                               new_var);
          x = build2 (OMP_ATOMIC, void_type_node, addr, x);
          gimplify_and_add (x, stmt_seqp);
          return;
        }

      if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
        {
          tree placeholder = OMP_CLAUSE_REDUCTION_PLACEHOLDER (c);

          if (is_reference (var))
            ref = build_fold_addr_expr_loc (clause_loc, ref);
          SET_DECL_VALUE_EXPR (placeholder, ref);
          DECL_HAS_VALUE_EXPR_P (placeholder) = 1;
          lower_omp (OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c), ctx);
          gimple_seq_add_seq (&sub_seq, OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c));
          OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c) = NULL;
          OMP_CLAUSE_REDUCTION_PLACEHOLDER (c) = NULL;
        }
      else
        {
          x = build2 (code, TREE_TYPE (ref), ref, new_var);
          ref = build_outer_var_ref (var, ctx);
          gimplify_assign (ref, x, &sub_seq);
        }
    }

  stmt = gimple_build_call (built_in_decls[BUILT_IN_GOMP_ATOMIC_START], 0);
  gimple_seq_add_stmt (stmt_seqp, stmt);

  gimple_seq_add_seq (stmt_seqp, sub_seq);

  stmt = gimple_build_call (built_in_decls[BUILT_IN_GOMP_ATOMIC_END], 0);
  gimple_seq_add_stmt (stmt_seqp, stmt);
}
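
/* The two strategies above, on user code (illustrative):

     one scalar clause, reduction (+:s):
        #pragma omp atomic
        s = s + s.private;

     several clauses, reduction (+:s) reduction (*:p):
        GOMP_atomic_start ();
        s = s + s.private;
        p = p * p.private;
        GOMP_atomic_end ();  */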

/* Generate code to implement the COPYPRIVATE clauses.  */

static void
lower_copyprivate_clauses (tree clauses, gimple_seq *slist, gimple_seq *rlist,
                           omp_context *ctx)
{
  tree c;

  for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
    {
      tree var, new_var, ref, x;
      bool by_ref;
      location_t clause_loc = OMP_CLAUSE_LOCATION (c);

      if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_COPYPRIVATE)
        continue;

      var = OMP_CLAUSE_DECL (c);
      by_ref = use_pointer_for_field (var, NULL);

      ref = build_sender_ref (var, ctx);
      x = new_var = lookup_decl_in_outer_ctx (var, ctx);
      if (by_ref)
        {
          x = build_fold_addr_expr_loc (clause_loc, new_var);
          x = fold_convert_loc (clause_loc, TREE_TYPE (ref), x);
        }
      gimplify_assign (ref, x, slist);

      ref = build_receiver_ref (var, false, ctx);
      if (by_ref)
        {
          ref = fold_convert_loc (clause_loc,
                                  build_pointer_type (TREE_TYPE (new_var)),
                                  ref);
          ref = build_fold_indirect_ref_loc (clause_loc, ref);
        }
      if (is_reference (var))
        {
          ref = fold_convert_loc (clause_loc, TREE_TYPE (new_var), ref);
          ref = build_simple_mem_ref_loc (clause_loc, ref);
          new_var = build_simple_mem_ref_loc (clause_loc, new_var);
        }
      x = lang_hooks.decls.omp_clause_assign_op (c, new_var, ref);
      gimplify_and_add (x, rlist);
    }
}
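
/* Sketch for "#pragma omp single copyprivate (x)" (illustrative): the
   thread that executed the single region fills the broadcast record
   in SLIST,

        .omp_copy_o.x = x;      (or &x when passed by reference)

   and the other threads copy the value back out in RLIST,

        x = .omp_copy_i->x;

   so one thread's result reaches the whole team.  */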

/* Generate code to implement the clauses, FIRSTPRIVATE, COPYIN, LASTPRIVATE,
   and REDUCTION from the sender (aka parent) side.  */

static void
lower_send_clauses (tree clauses, gimple_seq *ilist, gimple_seq *olist,
                    omp_context *ctx)
{
  tree c;

  for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
    {
      tree val, ref, x, var;
      bool by_ref, do_in = false, do_out = false;
      location_t clause_loc = OMP_CLAUSE_LOCATION (c);

      switch (OMP_CLAUSE_CODE (c))
        {
        case OMP_CLAUSE_PRIVATE:
          if (OMP_CLAUSE_PRIVATE_OUTER_REF (c))
            break;
          continue;
        case OMP_CLAUSE_FIRSTPRIVATE:
        case OMP_CLAUSE_COPYIN:
        case OMP_CLAUSE_LASTPRIVATE:
        case OMP_CLAUSE_REDUCTION:
          break;
        default:
          continue;
        }

      val = OMP_CLAUSE_DECL (c);
      var = lookup_decl_in_outer_ctx (val, ctx);

      if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_COPYIN
          && is_global_var (var))
        continue;
      if (is_variable_sized (val))
        continue;
      by_ref = use_pointer_for_field (val, NULL);

      switch (OMP_CLAUSE_CODE (c))
        {
        case OMP_CLAUSE_PRIVATE:
        case OMP_CLAUSE_FIRSTPRIVATE:
        case OMP_CLAUSE_COPYIN:
          do_in = true;
          break;

        case OMP_CLAUSE_LASTPRIVATE:
          if (by_ref || is_reference (val))
            {
              if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
                continue;
              do_in = true;
            }
          else
            {
              do_out = true;
              if (lang_hooks.decls.omp_private_outer_ref (val))
                do_in = true;
            }
          break;

        case OMP_CLAUSE_REDUCTION:
          do_in = true;
          do_out = !(by_ref || is_reference (val));
          break;

        default:
          gcc_unreachable ();
        }

      if (do_in)
        {
          ref = build_sender_ref (val, ctx);
          x = by_ref ? build_fold_addr_expr_loc (clause_loc, var) : var;
          gimplify_assign (ref, x, ilist);
          if (is_task_ctx (ctx))
            DECL_ABSTRACT_ORIGIN (TREE_OPERAND (ref, 1)) = NULL;
        }

      if (do_out)
        {
          ref = build_sender_ref (val, ctx);
          gimplify_assign (var, ref, olist);
        }
    }
}

/* Generate code to implement SHARED from the sender (aka parent)
   side.  This is trickier, since GIMPLE_OMP_PARALLEL_CLAUSES doesn't
   list things that got automatically shared.  */

static void
lower_send_shared_vars (gimple_seq *ilist, gimple_seq *olist, omp_context *ctx)
{
  tree var, ovar, nvar, f, x, record_type;

  if (ctx->record_type == NULL)
    return;

  record_type = ctx->srecord_type ? ctx->srecord_type : ctx->record_type;
  for (f = TYPE_FIELDS (record_type); f ; f = TREE_CHAIN (f))
    {
      ovar = DECL_ABSTRACT_ORIGIN (f);
      nvar = maybe_lookup_decl (ovar, ctx);
      if (!nvar || !DECL_HAS_VALUE_EXPR_P (nvar))
        continue;

      /* If CTX is a nested parallel directive, find the immediately
         enclosing parallel or workshare construct that contains a
         mapping for OVAR.  */
      var = lookup_decl_in_outer_ctx (ovar, ctx);

      if (use_pointer_for_field (ovar, ctx))
        {
          x = build_sender_ref (ovar, ctx);
          var = build_fold_addr_expr (var);
          gimplify_assign (x, var, ilist);
        }
      else
        {
          x = build_sender_ref (ovar, ctx);
          gimplify_assign (x, var, ilist);

          if (!TREE_READONLY (var)
              /* We don't need to receive a new reference to a result
                 or parm decl.  In fact we may not store to it as we will
                 invalidate any pending RSO and generate wrong gimple
                 during inlining.  */
              && !((TREE_CODE (var) == RESULT_DECL
                    || TREE_CODE (var) == PARM_DECL)
                   && DECL_BY_REFERENCE (var)))
            {
              x = build_sender_ref (ovar, ctx);
              gimplify_assign (var, x, olist);
            }
        }
    }
}


/* A convenience function to build an empty GIMPLE_COND with just the
   condition.  */

static gimple
gimple_build_cond_empty (tree cond)
{
  enum tree_code pred_code;
  tree lhs, rhs;

  gimple_cond_get_ops_from_tree (cond, &pred_code, &lhs, &rhs);
  return gimple_build_cond (pred_code, lhs, rhs, NULL_TREE, NULL_TREE);
}


/* Build the function calls to GOMP_parallel_start etc. to actually
   generate the parallel operation.  REGION is the parallel region
   being expanded.  BB is the block where to insert the code.  WS_ARGS
   will be set if this is a call to a combined parallel+workshare
   construct; it contains the list of additional arguments needed by
   the workshare construct.  */

static void
expand_parallel_call (struct omp_region *region, basic_block bb,
                      gimple entry_stmt, VEC(tree,gc) *ws_args)
{
  tree t, t1, t2, val, cond, c, clauses;
  gimple_stmt_iterator gsi;
  gimple stmt;
  int start_ix;
  location_t clause_loc;
  VEC(tree,gc) *args;

  clauses = gimple_omp_parallel_clauses (entry_stmt);

  /* Determine what flavor of GOMP_parallel_start we will be
     emitting.  */
  start_ix = BUILT_IN_GOMP_PARALLEL_START;
  if (is_combined_parallel (region))
    {
      switch (region->inner->type)
        {
        case GIMPLE_OMP_FOR:
          gcc_assert (region->inner->sched_kind != OMP_CLAUSE_SCHEDULE_AUTO);
          start_ix = BUILT_IN_GOMP_PARALLEL_LOOP_STATIC_START
                     + (region->inner->sched_kind
                        == OMP_CLAUSE_SCHEDULE_RUNTIME
                        ? 3 : region->inner->sched_kind);
          break;
        case GIMPLE_OMP_SECTIONS:
          start_ix = BUILT_IN_GOMP_PARALLEL_SECTIONS_START;
          break;
        default:
          gcc_unreachable ();
        }
    }

  /* By default, the value of NUM_THREADS is zero (selected at run time)
     and there is no conditional.  */
  cond = NULL_TREE;
  val = build_int_cst (unsigned_type_node, 0);

  c = find_omp_clause (clauses, OMP_CLAUSE_IF);
  if (c)
    cond = OMP_CLAUSE_IF_EXPR (c);

  c = find_omp_clause (clauses, OMP_CLAUSE_NUM_THREADS);
  if (c)
    {
      val = OMP_CLAUSE_NUM_THREADS_EXPR (c);
      clause_loc = OMP_CLAUSE_LOCATION (c);
    }
  else
    clause_loc = gimple_location (entry_stmt);

  /* Ensure 'val' is of the correct type.  */
  val = fold_convert_loc (clause_loc, unsigned_type_node, val);

  /* If we found the clause 'if (cond)', build either
     (cond != 0) or (cond ? val : 1u).  */
  if (cond)
    {
      gimple_stmt_iterator gsi;

      cond = gimple_boolify (cond);

      if (integer_zerop (val))
        val = fold_build2_loc (clause_loc,
                               EQ_EXPR, unsigned_type_node, cond,
                               build_int_cst (TREE_TYPE (cond), 0));
      else
        {
          basic_block cond_bb, then_bb, else_bb;
          edge e, e_then, e_else;
          tree tmp_then, tmp_else, tmp_join, tmp_var;

          tmp_var = create_tmp_var (TREE_TYPE (val), NULL);
          if (gimple_in_ssa_p (cfun))
            {
              tmp_then = make_ssa_name (tmp_var, NULL);
              tmp_else = make_ssa_name (tmp_var, NULL);
              tmp_join = make_ssa_name (tmp_var, NULL);
            }
          else
            {
              tmp_then = tmp_var;
              tmp_else = tmp_var;
              tmp_join = tmp_var;
            }

          e = split_block (bb, NULL);
          cond_bb = e->src;
          bb = e->dest;
          remove_edge (e);

          then_bb = create_empty_bb (cond_bb);
          else_bb = create_empty_bb (then_bb);
          set_immediate_dominator (CDI_DOMINATORS, then_bb, cond_bb);
          set_immediate_dominator (CDI_DOMINATORS, else_bb, cond_bb);

          stmt = gimple_build_cond_empty (cond);
          gsi = gsi_start_bb (cond_bb);
          gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);

          gsi = gsi_start_bb (then_bb);
          stmt = gimple_build_assign (tmp_then, val);
          gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);

          gsi = gsi_start_bb (else_bb);
          stmt = gimple_build_assign
                   (tmp_else, build_int_cst (unsigned_type_node, 1));
          gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);

          make_edge (cond_bb, then_bb, EDGE_TRUE_VALUE);
          make_edge (cond_bb, else_bb, EDGE_FALSE_VALUE);
          e_then = make_edge (then_bb, bb, EDGE_FALLTHRU);
          e_else = make_edge (else_bb, bb, EDGE_FALLTHRU);

          if (gimple_in_ssa_p (cfun))
            {
              gimple phi = create_phi_node (tmp_join, bb);
              SSA_NAME_DEF_STMT (tmp_join) = phi;
              add_phi_arg (phi, tmp_then, e_then, UNKNOWN_LOCATION);
              add_phi_arg (phi, tmp_else, e_else, UNKNOWN_LOCATION);
            }

          val = tmp_join;
        }

      gsi = gsi_start_bb (bb);
      val = force_gimple_operand_gsi (&gsi, val, true, NULL_TREE,
                                      false, GSI_CONTINUE_LINKING);
    }

  gsi = gsi_last_bb (bb);
  t = gimple_omp_parallel_data_arg (entry_stmt);
  if (t == NULL)
    t1 = null_pointer_node;
  else
    t1 = build_fold_addr_expr (t);
  t2 = build_fold_addr_expr (gimple_omp_parallel_child_fn (entry_stmt));

  args = VEC_alloc (tree, gc, 3 + VEC_length (tree, ws_args));
  VEC_quick_push (tree, args, t2);
  VEC_quick_push (tree, args, t1);
  VEC_quick_push (tree, args, val);
  VEC_splice (tree, args, ws_args);

  t = build_call_expr_loc_vec (UNKNOWN_LOCATION,
                               built_in_decls[start_ix], args);

  force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
                            false, GSI_CONTINUE_LINKING);

  t = gimple_omp_parallel_data_arg (entry_stmt);
  if (t == NULL)
    t = null_pointer_node;
  else
    t = build_fold_addr_expr (t);
  t = build_call_expr_loc (gimple_location (entry_stmt),
                           gimple_omp_parallel_child_fn (entry_stmt), 1, t);
  force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
                            false, GSI_CONTINUE_LINKING);

  t = build_call_expr_loc (gimple_location (entry_stmt),
                           built_in_decls[BUILT_IN_GOMP_PARALLEL_END], 0);
  force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
                            false, GSI_CONTINUE_LINKING);
}
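
/* Net effect of the above, schematically (argument values as computed
   there):

        GOMP_parallel_start (child_fn, &.omp_data_o, nthreads [, ws_args]);
        child_fn (&.omp_data_o);        <- the master joins its own team
        GOMP_parallel_end ();

   with NTHREADS equal to 0 when the runtime should pick the team size,
   and folded through the if-clause selection when one was present.  */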

/* Build the function call to GOMP_task to actually
   generate the task operation.  BB is the block where to insert the code.  */

static void
expand_task_call (basic_block bb, gimple entry_stmt)
{
  tree t, t1, t2, t3, flags, cond, c, clauses;
  gimple_stmt_iterator gsi;
  location_t loc = gimple_location (entry_stmt);

  clauses = gimple_omp_task_clauses (entry_stmt);

  c = find_omp_clause (clauses, OMP_CLAUSE_IF);
  if (c)
    cond = gimple_boolify (OMP_CLAUSE_IF_EXPR (c));
  else
    cond = boolean_true_node;

  c = find_omp_clause (clauses, OMP_CLAUSE_UNTIED);
  flags = build_int_cst (unsigned_type_node, (c ? 1 : 0));

  gsi = gsi_last_bb (bb);
  t = gimple_omp_task_data_arg (entry_stmt);
  if (t == NULL)
    t2 = null_pointer_node;
  else
    t2 = build_fold_addr_expr_loc (loc, t);
  t1 = build_fold_addr_expr_loc (loc, gimple_omp_task_child_fn (entry_stmt));
  t = gimple_omp_task_copy_fn (entry_stmt);
  if (t == NULL)
    t3 = null_pointer_node;
  else
    t3 = build_fold_addr_expr_loc (loc, t);

  t = build_call_expr (built_in_decls[BUILT_IN_GOMP_TASK], 7, t1, t2, t3,
                       gimple_omp_task_arg_size (entry_stmt),
                       gimple_omp_task_arg_align (entry_stmt), cond, flags);

  force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
                            false, GSI_CONTINUE_LINKING);
}
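
/* The generated call has the shape (schematic):

        GOMP_task (child_fn, &.omp_data_o, cpyfn, arg_size, arg_align,
                   if_cond, flags);

   CPYFN is null when bitwise copy of the argument block suffices,
   IF_COND folds to true in the absence of an if clause, and bit 0 of
   FLAGS marks the task untied.  */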


/* If exceptions are enabled, wrap the statements in BODY in a MUST_NOT_THROW
   catch handler and return it.  This prevents programs from violating the
   structured block semantics with throws.  */

static gimple_seq
maybe_catch_exception (gimple_seq body)
{
  gimple g;
  tree decl;

  if (!flag_exceptions)
    return body;

  if (lang_hooks.eh_protect_cleanup_actions != NULL)
    decl = lang_hooks.eh_protect_cleanup_actions ();
  else
    decl = built_in_decls[BUILT_IN_TRAP];

  g = gimple_build_eh_must_not_throw (decl);
  g = gimple_build_try (body, gimple_seq_alloc_with_stmt (g),
                        GIMPLE_TRY_CATCH);

  return gimple_seq_alloc_with_stmt (g);
}
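
/* The wrapping corresponds roughly to the source-level form

        try { BODY } catch (...) { <must-not-throw handler> }

   so an exception that would escape the structured block terminates
   the program instead of unwinding past the region boundary.  */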

/* Chain all the DECLs in V by their TREE_CHAIN fields.  */

static tree
vec2chain (VEC(tree,gc) *v)
{
  tree chain = NULL_TREE, t;
  unsigned ix;

  FOR_EACH_VEC_ELT_REVERSE (tree, v, ix, t)
    {
      TREE_CHAIN (t) = chain;
      chain = t;
    }

  return chain;
}


/* Remove barriers in REGION->EXIT's block.  Note that this is only
   valid for GIMPLE_OMP_PARALLEL regions.  Since the end of a parallel region
   is an implicit barrier, any workshare inside the GIMPLE_OMP_PARALLEL that
   left a barrier at the end of the GIMPLE_OMP_PARALLEL region can now be
   removed.  */

static void
remove_exit_barrier (struct omp_region *region)
{
  gimple_stmt_iterator gsi;
  basic_block exit_bb;
  edge_iterator ei;
  edge e;
  gimple stmt;
  int any_addressable_vars = -1;

  exit_bb = region->exit;

  /* If the parallel region doesn't return, we don't have REGION->EXIT
     block at all.  */
  if (! exit_bb)
    return;

  /* The last insn in the block will be the parallel's GIMPLE_OMP_RETURN.  The
     workshare's GIMPLE_OMP_RETURN will be in a preceding block.  The kinds of
     statements that can appear in between are extremely limited -- no
     memory operations at all.  Here, we allow nothing at all, so the
     only thing we allow to precede this GIMPLE_OMP_RETURN is a label.  */
  gsi = gsi_last_bb (exit_bb);
  gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_RETURN);
  gsi_prev (&gsi);
  if (!gsi_end_p (gsi) && gimple_code (gsi_stmt (gsi)) != GIMPLE_LABEL)
    return;

  FOR_EACH_EDGE (e, ei, exit_bb->preds)
    {
      gsi = gsi_last_bb (e->src);
      if (gsi_end_p (gsi))
        continue;
      stmt = gsi_stmt (gsi);
      if (gimple_code (stmt) == GIMPLE_OMP_RETURN
          && !gimple_omp_return_nowait_p (stmt))
        {
          /* OpenMP 3.0 tasks unfortunately prevent this optimization
             in many cases.  If there could be tasks queued, the barrier
             might be needed to let the tasks run before some local
             variable of the parallel that the task uses as shared
             runs out of scope.  The task can be spawned either
             from within current function (this would be easy to check)
             or from some function it calls and gets passed an address
             of such a variable.  */
          if (any_addressable_vars < 0)
            {
              gimple parallel_stmt = last_stmt (region->entry);
              tree child_fun = gimple_omp_parallel_child_fn (parallel_stmt);
              tree local_decls, block, decl;
              unsigned ix;

              any_addressable_vars = 0;
              FOR_EACH_LOCAL_DECL (DECL_STRUCT_FUNCTION (child_fun), ix, decl)
                if (TREE_ADDRESSABLE (decl))
                  {
                    any_addressable_vars = 1;
                    break;
                  }
              for (block = gimple_block (stmt);
                   !any_addressable_vars
                   && block
                   && TREE_CODE (block) == BLOCK;
                   block = BLOCK_SUPERCONTEXT (block))
                {
                  for (local_decls = BLOCK_VARS (block);
                       local_decls;
                       local_decls = TREE_CHAIN (local_decls))
                    if (TREE_ADDRESSABLE (local_decls))
                      {
                        any_addressable_vars = 1;
                        break;
                      }
                  if (block == gimple_block (parallel_stmt))
                    break;
                }
            }
          if (!any_addressable_vars)
            gimple_omp_return_set_nowait (stmt);
        }
    }
}

static void
remove_exit_barriers (struct omp_region *region)
{
  if (region->type == GIMPLE_OMP_PARALLEL)
    remove_exit_barrier (region);

  if (region->inner)
    {
      region = region->inner;
      remove_exit_barriers (region);
      while (region->next)
        {
          region = region->next;
          remove_exit_barriers (region);
        }
    }
}

/* Optimize omp_get_thread_num () and omp_get_num_threads ()
   calls.  These can't be declared as const functions, but
   within one parallel body they are constant, so they can be
   transformed there into __builtin_omp_get_{thread_num,num_threads} ()
   which are declared const.  Similarly for task body, except
   that in untied task omp_get_thread_num () can change at any task
   scheduling point.  */

static void
optimize_omp_library_calls (gimple entry_stmt)
{
  basic_block bb;
  gimple_stmt_iterator gsi;
  tree thr_num_id
    = DECL_ASSEMBLER_NAME (built_in_decls [BUILT_IN_OMP_GET_THREAD_NUM]);
  tree num_thr_id
    = DECL_ASSEMBLER_NAME (built_in_decls [BUILT_IN_OMP_GET_NUM_THREADS]);
  bool untied_task = (gimple_code (entry_stmt) == GIMPLE_OMP_TASK
                      && find_omp_clause (gimple_omp_task_clauses (entry_stmt),
                                          OMP_CLAUSE_UNTIED) != NULL);

  FOR_EACH_BB (bb)
    for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
      {
        gimple call = gsi_stmt (gsi);
        tree decl;

        if (is_gimple_call (call)
            && (decl = gimple_call_fndecl (call))
            && DECL_EXTERNAL (decl)
            && TREE_PUBLIC (decl)
            && DECL_INITIAL (decl) == NULL)
          {
            tree built_in;

            if (DECL_NAME (decl) == thr_num_id)
              {
                /* In #pragma omp task untied omp_get_thread_num () can change
                   during the execution of the task region.  */
                if (untied_task)
                  continue;
                built_in = built_in_decls [BUILT_IN_OMP_GET_THREAD_NUM];
              }
            else if (DECL_NAME (decl) == num_thr_id)
              built_in = built_in_decls [BUILT_IN_OMP_GET_NUM_THREADS];
            else
              continue;

            if (DECL_ASSEMBLER_NAME (decl) != DECL_ASSEMBLER_NAME (built_in)
                || gimple_call_num_args (call) != 0)
              continue;

            if (flag_exceptions && !TREE_NOTHROW (decl))
              continue;

            if (TREE_CODE (TREE_TYPE (decl)) != FUNCTION_TYPE
                || !types_compatible_p (TREE_TYPE (TREE_TYPE (decl)),
                                        TREE_TYPE (TREE_TYPE (built_in))))
              continue;

            gimple_call_set_fndecl (call, built_in);
          }
      }
}
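
/* Illustrative before/after for a call inside a parallel body:

        before:  n = omp_get_num_threads ();             <- not const
        after:   n = __builtin_omp_get_num_threads ();   <- const

   letting later passes CSE repeated queries within the region.  */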
3328
a68ab351 3329/* Expand the OpenMP parallel or task directive starting at REGION. */
953ff289
DN
3330
3331static void
a68ab351 3332expand_omp_taskreg (struct omp_region *region)
953ff289 3333{
50674e96 3334 basic_block entry_bb, exit_bb, new_bb;
db2960f4 3335 struct function *child_cfun;
3bb06db4 3336 tree child_fn, block, t;
133f9369 3337 tree save_current;
726a989a
RB
3338 gimple_stmt_iterator gsi;
3339 gimple entry_stmt, stmt;
50674e96 3340 edge e;
3bb06db4 3341 VEC(tree,gc) *ws_args;
50674e96 3342
777f7f9a 3343 entry_stmt = last_stmt (region->entry);
726a989a 3344 child_fn = gimple_omp_taskreg_child_fn (entry_stmt);
50674e96 3345 child_cfun = DECL_STRUCT_FUNCTION (child_fn);
135a171d
JJ
3346 /* If this function has been already instrumented, make sure
3347 the child function isn't instrumented again. */
3348 child_cfun->after_tree_profile = cfun->after_tree_profile;
50674e96 3349
777f7f9a
RH
3350 entry_bb = region->entry;
3351 exit_bb = region->exit;
50674e96 3352
50674e96 3353 if (is_combined_parallel (region))
777f7f9a 3354 ws_args = region->ws_args;
50674e96 3355 else
3bb06db4 3356 ws_args = NULL;
953ff289 3357
777f7f9a 3358 if (child_cfun->cfg)
953ff289 3359 {
50674e96
DN
3360 /* Due to inlining, it may happen that we have already outlined
3361 the region, in which case all we need to do is make the
3362 sub-graph unreachable and emit the parallel call. */
3363 edge entry_succ_e, exit_succ_e;
726a989a 3364 gimple_stmt_iterator gsi;
50674e96
DN
3365
3366 entry_succ_e = single_succ_edge (entry_bb);
50674e96 3367
726a989a
RB
3368 gsi = gsi_last_bb (entry_bb);
3369 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_PARALLEL
3370 || gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_TASK);
3371 gsi_remove (&gsi, true);
50674e96
DN
3372
3373 new_bb = entry_bb;
d3c673c7
JJ
3374 if (exit_bb)
3375 {
3376 exit_succ_e = single_succ_edge (exit_bb);
3377 make_edge (new_bb, exit_succ_e->dest, EDGE_FALLTHRU);
3378 }
917948d3 3379 remove_edge_and_dominated_blocks (entry_succ_e);
953ff289 3380 }
50674e96
DN
3381 else
3382 {
2fed2012 3383 unsigned srcidx, dstidx, num;
c021f10b 3384
50674e96 3385 /* If the parallel region needs data sent from the parent
b570947c
JJ
3386 function, then the very first statement (except possible
3387 tree profile counter updates) of the parallel body
50674e96
DN
3388 is a copy assignment .OMP_DATA_I = &.OMP_DATA_O. Since
3389 &.OMP_DATA_O is passed as an argument to the child function,
3390 we need to replace it with the argument as seen by the child
3391 function.
3392
3393 In most cases, this will end up being the identity assignment
3394 .OMP_DATA_I = .OMP_DATA_I. However, if the parallel body had
3395 a function call that has been inlined, the original PARM_DECL
3396 .OMP_DATA_I may have been converted into a different local
3397 variable, in which case we need to keep the assignment. */
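/* For instance (D.1234 is a hypothetical inlined local): the body may
   start with

     D.1234 = &.OMP_DATA_O;

   rather than with the identity copy through .OMP_DATA_I, and the scan
   below still recognizes it as the parcopy statement and rewrites its
   right-hand side to the child function's PARM_DECL.  */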
726a989a 3398 if (gimple_omp_taskreg_data_arg (entry_stmt))
50674e96
DN
3399 {
3400 basic_block entry_succ_bb = single_succ (entry_bb);
726a989a
RB
3401 gimple_stmt_iterator gsi;
3402 tree arg, narg;
3403 gimple parcopy_stmt = NULL;
953ff289 3404
726a989a 3405 for (gsi = gsi_start_bb (entry_succ_bb); ; gsi_next (&gsi))
b570947c 3406 {
726a989a 3407 gimple stmt;
b570947c 3408
726a989a
RB
3409 gcc_assert (!gsi_end_p (gsi));
3410 stmt = gsi_stmt (gsi);
3411 if (gimple_code (stmt) != GIMPLE_ASSIGN)
018b899b
JJ
3412 continue;
3413
726a989a 3414 if (gimple_num_ops (stmt) == 2)
b570947c 3415 {
726a989a
RB
3416 tree arg = gimple_assign_rhs1 (stmt);
3417
3418 /* We ignore the subcode because we're
3419 effectively doing a STRIP_NOPS. */
3420
3421 if (TREE_CODE (arg) == ADDR_EXPR
3422 && TREE_OPERAND (arg, 0)
3423 == gimple_omp_taskreg_data_arg (entry_stmt))
3424 {
3425 parcopy_stmt = stmt;
3426 break;
3427 }
b570947c
JJ
3428 }
3429 }
917948d3 3430
726a989a 3431 gcc_assert (parcopy_stmt != NULL);
917948d3
ZD
3432 arg = DECL_ARGUMENTS (child_fn);
3433
3434 if (!gimple_in_ssa_p (cfun))
3435 {
726a989a
RB
3436 if (gimple_assign_lhs (parcopy_stmt) == arg)
3437 gsi_remove (&gsi, true);
917948d3 3438 else
726a989a
RB
3439 {
3440 /* ?? Is setting the subcode really necessary ?? */
3441 gimple_omp_set_subcode (parcopy_stmt, TREE_CODE (arg));
3442 gimple_assign_set_rhs1 (parcopy_stmt, arg);
3443 }
917948d3
ZD
3444 }
3445 else
3446 {
3447 /* If we are in ssa form, we must load the value from the default
3448 definition of the argument. That default definition should not
3449 exist yet, since the argument is never used uninitialized. */
3450 gcc_assert (gimple_default_def (cfun, arg) == NULL);
726a989a 3451 narg = make_ssa_name (arg, gimple_build_nop ());
917948d3 3452 set_default_def (arg, narg);
726a989a
RB
3453 /* ?? Is setting the subcode really necessary ?? */
3454 gimple_omp_set_subcode (parcopy_stmt, TREE_CODE (narg));
3455 gimple_assign_set_rhs1 (parcopy_stmt, narg);
917948d3
ZD
3456 update_stmt (parcopy_stmt);
3457 }
50674e96
DN
3458 }
3459
3460 /* Declare local variables needed in CHILD_CFUN. */
3461 block = DECL_INITIAL (child_fn);
c021f10b 3462 BLOCK_VARS (block) = vec2chain (child_cfun->local_decls);
4f0ae266
JJ
3463 /* The gimplifier could record temporaries in the parallel/task block
3464 rather than in the containing function's local_decls chain,
3465 which would mean cgraph missed finalizing them. Do it now. */
3466 for (t = BLOCK_VARS (block); t; t = TREE_CHAIN (t))
3467 if (TREE_CODE (t) == VAR_DECL
3468 && TREE_STATIC (t)
3469 && !DECL_EXTERNAL (t))
3470 varpool_finalize_decl (t);
726a989a
RB
3471 DECL_SAVED_TREE (child_fn) = NULL;
3472 gimple_set_body (child_fn, bb_seq (single_succ (entry_bb)));
b357f682 3473 TREE_USED (block) = 1;
50674e96 3474
917948d3 3475 /* Reset DECL_CONTEXT on function arguments. */
50674e96
DN
3476 for (t = DECL_ARGUMENTS (child_fn); t; t = TREE_CHAIN (t))
3477 DECL_CONTEXT (t) = child_fn;
3478
726a989a
RB
3479 /* Split ENTRY_BB at GIMPLE_OMP_PARALLEL or GIMPLE_OMP_TASK,
3480 so that it can be moved to the child function. */
3481 gsi = gsi_last_bb (entry_bb);
3482 stmt = gsi_stmt (gsi);
3483 gcc_assert (stmt && (gimple_code (stmt) == GIMPLE_OMP_PARALLEL
3484 || gimple_code (stmt) == GIMPLE_OMP_TASK));
3485 gsi_remove (&gsi, true);
3486 e = split_block (entry_bb, stmt);
50674e96
DN
3487 entry_bb = e->dest;
3488 single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
3489
726a989a 3490 /* Convert GIMPLE_OMP_RETURN into a RETURN_EXPR. */
2aee3e57
JJ
3491 if (exit_bb)
3492 {
726a989a
RB
3493 gsi = gsi_last_bb (exit_bb);
3494 gcc_assert (!gsi_end_p (gsi)
3495 && gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_RETURN);
3496 stmt = gimple_build_return (NULL);
3497 gsi_insert_after (&gsi, stmt, GSI_SAME_STMT);
3498 gsi_remove (&gsi, true);
2aee3e57 3499 }
917948d3
ZD
3500
3501 /* Move the parallel region into CHILD_CFUN. */
b8698a0f 3502
917948d3
ZD
3503 if (gimple_in_ssa_p (cfun))
3504 {
3505 push_cfun (child_cfun);
5db9ba0c 3506 init_tree_ssa (child_cfun);
917948d3
ZD
3507 init_ssa_operands ();
3508 cfun->gimple_df->in_ssa_p = true;
3509 pop_cfun ();
b357f682 3510 block = NULL_TREE;
917948d3 3511 }
b357f682 3512 else
726a989a 3513 block = gimple_block (entry_stmt);
b357f682
JJ
3514
3515 new_bb = move_sese_region_to_fn (child_cfun, entry_bb, exit_bb, block);
917948d3
ZD
3516 if (exit_bb)
3517 single_succ_edge (new_bb)->flags = EDGE_FALLTHRU;
3518
b357f682 3519 /* Remove non-local VAR_DECLs from child_cfun->local_decls list. */
2fed2012
JJ
3520 num = VEC_length (tree, child_cfun->local_decls);
3521 for (srcidx = 0, dstidx = 0; srcidx < num; srcidx++)
3522 {
3523 t = VEC_index (tree, child_cfun->local_decls, srcidx);
3524 if (DECL_CONTEXT (t) == cfun->decl)
3525 continue;
3526 if (srcidx != dstidx)
3527 VEC_replace (tree, child_cfun->local_decls, dstidx, t);
3528 dstidx++;
3529 }
3530 if (dstidx != num)
3531 VEC_truncate (tree, child_cfun->local_decls, dstidx);
b357f682 3532
917948d3
ZD
3533 /* Inform the callgraph about the new function. */
3534 DECL_STRUCT_FUNCTION (child_fn)->curr_properties
3535 = cfun->curr_properties;
3536 cgraph_add_new_function (child_fn, true);
3537
3538 /* Fix the callgraph edges for child_cfun. Those for cfun will be
3539 fixed in a following pass. */
3540 push_cfun (child_cfun);
133f9369
MJ
3541 save_current = current_function_decl;
3542 current_function_decl = child_fn;
2b4cf991 3543 if (optimize)
a68ab351 3544 optimize_omp_library_calls (entry_stmt);
917948d3 3545 rebuild_cgraph_edges ();
99819c63
JJ
3546
3547 /* Some EH regions might become dead, see PR34608. If
3548 pass_cleanup_cfg isn't the first pass to happen with the
3549 new child, these dead EH edges might cause problems.
3550 Clean them up now. */
3551 if (flag_exceptions)
3552 {
3553 basic_block bb;
99819c63
JJ
3554 bool changed = false;
3555
99819c63 3556 FOR_EACH_BB (bb)
726a989a 3557 changed |= gimple_purge_dead_eh_edges (bb);
99819c63
JJ
3558 if (changed)
3559 cleanup_tree_cfg ();
99819c63 3560 }
5006671f
RG
3561 if (gimple_in_ssa_p (cfun))
3562 update_ssa (TODO_update_ssa);
133f9369 3563 current_function_decl = save_current;
917948d3 3564 pop_cfun ();
50674e96 3565 }
b8698a0f 3566
50674e96 3567 /* Emit a library call to launch the children threads. */
726a989a 3568 if (gimple_code (entry_stmt) == GIMPLE_OMP_PARALLEL)
a68ab351
JJ
3569 expand_parallel_call (region, new_bb, entry_stmt, ws_args);
3570 else
3571 expand_task_call (new_bb, entry_stmt);
5f40b3cb 3572 update_ssa (TODO_update_ssa_only_virtuals);
953ff289
DN
3573}
3574
50674e96
DN
3575
3576/* A subroutine of expand_omp_for. Generate code for a parallel
953ff289
DN
3577 loop with any schedule. Given parameters:
3578
3579 for (V = N1; V cond N2; V += STEP) BODY;
3580
3581 where COND is "<" or ">", we generate pseudocode
3582
3583 more = GOMP_loop_foo_start (N1, N2, STEP, CHUNK, &istart0, &iend0);
50674e96 3584 if (more) goto L0; else goto L3;
953ff289
DN
3585 L0:
3586 V = istart0;
3587 iend = iend0;
3588 L1:
3589 BODY;
3590 V += STEP;
50674e96 3591 if (V cond iend) goto L1; else goto L2;
953ff289 3592 L2:
50674e96
DN
3593 if (GOMP_loop_foo_next (&istart0, &iend0)) goto L0; else goto L3;
3594 L3:
953ff289 3595
50674e96 3596 If this is a combined omp parallel loop, instead of the call to
a68ab351
JJ
3597 GOMP_loop_foo_start, we call GOMP_loop_foo_next.
3598
3599 For collapsed loops, given parameters:
3600 collapse(3)
3601 for (V1 = N11; V1 cond1 N12; V1 += STEP1)
3602 for (V2 = N21; V2 cond2 N22; V2 += STEP2)
3603 for (V3 = N31; V3 cond3 N32; V3 += STEP3)
3604 BODY;
3605
3606 we generate pseudocode
3607
3608 if (cond3 is <)
3609 adj = STEP3 - 1;
3610 else
3611 adj = STEP3 + 1;
3612 count3 = (adj + N32 - N31) / STEP3;
3613 if (cond2 is <)
3614 adj = STEP2 - 1;
3615 else
3616 adj = STEP2 + 1;
3617 count2 = (adj + N22 - N21) / STEP2;
3618 if (cond1 is <)
3619 adj = STEP1 - 1;
3620 else
3621 adj = STEP1 + 1;
3622 count1 = (adj + N12 - N11) / STEP1;
3623 count = count1 * count2 * count3;
3624 more = GOMP_loop_foo_start (0, count, 1, CHUNK, &istart0, &iend0);
3625 if (more) goto L0; else goto L3;
3626 L0:
3627 V = istart0;
3628 T = V;
3629 V3 = N31 + (T % count3) * STEP3;
3630 T = T / count3;
3631 V2 = N21 + (T % count2) * STEP2;
3632 T = T / count2;
3633 V1 = N11 + T * STEP1;
3634 iend = iend0;
3635 L1:
3636 BODY;
3637 V += 1;
3638 if (V < iend) goto L10; else goto L2;
3639 L10:
3640 V3 += STEP3;
3641 if (V3 cond3 N32) goto L1; else goto L11;
3642 L11:
3643 V3 = N31;
3644 V2 += STEP2;
3645 if (V2 cond2 N22) goto L1; else goto L12;
3646 L12:
3647 V2 = N21;
3648 V1 += STEP1;
3649 goto L1;
3650 L2:
3651 if (GOMP_loop_foo_next (&istart0, &iend0)) goto L0; else goto L3;
3652 L3:
3653
3654 */
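/* Illustrative mapping: a source loop such as

     #pragma omp for schedule(dynamic, 4)
     for (i = 0; i < n; i++)
       body (i);

   reaches this function from expand_omp_for with START_FN/NEXT_FN
   presumably BUILT_IN_GOMP_LOOP_DYNAMIC_START/_NEXT, and with CHUNK
   equal to 4 in the pseudocode above.  */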
953ff289 3655
777f7f9a 3656static void
50674e96
DN
3657expand_omp_for_generic (struct omp_region *region,
3658 struct omp_for_data *fd,
953ff289
DN
3659 enum built_in_function start_fn,
3660 enum built_in_function next_fn)
3661{
726a989a 3662 tree type, istart0, iend0, iend;
a68ab351
JJ
3663 tree t, vmain, vback, bias = NULL_TREE;
3664 basic_block entry_bb, cont_bb, exit_bb, l0_bb, l1_bb, collapse_bb;
d3c673c7 3665 basic_block l2_bb = NULL, l3_bb = NULL;
726a989a
RB
3666 gimple_stmt_iterator gsi;
3667 gimple stmt;
50674e96 3668 bool in_combined_parallel = is_combined_parallel (region);
e5c95afe 3669 bool broken_loop = region->cont == NULL;
917948d3 3670 edge e, ne;
a68ab351
JJ
3671 tree *counts = NULL;
3672 int i;
e5c95afe
ZD
3673
3674 gcc_assert (!broken_loop || !in_combined_parallel);
a68ab351
JJ
3675 gcc_assert (fd->iter_type == long_integer_type_node
3676 || !in_combined_parallel);
953ff289 3677
a68ab351
JJ
3678 type = TREE_TYPE (fd->loop.v);
3679 istart0 = create_tmp_var (fd->iter_type, ".istart0");
3680 iend0 = create_tmp_var (fd->iter_type, ".iend0");
5b4fc8fb
JJ
3681 TREE_ADDRESSABLE (istart0) = 1;
3682 TREE_ADDRESSABLE (iend0) = 1;
917948d3
ZD
3683 if (gimple_in_ssa_p (cfun))
3684 {
3685 add_referenced_var (istart0);
3686 add_referenced_var (iend0);
3687 }
953ff289 3688
a68ab351
JJ
3689 /* See if we need to bias by LLONG_MIN. */
3690 if (fd->iter_type == long_long_unsigned_type_node
3691 && TREE_CODE (type) == INTEGER_TYPE
3692 && !TYPE_UNSIGNED (type))
3693 {
3694 tree n1, n2;
3695
3696 if (fd->loop.cond_code == LT_EXPR)
3697 {
3698 n1 = fd->loop.n1;
3699 n2 = fold_build2 (PLUS_EXPR, type, fd->loop.n2, fd->loop.step);
3700 }
3701 else
3702 {
3703 n1 = fold_build2 (MINUS_EXPR, type, fd->loop.n2, fd->loop.step);
3704 n2 = fd->loop.n1;
3705 }
3706 if (TREE_CODE (n1) != INTEGER_CST
3707 || TREE_CODE (n2) != INTEGER_CST
3708 || ((tree_int_cst_sgn (n1) < 0) ^ (tree_int_cst_sgn (n2) < 0)))
3709 bias = fold_convert (fd->iter_type, TYPE_MIN_VALUE (type));
3710 }
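/* Illustrative instance of the bias: for V of type long long with
   iter_type unsigned long long, bias is LLONG_MIN, i.e. the unsigned
   value 0x8000000000000000.  Adding it maps e.g. N1 = -5 to
   0x7ffffffffffffffb and N2 = 5 to 0x8000000000000005, so the unsigned
   comparisons in the GOMP_loop_ull_* routines preserve the original
   signed ordering.  */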
3711
777f7f9a 3712 entry_bb = region->entry;
d3c673c7 3713 cont_bb = region->cont;
a68ab351 3714 collapse_bb = NULL;
e5c95afe
ZD
3715 gcc_assert (EDGE_COUNT (entry_bb->succs) == 2);
3716 gcc_assert (broken_loop
3717 || BRANCH_EDGE (entry_bb)->dest == FALLTHRU_EDGE (cont_bb)->dest);
3718 l0_bb = split_edge (FALLTHRU_EDGE (entry_bb));
3719 l1_bb = single_succ (l0_bb);
3720 if (!broken_loop)
d3c673c7
JJ
3721 {
3722 l2_bb = create_empty_bb (cont_bb);
e5c95afe
ZD
3723 gcc_assert (BRANCH_EDGE (cont_bb)->dest == l1_bb);
3724 gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
d3c673c7 3725 }
e5c95afe
ZD
3726 else
3727 l2_bb = NULL;
3728 l3_bb = BRANCH_EDGE (entry_bb)->dest;
3729 exit_bb = region->exit;
50674e96 3730
726a989a 3731 gsi = gsi_last_bb (entry_bb);
a68ab351 3732
726a989a 3733 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
a68ab351
JJ
3734 if (fd->collapse > 1)
3735 {
3736 /* Collapsed loops need work for expansion in SSA form. */
3737 gcc_assert (!gimple_in_ssa_p (cfun));
3738 counts = (tree *) alloca (fd->collapse * sizeof (tree));
3739 for (i = 0; i < fd->collapse; i++)
3740 {
3741 tree itype = TREE_TYPE (fd->loops[i].v);
3742
3743 if (POINTER_TYPE_P (itype))
3744 itype = lang_hooks.types.type_for_size (TYPE_PRECISION (itype), 0);
3745 t = build_int_cst (itype, (fd->loops[i].cond_code == LT_EXPR
3746 ? -1 : 1));
3747 t = fold_build2 (PLUS_EXPR, itype,
3748 fold_convert (itype, fd->loops[i].step), t);
3749 t = fold_build2 (PLUS_EXPR, itype, t,
3750 fold_convert (itype, fd->loops[i].n2));
3751 t = fold_build2 (MINUS_EXPR, itype, t,
3752 fold_convert (itype, fd->loops[i].n1));
3753 if (TYPE_UNSIGNED (itype) && fd->loops[i].cond_code == GT_EXPR)
3754 t = fold_build2 (TRUNC_DIV_EXPR, itype,
3755 fold_build1 (NEGATE_EXPR, itype, t),
3756 fold_build1 (NEGATE_EXPR, itype,
3757 fold_convert (itype,
3758 fd->loops[i].step)));
3759 else
3760 t = fold_build2 (TRUNC_DIV_EXPR, itype, t,
3761 fold_convert (itype, fd->loops[i].step));
3762 t = fold_convert (type, t);
3763 if (TREE_CODE (t) == INTEGER_CST)
3764 counts[i] = t;
3765 else
3766 {
3767 counts[i] = create_tmp_var (type, ".count");
726a989a
RB
3768 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3769 true, GSI_SAME_STMT);
3770 stmt = gimple_build_assign (counts[i], t);
3771 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
a68ab351
JJ
3772 }
3773 if (SSA_VAR_P (fd->loop.n2))
3774 {
3775 if (i == 0)
726a989a 3776 t = counts[0];
a68ab351
JJ
3777 else
3778 {
3779 t = fold_build2 (MULT_EXPR, type, fd->loop.n2, counts[i]);
726a989a
RB
3780 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3781 true, GSI_SAME_STMT);
a68ab351 3782 }
726a989a
RB
3783 stmt = gimple_build_assign (fd->loop.n2, t);
3784 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
a68ab351
JJ
3785 }
3786 }
3787 }
917948d3
ZD
3788 if (in_combined_parallel)
3789 {
3790 /* In a combined parallel loop, emit a call to
3791 GOMP_loop_foo_next. */
3792 t = build_call_expr (built_in_decls[next_fn], 2,
3793 build_fold_addr_expr (istart0),
3794 build_fold_addr_expr (iend0));
3795 }
3796 else
953ff289 3797 {
5039610b 3798 tree t0, t1, t2, t3, t4;
50674e96
DN
3799 /* If this is not a combined parallel loop, emit a call to
3800 GOMP_loop_foo_start in ENTRY_BB. */
5039610b
SL
3801 t4 = build_fold_addr_expr (iend0);
3802 t3 = build_fold_addr_expr (istart0);
a68ab351 3803 t2 = fold_convert (fd->iter_type, fd->loop.step);
c6ff4493
SE
3804 if (POINTER_TYPE_P (type)
3805 && TYPE_PRECISION (type) != TYPE_PRECISION (fd->iter_type))
3806 {
3807 /* Avoid casting pointers to an integer of a different size. */
3808 tree itype
3809 = lang_hooks.types.type_for_size (TYPE_PRECISION (type), 0);
3810 t1 = fold_convert (fd->iter_type, fold_convert (itype, fd->loop.n2));
3811 t0 = fold_convert (fd->iter_type, fold_convert (itype, fd->loop.n1));
3812 }
3813 else
3814 {
3815 t1 = fold_convert (fd->iter_type, fd->loop.n2);
3816 t0 = fold_convert (fd->iter_type, fd->loop.n1);
3817 }
a68ab351 3818 if (bias)
953ff289 3819 {
a68ab351
JJ
3820 t1 = fold_build2 (PLUS_EXPR, fd->iter_type, t1, bias);
3821 t0 = fold_build2 (PLUS_EXPR, fd->iter_type, t0, bias);
3822 }
3823 if (fd->iter_type == long_integer_type_node)
3824 {
3825 if (fd->chunk_size)
3826 {
3827 t = fold_convert (fd->iter_type, fd->chunk_size);
3828 t = build_call_expr (built_in_decls[start_fn], 6,
3829 t0, t1, t2, t, t3, t4);
3830 }
3831 else
3832 t = build_call_expr (built_in_decls[start_fn], 5,
3833 t0, t1, t2, t3, t4);
953ff289 3834 }
5039610b 3835 else
a68ab351
JJ
3836 {
3837 tree t5;
3838 tree c_bool_type;
3839
3840 /* The GOMP_loop_ull_*start functions have an additional boolean
3841 argument, true for < loops and false for > loops.
3842 In Fortran, the C bool type can be different from
3843 boolean_type_node. */
3844 c_bool_type = TREE_TYPE (TREE_TYPE (built_in_decls[start_fn]));
3845 t5 = build_int_cst (c_bool_type,
3846 fd->loop.cond_code == LT_EXPR ? 1 : 0);
3847 if (fd->chunk_size)
3848 {
3849 t = fold_convert (fd->iter_type, fd->chunk_size);
3850 t = build_call_expr (built_in_decls[start_fn], 7,
3851 t5, t0, t1, t2, t, t3, t4);
3852 }
3853 else
3854 t = build_call_expr (built_in_decls[start_fn], 6,
3855 t5, t0, t1, t2, t3, t4);
3856 }
953ff289 3857 }
a68ab351
JJ
3858 if (TREE_TYPE (t) != boolean_type_node)
3859 t = fold_build2 (NE_EXPR, boolean_type_node,
3860 t, build_int_cst (TREE_TYPE (t), 0));
726a989a
RB
3861 t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3862 true, GSI_SAME_STMT);
3863 gsi_insert_after (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT);
917948d3 3864
726a989a
RB
3865 /* Remove the GIMPLE_OMP_FOR statement. */
3866 gsi_remove (&gsi, true);
953ff289 3867
50674e96 3868 /* Iteration setup for sequential loop goes in L0_BB. */
726a989a 3869 gsi = gsi_start_bb (l0_bb);
550918ca 3870 t = istart0;
a68ab351 3871 if (bias)
550918ca
RG
3872 t = fold_build2 (MINUS_EXPR, fd->iter_type, t, bias);
3873 if (POINTER_TYPE_P (type))
3874 t = fold_convert (lang_hooks.types.type_for_size (TYPE_PRECISION (type),
3875 0), t);
3876 t = fold_convert (type, t);
726a989a
RB
3877 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3878 false, GSI_CONTINUE_LINKING);
3879 stmt = gimple_build_assign (fd->loop.v, t);
3880 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
953ff289 3881
550918ca 3882 t = iend0;
a68ab351 3883 if (bias)
550918ca
RG
3884 t = fold_build2 (MINUS_EXPR, fd->iter_type, t, bias);
3885 if (POINTER_TYPE_P (type))
3886 t = fold_convert (lang_hooks.types.type_for_size (TYPE_PRECISION (type),
3887 0), t);
3888 t = fold_convert (type, t);
726a989a
RB
3889 iend = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3890 false, GSI_CONTINUE_LINKING);
a68ab351
JJ
3891 if (fd->collapse > 1)
3892 {
3893 tree tem = create_tmp_var (type, ".tem");
3894
726a989a
RB
3895 stmt = gimple_build_assign (tem, fd->loop.v);
3896 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
a68ab351
JJ
3897 for (i = fd->collapse - 1; i >= 0; i--)
3898 {
3899 tree vtype = TREE_TYPE (fd->loops[i].v), itype;
3900 itype = vtype;
3901 if (POINTER_TYPE_P (vtype))
3902 itype = lang_hooks.types.type_for_size (TYPE_PRECISION (vtype), 0);
3903 t = fold_build2 (TRUNC_MOD_EXPR, type, tem, counts[i]);
3904 t = fold_convert (itype, t);
ada39f0b
RG
3905 t = fold_build2 (MULT_EXPR, itype, t,
3906 fold_convert (itype, fd->loops[i].step));
a68ab351
JJ
3907 if (POINTER_TYPE_P (vtype))
3908 t = fold_build2 (POINTER_PLUS_EXPR, vtype,
3909 fd->loops[i].n1, fold_convert (sizetype, t));
3910 else
3911 t = fold_build2 (PLUS_EXPR, itype, fd->loops[i].n1, t);
726a989a
RB
3912 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3913 false, GSI_CONTINUE_LINKING);
3914 stmt = gimple_build_assign (fd->loops[i].v, t);
3915 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
a68ab351
JJ
3916 if (i != 0)
3917 {
3918 t = fold_build2 (TRUNC_DIV_EXPR, type, tem, counts[i]);
726a989a
RB
3919 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3920 false, GSI_CONTINUE_LINKING);
3921 stmt = gimple_build_assign (tem, t);
3922 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
a68ab351
JJ
3923 }
3924 }
3925 }
50674e96 3926
e5c95afe 3927 if (!broken_loop)
d3c673c7 3928 {
e5c95afe
ZD
3929 /* Code to control the increment and predicate for the sequential
3930 loop goes in the CONT_BB. */
726a989a
RB
3931 gsi = gsi_last_bb (cont_bb);
3932 stmt = gsi_stmt (gsi);
3933 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_CONTINUE);
3934 vmain = gimple_omp_continue_control_use (stmt);
3935 vback = gimple_omp_continue_control_def (stmt);
917948d3 3936
a68ab351
JJ
3937 if (POINTER_TYPE_P (type))
3938 t = fold_build2 (POINTER_PLUS_EXPR, type, vmain,
3939 fold_convert (sizetype, fd->loop.step));
3940 else
3941 t = fold_build2 (PLUS_EXPR, type, vmain, fd->loop.step);
726a989a
RB
3942 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3943 true, GSI_SAME_STMT);
3944 stmt = gimple_build_assign (vback, t);
3945 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
3946
a68ab351 3947 t = build2 (fd->loop.cond_code, boolean_type_node, vback, iend);
726a989a
RB
3948 stmt = gimple_build_cond_empty (t);
3949 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
50674e96 3950
726a989a
RB
3951 /* Remove GIMPLE_OMP_CONTINUE. */
3952 gsi_remove (&gsi, true);
50674e96 3953
a68ab351
JJ
3954 if (fd->collapse > 1)
3955 {
3956 basic_block last_bb, bb;
3957
3958 last_bb = cont_bb;
3959 for (i = fd->collapse - 1; i >= 0; i--)
3960 {
3961 tree vtype = TREE_TYPE (fd->loops[i].v);
3962
3963 bb = create_empty_bb (last_bb);
726a989a 3964 gsi = gsi_start_bb (bb);
a68ab351
JJ
3965
3966 if (i < fd->collapse - 1)
3967 {
3968 e = make_edge (last_bb, bb, EDGE_FALSE_VALUE);
3969 e->probability = REG_BR_PROB_BASE / 8;
3970
726a989a
RB
3971 t = fd->loops[i + 1].n1;
3972 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3973 false, GSI_CONTINUE_LINKING);
3974 stmt = gimple_build_assign (fd->loops[i + 1].v, t);
3975 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
a68ab351
JJ
3976 }
3977 else
3978 collapse_bb = bb;
3979
3980 set_immediate_dominator (CDI_DOMINATORS, bb, last_bb);
3981
3982 if (POINTER_TYPE_P (vtype))
3983 t = fold_build2 (POINTER_PLUS_EXPR, vtype,
3984 fd->loops[i].v,
3985 fold_convert (sizetype, fd->loops[i].step));
3986 else
3987 t = fold_build2 (PLUS_EXPR, vtype, fd->loops[i].v,
3988 fd->loops[i].step);
726a989a
RB
3989 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3990 false, GSI_CONTINUE_LINKING);
3991 stmt = gimple_build_assign (fd->loops[i].v, t);
3992 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
a68ab351
JJ
3993
3994 if (i > 0)
3995 {
726a989a
RB
3996 t = fd->loops[i].n2;
3997 t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3998 false, GSI_CONTINUE_LINKING);
a68ab351 3999 t = fold_build2 (fd->loops[i].cond_code, boolean_type_node,
726a989a
RB
4000 fd->loops[i].v, t);
4001 stmt = gimple_build_cond_empty (t);
4002 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
a68ab351
JJ
4003 e = make_edge (bb, l1_bb, EDGE_TRUE_VALUE);
4004 e->probability = REG_BR_PROB_BASE * 7 / 8;
4005 }
4006 else
4007 make_edge (bb, l1_bb, EDGE_FALLTHRU);
4008 last_bb = bb;
4009 }
4010 }
4011
e5c95afe 4012 /* Emit code to get the next parallel iteration in L2_BB. */
726a989a 4013 gsi = gsi_start_bb (l2_bb);
50674e96 4014
e5c95afe
ZD
4015 t = build_call_expr (built_in_decls[next_fn], 2,
4016 build_fold_addr_expr (istart0),
4017 build_fold_addr_expr (iend0));
726a989a
RB
4018 t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
4019 false, GSI_CONTINUE_LINKING);
a68ab351
JJ
4020 if (TREE_TYPE (t) != boolean_type_node)
4021 t = fold_build2 (NE_EXPR, boolean_type_node,
4022 t, build_int_cst (TREE_TYPE (t), 0));
726a989a
RB
4023 stmt = gimple_build_cond_empty (t);
4024 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
e5c95afe 4025 }
953ff289 4026
777f7f9a 4027 /* Add the loop cleanup function. */
726a989a
RB
4028 gsi = gsi_last_bb (exit_bb);
4029 if (gimple_omp_return_nowait_p (gsi_stmt (gsi)))
777f7f9a
RH
4030 t = built_in_decls[BUILT_IN_GOMP_LOOP_END_NOWAIT];
4031 else
4032 t = built_in_decls[BUILT_IN_GOMP_LOOP_END];
726a989a
RB
4033 stmt = gimple_build_call (t, 0);
4034 gsi_insert_after (&gsi, stmt, GSI_SAME_STMT);
4035 gsi_remove (&gsi, true);
50674e96
DN
4036
4037 /* Connect the new blocks. */
917948d3
ZD
4038 find_edge (entry_bb, l0_bb)->flags = EDGE_TRUE_VALUE;
4039 find_edge (entry_bb, l3_bb)->flags = EDGE_FALSE_VALUE;
953ff289 4040
e5c95afe
ZD
4041 if (!broken_loop)
4042 {
726a989a
RB
4043 gimple_seq phis;
4044
917948d3
ZD
4045 e = find_edge (cont_bb, l3_bb);
4046 ne = make_edge (l2_bb, l3_bb, EDGE_FALSE_VALUE);
4047
726a989a
RB
4048 phis = phi_nodes (l3_bb);
4049 for (gsi = gsi_start (phis); !gsi_end_p (gsi); gsi_next (&gsi))
4050 {
4051 gimple phi = gsi_stmt (gsi);
4052 SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, ne),
4053 PHI_ARG_DEF_FROM_EDGE (phi, e));
4054 }
917948d3
ZD
4055 remove_edge (e);
4056
e5c95afe 4057 make_edge (cont_bb, l2_bb, EDGE_FALSE_VALUE);
a68ab351
JJ
4058 if (fd->collapse > 1)
4059 {
4060 e = find_edge (cont_bb, l1_bb);
4061 remove_edge (e);
4062 e = make_edge (cont_bb, collapse_bb, EDGE_TRUE_VALUE);
4063 }
4064 else
4065 {
4066 e = find_edge (cont_bb, l1_bb);
4067 e->flags = EDGE_TRUE_VALUE;
4068 }
4069 e->probability = REG_BR_PROB_BASE * 7 / 8;
4070 find_edge (cont_bb, l2_bb)->probability = REG_BR_PROB_BASE / 8;
e5c95afe 4071 make_edge (l2_bb, l0_bb, EDGE_TRUE_VALUE);
917948d3
ZD
4072
4073 set_immediate_dominator (CDI_DOMINATORS, l2_bb,
4074 recompute_dominator (CDI_DOMINATORS, l2_bb));
4075 set_immediate_dominator (CDI_DOMINATORS, l3_bb,
4076 recompute_dominator (CDI_DOMINATORS, l3_bb));
4077 set_immediate_dominator (CDI_DOMINATORS, l0_bb,
4078 recompute_dominator (CDI_DOMINATORS, l0_bb));
4079 set_immediate_dominator (CDI_DOMINATORS, l1_bb,
4080 recompute_dominator (CDI_DOMINATORS, l1_bb));
e5c95afe 4081 }
953ff289
DN
4082}
4083
4084
50674e96
DN
4085/* A subroutine of expand_omp_for. Generate code for a parallel
4086 loop with static schedule and no specified chunk size. Given
4087 parameters:
953ff289
DN
4088
4089 for (V = N1; V cond N2; V += STEP) BODY;
4090
4091 where COND is "<" or ">", we generate pseudocode
4092
4093 if (cond is <)
4094 adj = STEP - 1;
4095 else
4096 adj = STEP + 1;
a68ab351
JJ
4097 if ((__typeof (V)) -1 > 0 && cond is >)
4098 n = -(adj + N2 - N1) / -STEP;
4099 else
4100 n = (adj + N2 - N1) / STEP;
953ff289
DN
4101 q = n / nthreads;
4102 q += (q * nthreads != n);
4103 s0 = q * threadid;
4104 e0 = min(s0 + q, n);
917948d3 4105 V = s0 * STEP + N1;
953ff289
DN
4106 if (s0 >= e0) goto L2; else goto L0;
4107 L0:
953ff289
DN
4108 e = e0 * STEP + N1;
4109 L1:
4110 BODY;
4111 V += STEP;
4112 if (V cond e) goto L1;
953ff289
DN
4113 L2:
4114*/
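/* Worked instance of the pseudocode above: with N1 = 0, N2 = 10,
   STEP = 1 and nthreads = 4 we get n = 10 and q = 3 (10/4 rounded up),
   so thread 0 runs iterations [0,3), thread 1 [3,6), thread 2 [6,9)
   and thread 3 [9,10); any thread with s0 >= e0 branches straight
   to L2.  */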
4115
777f7f9a 4116static void
50674e96
DN
4117expand_omp_for_static_nochunk (struct omp_region *region,
4118 struct omp_for_data *fd)
953ff289 4119{
a9b77cd1 4120 tree n, q, s0, e0, e, t, nthreads, threadid;
a68ab351 4121 tree type, itype, vmain, vback;
777f7f9a
RH
4122 basic_block entry_bb, exit_bb, seq_start_bb, body_bb, cont_bb;
4123 basic_block fin_bb;
726a989a
RB
4124 gimple_stmt_iterator gsi;
4125 gimple stmt;
953ff289 4126
a68ab351
JJ
4127 itype = type = TREE_TYPE (fd->loop.v);
4128 if (POINTER_TYPE_P (type))
4129 itype = lang_hooks.types.type_for_size (TYPE_PRECISION (type), 0);
953ff289 4130
777f7f9a 4131 entry_bb = region->entry;
777f7f9a 4132 cont_bb = region->cont;
e5c95afe
ZD
4133 gcc_assert (EDGE_COUNT (entry_bb->succs) == 2);
4134 gcc_assert (BRANCH_EDGE (entry_bb)->dest == FALLTHRU_EDGE (cont_bb)->dest);
4135 seq_start_bb = split_edge (FALLTHRU_EDGE (entry_bb));
4136 body_bb = single_succ (seq_start_bb);
4137 gcc_assert (BRANCH_EDGE (cont_bb)->dest == body_bb);
4138 gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
4139 fin_bb = FALLTHRU_EDGE (cont_bb)->dest;
777f7f9a
RH
4140 exit_bb = region->exit;
4141
50674e96 4142 /* Iteration space partitioning goes in ENTRY_BB. */
726a989a
RB
4143 gsi = gsi_last_bb (entry_bb);
4144 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
777f7f9a 4145
5039610b 4146 t = build_call_expr (built_in_decls[BUILT_IN_OMP_GET_NUM_THREADS], 0);
a68ab351 4147 t = fold_convert (itype, t);
726a989a
RB
4148 nthreads = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
4149 true, GSI_SAME_STMT);
b8698a0f 4150
5039610b 4151 t = build_call_expr (built_in_decls[BUILT_IN_OMP_GET_THREAD_NUM], 0);
a68ab351 4152 t = fold_convert (itype, t);
726a989a
RB
4153 threadid = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
4154 true, GSI_SAME_STMT);
953ff289 4155
a68ab351 4156 fd->loop.n1
726a989a
RB
4157 = force_gimple_operand_gsi (&gsi, fold_convert (type, fd->loop.n1),
4158 true, NULL_TREE, true, GSI_SAME_STMT);
a68ab351 4159 fd->loop.n2
726a989a
RB
4160 = force_gimple_operand_gsi (&gsi, fold_convert (itype, fd->loop.n2),
4161 true, NULL_TREE, true, GSI_SAME_STMT);
a68ab351 4162 fd->loop.step
726a989a
RB
4163 = force_gimple_operand_gsi (&gsi, fold_convert (itype, fd->loop.step),
4164 true, NULL_TREE, true, GSI_SAME_STMT);
a68ab351
JJ
4165
4166 t = build_int_cst (itype, (fd->loop.cond_code == LT_EXPR ? -1 : 1));
4167 t = fold_build2 (PLUS_EXPR, itype, fd->loop.step, t);
4168 t = fold_build2 (PLUS_EXPR, itype, t, fd->loop.n2);
4169 t = fold_build2 (MINUS_EXPR, itype, t, fold_convert (itype, fd->loop.n1));
4170 if (TYPE_UNSIGNED (itype) && fd->loop.cond_code == GT_EXPR)
4171 t = fold_build2 (TRUNC_DIV_EXPR, itype,
4172 fold_build1 (NEGATE_EXPR, itype, t),
4173 fold_build1 (NEGATE_EXPR, itype, fd->loop.step));
4174 else
4175 t = fold_build2 (TRUNC_DIV_EXPR, itype, t, fd->loop.step);
4176 t = fold_convert (itype, t);
726a989a 4177 n = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);
953ff289 4178
a68ab351 4179 t = fold_build2 (TRUNC_DIV_EXPR, itype, n, nthreads);
726a989a 4180 q = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);
953ff289 4181
a68ab351
JJ
4182 t = fold_build2 (MULT_EXPR, itype, q, nthreads);
4183 t = fold_build2 (NE_EXPR, itype, t, n);
4184 t = fold_build2 (PLUS_EXPR, itype, q, t);
726a989a 4185 q = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);
953ff289 4186
a68ab351 4187 t = build2 (MULT_EXPR, itype, q, threadid);
726a989a 4188 s0 = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);
953ff289 4189
a68ab351
JJ
4190 t = fold_build2 (PLUS_EXPR, itype, s0, q);
4191 t = fold_build2 (MIN_EXPR, itype, t, n);
726a989a 4192 e0 = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);
917948d3 4193
953ff289 4194 t = build2 (GE_EXPR, boolean_type_node, s0, e0);
726a989a 4195 gsi_insert_before (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT);
50674e96 4196
726a989a
RB
4197 /* Remove the GIMPLE_OMP_FOR statement. */
4198 gsi_remove (&gsi, true);
50674e96
DN
4199
4200 /* Setup code for sequential iteration goes in SEQ_START_BB. */
726a989a 4201 gsi = gsi_start_bb (seq_start_bb);
953ff289 4202
a68ab351
JJ
4203 t = fold_convert (itype, s0);
4204 t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
4205 if (POINTER_TYPE_P (type))
4206 t = fold_build2 (POINTER_PLUS_EXPR, type, fd->loop.n1,
4207 fold_convert (sizetype, t));
4208 else
4209 t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
726a989a
RB
4210 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
4211 false, GSI_CONTINUE_LINKING);
4212 stmt = gimple_build_assign (fd->loop.v, t);
4213 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
b8698a0f 4214
a68ab351
JJ
4215 t = fold_convert (itype, e0);
4216 t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
4217 if (POINTER_TYPE_P (type))
4218 t = fold_build2 (POINTER_PLUS_EXPR, type, fd->loop.n1,
4219 fold_convert (sizetype, t));
4220 else
4221 t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
726a989a
RB
4222 e = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
4223 false, GSI_CONTINUE_LINKING);
953ff289 4224
726a989a
RB
4225 /* The code controlling the sequential loop replaces the
4226 GIMPLE_OMP_CONTINUE. */
4227 gsi = gsi_last_bb (cont_bb);
4228 stmt = gsi_stmt (gsi);
4229 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_CONTINUE);
4230 vmain = gimple_omp_continue_control_use (stmt);
4231 vback = gimple_omp_continue_control_def (stmt);
917948d3 4232
a68ab351
JJ
4233 if (POINTER_TYPE_P (type))
4234 t = fold_build2 (POINTER_PLUS_EXPR, type, vmain,
4235 fold_convert (sizetype, fd->loop.step));
4236 else
4237 t = fold_build2 (PLUS_EXPR, type, vmain, fd->loop.step);
726a989a
RB
4238 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
4239 true, GSI_SAME_STMT);
4240 stmt = gimple_build_assign (vback, t);
4241 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
917948d3 4242
a68ab351 4243 t = build2 (fd->loop.cond_code, boolean_type_node, vback, e);
726a989a 4244 gsi_insert_before (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT);
953ff289 4245
726a989a
RB
4246 /* Remove the GIMPLE_OMP_CONTINUE statement. */
4247 gsi_remove (&gsi, true);
50674e96 4248
726a989a
RB
4249 /* Replace the GIMPLE_OMP_RETURN with a barrier, or nothing. */
4250 gsi = gsi_last_bb (exit_bb);
4251 if (!gimple_omp_return_nowait_p (gsi_stmt (gsi)))
4252 force_gimple_operand_gsi (&gsi, build_omp_barrier (), false, NULL_TREE,
4253 false, GSI_SAME_STMT);
4254 gsi_remove (&gsi, true);
50674e96
DN
4255
4256 /* Connect all the blocks. */
e5c95afe
ZD
4257 find_edge (entry_bb, seq_start_bb)->flags = EDGE_FALSE_VALUE;
4258 find_edge (entry_bb, fin_bb)->flags = EDGE_TRUE_VALUE;
917948d3 4259
e5c95afe 4260 find_edge (cont_bb, body_bb)->flags = EDGE_TRUE_VALUE;
777f7f9a 4261 find_edge (cont_bb, fin_bb)->flags = EDGE_FALSE_VALUE;
b8698a0f 4262
917948d3
ZD
4263 set_immediate_dominator (CDI_DOMINATORS, seq_start_bb, entry_bb);
4264 set_immediate_dominator (CDI_DOMINATORS, body_bb,
4265 recompute_dominator (CDI_DOMINATORS, body_bb));
4266 set_immediate_dominator (CDI_DOMINATORS, fin_bb,
4267 recompute_dominator (CDI_DOMINATORS, fin_bb));
953ff289
DN
4268}
4269
50674e96
DN
4270
4271/* A subroutine of expand_omp_for. Generate code for a parallel
4272 loop with static schedule and a specified chunk size. Given
4273 parameters:
953ff289
DN
4274
4275 for (V = N1; V cond N2; V += STEP) BODY;
4276
4277 where COND is "<" or ">", we generate pseudocode
4278
4279 if (cond is <)
4280 adj = STEP - 1;
4281 else
4282 adj = STEP + 1;
a68ab351
JJ
4283 if ((__typeof (V)) -1 > 0 && cond is >)
4284 n = -(adj + N2 - N1) / -STEP;
4285 else
4286 n = (adj + N2 - N1) / STEP;
953ff289 4287 trip = 0;
917948d3
ZD
4288 V = threadid * CHUNK * STEP + N1; -- this extra definition of V is
4289 here so that V is defined
4290 if the loop is not entered
953ff289
DN
4291 L0:
4292 s0 = (trip * nthreads + threadid) * CHUNK;
4293 e0 = min(s0 + CHUNK, n);
4294 if (s0 < n) goto L1; else goto L4;
4295 L1:
4296 V = s0 * STEP + N1;
4297 e = e0 * STEP + N1;
4298 L2:
4299 BODY;
4300 V += STEP;
4301 if (V cond e) goto L2; else goto L3;
4302 L3:
4303 trip += 1;
4304 goto L0;
4305 L4:
953ff289
DN
4306*/
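/* Worked instance of the pseudocode above: with N1 = 0, N2 = 10,
   STEP = 1, CHUNK = 2 and nthreads = 2, thread 0 executes chunks
   [0,2), [4,6) and [8,10) on trips 0, 1 and 2, while thread 1 executes
   [2,4) and [6,8); on trip 2 thread 1 computes s0 = 10, the test
   s0 < n fails, and it exits at L4.  */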
4307
777f7f9a 4308static void
726a989a 4309expand_omp_for_static_chunk (struct omp_region *region, struct omp_for_data *fd)
953ff289 4310{
726a989a 4311 tree n, s0, e0, e, t;
917948d3 4312 tree trip_var, trip_init, trip_main, trip_back, nthreads, threadid;
726a989a 4313 tree type, itype, v_main, v_back, v_extra;
50674e96 4314 basic_block entry_bb, exit_bb, body_bb, seq_start_bb, iter_part_bb;
777f7f9a 4315 basic_block trip_update_bb, cont_bb, fin_bb;
726a989a
RB
4316 gimple_stmt_iterator si;
4317 gimple stmt;
4318 edge se;
953ff289 4319
a68ab351
JJ
4320 itype = type = TREE_TYPE (fd->loop.v);
4321 if (POINTER_TYPE_P (type))
4322 itype = lang_hooks.types.type_for_size (TYPE_PRECISION (type), 0);
953ff289 4323
777f7f9a 4324 entry_bb = region->entry;
e5c95afe
ZD
4325 se = split_block (entry_bb, last_stmt (entry_bb));
4326 entry_bb = se->src;
4327 iter_part_bb = se->dest;
777f7f9a 4328 cont_bb = region->cont;
e5c95afe
ZD
4329 gcc_assert (EDGE_COUNT (iter_part_bb->succs) == 2);
4330 gcc_assert (BRANCH_EDGE (iter_part_bb)->dest
4331 == FALLTHRU_EDGE (cont_bb)->dest);
4332 seq_start_bb = split_edge (FALLTHRU_EDGE (iter_part_bb));
4333 body_bb = single_succ (seq_start_bb);
4334 gcc_assert (BRANCH_EDGE (cont_bb)->dest == body_bb);
4335 gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
4336 fin_bb = FALLTHRU_EDGE (cont_bb)->dest;
4337 trip_update_bb = split_edge (FALLTHRU_EDGE (cont_bb));
777f7f9a 4338 exit_bb = region->exit;
50674e96 4339
50674e96 4340 /* Trip and adjustment setup goes in ENTRY_BB. */
726a989a
RB
4341 si = gsi_last_bb (entry_bb);
4342 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_FOR);
50674e96 4343
5039610b 4344 t = build_call_expr (built_in_decls[BUILT_IN_OMP_GET_NUM_THREADS], 0);
a68ab351 4345 t = fold_convert (itype, t);
726a989a
RB
4346 nthreads = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4347 true, GSI_SAME_STMT);
b8698a0f 4348
5039610b 4349 t = build_call_expr (built_in_decls[BUILT_IN_OMP_GET_THREAD_NUM], 0);
a68ab351 4350 t = fold_convert (itype, t);
726a989a
RB
4351 threadid = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4352 true, GSI_SAME_STMT);
917948d3 4353
a68ab351 4354 fd->loop.n1
726a989a
RB
4355 = force_gimple_operand_gsi (&si, fold_convert (type, fd->loop.n1),
4356 true, NULL_TREE, true, GSI_SAME_STMT);
a68ab351 4357 fd->loop.n2
726a989a
RB
4358 = force_gimple_operand_gsi (&si, fold_convert (itype, fd->loop.n2),
4359 true, NULL_TREE, true, GSI_SAME_STMT);
a68ab351 4360 fd->loop.step
726a989a
RB
4361 = force_gimple_operand_gsi (&si, fold_convert (itype, fd->loop.step),
4362 true, NULL_TREE, true, GSI_SAME_STMT);
917948d3 4363 fd->chunk_size
726a989a
RB
4364 = force_gimple_operand_gsi (&si, fold_convert (itype, fd->chunk_size),
4365 true, NULL_TREE, true, GSI_SAME_STMT);
a68ab351
JJ
4366
4367 t = build_int_cst (itype, (fd->loop.cond_code == LT_EXPR ? -1 : 1));
4368 t = fold_build2 (PLUS_EXPR, itype, fd->loop.step, t);
4369 t = fold_build2 (PLUS_EXPR, itype, t, fd->loop.n2);
4370 t = fold_build2 (MINUS_EXPR, itype, t, fold_convert (itype, fd->loop.n1));
4371 if (TYPE_UNSIGNED (itype) && fd->loop.cond_code == GT_EXPR)
4372 t = fold_build2 (TRUNC_DIV_EXPR, itype,
4373 fold_build1 (NEGATE_EXPR, itype, t),
4374 fold_build1 (NEGATE_EXPR, itype, fd->loop.step));
4375 else
4376 t = fold_build2 (TRUNC_DIV_EXPR, itype, t, fd->loop.step);
4377 t = fold_convert (itype, t);
726a989a
RB
4378 n = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4379 true, GSI_SAME_STMT);
917948d3 4380
a68ab351 4381 trip_var = create_tmp_var (itype, ".trip");
917948d3
ZD
4382 if (gimple_in_ssa_p (cfun))
4383 {
4384 add_referenced_var (trip_var);
726a989a
RB
4385 trip_init = make_ssa_name (trip_var, NULL);
4386 trip_main = make_ssa_name (trip_var, NULL);
4387 trip_back = make_ssa_name (trip_var, NULL);
917948d3 4388 }
953ff289 4389 else
917948d3
ZD
4390 {
4391 trip_init = trip_var;
4392 trip_main = trip_var;
4393 trip_back = trip_var;
4394 }
953ff289 4395
726a989a
RB
4396 stmt = gimple_build_assign (trip_init, build_int_cst (itype, 0));
4397 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
50674e96 4398
a68ab351
JJ
4399 t = fold_build2 (MULT_EXPR, itype, threadid, fd->chunk_size);
4400 t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
4401 if (POINTER_TYPE_P (type))
4402 t = fold_build2 (POINTER_PLUS_EXPR, type, fd->loop.n1,
4403 fold_convert (sizetype, t));
4404 else
4405 t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
726a989a
RB
4406 v_extra = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4407 true, GSI_SAME_STMT);
917948d3 4408
726a989a
RB
4409 /* Remove the GIMPLE_OMP_FOR. */
4410 gsi_remove (&si, true);
50674e96
DN
4411
4412 /* Iteration space partitioning goes in ITER_PART_BB. */
726a989a 4413 si = gsi_last_bb (iter_part_bb);
953ff289 4414
a68ab351
JJ
4415 t = fold_build2 (MULT_EXPR, itype, trip_main, nthreads);
4416 t = fold_build2 (PLUS_EXPR, itype, t, threadid);
4417 t = fold_build2 (MULT_EXPR, itype, t, fd->chunk_size);
726a989a
RB
4418 s0 = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4419 false, GSI_CONTINUE_LINKING);
953ff289 4420
a68ab351
JJ
4421 t = fold_build2 (PLUS_EXPR, itype, s0, fd->chunk_size);
4422 t = fold_build2 (MIN_EXPR, itype, t, n);
726a989a
RB
4423 e0 = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4424 false, GSI_CONTINUE_LINKING);
953ff289
DN
4425
4426 t = build2 (LT_EXPR, boolean_type_node, s0, n);
726a989a 4427 gsi_insert_after (&si, gimple_build_cond_empty (t), GSI_CONTINUE_LINKING);
50674e96
DN
4428
4429 /* Setup code for sequential iteration goes in SEQ_START_BB. */
726a989a 4430 si = gsi_start_bb (seq_start_bb);
953ff289 4431
a68ab351
JJ
4432 t = fold_convert (itype, s0);
4433 t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
4434 if (POINTER_TYPE_P (type))
4435 t = fold_build2 (POINTER_PLUS_EXPR, type, fd->loop.n1,
4436 fold_convert (sizetype, t));
4437 else
4438 t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
726a989a
RB
4439 t = force_gimple_operand_gsi (&si, t, false, NULL_TREE,
4440 false, GSI_CONTINUE_LINKING);
4441 stmt = gimple_build_assign (fd->loop.v, t);
4442 gsi_insert_after (&si, stmt, GSI_CONTINUE_LINKING);
953ff289 4443
a68ab351
JJ
4444 t = fold_convert (itype, e0);
4445 t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
4446 if (POINTER_TYPE_P (type))
4447 t = fold_build2 (POINTER_PLUS_EXPR, type, fd->loop.n1,
4448 fold_convert (sizetype, t));
4449 else
4450 t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
726a989a
RB
4451 e = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4452 false, GSI_CONTINUE_LINKING);
953ff289 4453
777f7f9a 4454 /* The code controlling the sequential loop goes in CONT_BB,
726a989a
RB
4455 replacing the GIMPLE_OMP_CONTINUE. */
4456 si = gsi_last_bb (cont_bb);
4457 stmt = gsi_stmt (si);
4458 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_CONTINUE);
4459 v_main = gimple_omp_continue_control_use (stmt);
4460 v_back = gimple_omp_continue_control_def (stmt);
917948d3 4461
a68ab351
JJ
4462 if (POINTER_TYPE_P (type))
4463 t = fold_build2 (POINTER_PLUS_EXPR, type, v_main,
4464 fold_convert (sizetype, fd->loop.step));
4465 else
726a989a
RB
4466 t = fold_build2 (PLUS_EXPR, type, v_main, fd->loop.step);
4467 stmt = gimple_build_assign (v_back, t);
4468 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
917948d3 4469
a68ab351 4470 t = build2 (fd->loop.cond_code, boolean_type_node, v_back, e);
726a989a 4471 gsi_insert_before (&si, gimple_build_cond_empty (t), GSI_SAME_STMT);
b8698a0f 4472
726a989a
RB
4473 /* Remove GIMPLE_OMP_CONTINUE. */
4474 gsi_remove (&si, true);
50674e96
DN
4475
4476 /* Trip update code goes into TRIP_UPDATE_BB. */
726a989a 4477 si = gsi_start_bb (trip_update_bb);
953ff289 4478
a68ab351
JJ
4479 t = build_int_cst (itype, 1);
4480 t = build2 (PLUS_EXPR, itype, trip_main, t);
726a989a
RB
4481 stmt = gimple_build_assign (trip_back, t);
4482 gsi_insert_after (&si, stmt, GSI_CONTINUE_LINKING);
953ff289 4483
726a989a
RB
4484 /* Replace the GIMPLE_OMP_RETURN with a barrier, or nothing. */
4485 si = gsi_last_bb (exit_bb);
4486 if (!gimple_omp_return_nowait_p (gsi_stmt (si)))
4487 force_gimple_operand_gsi (&si, build_omp_barrier (), false, NULL_TREE,
4488 false, GSI_SAME_STMT);
4489 gsi_remove (&si, true);
953ff289 4490
50674e96 4491 /* Connect the new blocks. */
e5c95afe
ZD
4492 find_edge (iter_part_bb, seq_start_bb)->flags = EDGE_TRUE_VALUE;
4493 find_edge (iter_part_bb, fin_bb)->flags = EDGE_FALSE_VALUE;
917948d3 4494
e5c95afe
ZD
4495 find_edge (cont_bb, body_bb)->flags = EDGE_TRUE_VALUE;
4496 find_edge (cont_bb, trip_update_bb)->flags = EDGE_FALSE_VALUE;
917948d3 4497
e5c95afe 4498 redirect_edge_and_branch (single_succ_edge (trip_update_bb), iter_part_bb);
917948d3
ZD
4499
4500 if (gimple_in_ssa_p (cfun))
4501 {
726a989a
RB
4502 gimple_stmt_iterator psi;
4503 gimple phi;
4504 edge re, ene;
4505 edge_var_map_vector head;
4506 edge_var_map *vm;
4507 size_t i;
4508
917948d3
ZD
4509 /* When we redirect the edge from trip_update_bb to iter_part_bb, we
4510 remove arguments of the phi nodes in fin_bb. We need to create
4511 appropriate phi nodes in iter_part_bb instead. */
4512 se = single_pred_edge (fin_bb);
4513 re = single_succ_edge (trip_update_bb);
726a989a 4514 head = redirect_edge_var_map_vector (re);
917948d3
ZD
4515 ene = single_succ_edge (entry_bb);
4516
726a989a
RB
4517 psi = gsi_start_phis (fin_bb);
4518 for (i = 0; !gsi_end_p (psi) && VEC_iterate (edge_var_map, head, i, vm);
4519 gsi_next (&psi), ++i)
917948d3 4520 {
726a989a 4521 gimple nphi;
f5045c96 4522 source_location locus;
726a989a
RB
4523
4524 phi = gsi_stmt (psi);
4525 t = gimple_phi_result (phi);
4526 gcc_assert (t == redirect_edge_var_map_result (vm));
917948d3
ZD
4527 nphi = create_phi_node (t, iter_part_bb);
4528 SSA_NAME_DEF_STMT (t) = nphi;
4529
4530 t = PHI_ARG_DEF_FROM_EDGE (phi, se);
f5045c96
AM
4531 locus = gimple_phi_arg_location_from_edge (phi, se);
4532
a68ab351
JJ
4533 /* A special case -- fd->loop.v is not yet computed in
4534 iter_part_bb; we need to use v_extra instead. */
4535 if (t == fd->loop.v)
917948d3 4536 t = v_extra;
f5045c96
AM
4537 add_phi_arg (nphi, t, ene, locus);
4538 locus = redirect_edge_var_map_location (vm);
4539 add_phi_arg (nphi, redirect_edge_var_map_def (vm), re, locus);
726a989a
RB
4540 }
4541 gcc_assert (!gsi_end_p (psi) && i == VEC_length (edge_var_map, head));
4542 redirect_edge_var_map_clear (re);
4543 while (1)
4544 {
4545 psi = gsi_start_phis (fin_bb);
4546 if (gsi_end_p (psi))
4547 break;
4548 remove_phi_node (&psi, false);
917948d3 4549 }
917948d3
ZD
4550
4551 /* Make phi node for trip. */
4552 phi = create_phi_node (trip_main, iter_part_bb);
4553 SSA_NAME_DEF_STMT (trip_main) = phi;
f5045c96
AM
4554 add_phi_arg (phi, trip_back, single_succ_edge (trip_update_bb),
4555 UNKNOWN_LOCATION);
4556 add_phi_arg (phi, trip_init, single_succ_edge (entry_bb),
4557 UNKNOWN_LOCATION);
917948d3
ZD
4558 }
4559
4560 set_immediate_dominator (CDI_DOMINATORS, trip_update_bb, cont_bb);
4561 set_immediate_dominator (CDI_DOMINATORS, iter_part_bb,
4562 recompute_dominator (CDI_DOMINATORS, iter_part_bb));
4563 set_immediate_dominator (CDI_DOMINATORS, fin_bb,
4564 recompute_dominator (CDI_DOMINATORS, fin_bb));
4565 set_immediate_dominator (CDI_DOMINATORS, seq_start_bb,
4566 recompute_dominator (CDI_DOMINATORS, seq_start_bb));
4567 set_immediate_dominator (CDI_DOMINATORS, body_bb,
4568 recompute_dominator (CDI_DOMINATORS, body_bb));
953ff289
DN
4569}
4570
953ff289 4571
50674e96 4572/* Expand the OpenMP loop defined by REGION. */
953ff289 4573
50674e96
DN
4574static void
4575expand_omp_for (struct omp_region *region)
4576{
4577 struct omp_for_data fd;
a68ab351 4578 struct omp_for_data_loop *loops;
953ff289 4579
a68ab351
JJ
4580 loops
4581 = (struct omp_for_data_loop *)
726a989a 4582 alloca (gimple_omp_for_collapse (last_stmt (region->entry))
a68ab351 4583 * sizeof (struct omp_for_data_loop));
a68ab351 4584 extract_omp_for_data (last_stmt (region->entry), &fd, loops);
21a66e91 4585 region->sched_kind = fd.sched_kind;
953ff289 4586
135a171d
JJ
4587 gcc_assert (EDGE_COUNT (region->entry->succs) == 2);
4588 BRANCH_EDGE (region->entry)->flags &= ~EDGE_ABNORMAL;
4589 FALLTHRU_EDGE (region->entry)->flags &= ~EDGE_ABNORMAL;
4590 if (region->cont)
4591 {
4592 gcc_assert (EDGE_COUNT (region->cont->succs) == 2);
4593 BRANCH_EDGE (region->cont)->flags &= ~EDGE_ABNORMAL;
4594 FALLTHRU_EDGE (region->cont)->flags &= ~EDGE_ABNORMAL;
4595 }
4596
d3c673c7
JJ
4597 if (fd.sched_kind == OMP_CLAUSE_SCHEDULE_STATIC
4598 && !fd.have_ordered
a68ab351 4599 && fd.collapse == 1
e5c95afe 4600 && region->cont != NULL)
953ff289
DN
4601 {
4602 if (fd.chunk_size == NULL)
777f7f9a 4603 expand_omp_for_static_nochunk (region, &fd);
953ff289 4604 else
777f7f9a 4605 expand_omp_for_static_chunk (region, &fd);
953ff289
DN
4606 }
4607 else
4608 {
a68ab351
JJ
4609 int fn_index, start_ix, next_ix;
4610
4611 gcc_assert (fd.sched_kind != OMP_CLAUSE_SCHEDULE_AUTO);
4612 fn_index = (fd.sched_kind == OMP_CLAUSE_SCHEDULE_RUNTIME)
726a989a 4613 ? 3 : fd.sched_kind;
a68ab351
JJ
4614 fn_index += fd.have_ordered * 4;
4615 start_ix = BUILT_IN_GOMP_LOOP_STATIC_START + fn_index;
4616 next_ix = BUILT_IN_GOMP_LOOP_STATIC_NEXT + fn_index;
4617 if (fd.iter_type == long_long_unsigned_type_node)
4618 {
4619 start_ix += BUILT_IN_GOMP_LOOP_ULL_STATIC_START
4620 - BUILT_IN_GOMP_LOOP_STATIC_START;
4621 next_ix += BUILT_IN_GOMP_LOOP_ULL_STATIC_NEXT
4622 - BUILT_IN_GOMP_LOOP_STATIC_NEXT;
4623 }
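/* Illustrative instance of the index arithmetic above (assuming the
   GOMP_loop_* builtins are declared in schedule order static, dynamic,
   guided, runtime, followed by the ordered variants): schedule(guided)
   combined with an ordered clause yields fn_index = 2 + 4 = 6 and thus
   BUILT_IN_GOMP_LOOP_ORDERED_GUIDED_START/_NEXT.  */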
bbbbb16a
ILT
4624 expand_omp_for_generic (region, &fd, (enum built_in_function) start_ix,
4625 (enum built_in_function) next_ix);
953ff289 4626 }
5f40b3cb
ZD
4627
4628 update_ssa (TODO_update_ssa_only_virtuals);
953ff289
DN
4629}
4630
953ff289
DN
4631
4632/* Expand code for an OpenMP sections directive. In pseudo code, we generate
4633
953ff289
DN
4634 v = GOMP_sections_start (n);
4635 L0:
4636 switch (v)
4637 {
4638 case 0:
4639 goto L2;
4640 case 1:
4641 section 1;
4642 goto L1;
4643 case 2:
4644 ...
4645 case n:
4646 ...
953ff289
DN
4647 default:
4648 abort ();
4649 }
4650 L1:
4651 v = GOMP_sections_next ();
4652 goto L0;
4653 L2:
4654 reduction;
4655
50674e96 4656 If this is a combined parallel sections, replace the call to
917948d3 4657 GOMP_sections_start with a call to GOMP_sections_next. */
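/* Illustrative source-level instance:

     #pragma omp sections
     {
       #pragma omp section
         work1 ();
       #pragma omp section
         work2 ();
     }

   becomes the switch above with n = 2: case 1 runs work1 (), case 2
   runs work2 (), and case 0 jumps to the exit label L2.  */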
953ff289
DN
4658
4659static void
50674e96 4660expand_omp_sections (struct omp_region *region)
953ff289 4661{
0f900dfa 4662 tree t, u, vin = NULL, vmain, vnext, l2;
726a989a
RB
4663 VEC (tree,heap) *label_vec;
4664 unsigned len;
e5c95afe 4665 basic_block entry_bb, l0_bb, l1_bb, l2_bb, default_bb;
726a989a
RB
4666 gimple_stmt_iterator si, switch_si;
4667 gimple sections_stmt, stmt, cont;
c34938a8
JJ
4668 edge_iterator ei;
4669 edge e;
777f7f9a 4670 struct omp_region *inner;
726a989a 4671 unsigned i, casei;
e5c95afe 4672 bool exit_reachable = region->cont != NULL;
953ff289 4673
e5c95afe 4674 gcc_assert (exit_reachable == (region->exit != NULL));
777f7f9a 4675 entry_bb = region->entry;
e5c95afe 4676 l0_bb = single_succ (entry_bb);
777f7f9a 4677 l1_bb = region->cont;
e5c95afe
ZD
4678 l2_bb = region->exit;
4679 if (exit_reachable)
d3c673c7 4680 {
057dd91d 4681 if (single_pred_p (l2_bb) && single_pred (l2_bb) == l0_bb)
726a989a 4682 l2 = gimple_block_label (l2_bb);
c34938a8
JJ
4683 else
4684 {
4685 /* This can happen if there are reductions. */
4686 len = EDGE_COUNT (l0_bb->succs);
4687 gcc_assert (len > 0);
4688 e = EDGE_SUCC (l0_bb, len - 1);
726a989a 4689 si = gsi_last_bb (e->dest);
3ac4c44a 4690 l2 = NULL_TREE;
726a989a
RB
4691 if (gsi_end_p (si)
4692 || gimple_code (gsi_stmt (si)) != GIMPLE_OMP_SECTION)
4693 l2 = gimple_block_label (e->dest);
c34938a8
JJ
4694 else
4695 FOR_EACH_EDGE (e, ei, l0_bb->succs)
4696 {
726a989a
RB
4697 si = gsi_last_bb (e->dest);
4698 if (gsi_end_p (si)
4699 || gimple_code (gsi_stmt (si)) != GIMPLE_OMP_SECTION)
c34938a8 4700 {
726a989a 4701 l2 = gimple_block_label (e->dest);
c34938a8
JJ
4702 break;
4703 }
4704 }
4705 }
d3c673c7 4706 default_bb = create_empty_bb (l1_bb->prev_bb);
d3c673c7
JJ
4707 }
4708 else
4709 {
e5c95afe 4710 default_bb = create_empty_bb (l0_bb);
726a989a 4711 l2 = gimple_block_label (default_bb);
d3c673c7 4712 }
50674e96
DN
4713
4714 /* We will build a switch() with enough cases for all the
726a989a 4715 GIMPLE_OMP_SECTION regions, a '0' case to handle running out of work
50674e96 4716 and a default case to abort if something goes wrong. */
e5c95afe 4717 len = EDGE_COUNT (l0_bb->succs);
726a989a
RB
4718
4719 /* Use VEC_quick_push on label_vec throughout, since we know the size
4720 in advance. */
4721 label_vec = VEC_alloc (tree, heap, len);
953ff289 4722
777f7f9a 4723 /* The call to GOMP_sections_start goes in ENTRY_BB, replacing the
726a989a
RB
4724 GIMPLE_OMP_SECTIONS statement. */
4725 si = gsi_last_bb (entry_bb);
4726 sections_stmt = gsi_stmt (si);
4727 gcc_assert (gimple_code (sections_stmt) == GIMPLE_OMP_SECTIONS);
4728 vin = gimple_omp_sections_control (sections_stmt);
50674e96 4729 if (!is_combined_parallel (region))
953ff289 4730 {
50674e96
DN
4731 /* If we are not inside a combined parallel+sections region,
4732 call GOMP_sections_start. */
e5c95afe
ZD
4733 t = build_int_cst (unsigned_type_node,
4734 exit_reachable ? len - 1 : len);
953ff289 4735 u = built_in_decls[BUILT_IN_GOMP_SECTIONS_START];
726a989a 4736 stmt = gimple_build_call (u, 1, t);
953ff289 4737 }
917948d3
ZD
4738 else
4739 {
4740 /* Otherwise, call GOMP_sections_next. */
4741 u = built_in_decls[BUILT_IN_GOMP_SECTIONS_NEXT];
726a989a 4742 stmt = gimple_build_call (u, 0);
917948d3 4743 }
726a989a
RB
4744 gimple_call_set_lhs (stmt, vin);
4745 gsi_insert_after (&si, stmt, GSI_SAME_STMT);
4746 gsi_remove (&si, true);
4747
4748 /* The switch() statement replacing GIMPLE_OMP_SECTIONS_SWITCH goes in
4749 L0_BB. */
4750 switch_si = gsi_last_bb (l0_bb);
4751 gcc_assert (gimple_code (gsi_stmt (switch_si)) == GIMPLE_OMP_SECTIONS_SWITCH);
917948d3
ZD
4752 if (exit_reachable)
4753 {
4754 cont = last_stmt (l1_bb);
726a989a
RB
4755 gcc_assert (gimple_code (cont) == GIMPLE_OMP_CONTINUE);
4756 vmain = gimple_omp_continue_control_use (cont);
4757 vnext = gimple_omp_continue_control_def (cont);
917948d3
ZD
4758 }
4759 else
4760 {
4761 vmain = vin;
4762 vnext = NULL_TREE;
4763 }
953ff289 4764
e5c95afe
ZD
4765 i = 0;
4766 if (exit_reachable)
4767 {
4768 t = build3 (CASE_LABEL_EXPR, void_type_node,
4769 build_int_cst (unsigned_type_node, 0), NULL, l2);
726a989a 4770 VEC_quick_push (tree, label_vec, t);
e5c95afe
ZD
4771 i++;
4772 }
d3c673c7 4773
726a989a 4774 /* Convert each GIMPLE_OMP_SECTION into a CASE_LABEL_EXPR. */
e5c95afe
ZD
4775 for (inner = region->inner, casei = 1;
4776 inner;
4777 inner = inner->next, i++, casei++)
953ff289 4778 {
50674e96
DN
4779 basic_block s_entry_bb, s_exit_bb;
4780
c34938a8 4781 /* Skip optional reduction region. */
726a989a 4782 if (inner->type == GIMPLE_OMP_ATOMIC_LOAD)
c34938a8
JJ
4783 {
4784 --i;
4785 --casei;
4786 continue;
4787 }
4788
777f7f9a
RH
4789 s_entry_bb = inner->entry;
4790 s_exit_bb = inner->exit;
953ff289 4791
726a989a 4792 t = gimple_block_label (s_entry_bb);
e5c95afe 4793 u = build_int_cst (unsigned_type_node, casei);
953ff289 4794 u = build3 (CASE_LABEL_EXPR, void_type_node, u, NULL, t);
726a989a 4795 VEC_quick_push (tree, label_vec, u);
777f7f9a 4796
726a989a
RB
4797 si = gsi_last_bb (s_entry_bb);
4798 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SECTION);
4799 gcc_assert (i < len || gimple_omp_section_last_p (gsi_stmt (si)));
4800 gsi_remove (&si, true);
777f7f9a 4801 single_succ_edge (s_entry_bb)->flags = EDGE_FALLTHRU;
d3c673c7
JJ
4802
4803 if (s_exit_bb == NULL)
4804 continue;
4805
726a989a
RB
4806 si = gsi_last_bb (s_exit_bb);
4807 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_RETURN);
4808 gsi_remove (&si, true);
d3c673c7 4809
50674e96 4810 single_succ_edge (s_exit_bb)->flags = EDGE_FALLTHRU;
953ff289
DN
4811 }
4812
50674e96 4813 /* Error handling code goes in DEFAULT_BB. */
726a989a 4814 t = gimple_block_label (default_bb);
953ff289 4815 u = build3 (CASE_LABEL_EXPR, void_type_node, NULL, NULL, t);
777f7f9a 4816 make_edge (l0_bb, default_bb, 0);
953ff289 4817
726a989a
RB
4818 stmt = gimple_build_switch_vec (vmain, u, label_vec);
4819 gsi_insert_after (&switch_si, stmt, GSI_SAME_STMT);
4820 gsi_remove (&switch_si, true);
4821 VEC_free (tree, heap, label_vec);
4822
4823 si = gsi_start_bb (default_bb);
4824 stmt = gimple_build_call (built_in_decls[BUILT_IN_TRAP], 0);
4825 gsi_insert_after (&si, stmt, GSI_CONTINUE_LINKING);
50674e96 4826
e5c95afe 4827 if (exit_reachable)
d3c673c7 4828 {
e5c95afe 4829 /* Code to get the next section goes in L1_BB. */
726a989a
RB
4830 si = gsi_last_bb (l1_bb);
4831 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_CONTINUE);
953ff289 4832
726a989a
RB
4833 stmt = gimple_build_call (built_in_decls[BUILT_IN_GOMP_SECTIONS_NEXT], 0);
4834 gimple_call_set_lhs (stmt, vnext);
4835 gsi_insert_after (&si, stmt, GSI_SAME_STMT);
4836 gsi_remove (&si, true);
50674e96 4837
e5c95afe
ZD
4838 single_succ_edge (l1_bb)->flags = EDGE_FALLTHRU;
4839
726a989a
RB
4840 /* Cleanup function replaces GIMPLE_OMP_RETURN in EXIT_BB. */
4841 si = gsi_last_bb (l2_bb);
4842 if (gimple_omp_return_nowait_p (gsi_stmt (si)))
d3c673c7
JJ
4843 t = built_in_decls[BUILT_IN_GOMP_SECTIONS_END_NOWAIT];
4844 else
4845 t = built_in_decls[BUILT_IN_GOMP_SECTIONS_END];
726a989a
RB
4846 stmt = gimple_build_call (t, 0);
4847 gsi_insert_after (&si, stmt, GSI_SAME_STMT);
4848 gsi_remove (&si, true);
d3c673c7 4849 }
50674e96 4850
917948d3 4851 set_immediate_dominator (CDI_DOMINATORS, default_bb, l0_bb);
50674e96 4852}
953ff289 4853
953ff289 4854
777f7f9a
RH
4855/* Expand code for an OpenMP single directive. We've already expanded
4856 much of the code; here we simply place the GOMP_barrier call. */
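/* For instance (illustrative):

     #pragma omp single copyprivate (x)
       x = compute ();

   keeps its terminal barrier unconditionally, because the other
   threads must copy the value of x out of the GOMP_single_copy buffer
   before the thread that executed the body is allowed to leave.  */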
4857
4858static void
4859expand_omp_single (struct omp_region *region)
4860{
4861 basic_block entry_bb, exit_bb;
726a989a 4862 gimple_stmt_iterator si;
777f7f9a
RH
4863 bool need_barrier = false;
4864
4865 entry_bb = region->entry;
4866 exit_bb = region->exit;
4867
726a989a 4868 si = gsi_last_bb (entry_bb);
777f7f9a
RH
4869 /* The terminal barrier at the end of a GOMP_single_copy sequence cannot
4870 be removed. We need to ensure that the thread that entered the single
4871 does not exit before the data is copied out by the other threads. */
726a989a 4872 if (find_omp_clause (gimple_omp_single_clauses (gsi_stmt (si)),
777f7f9a
RH
4873 OMP_CLAUSE_COPYPRIVATE))
4874 need_barrier = true;
726a989a
RB
4875 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SINGLE);
4876 gsi_remove (&si, true);
777f7f9a
RH
4877 single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
4878
726a989a
RB
4879 si = gsi_last_bb (exit_bb);
4880 if (!gimple_omp_return_nowait_p (gsi_stmt (si)) || need_barrier)
4881 force_gimple_operand_gsi (&si, build_omp_barrier (), false, NULL_TREE,
4882 false, GSI_SAME_STMT);
4883 gsi_remove (&si, true);
777f7f9a
RH
4884 single_succ_edge (exit_bb)->flags = EDGE_FALLTHRU;
4885}
4886
4887
4888/* Generic expansion for OpenMP synchronization directives: master,
4889 ordered and critical. All we need to do here is remove the entry
4890 and exit markers for REGION. */
50674e96
DN
4891
4892static void
4893expand_omp_synch (struct omp_region *region)
4894{
4895 basic_block entry_bb, exit_bb;
726a989a 4896 gimple_stmt_iterator si;
50674e96 4897
777f7f9a
RH
4898 entry_bb = region->entry;
4899 exit_bb = region->exit;
50674e96 4900
726a989a
RB
4901 si = gsi_last_bb (entry_bb);
4902 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SINGLE
4903 || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_MASTER
4904 || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ORDERED
4905 || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_CRITICAL);
4906 gsi_remove (&si, true);
50674e96
DN
4907 single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
4908
d3c673c7
JJ
4909 if (exit_bb)
4910 {
726a989a
RB
4911 si = gsi_last_bb (exit_bb);
4912 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_RETURN);
4913 gsi_remove (&si, true);
d3c673c7
JJ
4914 single_succ_edge (exit_bb)->flags = EDGE_FALLTHRU;
4915 }
50674e96 4916}
953ff289 4917
a509ebb5
RL
4918/* A subroutine of expand_omp_atomic. Attempt to implement the atomic
4919 operation as a __sync_fetch_and_op builtin. INDEX is log2 of the
4920 size of the data type, and thus usable to find the index of the builtin
4921 decl. Returns false if the expression is not of the proper form. */
4922
4923static bool
4924expand_omp_atomic_fetch_op (basic_block load_bb,
4925 tree addr, tree loaded_val,
4926 tree stored_val, int index)
4927{
4928 enum built_in_function base;
4929 tree decl, itype, call;
f9621cc4 4930 direct_optab optab;
a509ebb5
RL
4931 tree rhs;
4932 basic_block store_bb = single_succ (load_bb);
726a989a
RB
4933 gimple_stmt_iterator gsi;
4934 gimple stmt;
db3927fb 4935 location_t loc;
a509ebb5
RL
4936
4937 /* We expect to find the following sequences:
b8698a0f 4938
a509ebb5 4939 load_bb:
726a989a 4940 GIMPLE_OMP_ATOMIC_LOAD (tmp, mem)
a509ebb5
RL
4941
4942 store_bb:
4943 val = tmp OP something; (or: something OP tmp)
b8698a0f 4944 GIMPLE_OMP_ATOMIC_STORE (val)
a509ebb5 4945
b8698a0f 4946 ???FIXME: Allow a more flexible sequence.
a509ebb5 4947 Perhaps use data flow to pick the statements.
b8698a0f 4948
a509ebb5
RL
4949 */
4950
726a989a
RB
4951 gsi = gsi_after_labels (store_bb);
4952 stmt = gsi_stmt (gsi);
db3927fb 4953 loc = gimple_location (stmt);
726a989a 4954 if (!is_gimple_assign (stmt))
a509ebb5 4955 return false;
726a989a
RB
4956 gsi_next (&gsi);
4957 if (gimple_code (gsi_stmt (gsi)) != GIMPLE_OMP_ATOMIC_STORE)
a509ebb5
RL
4958 return false;
4959
726a989a 4960 if (!operand_equal_p (gimple_assign_lhs (stmt), stored_val, 0))
a509ebb5
RL
4961 return false;
4962
a509ebb5 4963 /* Check for one of the supported fetch-op operations. */
726a989a 4964 switch (gimple_assign_rhs_code (stmt))
a509ebb5
RL
4965 {
4966 case PLUS_EXPR:
4967 case POINTER_PLUS_EXPR:
4968 base = BUILT_IN_FETCH_AND_ADD_N;
4969 optab = sync_add_optab;
4970 break;
4971 case MINUS_EXPR:
4972 base = BUILT_IN_FETCH_AND_SUB_N;
4973 optab = sync_add_optab;
4974 break;
4975 case BIT_AND_EXPR:
4976 base = BUILT_IN_FETCH_AND_AND_N;
4977 optab = sync_and_optab;
4978 break;
4979 case BIT_IOR_EXPR:
4980 base = BUILT_IN_FETCH_AND_OR_N;
4981 optab = sync_ior_optab;
4982 break;
4983 case BIT_XOR_EXPR:
4984 base = BUILT_IN_FETCH_AND_XOR_N;
4985 optab = sync_xor_optab;
4986 break;
4987 default:
4988 return false;
4989 }
4990 /* Make sure the expression is of the proper form. */
726a989a
RB
4991 if (operand_equal_p (gimple_assign_rhs1 (stmt), loaded_val, 0))
4992 rhs = gimple_assign_rhs2 (stmt);
4993 else if (commutative_tree_code (gimple_assign_rhs_code (stmt))
4994 && operand_equal_p (gimple_assign_rhs2 (stmt), loaded_val, 0))
4995 rhs = gimple_assign_rhs1 (stmt);
a509ebb5
RL
4996 else
4997 return false;
4998
4999 decl = built_in_decls[base + index + 1];
5000 itype = TREE_TYPE (TREE_TYPE (decl));
5001
f9621cc4 5002 if (direct_optab_handler (optab, TYPE_MODE (itype)) == CODE_FOR_nothing)
a509ebb5
RL
5003 return false;
5004
726a989a
RB
5005 gsi = gsi_last_bb (load_bb);
5006 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_ATOMIC_LOAD);
db3927fb
AH
5007 call = build_call_expr_loc (loc,
5008 decl, 2, addr,
5009 fold_convert_loc (loc, itype, rhs));
5010 call = fold_convert_loc (loc, void_type_node, call);
726a989a
RB
5011 force_gimple_operand_gsi (&gsi, call, true, NULL_TREE, true, GSI_SAME_STMT);
5012 gsi_remove (&gsi, true);
a509ebb5 5013
726a989a
RB
5014 gsi = gsi_last_bb (store_bb);
5015 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_ATOMIC_STORE);
5016 gsi_remove (&gsi, true);
5017 gsi = gsi_last_bb (store_bb);
5018 gsi_remove (&gsi, true);
a509ebb5
RL
5019
5020 if (gimple_in_ssa_p (cfun))
5021 update_ssa (TODO_update_ssa_no_phi);
5022
5023 return true;
5024}
5025
5026/* A subroutine of expand_omp_atomic. Implement the atomic operation as:
5027
5028 oldval = *addr;
5029 repeat:
5030 newval = rhs; // with oldval replacing *addr in rhs
5031 oldval = __sync_val_compare_and_swap (addr, oldval, newval);
5032 if (oldval != newval)
5033 goto repeat;
5034
5035 INDEX is log2 of the size of the data type, and thus usable to find the
5036 index of the builtin decl. */
5037
5038static bool
5039expand_omp_atomic_pipeline (basic_block load_bb, basic_block store_bb,
5040 tree addr, tree loaded_val, tree stored_val,
5041 int index)
5042{
c18c98c0 5043 tree loadedi, storedi, initial, new_storedi, old_vali;
a509ebb5 5044 tree type, itype, cmpxchg, iaddr;
726a989a 5045 gimple_stmt_iterator si;
a509ebb5 5046 basic_block loop_header = single_succ (load_bb);
726a989a 5047 gimple phi, stmt;
a509ebb5
RL
5048 edge e;
5049
5050 cmpxchg = built_in_decls[BUILT_IN_VAL_COMPARE_AND_SWAP_N + index + 1];
5051 type = TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (addr)));
5052 itype = TREE_TYPE (TREE_TYPE (cmpxchg));
5053
f9621cc4
RS
5054 if (direct_optab_handler (sync_compare_and_swap_optab, TYPE_MODE (itype))
5055 == CODE_FOR_nothing)
a509ebb5
RL
5056 return false;
5057
726a989a
RB
5058 /* Load the initial value, replacing the GIMPLE_OMP_ATOMIC_LOAD. */
5059 si = gsi_last_bb (load_bb);
5060 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_LOAD);
5061
c18c98c0
JJ
5062 /* For floating-point values, we'll need to view-convert them to integers
5063 so that we can perform the atomic compare and swap. Simplify the
5064 following code by always setting up the "i"ntegral variables. */
5065 if (!INTEGRAL_TYPE_P (type) && !POINTER_TYPE_P (type))
5066 {
726a989a
RB
5067 tree iaddr_val;
5068
5b21f0f3
RG
5069 iaddr = create_tmp_var (build_pointer_type_for_mode (itype, ptr_mode,
5070 true), NULL);
726a989a
RB
5071 iaddr_val
5072 = force_gimple_operand_gsi (&si,
5073 fold_convert (TREE_TYPE (iaddr), addr),
5074 false, NULL_TREE, true, GSI_SAME_STMT);
5075 stmt = gimple_build_assign (iaddr, iaddr_val);
5076 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
c18c98c0
JJ
5077 loadedi = create_tmp_var (itype, NULL);
5078 if (gimple_in_ssa_p (cfun))
5079 {
5080 add_referenced_var (iaddr);
5081 add_referenced_var (loadedi);
5082 loadedi = make_ssa_name (loadedi, NULL);
5083 }
5084 }
5085 else
5086 {
5087 iaddr = addr;
5088 loadedi = loaded_val;
5089 }
726a989a 5090
70f34814
RG
5091 initial
5092 = force_gimple_operand_gsi (&si,
5093 build2 (MEM_REF, TREE_TYPE (TREE_TYPE (iaddr)),
5094 iaddr,
5095 build_int_cst (TREE_TYPE (iaddr), 0)),
5096 true, NULL_TREE, true, GSI_SAME_STMT);
c18c98c0
JJ
5097
5098 /* Move the value to the LOADEDI temporary. */
a509ebb5
RL
5099 if (gimple_in_ssa_p (cfun))
5100 {
726a989a 5101 gcc_assert (gimple_seq_empty_p (phi_nodes (loop_header)));
c18c98c0
JJ
5102 phi = create_phi_node (loadedi, loop_header);
5103 SSA_NAME_DEF_STMT (loadedi) = phi;
a509ebb5
RL
5104 SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, single_succ_edge (load_bb)),
5105 initial);
5106 }
5107 else
726a989a
RB
5108 gsi_insert_before (&si,
5109 gimple_build_assign (loadedi, initial),
5110 GSI_SAME_STMT);
c18c98c0
JJ
5111 if (loadedi != loaded_val)
5112 {
726a989a
RB
5113 gimple_stmt_iterator gsi2;
5114 tree x;
c18c98c0
JJ
5115
5116 x = build1 (VIEW_CONVERT_EXPR, type, loadedi);
726a989a 5117 gsi2 = gsi_start_bb (loop_header);
c18c98c0
JJ
5118 if (gimple_in_ssa_p (cfun))
5119 {
726a989a
RB
5120 gimple stmt;
5121 x = force_gimple_operand_gsi (&gsi2, x, true, NULL_TREE,
5122 true, GSI_SAME_STMT);
5123 stmt = gimple_build_assign (loaded_val, x);
5124 gsi_insert_before (&gsi2, stmt, GSI_SAME_STMT);
c18c98c0
JJ
5125 }
5126 else
5127 {
726a989a
RB
5128 x = build2 (MODIFY_EXPR, TREE_TYPE (loaded_val), loaded_val, x);
5129 force_gimple_operand_gsi (&gsi2, x, true, NULL_TREE,
5130 true, GSI_SAME_STMT);
c18c98c0
JJ
5131 }
5132 }
726a989a 5133 gsi_remove (&si, true);
a509ebb5 5134
726a989a
RB
5135 si = gsi_last_bb (store_bb);
5136 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_STORE);
a509ebb5 5137
c18c98c0
JJ
5138 if (iaddr == addr)
5139 storedi = stored_val;
a509ebb5 5140 else
c18c98c0 5141 storedi =
726a989a 5142 force_gimple_operand_gsi (&si,
c18c98c0
JJ
5143 build1 (VIEW_CONVERT_EXPR, itype,
5144 stored_val), true, NULL_TREE, true,
726a989a 5145 GSI_SAME_STMT);
a509ebb5
RL
5146
5147 /* Build the compare&swap statement. */
5148 new_storedi = build_call_expr (cmpxchg, 3, iaddr, loadedi, storedi);
726a989a 5149 new_storedi = force_gimple_operand_gsi (&si,
587aa063
RG
5150 fold_convert (TREE_TYPE (loadedi),
5151 new_storedi),
a509ebb5 5152 true, NULL_TREE,
726a989a 5153 true, GSI_SAME_STMT);
a509ebb5
RL
5154
5155 if (gimple_in_ssa_p (cfun))
5156 old_vali = loadedi;
5157 else
5158 {
587aa063 5159 old_vali = create_tmp_var (TREE_TYPE (loadedi), NULL);
c18c98c0
JJ
5160 if (gimple_in_ssa_p (cfun))
5161 add_referenced_var (old_vali);
726a989a
RB
5162 stmt = gimple_build_assign (old_vali, loadedi);
5163 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
a509ebb5 5164
726a989a
RB
5165 stmt = gimple_build_assign (loadedi, new_storedi);
5166 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
a509ebb5
RL
5167 }
5168
5169 /* Note that we always perform the comparison as an integer, even for
b8698a0f 5170 floating point. This allows the atomic operation to properly
a509ebb5 5171 succeed even with NaNs and -0.0. */
726a989a
RB
5172 stmt = gimple_build_cond_empty
5173 (build2 (NE_EXPR, boolean_type_node,
5174 new_storedi, old_vali));
5175 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
a509ebb5
RL
5176
5177 /* Update cfg. */
5178 e = single_succ_edge (store_bb);
5179 e->flags &= ~EDGE_FALLTHRU;
5180 e->flags |= EDGE_FALSE_VALUE;
5181
5182 e = make_edge (store_bb, loop_header, EDGE_TRUE_VALUE);
5183
c18c98c0 5184 /* Copy the new value to loadedi (we already did that before the condition
a509ebb5
RL
5185 if we are not in SSA). */
5186 if (gimple_in_ssa_p (cfun))
5187 {
726a989a 5188 phi = gimple_seq_first_stmt (phi_nodes (loop_header));
c18c98c0 5189 SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, e), new_storedi);
a509ebb5
RL
5190 }
5191
726a989a
RB
5192 /* Remove GIMPLE_OMP_ATOMIC_STORE. */
5193 gsi_remove (&si, true);
a509ebb5
RL
5194
5195 if (gimple_in_ssa_p (cfun))
5196 update_ssa (TODO_update_ssa_no_phi);
5197
5198 return true;
5199}
5200
5201/* A subroutine of expand_omp_atomic. Implement the atomic operation as:
5202
5203 GOMP_atomic_start ();
5204 *addr = rhs;
5205 GOMP_atomic_end ();
5206
5207 The result is not globally atomic, but works so long as all parallel
5208 references are within #pragma omp atomic directives. According to
5209 responses received from omp@openmp.org, this appears to be within spec.
5210 That makes sense, since that's how several other compilers handle
b8698a0f 5211 this situation as well.
726a989a
RB
5212 LOADED_VAL and ADDR are the operands of GIMPLE_OMP_ATOMIC_LOAD we're
5213 expanding. STORED_VAL is the operand of the matching
5214 GIMPLE_OMP_ATOMIC_STORE.
a509ebb5 5215
b8698a0f
L
5216 We replace
5217 GIMPLE_OMP_ATOMIC_LOAD (loaded_val, addr) with
a509ebb5
RL
5218 loaded_val = *addr;
5219
5220 and replace
726a989a 5221 GIMPLE_OMP_ATOMIC_STORE (stored_val) with
b8698a0f 5222 *addr = stored_val;
a509ebb5
RL
5223*/
5224
5225static bool
5226expand_omp_atomic_mutex (basic_block load_bb, basic_block store_bb,
5227 tree addr, tree loaded_val, tree stored_val)
5228{
726a989a
RB
5229 gimple_stmt_iterator si;
5230 gimple stmt;
a509ebb5
RL
5231 tree t;
5232
726a989a
RB
5233 si = gsi_last_bb (load_bb);
5234 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_LOAD);
a509ebb5
RL
5235
5236 t = built_in_decls[BUILT_IN_GOMP_ATOMIC_START];
3bb06db4 5237 t = build_call_expr (t, 0);
726a989a 5238 force_gimple_operand_gsi (&si, t, true, NULL_TREE, true, GSI_SAME_STMT);
a509ebb5 5239
70f34814 5240 stmt = gimple_build_assign (loaded_val, build_simple_mem_ref (addr));
726a989a
RB
5241 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
5242 gsi_remove (&si, true);
a509ebb5 5243
726a989a
RB
5244 si = gsi_last_bb (store_bb);
5245 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_STORE);
a509ebb5 5246
70f34814
RG
5247 stmt = gimple_build_assign (build_simple_mem_ref (unshare_expr (addr)),
5248 stored_val);
726a989a 5249 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
a509ebb5
RL
5250
5251 t = built_in_decls[BUILT_IN_GOMP_ATOMIC_END];
3bb06db4 5252 t = build_call_expr (t, 0);
726a989a
RB
5253 force_gimple_operand_gsi (&si, t, true, NULL_TREE, true, GSI_SAME_STMT);
5254 gsi_remove (&si, true);
a509ebb5
RL
5255
5256 if (gimple_in_ssa_p (cfun))
5257 update_ssa (TODO_update_ssa_no_phi);
5258 return true;
5259}
5260
b8698a0f
L
5261/* Expand a GIMPLE_OMP_ATOMIC statement. We try to expand
5262 using expand_omp_atomic_fetch_op. If that fails, we try to
a509ebb5
RL
5263 call expand_omp_atomic_pipeline, and if it fails too, the
5264 ultimate fallback is wrapping the operation in a mutex
b8698a0f
L
5265 (expand_omp_atomic_mutex). REGION is the atomic region built
5266 by build_omp_regions_1(). */
a509ebb5
RL
5267
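/* For example, a naturally aligned 4-byte int has index == 2 and
   exact_log2 (align) == 2, so the __sync paths below are tried first;
   a 4-byte field with 1-byte alignment (e.g. inside a packed struct)
   fails the alignment test and goes straight to the mutex fallback.  */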
5268static void
5269expand_omp_atomic (struct omp_region *region)
5270{
5271 basic_block load_bb = region->entry, store_bb = region->exit;
726a989a
RB
5272 gimple load = last_stmt (load_bb), store = last_stmt (store_bb);
5273 tree loaded_val = gimple_omp_atomic_load_lhs (load);
5274 tree addr = gimple_omp_atomic_load_rhs (load);
5275 tree stored_val = gimple_omp_atomic_store_val (store);
a509ebb5
RL
5276 tree type = TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (addr)));
5277 HOST_WIDE_INT index;
5278
5279 /* Make sure the type is one of the supported sizes. */
5280 index = tree_low_cst (TYPE_SIZE_UNIT (type), 1);
5281 index = exact_log2 (index);
5282 if (index >= 0 && index <= 4)
5283 {
5284 unsigned int align = TYPE_ALIGN_UNIT (type);
5285
5286 /* __sync builtins require strict data alignment. */
5287 if (exact_log2 (align) >= index)
5288 {
5289 /* When possible, use specialized atomic update functions. */
5290 if ((INTEGRAL_TYPE_P (type) || POINTER_TYPE_P (type))
5291 && store_bb == single_succ (load_bb))
5292 {
5293 if (expand_omp_atomic_fetch_op (load_bb, addr,
5294 loaded_val, stored_val, index))
5295 return;
5296 }
5297
5298 /* If we don't have specialized __sync builtins, try and implement
5299 as a compare and swap loop. */
5300 if (expand_omp_atomic_pipeline (load_bb, store_bb, addr,
5301 loaded_val, stored_val, index))
5302 return;
5303 }
5304 }
5305
5306 /* The ultimate fallback is wrapping the operation in a mutex. */
5307 expand_omp_atomic_mutex (load_bb, store_bb, addr, loaded_val, stored_val);
5308}
5309
953ff289 5310
50674e96
DN
5311/* Expand the parallel region tree rooted at REGION. Expansion
5312 proceeds in depth-first order. Innermost regions are expanded
5313 first. This way, parallel regions that require a new function to
726a989a 5314 be created (e.g., GIMPLE_OMP_PARALLEL) can be expanded without having any
50674e96
DN
5315 internal dependencies in their body. */
5316
5317static void
5318expand_omp (struct omp_region *region)
5319{
5320 while (region)
5321 {
b357f682
JJ
5322 location_t saved_location;
5323
068e1875
ZD
5324 /* First, determine whether this is a combined parallel+workshare
5325 region. */
726a989a 5326 if (region->type == GIMPLE_OMP_PARALLEL)
068e1875
ZD
5327 determine_parallel_type (region);
5328
50674e96
DN
5329 if (region->inner)
5330 expand_omp (region->inner);
5331
b357f682 5332 saved_location = input_location;
726a989a
RB
5333 if (gimple_has_location (last_stmt (region->entry)))
5334 input_location = gimple_location (last_stmt (region->entry));
b357f682 5335
777f7f9a 5336 switch (region->type)
50674e96 5337 {
726a989a
RB
5338 case GIMPLE_OMP_PARALLEL:
5339 case GIMPLE_OMP_TASK:
a68ab351
JJ
5340 expand_omp_taskreg (region);
5341 break;
5342
726a989a 5343 case GIMPLE_OMP_FOR:
777f7f9a
RH
5344 expand_omp_for (region);
5345 break;
50674e96 5346
726a989a 5347 case GIMPLE_OMP_SECTIONS:
777f7f9a
RH
5348 expand_omp_sections (region);
5349 break;
50674e96 5350
726a989a 5351 case GIMPLE_OMP_SECTION:
777f7f9a 5352 /* Individual omp sections are handled together with their
726a989a 5353 parent GIMPLE_OMP_SECTIONS region. */
777f7f9a 5354 break;
50674e96 5355
726a989a 5356 case GIMPLE_OMP_SINGLE:
777f7f9a
RH
5357 expand_omp_single (region);
5358 break;
50674e96 5359
726a989a
RB
5360 case GIMPLE_OMP_MASTER:
5361 case GIMPLE_OMP_ORDERED:
5362 case GIMPLE_OMP_CRITICAL:
777f7f9a
RH
5363 expand_omp_synch (region);
5364 break;
50674e96 5365
726a989a 5366 case GIMPLE_OMP_ATOMIC_LOAD:
a509ebb5
RL
5367 expand_omp_atomic (region);
5368 break;
5369
777f7f9a
RH
5370 default:
5371 gcc_unreachable ();
5372 }
8d9c1aec 5373
b357f682 5374 input_location = saved_location;
50674e96
DN
5375 region = region->next;
5376 }
5377}
5378
5379
5380/* Helper for build_omp_regions. Scan the dominator tree starting at
5f40b3cb
ZD
5381 block BB. PARENT is the region that contains BB. If SINGLE_TREE is
5382 true, the function ends once a single tree is built (otherwise, a
5383 whole forest of OMP constructs may be built). */
50674e96
DN
5384
5385static void
5f40b3cb
ZD
5386build_omp_regions_1 (basic_block bb, struct omp_region *parent,
5387 bool single_tree)
50674e96 5388{
726a989a
RB
5389 gimple_stmt_iterator gsi;
5390 gimple stmt;
50674e96
DN
5391 basic_block son;
5392
726a989a
RB
5393 gsi = gsi_last_bb (bb);
5394 if (!gsi_end_p (gsi) && is_gimple_omp (gsi_stmt (gsi)))
50674e96
DN
5395 {
5396 struct omp_region *region;
726a989a 5397 enum gimple_code code;
50674e96 5398
726a989a
RB
5399 stmt = gsi_stmt (gsi);
5400 code = gimple_code (stmt);
5401 if (code == GIMPLE_OMP_RETURN)
50674e96
DN
5402 {
5403 /* STMT is the return point out of region PARENT. Mark it
5404 as the exit point and make PARENT the immediately
5405 enclosing region. */
5406 gcc_assert (parent);
5407 region = parent;
777f7f9a 5408 region->exit = bb;
50674e96 5409 parent = parent->outer;
50674e96 5410 }
726a989a 5411 else if (code == GIMPLE_OMP_ATOMIC_STORE)
a509ebb5 5412 {
726a989a
RB
5413 /* GIMPLE_OMP_ATOMIC_STORE is analogous to
5414 GIMPLE_OMP_RETURN, but matches with
5415 GIMPLE_OMP_ATOMIC_LOAD. */
a509ebb5 5416 gcc_assert (parent);
726a989a 5417 gcc_assert (parent->type == GIMPLE_OMP_ATOMIC_LOAD);
a509ebb5
RL
5418 region = parent;
5419 region->exit = bb;
5420 parent = parent->outer;
5421 }
5422
726a989a 5423 else if (code == GIMPLE_OMP_CONTINUE)
777f7f9a
RH
5424 {
5425 gcc_assert (parent);
5426 parent->cont = bb;
5427 }
726a989a 5428 else if (code == GIMPLE_OMP_SECTIONS_SWITCH)
e5c95afe 5429 {
726a989a
RB
5430 /* GIMPLE_OMP_SECTIONS_SWITCH is part of
5431 GIMPLE_OMP_SECTIONS, and we do nothing for it. */
5432 ;
e5c95afe 5433 }
50674e96
DN
5434 else
5435 {
5436 /* Otherwise, this directive becomes the parent for a new
5437 region. */
777f7f9a 5438 region = new_omp_region (bb, code, parent);
50674e96
DN
5439 parent = region;
5440 }
50674e96
DN
5441 }
5442
5f40b3cb
ZD
5443 if (single_tree && !parent)
5444 return;
5445
50674e96
DN
5446 for (son = first_dom_son (CDI_DOMINATORS, bb);
5447 son;
5448 son = next_dom_son (CDI_DOMINATORS, son))
5f40b3cb
ZD
5449 build_omp_regions_1 (son, parent, single_tree);
5450}
5451
5452/* Builds the tree of OMP regions rooted at ROOT, storing it to
5453 root_omp_region. */
5454
5455static void
5456build_omp_regions_root (basic_block root)
5457{
5458 gcc_assert (root_omp_region == NULL);
5459 build_omp_regions_1 (root, NULL, true);
5460 gcc_assert (root_omp_region != NULL);
50674e96
DN
5461}
5462
5f40b3cb
ZD
5463/* Expands omp construct (and its subconstructs) starting in HEAD. */
5464
5465void
5466omp_expand_local (basic_block head)
5467{
5468 build_omp_regions_root (head);
5469 if (dump_file && (dump_flags & TDF_DETAILS))
5470 {
5471 fprintf (dump_file, "\nOMP region tree\n\n");
5472 dump_omp_region (dump_file, root_omp_region, 0);
5473 fprintf (dump_file, "\n");
5474 }
5475
5476 remove_exit_barriers (root_omp_region);
5477 expand_omp (root_omp_region);
5478
5479 free_omp_regions ();
5480}
50674e96
DN
5481
5482/* Scan the CFG and build a tree of OMP regions, storing the root in
5483 root_omp_region. */
5484
5485static void
5486build_omp_regions (void)
5487{
777f7f9a 5488 gcc_assert (root_omp_region == NULL);
50674e96 5489 calculate_dominance_info (CDI_DOMINATORS);
5f40b3cb 5490 build_omp_regions_1 (ENTRY_BLOCK_PTR, NULL, false);
50674e96
DN
5491}
5492
50674e96
DN
5493/* Main entry point for expanding OMP-GIMPLE into runtime calls. */
5494
c2924966 5495static unsigned int
50674e96
DN
5496execute_expand_omp (void)
5497{
5498 build_omp_regions ();
5499
777f7f9a
RH
5500 if (!root_omp_region)
5501 return 0;
50674e96 5502
777f7f9a
RH
5503 if (dump_file)
5504 {
5505 fprintf (dump_file, "\nOMP region tree\n\n");
5506 dump_omp_region (dump_file, root_omp_region, 0);
5507 fprintf (dump_file, "\n");
50674e96 5508 }
777f7f9a
RH
5509
5510 remove_exit_barriers (root_omp_region);
5511
5512 expand_omp (root_omp_region);
5513
777f7f9a
RH
5514 cleanup_tree_cfg ();
5515
5516 free_omp_regions ();
5517
c2924966 5518 return 0;
50674e96
DN
5519}
5520
917948d3
ZD
5521/* OMP expansion -- the default pass, run before creation of SSA form. */
5522
50674e96
DN
5523static bool
5524gate_expand_omp (void)
5525{
1da2ed5f 5526 return (flag_openmp != 0 && !seen_error ());
50674e96
DN
5527}
5528
b8698a0f 5529struct gimple_opt_pass pass_expand_omp =
50674e96 5530{
8ddbbcae
JH
5531 {
5532 GIMPLE_PASS,
50674e96
DN
5533 "ompexp", /* name */
5534 gate_expand_omp, /* gate */
5535 execute_expand_omp, /* execute */
5536 NULL, /* sub */
5537 NULL, /* next */
5538 0, /* static_pass_number */
7072a650 5539 TV_NONE, /* tv_id */
50674e96 5540 PROP_gimple_any, /* properties_required */
535b544a 5541 0, /* properties_provided */
50674e96
DN
5542 0, /* properties_destroyed */
5543 0, /* todo_flags_start */
8ddbbcae
JH
5544 TODO_dump_func /* todo_flags_finish */
5545 }
50674e96
DN
5546};
5547\f
5548/* Routines to lower OpenMP directives into OMP-GIMPLE. */
5549
726a989a
RB
5550/* Lower the OpenMP sections directive in the current statement in GSI_P.
5551 CTX is the enclosing OMP context for the current statement. */
50674e96
DN
5552
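/* Illustrative sketch (shape only, an assumption about the emitted
   sequence rather than a verbatim dump): after this lowering a
   two-section construct looks roughly like

	<ilist: private/firstprivate setup>
	GIMPLE_OMP_SECTIONS <clauses, control var ".section">
	GIMPLE_OMP_SECTIONS_SWITCH
	GIMPLE_OMP_SECTION body1 GIMPLE_OMP_RETURN
	GIMPLE_OMP_SECTION body2 <lastprivate> GIMPLE_OMP_RETURN
	GIMPLE_OMP_CONTINUE (.section, .section)
	<reduction and dlist epilogues>
	GIMPLE_OMP_RETURN [nowait]

   which pass_expand_omp later drives via GOMP_sections_start and
   GOMP_sections_next.  */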
5553static void
726a989a 5554lower_omp_sections (gimple_stmt_iterator *gsi_p, omp_context *ctx)
50674e96 5555{
726a989a
RB
5556 tree block, control;
5557 gimple_stmt_iterator tgsi;
50674e96 5558 unsigned i, len;
726a989a
RB
5559 gimple stmt, new_stmt, bind, t;
5560 gimple_seq ilist, dlist, olist, new_body, body;
d406b663 5561 struct gimplify_ctx gctx;
50674e96 5562
726a989a 5563 stmt = gsi_stmt (*gsi_p);
50674e96 5564
d406b663 5565 push_gimplify_context (&gctx);
50674e96
DN
5566
5567 dlist = NULL;
5568 ilist = NULL;
726a989a
RB
5569 lower_rec_input_clauses (gimple_omp_sections_clauses (stmt),
5570 &ilist, &dlist, ctx);
50674e96 5571
726a989a
RB
5572 tgsi = gsi_start (gimple_omp_body (stmt));
5573 for (len = 0; !gsi_end_p (tgsi); len++, gsi_next (&tgsi))
50674e96
DN
5574 continue;
5575
726a989a
RB
5576 tgsi = gsi_start (gimple_omp_body (stmt));
5577 body = NULL;
5578 for (i = 0; i < len; i++, gsi_next (&tgsi))
50674e96
DN
5579 {
5580 omp_context *sctx;
726a989a 5581 gimple sec_start;
50674e96 5582
726a989a 5583 sec_start = gsi_stmt (tgsi);
50674e96
DN
5584 sctx = maybe_lookup_ctx (sec_start);
5585 gcc_assert (sctx);
5586
726a989a 5587 gimple_seq_add_stmt (&body, sec_start);
777f7f9a 5588
726a989a
RB
5589 lower_omp (gimple_omp_body (sec_start), sctx);
5590 gimple_seq_add_seq (&body, gimple_omp_body (sec_start));
5591 gimple_omp_set_body (sec_start, NULL);
50674e96
DN
5592
5593 if (i == len - 1)
5594 {
726a989a
RB
5595 gimple_seq l = NULL;
5596 lower_lastprivate_clauses (gimple_omp_sections_clauses (stmt), NULL,
50674e96 5597 &l, ctx);
726a989a
RB
5598 gimple_seq_add_seq (&body, l);
5599 gimple_omp_section_set_last (sec_start);
50674e96 5600 }
b8698a0f 5601
726a989a 5602 gimple_seq_add_stmt (&body, gimple_build_omp_return (false));
50674e96 5603 }
953ff289
DN
5604
5605 block = make_node (BLOCK);
726a989a 5606 bind = gimple_build_bind (NULL, body, block);
953ff289 5607
726a989a
RB
5608 olist = NULL;
5609 lower_reduction_clauses (gimple_omp_sections_clauses (stmt), &olist, ctx);
50674e96 5610
b357f682 5611 block = make_node (BLOCK);
726a989a 5612 new_stmt = gimple_build_bind (NULL, NULL, block);
50674e96 5613
b357f682 5614 pop_gimplify_context (new_stmt);
726a989a
RB
5615 gimple_bind_append_vars (new_stmt, ctx->block_vars);
5616 BLOCK_VARS (block) = gimple_bind_vars (bind);
b357f682
JJ
5617 if (BLOCK_VARS (block))
5618 TREE_USED (block) = 1;
5619
726a989a
RB
5620 new_body = NULL;
5621 gimple_seq_add_seq (&new_body, ilist);
5622 gimple_seq_add_stmt (&new_body, stmt);
5623 gimple_seq_add_stmt (&new_body, gimple_build_omp_sections_switch ());
5624 gimple_seq_add_stmt (&new_body, bind);
777f7f9a 5625
e5c95afe 5626 control = create_tmp_var (unsigned_type_node, ".section");
726a989a
RB
5627 t = gimple_build_omp_continue (control, control);
5628 gimple_omp_sections_set_control (stmt, control);
5629 gimple_seq_add_stmt (&new_body, t);
777f7f9a 5630
726a989a
RB
5631 gimple_seq_add_seq (&new_body, olist);
5632 gimple_seq_add_seq (&new_body, dlist);
50674e96 5633
726a989a 5634 new_body = maybe_catch_exception (new_body);
4a31b7ee 5635
726a989a
RB
5636 t = gimple_build_omp_return
5637 (!!find_omp_clause (gimple_omp_sections_clauses (stmt),
5638 OMP_CLAUSE_NOWAIT));
5639 gimple_seq_add_stmt (&new_body, t);
777f7f9a 5640
726a989a
RB
5641 gimple_bind_set_body (new_stmt, new_body);
5642 gimple_omp_set_body (stmt, NULL);
50674e96 5643
726a989a 5644 gsi_replace (gsi_p, new_stmt, true);
953ff289
DN
5645}
5646
5647
50674e96 5648/* A subroutine of lower_omp_single. Expand the simple form of
726a989a 5649 a GIMPLE_OMP_SINGLE, without a copyprivate clause:
953ff289
DN
5650
5651 if (GOMP_single_start ())
5652 BODY;
5653 [ GOMP_barrier (); ] -> unless 'nowait' is present.
50674e96
DN
5654
5655 FIXME. It may be better to delay expanding the logic of this until
5656 pass_expand_omp. The expanded logic may make the job more difficult
5657 for a synchronization analysis pass. */
953ff289
DN
5658
5659static void
726a989a 5660lower_omp_single_simple (gimple single_stmt, gimple_seq *pre_p)
953ff289 5661{
c2255bc4
AH
5662 location_t loc = gimple_location (single_stmt);
5663 tree tlabel = create_artificial_label (loc);
5664 tree flabel = create_artificial_label (loc);
726a989a
RB
5665 gimple call, cond;
5666 tree lhs, decl;
5667
5668 decl = built_in_decls[BUILT_IN_GOMP_SINGLE_START];
5669 lhs = create_tmp_var (TREE_TYPE (TREE_TYPE (decl)), NULL);
5670 call = gimple_build_call (decl, 0);
5671 gimple_call_set_lhs (call, lhs);
5672 gimple_seq_add_stmt (pre_p, call);
5673
5674 cond = gimple_build_cond (EQ_EXPR, lhs,
db3927fb
AH
5675 fold_convert_loc (loc, TREE_TYPE (lhs),
5676 boolean_true_node),
726a989a
RB
5677 tlabel, flabel);
5678 gimple_seq_add_stmt (pre_p, cond);
5679 gimple_seq_add_stmt (pre_p, gimple_build_label (tlabel));
5680 gimple_seq_add_seq (pre_p, gimple_omp_body (single_stmt));
5681 gimple_seq_add_stmt (pre_p, gimple_build_label (flabel));
953ff289
DN
5682}
5683
50674e96
DN
5684
5685/* A subroutine of lower_omp_single. Expand the simple form of
726a989a 5686 a GIMPLE_OMP_SINGLE, with a copyprivate clause:
953ff289
DN
5687
5688 #pragma omp single copyprivate (a, b, c)
5689
5690 Create a new structure to hold copies of 'a', 'b' and 'c' and emit:
5691
5692 {
5693 if ((copyout_p = GOMP_single_copy_start ()) == NULL)
5694 {
5695 BODY;
5696 copyout.a = a;
5697 copyout.b = b;
5698 copyout.c = c;
5699 GOMP_single_copy_end (&copyout);
5700 }
5701 else
5702 {
5703 a = copyout_p->a;
5704 b = copyout_p->b;
5705 c = copyout_p->c;
5706 }
5707 GOMP_barrier ();
5708 }
50674e96
DN
5709
5710 FIXME. It may be better to delay expanding the logic of this until
5711 pass_expand_omp. The expanded logic may make the job more difficult
5712 for a synchronization analysis pass. */
953ff289
DN
5713
5714static void
726a989a 5715lower_omp_single_copy (gimple single_stmt, gimple_seq *pre_p, omp_context *ctx)
953ff289 5716{
726a989a
RB
5717 tree ptr_type, t, l0, l1, l2;
5718 gimple_seq copyin_seq;
c2255bc4 5719 location_t loc = gimple_location (single_stmt);
953ff289
DN
5720
5721 ctx->sender_decl = create_tmp_var (ctx->record_type, ".omp_copy_o");
5722
5723 ptr_type = build_pointer_type (ctx->record_type);
5724 ctx->receiver_decl = create_tmp_var (ptr_type, ".omp_copy_i");
5725
c2255bc4
AH
5726 l0 = create_artificial_label (loc);
5727 l1 = create_artificial_label (loc);
5728 l2 = create_artificial_label (loc);
953ff289 5729
db3927fb
AH
5730 t = build_call_expr_loc (loc, built_in_decls[BUILT_IN_GOMP_SINGLE_COPY_START], 0);
5731 t = fold_convert_loc (loc, ptr_type, t);
726a989a 5732 gimplify_assign (ctx->receiver_decl, t, pre_p);
953ff289
DN
5733
5734 t = build2 (EQ_EXPR, boolean_type_node, ctx->receiver_decl,
5735 build_int_cst (ptr_type, 0));
5736 t = build3 (COND_EXPR, void_type_node, t,
5737 build_and_jump (&l0), build_and_jump (&l1));
5738 gimplify_and_add (t, pre_p);
5739
726a989a 5740 gimple_seq_add_stmt (pre_p, gimple_build_label (l0));
953ff289 5741
726a989a 5742 gimple_seq_add_seq (pre_p, gimple_omp_body (single_stmt));
953ff289
DN
5743
5744 copyin_seq = NULL;
726a989a 5745 lower_copyprivate_clauses (gimple_omp_single_clauses (single_stmt), pre_p,
953ff289
DN
5746 &copyin_seq, ctx);
5747
db3927fb
AH
5748 t = build_fold_addr_expr_loc (loc, ctx->sender_decl);
5749 t = build_call_expr_loc (loc, built_in_decls[BUILT_IN_GOMP_SINGLE_COPY_END],
5750 1, t);
953ff289
DN
5751 gimplify_and_add (t, pre_p);
5752
5753 t = build_and_jump (&l2);
5754 gimplify_and_add (t, pre_p);
5755
726a989a 5756 gimple_seq_add_stmt (pre_p, gimple_build_label (l1));
953ff289 5757
726a989a 5758 gimple_seq_add_seq (pre_p, copyin_seq);
953ff289 5759
726a989a 5760 gimple_seq_add_stmt (pre_p, gimple_build_label (l2));
953ff289
DN
5761}
5762
50674e96 5763
953ff289
DN
5764/* Lower code for an OpenMP single directive. */
5765
5766static void
726a989a 5767lower_omp_single (gimple_stmt_iterator *gsi_p, omp_context *ctx)
953ff289 5768{
726a989a
RB
5769 tree block;
5770 gimple t, bind, single_stmt = gsi_stmt (*gsi_p);
5771 gimple_seq bind_body, dlist;
d406b663 5772 struct gimplify_ctx gctx;
953ff289 5773
d406b663 5774 push_gimplify_context (&gctx);
953ff289 5775
726a989a
RB
5776 bind_body = NULL;
5777 lower_rec_input_clauses (gimple_omp_single_clauses (single_stmt),
5778 &bind_body, &dlist, ctx);
5779 lower_omp (gimple_omp_body (single_stmt), ctx);
953ff289 5780
726a989a 5781 gimple_seq_add_stmt (&bind_body, single_stmt);
953ff289
DN
5782
5783 if (ctx->record_type)
726a989a 5784 lower_omp_single_copy (single_stmt, &bind_body, ctx);
953ff289 5785 else
726a989a
RB
5786 lower_omp_single_simple (single_stmt, &bind_body);
5787
5788 gimple_omp_set_body (single_stmt, NULL);
953ff289 5789
726a989a 5790 gimple_seq_add_seq (&bind_body, dlist);
777f7f9a 5791
726a989a 5792 bind_body = maybe_catch_exception (bind_body);
777f7f9a 5793
b8698a0f 5794 t = gimple_build_omp_return
726a989a
RB
5795 (!!find_omp_clause (gimple_omp_single_clauses (single_stmt),
5796 OMP_CLAUSE_NOWAIT));
5797 gimple_seq_add_stmt (&bind_body, t);
4a31b7ee 5798
726a989a
RB
5799 block = make_node (BLOCK);
5800 bind = gimple_build_bind (NULL, bind_body, block);
777f7f9a 5801
953ff289 5802 pop_gimplify_context (bind);
50674e96 5803
726a989a
RB
5804 gimple_bind_append_vars (bind, ctx->block_vars);
5805 BLOCK_VARS (block) = ctx->block_vars;
5806 gsi_replace (gsi_p, bind, true);
b357f682
JJ
5807 if (BLOCK_VARS (block))
5808 TREE_USED (block) = 1;
953ff289
DN
5809}
5810
50674e96 5811
953ff289
DN
5812/* Lower code for an OpenMP master directive. */
5813
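/* The lowered shape is roughly

	if (omp_get_thread_num () != 0) goto lab;
	BODY;
      lab:
	GIMPLE_OMP_RETURN (nowait)

   A master construct has no implied barrier, hence the nowait return.  */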
5814static void
726a989a 5815lower_omp_master (gimple_stmt_iterator *gsi_p, omp_context *ctx)
953ff289 5816{
726a989a
RB
5817 tree block, lab = NULL, x;
5818 gimple stmt = gsi_stmt (*gsi_p), bind;
db3927fb 5819 location_t loc = gimple_location (stmt);
726a989a 5820 gimple_seq tseq;
d406b663 5821 struct gimplify_ctx gctx;
953ff289 5822
d406b663 5823 push_gimplify_context (&gctx);
953ff289
DN
5824
5825 block = make_node (BLOCK);
726a989a
RB
5826 bind = gimple_build_bind (NULL, gimple_seq_alloc_with_stmt (stmt),
5827 block);
777f7f9a 5828
db3927fb 5829 x = build_call_expr_loc (loc, built_in_decls[BUILT_IN_OMP_GET_THREAD_NUM], 0);
953ff289
DN
5830 x = build2 (EQ_EXPR, boolean_type_node, x, integer_zero_node);
5831 x = build3 (COND_EXPR, void_type_node, x, NULL, build_and_jump (&lab));
726a989a
RB
5832 tseq = NULL;
5833 gimplify_and_add (x, &tseq);
5834 gimple_bind_add_seq (bind, tseq);
953ff289 5835
726a989a
RB
5836 lower_omp (gimple_omp_body (stmt), ctx);
5837 gimple_omp_set_body (stmt, maybe_catch_exception (gimple_omp_body (stmt)));
5838 gimple_bind_add_seq (bind, gimple_omp_body (stmt));
5839 gimple_omp_set_body (stmt, NULL);
953ff289 5840
726a989a 5841 gimple_bind_add_stmt (bind, gimple_build_label (lab));
777f7f9a 5842
726a989a 5843 gimple_bind_add_stmt (bind, gimple_build_omp_return (true));
777f7f9a 5844
953ff289 5845 pop_gimplify_context (bind);
50674e96 5846
726a989a
RB
5847 gimple_bind_append_vars (bind, ctx->block_vars);
5848 BLOCK_VARS (block) = ctx->block_vars;
5849 gsi_replace (gsi_p, bind, true);
953ff289
DN
5850}
5851
50674e96 5852
953ff289
DN
5853/* Lower code for an OpenMP ordered directive. */
5854
5855static void
726a989a 5856lower_omp_ordered (gimple_stmt_iterator *gsi_p, omp_context *ctx)
953ff289 5857{
726a989a
RB
5858 tree block;
5859 gimple stmt = gsi_stmt (*gsi_p), bind, x;
d406b663 5860 struct gimplify_ctx gctx;
953ff289 5861
d406b663 5862 push_gimplify_context (&gctx);
953ff289
DN
5863
5864 block = make_node (BLOCK);
726a989a
RB
5865 bind = gimple_build_bind (NULL, gimple_seq_alloc_with_stmt (stmt),
5866 block);
777f7f9a 5867
726a989a
RB
5868 x = gimple_build_call (built_in_decls[BUILT_IN_GOMP_ORDERED_START], 0);
5869 gimple_bind_add_stmt (bind, x);
953ff289 5870
726a989a
RB
5871 lower_omp (gimple_omp_body (stmt), ctx);
5872 gimple_omp_set_body (stmt, maybe_catch_exception (gimple_omp_body (stmt)));
5873 gimple_bind_add_seq (bind, gimple_omp_body (stmt));
5874 gimple_omp_set_body (stmt, NULL);
953ff289 5875
726a989a
RB
5876 x = gimple_build_call (built_in_decls[BUILT_IN_GOMP_ORDERED_END], 0);
5877 gimple_bind_add_stmt (bind, x);
777f7f9a 5878
726a989a 5879 gimple_bind_add_stmt (bind, gimple_build_omp_return (true));
777f7f9a 5880
953ff289 5881 pop_gimplify_context (bind);
50674e96 5882
726a989a
RB
5883 gimple_bind_append_vars (bind, ctx->block_vars);
5884 BLOCK_VARS (block) = gimple_bind_vars (bind);
5885 gsi_replace (gsi_p, bind, true);
953ff289
DN
5886}
5887
953ff289 5888
726a989a 5889/* Gimplify a GIMPLE_OMP_CRITICAL statement. This is a relatively simple
953ff289
DN
5890 substitution of a couple of function calls. But the NAMED case
5891 requires that the languages coordinate on a symbol name. It is therefore
5892 best put here in common code. */
5893
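/* For example, "#pragma omp critical (lock1)" lowers to

	GOMP_critical_name_start (&.gomp_critical_user_lock1);
	BODY;
	GOMP_critical_name_end (&.gomp_critical_user_lock1);

   where .gomp_critical_user_lock1 is the common symbol created below, so
   every translation unit that uses the same name shares one mutex.  An
   unnamed critical uses GOMP_critical_start/GOMP_critical_end instead.  */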
5894static GTY((param1_is (tree), param2_is (tree)))
5895 splay_tree critical_name_mutexes;
5896
5897static void
726a989a 5898lower_omp_critical (gimple_stmt_iterator *gsi_p, omp_context *ctx)
953ff289 5899{
726a989a
RB
5900 tree block;
5901 tree name, lock, unlock;
5902 gimple stmt = gsi_stmt (*gsi_p), bind;
db3927fb 5903 location_t loc = gimple_location (stmt);
726a989a 5904 gimple_seq tbody;
d406b663 5905 struct gimplify_ctx gctx;
953ff289 5906
726a989a 5907 name = gimple_omp_critical_name (stmt);
953ff289
DN
5908 if (name)
5909 {
5039610b 5910 tree decl;
953ff289
DN
5911 splay_tree_node n;
5912
5913 if (!critical_name_mutexes)
5914 critical_name_mutexes
a9429e29
LB
5915 = splay_tree_new_ggc (splay_tree_compare_pointers,
5916 ggc_alloc_splay_tree_tree_node_tree_node_splay_tree_s,
5917 ggc_alloc_splay_tree_tree_node_tree_node_splay_tree_node_s);
953ff289
DN
5918
5919 n = splay_tree_lookup (critical_name_mutexes, (splay_tree_key) name);
5920 if (n == NULL)
5921 {
5922 char *new_str;
5923
5924 decl = create_tmp_var_raw (ptr_type_node, NULL);
5925
5926 new_str = ACONCAT ((".gomp_critical_user_",
5927 IDENTIFIER_POINTER (name), NULL));
5928 DECL_NAME (decl) = get_identifier (new_str);
5929 TREE_PUBLIC (decl) = 1;
5930 TREE_STATIC (decl) = 1;
5931 DECL_COMMON (decl) = 1;
5932 DECL_ARTIFICIAL (decl) = 1;
5933 DECL_IGNORED_P (decl) = 1;
8a4a83ed 5934 varpool_finalize_decl (decl);
953ff289
DN
5935
5936 splay_tree_insert (critical_name_mutexes, (splay_tree_key) name,
5937 (splay_tree_value) decl);
5938 }
5939 else
5940 decl = (tree) n->value;
5941
953ff289 5942 lock = built_in_decls[BUILT_IN_GOMP_CRITICAL_NAME_START];
db3927fb 5943 lock = build_call_expr_loc (loc, lock, 1, build_fold_addr_expr_loc (loc, decl));
953ff289 5944
953ff289 5945 unlock = built_in_decls[BUILT_IN_GOMP_CRITICAL_NAME_END];
db3927fb
AH
5946 unlock = build_call_expr_loc (loc, unlock, 1,
5947 build_fold_addr_expr_loc (loc, decl));
953ff289
DN
5948 }
5949 else
5950 {
5951 lock = built_in_decls[BUILT_IN_GOMP_CRITICAL_START];
db3927fb 5952 lock = build_call_expr_loc (loc, lock, 0);
953ff289
DN
5953
5954 unlock = built_in_decls[BUILT_IN_GOMP_CRITICAL_END];
db3927fb 5955 unlock = build_call_expr_loc (loc, unlock, 0);
953ff289
DN
5956 }
5957
d406b663 5958 push_gimplify_context (&gctx);
953ff289
DN
5959
5960 block = make_node (BLOCK);
726a989a 5961 bind = gimple_build_bind (NULL, gimple_seq_alloc_with_stmt (stmt), block);
777f7f9a 5962
726a989a
RB
5963 tbody = gimple_bind_body (bind);
5964 gimplify_and_add (lock, &tbody);
5965 gimple_bind_set_body (bind, tbody);
953ff289 5966
726a989a
RB
5967 lower_omp (gimple_omp_body (stmt), ctx);
5968 gimple_omp_set_body (stmt, maybe_catch_exception (gimple_omp_body (stmt)));
5969 gimple_bind_add_seq (bind, gimple_omp_body (stmt));
5970 gimple_omp_set_body (stmt, NULL);
953ff289 5971
726a989a
RB
5972 tbody = gimple_bind_body (bind);
5973 gimplify_and_add (unlock, &tbody);
5974 gimple_bind_set_body (bind, tbody);
777f7f9a 5975
726a989a 5976 gimple_bind_add_stmt (bind, gimple_build_omp_return (true));
953ff289
DN
5977
5978 pop_gimplify_context (bind);
726a989a
RB
5979 gimple_bind_append_vars (bind, ctx->block_vars);
5980 BLOCK_VARS (block) = gimple_bind_vars (bind);
5981 gsi_replace (gsi_p, bind, true);
50674e96
DN
5982}
5983
5984
5985/* A subroutine of lower_omp_for. Generate code to emit the predicate
5986 for a lastprivate clause. Given a loop control predicate of (V
5987 cond N2), we gate the clause on (!(V cond N2)). The lowered form
3d55c64b
JJ
5988 is appended to *DLIST, iterator initialization is appended to
5989 *BODY_P. */
50674e96
DN
5990
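/* For example, with

	#pragma omp for lastprivate (x)
	for (i = 0; i < N; i++) x = f (i);

   the copy-out of the private x is gated on !(i < N), i.e. (i >= N);
   since the step is 1 this is strengthened below to (i == N), which VRP
   can often fold, and i is pre-initialized so that threads running no
   iterations never satisfy the gate by accident.  */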
5991static void
726a989a
RB
5992lower_omp_for_lastprivate (struct omp_for_data *fd, gimple_seq *body_p,
5993 gimple_seq *dlist, struct omp_context *ctx)
50674e96 5994{
726a989a 5995 tree clauses, cond, vinit;
50674e96 5996 enum tree_code cond_code;
726a989a 5997 gimple_seq stmts;
b8698a0f 5998
a68ab351 5999 cond_code = fd->loop.cond_code;
50674e96
DN
6000 cond_code = cond_code == LT_EXPR ? GE_EXPR : LE_EXPR;
6001
6002 /* When possible, use a strict equality expression. This can let VRP
6003 type optimizations deduce the value and remove a copy. */
a68ab351 6004 if (host_integerp (fd->loop.step, 0))
50674e96 6005 {
a68ab351 6006 HOST_WIDE_INT step = TREE_INT_CST_LOW (fd->loop.step);
50674e96
DN
6007 if (step == 1 || step == -1)
6008 cond_code = EQ_EXPR;
6009 }
6010
a68ab351 6011 cond = build2 (cond_code, boolean_type_node, fd->loop.v, fd->loop.n2);
50674e96 6012
726a989a 6013 clauses = gimple_omp_for_clauses (fd->for_stmt);
3d55c64b
JJ
6014 stmts = NULL;
6015 lower_lastprivate_clauses (clauses, cond, &stmts, ctx);
726a989a 6016 if (!gimple_seq_empty_p (stmts))
3d55c64b 6017 {
726a989a 6018 gimple_seq_add_seq (&stmts, *dlist);
a68ab351 6019 *dlist = stmts;
3d55c64b
JJ
6020
6021 /* Optimize: v = 0; is usually cheaper than v = some_other_constant. */
a68ab351 6022 vinit = fd->loop.n1;
3d55c64b 6023 if (cond_code == EQ_EXPR
a68ab351
JJ
6024 && host_integerp (fd->loop.n2, 0)
6025 && ! integer_zerop (fd->loop.n2))
6026 vinit = build_int_cst (TREE_TYPE (fd->loop.v), 0);
3d55c64b
JJ
6027
6028 /* Initialize the iterator variable, so that threads that don't execute
6029 any iterations don't execute the lastprivate clauses by accident. */
726a989a 6030 gimplify_assign (fd->loop.v, vinit, body_p);
3d55c64b 6031 }
50674e96
DN
6032}
6033
6034
6035/* Lower code for an OpenMP loop directive. */
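/* Illustrative sketch of the sequence built below (shape only):

	<ilist: clause setup>  <pre-body>  <lowered header temporaries>
	GIMPLE_OMP_FOR (V = VAL1; V {<|>|<=|>=} VAL2; V = V [+-] VAL3)
	<body>
	GIMPLE_OMP_CONTINUE (V, V)
	<reductions>  <dlist, including lastprivate copy-out>
	GIMPLE_OMP_RETURN [nowait]  */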
6036
6037static void
726a989a 6038lower_omp_for (gimple_stmt_iterator *gsi_p, omp_context *ctx)
50674e96 6039{
726a989a 6040 tree *rhs_p, block;
50674e96 6041 struct omp_for_data fd;
726a989a 6042 gimple stmt = gsi_stmt (*gsi_p), new_stmt;
0f900dfa 6043 gimple_seq omp_for_body, body, dlist;
726a989a 6044 size_t i;
d406b663 6045 struct gimplify_ctx gctx;
50674e96 6046
d406b663 6047 push_gimplify_context (&gctx);
50674e96 6048
726a989a
RB
6049 lower_omp (gimple_omp_for_pre_body (stmt), ctx);
6050 lower_omp (gimple_omp_body (stmt), ctx);
50674e96 6051
b357f682 6052 block = make_node (BLOCK);
726a989a 6053 new_stmt = gimple_build_bind (NULL, NULL, block);
b357f682 6054
50674e96
DN
6055 /* Move declaration of temporaries in the loop body before we make
6056 it go away. */
726a989a
RB
6057 omp_for_body = gimple_omp_body (stmt);
6058 if (!gimple_seq_empty_p (omp_for_body)
6059 && gimple_code (gimple_seq_first_stmt (omp_for_body)) == GIMPLE_BIND)
6060 {
6061 tree vars = gimple_bind_vars (gimple_seq_first_stmt (omp_for_body));
6062 gimple_bind_append_vars (new_stmt, vars);
6063 }
50674e96 6064
726a989a 6065 /* The pre-body and input clauses go before the lowered GIMPLE_OMP_FOR. */
50674e96 6066 dlist = NULL;
726a989a
RB
6067 body = NULL;
6068 lower_rec_input_clauses (gimple_omp_for_clauses (stmt), &body, &dlist, ctx);
6069 gimple_seq_add_seq (&body, gimple_omp_for_pre_body (stmt));
50674e96
DN
6070
6071 /* Lower the header expressions. At this point, we can assume that
6072 the header is of the form:
6073
6074 #pragma omp for (V = VAL1; V {<|>|<=|>=} VAL2; V = V [+-] VAL3)
6075
6076 We just need to make sure that VAL1, VAL2 and VAL3 are lowered
6077 using the .omp_data_s mapping, if needed. */
726a989a 6078 for (i = 0; i < gimple_omp_for_collapse (stmt); i++)
a68ab351 6079 {
726a989a 6080 rhs_p = gimple_omp_for_initial_ptr (stmt, i);
a68ab351 6081 if (!is_gimple_min_invariant (*rhs_p))
726a989a 6082 *rhs_p = get_formal_tmp_var (*rhs_p, &body);
a68ab351 6083
726a989a 6084 rhs_p = gimple_omp_for_final_ptr (stmt, i);
a68ab351 6085 if (!is_gimple_min_invariant (*rhs_p))
726a989a 6086 *rhs_p = get_formal_tmp_var (*rhs_p, &body);
a68ab351 6087
726a989a 6088 rhs_p = &TREE_OPERAND (gimple_omp_for_incr (stmt, i), 1);
a68ab351 6089 if (!is_gimple_min_invariant (*rhs_p))
726a989a 6090 *rhs_p = get_formal_tmp_var (*rhs_p, &body);
a68ab351 6091 }
50674e96
DN
6092
6093 /* Once lowered, extract the bounds and clauses. */
a68ab351 6094 extract_omp_for_data (stmt, &fd, NULL);
50674e96 6095
726a989a 6096 lower_omp_for_lastprivate (&fd, &body, &dlist, ctx);
50674e96 6097
726a989a
RB
6098 gimple_seq_add_stmt (&body, stmt);
6099 gimple_seq_add_seq (&body, gimple_omp_body (stmt));
777f7f9a 6100
726a989a
RB
6101 gimple_seq_add_stmt (&body, gimple_build_omp_continue (fd.loop.v,
6102 fd.loop.v));
777f7f9a 6103
50674e96 6104 /* After the loop, add exit clauses. */
726a989a
RB
6105 lower_reduction_clauses (gimple_omp_for_clauses (stmt), &body, ctx);
6106 gimple_seq_add_seq (&body, dlist);
50674e96 6107
726a989a 6108 body = maybe_catch_exception (body);
4a31b7ee 6109
777f7f9a 6110 /* Region exit marker goes at the end of the loop body. */
726a989a 6111 gimple_seq_add_stmt (&body, gimple_build_omp_return (fd.have_nowait));
50674e96 6112
b357f682 6113 pop_gimplify_context (new_stmt);
726a989a
RB
6114
6115 gimple_bind_append_vars (new_stmt, ctx->block_vars);
6116 BLOCK_VARS (block) = gimple_bind_vars (new_stmt);
b357f682
JJ
6117 if (BLOCK_VARS (block))
6118 TREE_USED (block) = 1;
50674e96 6119
726a989a
RB
6120 gimple_bind_set_body (new_stmt, body);
6121 gimple_omp_set_body (stmt, NULL);
6122 gimple_omp_for_set_pre_body (stmt, NULL);
6123 gsi_replace (gsi_p, new_stmt, true);
953ff289
DN
6124}
6125
b8698a0f 6126/* Callback for walk_stmts. Check if the current statement only contains
726a989a 6127 GIMPLE_OMP_FOR or GIMPLE_OMP_SECTIONS. */
69f1837b
JJ
6128
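/* For example, in "#pragma omp parallel for" the parallel body contains
   exactly one GIMPLE_OMP_FOR and nothing else, so the walk leaves *INFO
   at 1 and the parallel is marked combined; any other statement or a
   second worksharing construct forces *INFO to -1.  */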
6129static tree
726a989a
RB
6130check_combined_parallel (gimple_stmt_iterator *gsi_p,
6131 bool *handled_ops_p,
6132 struct walk_stmt_info *wi)
69f1837b 6133{
d3bfe4de 6134 int *info = (int *) wi->info;
726a989a 6135 gimple stmt = gsi_stmt (*gsi_p);
69f1837b 6136
726a989a
RB
6137 *handled_ops_p = true;
6138 switch (gimple_code (stmt))
69f1837b 6139 {
726a989a
RB
6140 WALK_SUBSTMTS;
6141
6142 case GIMPLE_OMP_FOR:
6143 case GIMPLE_OMP_SECTIONS:
69f1837b
JJ
6144 *info = *info == 0 ? 1 : -1;
6145 break;
6146 default:
6147 *info = -1;
6148 break;
6149 }
6150 return NULL;
6151}
50674e96 6152
a68ab351
JJ
6153struct omp_taskcopy_context
6154{
6155 /* This field must be at the beginning, as we do "inheritance": Some
6156 callback functions for tree-inline.c (e.g., omp_copy_decl)
6157 receive a copy_body_data pointer that is up-casted to an
6158 omp_context pointer. */
6159 copy_body_data cb;
6160 omp_context *ctx;
6161};
6162
6163static tree
6164task_copyfn_copy_decl (tree var, copy_body_data *cb)
6165{
6166 struct omp_taskcopy_context *tcctx = (struct omp_taskcopy_context *) cb;
6167
6168 if (splay_tree_lookup (tcctx->ctx->sfield_map, (splay_tree_key) var))
6169 return create_tmp_var (TREE_TYPE (var), NULL);
6170
6171 return var;
6172}
6173
6174static tree
6175task_copyfn_remap_type (struct omp_taskcopy_context *tcctx, tree orig_type)
6176{
6177 tree name, new_fields = NULL, type, f;
6178
6179 type = lang_hooks.types.make_type (RECORD_TYPE);
6180 name = DECL_NAME (TYPE_NAME (orig_type));
c2255bc4
AH
6181 name = build_decl (gimple_location (tcctx->ctx->stmt),
6182 TYPE_DECL, name, type);
a68ab351
JJ
6183 TYPE_NAME (type) = name;
6184
6185 for (f = TYPE_FIELDS (orig_type); f ; f = TREE_CHAIN (f))
6186 {
6187 tree new_f = copy_node (f);
6188 DECL_CONTEXT (new_f) = type;
6189 TREE_TYPE (new_f) = remap_type (TREE_TYPE (f), &tcctx->cb);
6190 TREE_CHAIN (new_f) = new_fields;
726a989a
RB
6191 walk_tree (&DECL_SIZE (new_f), copy_tree_body_r, &tcctx->cb, NULL);
6192 walk_tree (&DECL_SIZE_UNIT (new_f), copy_tree_body_r, &tcctx->cb, NULL);
6193 walk_tree (&DECL_FIELD_OFFSET (new_f), copy_tree_body_r,
6194 &tcctx->cb, NULL);
a68ab351
JJ
6195 new_fields = new_f;
6196 *pointer_map_insert (tcctx->cb.decl_map, f) = new_f;
6197 }
6198 TYPE_FIELDS (type) = nreverse (new_fields);
6199 layout_type (type);
6200 return type;
6201}
6202
6203/* Create task copyfn: the function that copies shared pointers and
   copy-constructs firstprivate data from the structure filled in by the
   encountering thread (srecord_type) into the structure passed to the
   task body (record_type). */
6204
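/* Illustrative sketch (struct and field names are placeholders): for
   "#pragma omp task shared (b) firstprivate (a)" the generated copyfn
   is roughly

	void copyfn (struct record *dst, struct srecord *src)
	{
	  dst->b = src->b;	// copy the shared pointer
	  dst->a = src->a;	// or the copy constructor for C++
	}

   with the extra passes below handling VLA firstprivates and variably
   modified types.  */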
6205static void
726a989a 6206create_task_copyfn (gimple task_stmt, omp_context *ctx)
a68ab351
JJ
6207{
6208 struct function *child_cfun;
6209 tree child_fn, t, c, src, dst, f, sf, arg, sarg, decl;
6210 tree record_type, srecord_type, bind, list;
6211 bool record_needs_remap = false, srecord_needs_remap = false;
6212 splay_tree_node n;
6213 struct omp_taskcopy_context tcctx;
d406b663 6214 struct gimplify_ctx gctx;
db3927fb 6215 location_t loc = gimple_location (task_stmt);
a68ab351 6216
726a989a 6217 child_fn = gimple_omp_task_copy_fn (task_stmt);
a68ab351
JJ
6218 child_cfun = DECL_STRUCT_FUNCTION (child_fn);
6219 gcc_assert (child_cfun->cfg == NULL);
6220 child_cfun->dont_save_pending_sizes_p = 1;
6221 DECL_SAVED_TREE (child_fn) = alloc_stmt_list ();
6222
6223 /* Reset DECL_CONTEXT on function arguments. */
6224 for (t = DECL_ARGUMENTS (child_fn); t; t = TREE_CHAIN (t))
6225 DECL_CONTEXT (t) = child_fn;
6226
6227 /* Populate the function. */
d406b663 6228 push_gimplify_context (&gctx);
a68ab351
JJ
6229 current_function_decl = child_fn;
6230
6231 bind = build3 (BIND_EXPR, void_type_node, NULL, NULL, NULL);
6232 TREE_SIDE_EFFECTS (bind) = 1;
6233 list = NULL;
6234 DECL_SAVED_TREE (child_fn) = bind;
726a989a 6235 DECL_SOURCE_LOCATION (child_fn) = gimple_location (task_stmt);
a68ab351
JJ
6236
6237 /* Remap src and dst argument types if needed. */
6238 record_type = ctx->record_type;
6239 srecord_type = ctx->srecord_type;
6240 for (f = TYPE_FIELDS (record_type); f ; f = TREE_CHAIN (f))
6241 if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
6242 {
6243 record_needs_remap = true;
6244 break;
6245 }
6246 for (f = TYPE_FIELDS (srecord_type); f ; f = TREE_CHAIN (f))
6247 if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
6248 {
6249 srecord_needs_remap = true;
6250 break;
6251 }
6252
6253 if (record_needs_remap || srecord_needs_remap)
6254 {
6255 memset (&tcctx, '\0', sizeof (tcctx));
6256 tcctx.cb.src_fn = ctx->cb.src_fn;
6257 tcctx.cb.dst_fn = child_fn;
6258 tcctx.cb.src_node = cgraph_node (tcctx.cb.src_fn);
6259 tcctx.cb.dst_node = tcctx.cb.src_node;
6260 tcctx.cb.src_cfun = ctx->cb.src_cfun;
6261 tcctx.cb.copy_decl = task_copyfn_copy_decl;
1d65f45c 6262 tcctx.cb.eh_lp_nr = 0;
a68ab351
JJ
6263 tcctx.cb.transform_call_graph_edges = CB_CGE_MOVE;
6264 tcctx.cb.decl_map = pointer_map_create ();
6265 tcctx.ctx = ctx;
6266
6267 if (record_needs_remap)
6268 record_type = task_copyfn_remap_type (&tcctx, record_type);
6269 if (srecord_needs_remap)
6270 srecord_type = task_copyfn_remap_type (&tcctx, srecord_type);
6271 }
6272 else
6273 tcctx.cb.decl_map = NULL;
6274
6275 push_cfun (child_cfun);
6276
6277 arg = DECL_ARGUMENTS (child_fn);
6278 TREE_TYPE (arg) = build_pointer_type (record_type);
6279 sarg = TREE_CHAIN (arg);
6280 TREE_TYPE (sarg) = build_pointer_type (srecord_type);
6281
6282 /* First pass: initialize temporaries used in record_type and srecord_type
6283 sizes and field offsets. */
6284 if (tcctx.cb.decl_map)
726a989a 6285 for (c = gimple_omp_task_clauses (task_stmt); c; c = OMP_CLAUSE_CHAIN (c))
a68ab351
JJ
6286 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_FIRSTPRIVATE)
6287 {
6288 tree *p;
6289
6290 decl = OMP_CLAUSE_DECL (c);
6291 p = (tree *) pointer_map_contains (tcctx.cb.decl_map, decl);
6292 if (p == NULL)
6293 continue;
6294 n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
6295 sf = (tree) n->value;
6296 sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
70f34814 6297 src = build_simple_mem_ref_loc (loc, sarg);
a68ab351 6298 src = build3 (COMPONENT_REF, TREE_TYPE (sf), src, sf, NULL);
726a989a 6299 t = build2 (MODIFY_EXPR, TREE_TYPE (*p), *p, src);
a68ab351
JJ
6300 append_to_statement_list (t, &list);
6301 }
6302
6303 /* Second pass: copy shared var pointers and copy construct non-VLA
6304 firstprivate vars. */
726a989a 6305 for (c = gimple_omp_task_clauses (task_stmt); c; c = OMP_CLAUSE_CHAIN (c))
a68ab351
JJ
6306 switch (OMP_CLAUSE_CODE (c))
6307 {
6308 case OMP_CLAUSE_SHARED:
6309 decl = OMP_CLAUSE_DECL (c);
6310 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
6311 if (n == NULL)
6312 break;
6313 f = (tree) n->value;
6314 if (tcctx.cb.decl_map)
6315 f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
6316 n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
6317 sf = (tree) n->value;
6318 if (tcctx.cb.decl_map)
6319 sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
70f34814 6320 src = build_simple_mem_ref_loc (loc, sarg);
a68ab351 6321 src = build3 (COMPONENT_REF, TREE_TYPE (sf), src, sf, NULL);
70f34814 6322 dst = build_simple_mem_ref_loc (loc, arg);
a68ab351 6323 dst = build3 (COMPONENT_REF, TREE_TYPE (f), dst, f, NULL);
726a989a 6324 t = build2 (MODIFY_EXPR, TREE_TYPE (dst), dst, src);
a68ab351
JJ
6325 append_to_statement_list (t, &list);
6326 break;
6327 case OMP_CLAUSE_FIRSTPRIVATE:
6328 decl = OMP_CLAUSE_DECL (c);
6329 if (is_variable_sized (decl))
6330 break;
6331 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
6332 if (n == NULL)
6333 break;
6334 f = (tree) n->value;
6335 if (tcctx.cb.decl_map)
6336 f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
6337 n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
6338 if (n != NULL)
6339 {
6340 sf = (tree) n->value;
6341 if (tcctx.cb.decl_map)
6342 sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
70f34814 6343 src = build_simple_mem_ref_loc (loc, sarg);
a68ab351
JJ
6344 src = build3 (COMPONENT_REF, TREE_TYPE (sf), src, sf, NULL);
6345 if (use_pointer_for_field (decl, NULL) || is_reference (decl))
70f34814 6346 src = build_simple_mem_ref_loc (loc, src);
a68ab351
JJ
6347 }
6348 else
6349 src = decl;
70f34814 6350 dst = build_simple_mem_ref_loc (loc, arg);
a68ab351
JJ
6351 dst = build3 (COMPONENT_REF, TREE_TYPE (f), dst, f, NULL);
6352 t = lang_hooks.decls.omp_clause_copy_ctor (c, dst, src);
6353 append_to_statement_list (t, &list);
6354 break;
6355 case OMP_CLAUSE_PRIVATE:
6356 if (! OMP_CLAUSE_PRIVATE_OUTER_REF (c))
6357 break;
6358 decl = OMP_CLAUSE_DECL (c);
6359 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
6360 f = (tree) n->value;
6361 if (tcctx.cb.decl_map)
6362 f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
6363 n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
6364 if (n != NULL)
6365 {
6366 sf = (tree) n->value;
6367 if (tcctx.cb.decl_map)
6368 sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
70f34814 6369 src = build_simple_mem_ref_loc (loc, sarg);
a68ab351
JJ
6370 src = build3 (COMPONENT_REF, TREE_TYPE (sf), src, sf, NULL);
6371 if (use_pointer_for_field (decl, NULL))
70f34814 6372 src = build_simple_mem_ref_loc (loc, src);
a68ab351
JJ
6373 }
6374 else
6375 src = decl;
70f34814 6376 dst = build_simple_mem_ref_loc (loc, arg);
a68ab351 6377 dst = build3 (COMPONENT_REF, TREE_TYPE (f), dst, f, NULL);
726a989a 6378 t = build2 (MODIFY_EXPR, TREE_TYPE (dst), dst, src);
a68ab351
JJ
6379 append_to_statement_list (t, &list);
6380 break;
6381 default:
6382 break;
6383 }
6384
6385 /* Last pass: handle VLA firstprivates. */
6386 if (tcctx.cb.decl_map)
726a989a 6387 for (c = gimple_omp_task_clauses (task_stmt); c; c = OMP_CLAUSE_CHAIN (c))
a68ab351
JJ
6388 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_FIRSTPRIVATE)
6389 {
6390 tree ind, ptr, df;
6391
6392 decl = OMP_CLAUSE_DECL (c);
6393 if (!is_variable_sized (decl))
6394 continue;
6395 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
6396 if (n == NULL)
6397 continue;
6398 f = (tree) n->value;
6399 f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
6400 gcc_assert (DECL_HAS_VALUE_EXPR_P (decl));
6401 ind = DECL_VALUE_EXPR (decl);
6402 gcc_assert (TREE_CODE (ind) == INDIRECT_REF);
6403 gcc_assert (DECL_P (TREE_OPERAND (ind, 0)));
6404 n = splay_tree_lookup (ctx->sfield_map,
6405 (splay_tree_key) TREE_OPERAND (ind, 0));
6406 sf = (tree) n->value;
6407 sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
70f34814 6408 src = build_simple_mem_ref_loc (loc, sarg);
a68ab351 6409 src = build3 (COMPONENT_REF, TREE_TYPE (sf), src, sf, NULL);
70f34814
RG
6410 src = build_simple_mem_ref_loc (loc, src);
6411 dst = build_simple_mem_ref_loc (loc, arg);
a68ab351
JJ
6412 dst = build3 (COMPONENT_REF, TREE_TYPE (f), dst, f, NULL);
6413 t = lang_hooks.decls.omp_clause_copy_ctor (c, dst, src);
6414 append_to_statement_list (t, &list);
6415 n = splay_tree_lookup (ctx->field_map,
6416 (splay_tree_key) TREE_OPERAND (ind, 0));
6417 df = (tree) n->value;
6418 df = *(tree *) pointer_map_contains (tcctx.cb.decl_map, df);
70f34814 6419 ptr = build_simple_mem_ref_loc (loc, arg);
a68ab351 6420 ptr = build3 (COMPONENT_REF, TREE_TYPE (df), ptr, df, NULL);
726a989a 6421 t = build2 (MODIFY_EXPR, TREE_TYPE (ptr), ptr,
db3927fb 6422 build_fold_addr_expr_loc (loc, dst));
a68ab351
JJ
6423 append_to_statement_list (t, &list);
6424 }
6425
6426 t = build1 (RETURN_EXPR, void_type_node, NULL);
6427 append_to_statement_list (t, &list);
6428
6429 if (tcctx.cb.decl_map)
6430 pointer_map_destroy (tcctx.cb.decl_map);
6431 pop_gimplify_context (NULL);
6432 BIND_EXPR_BODY (bind) = list;
6433 pop_cfun ();
6434 current_function_decl = ctx->cb.src_fn;
6435}
6436
726a989a
RB
6437/* Lower the OpenMP parallel or task directive in the current statement
6438 in GSI_P. CTX holds context information for the directive. */
50674e96
DN
6439
static void
lower_omp_taskreg (gimple_stmt_iterator *gsi_p, omp_context *ctx)
{
  tree clauses;
  tree child_fn, t;
  gimple stmt = gsi_stmt (*gsi_p);
  gimple par_bind, bind;
  gimple_seq par_body, olist, ilist, par_olist, par_ilist, new_body;
  struct gimplify_ctx gctx;
  location_t loc = gimple_location (stmt);

  clauses = gimple_omp_taskreg_clauses (stmt);
  par_bind = gimple_seq_first_stmt (gimple_omp_body (stmt));
  par_body = gimple_bind_body (par_bind);
  child_fn = ctx->cb.dst_fn;
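  /* Illustrative note: a "combined" parallel is one whose body consists
     of exactly one workshare region, as produced by e.g.

         #pragma omp parallel for

     Detecting it here lets later expansion use libgomp's combined
     entry points (the GOMP_parallel_loop_* family, for instance)
     instead of a separate parallel and workshare.  This is a sketch of
     the intent; the actual test is check_combined_parallel below.  */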
  if (gimple_code (stmt) == GIMPLE_OMP_PARALLEL
      && !gimple_omp_parallel_combined_p (stmt))
    {
      struct walk_stmt_info wi;
      int ws_num = 0;

      memset (&wi, 0, sizeof (wi));
      wi.info = &ws_num;
      wi.val_only = true;
      walk_gimple_seq (par_body, check_combined_parallel, NULL, &wi);
      if (ws_num == 1)
        gimple_omp_parallel_set_combined_p (stmt, true);
    }
  if (ctx->srecord_type)
    create_task_copyfn (stmt, ctx);

  push_gimplify_context (&gctx);

  par_olist = NULL;
  par_ilist = NULL;
  lower_rec_input_clauses (clauses, &par_ilist, &par_olist, ctx);
  lower_omp (par_body, ctx);
  if (gimple_code (stmt) == GIMPLE_OMP_PARALLEL)
    lower_reduction_clauses (clauses, &par_olist, ctx);

  /* Declare all the variables created by mapping and the variables
     declared in the scope of the parallel body.  */
  record_vars_into (ctx->block_vars, child_fn);
  record_vars_into (gimple_bind_vars (par_bind), child_fn);

  if (ctx->record_type)
    {
      ctx->sender_decl
        = create_tmp_var (ctx->srecord_type ? ctx->srecord_type
                          : ctx->record_type, ".omp_data_o");
      TREE_ADDRESSABLE (ctx->sender_decl) = 1;
      gimple_omp_taskreg_set_data_arg (stmt, ctx->sender_decl);
    }

  olist = NULL;
  ilist = NULL;
  lower_send_clauses (clauses, &ilist, &olist, ctx);
  lower_send_shared_vars (&ilist, &olist, ctx);

  /* Once all the expansions are done, sequence all the different
     fragments inside gimple_omp_body.  */
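  /* A sketch of the layout assembled below, using the temporary names
     this file creates (illustrative, not an actual dump):

         .omp_data_i = &.omp_data_o     receiver setup, if a record exists
         <par_ilist>                    copy-in / private setup
         <par_body>                     the lowered user body
         <par_olist>                    copy-back / reduction finalization
         OMP_RETURN  */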

  new_body = NULL;

  if (ctx->record_type)
    {
      t = build_fold_addr_expr_loc (loc, ctx->sender_decl);
      /* fixup_child_record_type might have changed receiver_decl's type.  */
      t = fold_convert_loc (loc, TREE_TYPE (ctx->receiver_decl), t);
      gimple_seq_add_stmt (&new_body,
                           gimple_build_assign (ctx->receiver_decl, t));
    }
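  /* From here on, field references that lowering rewrote to go through
     the receiver (conceptually ".omp_data_i->x") see the values the
     parent stores into ".omp_data_o".  Sketch only; the receiver is
     created elsewhere in this file as the child function's
     ".omp_data_i" parameter.  */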

  gimple_seq_add_seq (&new_body, par_ilist);
  gimple_seq_add_seq (&new_body, par_body);
  gimple_seq_add_seq (&new_body, par_olist);
  new_body = maybe_catch_exception (new_body);
  gimple_seq_add_stmt (&new_body, gimple_build_omp_return (false));
  gimple_omp_set_body (stmt, new_body);

  bind = gimple_build_bind (NULL, NULL, gimple_bind_block (par_bind));
  gimple_bind_add_stmt (bind, stmt);
  if (ilist || olist)
    {
      gimple_seq_add_stmt (&ilist, bind);
      gimple_seq_add_seq (&ilist, olist);
      bind = gimple_build_bind (NULL, ilist, NULL);
    }

  gsi_replace (gsi_p, bind, true);

  pop_gimplify_context (NULL);
}

/* Callback for lower_omp_1.  Return non-NULL if *tp needs to be
   regimplified.  If DATA is non-NULL, lower_omp_1 is outside
   of OpenMP context, but with task_shared_vars set.  */

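/* For instance (illustrative): a variable-length array A used in a task
   body has DECL_HAS_VALUE_EXPR_P set during gimplification, with a
   value expression that dereferences a replacement pointer; statements
   still mentioning A are no longer valid gimple once the value
   expression is substituted and must therefore be regimplified.  */
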
static tree
lower_omp_regimplify_p (tree *tp, int *walk_subtrees,
                        void *data)
{
  tree t = *tp;

  /* Any variable with DECL_VALUE_EXPR needs to be regimplified.  */
  if (TREE_CODE (t) == VAR_DECL && data == NULL && DECL_HAS_VALUE_EXPR_P (t))
    return t;

  if (task_shared_vars
      && DECL_P (t)
      && bitmap_bit_p (task_shared_vars, DECL_UID (t)))
    return t;

  /* If a global variable has been privatized, TREE_CONSTANT on
     ADDR_EXPR might be wrong.  */
  if (data == NULL && TREE_CODE (t) == ADDR_EXPR)
    recompute_tree_invariant_for_addr_expr (t);

  *walk_subtrees = !TYPE_P (t) && !DECL_P (t);
  return NULL_TREE;
}

static void
lower_omp_1 (gimple_stmt_iterator *gsi_p, omp_context *ctx)
{
  gimple stmt = gsi_stmt (*gsi_p);
  struct walk_stmt_info wi;

  if (gimple_has_location (stmt))
    input_location = gimple_location (stmt);

  if (task_shared_vars)
    memset (&wi, '\0', sizeof (wi));

  /* If we have issued syntax errors, avoid doing any heavy lifting.
     Just replace the OpenMP directives with a NOP to avoid
     confusing RTL expansion.  */
  if (seen_error () && is_gimple_omp (stmt))
    {
      gsi_replace (gsi_p, gimple_build_nop (), true);
      return;
    }

  switch (gimple_code (stmt))
    {
    case GIMPLE_COND:
      if ((ctx || task_shared_vars)
          && (walk_tree (gimple_cond_lhs_ptr (stmt), lower_omp_regimplify_p,
                         ctx ? NULL : &wi, NULL)
              || walk_tree (gimple_cond_rhs_ptr (stmt), lower_omp_regimplify_p,
                            ctx ? NULL : &wi, NULL)))
        gimple_regimplify_operands (stmt, gsi_p);
      break;
    case GIMPLE_CATCH:
      lower_omp (gimple_catch_handler (stmt), ctx);
      break;
    case GIMPLE_EH_FILTER:
      lower_omp (gimple_eh_filter_failure (stmt), ctx);
      break;
    case GIMPLE_TRY:
      lower_omp (gimple_try_eval (stmt), ctx);
      lower_omp (gimple_try_cleanup (stmt), ctx);
      break;
    case GIMPLE_BIND:
      lower_omp (gimple_bind_body (stmt), ctx);
      break;
    case GIMPLE_OMP_PARALLEL:
    case GIMPLE_OMP_TASK:
      ctx = maybe_lookup_ctx (stmt);
      lower_omp_taskreg (gsi_p, ctx);
      break;
    case GIMPLE_OMP_FOR:
      ctx = maybe_lookup_ctx (stmt);
      gcc_assert (ctx);
      lower_omp_for (gsi_p, ctx);
      break;
    case GIMPLE_OMP_SECTIONS:
      ctx = maybe_lookup_ctx (stmt);
      gcc_assert (ctx);
      lower_omp_sections (gsi_p, ctx);
      break;
    case GIMPLE_OMP_SINGLE:
      ctx = maybe_lookup_ctx (stmt);
      gcc_assert (ctx);
      lower_omp_single (gsi_p, ctx);
      break;
    case GIMPLE_OMP_MASTER:
      ctx = maybe_lookup_ctx (stmt);
      gcc_assert (ctx);
      lower_omp_master (gsi_p, ctx);
      break;
    case GIMPLE_OMP_ORDERED:
      ctx = maybe_lookup_ctx (stmt);
      gcc_assert (ctx);
      lower_omp_ordered (gsi_p, ctx);
      break;
    case GIMPLE_OMP_CRITICAL:
      ctx = maybe_lookup_ctx (stmt);
      gcc_assert (ctx);
      lower_omp_critical (gsi_p, ctx);
      break;
    case GIMPLE_OMP_ATOMIC_LOAD:
      if ((ctx || task_shared_vars)
          && walk_tree (gimple_omp_atomic_load_rhs_ptr (stmt),
                        lower_omp_regimplify_p, ctx ? NULL : &wi, NULL))
        gimple_regimplify_operands (stmt, gsi_p);
      break;
    default:
      if ((ctx || task_shared_vars)
          && walk_gimple_op (stmt, lower_omp_regimplify_p,
                             ctx ? NULL : &wi))
        gimple_regimplify_operands (stmt, gsi_p);
      break;
    }
}

static void
lower_omp (gimple_seq body, omp_context *ctx)
{
  location_t saved_location = input_location;
  gimple_stmt_iterator gsi;
  for (gsi = gsi_start (body); !gsi_end_p (gsi); gsi_next (&gsi))
    lower_omp_1 (&gsi, ctx);
  input_location = saved_location;
}
\f
/* Main entry point.  */

static unsigned int
execute_lower_omp (void)
{
  gimple_seq body;

  /* This pass always runs, to provide PROP_gimple_lomp.
     But there is nothing to do unless -fopenmp is given.  */
  if (flag_openmp == 0)
    return 0;

  all_contexts = splay_tree_new (splay_tree_compare_pointers, 0,
                                 delete_omp_context);

  body = gimple_body (current_function_decl);
  scan_omp (body, NULL);
  gcc_assert (taskreg_nesting_level == 0);

  if (all_contexts->root)
    {
      struct gimplify_ctx gctx;

      if (task_shared_vars)
        push_gimplify_context (&gctx);
      lower_omp (body, NULL);
      if (task_shared_vars)
        pop_gimplify_context (NULL);
    }

  if (all_contexts)
    {
      splay_tree_delete (all_contexts);
      all_contexts = NULL;
    }
  BITMAP_FREE (task_shared_vars);
  return 0;
}

struct gimple_opt_pass pass_lower_omp =
{
 {
  GIMPLE_PASS,
  "omplower",                           /* name */
  NULL,                                 /* gate */
  execute_lower_omp,                    /* execute */
  NULL,                                 /* sub */
  NULL,                                 /* next */
  0,                                    /* static_pass_number */
  TV_NONE,                              /* tv_id */
  PROP_gimple_any,                      /* properties_required */
  PROP_gimple_lomp,                     /* properties_provided */
  0,                                    /* properties_destroyed */
  0,                                    /* todo_flags_start */
  TODO_dump_func                        /* todo_flags_finish */
 }
};
\f
/* The following is a utility to diagnose OpenMP structured block violations.
   It is not part of the "omplower" pass, as that's invoked too late.  It
   should be invoked by the respective front ends after gimplification.  */

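/* For example, with -fopenmp the branch in

       #pragma omp parallel
       {
         goto fail;
       }
     fail:;

   is invalid: pass 1 below records the context of every label, pass 2
   checks each branch against the context of its destination, and
   diagnose_sb_0 reports "invalid branch to/from an OpenMP structured
   block".  (Illustrative example.)  */
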
static splay_tree all_labels;

/* Check for mismatched contexts and generate an error if needed.  Return
   true if an error is detected.  */

static bool
diagnose_sb_0 (gimple_stmt_iterator *gsi_p,
               gimple branch_ctx, gimple label_ctx)
{
  if (label_ctx == branch_ctx)
    return false;

  /* Previously we kept track of the label's entire context in
     diagnose_sb_[12] so we could traverse it and issue a correct
     "exit" or "enter" error message upon a structured block violation.

     We built the context by building a list with tree_cons'ing, but
     there is no easy counterpart in gimple tuples.  It seems like far
     too much work for issuing exit/enter error messages.  If someone
     really misses the distinct error message... patches welcome.  */

#if 0
  /* Try to avoid confusing the user by producing an error message
     with correct "exit" or "enter" verbiage.  We prefer "exit"
     unless we can show that LABEL_CTX is nested within BRANCH_CTX.  */
  if (branch_ctx == NULL)
    exit_p = false;
  else
    {
      while (label_ctx)
        {
          if (TREE_VALUE (label_ctx) == branch_ctx)
            {
              exit_p = false;
              break;
            }
          label_ctx = TREE_CHAIN (label_ctx);
        }
    }

  if (exit_p)
    error ("invalid exit from OpenMP structured block");
  else
    error ("invalid entry to OpenMP structured block");
#endif

  /* If it's obvious we have an invalid entry, be specific about the error.  */
  if (branch_ctx == NULL)
    error ("invalid entry to OpenMP structured block");
  else
    /* Otherwise, be vague and lazy, but efficient.  */
    error ("invalid branch to/from an OpenMP structured block");

  gsi_replace (gsi_p, gimple_build_nop (), false);
  return true;
}

/* Pass 1: Create a minimal tree of OpenMP structured blocks, and record
   where each label is found.  */

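/* Sketch of the mechanism: ALL_LABELS maps each LABEL_DECL to the
   innermost enclosing OMP construct carried in WI->INFO (NULL when the
   label is outside any construct); pass 2 then consults this map for
   every branch target.  */
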
static tree
diagnose_sb_1 (gimple_stmt_iterator *gsi_p, bool *handled_ops_p,
               struct walk_stmt_info *wi)
{
  gimple context = (gimple) wi->info;
  gimple inner_context;
  gimple stmt = gsi_stmt (*gsi_p);

  *handled_ops_p = true;

  switch (gimple_code (stmt))
    {
    WALK_SUBSTMTS;

    case GIMPLE_OMP_PARALLEL:
    case GIMPLE_OMP_TASK:
    case GIMPLE_OMP_SECTIONS:
    case GIMPLE_OMP_SINGLE:
    case GIMPLE_OMP_SECTION:
    case GIMPLE_OMP_MASTER:
    case GIMPLE_OMP_ORDERED:
    case GIMPLE_OMP_CRITICAL:
      /* The minimal context here is just the current OMP construct.  */
      inner_context = stmt;
      wi->info = inner_context;
      walk_gimple_seq (gimple_omp_body (stmt), diagnose_sb_1, NULL, wi);
      wi->info = context;
      break;

    case GIMPLE_OMP_FOR:
      inner_context = stmt;
      wi->info = inner_context;
      /* gimple_omp_for_{index,initial,final} are all DECLs; no need to
         walk them.  */
      walk_gimple_seq (gimple_omp_for_pre_body (stmt),
                       diagnose_sb_1, NULL, wi);
      walk_gimple_seq (gimple_omp_body (stmt), diagnose_sb_1, NULL, wi);
      wi->info = context;
      break;

    case GIMPLE_LABEL:
      splay_tree_insert (all_labels, (splay_tree_key) gimple_label_label (stmt),
                         (splay_tree_value) context);
      break;

    default:
      break;
    }

  return NULL_TREE;
}

/* Pass 2: Check each branch and see if its context differs from the
   context of the destination label.  */

static tree
diagnose_sb_2 (gimple_stmt_iterator *gsi_p, bool *handled_ops_p,
               struct walk_stmt_info *wi)
{
  gimple context = (gimple) wi->info;
  splay_tree_node n;
  gimple stmt = gsi_stmt (*gsi_p);

  *handled_ops_p = true;

  switch (gimple_code (stmt))
    {
    WALK_SUBSTMTS;

    case GIMPLE_OMP_PARALLEL:
    case GIMPLE_OMP_TASK:
    case GIMPLE_OMP_SECTIONS:
    case GIMPLE_OMP_SINGLE:
    case GIMPLE_OMP_SECTION:
    case GIMPLE_OMP_MASTER:
    case GIMPLE_OMP_ORDERED:
    case GIMPLE_OMP_CRITICAL:
      wi->info = stmt;
      walk_gimple_seq (gimple_omp_body (stmt), diagnose_sb_2, NULL, wi);
      wi->info = context;
      break;

    case GIMPLE_OMP_FOR:
      wi->info = stmt;
      /* gimple_omp_for_{index,initial,final} are all DECLs; no need to
         walk them.  */
      walk_gimple_seq (gimple_omp_for_pre_body (stmt),
                       diagnose_sb_2, NULL, wi);
      walk_gimple_seq (gimple_omp_body (stmt), diagnose_sb_2, NULL, wi);
      wi->info = context;
      break;

    case GIMPLE_COND:
      {
        tree lab = gimple_cond_true_label (stmt);
        if (lab)
          {
            n = splay_tree_lookup (all_labels,
                                   (splay_tree_key) lab);
            diagnose_sb_0 (gsi_p, context,
                           n ? (gimple) n->value : NULL);
          }
        lab = gimple_cond_false_label (stmt);
        if (lab)
          {
            n = splay_tree_lookup (all_labels,
                                   (splay_tree_key) lab);
            diagnose_sb_0 (gsi_p, context,
                           n ? (gimple) n->value : NULL);
          }
      }
      break;

    case GIMPLE_GOTO:
      {
        tree lab = gimple_goto_dest (stmt);
        if (TREE_CODE (lab) != LABEL_DECL)
          break;

        n = splay_tree_lookup (all_labels, (splay_tree_key) lab);
        diagnose_sb_0 (gsi_p, context, n ? (gimple) n->value : NULL);
      }
      break;

    case GIMPLE_SWITCH:
      {
        unsigned int i;
        for (i = 0; i < gimple_switch_num_labels (stmt); ++i)
          {
            tree lab = CASE_LABEL (gimple_switch_label (stmt, i));
            n = splay_tree_lookup (all_labels, (splay_tree_key) lab);
            if (n && diagnose_sb_0 (gsi_p, context, (gimple) n->value))
              break;
          }
      }
      break;

    case GIMPLE_RETURN:
      diagnose_sb_0 (gsi_p, context, NULL);
      break;

    default:
      break;
    }

  return NULL_TREE;
}

static unsigned int
diagnose_omp_structured_block_errors (void)
{
  struct walk_stmt_info wi;
  gimple_seq body = gimple_body (current_function_decl);

  all_labels = splay_tree_new (splay_tree_compare_pointers, 0, 0);

  memset (&wi, 0, sizeof (wi));
  walk_gimple_seq (body, diagnose_sb_1, NULL, &wi);

  memset (&wi, 0, sizeof (wi));
  wi.want_locations = true;
  walk_gimple_seq (body, diagnose_sb_2, NULL, &wi);

  splay_tree_delete (all_labels);
  all_labels = NULL;

  return 0;
}

static bool
gate_diagnose_omp_blocks (void)
{
  return flag_openmp != 0;
}

struct gimple_opt_pass pass_diagnose_omp_blocks =
{
 {
  GIMPLE_PASS,
  "*diagnose_omp_blocks",               /* name */
  gate_diagnose_omp_blocks,             /* gate */
  diagnose_omp_structured_block_errors, /* execute */
  NULL,                                 /* sub */
  NULL,                                 /* next */
  0,                                    /* static_pass_number */
  TV_NONE,                              /* tv_id */
  PROP_gimple_any,                      /* properties_required */
  0,                                    /* properties_provided */
  0,                                    /* properties_destroyed */
  0,                                    /* todo_flags_start */
  0,                                    /* todo_flags_finish */
 }
};

#include "gt-omp-low.h"