/* Lowering pass for OpenMP directives.  Converts OpenMP directives
   into explicit calls to the runtime library (libgomp) and data
   marshalling to implement data sharing and copying clauses.
   Contributed by Diego Novillo <dnovillo@redhat.com>

   Copyright (C) 2005, 2006, 2007, 2008, 2009, 2010, 2011
   Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
#include "rtl.h"
#include "gimple.h"
#include "tree-iterator.h"
#include "tree-inline.h"
#include "langhooks.h"
#include "diagnostic-core.h"
#include "tree-flow.h"
#include "timevar.h"
#include "flags.h"
#include "function.h"
#include "expr.h"
#include "tree-pass.h"
#include "ggc.h"
#include "except.h"
#include "splay-tree.h"
#include "optabs.h"
#include "cfgloop.h"


/* Lowering of OpenMP parallel and workshare constructs proceeds in two
   phases.  The first phase scans the function looking for OMP statements
   and then for variables that must be replaced to satisfy data sharing
   clauses.  The second phase expands code for the constructs, as well as
   re-gimplifying things when variables have been replaced with complex
   expressions.

   Final code generation is done by pass_expand_omp.  The flowgraph is
   scanned for parallel regions which are then moved to a new
   function, to be invoked by the thread library.  */
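
/* As a sketch of the overall transformation (assuming the libgomp
   entry points of this vintage; the exact generated GIMPLE differs),
   a directive such as

     #pragma omp parallel shared (x)
       body;

   is outlined into a child function and replaced by calls along the
   lines of

     .omp_data_o.x = x;
     GOMP_parallel_start (foo._omp_fn.0, &.omp_data_o, 0);
     foo._omp_fn.0 (&.omp_data_o);
     GOMP_parallel_end ();

   where .omp_data_o is the record built from the data sharing
   clauses.  */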

/* Context structure.  Used to store information about each parallel
   directive in the code.  */

typedef struct omp_context
{
  /* This field must be at the beginning, as we do "inheritance":  Some
     callback functions for tree-inline.c (e.g., omp_copy_decl)
     receive a copy_body_data pointer that is up-casted to an
     omp_context pointer.  */
  copy_body_data cb;

  /* The tree of contexts corresponding to the encountered constructs.  */
  struct omp_context *outer;
  gimple stmt;

  /* Map variables to fields in a structure that allows communication
     between sending and receiving threads.  */
  splay_tree field_map;
  tree record_type;
  tree sender_decl;
  tree receiver_decl;

  /* These are used just by task contexts, if task firstprivate fn is
     needed.  srecord_type is used to communicate from the thread
     that encountered the task construct to task firstprivate fn,
     record_type is allocated by GOMP_task, initialized by task firstprivate
     fn and passed to the task body fn.  */
  splay_tree sfield_map;
  tree srecord_type;

  /* A chain of variables to add to the top-level block surrounding the
     construct.  In the case of a parallel, this is in the child function.  */
  tree block_vars;

  /* What to do with variables with implicitly determined sharing
     attributes.  */
  enum omp_clause_default_kind default_kind;

  /* Nesting depth of this context.  Used to beautify error messages re
     invalid gotos.  The outermost ctx is depth 1, with depth 0 being
     reserved for the main body of the function.  */
  int depth;

  /* True if this parallel directive is nested within another.  */
  bool is_nested;
} omp_context;


struct omp_for_data_loop
{
  tree v, n1, n2, step;
  enum tree_code cond_code;
};

/* A structure describing the main elements of a parallel loop.  */

struct omp_for_data
{
  struct omp_for_data_loop loop;
  tree chunk_size;
  gimple for_stmt;
  tree pre, iter_type;
  int collapse;
  bool have_nowait, have_ordered;
  enum omp_clause_schedule_kind sched_kind;
  struct omp_for_data_loop *loops;
};


static splay_tree all_contexts;
static int taskreg_nesting_level;
struct omp_region *root_omp_region;
static bitmap task_shared_vars;

static void scan_omp (gimple_seq, omp_context *);
static tree scan_omp_1_op (tree *, int *, void *);

#define WALK_SUBSTMTS  \
    case GIMPLE_BIND: \
    case GIMPLE_TRY: \
    case GIMPLE_CATCH: \
    case GIMPLE_EH_FILTER: \
    case GIMPLE_TRANSACTION: \
      /* The sub-statements for these should be walked.  */ \
      *handled_ops_p = false; \
      break;

/* Convenience function for calling scan_omp_1_op on tree operands.  */

static inline tree
scan_omp_op (tree *tp, omp_context *ctx)
{
  struct walk_stmt_info wi;

  memset (&wi, 0, sizeof (wi));
  wi.info = ctx;
  wi.want_locations = true;

  return walk_tree (tp, scan_omp_1_op, &wi, NULL);
}

static void lower_omp (gimple_seq, omp_context *);
static tree lookup_decl_in_outer_ctx (tree, omp_context *);
static tree maybe_lookup_decl_in_outer_ctx (tree, omp_context *);

/* Find an OpenMP clause of type KIND within CLAUSES.  */

tree
find_omp_clause (tree clauses, enum omp_clause_code kind)
{
  for (; clauses ; clauses = OMP_CLAUSE_CHAIN (clauses))
    if (OMP_CLAUSE_CODE (clauses) == kind)
      return clauses;

  return NULL_TREE;
}

/* Return true if CTX is for an omp parallel.  */

static inline bool
is_parallel_ctx (omp_context *ctx)
{
  return gimple_code (ctx->stmt) == GIMPLE_OMP_PARALLEL;
}


/* Return true if CTX is for an omp task.  */

static inline bool
is_task_ctx (omp_context *ctx)
{
  return gimple_code (ctx->stmt) == GIMPLE_OMP_TASK;
}


/* Return true if CTX is for an omp parallel or omp task.  */

static inline bool
is_taskreg_ctx (omp_context *ctx)
{
  return gimple_code (ctx->stmt) == GIMPLE_OMP_PARALLEL
         || gimple_code (ctx->stmt) == GIMPLE_OMP_TASK;
}


/* Return true if REGION is a combined parallel+workshare region.  */

static inline bool
is_combined_parallel (struct omp_region *region)
{
  return region->is_combined_parallel;
}


/* Extract the header elements of parallel loop FOR_STMT and store
   them into *FD.  */

static void
extract_omp_for_data (gimple for_stmt, struct omp_for_data *fd,
                      struct omp_for_data_loop *loops)
{
  tree t, var, *collapse_iter, *collapse_count;
  tree count = NULL_TREE, iter_type = long_integer_type_node;
  struct omp_for_data_loop *loop;
  int i;
  struct omp_for_data_loop dummy_loop;
  location_t loc = gimple_location (for_stmt);

  fd->for_stmt = for_stmt;
  fd->pre = NULL;
  fd->collapse = gimple_omp_for_collapse (for_stmt);
  if (fd->collapse > 1)
    fd->loops = loops;
  else
    fd->loops = &fd->loop;

  fd->have_nowait = fd->have_ordered = false;
  fd->sched_kind = OMP_CLAUSE_SCHEDULE_STATIC;
  fd->chunk_size = NULL_TREE;
  collapse_iter = NULL;
  collapse_count = NULL;

  for (t = gimple_omp_for_clauses (for_stmt); t ; t = OMP_CLAUSE_CHAIN (t))
    switch (OMP_CLAUSE_CODE (t))
      {
      case OMP_CLAUSE_NOWAIT:
        fd->have_nowait = true;
        break;
      case OMP_CLAUSE_ORDERED:
        fd->have_ordered = true;
        break;
      case OMP_CLAUSE_SCHEDULE:
        fd->sched_kind = OMP_CLAUSE_SCHEDULE_KIND (t);
        fd->chunk_size = OMP_CLAUSE_SCHEDULE_CHUNK_EXPR (t);
        break;
      case OMP_CLAUSE_COLLAPSE:
        if (fd->collapse > 1)
          {
            collapse_iter = &OMP_CLAUSE_COLLAPSE_ITERVAR (t);
            collapse_count = &OMP_CLAUSE_COLLAPSE_COUNT (t);
          }
      default:
        break;
      }

  /* FIXME: for now map schedule(auto) to schedule(static).
     There should be analysis to determine whether all iterations
     are approximately the same amount of work (then schedule(static)
     is best) or if it varies (then schedule(dynamic,N) is better).  */
  if (fd->sched_kind == OMP_CLAUSE_SCHEDULE_AUTO)
    {
      fd->sched_kind = OMP_CLAUSE_SCHEDULE_STATIC;
      gcc_assert (fd->chunk_size == NULL);
    }
  gcc_assert (fd->collapse == 1 || collapse_iter != NULL);
  if (fd->sched_kind == OMP_CLAUSE_SCHEDULE_RUNTIME)
    gcc_assert (fd->chunk_size == NULL);
  else if (fd->chunk_size == NULL)
    {
      /* We only need to compute a default chunk size for ordered
         static loops and dynamic loops.  */
      if (fd->sched_kind != OMP_CLAUSE_SCHEDULE_STATIC
          || fd->have_ordered
          || fd->collapse > 1)
        fd->chunk_size = (fd->sched_kind == OMP_CLAUSE_SCHEDULE_STATIC)
                         ? integer_zero_node : integer_one_node;
    }

  for (i = 0; i < fd->collapse; i++)
    {
      if (fd->collapse == 1)
        loop = &fd->loop;
      else if (loops != NULL)
        loop = loops + i;
      else
        loop = &dummy_loop;

      loop->v = gimple_omp_for_index (for_stmt, i);
      gcc_assert (SSA_VAR_P (loop->v));
      gcc_assert (TREE_CODE (TREE_TYPE (loop->v)) == INTEGER_TYPE
                  || TREE_CODE (TREE_TYPE (loop->v)) == POINTER_TYPE);
      var = TREE_CODE (loop->v) == SSA_NAME ? SSA_NAME_VAR (loop->v) : loop->v;
      loop->n1 = gimple_omp_for_initial (for_stmt, i);

      loop->cond_code = gimple_omp_for_cond (for_stmt, i);
      loop->n2 = gimple_omp_for_final (for_stmt, i);
      switch (loop->cond_code)
        {
        case LT_EXPR:
        case GT_EXPR:
          break;
        case LE_EXPR:
          if (POINTER_TYPE_P (TREE_TYPE (loop->n2)))
            loop->n2 = fold_build_pointer_plus_hwi_loc (loc, loop->n2, 1);
          else
            loop->n2 = fold_build2_loc (loc,
                           PLUS_EXPR, TREE_TYPE (loop->n2), loop->n2,
                           build_int_cst (TREE_TYPE (loop->n2), 1));
          loop->cond_code = LT_EXPR;
          break;
        case GE_EXPR:
          if (POINTER_TYPE_P (TREE_TYPE (loop->n2)))
            loop->n2 = fold_build_pointer_plus_hwi_loc (loc, loop->n2, -1);
          else
            loop->n2 = fold_build2_loc (loc,
                           MINUS_EXPR, TREE_TYPE (loop->n2), loop->n2,
                           build_int_cst (TREE_TYPE (loop->n2), 1));
          loop->cond_code = GT_EXPR;
          break;
        default:
          gcc_unreachable ();
        }

      t = gimple_omp_for_incr (for_stmt, i);
      gcc_assert (TREE_OPERAND (t, 0) == var);
      switch (TREE_CODE (t))
        {
        case PLUS_EXPR:
        case POINTER_PLUS_EXPR:
          loop->step = TREE_OPERAND (t, 1);
          break;
        case MINUS_EXPR:
          loop->step = TREE_OPERAND (t, 1);
          loop->step = fold_build1_loc (loc,
                           NEGATE_EXPR, TREE_TYPE (loop->step),
                           loop->step);
          break;
        default:
          gcc_unreachable ();
        }

      if (iter_type != long_long_unsigned_type_node)
        {
          if (POINTER_TYPE_P (TREE_TYPE (loop->v)))
            iter_type = long_long_unsigned_type_node;
          else if (TYPE_UNSIGNED (TREE_TYPE (loop->v))
                   && TYPE_PRECISION (TREE_TYPE (loop->v))
                      >= TYPE_PRECISION (iter_type))
            {
              tree n;

              if (loop->cond_code == LT_EXPR)
                n = fold_build2_loc (loc,
                        PLUS_EXPR, TREE_TYPE (loop->v),
                        loop->n2, loop->step);
              else
                n = loop->n1;
              if (TREE_CODE (n) != INTEGER_CST
                  || tree_int_cst_lt (TYPE_MAX_VALUE (iter_type), n))
                iter_type = long_long_unsigned_type_node;
            }
          else if (TYPE_PRECISION (TREE_TYPE (loop->v))
                   > TYPE_PRECISION (iter_type))
            {
              tree n1, n2;

              if (loop->cond_code == LT_EXPR)
                {
                  n1 = loop->n1;
                  n2 = fold_build2_loc (loc,
                           PLUS_EXPR, TREE_TYPE (loop->v),
                           loop->n2, loop->step);
                }
              else
                {
                  n1 = fold_build2_loc (loc,
                           MINUS_EXPR, TREE_TYPE (loop->v),
                           loop->n2, loop->step);
                  n2 = loop->n1;
                }
              if (TREE_CODE (n1) != INTEGER_CST
                  || TREE_CODE (n2) != INTEGER_CST
                  || !tree_int_cst_lt (TYPE_MIN_VALUE (iter_type), n1)
                  || !tree_int_cst_lt (n2, TYPE_MAX_VALUE (iter_type)))
                iter_type = long_long_unsigned_type_node;
            }
        }

      if (collapse_count && *collapse_count == NULL)
        {
          if ((i == 0 || count != NULL_TREE)
              && TREE_CODE (TREE_TYPE (loop->v)) == INTEGER_TYPE
              && TREE_CONSTANT (loop->n1)
              && TREE_CONSTANT (loop->n2)
              && TREE_CODE (loop->step) == INTEGER_CST)
            {
              tree itype = TREE_TYPE (loop->v);

              if (POINTER_TYPE_P (itype))
                itype
                  = lang_hooks.types.type_for_size (TYPE_PRECISION (itype), 0);
              t = build_int_cst (itype, (loop->cond_code == LT_EXPR ? -1 : 1));
              t = fold_build2_loc (loc,
                      PLUS_EXPR, itype,
                      fold_convert_loc (loc, itype, loop->step), t);
              t = fold_build2_loc (loc, PLUS_EXPR, itype, t,
                      fold_convert_loc (loc, itype, loop->n2));
              t = fold_build2_loc (loc, MINUS_EXPR, itype, t,
                      fold_convert_loc (loc, itype, loop->n1));
              if (TYPE_UNSIGNED (itype) && loop->cond_code == GT_EXPR)
                t = fold_build2_loc (loc, TRUNC_DIV_EXPR, itype,
                        fold_build1_loc (loc, NEGATE_EXPR, itype, t),
                        fold_build1_loc (loc, NEGATE_EXPR, itype,
                            fold_convert_loc (loc, itype,
                                              loop->step)));
              else
                t = fold_build2_loc (loc, TRUNC_DIV_EXPR, itype, t,
                        fold_convert_loc (loc, itype, loop->step));
              t = fold_convert_loc (loc, long_long_unsigned_type_node, t);
              if (count != NULL_TREE)
                count = fold_build2_loc (loc,
                            MULT_EXPR, long_long_unsigned_type_node,
                            count, t);
              else
                count = t;
              if (TREE_CODE (count) != INTEGER_CST)
                count = NULL_TREE;
            }
          else
            count = NULL_TREE;
        }
    }

  if (count)
    {
      if (!tree_int_cst_lt (count, TYPE_MAX_VALUE (long_integer_type_node)))
        iter_type = long_long_unsigned_type_node;
      else
        iter_type = long_integer_type_node;
    }
  else if (collapse_iter && *collapse_iter != NULL)
    iter_type = TREE_TYPE (*collapse_iter);
  fd->iter_type = iter_type;
  if (collapse_iter && *collapse_iter == NULL)
    *collapse_iter = create_tmp_var (iter_type, ".iter");
  if (collapse_count && *collapse_count == NULL)
    {
      if (count)
        *collapse_count = fold_convert_loc (loc, iter_type, count);
      else
        *collapse_count = create_tmp_var (iter_type, ".count");
    }

  if (fd->collapse > 1)
    {
      fd->loop.v = *collapse_iter;
      fd->loop.n1 = build_int_cst (TREE_TYPE (fd->loop.v), 0);
      fd->loop.n2 = *collapse_count;
      fd->loop.step = build_int_cst (TREE_TYPE (fd->loop.v), 1);
      fd->loop.cond_code = LT_EXPR;
    }
}
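
/* As an illustrative sketch of the canonicalization above, in

     #pragma omp for collapse(2)
     for (i = 0; i <= N; i++)
       for (j = 0; j < M; j++)

   the LE_EXPR condition is rewritten as i < N + 1, each per-loop trip
   count (when it is a compile-time constant) is computed as
   (step + n2 - 1 - n1) / step for an increasing loop, and the
   collapsed nest is then described by a single logical loop running
   the .iter temporary from 0 below .count, the product of the trip
   counts, with step 1 and cond_code LT_EXPR.  */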


/* Given two blocks PAR_ENTRY_BB and WS_ENTRY_BB such that WS_ENTRY_BB
   is the immediate dominator of PAR_ENTRY_BB, return true if there
   are no data dependencies that would prevent expanding the parallel
   directive at PAR_ENTRY_BB as a combined parallel+workshare region.

   When expanding a combined parallel+workshare region, the call to
   the child function may need additional arguments in the case of
   GIMPLE_OMP_FOR regions.  In some cases, these arguments are
   computed out of variables passed in from the parent to the child
   via 'struct .omp_data_s'.  For instance:

	#pragma omp parallel for schedule (guided, i * 4)
	for (j ...)

   Is lowered into:

	# BLOCK 2 (PAR_ENTRY_BB)
	.omp_data_o.i = i;
	#pragma omp parallel [child fn: bar.omp_fn.0 ( ..., D.1598)

	# BLOCK 3 (WS_ENTRY_BB)
	.omp_data_i = &.omp_data_o;
	D.1667 = .omp_data_i->i;
	D.1598 = D.1667 * 4;
	#pragma omp for schedule (guided, D.1598)

   When we outline the parallel region, the call to the child function
   'bar.omp_fn.0' will need the value D.1598 in its argument list, but
   that value is computed *after* the call site.  So, in principle we
   cannot do the transformation.

   To see whether the code in WS_ENTRY_BB blocks the combined
   parallel+workshare call, we collect all the variables used in the
   GIMPLE_OMP_FOR header and check whether they appear on the LHS of
   any statement in WS_ENTRY_BB.  If so, then we cannot emit the
   combined call.

   FIXME.  If we had the SSA form built at this point, we could merely
   hoist the code in block 3 into block 2 and be done with it.  But at
   this point we don't have dataflow information and though we could
   hack something up here, it is really not worth the aggravation.  */

static bool
workshare_safe_to_combine_p (basic_block ws_entry_bb)
{
  struct omp_for_data fd;
  gimple ws_stmt = last_stmt (ws_entry_bb);

  if (gimple_code (ws_stmt) == GIMPLE_OMP_SECTIONS)
    return true;

  gcc_assert (gimple_code (ws_stmt) == GIMPLE_OMP_FOR);

  extract_omp_for_data (ws_stmt, &fd, NULL);

  if (fd.collapse > 1 && TREE_CODE (fd.loop.n2) != INTEGER_CST)
    return false;
  if (fd.iter_type != long_integer_type_node)
    return false;

  /* FIXME.  We give up too easily here.  If any of these arguments
     are not constants, they will likely involve variables that have
     been mapped into fields of .omp_data_s for sharing with the child
     function.  With appropriate data flow, it would be possible to
     see through this.  */
  if (!is_gimple_min_invariant (fd.loop.n1)
      || !is_gimple_min_invariant (fd.loop.n2)
      || !is_gimple_min_invariant (fd.loop.step)
      || (fd.chunk_size && !is_gimple_min_invariant (fd.chunk_size)))
    return false;

  return true;
}


/* Collect additional arguments needed to emit a combined
   parallel+workshare call.  WS_STMT is the workshare directive being
   expanded.  */

static VEC(tree,gc) *
get_ws_args_for (gimple ws_stmt)
{
  tree t;
  location_t loc = gimple_location (ws_stmt);
  VEC(tree,gc) *ws_args;

  if (gimple_code (ws_stmt) == GIMPLE_OMP_FOR)
    {
      struct omp_for_data fd;

      extract_omp_for_data (ws_stmt, &fd, NULL);

      ws_args = VEC_alloc (tree, gc, 3 + (fd.chunk_size != 0));

      t = fold_convert_loc (loc, long_integer_type_node, fd.loop.n1);
      VEC_quick_push (tree, ws_args, t);

      t = fold_convert_loc (loc, long_integer_type_node, fd.loop.n2);
      VEC_quick_push (tree, ws_args, t);

      t = fold_convert_loc (loc, long_integer_type_node, fd.loop.step);
      VEC_quick_push (tree, ws_args, t);

      if (fd.chunk_size)
        {
          t = fold_convert_loc (loc, long_integer_type_node, fd.chunk_size);
          VEC_quick_push (tree, ws_args, t);
        }

      return ws_args;
    }
  else if (gimple_code (ws_stmt) == GIMPLE_OMP_SECTIONS)
    {
      /* Number of sections is equal to the number of edges from the
         GIMPLE_OMP_SECTIONS_SWITCH statement, except for the one to
         the exit of the sections region.  */
      basic_block bb = single_succ (gimple_bb (ws_stmt));
      t = build_int_cst (unsigned_type_node, EDGE_COUNT (bb->succs) - 1);
      ws_args = VEC_alloc (tree, gc, 1);
      VEC_quick_push (tree, ws_args, t);
      return ws_args;
    }

  gcc_unreachable ();
}
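
/* A sketch of how these arguments are consumed (assuming the libgomp
   interface of this vintage): for a combined parallel loop, expansion
   appends them as the start, end, increment and optional chunk-size
   operands of a GOMP_parallel_loop_*_start call, while for combined
   sections they supply the section count operand of
   GOMP_parallel_sections_start.  */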


/* Discover whether REGION is a combined parallel+workshare region.  */

static void
determine_parallel_type (struct omp_region *region)
{
  basic_block par_entry_bb, par_exit_bb;
  basic_block ws_entry_bb, ws_exit_bb;

  if (region == NULL || region->inner == NULL
      || region->exit == NULL || region->inner->exit == NULL
      || region->inner->cont == NULL)
    return;

  /* We only support parallel+for and parallel+sections.  */
  if (region->type != GIMPLE_OMP_PARALLEL
      || (region->inner->type != GIMPLE_OMP_FOR
          && region->inner->type != GIMPLE_OMP_SECTIONS))
    return;

  /* Check for perfect nesting PAR_ENTRY_BB -> WS_ENTRY_BB and
     WS_EXIT_BB -> PAR_EXIT_BB.  */
  par_entry_bb = region->entry;
  par_exit_bb = region->exit;
  ws_entry_bb = region->inner->entry;
  ws_exit_bb = region->inner->exit;

  if (single_succ (par_entry_bb) == ws_entry_bb
      && single_succ (ws_exit_bb) == par_exit_bb
      && workshare_safe_to_combine_p (ws_entry_bb)
      && (gimple_omp_parallel_combined_p (last_stmt (par_entry_bb))
          || (last_and_only_stmt (ws_entry_bb)
              && last_and_only_stmt (par_exit_bb))))
    {
      gimple ws_stmt = last_stmt (ws_entry_bb);

      if (region->inner->type == GIMPLE_OMP_FOR)
        {
          /* If this is a combined parallel loop, we need to determine
             whether or not to use the combined library calls.  There
             are two cases where we do not apply the transformation:
             static loops and any kind of ordered loop.  In the first
             case, we already open code the loop so there is no need
             to do anything else.  In the latter case, the combined
             parallel loop call would still need extra synchronization
             to implement ordered semantics, so there would not be any
             gain in using the combined call.  */
          tree clauses = gimple_omp_for_clauses (ws_stmt);
          tree c = find_omp_clause (clauses, OMP_CLAUSE_SCHEDULE);
          if (c == NULL
              || OMP_CLAUSE_SCHEDULE_KIND (c) == OMP_CLAUSE_SCHEDULE_STATIC
              || find_omp_clause (clauses, OMP_CLAUSE_ORDERED))
            {
              region->is_combined_parallel = false;
              region->inner->is_combined_parallel = false;
              return;
            }
        }

      region->is_combined_parallel = true;
      region->inner->is_combined_parallel = true;
      region->ws_args = get_ws_args_for (ws_stmt);
    }
}


/* Return true if EXPR is variable sized.  */

static inline bool
is_variable_sized (const_tree expr)
{
  return !TREE_CONSTANT (TYPE_SIZE_UNIT (TREE_TYPE (expr)));
}

/* Return true if DECL is a reference type.  */

static inline bool
is_reference (tree decl)
{
  return lang_hooks.decls.omp_privatize_by_reference (decl);
}

/* Lookup variables in the decl or field splay trees.  The "maybe" form
   allows for the variable form to not have been entered, otherwise we
   assert that the variable must have been entered.  */

static inline tree
lookup_decl (tree var, omp_context *ctx)
{
  tree *n;
  n = (tree *) pointer_map_contains (ctx->cb.decl_map, var);
  return *n;
}

static inline tree
maybe_lookup_decl (const_tree var, omp_context *ctx)
{
  tree *n;
  n = (tree *) pointer_map_contains (ctx->cb.decl_map, var);
  return n ? *n : NULL_TREE;
}

static inline tree
lookup_field (tree var, omp_context *ctx)
{
  splay_tree_node n;
  n = splay_tree_lookup (ctx->field_map, (splay_tree_key) var);
  return (tree) n->value;
}

static inline tree
lookup_sfield (tree var, omp_context *ctx)
{
  splay_tree_node n;
  n = splay_tree_lookup (ctx->sfield_map
                         ? ctx->sfield_map : ctx->field_map,
                         (splay_tree_key) var);
  return (tree) n->value;
}

static inline tree
maybe_lookup_field (tree var, omp_context *ctx)
{
  splay_tree_node n;
  n = splay_tree_lookup (ctx->field_map, (splay_tree_key) var);
  return n ? (tree) n->value : NULL_TREE;
}

/* Return true if DECL should be copied by pointer.  SHARED_CTX is
   the parallel context if DECL is to be shared.  */

static bool
use_pointer_for_field (tree decl, omp_context *shared_ctx)
{
  if (AGGREGATE_TYPE_P (TREE_TYPE (decl)))
    return true;

  /* We can only use copy-in/copy-out semantics for shared variables
     when we know the value is not accessible from an outer scope.  */
  if (shared_ctx)
    {
      /* ??? Trivially accessible from anywhere.  But why would we even
         be passing an address in this case?  Should we simply assert
         this to be false, or should we have a cleanup pass that removes
         these from the list of mappings?  */
      if (TREE_STATIC (decl) || DECL_EXTERNAL (decl))
        return true;

      /* For variables with DECL_HAS_VALUE_EXPR_P set, we cannot tell
         without analyzing the expression whether or not its location
         is accessible to anyone else.  In the case of nested parallel
         regions it certainly may be.  */
      if (TREE_CODE (decl) != RESULT_DECL && DECL_HAS_VALUE_EXPR_P (decl))
        return true;

      /* Do not use copy-in/copy-out for variables that have their
         address taken.  */
      if (TREE_ADDRESSABLE (decl))
        return true;

      /* Disallow copy-in/out in nested parallel if
         decl is shared in outer parallel, otherwise
         each thread could store the shared variable
         in its own copy-in location, making the
         variable no longer really shared.  */
      if (!TREE_READONLY (decl) && shared_ctx->is_nested)
        {
          omp_context *up;

          for (up = shared_ctx->outer; up; up = up->outer)
            if (is_taskreg_ctx (up) && maybe_lookup_decl (decl, up))
              break;

          if (up)
            {
              tree c;

              for (c = gimple_omp_taskreg_clauses (up->stmt);
                   c; c = OMP_CLAUSE_CHAIN (c))
                if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_SHARED
                    && OMP_CLAUSE_DECL (c) == decl)
                  break;

              if (c)
                goto maybe_mark_addressable_and_ret;
            }
        }

      /* For tasks avoid using copy-in/out, unless they are readonly
         (in which case just copy-in is used).  As tasks can be
         deferred or executed in a different thread, when GOMP_task
         returns, the task hasn't necessarily terminated.  */
      if (!TREE_READONLY (decl) && is_task_ctx (shared_ctx))
        {
          tree outer;
        maybe_mark_addressable_and_ret:
          outer = maybe_lookup_decl_in_outer_ctx (decl, shared_ctx);
          if (is_gimple_reg (outer))
            {
              /* Taking address of OUTER in lower_send_shared_vars
                 might need regimplification of everything that uses the
                 variable.  */
              if (!task_shared_vars)
                task_shared_vars = BITMAP_ALLOC (NULL);
              bitmap_set_bit (task_shared_vars, DECL_UID (outer));
              TREE_ADDRESSABLE (outer) = 1;
            }
          return true;
        }
    }

  return false;
}
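
/* Illustration of the policy above: a non-addressable shared scalar
   that is invisible from any outer scope can safely use a private
   copy-in/copy-out field in the child, whereas an addressable
   variable, one shared again in a nested parallel, or one live across
   a deferred task must be communicated through a pointer field so
   that every thread sees the single shared location.  */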

/* Create a new VAR_DECL and copy information from VAR to it.  */

tree
copy_var_decl (tree var, tree name, tree type)
{
  tree copy = build_decl (DECL_SOURCE_LOCATION (var), VAR_DECL, name, type);

  TREE_ADDRESSABLE (copy) = TREE_ADDRESSABLE (var);
  TREE_THIS_VOLATILE (copy) = TREE_THIS_VOLATILE (var);
  DECL_GIMPLE_REG_P (copy) = DECL_GIMPLE_REG_P (var);
  DECL_ARTIFICIAL (copy) = DECL_ARTIFICIAL (var);
  DECL_IGNORED_P (copy) = DECL_IGNORED_P (var);
  DECL_CONTEXT (copy) = DECL_CONTEXT (var);
  TREE_USED (copy) = 1;
  DECL_SEEN_IN_BIND_EXPR_P (copy) = 1;

  return copy;
}

/* Construct a new automatic decl similar to VAR.  */

static tree
omp_copy_decl_2 (tree var, tree name, tree type, omp_context *ctx)
{
  tree copy = copy_var_decl (var, name, type);

  DECL_CONTEXT (copy) = current_function_decl;
  DECL_CHAIN (copy) = ctx->block_vars;
  ctx->block_vars = copy;

  return copy;
}

static tree
omp_copy_decl_1 (tree var, omp_context *ctx)
{
  return omp_copy_decl_2 (var, DECL_NAME (var), TREE_TYPE (var), ctx);
}

/* Build tree nodes to access the field for VAR on the receiver side.  */

static tree
build_receiver_ref (tree var, bool by_ref, omp_context *ctx)
{
  tree x, field = lookup_field (var, ctx);

  /* If the receiver record type was remapped in the child function,
     remap the field into the new record type.  */
  x = maybe_lookup_field (field, ctx);
  if (x != NULL)
    field = x;

  x = build_simple_mem_ref (ctx->receiver_decl);
  x = build3 (COMPONENT_REF, TREE_TYPE (field), x, field, NULL);
  if (by_ref)
    x = build_simple_mem_ref (x);

  return x;
}

/* Build tree nodes to access VAR in the scope outer to CTX.  In the case
   of a parallel, this is a component reference; for workshare constructs
   this is some variable.  */

static tree
build_outer_var_ref (tree var, omp_context *ctx)
{
  tree x;

  if (is_global_var (maybe_lookup_decl_in_outer_ctx (var, ctx)))
    x = var;
  else if (is_variable_sized (var))
    {
      x = TREE_OPERAND (DECL_VALUE_EXPR (var), 0);
      x = build_outer_var_ref (x, ctx);
      x = build_simple_mem_ref (x);
    }
  else if (is_taskreg_ctx (ctx))
    {
      bool by_ref = use_pointer_for_field (var, NULL);
      x = build_receiver_ref (var, by_ref, ctx);
    }
  else if (ctx->outer)
    x = lookup_decl (var, ctx->outer);
  else if (is_reference (var))
    /* This can happen with orphaned constructs.  If var is reference, it is
       possible it is shared and as such valid.  */
    x = var;
  else
    gcc_unreachable ();

  if (is_reference (var))
    x = build_simple_mem_ref (x);

  return x;
}

/* Build tree nodes to access the field for VAR on the sender side.  */

static tree
build_sender_ref (tree var, omp_context *ctx)
{
  tree field = lookup_sfield (var, ctx);
  return build3 (COMPONENT_REF, TREE_TYPE (field),
                 ctx->sender_decl, field, NULL);
}

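/* For example (a sketch): for a variable X mapped with by_ref = false,
   the receiver side reads (*.omp_data_i).X while the sender side
   stores into .omp_data_o.X; with by_ref = true the field holds a
   pointer to X, so the receiver adds one more dereference.  */
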
/* Add a new field for VAR inside the structure CTX->SENDER_DECL.  */

static void
install_var_field (tree var, bool by_ref, int mask, omp_context *ctx)
{
  tree field, type, sfield = NULL_TREE;

  gcc_assert ((mask & 1) == 0
              || !splay_tree_lookup (ctx->field_map, (splay_tree_key) var));
  gcc_assert ((mask & 2) == 0 || !ctx->sfield_map
              || !splay_tree_lookup (ctx->sfield_map, (splay_tree_key) var));

  type = TREE_TYPE (var);
  if (by_ref)
    type = build_pointer_type (type);
  else if ((mask & 3) == 1 && is_reference (var))
    type = TREE_TYPE (type);

  field = build_decl (DECL_SOURCE_LOCATION (var),
                      FIELD_DECL, DECL_NAME (var), type);

  /* Remember what variable this field was created for.  This does have a
     side effect of making dwarf2out ignore this member, so for helpful
     debugging we clear it later in delete_omp_context.  */
  DECL_ABSTRACT_ORIGIN (field) = var;
  if (type == TREE_TYPE (var))
    {
      DECL_ALIGN (field) = DECL_ALIGN (var);
      DECL_USER_ALIGN (field) = DECL_USER_ALIGN (var);
      TREE_THIS_VOLATILE (field) = TREE_THIS_VOLATILE (var);
    }
  else
    DECL_ALIGN (field) = TYPE_ALIGN (type);

  if ((mask & 3) == 3)
    {
      insert_field_into_struct (ctx->record_type, field);
      if (ctx->srecord_type)
        {
          sfield = build_decl (DECL_SOURCE_LOCATION (var),
                               FIELD_DECL, DECL_NAME (var), type);
          DECL_ABSTRACT_ORIGIN (sfield) = var;
          DECL_ALIGN (sfield) = DECL_ALIGN (field);
          DECL_USER_ALIGN (sfield) = DECL_USER_ALIGN (field);
          TREE_THIS_VOLATILE (sfield) = TREE_THIS_VOLATILE (field);
          insert_field_into_struct (ctx->srecord_type, sfield);
        }
    }
  else
    {
      if (ctx->srecord_type == NULL_TREE)
        {
          tree t;

          ctx->srecord_type = lang_hooks.types.make_type (RECORD_TYPE);
          ctx->sfield_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
          for (t = TYPE_FIELDS (ctx->record_type); t ; t = TREE_CHAIN (t))
            {
              sfield = build_decl (DECL_SOURCE_LOCATION (var),
                                   FIELD_DECL, DECL_NAME (t), TREE_TYPE (t));
              DECL_ABSTRACT_ORIGIN (sfield) = DECL_ABSTRACT_ORIGIN (t);
              insert_field_into_struct (ctx->srecord_type, sfield);
              splay_tree_insert (ctx->sfield_map,
                                 (splay_tree_key) DECL_ABSTRACT_ORIGIN (t),
                                 (splay_tree_value) sfield);
            }
        }
      sfield = field;
      insert_field_into_struct ((mask & 1) ? ctx->record_type
                                : ctx->srecord_type, field);
    }

  if (mask & 1)
    splay_tree_insert (ctx->field_map, (splay_tree_key) var,
                       (splay_tree_value) field);
  if ((mask & 2) && ctx->sfield_map)
    splay_tree_insert (ctx->sfield_map, (splay_tree_key) var,
                       (splay_tree_value) sfield);
}
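
/* In MASK above, bit 0 requests a field in the receiver record
   (record_type) and bit 1 a field in the sender record (srecord_type);
   a MASK of 3 installs the field in both.  E.g., the task handling in
   scan_sharing_clauses passes 1 for data only the task body needs and
   2 for data only the spawning thread sends.  */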

static tree
install_var_local (tree var, omp_context *ctx)
{
  tree new_var = omp_copy_decl_1 (var, ctx);
  insert_decl_map (&ctx->cb, var, new_var);
  return new_var;
}

/* Adjust the replacement for DECL in CTX for the new context.  This means
   copying the DECL_VALUE_EXPR, and fixing up the type.  */

static void
fixup_remapped_decl (tree decl, omp_context *ctx, bool private_debug)
{
  tree new_decl, size;

  new_decl = lookup_decl (decl, ctx);

  TREE_TYPE (new_decl) = remap_type (TREE_TYPE (decl), &ctx->cb);

  if ((!TREE_CONSTANT (DECL_SIZE (new_decl)) || private_debug)
      && DECL_HAS_VALUE_EXPR_P (decl))
    {
      tree ve = DECL_VALUE_EXPR (decl);
      walk_tree (&ve, copy_tree_body_r, &ctx->cb, NULL);
      SET_DECL_VALUE_EXPR (new_decl, ve);
      DECL_HAS_VALUE_EXPR_P (new_decl) = 1;
    }

  if (!TREE_CONSTANT (DECL_SIZE (new_decl)))
    {
      size = remap_decl (DECL_SIZE (decl), &ctx->cb);
      if (size == error_mark_node)
        size = TYPE_SIZE (TREE_TYPE (new_decl));
      DECL_SIZE (new_decl) = size;

      size = remap_decl (DECL_SIZE_UNIT (decl), &ctx->cb);
      if (size == error_mark_node)
        size = TYPE_SIZE_UNIT (TREE_TYPE (new_decl));
      DECL_SIZE_UNIT (new_decl) = size;
    }
}

/* The callback for remap_decl.  Search all containing contexts for a
   mapping of the variable; this avoids having to duplicate the splay
   tree ahead of time.  We know a mapping doesn't already exist in the
   given context.  Create new mappings to implement default semantics.  */

static tree
omp_copy_decl (tree var, copy_body_data *cb)
{
  omp_context *ctx = (omp_context *) cb;
  tree new_var;

  if (TREE_CODE (var) == LABEL_DECL)
    {
      new_var = create_artificial_label (DECL_SOURCE_LOCATION (var));
      DECL_CONTEXT (new_var) = current_function_decl;
      insert_decl_map (&ctx->cb, var, new_var);
      return new_var;
    }

  while (!is_taskreg_ctx (ctx))
    {
      ctx = ctx->outer;
      if (ctx == NULL)
        return var;
      new_var = maybe_lookup_decl (var, ctx);
      if (new_var)
        return new_var;
    }

  if (is_global_var (var) || decl_function_context (var) != ctx->cb.src_fn)
    return var;

  return error_mark_node;
}


/* Debugging dumps for parallel regions.  */
void dump_omp_region (FILE *, struct omp_region *, int);
void debug_omp_region (struct omp_region *);
void debug_all_omp_regions (void);

/* Dump the parallel region tree rooted at REGION.  */

void
dump_omp_region (FILE *file, struct omp_region *region, int indent)
{
  fprintf (file, "%*sbb %d: %s\n", indent, "", region->entry->index,
           gimple_code_name[region->type]);

  if (region->inner)
    dump_omp_region (file, region->inner, indent + 4);

  if (region->cont)
    {
      fprintf (file, "%*sbb %d: GIMPLE_OMP_CONTINUE\n", indent, "",
               region->cont->index);
    }

  if (region->exit)
    fprintf (file, "%*sbb %d: GIMPLE_OMP_RETURN\n", indent, "",
             region->exit->index);
  else
    fprintf (file, "%*s[no exit marker]\n", indent, "");

  if (region->next)
    dump_omp_region (file, region->next, indent);
}

DEBUG_FUNCTION void
debug_omp_region (struct omp_region *region)
{
  dump_omp_region (stderr, region, 0);
}

DEBUG_FUNCTION void
debug_all_omp_regions (void)
{
  dump_omp_region (stderr, root_omp_region, 0);
}


/* Create a new parallel region starting at basic block BB inside
   region PARENT.  */

struct omp_region *
new_omp_region (basic_block bb, enum gimple_code type,
                struct omp_region *parent)
{
  struct omp_region *region = XCNEW (struct omp_region);

  region->outer = parent;
  region->entry = bb;
  region->type = type;

  if (parent)
    {
      /* This is a nested region.  Add it to the list of inner
         regions in PARENT.  */
      region->next = parent->inner;
      parent->inner = region;
    }
  else
    {
      /* This is a toplevel region.  Add it to the list of toplevel
         regions in ROOT_OMP_REGION.  */
      region->next = root_omp_region;
      root_omp_region = region;
    }

  return region;
}

/* Release the memory associated with the region tree rooted at REGION.  */

static void
free_omp_region_1 (struct omp_region *region)
{
  struct omp_region *i, *n;

  for (i = region->inner; i ; i = n)
    {
      n = i->next;
      free_omp_region_1 (i);
    }

  free (region);
}

/* Release the memory for the entire omp region tree.  */

void
free_omp_regions (void)
{
  struct omp_region *r, *n;
  for (r = root_omp_region; r ; r = n)
    {
      n = r->next;
      free_omp_region_1 (r);
    }
  root_omp_region = NULL;
}


/* Create a new context, with OUTER_CTX being the surrounding context.  */

static omp_context *
new_omp_context (gimple stmt, omp_context *outer_ctx)
{
  omp_context *ctx = XCNEW (omp_context);

  splay_tree_insert (all_contexts, (splay_tree_key) stmt,
                     (splay_tree_value) ctx);
  ctx->stmt = stmt;

  if (outer_ctx)
    {
      ctx->outer = outer_ctx;
      ctx->cb = outer_ctx->cb;
      ctx->cb.block = NULL;
      ctx->depth = outer_ctx->depth + 1;
    }
  else
    {
      ctx->cb.src_fn = current_function_decl;
      ctx->cb.dst_fn = current_function_decl;
      ctx->cb.src_node = cgraph_get_node (current_function_decl);
      gcc_checking_assert (ctx->cb.src_node);
      ctx->cb.dst_node = ctx->cb.src_node;
      ctx->cb.src_cfun = cfun;
      ctx->cb.copy_decl = omp_copy_decl;
      ctx->cb.eh_lp_nr = 0;
      ctx->cb.transform_call_graph_edges = CB_CGE_MOVE;
      ctx->depth = 1;
    }

  ctx->cb.decl_map = pointer_map_create ();

  return ctx;
}

static gimple_seq maybe_catch_exception (gimple_seq);

/* Finalize task copyfn.  */

static void
finalize_task_copyfn (gimple task_stmt)
{
  struct function *child_cfun;
  tree child_fn, old_fn;
  gimple_seq seq, new_seq;
  gimple bind;

  child_fn = gimple_omp_task_copy_fn (task_stmt);
  if (child_fn == NULL_TREE)
    return;

  child_cfun = DECL_STRUCT_FUNCTION (child_fn);

  /* Inform the callgraph about the new function.  */
  DECL_STRUCT_FUNCTION (child_fn)->curr_properties
    = cfun->curr_properties;

  old_fn = current_function_decl;
  push_cfun (child_cfun);
  current_function_decl = child_fn;
  bind = gimplify_body (&DECL_SAVED_TREE (child_fn), child_fn, false);
  seq = gimple_seq_alloc ();
  gimple_seq_add_stmt (&seq, bind);
  new_seq = maybe_catch_exception (seq);
  if (new_seq != seq)
    {
      bind = gimple_build_bind (NULL, new_seq, NULL);
      seq = gimple_seq_alloc ();
      gimple_seq_add_stmt (&seq, bind);
    }
  gimple_set_body (child_fn, seq);
  pop_cfun ();
  current_function_decl = old_fn;

  cgraph_add_new_function (child_fn, false);
}

/* Destroy an omp_context data structure.  Called through the splay tree
   value delete callback.  */

static void
delete_omp_context (splay_tree_value value)
{
  omp_context *ctx = (omp_context *) value;

  pointer_map_destroy (ctx->cb.decl_map);

  if (ctx->field_map)
    splay_tree_delete (ctx->field_map);
  if (ctx->sfield_map)
    splay_tree_delete (ctx->sfield_map);

  /* We hijacked DECL_ABSTRACT_ORIGIN earlier.  We need to clear it before
     it produces corrupt debug information.  */
  if (ctx->record_type)
    {
      tree t;
      for (t = TYPE_FIELDS (ctx->record_type); t ; t = DECL_CHAIN (t))
        DECL_ABSTRACT_ORIGIN (t) = NULL;
    }
  if (ctx->srecord_type)
    {
      tree t;
      for (t = TYPE_FIELDS (ctx->srecord_type); t ; t = DECL_CHAIN (t))
        DECL_ABSTRACT_ORIGIN (t) = NULL;
    }

  if (is_task_ctx (ctx))
    finalize_task_copyfn (ctx->stmt);

  XDELETE (ctx);
}

/* Fix up RECEIVER_DECL with a type that has been remapped to the child
   context.  */

static void
fixup_child_record_type (omp_context *ctx)
{
  tree f, type = ctx->record_type;

  /* ??? It isn't sufficient to just call remap_type here, because
     variably_modified_type_p doesn't work the way we expect for
     record types.  Testing each field for whether it needs remapping
     and creating a new record by hand works, however.  */
  for (f = TYPE_FIELDS (type); f ; f = DECL_CHAIN (f))
    if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
      break;
  if (f)
    {
      tree name, new_fields = NULL;

      type = lang_hooks.types.make_type (RECORD_TYPE);
      name = DECL_NAME (TYPE_NAME (ctx->record_type));
      name = build_decl (DECL_SOURCE_LOCATION (ctx->receiver_decl),
                         TYPE_DECL, name, type);
      TYPE_NAME (type) = name;

      for (f = TYPE_FIELDS (ctx->record_type); f ; f = DECL_CHAIN (f))
        {
          tree new_f = copy_node (f);
          DECL_CONTEXT (new_f) = type;
          TREE_TYPE (new_f) = remap_type (TREE_TYPE (f), &ctx->cb);
          DECL_CHAIN (new_f) = new_fields;
          walk_tree (&DECL_SIZE (new_f), copy_tree_body_r, &ctx->cb, NULL);
          walk_tree (&DECL_SIZE_UNIT (new_f), copy_tree_body_r,
                     &ctx->cb, NULL);
          walk_tree (&DECL_FIELD_OFFSET (new_f), copy_tree_body_r,
                     &ctx->cb, NULL);
          new_fields = new_f;

          /* Arrange to be able to look up the receiver field
             given the sender field.  */
          splay_tree_insert (ctx->field_map, (splay_tree_key) f,
                             (splay_tree_value) new_f);
        }
      TYPE_FIELDS (type) = nreverse (new_fields);
      layout_type (type);
    }

  TREE_TYPE (ctx->receiver_decl) = build_pointer_type (type);
}

/* Instantiate decls as necessary in CTX to satisfy the data sharing
   specified by CLAUSES.  */

static void
scan_sharing_clauses (tree clauses, omp_context *ctx)
{
  tree c, decl;
  bool scan_array_reductions = false;

  for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
    {
      bool by_ref;

      switch (OMP_CLAUSE_CODE (c))
        {
        case OMP_CLAUSE_PRIVATE:
          decl = OMP_CLAUSE_DECL (c);
          if (OMP_CLAUSE_PRIVATE_OUTER_REF (c))
            goto do_private;
          else if (!is_variable_sized (decl))
            install_var_local (decl, ctx);
          break;

        case OMP_CLAUSE_SHARED:
          gcc_assert (is_taskreg_ctx (ctx));
          decl = OMP_CLAUSE_DECL (c);
          gcc_assert (!COMPLETE_TYPE_P (TREE_TYPE (decl))
                      || !is_variable_sized (decl));
          /* Global variables don't need to be copied,
             the receiver side will use them directly.  */
          if (is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx)))
            break;
          by_ref = use_pointer_for_field (decl, ctx);
          if (! TREE_READONLY (decl)
              || TREE_ADDRESSABLE (decl)
              || by_ref
              || is_reference (decl))
            {
              install_var_field (decl, by_ref, 3, ctx);
              install_var_local (decl, ctx);
              break;
            }
          /* We don't need to copy const scalar vars back.  */
          OMP_CLAUSE_SET_CODE (c, OMP_CLAUSE_FIRSTPRIVATE);
          goto do_private;

        case OMP_CLAUSE_LASTPRIVATE:
          /* Let the corresponding firstprivate clause create
             the variable.  */
          if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
            break;
          /* FALLTHRU */

        case OMP_CLAUSE_FIRSTPRIVATE:
        case OMP_CLAUSE_REDUCTION:
          decl = OMP_CLAUSE_DECL (c);
        do_private:
          if (is_variable_sized (decl))
            {
              if (is_task_ctx (ctx))
                install_var_field (decl, false, 1, ctx);
              break;
            }
          else if (is_taskreg_ctx (ctx))
            {
              bool global
                = is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx));
              by_ref = use_pointer_for_field (decl, NULL);

              if (is_task_ctx (ctx)
                  && (global || by_ref || is_reference (decl)))
                {
                  install_var_field (decl, false, 1, ctx);
                  if (!global)
                    install_var_field (decl, by_ref, 2, ctx);
                }
              else if (!global)
                install_var_field (decl, by_ref, 3, ctx);
            }
          install_var_local (decl, ctx);
          break;

        case OMP_CLAUSE_COPYPRIVATE:
        case OMP_CLAUSE_COPYIN:
          decl = OMP_CLAUSE_DECL (c);
          by_ref = use_pointer_for_field (decl, NULL);
          install_var_field (decl, by_ref, 3, ctx);
          break;

        case OMP_CLAUSE_DEFAULT:
          ctx->default_kind = OMP_CLAUSE_DEFAULT_KIND (c);
          break;

        case OMP_CLAUSE_FINAL:
        case OMP_CLAUSE_IF:
        case OMP_CLAUSE_NUM_THREADS:
        case OMP_CLAUSE_SCHEDULE:
          if (ctx->outer)
            scan_omp_op (&OMP_CLAUSE_OPERAND (c, 0), ctx->outer);
          break;

        case OMP_CLAUSE_NOWAIT:
        case OMP_CLAUSE_ORDERED:
        case OMP_CLAUSE_COLLAPSE:
        case OMP_CLAUSE_UNTIED:
        case OMP_CLAUSE_MERGEABLE:
          break;

        default:
          gcc_unreachable ();
        }
    }

  for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
    {
      switch (OMP_CLAUSE_CODE (c))
        {
        case OMP_CLAUSE_LASTPRIVATE:
          /* Let the corresponding firstprivate clause create
             the variable.  */
          if (OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c))
            scan_array_reductions = true;
          if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
            break;
          /* FALLTHRU */

        case OMP_CLAUSE_PRIVATE:
        case OMP_CLAUSE_FIRSTPRIVATE:
        case OMP_CLAUSE_REDUCTION:
          decl = OMP_CLAUSE_DECL (c);
          if (is_variable_sized (decl))
            install_var_local (decl, ctx);
          fixup_remapped_decl (decl, ctx,
                               OMP_CLAUSE_CODE (c) == OMP_CLAUSE_PRIVATE
                               && OMP_CLAUSE_PRIVATE_DEBUG (c));
          if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
              && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
            scan_array_reductions = true;
          break;

        case OMP_CLAUSE_SHARED:
          decl = OMP_CLAUSE_DECL (c);
          if (! is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx)))
            fixup_remapped_decl (decl, ctx, false);
          break;

        case OMP_CLAUSE_COPYPRIVATE:
        case OMP_CLAUSE_COPYIN:
        case OMP_CLAUSE_DEFAULT:
        case OMP_CLAUSE_IF:
        case OMP_CLAUSE_NUM_THREADS:
        case OMP_CLAUSE_SCHEDULE:
        case OMP_CLAUSE_NOWAIT:
        case OMP_CLAUSE_ORDERED:
        case OMP_CLAUSE_COLLAPSE:
        case OMP_CLAUSE_UNTIED:
        case OMP_CLAUSE_FINAL:
        case OMP_CLAUSE_MERGEABLE:
          break;

        default:
          gcc_unreachable ();
        }
    }

  if (scan_array_reductions)
    for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
      if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
          && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
        {
          scan_omp (OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c), ctx);
          scan_omp (OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c), ctx);
        }
      else if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE
               && OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c))
        scan_omp (OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c), ctx);
}
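
/* For instance (a sketch): given

     #pragma omp parallel shared (a) firstprivate (b)

   the first pass above installs fields for 'a' and 'b' in the
   .omp_data_s record and local replacements in the child context,
   while the second pass fixes up the remapped decls (types, sizes and
   DECL_VALUE_EXPRs) once all mappings exist.  */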

/* Create a new name for the omp child function.  Returns an identifier.  */

static GTY(()) unsigned int tmp_ompfn_id_num;

static tree
create_omp_child_function_name (bool task_copy)
{
  return (clone_function_name (current_function_decl,
                               task_copy ? "_omp_cpyfn" : "_omp_fn"));
}

/* Build a decl for the omp child function.  It'll not contain a body
   yet, just the bare decl.  */

static void
create_omp_child_function (omp_context *ctx, bool task_copy)
{
  tree decl, type, name, t;

  name = create_omp_child_function_name (task_copy);
  if (task_copy)
    type = build_function_type_list (void_type_node, ptr_type_node,
                                     ptr_type_node, NULL_TREE);
  else
    type = build_function_type_list (void_type_node, ptr_type_node, NULL_TREE);

  decl = build_decl (gimple_location (ctx->stmt),
                     FUNCTION_DECL, name, type);

  if (!task_copy)
    ctx->cb.dst_fn = decl;
  else
    gimple_omp_task_set_copy_fn (ctx->stmt, decl);

  TREE_STATIC (decl) = 1;
  TREE_USED (decl) = 1;
  DECL_ARTIFICIAL (decl) = 1;
  DECL_NAMELESS (decl) = 1;
  DECL_IGNORED_P (decl) = 0;
  TREE_PUBLIC (decl) = 0;
  DECL_UNINLINABLE (decl) = 1;
  DECL_EXTERNAL (decl) = 0;
  DECL_CONTEXT (decl) = NULL_TREE;
  DECL_INITIAL (decl) = make_node (BLOCK);

  t = build_decl (DECL_SOURCE_LOCATION (decl),
                  RESULT_DECL, NULL_TREE, void_type_node);
  DECL_ARTIFICIAL (t) = 1;
  DECL_IGNORED_P (t) = 1;
  DECL_CONTEXT (t) = decl;
  DECL_RESULT (decl) = t;

  t = build_decl (DECL_SOURCE_LOCATION (decl),
                  PARM_DECL, get_identifier (".omp_data_i"), ptr_type_node);
  DECL_ARTIFICIAL (t) = 1;
  DECL_NAMELESS (t) = 1;
  DECL_ARG_TYPE (t) = ptr_type_node;
  DECL_CONTEXT (t) = current_function_decl;
  TREE_USED (t) = 1;
  DECL_ARGUMENTS (decl) = t;
  if (!task_copy)
    ctx->receiver_decl = t;
  else
    {
      t = build_decl (DECL_SOURCE_LOCATION (decl),
                      PARM_DECL, get_identifier (".omp_data_o"),
                      ptr_type_node);
      DECL_ARTIFICIAL (t) = 1;
      DECL_NAMELESS (t) = 1;
      DECL_ARG_TYPE (t) = ptr_type_node;
      DECL_CONTEXT (t) = current_function_decl;
      TREE_USED (t) = 1;
      TREE_ADDRESSABLE (t) = 1;
      DECL_CHAIN (t) = DECL_ARGUMENTS (decl);
      DECL_ARGUMENTS (decl) = t;
    }

  /* Allocate memory for the function structure.  The call to
     allocate_struct_function clobbers CFUN, so we need to restore
     it afterward.  */
  push_struct_function (decl);
  cfun->function_end_locus = gimple_location (ctx->stmt);
  pop_cfun ();
}
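
/* The resulting child function thus takes a single generic pointer
   (an illustrative rendering):

     void foo._omp_fn.N (void * .omp_data_i);

   while a task copy function takes destination and source records:

     void foo._omp_cpyfn.N (void * .omp_data_o, void * .omp_data_i);  */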


/* Scan an OpenMP parallel directive.  */

static void
scan_omp_parallel (gimple_stmt_iterator *gsi, omp_context *outer_ctx)
{
  omp_context *ctx;
  tree name;
  gimple stmt = gsi_stmt (*gsi);

  /* Ignore parallel directives with empty bodies, unless there
     are copyin clauses.  */
  if (optimize > 0
      && empty_body_p (gimple_omp_body (stmt))
      && find_omp_clause (gimple_omp_parallel_clauses (stmt),
                          OMP_CLAUSE_COPYIN) == NULL)
    {
      gsi_replace (gsi, gimple_build_nop (), false);
      return;
    }

  ctx = new_omp_context (stmt, outer_ctx);
  if (taskreg_nesting_level > 1)
    ctx->is_nested = true;
  ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
  ctx->default_kind = OMP_CLAUSE_DEFAULT_SHARED;
  ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
  name = create_tmp_var_name (".omp_data_s");
  name = build_decl (gimple_location (stmt),
                     TYPE_DECL, name, ctx->record_type);
  DECL_ARTIFICIAL (name) = 1;
  DECL_NAMELESS (name) = 1;
  TYPE_NAME (ctx->record_type) = name;
  create_omp_child_function (ctx, false);
  gimple_omp_parallel_set_child_fn (stmt, ctx->cb.dst_fn);

  scan_sharing_clauses (gimple_omp_parallel_clauses (stmt), ctx);
  scan_omp (gimple_omp_body (stmt), ctx);

  if (TYPE_FIELDS (ctx->record_type) == NULL)
    ctx->record_type = ctx->receiver_decl = NULL;
  else
    {
      layout_type (ctx->record_type);
      fixup_child_record_type (ctx);
    }
}

/* Scan an OpenMP task directive.  */

static void
scan_omp_task (gimple_stmt_iterator *gsi, omp_context *outer_ctx)
{
  omp_context *ctx;
  tree name, t;
  gimple stmt = gsi_stmt (*gsi);
  location_t loc = gimple_location (stmt);

  /* Ignore task directives with empty bodies.  */
  if (optimize > 0
      && empty_body_p (gimple_omp_body (stmt)))
    {
      gsi_replace (gsi, gimple_build_nop (), false);
      return;
    }

  ctx = new_omp_context (stmt, outer_ctx);
  if (taskreg_nesting_level > 1)
    ctx->is_nested = true;
  ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
  ctx->default_kind = OMP_CLAUSE_DEFAULT_SHARED;
  ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
  name = create_tmp_var_name (".omp_data_s");
  name = build_decl (gimple_location (stmt),
                     TYPE_DECL, name, ctx->record_type);
  DECL_ARTIFICIAL (name) = 1;
  DECL_NAMELESS (name) = 1;
  TYPE_NAME (ctx->record_type) = name;
  create_omp_child_function (ctx, false);
  gimple_omp_task_set_child_fn (stmt, ctx->cb.dst_fn);

  scan_sharing_clauses (gimple_omp_task_clauses (stmt), ctx);

  if (ctx->srecord_type)
    {
      name = create_tmp_var_name (".omp_data_a");
      name = build_decl (gimple_location (stmt),
                         TYPE_DECL, name, ctx->srecord_type);
      DECL_ARTIFICIAL (name) = 1;
      DECL_NAMELESS (name) = 1;
      TYPE_NAME (ctx->srecord_type) = name;
      create_omp_child_function (ctx, true);
    }

  scan_omp (gimple_omp_body (stmt), ctx);

  if (TYPE_FIELDS (ctx->record_type) == NULL)
    {
      ctx->record_type = ctx->receiver_decl = NULL;
      t = build_int_cst (long_integer_type_node, 0);
      gimple_omp_task_set_arg_size (stmt, t);
      t = build_int_cst (long_integer_type_node, 1);
      gimple_omp_task_set_arg_align (stmt, t);
    }
  else
    {
      tree *p, vla_fields = NULL_TREE, *q = &vla_fields;
      /* Move VLA fields to the end.  */
      p = &TYPE_FIELDS (ctx->record_type);
      while (*p)
        if (!TYPE_SIZE_UNIT (TREE_TYPE (*p))
            || ! TREE_CONSTANT (TYPE_SIZE_UNIT (TREE_TYPE (*p))))
          {
            *q = *p;
            *p = TREE_CHAIN (*p);
            TREE_CHAIN (*q) = NULL_TREE;
            q = &TREE_CHAIN (*q);
          }
        else
          p = &DECL_CHAIN (*p);
      *p = vla_fields;
      layout_type (ctx->record_type);
      fixup_child_record_type (ctx);
      if (ctx->srecord_type)
        layout_type (ctx->srecord_type);
      t = fold_convert_loc (loc, long_integer_type_node,
                            TYPE_SIZE_UNIT (ctx->record_type));
      gimple_omp_task_set_arg_size (stmt, t);
      t = build_int_cst (long_integer_type_node,
                         TYPE_ALIGN_UNIT (ctx->record_type));
      gimple_omp_task_set_arg_align (stmt, t);
    }
}

953ff289 1751
50674e96 1752/* Scan an OpenMP loop directive. */
953ff289
DN
1753
1754static void
726a989a 1755scan_omp_for (gimple stmt, omp_context *outer_ctx)
953ff289 1756{
50674e96 1757 omp_context *ctx;
726a989a 1758 size_t i;
953ff289 1759
50674e96 1760 ctx = new_omp_context (stmt, outer_ctx);
953ff289 1761
726a989a 1762 scan_sharing_clauses (gimple_omp_for_clauses (stmt), ctx);
953ff289 1763
726a989a
RB
1764 scan_omp (gimple_omp_for_pre_body (stmt), ctx);
1765 for (i = 0; i < gimple_omp_for_collapse (stmt); i++)
a68ab351 1766 {
726a989a
RB
1767 scan_omp_op (gimple_omp_for_index_ptr (stmt, i), ctx);
1768 scan_omp_op (gimple_omp_for_initial_ptr (stmt, i), ctx);
1769 scan_omp_op (gimple_omp_for_final_ptr (stmt, i), ctx);
1770 scan_omp_op (gimple_omp_for_incr_ptr (stmt, i), ctx);
a68ab351 1771 }
726a989a 1772 scan_omp (gimple_omp_body (stmt), ctx);
953ff289
DN
1773}
1774
1775/* Scan an OpenMP sections directive. */
1776
1777static void
726a989a 1778scan_omp_sections (gimple stmt, omp_context *outer_ctx)
953ff289 1779{
953ff289
DN
1780 omp_context *ctx;
1781
1782 ctx = new_omp_context (stmt, outer_ctx);
726a989a
RB
1783 scan_sharing_clauses (gimple_omp_sections_clauses (stmt), ctx);
1784 scan_omp (gimple_omp_body (stmt), ctx);
953ff289
DN
1785}
1786
1787/* Scan an OpenMP single directive. */
1788
1789static void
726a989a 1790scan_omp_single (gimple stmt, omp_context *outer_ctx)
953ff289 1791{
953ff289
DN
1792 omp_context *ctx;
1793 tree name;
1794
1795 ctx = new_omp_context (stmt, outer_ctx);
1796 ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
1797 ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
1798 name = create_tmp_var_name (".omp_copy_s");
c2255bc4
AH
1799 name = build_decl (gimple_location (stmt),
1800 TYPE_DECL, name, ctx->record_type);
953ff289
DN
1801 TYPE_NAME (ctx->record_type) = name;
1802
726a989a
RB
1803 scan_sharing_clauses (gimple_omp_single_clauses (stmt), ctx);
1804 scan_omp (gimple_omp_body (stmt), ctx);
953ff289
DN
1805
1806 if (TYPE_FIELDS (ctx->record_type) == NULL)
1807 ctx->record_type = NULL;
1808 else
1809 layout_type (ctx->record_type);
1810}
1811
953ff289 1812
a6fc8e21
JJ
1813/* Check OpenMP nesting restrictions. */
1814static void
726a989a 1815check_omp_nesting_restrictions (gimple stmt, omp_context *ctx)
a6fc8e21 1816{
726a989a 1817 switch (gimple_code (stmt))
a6fc8e21 1818 {
726a989a
RB
1819 case GIMPLE_OMP_FOR:
1820 case GIMPLE_OMP_SECTIONS:
1821 case GIMPLE_OMP_SINGLE:
1822 case GIMPLE_CALL:
a6fc8e21 1823 for (; ctx != NULL; ctx = ctx->outer)
726a989a 1824 switch (gimple_code (ctx->stmt))
a6fc8e21 1825 {
726a989a
RB
1826 case GIMPLE_OMP_FOR:
1827 case GIMPLE_OMP_SECTIONS:
1828 case GIMPLE_OMP_SINGLE:
1829 case GIMPLE_OMP_ORDERED:
1830 case GIMPLE_OMP_MASTER:
1831 case GIMPLE_OMP_TASK:
1832 if (is_gimple_call (stmt))
a68ab351
JJ
1833 {
1834 warning (0, "barrier region may not be closely nested inside "
1835 "of work-sharing, critical, ordered, master or "
1836 "explicit task region");
1837 return;
1838 }
a6fc8e21 1839 warning (0, "work-sharing region may not be closely nested inside "
a68ab351
JJ
1840 "of work-sharing, critical, ordered, master or explicit "
1841 "task region");
a6fc8e21 1842 return;
726a989a 1843 case GIMPLE_OMP_PARALLEL:
a6fc8e21
JJ
1844 return;
1845 default:
1846 break;
1847 }
1848 break;
726a989a 1849 case GIMPLE_OMP_MASTER:
a6fc8e21 1850 for (; ctx != NULL; ctx = ctx->outer)
726a989a 1851 switch (gimple_code (ctx->stmt))
a6fc8e21 1852 {
726a989a
RB
1853 case GIMPLE_OMP_FOR:
1854 case GIMPLE_OMP_SECTIONS:
1855 case GIMPLE_OMP_SINGLE:
1856 case GIMPLE_OMP_TASK:
a6fc8e21 1857 warning (0, "master region may not be closely nested inside "
a68ab351 1858 "of work-sharing or explicit task region");
a6fc8e21 1859 return;
726a989a 1860 case GIMPLE_OMP_PARALLEL:
a6fc8e21
JJ
1861 return;
1862 default:
1863 break;
1864 }
1865 break;
726a989a 1866 case GIMPLE_OMP_ORDERED:
a6fc8e21 1867 for (; ctx != NULL; ctx = ctx->outer)
726a989a 1868 switch (gimple_code (ctx->stmt))
a6fc8e21 1869 {
726a989a
RB
1870 case GIMPLE_OMP_CRITICAL:
1871 case GIMPLE_OMP_TASK:
a6fc8e21 1872 warning (0, "ordered region may not be closely nested inside "
a68ab351 1873 "of critical or explicit task region");
a6fc8e21 1874 return;
726a989a
RB
1875 case GIMPLE_OMP_FOR:
1876 if (find_omp_clause (gimple_omp_for_clauses (ctx->stmt),
a6fc8e21
JJ
1877 OMP_CLAUSE_ORDERED) == NULL)
1878 warning (0, "ordered region must be closely nested inside "
1879 "a loop region with an ordered clause");
1880 return;
726a989a 1881 case GIMPLE_OMP_PARALLEL:
a6fc8e21
JJ
1882 return;
1883 default:
1884 break;
1885 }
1886 break;
726a989a 1887 case GIMPLE_OMP_CRITICAL:
a6fc8e21 1888 for (; ctx != NULL; ctx = ctx->outer)
726a989a
RB
1889 if (gimple_code (ctx->stmt) == GIMPLE_OMP_CRITICAL
1890 && (gimple_omp_critical_name (stmt)
1891 == gimple_omp_critical_name (ctx->stmt)))
a6fc8e21
JJ
1892 {
1893 warning (0, "critical region may not be nested inside a critical "
1894 "region with the same name");
1895 return;
1896 }
1897 break;
1898 default:
1899 break;
1900 }
1901}
1902
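/* Illustration (a user-level sketch, not part of this pass): the
   following would reach the GIMPLE_CALL arm of the switch above and
   trigger the barrier warning, because the barrier is closely nested
   inside a work-sharing region:

       #pragma omp parallel
       {
         #pragma omp for
         for (i = 0; i < n; i++)
           {
             #pragma omp barrier
           }
       }
*/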
1903
726a989a
RB
1904/* Helper function for scan_omp.
1905
1906 Callback for walk_tree, or for statement operands via
1907 walk_gimple_stmt, used to scan for OpenMP directives in *TP. */
953ff289
DN
1908
1909static tree
726a989a 1910scan_omp_1_op (tree *tp, int *walk_subtrees, void *data)
953ff289 1911{
d3bfe4de
KG
1912 struct walk_stmt_info *wi = (struct walk_stmt_info *) data;
1913 omp_context *ctx = (omp_context *) wi->info;
953ff289
DN
1914 tree t = *tp;
1915
726a989a
RB
1916 switch (TREE_CODE (t))
1917 {
1918 case VAR_DECL:
1919 case PARM_DECL:
1920 case LABEL_DECL:
1921 case RESULT_DECL:
1922 if (ctx)
1923 *tp = remap_decl (t, &ctx->cb);
1924 break;
1925
1926 default:
1927 if (ctx && TYPE_P (t))
1928 *tp = remap_type (t, &ctx->cb);
1929 else if (!DECL_P (t))
a900ae6b
JJ
1930 {
1931 *walk_subtrees = 1;
1932 if (ctx)
70f34814
RG
1933 {
1934 tree tem = remap_type (TREE_TYPE (t), &ctx->cb);
1935 if (tem != TREE_TYPE (t))
1936 {
1937 if (TREE_CODE (t) == INTEGER_CST)
1938 *tp = build_int_cst_wide (tem,
1939 TREE_INT_CST_LOW (t),
1940 TREE_INT_CST_HIGH (t));
1941 else
1942 TREE_TYPE (t) = tem;
1943 }
1944 }
a900ae6b 1945 }
726a989a
RB
1946 break;
1947 }
1948
1949 return NULL_TREE;
1950}
1951
1952
1953/* Helper function for scan_omp.
1954
1955 Callback for walk_gimple_stmt used to scan for OpenMP directives in
1956 the current statement in GSI. */
1957
1958static tree
1959scan_omp_1_stmt (gimple_stmt_iterator *gsi, bool *handled_ops_p,
1960 struct walk_stmt_info *wi)
1961{
1962 gimple stmt = gsi_stmt (*gsi);
1963 omp_context *ctx = (omp_context *) wi->info;
1964
1965 if (gimple_has_location (stmt))
1966 input_location = gimple_location (stmt);
953ff289 1967
a6fc8e21 1968 /* Check the OpenMP nesting restrictions. */
a68ab351
JJ
1969 if (ctx != NULL)
1970 {
726a989a
RB
1971 if (is_gimple_omp (stmt))
1972 check_omp_nesting_restrictions (stmt, ctx);
1973 else if (is_gimple_call (stmt))
a68ab351 1974 {
726a989a 1975 tree fndecl = gimple_call_fndecl (stmt);
a68ab351
JJ
1976 if (fndecl && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL
1977 && DECL_FUNCTION_CODE (fndecl) == BUILT_IN_GOMP_BARRIER)
726a989a 1978 check_omp_nesting_restrictions (stmt, ctx);
a68ab351
JJ
1979 }
1980 }
a6fc8e21 1981
726a989a
RB
1982 *handled_ops_p = true;
1983
1984 switch (gimple_code (stmt))
953ff289 1985 {
726a989a 1986 case GIMPLE_OMP_PARALLEL:
a68ab351 1987 taskreg_nesting_level++;
726a989a 1988 scan_omp_parallel (gsi, ctx);
a68ab351
JJ
1989 taskreg_nesting_level--;
1990 break;
1991
726a989a 1992 case GIMPLE_OMP_TASK:
a68ab351 1993 taskreg_nesting_level++;
726a989a 1994 scan_omp_task (gsi, ctx);
a68ab351 1995 taskreg_nesting_level--;
953ff289
DN
1996 break;
1997
726a989a
RB
1998 case GIMPLE_OMP_FOR:
1999 scan_omp_for (stmt, ctx);
953ff289
DN
2000 break;
2001
726a989a
RB
2002 case GIMPLE_OMP_SECTIONS:
2003 scan_omp_sections (stmt, ctx);
953ff289
DN
2004 break;
2005
726a989a
RB
2006 case GIMPLE_OMP_SINGLE:
2007 scan_omp_single (stmt, ctx);
953ff289
DN
2008 break;
2009
726a989a
RB
2010 case GIMPLE_OMP_SECTION:
2011 case GIMPLE_OMP_MASTER:
2012 case GIMPLE_OMP_ORDERED:
2013 case GIMPLE_OMP_CRITICAL:
2014 ctx = new_omp_context (stmt, ctx);
2015 scan_omp (gimple_omp_body (stmt), ctx);
953ff289
DN
2016 break;
2017
726a989a 2018 case GIMPLE_BIND:
953ff289
DN
2019 {
2020 tree var;
953ff289 2021
726a989a
RB
2022 *handled_ops_p = false;
2023 if (ctx)
910ad8de 2024 for (var = gimple_bind_vars (stmt); var ; var = DECL_CHAIN (var))
726a989a 2025 insert_decl_map (&ctx->cb, var, var);
953ff289
DN
2026 }
2027 break;
953ff289 2028 default:
726a989a 2029 *handled_ops_p = false;
953ff289
DN
2030 break;
2031 }
2032
2033 return NULL_TREE;
2034}
2035
2036
726a989a
RB
2037/* Scan all the statements starting at the current statement. CTX
2038 contains context information about the OpenMP directives and
2039 clauses found during the scan. */
953ff289
DN
2040
2041static void
726a989a 2042scan_omp (gimple_seq body, omp_context *ctx)
953ff289
DN
2043{
2044 location_t saved_location;
2045 struct walk_stmt_info wi;
2046
2047 memset (&wi, 0, sizeof (wi));
953ff289 2048 wi.info = ctx;
953ff289
DN
2049 wi.want_locations = true;
2050
2051 saved_location = input_location;
726a989a 2052 walk_gimple_seq (body, scan_omp_1_stmt, scan_omp_1_op, &wi);
953ff289
DN
2053 input_location = saved_location;
2054}
2055\f
2056/* Re-gimplification and code generation routines. */
2057
2058/* Build a call to GOMP_barrier. */
2059
917948d3
ZD
2060static tree
2061build_omp_barrier (void)
953ff289 2062{
e79983f4 2063 return build_call_expr (builtin_decl_explicit (BUILT_IN_GOMP_BARRIER), 0);
953ff289
DN
2064}
2065
2066/* If a context was created for STMT when it was scanned, return it. */
2067
2068static omp_context *
726a989a 2069maybe_lookup_ctx (gimple stmt)
953ff289
DN
2070{
2071 splay_tree_node n;
2072 n = splay_tree_lookup (all_contexts, (splay_tree_key) stmt);
2073 return n ? (omp_context *) n->value : NULL;
2074}
2075
50674e96
DN
2076
2077/* Find the mapping for DECL in CTX or the immediately enclosing
2078 context that has a mapping for DECL.
2079
2080 If CTX is a nested parallel directive, we may have to use the decl
2081 mappings created in CTX's parent context. Suppose that we have the
2082 following parallel nesting (variable UIDs shown for clarity):
2083
2084 iD.1562 = 0;
2085 #omp parallel shared(iD.1562) -> outer parallel
2086 iD.1562 = iD.1562 + 1;
2087
2088 #omp parallel shared (iD.1562) -> inner parallel
2089 iD.1562 = iD.1562 - 1;
2090
2091 Each parallel structure will create a distinct .omp_data_s structure
2092 for copying iD.1562 in/out of the directive:
2093
2094 outer parallel .omp_data_s.1.i -> iD.1562
2095 inner parallel .omp_data_s.2.i -> iD.1562
2096
2097 A shared variable mapping will produce a copy-out operation before
2098 the parallel directive and a copy-in operation after it. So, in
2099 this case we would have:
2100
2101 iD.1562 = 0;
2102 .omp_data_o.1.i = iD.1562;
2103 #omp parallel shared(iD.1562) -> outer parallel
2104 .omp_data_i.1 = &.omp_data_o.1
2105 .omp_data_i.1->i = .omp_data_i.1->i + 1;
2106
2107 .omp_data_o.2.i = iD.1562; -> **
2108 #omp parallel shared(iD.1562) -> inner parallel
2109 .omp_data_i.2 = &.omp_data_o.2
2110 .omp_data_i.2->i = .omp_data_i.2->i - 1;
2111
2112
2113 ** This is a problem. The symbol iD.1562 cannot be referenced
2114 inside the body of the outer parallel region. But since we are
2115 emitting this copy operation while expanding the inner parallel
2116 directive, we need to access the CTX structure of the outer
2117 parallel directive to get the correct mapping:
2118
2119 .omp_data_o.2.i = .omp_data_i.1->i
2120
2121 Since there may be other workshare or parallel directives enclosing
2122 the parallel directive, it may be necessary to walk up the context
2123 parent chain. This is not a problem in general because nested
2124 parallelism happens only rarely. */
2125
2126static tree
2127lookup_decl_in_outer_ctx (tree decl, omp_context *ctx)
2128{
2129 tree t;
2130 omp_context *up;
2131
50674e96
DN
2132 for (up = ctx->outer, t = NULL; up && t == NULL; up = up->outer)
2133 t = maybe_lookup_decl (decl, up);
2134
d2dda7fe 2135 gcc_assert (!ctx->is_nested || t || is_global_var (decl));
50674e96 2136
64964499 2137 return t ? t : decl;
50674e96
DN
2138}
2139
2140
8ca5b2a2
JJ
2141/* Similar to lookup_decl_in_outer_ctx, but return DECL if not found
2142 in outer contexts. */
2143
2144static tree
2145maybe_lookup_decl_in_outer_ctx (tree decl, omp_context *ctx)
2146{
2147 tree t = NULL;
2148 omp_context *up;
2149
d2dda7fe
JJ
2150 for (up = ctx->outer, t = NULL; up && t == NULL; up = up->outer)
2151 t = maybe_lookup_decl (decl, up);
8ca5b2a2
JJ
2152
2153 return t ? t : decl;
2154}
2155
2156
953ff289
DN
2157/* Construct the initialization value for reduction CLAUSE. */
2158
2159tree
2160omp_reduction_init (tree clause, tree type)
2161{
db3927fb 2162 location_t loc = OMP_CLAUSE_LOCATION (clause);
953ff289
DN
2163 switch (OMP_CLAUSE_REDUCTION_CODE (clause))
2164 {
2165 case PLUS_EXPR:
2166 case MINUS_EXPR:
2167 case BIT_IOR_EXPR:
2168 case BIT_XOR_EXPR:
2169 case TRUTH_OR_EXPR:
2170 case TRUTH_ORIF_EXPR:
2171 case TRUTH_XOR_EXPR:
2172 case NE_EXPR:
e8160c9a 2173 return build_zero_cst (type);
953ff289
DN
2174
2175 case MULT_EXPR:
2176 case TRUTH_AND_EXPR:
2177 case TRUTH_ANDIF_EXPR:
2178 case EQ_EXPR:
db3927fb 2179 return fold_convert_loc (loc, type, integer_one_node);
953ff289
DN
2180
2181 case BIT_AND_EXPR:
db3927fb 2182 return fold_convert_loc (loc, type, integer_minus_one_node);
953ff289
DN
2183
2184 case MAX_EXPR:
2185 if (SCALAR_FLOAT_TYPE_P (type))
2186 {
2187 REAL_VALUE_TYPE max, min;
2188 if (HONOR_INFINITIES (TYPE_MODE (type)))
2189 {
2190 real_inf (&max);
2191 real_arithmetic (&min, NEGATE_EXPR, &max, NULL);
2192 }
2193 else
2194 real_maxval (&min, 1, TYPE_MODE (type));
2195 return build_real (type, min);
2196 }
2197 else
2198 {
2199 gcc_assert (INTEGRAL_TYPE_P (type));
2200 return TYPE_MIN_VALUE (type);
2201 }
2202
2203 case MIN_EXPR:
2204 if (SCALAR_FLOAT_TYPE_P (type))
2205 {
2206 REAL_VALUE_TYPE max;
2207 if (HONOR_INFINITIES (TYPE_MODE (type)))
2208 real_inf (&max);
2209 else
2210 real_maxval (&max, 0, TYPE_MODE (type));
2211 return build_real (type, max);
2212 }
2213 else
2214 {
2215 gcc_assert (INTEGRAL_TYPE_P (type));
2216 return TYPE_MAX_VALUE (type);
2217 }
2218
2219 default:
2220 gcc_unreachable ();
2221 }
2222}
2223
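/* Illustrative identity values implied by the switch above (a sketch
   for an "int" reduction variable):

       reduction(+:x)   ->  x' = 0
       reduction(*:x)   ->  x' = 1
       reduction(&:x)   ->  x' = -1        (all bits set)
       reduction(max:x) ->  x' = INT_MIN   (TYPE_MIN_VALUE)
       reduction(min:x) ->  x' = INT_MAX   (TYPE_MAX_VALUE)

   Floating-point max/min use -Inf/+Inf when the mode honors
   infinities, and the extreme finite values otherwise. */
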
2224/* Generate code to implement the input clauses, FIRSTPRIVATE and COPYIN,
2225 from the receiver (aka child) side and initializers for REFERENCE_TYPE
2226 private variables. Initialization statements go in ILIST, while calls
2227 to destructors go in DLIST. */
2228
2229static void
726a989a 2230lower_rec_input_clauses (tree clauses, gimple_seq *ilist, gimple_seq *dlist,
3d55c64b 2231 omp_context *ctx)
953ff289 2232{
726a989a 2233 gimple_stmt_iterator diter;
5039610b 2234 tree c, dtor, copyin_seq, x, ptr;
953ff289 2235 bool copyin_by_ref = false;
8ca5b2a2 2236 bool lastprivate_firstprivate = false;
953ff289
DN
2237 int pass;
2238
726a989a
RB
2239 *dlist = gimple_seq_alloc ();
2240 diter = gsi_start (*dlist);
953ff289
DN
2241 copyin_seq = NULL;
2242
2243 /* Do all the fixed sized types in the first pass, and the variable sized
2244 types in the second pass. This makes sure that the scalar arguments to
b8698a0f 2245 the variable sized types are processed before we use them in the
953ff289
DN
2246 variable sized operations. */
2247 for (pass = 0; pass < 2; ++pass)
2248 {
2249 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
2250 {
aaf46ef9 2251 enum omp_clause_code c_kind = OMP_CLAUSE_CODE (c);
953ff289
DN
2252 tree var, new_var;
2253 bool by_ref;
db3927fb 2254 location_t clause_loc = OMP_CLAUSE_LOCATION (c);
953ff289
DN
2255
2256 switch (c_kind)
2257 {
2258 case OMP_CLAUSE_PRIVATE:
2259 if (OMP_CLAUSE_PRIVATE_DEBUG (c))
2260 continue;
2261 break;
2262 case OMP_CLAUSE_SHARED:
8ca5b2a2
JJ
2263 if (maybe_lookup_decl (OMP_CLAUSE_DECL (c), ctx) == NULL)
2264 {
2265 gcc_assert (is_global_var (OMP_CLAUSE_DECL (c)));
2266 continue;
2267 }
953ff289 2268 case OMP_CLAUSE_FIRSTPRIVATE:
953ff289
DN
2269 case OMP_CLAUSE_COPYIN:
2270 case OMP_CLAUSE_REDUCTION:
2271 break;
077b0dfb 2272 case OMP_CLAUSE_LASTPRIVATE:
8ca5b2a2
JJ
2273 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
2274 {
2275 lastprivate_firstprivate = true;
2276 if (pass != 0)
2277 continue;
2278 }
077b0dfb 2279 break;
953ff289
DN
2280 default:
2281 continue;
2282 }
2283
2284 new_var = var = OMP_CLAUSE_DECL (c);
2285 if (c_kind != OMP_CLAUSE_COPYIN)
2286 new_var = lookup_decl (var, ctx);
2287
2288 if (c_kind == OMP_CLAUSE_SHARED || c_kind == OMP_CLAUSE_COPYIN)
2289 {
2290 if (pass != 0)
2291 continue;
2292 }
953ff289
DN
2293 else if (is_variable_sized (var))
2294 {
50674e96
DN
2295 /* For variable sized types, we need to allocate the
2296 actual storage here. Call alloca and store the
2297 result in the pointer decl that we created elsewhere. */
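 /* Roughly, the code emitted below is (a sketch of the
 generated IL, with an invented temporary name):

 void *tmp.N = __builtin_alloca (sizeof (<vla type>));
 ptr = (<vla type> *) tmp.N;

 where PTR is the pointer that the variable's
 DECL_VALUE_EXPR dereferences. */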
953ff289
DN
2298 if (pass == 0)
2299 continue;
2300
a68ab351
JJ
2301 if (c_kind != OMP_CLAUSE_FIRSTPRIVATE || !is_task_ctx (ctx))
2302 {
726a989a 2303 gimple stmt;
e79983f4 2304 tree tmp, atmp;
726a989a 2305
a68ab351
JJ
2306 ptr = DECL_VALUE_EXPR (new_var);
2307 gcc_assert (TREE_CODE (ptr) == INDIRECT_REF);
2308 ptr = TREE_OPERAND (ptr, 0);
2309 gcc_assert (DECL_P (ptr));
2310 x = TYPE_SIZE_UNIT (TREE_TYPE (new_var));
726a989a
RB
2311
2312 /* void *tmp = __builtin_alloca */
e79983f4
MM
2313 atmp = builtin_decl_explicit (BUILT_IN_ALLOCA);
2314 stmt = gimple_build_call (atmp, 1, x);
726a989a
RB
2315 tmp = create_tmp_var_raw (ptr_type_node, NULL);
2316 gimple_add_tmp_var (tmp);
2317 gimple_call_set_lhs (stmt, tmp);
2318
2319 gimple_seq_add_stmt (ilist, stmt);
2320
db3927fb 2321 x = fold_convert_loc (clause_loc, TREE_TYPE (ptr), tmp);
726a989a 2322 gimplify_assign (ptr, x, ilist);
a68ab351 2323 }
953ff289 2324 }
953ff289
DN
2325 else if (is_reference (var))
2326 {
50674e96
DN
2327 /* For references that are being privatized for Fortran,
2328 allocate new backing storage for the new pointer
2329 variable. This allows us to avoid changing all the
2330 code that expects a pointer to something that expects
2331 a direct variable. Note that this doesn't apply to
2332 C++, since reference types are disallowed in data
077b0dfb
JJ
2333 sharing clauses there, except for NRV optimized
2334 return values. */
953ff289
DN
2335 if (pass == 0)
2336 continue;
2337
2338 x = TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (new_var)));
a68ab351
JJ
2339 if (c_kind == OMP_CLAUSE_FIRSTPRIVATE && is_task_ctx (ctx))
2340 {
2341 x = build_receiver_ref (var, false, ctx);
db3927fb 2342 x = build_fold_addr_expr_loc (clause_loc, x);
a68ab351
JJ
2343 }
2344 else if (TREE_CONSTANT (x))
953ff289
DN
2345 {
2346 const char *name = NULL;
2347 if (DECL_NAME (var))
2348 name = IDENTIFIER_POINTER (DECL_NAME (new_var));
2349
077b0dfb
JJ
2350 x = create_tmp_var_raw (TREE_TYPE (TREE_TYPE (new_var)),
2351 name);
2352 gimple_add_tmp_var (x);
628c189e 2353 TREE_ADDRESSABLE (x) = 1;
db3927fb 2354 x = build_fold_addr_expr_loc (clause_loc, x);
953ff289
DN
2355 }
2356 else
2357 {
e79983f4
MM
2358 tree atmp = builtin_decl_explicit (BUILT_IN_ALLOCA);
2359 x = build_call_expr_loc (clause_loc, atmp, 1, x);
953ff289
DN
2360 }
2361
db3927fb 2362 x = fold_convert_loc (clause_loc, TREE_TYPE (new_var), x);
726a989a 2363 gimplify_assign (new_var, x, ilist);
953ff289 2364
70f34814 2365 new_var = build_simple_mem_ref_loc (clause_loc, new_var);
953ff289
DN
2366 }
2367 else if (c_kind == OMP_CLAUSE_REDUCTION
2368 && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
2369 {
2370 if (pass == 0)
2371 continue;
2372 }
2373 else if (pass != 0)
2374 continue;
2375
aaf46ef9 2376 switch (OMP_CLAUSE_CODE (c))
953ff289
DN
2377 {
2378 case OMP_CLAUSE_SHARED:
8ca5b2a2
JJ
2379 /* Shared global vars are just accessed directly. */
2380 if (is_global_var (new_var))
2381 break;
953ff289
DN
2382 /* Set up the DECL_VALUE_EXPR for shared variables now. This
2383 needs to be delayed until after fixup_child_record_type so
2384 that we get the correct type during the dereference. */
7c8f7639 2385 by_ref = use_pointer_for_field (var, ctx);
953ff289
DN
2386 x = build_receiver_ref (var, by_ref, ctx);
2387 SET_DECL_VALUE_EXPR (new_var, x);
2388 DECL_HAS_VALUE_EXPR_P (new_var) = 1;
2389
2390 /* ??? If VAR is not passed by reference, and the variable
2391 hasn't been initialized yet, then we'll get a warning for
2392 the store into the omp_data_s structure. Ideally, we'd be
b8698a0f 2393 able to notice this and not store anything at all, but
953ff289
DN
2394 we're generating code too early. Suppress the warning. */
2395 if (!by_ref)
2396 TREE_NO_WARNING (var) = 1;
2397 break;
2398
2399 case OMP_CLAUSE_LASTPRIVATE:
2400 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
2401 break;
2402 /* FALLTHRU */
2403
2404 case OMP_CLAUSE_PRIVATE:
a68ab351
JJ
2405 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_PRIVATE)
2406 x = build_outer_var_ref (var, ctx);
2407 else if (OMP_CLAUSE_PRIVATE_OUTER_REF (c))
2408 {
2409 if (is_task_ctx (ctx))
2410 x = build_receiver_ref (var, false, ctx);
2411 else
2412 x = build_outer_var_ref (var, ctx);
2413 }
2414 else
2415 x = NULL;
2416 x = lang_hooks.decls.omp_clause_default_ctor (c, new_var, x);
953ff289
DN
2417 if (x)
2418 gimplify_and_add (x, ilist);
2419 /* FALLTHRU */
2420
2421 do_dtor:
2422 x = lang_hooks.decls.omp_clause_dtor (c, new_var);
2423 if (x)
2424 {
726a989a
RB
2425 gimple_seq tseq = NULL;
2426
953ff289 2427 dtor = x;
726a989a
RB
2428 gimplify_stmt (&dtor, &tseq);
2429 gsi_insert_seq_before (&diter, tseq, GSI_SAME_STMT);
953ff289
DN
2430 }
2431 break;
2432
2433 case OMP_CLAUSE_FIRSTPRIVATE:
a68ab351
JJ
2434 if (is_task_ctx (ctx))
2435 {
2436 if (is_reference (var) || is_variable_sized (var))
2437 goto do_dtor;
2438 else if (is_global_var (maybe_lookup_decl_in_outer_ctx (var,
2439 ctx))
2440 || use_pointer_for_field (var, NULL))
2441 {
2442 x = build_receiver_ref (var, false, ctx);
2443 SET_DECL_VALUE_EXPR (new_var, x);
2444 DECL_HAS_VALUE_EXPR_P (new_var) = 1;
2445 goto do_dtor;
2446 }
2447 }
953ff289
DN
2448 x = build_outer_var_ref (var, ctx);
2449 x = lang_hooks.decls.omp_clause_copy_ctor (c, new_var, x);
2450 gimplify_and_add (x, ilist);
2451 goto do_dtor;
2452 break;
2453
2454 case OMP_CLAUSE_COPYIN:
7c8f7639 2455 by_ref = use_pointer_for_field (var, NULL);
953ff289
DN
2456 x = build_receiver_ref (var, by_ref, ctx);
2457 x = lang_hooks.decls.omp_clause_assign_op (c, new_var, x);
2458 append_to_statement_list (x, &copyin_seq);
2459 copyin_by_ref |= by_ref;
2460 break;
2461
2462 case OMP_CLAUSE_REDUCTION:
2463 if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
2464 {
a68ab351
JJ
2465 tree placeholder = OMP_CLAUSE_REDUCTION_PLACEHOLDER (c);
2466 x = build_outer_var_ref (var, ctx);
2467
2468 if (is_reference (var))
db3927fb 2469 x = build_fold_addr_expr_loc (clause_loc, x);
a68ab351
JJ
2470 SET_DECL_VALUE_EXPR (placeholder, x);
2471 DECL_HAS_VALUE_EXPR_P (placeholder) = 1;
726a989a
RB
2472 lower_omp (OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c), ctx);
2473 gimple_seq_add_seq (ilist,
2474 OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c));
2475 OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c) = NULL;
a68ab351 2476 DECL_HAS_VALUE_EXPR_P (placeholder) = 0;
953ff289
DN
2477 }
2478 else
2479 {
2480 x = omp_reduction_init (c, TREE_TYPE (new_var));
2481 gcc_assert (TREE_CODE (TREE_TYPE (new_var)) != ARRAY_TYPE);
726a989a 2482 gimplify_assign (new_var, x, ilist);
953ff289
DN
2483 }
2484 break;
2485
2486 default:
2487 gcc_unreachable ();
2488 }
2489 }
2490 }
2491
2492 /* The copyin sequence is not to be executed by the main thread, since
2493 that would result in self-copies. A self-copy may be harmless for
2494 scalars, but it certainly is not for C++ operator=. */
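 /* In source terms, the guard built below is roughly (a sketch):

 if (omp_get_thread_num () != 0)
 { <copyin assignments> }

 so only non-master threads copy the master's values in. */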
2495 if (copyin_seq)
2496 {
e79983f4
MM
2497 x = build_call_expr (builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM),
2498 0);
953ff289
DN
2499 x = build2 (NE_EXPR, boolean_type_node, x,
2500 build_int_cst (TREE_TYPE (x), 0));
2501 x = build3 (COND_EXPR, void_type_node, x, copyin_seq, NULL);
2502 gimplify_and_add (x, ilist);
2503 }
2504
2505 /* If any copyin variable is passed by reference, we must ensure the
2506 master thread doesn't modify it before it is copied over in all
8ca5b2a2
JJ
2507 threads. Similarly for variables in both firstprivate and
2508 lastprivate clauses we need to ensure the lastprivate copying
2509 happens after firstprivate copying in all threads. */
2510 if (copyin_by_ref || lastprivate_firstprivate)
917948d3 2511 gimplify_and_add (build_omp_barrier (), ilist);
953ff289
DN
2512}
2513
50674e96 2514
953ff289
DN
2515/* Generate code to implement the LASTPRIVATE clauses. This is used for
2516 both parallel and workshare constructs. PREDICATE may be NULL if it's
2517 always true. */
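/* Informally (a sketch): for "lastprivate (x)" the sequence appended
   below has the shape

       if (<this thread executed the sequentially last iteration>)
         x = x_priv;

   where the test is derived from PREDICATE when one is given. */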
2518
2519static void
726a989a
RB
2520lower_lastprivate_clauses (tree clauses, tree predicate, gimple_seq *stmt_list,
2521 omp_context *ctx)
953ff289 2522{
726a989a 2523 tree x, c, label = NULL;
a68ab351 2524 bool par_clauses = false;
953ff289
DN
2525
2526 /* Early exit if there are no lastprivate clauses. */
2527 clauses = find_omp_clause (clauses, OMP_CLAUSE_LASTPRIVATE);
2528 if (clauses == NULL)
2529 {
2530 /* If this was a workshare clause, see if it had been combined
2531 with its parallel. In that case, look for the clauses on the
2532 parallel statement itself. */
2533 if (is_parallel_ctx (ctx))
2534 return;
2535
2536 ctx = ctx->outer;
2537 if (ctx == NULL || !is_parallel_ctx (ctx))
2538 return;
2539
726a989a 2540 clauses = find_omp_clause (gimple_omp_parallel_clauses (ctx->stmt),
953ff289
DN
2541 OMP_CLAUSE_LASTPRIVATE);
2542 if (clauses == NULL)
2543 return;
a68ab351 2544 par_clauses = true;
953ff289
DN
2545 }
2546
726a989a
RB
2547 if (predicate)
2548 {
2549 gimple stmt;
2550 tree label_true, arm1, arm2;
2551
c2255bc4
AH
2552 label = create_artificial_label (UNKNOWN_LOCATION);
2553 label_true = create_artificial_label (UNKNOWN_LOCATION);
726a989a
RB
2554 arm1 = TREE_OPERAND (predicate, 0);
2555 arm2 = TREE_OPERAND (predicate, 1);
2556 gimplify_expr (&arm1, stmt_list, NULL, is_gimple_val, fb_rvalue);
2557 gimplify_expr (&arm2, stmt_list, NULL, is_gimple_val, fb_rvalue);
2558 stmt = gimple_build_cond (TREE_CODE (predicate), arm1, arm2,
2559 label_true, label);
2560 gimple_seq_add_stmt (stmt_list, stmt);
2561 gimple_seq_add_stmt (stmt_list, gimple_build_label (label_true));
2562 }
953ff289 2563
a68ab351 2564 for (c = clauses; c ;)
953ff289
DN
2565 {
2566 tree var, new_var;
db3927fb 2567 location_t clause_loc = OMP_CLAUSE_LOCATION (c);
953ff289 2568
a68ab351
JJ
2569 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE)
2570 {
2571 var = OMP_CLAUSE_DECL (c);
2572 new_var = lookup_decl (var, ctx);
953ff289 2573
726a989a
RB
2574 if (OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c))
2575 {
2576 lower_omp (OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c), ctx);
2577 gimple_seq_add_seq (stmt_list,
2578 OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c));
2579 }
2580 OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c) = NULL;
953ff289 2581
a68ab351
JJ
2582 x = build_outer_var_ref (var, ctx);
2583 if (is_reference (var))
70f34814 2584 new_var = build_simple_mem_ref_loc (clause_loc, new_var);
a68ab351 2585 x = lang_hooks.decls.omp_clause_assign_op (c, x, new_var);
726a989a 2586 gimplify_and_add (x, stmt_list);
a68ab351
JJ
2587 }
2588 c = OMP_CLAUSE_CHAIN (c);
2589 if (c == NULL && !par_clauses)
2590 {
2591 /* If this was a workshare clause, see if it had been combined
2592 with its parallel. In that case, continue looking for the
2593 clauses also on the parallel statement itself. */
2594 if (is_parallel_ctx (ctx))
2595 break;
2596
2597 ctx = ctx->outer;
2598 if (ctx == NULL || !is_parallel_ctx (ctx))
2599 break;
2600
726a989a 2601 c = find_omp_clause (gimple_omp_parallel_clauses (ctx->stmt),
a68ab351
JJ
2602 OMP_CLAUSE_LASTPRIVATE);
2603 par_clauses = true;
2604 }
953ff289
DN
2605 }
2606
726a989a
RB
2607 if (label)
2608 gimple_seq_add_stmt (stmt_list, gimple_build_label (label));
953ff289
DN
2609}
2610
50674e96 2611
953ff289
DN
2612/* Generate code to implement the REDUCTION clauses. */
2613
2614static void
726a989a 2615lower_reduction_clauses (tree clauses, gimple_seq *stmt_seqp, omp_context *ctx)
953ff289 2616{
726a989a
RB
2617 gimple_seq sub_seq = NULL;
2618 gimple stmt;
2619 tree x, c;
953ff289
DN
2620 int count = 0;
2621
2622 /* First see if there is exactly one reduction clause. Use OMP_ATOMIC
2623 update in that case, otherwise use a lock. */
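 /* Sketch of the two strategies (illustrative only): a single
 scalar clause such as reduction(+:x) becomes one atomic update,

 #pragma omp atomic
 x = x + x_priv;

 while two or more clauses, or an array reduction, are merged
 inside a GOMP_atomic_start ()/GOMP_atomic_end () section. */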
2624 for (c = clauses; c && count < 2; c = OMP_CLAUSE_CHAIN (c))
aaf46ef9 2625 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION)
953ff289
DN
2626 {
2627 if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
2628 {
2629 /* Never use OMP_ATOMIC for array reductions. */
2630 count = -1;
2631 break;
2632 }
2633 count++;
2634 }
2635
2636 if (count == 0)
2637 return;
2638
2639 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
2640 {
2641 tree var, ref, new_var;
2642 enum tree_code code;
db3927fb 2643 location_t clause_loc = OMP_CLAUSE_LOCATION (c);
953ff289 2644
aaf46ef9 2645 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_REDUCTION)
953ff289
DN
2646 continue;
2647
2648 var = OMP_CLAUSE_DECL (c);
2649 new_var = lookup_decl (var, ctx);
2650 if (is_reference (var))
70f34814 2651 new_var = build_simple_mem_ref_loc (clause_loc, new_var);
953ff289
DN
2652 ref = build_outer_var_ref (var, ctx);
2653 code = OMP_CLAUSE_REDUCTION_CODE (c);
50674e96
DN
2654
2655 /* reduction(-:var) sums up the partial results, so it acts
2656 identically to reduction(+:var). */
953ff289
DN
2657 if (code == MINUS_EXPR)
2658 code = PLUS_EXPR;
2659
2660 if (count == 1)
2661 {
db3927fb 2662 tree addr = build_fold_addr_expr_loc (clause_loc, ref);
953ff289
DN
2663
2664 addr = save_expr (addr);
2665 ref = build1 (INDIRECT_REF, TREE_TYPE (TREE_TYPE (addr)), addr);
db3927fb 2666 x = fold_build2_loc (clause_loc, code, TREE_TYPE (ref), ref, new_var);
953ff289 2667 x = build2 (OMP_ATOMIC, void_type_node, addr, x);
726a989a 2668 gimplify_and_add (x, stmt_seqp);
953ff289
DN
2669 return;
2670 }
2671
2672 if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
2673 {
2674 tree placeholder = OMP_CLAUSE_REDUCTION_PLACEHOLDER (c);
2675
2676 if (is_reference (var))
db3927fb 2677 ref = build_fold_addr_expr_loc (clause_loc, ref);
953ff289
DN
2678 SET_DECL_VALUE_EXPR (placeholder, ref);
2679 DECL_HAS_VALUE_EXPR_P (placeholder) = 1;
726a989a
RB
2680 lower_omp (OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c), ctx);
2681 gimple_seq_add_seq (&sub_seq, OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c));
2682 OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c) = NULL;
953ff289
DN
2683 OMP_CLAUSE_REDUCTION_PLACEHOLDER (c) = NULL;
2684 }
2685 else
2686 {
2687 x = build2 (code, TREE_TYPE (ref), ref, new_var);
2688 ref = build_outer_var_ref (var, ctx);
726a989a 2689 gimplify_assign (ref, x, &sub_seq);
953ff289
DN
2690 }
2691 }
2692
e79983f4
MM
2693 stmt = gimple_build_call (builtin_decl_explicit (BUILT_IN_GOMP_ATOMIC_START),
2694 0);
726a989a 2695 gimple_seq_add_stmt (stmt_seqp, stmt);
953ff289 2696
726a989a 2697 gimple_seq_add_seq (stmt_seqp, sub_seq);
953ff289 2698
e79983f4
MM
2699 stmt = gimple_build_call (builtin_decl_explicit (BUILT_IN_GOMP_ATOMIC_END),
2700 0);
726a989a 2701 gimple_seq_add_stmt (stmt_seqp, stmt);
953ff289
DN
2702}
2703
50674e96 2704
953ff289
DN
2705/* Generate code to implement the COPYPRIVATE clauses. */
2706
2707static void
726a989a 2708lower_copyprivate_clauses (tree clauses, gimple_seq *slist, gimple_seq *rlist,
953ff289
DN
2709 omp_context *ctx)
2710{
2711 tree c;
2712
2713 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
2714 {
78db7d92 2715 tree var, new_var, ref, x;
953ff289 2716 bool by_ref;
db3927fb 2717 location_t clause_loc = OMP_CLAUSE_LOCATION (c);
953ff289 2718
aaf46ef9 2719 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_COPYPRIVATE)
953ff289
DN
2720 continue;
2721
2722 var = OMP_CLAUSE_DECL (c);
7c8f7639 2723 by_ref = use_pointer_for_field (var, NULL);
953ff289
DN
2724
2725 ref = build_sender_ref (var, ctx);
78db7d92
JJ
2726 x = new_var = lookup_decl_in_outer_ctx (var, ctx);
2727 if (by_ref)
2728 {
2729 x = build_fold_addr_expr_loc (clause_loc, new_var);
2730 x = fold_convert_loc (clause_loc, TREE_TYPE (ref), x);
2731 }
726a989a 2732 gimplify_assign (ref, x, slist);
953ff289 2733
78db7d92
JJ
2734 ref = build_receiver_ref (var, false, ctx);
2735 if (by_ref)
2736 {
2737 ref = fold_convert_loc (clause_loc,
2738 build_pointer_type (TREE_TYPE (new_var)),
2739 ref);
2740 ref = build_fold_indirect_ref_loc (clause_loc, ref);
2741 }
953ff289
DN
2742 if (is_reference (var))
2743 {
78db7d92 2744 ref = fold_convert_loc (clause_loc, TREE_TYPE (new_var), ref);
70f34814
RG
2745 ref = build_simple_mem_ref_loc (clause_loc, ref);
2746 new_var = build_simple_mem_ref_loc (clause_loc, new_var);
953ff289 2747 }
78db7d92 2748 x = lang_hooks.decls.omp_clause_assign_op (c, new_var, ref);
953ff289
DN
2749 gimplify_and_add (x, rlist);
2750 }
2751}
2752
50674e96 2753
953ff289
DN
2754/* Generate code to implement the clauses, FIRSTPRIVATE, COPYIN, LASTPRIVATE,
2755 and REDUCTION from the sender (aka parent) side. */
2756
2757static void
726a989a
RB
2758lower_send_clauses (tree clauses, gimple_seq *ilist, gimple_seq *olist,
2759 omp_context *ctx)
953ff289
DN
2760{
2761 tree c;
2762
2763 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
2764 {
50674e96 2765 tree val, ref, x, var;
953ff289 2766 bool by_ref, do_in = false, do_out = false;
db3927fb 2767 location_t clause_loc = OMP_CLAUSE_LOCATION (c);
953ff289 2768
aaf46ef9 2769 switch (OMP_CLAUSE_CODE (c))
953ff289 2770 {
a68ab351
JJ
2771 case OMP_CLAUSE_PRIVATE:
2772 if (OMP_CLAUSE_PRIVATE_OUTER_REF (c))
2773 break;
2774 continue;
953ff289
DN
2775 case OMP_CLAUSE_FIRSTPRIVATE:
2776 case OMP_CLAUSE_COPYIN:
2777 case OMP_CLAUSE_LASTPRIVATE:
2778 case OMP_CLAUSE_REDUCTION:
2779 break;
2780 default:
2781 continue;
2782 }
2783
d2dda7fe
JJ
2784 val = OMP_CLAUSE_DECL (c);
2785 var = lookup_decl_in_outer_ctx (val, ctx);
50674e96 2786
8ca5b2a2
JJ
2787 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_COPYIN
2788 && is_global_var (var))
2789 continue;
953ff289
DN
2790 if (is_variable_sized (val))
2791 continue;
7c8f7639 2792 by_ref = use_pointer_for_field (val, NULL);
953ff289 2793
aaf46ef9 2794 switch (OMP_CLAUSE_CODE (c))
953ff289 2795 {
a68ab351 2796 case OMP_CLAUSE_PRIVATE:
953ff289
DN
2797 case OMP_CLAUSE_FIRSTPRIVATE:
2798 case OMP_CLAUSE_COPYIN:
2799 do_in = true;
2800 break;
2801
2802 case OMP_CLAUSE_LASTPRIVATE:
2803 if (by_ref || is_reference (val))
2804 {
2805 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
2806 continue;
2807 do_in = true;
2808 }
2809 else
a68ab351
JJ
2810 {
2811 do_out = true;
2812 if (lang_hooks.decls.omp_private_outer_ref (val))
2813 do_in = true;
2814 }
953ff289
DN
2815 break;
2816
2817 case OMP_CLAUSE_REDUCTION:
2818 do_in = true;
2819 do_out = !(by_ref || is_reference (val));
2820 break;
2821
2822 default:
2823 gcc_unreachable ();
2824 }
2825
2826 if (do_in)
2827 {
2828 ref = build_sender_ref (val, ctx);
db3927fb 2829 x = by_ref ? build_fold_addr_expr_loc (clause_loc, var) : var;
726a989a 2830 gimplify_assign (ref, x, ilist);
a68ab351
JJ
2831 if (is_task_ctx (ctx))
2832 DECL_ABSTRACT_ORIGIN (TREE_OPERAND (ref, 1)) = NULL;
953ff289 2833 }
50674e96 2834
953ff289
DN
2835 if (do_out)
2836 {
2837 ref = build_sender_ref (val, ctx);
726a989a 2838 gimplify_assign (var, ref, olist);
953ff289
DN
2839 }
2840 }
2841}
2842
726a989a
RB
2843/* Generate code to implement SHARED from the sender (aka parent)
2844 side. This is trickier, since GIMPLE_OMP_PARALLEL_CLAUSES doesn't
2845 list things that got automatically shared. */
953ff289
DN
2846
2847static void
726a989a 2848lower_send_shared_vars (gimple_seq *ilist, gimple_seq *olist, omp_context *ctx)
953ff289 2849{
a68ab351 2850 tree var, ovar, nvar, f, x, record_type;
953ff289
DN
2851
2852 if (ctx->record_type == NULL)
2853 return;
50674e96 2854
a68ab351 2855 record_type = ctx->srecord_type ? ctx->srecord_type : ctx->record_type;
910ad8de 2856 for (f = TYPE_FIELDS (record_type); f ; f = DECL_CHAIN (f))
953ff289
DN
2857 {
2858 ovar = DECL_ABSTRACT_ORIGIN (f);
2859 nvar = maybe_lookup_decl (ovar, ctx);
2860 if (!nvar || !DECL_HAS_VALUE_EXPR_P (nvar))
2861 continue;
2862
50674e96
DN
2863 /* If CTX is a nested parallel directive, find the immediately
2864 enclosing parallel or workshare construct that contains a
2865 mapping for OVAR. */
d2dda7fe 2866 var = lookup_decl_in_outer_ctx (ovar, ctx);
50674e96 2867
7c8f7639 2868 if (use_pointer_for_field (ovar, ctx))
953ff289
DN
2869 {
2870 x = build_sender_ref (ovar, ctx);
50674e96 2871 var = build_fold_addr_expr (var);
726a989a 2872 gimplify_assign (x, var, ilist);
953ff289
DN
2873 }
2874 else
2875 {
2876 x = build_sender_ref (ovar, ctx);
726a989a 2877 gimplify_assign (x, var, ilist);
953ff289 2878
14e5b285
RG
2879 if (!TREE_READONLY (var)
2880 /* We don't need to receive a new reference to a result
2881 or parm decl. In fact we may not store to it as we will
2882 invalidate any pending RSO and generate wrong gimple
2883 during inlining. */
2884 && !((TREE_CODE (var) == RESULT_DECL
2885 || TREE_CODE (var) == PARM_DECL)
2886 && DECL_BY_REFERENCE (var)))
a68ab351
JJ
2887 {
2888 x = build_sender_ref (ovar, ctx);
726a989a 2889 gimplify_assign (var, x, olist);
a68ab351 2890 }
953ff289
DN
2891 }
2892 }
2893}
2894
726a989a
RB
2895
2896/* A convenience function to build an empty GIMPLE_COND with just the
2897 condition. */
2898
2899static gimple
2900gimple_build_cond_empty (tree cond)
2901{
2902 enum tree_code pred_code;
2903 tree lhs, rhs;
2904
2905 gimple_cond_get_ops_from_tree (cond, &pred_code, &lhs, &rhs);
2906 return gimple_build_cond (pred_code, lhs, rhs, NULL_TREE, NULL_TREE);
2907}
2908
2909
b8698a0f 2910/* Build the function calls to GOMP_parallel_start etc to actually
50674e96
DN
2911 generate the parallel operation. REGION is the parallel region
2912 being expanded. BB is the block where the code should be inserted.
2913 WS_ARGS will be set if this is a call to a combined
2914 parallel+workshare construct; it contains the list of additional
2915 arguments needed by the workshare construct. */
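/* In outline, the expansion below produces (a sketch omitting
   gimplification details):

       GOMP_parallel_start (child_fn, &.omp_data_o, num_threads);
       child_fn (&.omp_data_o);      (the encountering thread joins in)
       GOMP_parallel_end ();

   with one of the GOMP_parallel_*_start variants selected instead
   when this is a combined parallel+workshare region. */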
953ff289
DN
2916
2917static void
777f7f9a 2918expand_parallel_call (struct omp_region *region, basic_block bb,
3bb06db4 2919 gimple entry_stmt, VEC(tree,gc) *ws_args)
953ff289 2920{
917948d3 2921 tree t, t1, t2, val, cond, c, clauses;
726a989a
RB
2922 gimple_stmt_iterator gsi;
2923 gimple stmt;
e79983f4
MM
2924 enum built_in_function start_ix;
2925 int start_ix2;
db3927fb 2926 location_t clause_loc;
3bb06db4 2927 VEC(tree,gc) *args;
50674e96 2928
726a989a 2929 clauses = gimple_omp_parallel_clauses (entry_stmt);
50674e96 2930
c0220ea4 2931 /* Determine what flavor of GOMP_parallel_start we will be
50674e96
DN
2932 emitting. */
2933 start_ix = BUILT_IN_GOMP_PARALLEL_START;
2934 if (is_combined_parallel (region))
2935 {
777f7f9a 2936 switch (region->inner->type)
50674e96 2937 {
726a989a 2938 case GIMPLE_OMP_FOR:
a68ab351 2939 gcc_assert (region->inner->sched_kind != OMP_CLAUSE_SCHEDULE_AUTO);
e79983f4
MM
2940 start_ix2 = ((int)BUILT_IN_GOMP_PARALLEL_LOOP_STATIC_START
2941 + (region->inner->sched_kind
2942 == OMP_CLAUSE_SCHEDULE_RUNTIME
2943 ? 3 : region->inner->sched_kind));
2944 start_ix = (enum built_in_function)start_ix2;
777f7f9a 2945 break;
726a989a 2946 case GIMPLE_OMP_SECTIONS:
777f7f9a
RH
2947 start_ix = BUILT_IN_GOMP_PARALLEL_SECTIONS_START;
2948 break;
2949 default:
2950 gcc_unreachable ();
50674e96 2951 }
50674e96 2952 }
953ff289
DN
2953
2954 /* By default, the value of NUM_THREADS is zero (selected at run time)
2955 and there is no conditional. */
2956 cond = NULL_TREE;
2957 val = build_int_cst (unsigned_type_node, 0);
2958
2959 c = find_omp_clause (clauses, OMP_CLAUSE_IF);
2960 if (c)
2961 cond = OMP_CLAUSE_IF_EXPR (c);
2962
2963 c = find_omp_clause (clauses, OMP_CLAUSE_NUM_THREADS);
2964 if (c)
db3927fb
AH
2965 {
2966 val = OMP_CLAUSE_NUM_THREADS_EXPR (c);
2967 clause_loc = OMP_CLAUSE_LOCATION (c);
2968 }
2969 else
2970 clause_loc = gimple_location (entry_stmt);
953ff289
DN
2971
2972 /* Ensure 'val' is of the correct type. */
db3927fb 2973 val = fold_convert_loc (clause_loc, unsigned_type_node, val);
953ff289
DN
2974
2975 /* If we found the clause 'if (cond)', build either
2976 (cond != 0) or (cond ? val : 1u). */
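 /* For example (a user-level sketch): with
 #pragma omp parallel if (p) num_threads (4)
 the thread-count argument becomes "p ? 4 : 1", and with no
 num_threads clause it becomes "p == 0 ? 1 : 0", i.e. one
 thread when the condition is false, run-time choice otherwise. */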
2977 if (cond)
2978 {
726a989a 2979 gimple_stmt_iterator gsi;
50674e96
DN
2980
2981 cond = gimple_boolify (cond);
2982
953ff289 2983 if (integer_zerop (val))
db3927fb
AH
2984 val = fold_build2_loc (clause_loc,
2985 EQ_EXPR, unsigned_type_node, cond,
917948d3 2986 build_int_cst (TREE_TYPE (cond), 0));
953ff289 2987 else
50674e96
DN
2988 {
2989 basic_block cond_bb, then_bb, else_bb;
917948d3 2990 edge e, e_then, e_else;
726a989a 2991 tree tmp_then, tmp_else, tmp_join, tmp_var;
917948d3
ZD
2992
2993 tmp_var = create_tmp_var (TREE_TYPE (val), NULL);
2994 if (gimple_in_ssa_p (cfun))
2995 {
726a989a
RB
2996 tmp_then = make_ssa_name (tmp_var, NULL);
2997 tmp_else = make_ssa_name (tmp_var, NULL);
2998 tmp_join = make_ssa_name (tmp_var, NULL);
917948d3
ZD
2999 }
3000 else
3001 {
3002 tmp_then = tmp_var;
3003 tmp_else = tmp_var;
3004 tmp_join = tmp_var;
3005 }
50674e96 3006
50674e96
DN
3007 e = split_block (bb, NULL);
3008 cond_bb = e->src;
3009 bb = e->dest;
3010 remove_edge (e);
3011
3012 then_bb = create_empty_bb (cond_bb);
3013 else_bb = create_empty_bb (then_bb);
917948d3
ZD
3014 set_immediate_dominator (CDI_DOMINATORS, then_bb, cond_bb);
3015 set_immediate_dominator (CDI_DOMINATORS, else_bb, cond_bb);
50674e96 3016
726a989a
RB
3017 stmt = gimple_build_cond_empty (cond);
3018 gsi = gsi_start_bb (cond_bb);
3019 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
50674e96 3020
726a989a
RB
3021 gsi = gsi_start_bb (then_bb);
3022 stmt = gimple_build_assign (tmp_then, val);
3023 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
50674e96 3024
726a989a
RB
3025 gsi = gsi_start_bb (else_bb);
3026 stmt = gimple_build_assign
3027 (tmp_else, build_int_cst (unsigned_type_node, 1));
3028 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
50674e96
DN
3029
3030 make_edge (cond_bb, then_bb, EDGE_TRUE_VALUE);
3031 make_edge (cond_bb, else_bb, EDGE_FALSE_VALUE);
917948d3
ZD
3032 e_then = make_edge (then_bb, bb, EDGE_FALLTHRU);
3033 e_else = make_edge (else_bb, bb, EDGE_FALLTHRU);
50674e96 3034
917948d3
ZD
3035 if (gimple_in_ssa_p (cfun))
3036 {
726a989a 3037 gimple phi = create_phi_node (tmp_join, bb);
917948d3 3038 SSA_NAME_DEF_STMT (tmp_join) = phi;
f5045c96
AM
3039 add_phi_arg (phi, tmp_then, e_then, UNKNOWN_LOCATION);
3040 add_phi_arg (phi, tmp_else, e_else, UNKNOWN_LOCATION);
917948d3
ZD
3041 }
3042
3043 val = tmp_join;
50674e96
DN
3044 }
3045
726a989a
RB
3046 gsi = gsi_start_bb (bb);
3047 val = force_gimple_operand_gsi (&gsi, val, true, NULL_TREE,
3048 false, GSI_CONTINUE_LINKING);
953ff289
DN
3049 }
3050
726a989a
RB
3051 gsi = gsi_last_bb (bb);
3052 t = gimple_omp_parallel_data_arg (entry_stmt);
953ff289 3053 if (t == NULL)
5039610b 3054 t1 = null_pointer_node;
953ff289 3055 else
5039610b 3056 t1 = build_fold_addr_expr (t);
726a989a 3057 t2 = build_fold_addr_expr (gimple_omp_parallel_child_fn (entry_stmt));
50674e96 3058
3bb06db4
NF
3059 args = VEC_alloc (tree, gc, 3 + VEC_length (tree, ws_args));
3060 VEC_quick_push (tree, args, t2);
3061 VEC_quick_push (tree, args, t1);
3062 VEC_quick_push (tree, args, val);
3063 VEC_splice (tree, args, ws_args);
3064
3065 t = build_call_expr_loc_vec (UNKNOWN_LOCATION,
e79983f4 3066 builtin_decl_explicit (start_ix), args);
50674e96 3067
726a989a
RB
3068 force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3069 false, GSI_CONTINUE_LINKING);
953ff289 3070
726a989a 3071 t = gimple_omp_parallel_data_arg (entry_stmt);
953ff289
DN
3072 if (t == NULL)
3073 t = null_pointer_node;
3074 else
3075 t = build_fold_addr_expr (t);
db3927fb
AH
3076 t = build_call_expr_loc (gimple_location (entry_stmt),
3077 gimple_omp_parallel_child_fn (entry_stmt), 1, t);
726a989a
RB
3078 force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3079 false, GSI_CONTINUE_LINKING);
953ff289 3080
db3927fb 3081 t = build_call_expr_loc (gimple_location (entry_stmt),
e79983f4
MM
3082 builtin_decl_explicit (BUILT_IN_GOMP_PARALLEL_END),
3083 0);
726a989a
RB
3084 force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3085 false, GSI_CONTINUE_LINKING);
953ff289
DN
3086}
3087
50674e96 3088
a68ab351
JJ
3089/* Build the function call to GOMP_task to actually
3090 generate the task operation. BB is the block where the code should be inserted. */
3091
3092static void
726a989a 3093expand_task_call (basic_block bb, gimple entry_stmt)
a68ab351 3094{
20906c66 3095 tree t, t1, t2, t3, flags, cond, c, c2, clauses;
726a989a 3096 gimple_stmt_iterator gsi;
db3927fb 3097 location_t loc = gimple_location (entry_stmt);
a68ab351 3098
726a989a 3099 clauses = gimple_omp_task_clauses (entry_stmt);
a68ab351 3100
a68ab351
JJ
3101 c = find_omp_clause (clauses, OMP_CLAUSE_IF);
3102 if (c)
3103 cond = gimple_boolify (OMP_CLAUSE_IF_EXPR (c));
3104 else
3105 cond = boolean_true_node;
3106
3107 c = find_omp_clause (clauses, OMP_CLAUSE_UNTIED);
20906c66
JJ
3108 c2 = find_omp_clause (clauses, OMP_CLAUSE_MERGEABLE);
3109 flags = build_int_cst (unsigned_type_node,
3110 (c ? 1 : 0) + (c2 ? 4 : 0));
3111
3112 c = find_omp_clause (clauses, OMP_CLAUSE_FINAL);
3113 if (c)
3114 {
3115 c = gimple_boolify (OMP_CLAUSE_FINAL_EXPR (c));
3116 c = fold_build3_loc (loc, COND_EXPR, unsigned_type_node, c,
3117 build_int_cst (unsigned_type_node, 2),
3118 build_int_cst (unsigned_type_node, 0));
3119 flags = fold_build2_loc (loc, PLUS_EXPR, unsigned_type_node, flags, c);
3120 }
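 /* So FLAGS is a small bitmask as encoded above: bit 0 = untied,
 bit 1 = final, bit 2 = mergeable. For instance (illustrative),

 #pragma omp task untied final (x)

 yields flags = 1 + (x ? 2 : 0). */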
a68ab351 3121
726a989a
RB
3122 gsi = gsi_last_bb (bb);
3123 t = gimple_omp_task_data_arg (entry_stmt);
a68ab351
JJ
3124 if (t == NULL)
3125 t2 = null_pointer_node;
3126 else
db3927fb
AH
3127 t2 = build_fold_addr_expr_loc (loc, t);
3128 t1 = build_fold_addr_expr_loc (loc, gimple_omp_task_child_fn (entry_stmt));
726a989a 3129 t = gimple_omp_task_copy_fn (entry_stmt);
a68ab351
JJ
3130 if (t == NULL)
3131 t3 = null_pointer_node;
3132 else
db3927fb 3133 t3 = build_fold_addr_expr_loc (loc, t);
a68ab351 3134
e79983f4
MM
3135 t = build_call_expr (builtin_decl_explicit (BUILT_IN_GOMP_TASK),
3136 7, t1, t2, t3,
726a989a
RB
3137 gimple_omp_task_arg_size (entry_stmt),
3138 gimple_omp_task_arg_align (entry_stmt), cond, flags);
a68ab351 3139
726a989a
RB
3140 force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3141 false, GSI_CONTINUE_LINKING);
a68ab351
JJ
3142}
3143
3144
726a989a
RB
3145/* If exceptions are enabled, wrap the statements in BODY in a MUST_NOT_THROW
3146 catch handler and return it. This prevents programs from violating the
3147 structured block semantics with throws. */
953ff289 3148
726a989a
RB
3149static gimple_seq
3150maybe_catch_exception (gimple_seq body)
953ff289 3151{
1d65f45c
RH
3152 gimple g;
3153 tree decl;
953ff289
DN
3154
3155 if (!flag_exceptions)
726a989a 3156 return body;
953ff289 3157
3b06d379
SB
3158 if (lang_hooks.eh_protect_cleanup_actions != NULL)
3159 decl = lang_hooks.eh_protect_cleanup_actions ();
953ff289 3160 else
e79983f4 3161 decl = builtin_decl_explicit (BUILT_IN_TRAP);
726a989a 3162
1d65f45c
RH
3163 g = gimple_build_eh_must_not_throw (decl);
3164 g = gimple_build_try (body, gimple_seq_alloc_with_stmt (g),
726a989a 3165 GIMPLE_TRY_CATCH);
953ff289 3166
1d65f45c 3167 return gimple_seq_alloc_with_stmt (g);
953ff289
DN
3168}
3169
50674e96 3170/* Chain all the DECLs in V by their DECL_CHAIN fields. */
953ff289 3171
50674e96 3172static tree
c021f10b 3173vec2chain (VEC(tree,gc) *v)
953ff289 3174{
c021f10b
NF
3175 tree chain = NULL_TREE, t;
3176 unsigned ix;
953ff289 3177
c021f10b 3178 FOR_EACH_VEC_ELT_REVERSE (tree, v, ix, t)
50674e96 3179 {
910ad8de 3180 DECL_CHAIN (t) = chain;
c021f10b 3181 chain = t;
50674e96 3182 }
953ff289 3183
c021f10b 3184 return chain;
50674e96 3185}
953ff289 3186
953ff289 3187
50674e96 3188/* Remove barriers in REGION->EXIT's block. Note that this is only
726a989a
RB
3189 valid for GIMPLE_OMP_PARALLEL regions. Since the end of a parallel region
3190 is an implicit barrier, any workshare inside the GIMPLE_OMP_PARALLEL that
3191 left a barrier at the end of the GIMPLE_OMP_PARALLEL region can now be
50674e96 3192 removed. */
953ff289 3193
50674e96
DN
3194static void
3195remove_exit_barrier (struct omp_region *region)
3196{
726a989a 3197 gimple_stmt_iterator gsi;
50674e96 3198 basic_block exit_bb;
777f7f9a
RH
3199 edge_iterator ei;
3200 edge e;
726a989a 3201 gimple stmt;
03742a9b 3202 int any_addressable_vars = -1;
953ff289 3203
777f7f9a 3204 exit_bb = region->exit;
953ff289 3205
2aee3e57
JJ
3206 /* If the parallel region doesn't return, we don't have REGION->EXIT
3207 block at all. */
3208 if (! exit_bb)
3209 return;
3210
726a989a
RB
3211 /* The last insn in the block will be the parallel's GIMPLE_OMP_RETURN. The
3212 workshare's GIMPLE_OMP_RETURN will be in a preceding block. The kinds of
777f7f9a
RH
3213 statements that can appear in between are extremely limited -- no
3214 memory operations at all. Here, we allow nothing at all, so the
726a989a
RB
3215 only thing we allow to precede this GIMPLE_OMP_RETURN is a label. */
3216 gsi = gsi_last_bb (exit_bb);
3217 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_RETURN);
3218 gsi_prev (&gsi);
3219 if (!gsi_end_p (gsi) && gimple_code (gsi_stmt (gsi)) != GIMPLE_LABEL)
50674e96 3220 return;
953ff289 3221
777f7f9a
RH
3222 FOR_EACH_EDGE (e, ei, exit_bb->preds)
3223 {
726a989a
RB
3224 gsi = gsi_last_bb (e->src);
3225 if (gsi_end_p (gsi))
777f7f9a 3226 continue;
726a989a 3227 stmt = gsi_stmt (gsi);
03742a9b
JJ
3228 if (gimple_code (stmt) == GIMPLE_OMP_RETURN
3229 && !gimple_omp_return_nowait_p (stmt))
3230 {
3231 /* OpenMP 3.0 tasks unfortunately prevent this optimization
3232 in many cases. If there could be tasks queued, the barrier
3233 might be needed to let the tasks run before some local
3234 variable of the parallel that the task uses as shared
3235 runs out of scope. The task can be spawned either
3236 from within the current function (this would be easy to check)
3237 or from some function it calls and gets passed an address
3238 of such a variable. */
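 /* For instance (an illustrative sketch, not from a testcase):

 void f (void)
 {
 int local = 0;
 #pragma omp parallel shared (local)
 {
 #pragma omp task
 use (&local);
 }
 }

 Dropping the implicit barrier at the end of the parallel could
 let LOCAL go out of scope while a queued task still references
 it; "use" here stands for any call that captures the address. */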
3239 if (any_addressable_vars < 0)
3240 {
3241 gimple parallel_stmt = last_stmt (region->entry);
3242 tree child_fun = gimple_omp_parallel_child_fn (parallel_stmt);
c021f10b
NF
3243 tree local_decls, block, decl;
3244 unsigned ix;
03742a9b
JJ
3245
3246 any_addressable_vars = 0;
c021f10b
NF
3247 FOR_EACH_LOCAL_DECL (DECL_STRUCT_FUNCTION (child_fun), ix, decl)
3248 if (TREE_ADDRESSABLE (decl))
03742a9b
JJ
3249 {
3250 any_addressable_vars = 1;
3251 break;
3252 }
3253 for (block = gimple_block (stmt);
3254 !any_addressable_vars
3255 && block
3256 && TREE_CODE (block) == BLOCK;
3257 block = BLOCK_SUPERCONTEXT (block))
3258 {
3259 for (local_decls = BLOCK_VARS (block);
3260 local_decls;
910ad8de 3261 local_decls = DECL_CHAIN (local_decls))
03742a9b
JJ
3262 if (TREE_ADDRESSABLE (local_decls))
3263 {
3264 any_addressable_vars = 1;
3265 break;
3266 }
3267 if (block == gimple_block (parallel_stmt))
3268 break;
3269 }
3270 }
3271 if (!any_addressable_vars)
3272 gimple_omp_return_set_nowait (stmt);
3273 }
777f7f9a 3274 }
953ff289
DN
3275}
3276
777f7f9a
RH
3277static void
3278remove_exit_barriers (struct omp_region *region)
3279{
726a989a 3280 if (region->type == GIMPLE_OMP_PARALLEL)
777f7f9a
RH
3281 remove_exit_barrier (region);
3282
3283 if (region->inner)
3284 {
3285 region = region->inner;
3286 remove_exit_barriers (region);
3287 while (region->next)
3288 {
3289 region = region->next;
3290 remove_exit_barriers (region);
3291 }
3292 }
3293}
50674e96 3294
2b4cf991
JJ
3295/* Optimize omp_get_thread_num () and omp_get_num_threads ()
3296 calls. These can't be declared as const functions, but
3297 within one parallel body they are constant, so they can be
3298 transformed there into __builtin_omp_get_{thread_num,num_threads} ()
a68ab351
JJ
3299 which are declared const. Similarly for a task body, except
3300 that in an untied task omp_get_thread_num () can change at any task
3301 scheduling point. */
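/* Illustrative effect (a sketch): inside a parallel body,

       n = omp_get_num_threads ();

   is redirected to __builtin_omp_get_num_threads (), whose const
   attribute lets later passes CSE repeated calls. */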
2b4cf991
JJ
3302
3303static void
726a989a 3304optimize_omp_library_calls (gimple entry_stmt)
2b4cf991
JJ
3305{
3306 basic_block bb;
726a989a 3307 gimple_stmt_iterator gsi;
e79983f4
MM
3308 tree thr_num_tree = builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM);
3309 tree thr_num_id = DECL_ASSEMBLER_NAME (thr_num_tree);
3310 tree num_thr_tree = builtin_decl_explicit (BUILT_IN_OMP_GET_NUM_THREADS);
3311 tree num_thr_id = DECL_ASSEMBLER_NAME (num_thr_tree);
726a989a
RB
3312 bool untied_task = (gimple_code (entry_stmt) == GIMPLE_OMP_TASK
3313 && find_omp_clause (gimple_omp_task_clauses (entry_stmt),
a68ab351 3314 OMP_CLAUSE_UNTIED) != NULL);
2b4cf991
JJ
3315
3316 FOR_EACH_BB (bb)
726a989a 3317 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
2b4cf991 3318 {
726a989a 3319 gimple call = gsi_stmt (gsi);
2b4cf991
JJ
3320 tree decl;
3321
726a989a
RB
3322 if (is_gimple_call (call)
3323 && (decl = gimple_call_fndecl (call))
2b4cf991
JJ
3324 && DECL_EXTERNAL (decl)
3325 && TREE_PUBLIC (decl)
3326 && DECL_INITIAL (decl) == NULL)
3327 {
3328 tree built_in;
3329
3330 if (DECL_NAME (decl) == thr_num_id)
a68ab351
JJ
3331 {
3332 /* In #pragma omp task untied omp_get_thread_num () can change
3333 during the execution of the task region. */
3334 if (untied_task)
3335 continue;
e79983f4 3336 built_in = builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM);
a68ab351 3337 }
2b4cf991 3338 else if (DECL_NAME (decl) == num_thr_id)
e79983f4 3339 built_in = builtin_decl_explicit (BUILT_IN_OMP_GET_NUM_THREADS);
2b4cf991
JJ
3340 else
3341 continue;
3342
3343 if (DECL_ASSEMBLER_NAME (decl) != DECL_ASSEMBLER_NAME (built_in)
726a989a 3344 || gimple_call_num_args (call) != 0)
2b4cf991
JJ
3345 continue;
3346
3347 if (flag_exceptions && !TREE_NOTHROW (decl))
3348 continue;
3349
3350 if (TREE_CODE (TREE_TYPE (decl)) != FUNCTION_TYPE
9600efe1
MM
3351 || !types_compatible_p (TREE_TYPE (TREE_TYPE (decl)),
3352 TREE_TYPE (TREE_TYPE (built_in))))
2b4cf991
JJ
3353 continue;
3354
7c9577be 3355 gimple_call_set_fndecl (call, built_in);
2b4cf991
JJ
3356 }
3357 }
3358}
3359
a68ab351 3360/* Expand the OpenMP parallel or task directive starting at REGION. */
953ff289
DN
3361
static void
expand_omp_taskreg (struct omp_region *region)
{
  basic_block entry_bb, exit_bb, new_bb;
  struct function *child_cfun;
  tree child_fn, block, t;
  tree save_current;
  gimple_stmt_iterator gsi;
  gimple entry_stmt, stmt;
  edge e;
  VEC(tree,gc) *ws_args;

  entry_stmt = last_stmt (region->entry);
  child_fn = gimple_omp_taskreg_child_fn (entry_stmt);
  child_cfun = DECL_STRUCT_FUNCTION (child_fn);
  /* If this function has already been instrumented, make sure
     the child function isn't instrumented again.  */
  child_cfun->after_tree_profile = cfun->after_tree_profile;

  entry_bb = region->entry;
  exit_bb = region->exit;

  if (is_combined_parallel (region))
    ws_args = region->ws_args;
  else
    ws_args = NULL;

  if (child_cfun->cfg)
    {
      /* Due to inlining, it may happen that we have already outlined
         the region, in which case all we need to do is make the
         sub-graph unreachable and emit the parallel call.  */
      edge entry_succ_e, exit_succ_e;
      gimple_stmt_iterator gsi;

      entry_succ_e = single_succ_edge (entry_bb);

      gsi = gsi_last_bb (entry_bb);
      gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_PARALLEL
                  || gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_TASK);
      gsi_remove (&gsi, true);

      new_bb = entry_bb;
      if (exit_bb)
        {
          exit_succ_e = single_succ_edge (exit_bb);
          make_edge (new_bb, exit_succ_e->dest, EDGE_FALLTHRU);
        }
      remove_edge_and_dominated_blocks (entry_succ_e);
    }
  else
    {
      unsigned srcidx, dstidx, num;

      /* If the parallel region needs data sent from the parent
         function, then the very first statement (except possible
         tree profile counter updates) of the parallel body
         is a copy assignment .OMP_DATA_I = &.OMP_DATA_O.  Since
         &.OMP_DATA_O is passed as an argument to the child function,
         we need to replace it with the argument as seen by the child
         function.

         In most cases, this will end up being the identity assignment
         .OMP_DATA_I = .OMP_DATA_I.  However, if the parallel body had
         a function call that has been inlined, the original PARM_DECL
         .OMP_DATA_I may have been converted into a different local
         variable.  In that case, we need to keep the assignment.  */
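      /* Schematically (a hedged sketch, with illustrative names): for

             #pragma omp parallel shared (x)
               use (x);

         the parent stores x (or its address) into .omp_data_o and emits
         something like

             GOMP_parallel_start (child_fn, &.omp_data_o, 0);
             child_fn (&.omp_data_o);
             GOMP_parallel_end ();

         while the child body begins with .OMP_DATA_I = &.OMP_DATA_O,
         the assignment that is rewritten below to use child_fn's
         incoming argument.  */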
      if (gimple_omp_taskreg_data_arg (entry_stmt))
        {
          basic_block entry_succ_bb = single_succ (entry_bb);
          gimple_stmt_iterator gsi;
          tree arg, narg;
          gimple parcopy_stmt = NULL;

          for (gsi = gsi_start_bb (entry_succ_bb); ; gsi_next (&gsi))
            {
              gimple stmt;

              gcc_assert (!gsi_end_p (gsi));
              stmt = gsi_stmt (gsi);
              if (gimple_code (stmt) != GIMPLE_ASSIGN)
                continue;

              if (gimple_num_ops (stmt) == 2)
                {
                  tree arg = gimple_assign_rhs1 (stmt);

                  /* We ignore the subcode because we're
                     effectively doing a STRIP_NOPS.  */

                  if (TREE_CODE (arg) == ADDR_EXPR
                      && TREE_OPERAND (arg, 0)
                         == gimple_omp_taskreg_data_arg (entry_stmt))
                    {
                      parcopy_stmt = stmt;
                      break;
                    }
                }
            }

          gcc_assert (parcopy_stmt != NULL);
          arg = DECL_ARGUMENTS (child_fn);

          if (!gimple_in_ssa_p (cfun))
            {
              if (gimple_assign_lhs (parcopy_stmt) == arg)
                gsi_remove (&gsi, true);
              else
                {
                  /* ?? Is setting the subcode really necessary ??  */
                  gimple_omp_set_subcode (parcopy_stmt, TREE_CODE (arg));
                  gimple_assign_set_rhs1 (parcopy_stmt, arg);
                }
            }
          else
            {
              /* If we are in ssa form, we must load the value from the default
                 definition of the argument.  That should not be defined now,
                 since the argument is not used uninitialized.  */
              gcc_assert (gimple_default_def (cfun, arg) == NULL);
              narg = make_ssa_name (arg, gimple_build_nop ());
              set_default_def (arg, narg);
              /* ?? Is setting the subcode really necessary ??  */
              gimple_omp_set_subcode (parcopy_stmt, TREE_CODE (narg));
              gimple_assign_set_rhs1 (parcopy_stmt, narg);
              update_stmt (parcopy_stmt);
            }
        }

      /* Declare local variables needed in CHILD_CFUN.  */
      block = DECL_INITIAL (child_fn);
      BLOCK_VARS (block) = vec2chain (child_cfun->local_decls);
      /* The gimplifier could record temporaries in parallel/task block
         rather than in containing function's local_decls chain,
         which would mean cgraph missed finalizing them.  Do it now.  */
      for (t = BLOCK_VARS (block); t; t = DECL_CHAIN (t))
        if (TREE_CODE (t) == VAR_DECL
            && TREE_STATIC (t)
            && !DECL_EXTERNAL (t))
          varpool_finalize_decl (t);
      DECL_SAVED_TREE (child_fn) = NULL;
      gimple_set_body (child_fn, bb_seq (single_succ (entry_bb)));
      TREE_USED (block) = 1;

      /* Reset DECL_CONTEXT on function arguments.  */
      for (t = DECL_ARGUMENTS (child_fn); t; t = DECL_CHAIN (t))
        DECL_CONTEXT (t) = child_fn;

      /* Split ENTRY_BB at GIMPLE_OMP_PARALLEL or GIMPLE_OMP_TASK,
         so that it can be moved to the child function.  */
      gsi = gsi_last_bb (entry_bb);
      stmt = gsi_stmt (gsi);
      gcc_assert (stmt && (gimple_code (stmt) == GIMPLE_OMP_PARALLEL
                           || gimple_code (stmt) == GIMPLE_OMP_TASK));
      gsi_remove (&gsi, true);
      e = split_block (entry_bb, stmt);
      entry_bb = e->dest;
      single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;

      /* Convert GIMPLE_OMP_RETURN into a RETURN_EXPR.  */
      if (exit_bb)
        {
          gsi = gsi_last_bb (exit_bb);
          gcc_assert (!gsi_end_p (gsi)
                      && gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_RETURN);
          stmt = gimple_build_return (NULL);
          gsi_insert_after (&gsi, stmt, GSI_SAME_STMT);
          gsi_remove (&gsi, true);
        }

      /* Move the parallel region into CHILD_CFUN.  */

      if (gimple_in_ssa_p (cfun))
        {
          push_cfun (child_cfun);
          init_tree_ssa (child_cfun);
          init_ssa_operands ();
          cfun->gimple_df->in_ssa_p = true;
          pop_cfun ();
          block = NULL_TREE;
        }
      else
        block = gimple_block (entry_stmt);

      new_bb = move_sese_region_to_fn (child_cfun, entry_bb, exit_bb, block);
      if (exit_bb)
        single_succ_edge (new_bb)->flags = EDGE_FALLTHRU;

      /* Remove non-local VAR_DECLs from child_cfun->local_decls list.  */
      num = VEC_length (tree, child_cfun->local_decls);
      for (srcidx = 0, dstidx = 0; srcidx < num; srcidx++)
        {
          t = VEC_index (tree, child_cfun->local_decls, srcidx);
          if (DECL_CONTEXT (t) == cfun->decl)
            continue;
          if (srcidx != dstidx)
            VEC_replace (tree, child_cfun->local_decls, dstidx, t);
          dstidx++;
        }
      if (dstidx != num)
        VEC_truncate (tree, child_cfun->local_decls, dstidx);

      /* Inform the callgraph about the new function.  */
      DECL_STRUCT_FUNCTION (child_fn)->curr_properties
        = cfun->curr_properties;
      cgraph_add_new_function (child_fn, true);

      /* Fix the callgraph edges for child_cfun.  Those for cfun will be
         fixed in a following pass.  */
      push_cfun (child_cfun);
      save_current = current_function_decl;
      current_function_decl = child_fn;
      if (optimize)
        optimize_omp_library_calls (entry_stmt);
      rebuild_cgraph_edges ();

      /* Some EH regions might become dead, see PR34608.  If
         pass_cleanup_cfg isn't the first pass to happen with the
         new child, these dead EH edges might cause problems.
         Clean them up now.  */
      if (flag_exceptions)
        {
          basic_block bb;
          bool changed = false;

          FOR_EACH_BB (bb)
            changed |= gimple_purge_dead_eh_edges (bb);
          if (changed)
            cleanup_tree_cfg ();
        }
      if (gimple_in_ssa_p (cfun))
        update_ssa (TODO_update_ssa);
      current_function_decl = save_current;
      pop_cfun ();
    }

  /* Emit a library call to launch the children threads.  */
  if (gimple_code (entry_stmt) == GIMPLE_OMP_PARALLEL)
    expand_parallel_call (region, new_bb, entry_stmt, ws_args);
  else
    expand_task_call (new_bb, entry_stmt);
  update_ssa (TODO_update_ssa_only_virtuals);
}


/* A subroutine of expand_omp_for.  Generate code for a parallel
   loop with any schedule.  Given parameters:

        for (V = N1; V cond N2; V += STEP) BODY;

   where COND is "<" or ">", we generate pseudocode

        more = GOMP_loop_foo_start (N1, N2, STEP, CHUNK, &istart0, &iend0);
        if (more) goto L0; else goto L3;
    L0:
        V = istart0;
        iend = iend0;
    L1:
        BODY;
        V += STEP;
        if (V cond iend) goto L1; else goto L2;
    L2:
        if (GOMP_loop_foo_next (&istart0, &iend0)) goto L0; else goto L3;
    L3:

   If this is a combined omp parallel loop, instead of the call to
   GOMP_loop_foo_start, we call GOMP_loop_foo_next.

   For collapsed loops, given parameters:
     collapse(3)
     for (V1 = N11; V1 cond1 N12; V1 += STEP1)
       for (V2 = N21; V2 cond2 N22; V2 += STEP2)
         for (V3 = N31; V3 cond3 N32; V3 += STEP3)
           BODY;

   we generate pseudocode

        if (cond3 is <)
          adj = STEP3 - 1;
        else
          adj = STEP3 + 1;
        count3 = (adj + N32 - N31) / STEP3;
        if (cond2 is <)
          adj = STEP2 - 1;
        else
          adj = STEP2 + 1;
        count2 = (adj + N22 - N21) / STEP2;
        if (cond1 is <)
          adj = STEP1 - 1;
        else
          adj = STEP1 + 1;
        count1 = (adj + N12 - N11) / STEP1;
        count = count1 * count2 * count3;
        more = GOMP_loop_foo_start (0, count, 1, CHUNK, &istart0, &iend0);
        if (more) goto L0; else goto L3;
    L0:
        V = istart0;
        T = V;
        V3 = N31 + (T % count3) * STEP3;
        T = T / count3;
        V2 = N21 + (T % count2) * STEP2;
        T = T / count2;
        V1 = N11 + T * STEP1;
        iend = iend0;
    L1:
        BODY;
        V += 1;
        if (V < iend) goto L10; else goto L2;
    L10:
        V3 += STEP3;
        if (V3 cond3 N32) goto L1; else goto L11;
    L11:
        V3 = N31;
        V2 += STEP2;
        if (V2 cond2 N22) goto L1; else goto L12;
    L12:
        V2 = N21;
        V1 += STEP1;
        goto L1;
    L2:
        if (GOMP_loop_foo_next (&istart0, &iend0)) goto L0; else goto L3;
    L3:

   */
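/* A worked instance of the decomposition (illustrative numbers only): for
   collapse(2) with
     for (i = 0; i < 4; i++)
       for (j = 0; j < 3; j++)
   we get count1 = 4, count2 = 3 and count = 12, and the logical
   iteration V = 7 recovers j = 7 % 3 = 1, then i = 7 / 3 = 2.  */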

static void
expand_omp_for_generic (struct omp_region *region,
                        struct omp_for_data *fd,
                        enum built_in_function start_fn,
                        enum built_in_function next_fn)
{
  tree type, istart0, iend0, iend;
  tree t, vmain, vback, bias = NULL_TREE;
  basic_block entry_bb, cont_bb, exit_bb, l0_bb, l1_bb, collapse_bb;
  basic_block l2_bb = NULL, l3_bb = NULL;
  gimple_stmt_iterator gsi;
  gimple stmt;
  bool in_combined_parallel = is_combined_parallel (region);
  bool broken_loop = region->cont == NULL;
  edge e, ne;
  tree *counts = NULL;
  int i;

  gcc_assert (!broken_loop || !in_combined_parallel);
  gcc_assert (fd->iter_type == long_integer_type_node
              || !in_combined_parallel);

  type = TREE_TYPE (fd->loop.v);
  istart0 = create_tmp_var (fd->iter_type, ".istart0");
  iend0 = create_tmp_var (fd->iter_type, ".iend0");
  TREE_ADDRESSABLE (istart0) = 1;
  TREE_ADDRESSABLE (iend0) = 1;
  if (gimple_in_ssa_p (cfun))
    {
      add_referenced_var (istart0);
      add_referenced_var (iend0);
    }

  /* See if we need to bias by LLONG_MIN.  */
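  /* The bias is the most negative value of the loop's signed type,
     reinterpreted in the unsigned iterator type; adding it maps the
     signed iteration bounds onto the unsigned range while preserving
     their order, and it is subtracted again below when V is recomputed
     from istart0/iend0.  */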
  if (fd->iter_type == long_long_unsigned_type_node
      && TREE_CODE (type) == INTEGER_TYPE
      && !TYPE_UNSIGNED (type))
    {
      tree n1, n2;

      if (fd->loop.cond_code == LT_EXPR)
        {
          n1 = fd->loop.n1;
          n2 = fold_build2 (PLUS_EXPR, type, fd->loop.n2, fd->loop.step);
        }
      else
        {
          n1 = fold_build2 (MINUS_EXPR, type, fd->loop.n2, fd->loop.step);
          n2 = fd->loop.n1;
        }
      if (TREE_CODE (n1) != INTEGER_CST
          || TREE_CODE (n2) != INTEGER_CST
          || ((tree_int_cst_sgn (n1) < 0) ^ (tree_int_cst_sgn (n2) < 0)))
        bias = fold_convert (fd->iter_type, TYPE_MIN_VALUE (type));
    }

  entry_bb = region->entry;
  cont_bb = region->cont;
  collapse_bb = NULL;
  gcc_assert (EDGE_COUNT (entry_bb->succs) == 2);
  gcc_assert (broken_loop
              || BRANCH_EDGE (entry_bb)->dest == FALLTHRU_EDGE (cont_bb)->dest);
  l0_bb = split_edge (FALLTHRU_EDGE (entry_bb));
  l1_bb = single_succ (l0_bb);
  if (!broken_loop)
    {
      l2_bb = create_empty_bb (cont_bb);
      gcc_assert (BRANCH_EDGE (cont_bb)->dest == l1_bb);
      gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
    }
  else
    l2_bb = NULL;
  l3_bb = BRANCH_EDGE (entry_bb)->dest;
  exit_bb = region->exit;

  gsi = gsi_last_bb (entry_bb);

  gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
  if (fd->collapse > 1)
    {
      /* collapsed loops need work for expansion in SSA form.  */
      gcc_assert (!gimple_in_ssa_p (cfun));
      counts = (tree *) alloca (fd->collapse * sizeof (tree));
      for (i = 0; i < fd->collapse; i++)
        {
          tree itype = TREE_TYPE (fd->loops[i].v);

          if (POINTER_TYPE_P (itype))
            itype = lang_hooks.types.type_for_size (TYPE_PRECISION (itype), 0);
          t = build_int_cst (itype, (fd->loops[i].cond_code == LT_EXPR
                                     ? -1 : 1));
          t = fold_build2 (PLUS_EXPR, itype,
                           fold_convert (itype, fd->loops[i].step), t);
          t = fold_build2 (PLUS_EXPR, itype, t,
                           fold_convert (itype, fd->loops[i].n2));
          t = fold_build2 (MINUS_EXPR, itype, t,
                           fold_convert (itype, fd->loops[i].n1));
          if (TYPE_UNSIGNED (itype) && fd->loops[i].cond_code == GT_EXPR)
            t = fold_build2 (TRUNC_DIV_EXPR, itype,
                             fold_build1 (NEGATE_EXPR, itype, t),
                             fold_build1 (NEGATE_EXPR, itype,
                                          fold_convert (itype,
                                                        fd->loops[i].step)));
          else
            t = fold_build2 (TRUNC_DIV_EXPR, itype, t,
                             fold_convert (itype, fd->loops[i].step));
          t = fold_convert (type, t);
          if (TREE_CODE (t) == INTEGER_CST)
            counts[i] = t;
          else
            {
              counts[i] = create_tmp_var (type, ".count");
              t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
                                            true, GSI_SAME_STMT);
              stmt = gimple_build_assign (counts[i], t);
              gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
            }
          if (SSA_VAR_P (fd->loop.n2))
            {
              if (i == 0)
                t = counts[0];
              else
                {
                  t = fold_build2 (MULT_EXPR, type, fd->loop.n2, counts[i]);
                  t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
                                                true, GSI_SAME_STMT);
                }
              stmt = gimple_build_assign (fd->loop.n2, t);
              gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
            }
        }
    }
  if (in_combined_parallel)
    {
      /* In a combined parallel loop, emit a call to
         GOMP_loop_foo_next.  */
      t = build_call_expr (builtin_decl_explicit (next_fn), 2,
                           build_fold_addr_expr (istart0),
                           build_fold_addr_expr (iend0));
    }
  else
    {
      tree t0, t1, t2, t3, t4;
      /* If this is not a combined parallel loop, emit a call to
         GOMP_loop_foo_start in ENTRY_BB.  */
      t4 = build_fold_addr_expr (iend0);
      t3 = build_fold_addr_expr (istart0);
      t2 = fold_convert (fd->iter_type, fd->loop.step);
      if (POINTER_TYPE_P (type)
          && TYPE_PRECISION (type) != TYPE_PRECISION (fd->iter_type))
        {
          /* Avoid casting pointers to integer of a different size.  */
          tree itype
            = lang_hooks.types.type_for_size (TYPE_PRECISION (type), 0);
          t1 = fold_convert (fd->iter_type, fold_convert (itype, fd->loop.n2));
          t0 = fold_convert (fd->iter_type, fold_convert (itype, fd->loop.n1));
        }
      else
        {
          t1 = fold_convert (fd->iter_type, fd->loop.n2);
          t0 = fold_convert (fd->iter_type, fd->loop.n1);
        }
      if (bias)
        {
          t1 = fold_build2 (PLUS_EXPR, fd->iter_type, t1, bias);
          t0 = fold_build2 (PLUS_EXPR, fd->iter_type, t0, bias);
        }
      if (fd->iter_type == long_integer_type_node)
        {
          if (fd->chunk_size)
            {
              t = fold_convert (fd->iter_type, fd->chunk_size);
              t = build_call_expr (builtin_decl_explicit (start_fn),
                                   6, t0, t1, t2, t, t3, t4);
            }
          else
            t = build_call_expr (builtin_decl_explicit (start_fn),
                                 5, t0, t1, t2, t3, t4);
        }
      else
        {
          tree t5;
          tree c_bool_type;
          tree bfn_decl;

          /* The GOMP_loop_ull_*start functions have an additional boolean
             argument, true for < loops and false for > loops.
             In Fortran, the C bool type can be different from
             boolean_type_node.  */
          bfn_decl = builtin_decl_explicit (start_fn);
          c_bool_type = TREE_TYPE (TREE_TYPE (bfn_decl));
          t5 = build_int_cst (c_bool_type,
                              fd->loop.cond_code == LT_EXPR ? 1 : 0);
          if (fd->chunk_size)
            {
              tree bfn_decl = builtin_decl_explicit (start_fn);
              t = fold_convert (fd->iter_type, fd->chunk_size);
              t = build_call_expr (bfn_decl, 7, t5, t0, t1, t2, t, t3, t4);
            }
          else
            t = build_call_expr (builtin_decl_explicit (start_fn),
                                 6, t5, t0, t1, t2, t3, t4);
        }
    }
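  /* T now holds the call that claims the first iteration chunk; e.g.
     for schedule(dynamic,32) on a long loop over 0..399 it is roughly
        more = GOMP_loop_dynamic_start (0, 400, 1, 32, &istart0, &iend0);
     (illustrative values, not taken from any particular test).  */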
  if (TREE_TYPE (t) != boolean_type_node)
    t = fold_build2 (NE_EXPR, boolean_type_node,
                     t, build_int_cst (TREE_TYPE (t), 0));
  t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
                                true, GSI_SAME_STMT);
  gsi_insert_after (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT);

  /* Remove the GIMPLE_OMP_FOR statement.  */
  gsi_remove (&gsi, true);

  /* Iteration setup for sequential loop goes in L0_BB.  */
  gsi = gsi_start_bb (l0_bb);
  t = istart0;
  if (bias)
    t = fold_build2 (MINUS_EXPR, fd->iter_type, t, bias);
  if (POINTER_TYPE_P (type))
    t = fold_convert (lang_hooks.types.type_for_size (TYPE_PRECISION (type),
                                                      0), t);
  t = fold_convert (type, t);
  t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
                                false, GSI_CONTINUE_LINKING);
  stmt = gimple_build_assign (fd->loop.v, t);
  gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);

  t = iend0;
  if (bias)
    t = fold_build2 (MINUS_EXPR, fd->iter_type, t, bias);
  if (POINTER_TYPE_P (type))
    t = fold_convert (lang_hooks.types.type_for_size (TYPE_PRECISION (type),
                                                      0), t);
  t = fold_convert (type, t);
  iend = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
                                   false, GSI_CONTINUE_LINKING);
  if (fd->collapse > 1)
    {
      tree tem = create_tmp_var (type, ".tem");

      stmt = gimple_build_assign (tem, fd->loop.v);
      gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
      for (i = fd->collapse - 1; i >= 0; i--)
        {
          tree vtype = TREE_TYPE (fd->loops[i].v), itype;
          itype = vtype;
          if (POINTER_TYPE_P (vtype))
            itype = lang_hooks.types.type_for_size (TYPE_PRECISION (vtype), 0);
          t = fold_build2 (TRUNC_MOD_EXPR, type, tem, counts[i]);
          t = fold_convert (itype, t);
          t = fold_build2 (MULT_EXPR, itype, t,
                           fold_convert (itype, fd->loops[i].step));
          if (POINTER_TYPE_P (vtype))
            t = fold_build_pointer_plus (fd->loops[i].n1, t);
          else
            t = fold_build2 (PLUS_EXPR, itype, fd->loops[i].n1, t);
          t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
                                        false, GSI_CONTINUE_LINKING);
          stmt = gimple_build_assign (fd->loops[i].v, t);
          gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
          if (i != 0)
            {
              t = fold_build2 (TRUNC_DIV_EXPR, type, tem, counts[i]);
              t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
                                            false, GSI_CONTINUE_LINKING);
              stmt = gimple_build_assign (tem, t);
              gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
            }
        }
    }

  if (!broken_loop)
    {
      /* Code to control the increment and predicate for the sequential
         loop goes in the CONT_BB.  */
      gsi = gsi_last_bb (cont_bb);
      stmt = gsi_stmt (gsi);
      gcc_assert (gimple_code (stmt) == GIMPLE_OMP_CONTINUE);
      vmain = gimple_omp_continue_control_use (stmt);
      vback = gimple_omp_continue_control_def (stmt);

      if (POINTER_TYPE_P (type))
        t = fold_build_pointer_plus (vmain, fd->loop.step);
      else
        t = fold_build2 (PLUS_EXPR, type, vmain, fd->loop.step);
      t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
                                    true, GSI_SAME_STMT);
      stmt = gimple_build_assign (vback, t);
      gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);

      t = build2 (fd->loop.cond_code, boolean_type_node, vback, iend);
      stmt = gimple_build_cond_empty (t);
      gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);

      /* Remove GIMPLE_OMP_CONTINUE.  */
      gsi_remove (&gsi, true);

      if (fd->collapse > 1)
        {
          basic_block last_bb, bb;

          last_bb = cont_bb;
          for (i = fd->collapse - 1; i >= 0; i--)
            {
              tree vtype = TREE_TYPE (fd->loops[i].v);

              bb = create_empty_bb (last_bb);
              gsi = gsi_start_bb (bb);

              if (i < fd->collapse - 1)
                {
                  e = make_edge (last_bb, bb, EDGE_FALSE_VALUE);
                  e->probability = REG_BR_PROB_BASE / 8;

                  t = fd->loops[i + 1].n1;
                  t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
                                                false, GSI_CONTINUE_LINKING);
                  stmt = gimple_build_assign (fd->loops[i + 1].v, t);
                  gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
                }
              else
                collapse_bb = bb;

              set_immediate_dominator (CDI_DOMINATORS, bb, last_bb);

              if (POINTER_TYPE_P (vtype))
                t = fold_build_pointer_plus (fd->loops[i].v, fd->loops[i].step);
              else
                t = fold_build2 (PLUS_EXPR, vtype, fd->loops[i].v,
                                 fd->loops[i].step);
              t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
                                            false, GSI_CONTINUE_LINKING);
              stmt = gimple_build_assign (fd->loops[i].v, t);
              gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);

              if (i > 0)
                {
                  t = fd->loops[i].n2;
                  t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
                                                false, GSI_CONTINUE_LINKING);
                  t = fold_build2 (fd->loops[i].cond_code, boolean_type_node,
                                   fd->loops[i].v, t);
                  stmt = gimple_build_cond_empty (t);
                  gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
                  e = make_edge (bb, l1_bb, EDGE_TRUE_VALUE);
                  e->probability = REG_BR_PROB_BASE * 7 / 8;
                }
              else
                make_edge (bb, l1_bb, EDGE_FALLTHRU);
              last_bb = bb;
            }
        }

      /* Emit code to get the next parallel iteration in L2_BB.  */
      gsi = gsi_start_bb (l2_bb);

      t = build_call_expr (builtin_decl_explicit (next_fn), 2,
                           build_fold_addr_expr (istart0),
                           build_fold_addr_expr (iend0));
      t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
                                    false, GSI_CONTINUE_LINKING);
      if (TREE_TYPE (t) != boolean_type_node)
        t = fold_build2 (NE_EXPR, boolean_type_node,
                         t, build_int_cst (TREE_TYPE (t), 0));
      stmt = gimple_build_cond_empty (t);
      gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
    }

  /* Add the loop cleanup function.  */
  gsi = gsi_last_bb (exit_bb);
  if (gimple_omp_return_nowait_p (gsi_stmt (gsi)))
    t = builtin_decl_explicit (BUILT_IN_GOMP_LOOP_END_NOWAIT);
  else
    t = builtin_decl_explicit (BUILT_IN_GOMP_LOOP_END);
  stmt = gimple_build_call (t, 0);
  gsi_insert_after (&gsi, stmt, GSI_SAME_STMT);
  gsi_remove (&gsi, true);

  /* Connect the new blocks.  */
  find_edge (entry_bb, l0_bb)->flags = EDGE_TRUE_VALUE;
  find_edge (entry_bb, l3_bb)->flags = EDGE_FALSE_VALUE;

  if (!broken_loop)
    {
      gimple_seq phis;

      e = find_edge (cont_bb, l3_bb);
      ne = make_edge (l2_bb, l3_bb, EDGE_FALSE_VALUE);

      phis = phi_nodes (l3_bb);
      for (gsi = gsi_start (phis); !gsi_end_p (gsi); gsi_next (&gsi))
        {
          gimple phi = gsi_stmt (gsi);
          SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, ne),
                   PHI_ARG_DEF_FROM_EDGE (phi, e));
        }
      remove_edge (e);

      make_edge (cont_bb, l2_bb, EDGE_FALSE_VALUE);
      if (fd->collapse > 1)
        {
          e = find_edge (cont_bb, l1_bb);
          remove_edge (e);
          e = make_edge (cont_bb, collapse_bb, EDGE_TRUE_VALUE);
        }
      else
        {
          e = find_edge (cont_bb, l1_bb);
          e->flags = EDGE_TRUE_VALUE;
        }
      e->probability = REG_BR_PROB_BASE * 7 / 8;
      find_edge (cont_bb, l2_bb)->probability = REG_BR_PROB_BASE / 8;
      make_edge (l2_bb, l0_bb, EDGE_TRUE_VALUE);

      set_immediate_dominator (CDI_DOMINATORS, l2_bb,
                               recompute_dominator (CDI_DOMINATORS, l2_bb));
      set_immediate_dominator (CDI_DOMINATORS, l3_bb,
                               recompute_dominator (CDI_DOMINATORS, l3_bb));
      set_immediate_dominator (CDI_DOMINATORS, l0_bb,
                               recompute_dominator (CDI_DOMINATORS, l0_bb));
      set_immediate_dominator (CDI_DOMINATORS, l1_bb,
                               recompute_dominator (CDI_DOMINATORS, l1_bb));
    }
}


/* A subroutine of expand_omp_for.  Generate code for a parallel
   loop with static schedule and no specified chunk size.  Given
   parameters:

        for (V = N1; V cond N2; V += STEP) BODY;

   where COND is "<" or ">", we generate pseudocode

        if (cond is <)
          adj = STEP - 1;
        else
          adj = STEP + 1;
        if ((__typeof (V)) -1 > 0 && cond is >)
          n = -(adj + N2 - N1) / -STEP;
        else
          n = (adj + N2 - N1) / STEP;
        q = n / nthreads;
        tt = n % nthreads;
        if (threadid < tt) goto L3; else goto L4;
    L3:
        tt = 0;
        q = q + 1;
    L4:
        s0 = q * threadid + tt;
        e0 = s0 + q;
        V = s0 * STEP + N1;
        if (s0 >= e0) goto L2; else goto L0;
    L0:
        e = e0 * STEP + N1;
    L1:
        BODY;
        V += STEP;
        if (V cond e) goto L1;
    L2:
*/
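/* A hedged worked example (illustrative numbers only): n = 10 iterations
   on nthreads = 4 give q = 2, tt = 2; threads 0 and 1 take q + 1 = 3
   iterations each ([0,3) and [3,6)), threads 2 and 3 take 2 each
   ([6,8) and [8,10)), covering all 10 iterations with a per-thread
   difference of at most one.  */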

static void
expand_omp_for_static_nochunk (struct omp_region *region,
                               struct omp_for_data *fd)
{
  tree n, q, s0, e0, e, t, tt, nthreads, threadid;
  tree type, itype, vmain, vback;
  basic_block entry_bb, second_bb, third_bb, exit_bb, seq_start_bb;
  basic_block body_bb, cont_bb;
  basic_block fin_bb;
  gimple_stmt_iterator gsi;
  gimple stmt;
  edge ep;

  itype = type = TREE_TYPE (fd->loop.v);
  if (POINTER_TYPE_P (type))
    itype = lang_hooks.types.type_for_size (TYPE_PRECISION (type), 0);

  entry_bb = region->entry;
  cont_bb = region->cont;
  gcc_assert (EDGE_COUNT (entry_bb->succs) == 2);
  gcc_assert (BRANCH_EDGE (entry_bb)->dest == FALLTHRU_EDGE (cont_bb)->dest);
  seq_start_bb = split_edge (FALLTHRU_EDGE (entry_bb));
  body_bb = single_succ (seq_start_bb);
  gcc_assert (BRANCH_EDGE (cont_bb)->dest == body_bb);
  gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
  fin_bb = FALLTHRU_EDGE (cont_bb)->dest;
  exit_bb = region->exit;

  /* Iteration space partitioning goes in ENTRY_BB.  */
  gsi = gsi_last_bb (entry_bb);
  gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);

  t = build_call_expr (builtin_decl_explicit (BUILT_IN_OMP_GET_NUM_THREADS), 0);
  t = fold_convert (itype, t);
  nthreads = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
                                       true, GSI_SAME_STMT);

  t = build_call_expr (builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM), 0);
  t = fold_convert (itype, t);
  threadid = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
                                       true, GSI_SAME_STMT);

  fd->loop.n1
    = force_gimple_operand_gsi (&gsi, fold_convert (type, fd->loop.n1),
                                true, NULL_TREE, true, GSI_SAME_STMT);
  fd->loop.n2
    = force_gimple_operand_gsi (&gsi, fold_convert (itype, fd->loop.n2),
                                true, NULL_TREE, true, GSI_SAME_STMT);
  fd->loop.step
    = force_gimple_operand_gsi (&gsi, fold_convert (itype, fd->loop.step),
                                true, NULL_TREE, true, GSI_SAME_STMT);

  t = build_int_cst (itype, (fd->loop.cond_code == LT_EXPR ? -1 : 1));
  t = fold_build2 (PLUS_EXPR, itype, fd->loop.step, t);
  t = fold_build2 (PLUS_EXPR, itype, t, fd->loop.n2);
  t = fold_build2 (MINUS_EXPR, itype, t, fold_convert (itype, fd->loop.n1));
  if (TYPE_UNSIGNED (itype) && fd->loop.cond_code == GT_EXPR)
    t = fold_build2 (TRUNC_DIV_EXPR, itype,
                     fold_build1 (NEGATE_EXPR, itype, t),
                     fold_build1 (NEGATE_EXPR, itype, fd->loop.step));
  else
    t = fold_build2 (TRUNC_DIV_EXPR, itype, t, fd->loop.step);
  t = fold_convert (itype, t);
  n = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);

  q = create_tmp_var (itype, "q");
  t = fold_build2 (TRUNC_DIV_EXPR, itype, n, nthreads);
  t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE, true, GSI_SAME_STMT);
  gsi_insert_before (&gsi, gimple_build_assign (q, t), GSI_SAME_STMT);

  tt = create_tmp_var (itype, "tt");
  t = fold_build2 (TRUNC_MOD_EXPR, itype, n, nthreads);
  t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE, true, GSI_SAME_STMT);
  gsi_insert_before (&gsi, gimple_build_assign (tt, t), GSI_SAME_STMT);

  t = build2 (LT_EXPR, boolean_type_node, threadid, tt);
  stmt = gimple_build_cond_empty (t);
  gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);

  second_bb = split_block (entry_bb, stmt)->dest;
  gsi = gsi_last_bb (second_bb);
  gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);

  gsi_insert_before (&gsi, gimple_build_assign (tt, build_int_cst (itype, 0)),
                     GSI_SAME_STMT);
  stmt = gimple_build_assign_with_ops (PLUS_EXPR, q, q,
                                       build_int_cst (itype, 1));
  gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);

  third_bb = split_block (second_bb, stmt)->dest;
  gsi = gsi_last_bb (third_bb);
  gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);

  t = build2 (MULT_EXPR, itype, q, threadid);
  t = build2 (PLUS_EXPR, itype, t, tt);
  s0 = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);

  t = fold_build2 (PLUS_EXPR, itype, s0, q);
  e0 = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);

  t = build2 (GE_EXPR, boolean_type_node, s0, e0);
  gsi_insert_before (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT);

  /* Remove the GIMPLE_OMP_FOR statement.  */
  gsi_remove (&gsi, true);

  /* Setup code for sequential iteration goes in SEQ_START_BB.  */
  gsi = gsi_start_bb (seq_start_bb);

  t = fold_convert (itype, s0);
  t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
  if (POINTER_TYPE_P (type))
    t = fold_build_pointer_plus (fd->loop.n1, t);
  else
    t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
  t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
                                false, GSI_CONTINUE_LINKING);
  stmt = gimple_build_assign (fd->loop.v, t);
  gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);

  t = fold_convert (itype, e0);
  t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
  if (POINTER_TYPE_P (type))
    t = fold_build_pointer_plus (fd->loop.n1, t);
  else
    t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
  e = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
                                false, GSI_CONTINUE_LINKING);

  /* The code controlling the sequential loop replaces the
     GIMPLE_OMP_CONTINUE.  */
  gsi = gsi_last_bb (cont_bb);
  stmt = gsi_stmt (gsi);
  gcc_assert (gimple_code (stmt) == GIMPLE_OMP_CONTINUE);
  vmain = gimple_omp_continue_control_use (stmt);
  vback = gimple_omp_continue_control_def (stmt);

  if (POINTER_TYPE_P (type))
    t = fold_build_pointer_plus (vmain, fd->loop.step);
  else
    t = fold_build2 (PLUS_EXPR, type, vmain, fd->loop.step);
  t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
                                true, GSI_SAME_STMT);
  stmt = gimple_build_assign (vback, t);
  gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);

  t = build2 (fd->loop.cond_code, boolean_type_node, vback, e);
  gsi_insert_before (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT);

  /* Remove the GIMPLE_OMP_CONTINUE statement.  */
  gsi_remove (&gsi, true);

  /* Replace the GIMPLE_OMP_RETURN with a barrier, or nothing.  */
  gsi = gsi_last_bb (exit_bb);
  if (!gimple_omp_return_nowait_p (gsi_stmt (gsi)))
    force_gimple_operand_gsi (&gsi, build_omp_barrier (), false, NULL_TREE,
                              false, GSI_SAME_STMT);
  gsi_remove (&gsi, true);

  /* Connect all the blocks.  */
  ep = make_edge (entry_bb, third_bb, EDGE_FALSE_VALUE);
  ep->probability = REG_BR_PROB_BASE / 4 * 3;
  ep = find_edge (entry_bb, second_bb);
  ep->flags = EDGE_TRUE_VALUE;
  ep->probability = REG_BR_PROB_BASE / 4;
  find_edge (third_bb, seq_start_bb)->flags = EDGE_FALSE_VALUE;
  find_edge (third_bb, fin_bb)->flags = EDGE_TRUE_VALUE;

  find_edge (cont_bb, body_bb)->flags = EDGE_TRUE_VALUE;
  find_edge (cont_bb, fin_bb)->flags = EDGE_FALSE_VALUE;

  set_immediate_dominator (CDI_DOMINATORS, second_bb, entry_bb);
  set_immediate_dominator (CDI_DOMINATORS, third_bb, entry_bb);
  set_immediate_dominator (CDI_DOMINATORS, seq_start_bb, third_bb);
  set_immediate_dominator (CDI_DOMINATORS, body_bb,
                           recompute_dominator (CDI_DOMINATORS, body_bb));
  set_immediate_dominator (CDI_DOMINATORS, fin_bb,
                           recompute_dominator (CDI_DOMINATORS, fin_bb));
}


/* A subroutine of expand_omp_for.  Generate code for a parallel
   loop with static schedule and a specified chunk size.  Given
   parameters:

        for (V = N1; V cond N2; V += STEP) BODY;

   where COND is "<" or ">", we generate pseudocode

        if (cond is <)
          adj = STEP - 1;
        else
          adj = STEP + 1;
        if ((__typeof (V)) -1 > 0 && cond is >)
          n = -(adj + N2 - N1) / -STEP;
        else
          n = (adj + N2 - N1) / STEP;
        trip = 0;
        V = threadid * CHUNK * STEP + N1;  -- this extra definition of V is
                                              here so that V is defined
                                              if the loop is not entered
    L0:
        s0 = (trip * nthreads + threadid) * CHUNK;
        e0 = min(s0 + CHUNK, n);
        if (s0 < n) goto L1; else goto L4;
    L1:
        V = s0 * STEP + N1;
        e = e0 * STEP + N1;
    L2:
        BODY;
        V += STEP;
        if (V cond e) goto L2; else goto L3;
    L3:
        trip += 1;
        goto L0;
    L4:
*/
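/* A hedged worked example (illustrative numbers only): with n = 10,
   nthreads = 2 and CHUNK = 2, thread 0 takes chunks [0,2), [4,6) and
   [8,10) on trips 0, 1 and 2, while thread 1 takes [2,4) and [6,8);
   on trip 2 thread 1 computes s0 = 10, fails the s0 < n test, and
   exits the loop.  */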

static void
expand_omp_for_static_chunk (struct omp_region *region, struct omp_for_data *fd)
{
  tree n, s0, e0, e, t;
  tree trip_var, trip_init, trip_main, trip_back, nthreads, threadid;
  tree type, itype, v_main, v_back, v_extra;
  basic_block entry_bb, exit_bb, body_bb, seq_start_bb, iter_part_bb;
  basic_block trip_update_bb, cont_bb, fin_bb;
  gimple_stmt_iterator si;
  gimple stmt;
  edge se;

  itype = type = TREE_TYPE (fd->loop.v);
  if (POINTER_TYPE_P (type))
    itype = lang_hooks.types.type_for_size (TYPE_PRECISION (type), 0);

  entry_bb = region->entry;
  se = split_block (entry_bb, last_stmt (entry_bb));
  entry_bb = se->src;
  iter_part_bb = se->dest;
  cont_bb = region->cont;
  gcc_assert (EDGE_COUNT (iter_part_bb->succs) == 2);
  gcc_assert (BRANCH_EDGE (iter_part_bb)->dest
              == FALLTHRU_EDGE (cont_bb)->dest);
  seq_start_bb = split_edge (FALLTHRU_EDGE (iter_part_bb));
  body_bb = single_succ (seq_start_bb);
  gcc_assert (BRANCH_EDGE (cont_bb)->dest == body_bb);
  gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
  fin_bb = FALLTHRU_EDGE (cont_bb)->dest;
  trip_update_bb = split_edge (FALLTHRU_EDGE (cont_bb));
  exit_bb = region->exit;

  /* Trip and adjustment setup goes in ENTRY_BB.  */
  si = gsi_last_bb (entry_bb);
  gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_FOR);

  t = build_call_expr (builtin_decl_explicit (BUILT_IN_OMP_GET_NUM_THREADS), 0);
  t = fold_convert (itype, t);
  nthreads = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
                                       true, GSI_SAME_STMT);

  t = build_call_expr (builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM), 0);
  t = fold_convert (itype, t);
  threadid = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
                                       true, GSI_SAME_STMT);

  fd->loop.n1
    = force_gimple_operand_gsi (&si, fold_convert (type, fd->loop.n1),
                                true, NULL_TREE, true, GSI_SAME_STMT);
  fd->loop.n2
    = force_gimple_operand_gsi (&si, fold_convert (itype, fd->loop.n2),
                                true, NULL_TREE, true, GSI_SAME_STMT);
  fd->loop.step
    = force_gimple_operand_gsi (&si, fold_convert (itype, fd->loop.step),
                                true, NULL_TREE, true, GSI_SAME_STMT);
  fd->chunk_size
    = force_gimple_operand_gsi (&si, fold_convert (itype, fd->chunk_size),
                                true, NULL_TREE, true, GSI_SAME_STMT);

  t = build_int_cst (itype, (fd->loop.cond_code == LT_EXPR ? -1 : 1));
  t = fold_build2 (PLUS_EXPR, itype, fd->loop.step, t);
  t = fold_build2 (PLUS_EXPR, itype, t, fd->loop.n2);
  t = fold_build2 (MINUS_EXPR, itype, t, fold_convert (itype, fd->loop.n1));
  if (TYPE_UNSIGNED (itype) && fd->loop.cond_code == GT_EXPR)
    t = fold_build2 (TRUNC_DIV_EXPR, itype,
                     fold_build1 (NEGATE_EXPR, itype, t),
                     fold_build1 (NEGATE_EXPR, itype, fd->loop.step));
  else
    t = fold_build2 (TRUNC_DIV_EXPR, itype, t, fd->loop.step);
  t = fold_convert (itype, t);
  n = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
                                true, GSI_SAME_STMT);

  trip_var = create_tmp_var (itype, ".trip");
  if (gimple_in_ssa_p (cfun))
    {
      add_referenced_var (trip_var);
      trip_init = make_ssa_name (trip_var, NULL);
      trip_main = make_ssa_name (trip_var, NULL);
      trip_back = make_ssa_name (trip_var, NULL);
    }
  else
    {
      trip_init = trip_var;
      trip_main = trip_var;
      trip_back = trip_var;
    }

  stmt = gimple_build_assign (trip_init, build_int_cst (itype, 0));
  gsi_insert_before (&si, stmt, GSI_SAME_STMT);

  t = fold_build2 (MULT_EXPR, itype, threadid, fd->chunk_size);
  t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
  if (POINTER_TYPE_P (type))
    t = fold_build_pointer_plus (fd->loop.n1, t);
  else
    t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
  v_extra = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
                                      true, GSI_SAME_STMT);

  /* Remove the GIMPLE_OMP_FOR.  */
  gsi_remove (&si, true);

  /* Iteration space partitioning goes in ITER_PART_BB.  */
  si = gsi_last_bb (iter_part_bb);

  t = fold_build2 (MULT_EXPR, itype, trip_main, nthreads);
  t = fold_build2 (PLUS_EXPR, itype, t, threadid);
  t = fold_build2 (MULT_EXPR, itype, t, fd->chunk_size);
  s0 = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
                                 false, GSI_CONTINUE_LINKING);

  t = fold_build2 (PLUS_EXPR, itype, s0, fd->chunk_size);
  t = fold_build2 (MIN_EXPR, itype, t, n);
  e0 = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
                                 false, GSI_CONTINUE_LINKING);

  t = build2 (LT_EXPR, boolean_type_node, s0, n);
  gsi_insert_after (&si, gimple_build_cond_empty (t), GSI_CONTINUE_LINKING);

  /* Setup code for sequential iteration goes in SEQ_START_BB.  */
  si = gsi_start_bb (seq_start_bb);

  t = fold_convert (itype, s0);
  t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
  if (POINTER_TYPE_P (type))
    t = fold_build_pointer_plus (fd->loop.n1, t);
  else
    t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
  t = force_gimple_operand_gsi (&si, t, false, NULL_TREE,
                                false, GSI_CONTINUE_LINKING);
  stmt = gimple_build_assign (fd->loop.v, t);
  gsi_insert_after (&si, stmt, GSI_CONTINUE_LINKING);

  t = fold_convert (itype, e0);
  t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
  if (POINTER_TYPE_P (type))
    t = fold_build_pointer_plus (fd->loop.n1, t);
  else
    t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
  e = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
                                false, GSI_CONTINUE_LINKING);

  /* The code controlling the sequential loop goes in CONT_BB,
     replacing the GIMPLE_OMP_CONTINUE.  */
  si = gsi_last_bb (cont_bb);
  stmt = gsi_stmt (si);
  gcc_assert (gimple_code (stmt) == GIMPLE_OMP_CONTINUE);
  v_main = gimple_omp_continue_control_use (stmt);
  v_back = gimple_omp_continue_control_def (stmt);

  if (POINTER_TYPE_P (type))
    t = fold_build_pointer_plus (v_main, fd->loop.step);
  else
    t = fold_build2 (PLUS_EXPR, type, v_main, fd->loop.step);
  stmt = gimple_build_assign (v_back, t);
  gsi_insert_before (&si, stmt, GSI_SAME_STMT);

  t = build2 (fd->loop.cond_code, boolean_type_node, v_back, e);
  gsi_insert_before (&si, gimple_build_cond_empty (t), GSI_SAME_STMT);

  /* Remove GIMPLE_OMP_CONTINUE.  */
  gsi_remove (&si, true);

  /* Trip update code goes into TRIP_UPDATE_BB.  */
  si = gsi_start_bb (trip_update_bb);

  t = build_int_cst (itype, 1);
  t = build2 (PLUS_EXPR, itype, trip_main, t);
  stmt = gimple_build_assign (trip_back, t);
  gsi_insert_after (&si, stmt, GSI_CONTINUE_LINKING);

  /* Replace the GIMPLE_OMP_RETURN with a barrier, or nothing.  */
  si = gsi_last_bb (exit_bb);
  if (!gimple_omp_return_nowait_p (gsi_stmt (si)))
    force_gimple_operand_gsi (&si, build_omp_barrier (), false, NULL_TREE,
                              false, GSI_SAME_STMT);
  gsi_remove (&si, true);

  /* Connect the new blocks.  */
  find_edge (iter_part_bb, seq_start_bb)->flags = EDGE_TRUE_VALUE;
  find_edge (iter_part_bb, fin_bb)->flags = EDGE_FALSE_VALUE;

  find_edge (cont_bb, body_bb)->flags = EDGE_TRUE_VALUE;
  find_edge (cont_bb, trip_update_bb)->flags = EDGE_FALSE_VALUE;

  redirect_edge_and_branch (single_succ_edge (trip_update_bb), iter_part_bb);

  if (gimple_in_ssa_p (cfun))
    {
      gimple_stmt_iterator psi;
      gimple phi;
      edge re, ene;
      edge_var_map_vector head;
      edge_var_map *vm;
      size_t i;

      /* When we redirect the edge from trip_update_bb to iter_part_bb, we
         remove arguments of the phi nodes in fin_bb.  We need to create
         appropriate phi nodes in iter_part_bb instead.  */
      se = single_pred_edge (fin_bb);
      re = single_succ_edge (trip_update_bb);
      head = redirect_edge_var_map_vector (re);
      ene = single_succ_edge (entry_bb);

      psi = gsi_start_phis (fin_bb);
      for (i = 0; !gsi_end_p (psi) && VEC_iterate (edge_var_map, head, i, vm);
           gsi_next (&psi), ++i)
        {
          gimple nphi;
          source_location locus;

          phi = gsi_stmt (psi);
          t = gimple_phi_result (phi);
          gcc_assert (t == redirect_edge_var_map_result (vm));
          nphi = create_phi_node (t, iter_part_bb);
          SSA_NAME_DEF_STMT (t) = nphi;

          t = PHI_ARG_DEF_FROM_EDGE (phi, se);
          locus = gimple_phi_arg_location_from_edge (phi, se);

          /* A special case -- fd->loop.v is not yet computed in
             iter_part_bb, we need to use v_extra instead.  */
          if (t == fd->loop.v)
            t = v_extra;
          add_phi_arg (nphi, t, ene, locus);
          locus = redirect_edge_var_map_location (vm);
          add_phi_arg (nphi, redirect_edge_var_map_def (vm), re, locus);
        }
      gcc_assert (!gsi_end_p (psi) && i == VEC_length (edge_var_map, head));
      redirect_edge_var_map_clear (re);
      while (1)
        {
          psi = gsi_start_phis (fin_bb);
          if (gsi_end_p (psi))
            break;
          remove_phi_node (&psi, false);
        }

      /* Make phi node for trip.  */
      phi = create_phi_node (trip_main, iter_part_bb);
      SSA_NAME_DEF_STMT (trip_main) = phi;
      add_phi_arg (phi, trip_back, single_succ_edge (trip_update_bb),
                   UNKNOWN_LOCATION);
      add_phi_arg (phi, trip_init, single_succ_edge (entry_bb),
                   UNKNOWN_LOCATION);
    }

  set_immediate_dominator (CDI_DOMINATORS, trip_update_bb, cont_bb);
  set_immediate_dominator (CDI_DOMINATORS, iter_part_bb,
                           recompute_dominator (CDI_DOMINATORS, iter_part_bb));
  set_immediate_dominator (CDI_DOMINATORS, fin_bb,
                           recompute_dominator (CDI_DOMINATORS, fin_bb));
  set_immediate_dominator (CDI_DOMINATORS, seq_start_bb,
                           recompute_dominator (CDI_DOMINATORS, seq_start_bb));
  set_immediate_dominator (CDI_DOMINATORS, body_bb,
                           recompute_dominator (CDI_DOMINATORS, body_bb));
}


/* Expand the OpenMP loop defined by REGION.  */

static void
expand_omp_for (struct omp_region *region)
{
  struct omp_for_data fd;
  struct omp_for_data_loop *loops;

  loops
    = (struct omp_for_data_loop *)
      alloca (gimple_omp_for_collapse (last_stmt (region->entry))
              * sizeof (struct omp_for_data_loop));
  extract_omp_for_data (last_stmt (region->entry), &fd, loops);
  region->sched_kind = fd.sched_kind;

  gcc_assert (EDGE_COUNT (region->entry->succs) == 2);
  BRANCH_EDGE (region->entry)->flags &= ~EDGE_ABNORMAL;
  FALLTHRU_EDGE (region->entry)->flags &= ~EDGE_ABNORMAL;
  if (region->cont)
    {
      gcc_assert (EDGE_COUNT (region->cont->succs) == 2);
      BRANCH_EDGE (region->cont)->flags &= ~EDGE_ABNORMAL;
      FALLTHRU_EDGE (region->cont)->flags &= ~EDGE_ABNORMAL;
    }

  if (fd.sched_kind == OMP_CLAUSE_SCHEDULE_STATIC
      && !fd.have_ordered
      && fd.collapse == 1
      && region->cont != NULL)
    {
      if (fd.chunk_size == NULL)
        expand_omp_for_static_nochunk (region, &fd);
      else
        expand_omp_for_static_chunk (region, &fd);
    }
  else
    {
      int fn_index, start_ix, next_ix;

      gcc_assert (fd.sched_kind != OMP_CLAUSE_SCHEDULE_AUTO);
      fn_index = (fd.sched_kind == OMP_CLAUSE_SCHEDULE_RUNTIME)
                 ? 3 : fd.sched_kind;
      fn_index += fd.have_ordered * 4;
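      /* This index arithmetic relies on the GOMP_loop_*_start/next
         builtins being declared contiguously: static, dynamic, guided,
         runtime, followed by the four ordered variants.  E.g.
         schedule(dynamic) with ordered would give fn_index 1 + 4 = 5,
         selecting BUILT_IN_GOMP_LOOP_ORDERED_DYNAMIC_START (a reading
         of the math, assuming that builtin ordering).  */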
      start_ix = ((int)BUILT_IN_GOMP_LOOP_STATIC_START) + fn_index;
      next_ix = ((int)BUILT_IN_GOMP_LOOP_STATIC_NEXT) + fn_index;
      if (fd.iter_type == long_long_unsigned_type_node)
        {
          start_ix += ((int)BUILT_IN_GOMP_LOOP_ULL_STATIC_START
                       - (int)BUILT_IN_GOMP_LOOP_STATIC_START);
          next_ix += ((int)BUILT_IN_GOMP_LOOP_ULL_STATIC_NEXT
                      - (int)BUILT_IN_GOMP_LOOP_STATIC_NEXT);
        }
      expand_omp_for_generic (region, &fd, (enum built_in_function) start_ix,
                              (enum built_in_function) next_ix);
    }

  update_ssa (TODO_update_ssa_only_virtuals);
}


/* Expand code for an OpenMP sections directive.  In pseudo code, we generate

        v = GOMP_sections_start (n);
    L0:
        switch (v)
          {
          case 0:
            goto L2;
          case 1:
            section 1;
            goto L1;
          case 2:
            ...
          case n:
            ...
          default:
            abort ();
          }
    L1:
        v = GOMP_sections_next ();
        goto L0;
    L2:
        reduction;

   If this is a combined parallel sections, replace the call to
   GOMP_sections_start with a call to GOMP_sections_next.  */
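/* For instance (a hedged illustration):

     #pragma omp sections
     {
       #pragma omp section
         foo ();
       #pragma omp section
         bar ();
     }

   becomes a switch dispatching on GOMP_sections_start (2): case 1 runs
   foo (), case 2 runs bar (), and case 0 means there is no more work.  */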

static void
expand_omp_sections (struct omp_region *region)
{
  tree t, u, vin = NULL, vmain, vnext, l2;
  VEC (tree,heap) *label_vec;
  unsigned len;
  basic_block entry_bb, l0_bb, l1_bb, l2_bb, default_bb;
  gimple_stmt_iterator si, switch_si;
  gimple sections_stmt, stmt, cont;
  edge_iterator ei;
  edge e;
  struct omp_region *inner;
  unsigned i, casei;
  bool exit_reachable = region->cont != NULL;

  gcc_assert (exit_reachable == (region->exit != NULL));
  entry_bb = region->entry;
  l0_bb = single_succ (entry_bb);
  l1_bb = region->cont;
  l2_bb = region->exit;
  if (exit_reachable)
    {
      if (single_pred_p (l2_bb) && single_pred (l2_bb) == l0_bb)
        l2 = gimple_block_label (l2_bb);
      else
        {
          /* This can happen if there are reductions.  */
          len = EDGE_COUNT (l0_bb->succs);
          gcc_assert (len > 0);
          e = EDGE_SUCC (l0_bb, len - 1);
          si = gsi_last_bb (e->dest);
          l2 = NULL_TREE;
          if (gsi_end_p (si)
              || gimple_code (gsi_stmt (si)) != GIMPLE_OMP_SECTION)
            l2 = gimple_block_label (e->dest);
          else
            FOR_EACH_EDGE (e, ei, l0_bb->succs)
              {
                si = gsi_last_bb (e->dest);
                if (gsi_end_p (si)
                    || gimple_code (gsi_stmt (si)) != GIMPLE_OMP_SECTION)
                  {
                    l2 = gimple_block_label (e->dest);
                    break;
                  }
              }
        }
      default_bb = create_empty_bb (l1_bb->prev_bb);
    }
  else
    {
      default_bb = create_empty_bb (l0_bb);
      l2 = gimple_block_label (default_bb);
    }

  /* We will build a switch() with enough cases for all the
     GIMPLE_OMP_SECTION regions, a '0' case to handle the end of more work
     and a default case to abort if something goes wrong.  */
  len = EDGE_COUNT (l0_bb->succs);

  /* Use VEC_quick_push on label_vec throughout, since we know the size
     in advance.  */
  label_vec = VEC_alloc (tree, heap, len);

  /* The call to GOMP_sections_start goes in ENTRY_BB, replacing the
     GIMPLE_OMP_SECTIONS statement.  */
  si = gsi_last_bb (entry_bb);
  sections_stmt = gsi_stmt (si);
  gcc_assert (gimple_code (sections_stmt) == GIMPLE_OMP_SECTIONS);
  vin = gimple_omp_sections_control (sections_stmt);
  if (!is_combined_parallel (region))
    {
      /* If we are not inside a combined parallel+sections region,
         call GOMP_sections_start.  */
      t = build_int_cst (unsigned_type_node,
                         exit_reachable ? len - 1 : len);
      u = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_START);
      stmt = gimple_build_call (u, 1, t);
    }
  else
    {
      /* Otherwise, call GOMP_sections_next.  */
      u = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_NEXT);
      stmt = gimple_build_call (u, 0);
    }
  gimple_call_set_lhs (stmt, vin);
  gsi_insert_after (&si, stmt, GSI_SAME_STMT);
  gsi_remove (&si, true);

  /* The switch() statement replacing GIMPLE_OMP_SECTIONS_SWITCH goes in
     L0_BB.  */
  switch_si = gsi_last_bb (l0_bb);
  gcc_assert (gimple_code (gsi_stmt (switch_si)) == GIMPLE_OMP_SECTIONS_SWITCH);
  if (exit_reachable)
    {
      cont = last_stmt (l1_bb);
      gcc_assert (gimple_code (cont) == GIMPLE_OMP_CONTINUE);
      vmain = gimple_omp_continue_control_use (cont);
      vnext = gimple_omp_continue_control_def (cont);
    }
  else
    {
      vmain = vin;
      vnext = NULL_TREE;
    }

  i = 0;
  if (exit_reachable)
    {
      t = build_case_label (build_int_cst (unsigned_type_node, 0), NULL, l2);
      VEC_quick_push (tree, label_vec, t);
      i++;
    }

  /* Convert each GIMPLE_OMP_SECTION into a CASE_LABEL_EXPR.  */
  for (inner = region->inner, casei = 1;
       inner;
       inner = inner->next, i++, casei++)
    {
      basic_block s_entry_bb, s_exit_bb;

      /* Skip optional reduction region.  */
      if (inner->type == GIMPLE_OMP_ATOMIC_LOAD)
        {
          --i;
          --casei;
          continue;
        }

      s_entry_bb = inner->entry;
      s_exit_bb = inner->exit;

      t = gimple_block_label (s_entry_bb);
      u = build_int_cst (unsigned_type_node, casei);
      u = build_case_label (u, NULL, t);
      VEC_quick_push (tree, label_vec, u);

      si = gsi_last_bb (s_entry_bb);
      gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SECTION);
      gcc_assert (i < len || gimple_omp_section_last_p (gsi_stmt (si)));
      gsi_remove (&si, true);
      single_succ_edge (s_entry_bb)->flags = EDGE_FALLTHRU;

      if (s_exit_bb == NULL)
        continue;

      si = gsi_last_bb (s_exit_bb);
      gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_RETURN);
      gsi_remove (&si, true);

      single_succ_edge (s_exit_bb)->flags = EDGE_FALLTHRU;
    }

  /* Error handling code goes in DEFAULT_BB.  */
  t = gimple_block_label (default_bb);
  u = build_case_label (NULL, NULL, t);
  make_edge (l0_bb, default_bb, 0);

  stmt = gimple_build_switch_vec (vmain, u, label_vec);
  gsi_insert_after (&switch_si, stmt, GSI_SAME_STMT);
  gsi_remove (&switch_si, true);
  VEC_free (tree, heap, label_vec);

  si = gsi_start_bb (default_bb);
  stmt = gimple_build_call (builtin_decl_explicit (BUILT_IN_TRAP), 0);
  gsi_insert_after (&si, stmt, GSI_CONTINUE_LINKING);

  if (exit_reachable)
    {
      tree bfn_decl;

      /* Code to get the next section goes in L1_BB.  */
      si = gsi_last_bb (l1_bb);
      gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_CONTINUE);

      bfn_decl = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_NEXT);
      stmt = gimple_build_call (bfn_decl, 0);
      gimple_call_set_lhs (stmt, vnext);
      gsi_insert_after (&si, stmt, GSI_SAME_STMT);
      gsi_remove (&si, true);

      single_succ_edge (l1_bb)->flags = EDGE_FALLTHRU;

      /* Cleanup function replaces GIMPLE_OMP_RETURN in EXIT_BB.  */
      si = gsi_last_bb (l2_bb);
      if (gimple_omp_return_nowait_p (gsi_stmt (si)))
        t = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_END_NOWAIT);
      else
        t = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_END);
      stmt = gimple_build_call (t, 0);
      gsi_insert_after (&si, stmt, GSI_SAME_STMT);
      gsi_remove (&si, true);
    }

  set_immediate_dominator (CDI_DOMINATORS, default_bb, l0_bb);
}


777f7f9a
RH
4913/* Expand code for an OpenMP single directive. We've already expanded
4914 much of the code; here we simply place the GOMP_barrier call. */
4915
4916static void
4917expand_omp_single (struct omp_region *region)
4918{
4919 basic_block entry_bb, exit_bb;
726a989a 4920 gimple_stmt_iterator si;
777f7f9a
RH
4921 bool need_barrier = false;
4922
4923 entry_bb = region->entry;
4924 exit_bb = region->exit;
4925
726a989a 4926 si = gsi_last_bb (entry_bb);
777f7f9a
RH
4927 /* The terminal barrier at the end of a GOMP_single_copy sequence cannot
4928 be removed. We need to ensure that the thread that entered the single
4929 does not exit before the data is copied out by the other threads. */
726a989a 4930 if (find_omp_clause (gimple_omp_single_clauses (gsi_stmt (si)),
777f7f9a
RH
4931 OMP_CLAUSE_COPYPRIVATE))
4932 need_barrier = true;
726a989a
RB
4933 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SINGLE);
4934 gsi_remove (&si, true);
777f7f9a
RH
4935 single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
4936
726a989a
RB
4937 si = gsi_last_bb (exit_bb);
4938 if (!gimple_omp_return_nowait_p (gsi_stmt (si)) || need_barrier)
4939 force_gimple_operand_gsi (&si, build_omp_barrier (), false, NULL_TREE,
4940 false, GSI_SAME_STMT);
4941 gsi_remove (&si, true);
777f7f9a
RH
4942 single_succ_edge (exit_bb)->flags = EDGE_FALLTHRU;
4943}
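/* Editorial sketch of the net effect for a single region (the call to
   GOMP_single_start itself was emitted earlier, during lowering by
   lower_omp_single_simple; this pass only removes the markers and places
   the barrier):

	if (GOMP_single_start ())
	  BODY;
	GOMP_barrier ();	// dropped only for "nowait" without copyprivate
   */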
4944
4945
4946/* Generic expansion for OpenMP synchronization directives: master,
4947 ordered and critical. All we need to do here is remove the entry
4948 and exit markers for REGION. */
50674e96
DN
4949
4950static void
4951expand_omp_synch (struct omp_region *region)
4952{
4953 basic_block entry_bb, exit_bb;
726a989a 4954 gimple_stmt_iterator si;
50674e96 4955
777f7f9a
RH
4956 entry_bb = region->entry;
4957 exit_bb = region->exit;
50674e96 4958
726a989a
RB
4959 si = gsi_last_bb (entry_bb);
4960 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SINGLE
4961 || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_MASTER
4962 || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ORDERED
4963 || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_CRITICAL);
4964 gsi_remove (&si, true);
50674e96
DN
4965 single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
4966
d3c673c7
JJ
4967 if (exit_bb)
4968 {
726a989a
RB
4969 si = gsi_last_bb (exit_bb);
4970 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_RETURN);
4971 gsi_remove (&si, true);
d3c673c7
JJ
4972 single_succ_edge (exit_bb)->flags = EDGE_FALLTHRU;
4973 }
50674e96 4974}
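/* Editorial note: by the time expand_omp_synch runs, lowering has already
   wrapped the body in the real synchronization code, e.g. for a critical
   region

	GOMP_critical_start ();  BODY;  GOMP_critical_end ();

   and for master the omp_get_thread_num () == 0 check, so removing the
   GIMPLE markers and fixing up the fallthru edges is all that remains.  */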
953ff289 4975
20906c66
JJ
4976/* A subroutine of expand_omp_atomic. Attempt to implement the atomic
4977 operation as a normal volatile load. */
4978
4979static bool
4980expand_omp_atomic_load (basic_block load_bb, tree addr, tree loaded_val)
4981{
4982 /* FIXME */
4983 (void) load_bb;
4984 (void) addr;
4985 (void) loaded_val;
4986 return false;
4987}
4988
4989/* A subroutine of expand_omp_atomic. Attempt to implement the atomic
4990 operation as a normal volatile store. */
4991
4992static bool
4993expand_omp_atomic_store (basic_block load_bb, tree addr)
4994{
4995 /* FIXME */
4996 (void) load_bb;
4997 (void) addr;
4998 return false;
4999}
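/* Editorial sketch, an assumption rather than what this revision does:
   once the FIXMEs above are resolved, expand_omp_atomic_load would emit
   something along the lines of

	loaded_val = __atomic_load_n (addr, MEMMODEL_RELAXED);

   (and expand_omp_atomic_store the matching __atomic_store_n) instead of
   returning false and deferring to the fallbacks in expand_omp_atomic.  */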
5000
a509ebb5 5001/* A subroutine of expand_omp_atomic. Attempt to implement the atomic
86951993 5002 operation as a __atomic_fetch_op builtin. INDEX is log2 of the
a509ebb5
RL
5003 size of the data type, and thus usable to find the index of the builtin
5004 decl. Returns false if the expression is not of the proper form. */
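/* Editorial example: for a 4-byte int, INDEX is 2, and the computation
   "(need_new ? newbase : oldbase) + index + 1" below skips the generic
   "_N" enumerator to select the "_4" variant of the builtin.  */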
5005
5006static bool
5007expand_omp_atomic_fetch_op (basic_block load_bb,
5008 tree addr, tree loaded_val,
5009 tree stored_val, int index)
5010{
e79983f4 5011 enum built_in_function oldbase, newbase, tmpbase;
a509ebb5 5012 tree decl, itype, call;
20906c66 5013 tree lhs, rhs;
a509ebb5 5014 basic_block store_bb = single_succ (load_bb);
726a989a
RB
5015 gimple_stmt_iterator gsi;
5016 gimple stmt;
db3927fb 5017 location_t loc;
86951993 5018 enum tree_code code;
20906c66 5019 bool need_old, need_new;
86951993 5020 enum machine_mode imode;
a509ebb5
RL
5021
5022 /* We expect to find the following sequences:
b8698a0f 5023
a509ebb5 5024 load_bb:
726a989a 5025 GIMPLE_OMP_ATOMIC_LOAD (tmp, mem)
a509ebb5
RL
5026
5027 store_bb:
5028 val = tmp OP something; (or: something OP tmp)
b8698a0f 5029 GIMPLE_OMP_ATOMIC_STORE (val)
a509ebb5 5030
b8698a0f 5031 ???FIXME: Allow a more flexible sequence.
a509ebb5 5032 Perhaps use data flow to pick the statements.
b8698a0f 5033
a509ebb5
RL
5034 */
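  /* Editorial illustration: for

	#pragma omp atomic
	x += n;

     the two blocks are

	load_bb:   GIMPLE_OMP_ATOMIC_LOAD (tmp, &x)
	store_bb:  val = tmp + n;
		   GIMPLE_OMP_ATOMIC_STORE (val)

     and the pair collapses below into one
     __atomic_fetch_add (&x, n, MEMMODEL_RELAXED) call.  */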
5035
726a989a
RB
5036 gsi = gsi_after_labels (store_bb);
5037 stmt = gsi_stmt (gsi);
db3927fb 5038 loc = gimple_location (stmt);
726a989a 5039 if (!is_gimple_assign (stmt))
a509ebb5 5040 return false;
726a989a
RB
5041 gsi_next (&gsi);
5042 if (gimple_code (gsi_stmt (gsi)) != GIMPLE_OMP_ATOMIC_STORE)
a509ebb5 5043 return false;
20906c66
JJ
5044 need_new = gimple_omp_atomic_need_value_p (gsi_stmt (gsi));
5045 need_old = gimple_omp_atomic_need_value_p (last_stmt (load_bb));
5046 gcc_checking_assert (!need_old || !need_new);
a509ebb5 5047
726a989a 5048 if (!operand_equal_p (gimple_assign_lhs (stmt), stored_val, 0))
a509ebb5
RL
5049 return false;
5050
a509ebb5 5051 /* Check for one of the supported fetch-op operations. */
86951993
AM
5052 code = gimple_assign_rhs_code (stmt);
5053 switch (code)
a509ebb5
RL
5054 {
5055 case PLUS_EXPR:
5056 case POINTER_PLUS_EXPR:
86951993
AM
5057 oldbase = BUILT_IN_ATOMIC_FETCH_ADD_N;
5058 newbase = BUILT_IN_ATOMIC_ADD_FETCH_N;
a509ebb5
RL
5059 break;
5060 case MINUS_EXPR:
86951993
AM
5061 oldbase = BUILT_IN_ATOMIC_FETCH_SUB_N;
5062 newbase = BUILT_IN_ATOMIC_SUB_FETCH_N;
a509ebb5
RL
5063 break;
5064 case BIT_AND_EXPR:
86951993
AM
5065 oldbase = BUILT_IN_ATOMIC_FETCH_AND_N;
5066 newbase = BUILT_IN_ATOMIC_AND_FETCH_N;
a509ebb5
RL
5067 break;
5068 case BIT_IOR_EXPR:
86951993
AM
5069 oldbase = BUILT_IN_ATOMIC_FETCH_OR_N;
5070 newbase = BUILT_IN_ATOMIC_OR_FETCH_N;
a509ebb5
RL
5071 break;
5072 case BIT_XOR_EXPR:
86951993
AM
5073 oldbase = BUILT_IN_ATOMIC_FETCH_XOR_N;
5074 newbase = BUILT_IN_ATOMIC_XOR_FETCH_N;
a509ebb5
RL
5075 break;
5076 default:
5077 return false;
5078 }
86951993 5079
a509ebb5 5080 /* Make sure the expression is of the proper form. */
726a989a
RB
5081 if (operand_equal_p (gimple_assign_rhs1 (stmt), loaded_val, 0))
5082 rhs = gimple_assign_rhs2 (stmt);
5083 else if (commutative_tree_code (gimple_assign_rhs_code (stmt))
5084 && operand_equal_p (gimple_assign_rhs2 (stmt), loaded_val, 0))
5085 rhs = gimple_assign_rhs1 (stmt);
a509ebb5
RL
5086 else
5087 return false;
5088
e79983f4
MM
5089 tmpbase = ((enum built_in_function)
5090 ((need_new ? newbase : oldbase) + index + 1));
5091 decl = builtin_decl_explicit (tmpbase);
20790697
JJ
5092 if (decl == NULL_TREE)
5093 return false;
a509ebb5 5094 itype = TREE_TYPE (TREE_TYPE (decl));
86951993 5095 imode = TYPE_MODE (itype);
a509ebb5 5096
86951993
AM
5097 /* We could test all of the various optabs involved, but the fact of the
5098 matter is that (with the exception of i486 vs i586 and xadd) all targets
5099 that support any atomic operation optab also implement compare-and-swap.
5100 Let optabs.c take care of expanding any compare-and-swap loop. */
cedb4a1a 5101 if (!can_compare_and_swap_p (imode, true))
a509ebb5
RL
5102 return false;
5103
726a989a
RB
5104 gsi = gsi_last_bb (load_bb);
5105 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_ATOMIC_LOAD);
86951993
AM
5106
5107 /* OpenMP does not imply any barrier-like semantics on its atomic ops.
5108 It only requires that the operation happen atomically. Thus we can
5109 use the RELAXED memory model. */
5110 call = build_call_expr_loc (loc, decl, 3, addr,
5111 fold_convert_loc (loc, itype, rhs),
5112 build_int_cst (NULL, MEMMODEL_RELAXED));
5113
20906c66
JJ
5114 if (need_old || need_new)
5115 {
5116 lhs = need_old ? loaded_val : stored_val;
5117 call = fold_convert_loc (loc, TREE_TYPE (lhs), call);
5118 call = build2_loc (loc, MODIFY_EXPR, void_type_node, lhs, call);
5119 }
5120 else
5121 call = fold_convert_loc (loc, void_type_node, call);
726a989a
RB
5122 force_gimple_operand_gsi (&gsi, call, true, NULL_TREE, true, GSI_SAME_STMT);
5123 gsi_remove (&gsi, true);
a509ebb5 5124
726a989a
RB
5125 gsi = gsi_last_bb (store_bb);
5126 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_ATOMIC_STORE);
5127 gsi_remove (&gsi, true);
5128 gsi = gsi_last_bb (store_bb);
5129 gsi_remove (&gsi, true);
a509ebb5
RL
5130
5131 if (gimple_in_ssa_p (cfun))
5132 update_ssa (TODO_update_ssa_no_phi);
5133
5134 return true;
5135}
5136
5137/* A subroutine of expand_omp_atomic. Implement the atomic operation as:
5138
5139 oldval = *addr;
5140 repeat:
5141 newval = rhs; // with oldval replacing *addr in rhs
5142 curval = __sync_val_compare_and_swap (addr, oldval, newval);
5143 if (curval != oldval)
5144 { oldval = curval; goto repeat; }
5145
5146 INDEX is log2 of the size of the data type, and thus usable to find the
5147 index of the builtin decl. */
5148
5149static bool
5150expand_omp_atomic_pipeline (basic_block load_bb, basic_block store_bb,
5151 tree addr, tree loaded_val, tree stored_val,
5152 int index)
5153{
c18c98c0 5154 tree loadedi, storedi, initial, new_storedi, old_vali;
a509ebb5 5155 tree type, itype, cmpxchg, iaddr;
726a989a 5156 gimple_stmt_iterator si;
a509ebb5 5157 basic_block loop_header = single_succ (load_bb);
726a989a 5158 gimple phi, stmt;
a509ebb5 5159 edge e;
e79983f4 5160 enum built_in_function fncode;
a509ebb5 5161
86951993
AM
5162 /* ??? We need a non-pointer interface to __atomic_compare_exchange in
5163 order to use the RELAXED memory model effectively. */
e79983f4
MM
5164 fncode = (enum built_in_function)((int)BUILT_IN_SYNC_VAL_COMPARE_AND_SWAP_N
5165 + index + 1);
5166 cmpxchg = builtin_decl_explicit (fncode);
20790697
JJ
5167 if (cmpxchg == NULL_TREE)
5168 return false;
a509ebb5
RL
5169 type = TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (addr)));
5170 itype = TREE_TYPE (TREE_TYPE (cmpxchg));
5171
cedb4a1a 5172 if (!can_compare_and_swap_p (TYPE_MODE (itype), true))
a509ebb5
RL
5173 return false;
5174
726a989a
RB
5175 /* Load the initial value, replacing the GIMPLE_OMP_ATOMIC_LOAD. */
5176 si = gsi_last_bb (load_bb);
5177 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_LOAD);
5178
c18c98c0
JJ
5179 /* For floating-point values, we'll need to view-convert them to integers
5180 so that we can perform the atomic compare and swap. Simplify the
5181 following code by always setting up the "i"ntegral variables. */
5182 if (!INTEGRAL_TYPE_P (type) && !POINTER_TYPE_P (type))
5183 {
726a989a
RB
5184 tree iaddr_val;
5185
5b21f0f3
RG
5186 iaddr = create_tmp_var (build_pointer_type_for_mode (itype, ptr_mode,
5187 true), NULL);
726a989a
RB
5188 iaddr_val
5189 = force_gimple_operand_gsi (&si,
5190 fold_convert (TREE_TYPE (iaddr), addr),
5191 false, NULL_TREE, true, GSI_SAME_STMT);
5192 stmt = gimple_build_assign (iaddr, iaddr_val);
5193 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
c18c98c0
JJ
5194 loadedi = create_tmp_var (itype, NULL);
5195 if (gimple_in_ssa_p (cfun))
5196 {
5197 add_referenced_var (iaddr);
5198 add_referenced_var (loadedi);
5199 loadedi = make_ssa_name (loadedi, NULL);
5200 }
5201 }
5202 else
5203 {
5204 iaddr = addr;
5205 loadedi = loaded_val;
5206 }
726a989a 5207
70f34814
RG
5208 initial
5209 = force_gimple_operand_gsi (&si,
5210 build2 (MEM_REF, TREE_TYPE (TREE_TYPE (iaddr)),
5211 iaddr,
5212 build_int_cst (TREE_TYPE (iaddr), 0)),
5213 true, NULL_TREE, true, GSI_SAME_STMT);
c18c98c0
JJ
5214
5215 /* Move the value to the LOADEDI temporary. */
a509ebb5
RL
5216 if (gimple_in_ssa_p (cfun))
5217 {
726a989a 5218 gcc_assert (gimple_seq_empty_p (phi_nodes (loop_header)));
c18c98c0
JJ
5219 phi = create_phi_node (loadedi, loop_header);
5220 SSA_NAME_DEF_STMT (loadedi) = phi;
a509ebb5
RL
5221 SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, single_succ_edge (load_bb)),
5222 initial);
5223 }
5224 else
726a989a
RB
5225 gsi_insert_before (&si,
5226 gimple_build_assign (loadedi, initial),
5227 GSI_SAME_STMT);
c18c98c0
JJ
5228 if (loadedi != loaded_val)
5229 {
726a989a
RB
5230 gimple_stmt_iterator gsi2;
5231 tree x;
c18c98c0
JJ
5232
5233 x = build1 (VIEW_CONVERT_EXPR, type, loadedi);
726a989a 5234 gsi2 = gsi_start_bb (loop_header);
c18c98c0
JJ
5235 if (gimple_in_ssa_p (cfun))
5236 {
726a989a
RB
5237 gimple stmt;
5238 x = force_gimple_operand_gsi (&gsi2, x, true, NULL_TREE,
5239 true, GSI_SAME_STMT);
5240 stmt = gimple_build_assign (loaded_val, x);
5241 gsi_insert_before (&gsi2, stmt, GSI_SAME_STMT);
c18c98c0
JJ
5242 }
5243 else
5244 {
726a989a
RB
5245 x = build2 (MODIFY_EXPR, TREE_TYPE (loaded_val), loaded_val, x);
5246 force_gimple_operand_gsi (&gsi2, x, true, NULL_TREE,
5247 true, GSI_SAME_STMT);
c18c98c0
JJ
5248 }
5249 }
726a989a 5250 gsi_remove (&si, true);
a509ebb5 5251
726a989a
RB
5252 si = gsi_last_bb (store_bb);
5253 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_STORE);
a509ebb5 5254
c18c98c0
JJ
5255 if (iaddr == addr)
5256 storedi = stored_val;
a509ebb5 5257 else
c18c98c0 5258 storedi =
726a989a 5259 force_gimple_operand_gsi (&si,
c18c98c0
JJ
5260 build1 (VIEW_CONVERT_EXPR, itype,
5261 stored_val), true, NULL_TREE, true,
726a989a 5262 GSI_SAME_STMT);
a509ebb5
RL
5263
5264 /* Build the compare&swap statement. */
5265 new_storedi = build_call_expr (cmpxchg, 3, iaddr, loadedi, storedi);
726a989a 5266 new_storedi = force_gimple_operand_gsi (&si,
587aa063
RG
5267 fold_convert (TREE_TYPE (loadedi),
5268 new_storedi),
a509ebb5 5269 true, NULL_TREE,
726a989a 5270 true, GSI_SAME_STMT);
a509ebb5
RL
5271
5272 if (gimple_in_ssa_p (cfun))
5273 old_vali = loadedi;
5274 else
5275 {
587aa063 5276 old_vali = create_tmp_var (TREE_TYPE (loadedi), NULL);
c18c98c0
JJ
5277 if (gimple_in_ssa_p (cfun))
5278 add_referenced_var (old_vali);
726a989a
RB
5279 stmt = gimple_build_assign (old_vali, loadedi);
5280 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
a509ebb5 5281
726a989a
RB
5282 stmt = gimple_build_assign (loadedi, new_storedi);
5283 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
a509ebb5
RL
5284 }
5285
5286 /* Note that we always perform the comparison as an integer, even for
b8698a0f 5287 floating point. This allows the atomic operation to properly
a509ebb5 5288 succeed even with NaNs and -0.0. */
726a989a
RB
5289 stmt = gimple_build_cond_empty
5290 (build2 (NE_EXPR, boolean_type_node,
5291 new_storedi, old_vali));
5292 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
a509ebb5
RL
5293
5294 /* Update cfg. */
5295 e = single_succ_edge (store_bb);
5296 e->flags &= ~EDGE_FALLTHRU;
5297 e->flags |= EDGE_FALSE_VALUE;
5298
5299 e = make_edge (store_bb, loop_header, EDGE_TRUE_VALUE);
5300
c18c98c0 5301 /* Copy the new value to loadedi (we already did that before the condition
a509ebb5
RL
5302 if we are not in SSA). */
5303 if (gimple_in_ssa_p (cfun))
5304 {
726a989a 5305 phi = gimple_seq_first_stmt (phi_nodes (loop_header));
c18c98c0 5306 SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, e), new_storedi);
a509ebb5
RL
5307 }
5308
726a989a
RB
5309 /* Remove GIMPLE_OMP_ATOMIC_STORE. */
5310 gsi_remove (&si, true);
a509ebb5
RL
5311
5312 if (gimple_in_ssa_p (cfun))
5313 update_ssa (TODO_update_ssa_no_phi);
5314
5315 return true;
5316}
5317
5318/* A subroutine of expand_omp_atomic. Implement the atomic operation as:
5319
5320 GOMP_atomic_start ();
5321 *addr = rhs;
5322 GOMP_atomic_end ();
5323
5324 The result is not globally atomic, but works so long as all parallel
5325 references are within #pragma omp atomic directives. According to
5326 responses received from omp@openmp.org, this appears to be within spec.
5327 That makes sense, since it is how several other compilers handle
b8698a0f 5328 this situation as well.
726a989a
RB
5329 LOADED_VAL and ADDR are the operands of GIMPLE_OMP_ATOMIC_LOAD we're
5330 expanding. STORED_VAL is the operand of the matching
5331 GIMPLE_OMP_ATOMIC_STORE.
a509ebb5 5332
b8698a0f
L
5333 We replace
5334 GIMPLE_OMP_ATOMIC_LOAD (loaded_val, addr) with
a509ebb5
RL
5335 loaded_val = *addr;
5336
5337 and replace
726a989a 5338 GIMPLE_OMP_ATOMIC_STORE (stored_val) with
b8698a0f 5339 *addr = stored_val;
a509ebb5
RL
5340*/
5341
5342static bool
5343expand_omp_atomic_mutex (basic_block load_bb, basic_block store_bb,
5344 tree addr, tree loaded_val, tree stored_val)
5345{
726a989a
RB
5346 gimple_stmt_iterator si;
5347 gimple stmt;
a509ebb5
RL
5348 tree t;
5349
726a989a
RB
5350 si = gsi_last_bb (load_bb);
5351 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_LOAD);
a509ebb5 5352
e79983f4 5353 t = builtin_decl_explicit (BUILT_IN_GOMP_ATOMIC_START);
3bb06db4 5354 t = build_call_expr (t, 0);
726a989a 5355 force_gimple_operand_gsi (&si, t, true, NULL_TREE, true, GSI_SAME_STMT);
a509ebb5 5356
70f34814 5357 stmt = gimple_build_assign (loaded_val, build_simple_mem_ref (addr));
726a989a
RB
5358 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
5359 gsi_remove (&si, true);
a509ebb5 5360
726a989a
RB
5361 si = gsi_last_bb (store_bb);
5362 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_STORE);
a509ebb5 5363
70f34814
RG
5364 stmt = gimple_build_assign (build_simple_mem_ref (unshare_expr (addr)),
5365 stored_val);
726a989a 5366 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
a509ebb5 5367
e79983f4 5368 t = builtin_decl_explicit (BUILT_IN_GOMP_ATOMIC_END);
3bb06db4 5369 t = build_call_expr (t, 0);
726a989a
RB
5370 force_gimple_operand_gsi (&si, t, true, NULL_TREE, true, GSI_SAME_STMT);
5371 gsi_remove (&si, true);
a509ebb5
RL
5372
5373 if (gimple_in_ssa_p (cfun))
5374 update_ssa (TODO_update_ssa_no_phi);
5375 return true;
5376}
5377
b8698a0f
L
5378/* Expand a GIMPLE_OMP_ATOMIC statement. We try to expand
5379 using expand_omp_atomic_fetch_op. If that fails, we try to
a509ebb5
RL
5380 call expand_omp_atomic_pipeline, and if it fails too, the
5381 ultimate fallback is wrapping the operation in a mutex
b8698a0f
L
5382 (expand_omp_atomic_mutex). REGION is the atomic region built
5383 by build_omp_regions_1(). */
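/* Editorial illustration of the ladder: for

	#pragma omp atomic
	d += 1.0;	// double, suitably aligned

   expand_omp_atomic_fetch_op refuses (the type is neither integral nor a
   pointer), expand_omp_atomic_pipeline then view-converts the double to a
   same-sized integer and loops on __sync_val_compare_and_swap, and only
   when no compare-and-swap exists at that width do we fall back to the
   GOMP_atomic_start ()/GOMP_atomic_end () mutex.  */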
a509ebb5
RL
5384
5385static void
5386expand_omp_atomic (struct omp_region *region)
5387{
5388 basic_block load_bb = region->entry, store_bb = region->exit;
726a989a
RB
5389 gimple load = last_stmt (load_bb), store = last_stmt (store_bb);
5390 tree loaded_val = gimple_omp_atomic_load_lhs (load);
5391 tree addr = gimple_omp_atomic_load_rhs (load);
5392 tree stored_val = gimple_omp_atomic_store_val (store);
a509ebb5
RL
5393 tree type = TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (addr)));
5394 HOST_WIDE_INT index;
5395
5396 /* Make sure the type is one of the supported sizes. */
5397 index = tree_low_cst (TYPE_SIZE_UNIT (type), 1);
5398 index = exact_log2 (index);
5399 if (index >= 0 && index <= 4)
5400 {
5401 unsigned int align = TYPE_ALIGN_UNIT (type);
5402
5403 /* __sync builtins require strict data alignment. */
5404 if (exact_log2 (align) >= index)
5405 {
20906c66
JJ
5406 /* Atomic load. FIXME: have some target hook signal what loads
5407 are actually atomic? */
5408 if (loaded_val == stored_val
5409 && (GET_MODE_CLASS (TYPE_MODE (type)) == MODE_INT
5410 || GET_MODE_CLASS (TYPE_MODE (type)) == MODE_FLOAT)
5411 && GET_MODE_BITSIZE (TYPE_MODE (type)) <= BITS_PER_WORD
5412 && expand_omp_atomic_load (load_bb, addr, loaded_val))
5413 return;
5414
5415 /* Atomic store. FIXME: have some target hook signal what
5416 stores are actually atomic? */
5417 if ((GET_MODE_CLASS (TYPE_MODE (type)) == MODE_INT
5418 || GET_MODE_CLASS (TYPE_MODE (type)) == MODE_FLOAT)
5419 && GET_MODE_BITSIZE (TYPE_MODE (type)) <= BITS_PER_WORD
5420 && store_bb == single_succ (load_bb)
5421 && first_stmt (store_bb) == store
5422 && expand_omp_atomic_store (load_bb, addr))
5423 return;
5424
a509ebb5
RL
5425 /* When possible, use specialized atomic update functions. */
5426 if ((INTEGRAL_TYPE_P (type) || POINTER_TYPE_P (type))
5427 && store_bb == single_succ (load_bb))
5428 {
5429 if (expand_omp_atomic_fetch_op (load_bb, addr,
5430 loaded_val, stored_val, index))
5431 return;
5432 }
5433
5434 /* If we don't have specialized __sync builtins, try and implement
5435 as a compare and swap loop. */
5436 if (expand_omp_atomic_pipeline (load_bb, store_bb, addr,
5437 loaded_val, stored_val, index))
5438 return;
5439 }
5440 }
5441
5442 /* The ultimate fallback is wrapping the operation in a mutex. */
5443 expand_omp_atomic_mutex (load_bb, store_bb, addr, loaded_val, stored_val);
5444}
5445
953ff289 5446
50674e96
DN
5447/* Expand the parallel region tree rooted at REGION. Expansion
5448 proceeds in depth-first order. Innermost regions are expanded
5449 first. This way, parallel regions that require a new function to
726a989a 5450 be created (e.g., GIMPLE_OMP_PARALLEL) can be expanded without having any
50674e96
DN
5451 internal dependencies in their body. */
5452
5453static void
5454expand_omp (struct omp_region *region)
5455{
5456 while (region)
5457 {
b357f682
JJ
5458 location_t saved_location;
5459
068e1875
ZD
5460 /* First, determine whether this is a combined parallel+workshare
5461 region. */
726a989a 5462 if (region->type == GIMPLE_OMP_PARALLEL)
068e1875
ZD
5463 determine_parallel_type (region);
5464
50674e96
DN
5465 if (region->inner)
5466 expand_omp (region->inner);
5467
b357f682 5468 saved_location = input_location;
726a989a
RB
5469 if (gimple_has_location (last_stmt (region->entry)))
5470 input_location = gimple_location (last_stmt (region->entry));
b357f682 5471
777f7f9a 5472 switch (region->type)
50674e96 5473 {
726a989a
RB
5474 case GIMPLE_OMP_PARALLEL:
5475 case GIMPLE_OMP_TASK:
a68ab351
JJ
5476 expand_omp_taskreg (region);
5477 break;
5478
726a989a 5479 case GIMPLE_OMP_FOR:
777f7f9a
RH
5480 expand_omp_for (region);
5481 break;
50674e96 5482
726a989a 5483 case GIMPLE_OMP_SECTIONS:
777f7f9a
RH
5484 expand_omp_sections (region);
5485 break;
50674e96 5486
726a989a 5487 case GIMPLE_OMP_SECTION:
777f7f9a 5488 /* Individual omp sections are handled together with their
726a989a 5489 parent GIMPLE_OMP_SECTIONS region. */
777f7f9a 5490 break;
50674e96 5491
726a989a 5492 case GIMPLE_OMP_SINGLE:
777f7f9a
RH
5493 expand_omp_single (region);
5494 break;
50674e96 5495
726a989a
RB
5496 case GIMPLE_OMP_MASTER:
5497 case GIMPLE_OMP_ORDERED:
5498 case GIMPLE_OMP_CRITICAL:
777f7f9a
RH
5499 expand_omp_synch (region);
5500 break;
50674e96 5501
726a989a 5502 case GIMPLE_OMP_ATOMIC_LOAD:
a509ebb5
RL
5503 expand_omp_atomic (region);
5504 break;
5505
777f7f9a
RH
5506 default:
5507 gcc_unreachable ();
5508 }
8d9c1aec 5509
b357f682 5510 input_location = saved_location;
50674e96
DN
5511 region = region->next;
5512 }
5513}
5514
5515
5516/* Helper for build_omp_regions. Scan the dominator tree starting at
5f40b3cb
ZD
5517 block BB. PARENT is the region that contains BB. If SINGLE_TREE is
5518 true, the function ends once a single tree is built (otherwise, a whole
5519 forest of OMP constructs may be built). */
50674e96
DN
5520
5521static void
5f40b3cb
ZD
5522build_omp_regions_1 (basic_block bb, struct omp_region *parent,
5523 bool single_tree)
50674e96 5524{
726a989a
RB
5525 gimple_stmt_iterator gsi;
5526 gimple stmt;
50674e96
DN
5527 basic_block son;
5528
726a989a
RB
5529 gsi = gsi_last_bb (bb);
5530 if (!gsi_end_p (gsi) && is_gimple_omp (gsi_stmt (gsi)))
50674e96
DN
5531 {
5532 struct omp_region *region;
726a989a 5533 enum gimple_code code;
50674e96 5534
726a989a
RB
5535 stmt = gsi_stmt (gsi);
5536 code = gimple_code (stmt);
5537 if (code == GIMPLE_OMP_RETURN)
50674e96
DN
5538 {
5539 /* STMT is the return point out of region PARENT. Mark it
5540 as the exit point and make PARENT the immediately
5541 enclosing region. */
5542 gcc_assert (parent);
5543 region = parent;
777f7f9a 5544 region->exit = bb;
50674e96 5545 parent = parent->outer;
50674e96 5546 }
726a989a 5547 else if (code == GIMPLE_OMP_ATOMIC_STORE)
a509ebb5 5548 {
726a989a
RB
5549 /* GIMPLE_OMP_ATOMIC_STORE is analogous to
5550 GIMPLE_OMP_RETURN, but matches with
5551 GIMPLE_OMP_ATOMIC_LOAD. */
a509ebb5 5552 gcc_assert (parent);
726a989a 5553 gcc_assert (parent->type == GIMPLE_OMP_ATOMIC_LOAD);
a509ebb5
RL
5554 region = parent;
5555 region->exit = bb;
5556 parent = parent->outer;
5557 }
5558
726a989a 5559 else if (code == GIMPLE_OMP_CONTINUE)
777f7f9a
RH
5560 {
5561 gcc_assert (parent);
5562 parent->cont = bb;
5563 }
726a989a 5564 else if (code == GIMPLE_OMP_SECTIONS_SWITCH)
e5c95afe 5565 {
726a989a
RB
5566 /* GIMPLE_OMP_SECTIONS_SWITCH is part of
5567 GIMPLE_OMP_SECTIONS, and we do nothing for it. */
5568 ;
e5c95afe 5569 }
50674e96
DN
5570 else
5571 {
5572 /* Otherwise, this directive becomes the parent for a new
5573 region. */
777f7f9a 5574 region = new_omp_region (bb, code, parent);
50674e96
DN
5575 parent = region;
5576 }
50674e96
DN
5577 }
5578
5f40b3cb
ZD
5579 if (single_tree && !parent)
5580 return;
5581
50674e96
DN
5582 for (son = first_dom_son (CDI_DOMINATORS, bb);
5583 son;
5584 son = next_dom_son (CDI_DOMINATORS, son))
5f40b3cb
ZD
5585 build_omp_regions_1 (son, parent, single_tree);
5586}
5587
5588/* Builds the tree of OMP regions rooted at ROOT, storing it to
5589 root_omp_region. */
5590
5591static void
5592build_omp_regions_root (basic_block root)
5593{
5594 gcc_assert (root_omp_region == NULL);
5595 build_omp_regions_1 (root, NULL, true);
5596 gcc_assert (root_omp_region != NULL);
50674e96
DN
5597}
5598
5f40b3cb
ZD
5599/* Expand the OMP construct (and its subconstructs) starting in HEAD. */
5600
5601void
5602omp_expand_local (basic_block head)
5603{
5604 build_omp_regions_root (head);
5605 if (dump_file && (dump_flags & TDF_DETAILS))
5606 {
5607 fprintf (dump_file, "\nOMP region tree\n\n");
5608 dump_omp_region (dump_file, root_omp_region, 0);
5609 fprintf (dump_file, "\n");
5610 }
5611
5612 remove_exit_barriers (root_omp_region);
5613 expand_omp (root_omp_region);
5614
5615 free_omp_regions ();
5616}
50674e96
DN
5617
5618/* Scan the CFG and build a tree of OMP regions, storing the root of
5619 the OMP region tree in root_omp_region. */
5620
5621static void
5622build_omp_regions (void)
5623{
777f7f9a 5624 gcc_assert (root_omp_region == NULL);
50674e96 5625 calculate_dominance_info (CDI_DOMINATORS);
5f40b3cb 5626 build_omp_regions_1 (ENTRY_BLOCK_PTR, NULL, false);
50674e96
DN
5627}
5628
50674e96
DN
5629/* Main entry point for expanding OMP-GIMPLE into runtime calls. */
5630
c2924966 5631static unsigned int
50674e96
DN
5632execute_expand_omp (void)
5633{
5634 build_omp_regions ();
5635
777f7f9a
RH
5636 if (!root_omp_region)
5637 return 0;
50674e96 5638
777f7f9a
RH
5639 if (dump_file)
5640 {
5641 fprintf (dump_file, "\nOMP region tree\n\n");
5642 dump_omp_region (dump_file, root_omp_region, 0);
5643 fprintf (dump_file, "\n");
50674e96 5644 }
777f7f9a
RH
5645
5646 remove_exit_barriers (root_omp_region);
5647
5648 expand_omp (root_omp_region);
5649
777f7f9a
RH
5650 cleanup_tree_cfg ();
5651
5652 free_omp_regions ();
5653
c2924966 5654 return 0;
50674e96
DN
5655}
5656
917948d3
ZD
5657/* OMP expansion -- the default pass, run before creation of SSA form. */
5658
50674e96
DN
5659static bool
5660gate_expand_omp (void)
5661{
1da2ed5f 5662 return (flag_openmp != 0 && !seen_error ());
50674e96
DN
5663}
5664
b8698a0f 5665struct gimple_opt_pass pass_expand_omp =
50674e96 5666{
8ddbbcae
JH
5667 {
5668 GIMPLE_PASS,
50674e96
DN
5669 "ompexp", /* name */
5670 gate_expand_omp, /* gate */
5671 execute_expand_omp, /* execute */
5672 NULL, /* sub */
5673 NULL, /* next */
5674 0, /* static_pass_number */
7072a650 5675 TV_NONE, /* tv_id */
50674e96 5676 PROP_gimple_any, /* properties_required */
535b544a 5677 0, /* properties_provided */
50674e96
DN
5678 0, /* properties_destroyed */
5679 0, /* todo_flags_start */
22c5fa5f 5680 0 /* todo_flags_finish */
8ddbbcae 5681 }
50674e96
DN
5682};
5683\f
5684/* Routines to lower OpenMP directives into OMP-GIMPLE. */
5685
726a989a
RB
5686/* Lower the OpenMP sections directive in the current statement in GSI_P.
5687 CTX is the enclosing OMP context for the current statement. */
50674e96
DN
5688
5689static void
726a989a 5690lower_omp_sections (gimple_stmt_iterator *gsi_p, omp_context *ctx)
50674e96 5691{
726a989a
RB
5692 tree block, control;
5693 gimple_stmt_iterator tgsi;
50674e96 5694 unsigned i, len;
726a989a
RB
5695 gimple stmt, new_stmt, bind, t;
5696 gimple_seq ilist, dlist, olist, new_body, body;
d406b663 5697 struct gimplify_ctx gctx;
50674e96 5698
726a989a 5699 stmt = gsi_stmt (*gsi_p);
50674e96 5700
d406b663 5701 push_gimplify_context (&gctx);
50674e96
DN
5702
5703 dlist = NULL;
5704 ilist = NULL;
726a989a
RB
5705 lower_rec_input_clauses (gimple_omp_sections_clauses (stmt),
5706 &ilist, &dlist, ctx);
50674e96 5707
726a989a
RB
5708 tgsi = gsi_start (gimple_omp_body (stmt));
5709 for (len = 0; !gsi_end_p (tgsi); len++, gsi_next (&tgsi))
50674e96
DN
5710 continue;
5711
726a989a
RB
5712 tgsi = gsi_start (gimple_omp_body (stmt));
5713 body = NULL;
5714 for (i = 0; i < len; i++, gsi_next (&tgsi))
50674e96
DN
5715 {
5716 omp_context *sctx;
726a989a 5717 gimple sec_start;
50674e96 5718
726a989a 5719 sec_start = gsi_stmt (tgsi);
50674e96
DN
5720 sctx = maybe_lookup_ctx (sec_start);
5721 gcc_assert (sctx);
5722
726a989a 5723 gimple_seq_add_stmt (&body, sec_start);
777f7f9a 5724
726a989a
RB
5725 lower_omp (gimple_omp_body (sec_start), sctx);
5726 gimple_seq_add_seq (&body, gimple_omp_body (sec_start));
5727 gimple_omp_set_body (sec_start, NULL);
50674e96
DN
5728
5729 if (i == len - 1)
5730 {
726a989a
RB
5731 gimple_seq l = NULL;
5732 lower_lastprivate_clauses (gimple_omp_sections_clauses (stmt), NULL,
50674e96 5733 &l, ctx);
726a989a
RB
5734 gimple_seq_add_seq (&body, l);
5735 gimple_omp_section_set_last (sec_start);
50674e96 5736 }
b8698a0f 5737
726a989a 5738 gimple_seq_add_stmt (&body, gimple_build_omp_return (false));
50674e96 5739 }
953ff289
DN
5740
5741 block = make_node (BLOCK);
726a989a 5742 bind = gimple_build_bind (NULL, body, block);
953ff289 5743
726a989a
RB
5744 olist = NULL;
5745 lower_reduction_clauses (gimple_omp_sections_clauses (stmt), &olist, ctx);
50674e96 5746
b357f682 5747 block = make_node (BLOCK);
726a989a 5748 new_stmt = gimple_build_bind (NULL, NULL, block);
50674e96 5749
b357f682 5750 pop_gimplify_context (new_stmt);
726a989a
RB
5751 gimple_bind_append_vars (new_stmt, ctx->block_vars);
5752 BLOCK_VARS (block) = gimple_bind_vars (bind);
b357f682
JJ
5753 if (BLOCK_VARS (block))
5754 TREE_USED (block) = 1;
5755
726a989a
RB
5756 new_body = NULL;
5757 gimple_seq_add_seq (&new_body, ilist);
5758 gimple_seq_add_stmt (&new_body, stmt);
5759 gimple_seq_add_stmt (&new_body, gimple_build_omp_sections_switch ());
5760 gimple_seq_add_stmt (&new_body, bind);
777f7f9a 5761
e5c95afe 5762 control = create_tmp_var (unsigned_type_node, ".section");
726a989a
RB
5763 t = gimple_build_omp_continue (control, control);
5764 gimple_omp_sections_set_control (stmt, control);
5765 gimple_seq_add_stmt (&new_body, t);
777f7f9a 5766
726a989a
RB
5767 gimple_seq_add_seq (&new_body, olist);
5768 gimple_seq_add_seq (&new_body, dlist);
50674e96 5769
726a989a 5770 new_body = maybe_catch_exception (new_body);
4a31b7ee 5771
726a989a
RB
5772 t = gimple_build_omp_return
5773 (!!find_omp_clause (gimple_omp_sections_clauses (stmt),
5774 OMP_CLAUSE_NOWAIT));
5775 gimple_seq_add_stmt (&new_body, t);
777f7f9a 5776
726a989a
RB
5777 gimple_bind_set_body (new_stmt, new_body);
5778 gimple_omp_set_body (stmt, NULL);
50674e96 5779
726a989a 5780 gsi_replace (gsi_p, new_stmt, true);
953ff289
DN
5781}
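/* Editorial illustration: for

	#pragma omp sections
	{
	  #pragma omp section
	    S1;
	  #pragma omp section
	    S2;
	}

   the sequence built above is roughly

	<ilist: input clause setup>
	GIMPLE_OMP_SECTIONS <clauses, control = .section>
	GIMPLE_OMP_SECTIONS_SWITCH
	{ S1; GIMPLE_OMP_RETURN;  S2; GIMPLE_OMP_RETURN; }
	GIMPLE_OMP_CONTINUE (.section, .section)
	<olist/dlist: reductions and destructors>
	GIMPLE_OMP_RETURN

   which pass_expand_omp later turns into the GOMP_sections_* dispatch
   loop (see expand_omp_sections above).  */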
5782
5783
50674e96 5784/* A subroutine of lower_omp_single. Expand the simple form of
726a989a 5785 a GIMPLE_OMP_SINGLE, without a copyprivate clause:
953ff289
DN
5786
5787 if (GOMP_single_start ())
5788 BODY;
5789 [ GOMP_barrier (); ] -> unless 'nowait' is present.
50674e96
DN
5790
5791 FIXME. It may be better to delay expanding the logic of this until
5792 pass_expand_omp. The expanded logic may make the job more difficult
5793 for a synchronization analysis pass. */
953ff289
DN
5794
5795static void
726a989a 5796lower_omp_single_simple (gimple single_stmt, gimple_seq *pre_p)
953ff289 5797{
c2255bc4
AH
5798 location_t loc = gimple_location (single_stmt);
5799 tree tlabel = create_artificial_label (loc);
5800 tree flabel = create_artificial_label (loc);
726a989a
RB
5801 gimple call, cond;
5802 tree lhs, decl;
5803
e79983f4 5804 decl = builtin_decl_explicit (BUILT_IN_GOMP_SINGLE_START);
726a989a
RB
5805 lhs = create_tmp_var (TREE_TYPE (TREE_TYPE (decl)), NULL);
5806 call = gimple_build_call (decl, 0);
5807 gimple_call_set_lhs (call, lhs);
5808 gimple_seq_add_stmt (pre_p, call);
5809
5810 cond = gimple_build_cond (EQ_EXPR, lhs,
db3927fb
AH
5811 fold_convert_loc (loc, TREE_TYPE (lhs),
5812 boolean_true_node),
726a989a
RB
5813 tlabel, flabel);
5814 gimple_seq_add_stmt (pre_p, cond);
5815 gimple_seq_add_stmt (pre_p, gimple_build_label (tlabel));
5816 gimple_seq_add_seq (pre_p, gimple_omp_body (single_stmt));
5817 gimple_seq_add_stmt (pre_p, gimple_build_label (flabel));
953ff289
DN
5818}
5819
50674e96
DN
5820
5821/* A subroutine of lower_omp_single. Expand the simple form of
726a989a 5822 a GIMPLE_OMP_SINGLE, with a copyprivate clause:
953ff289
DN
5823
5824 #pragma omp single copyprivate (a, b, c)
5825
5826 Create a new structure to hold copies of 'a', 'b' and 'c' and emit:
5827
5828 {
5829 if ((copyout_p = GOMP_single_copy_start ()) == NULL)
5830 {
5831 BODY;
5832 copyout.a = a;
5833 copyout.b = b;
5834 copyout.c = c;
5835 GOMP_single_copy_end (&copyout);
5836 }
5837 else
5838 {
5839 a = copyout_p->a;
5840 b = copyout_p->b;
5841 c = copyout_p->c;
5842 }
5843 GOMP_barrier ();
5844 }
50674e96
DN
5845
5846 FIXME. It may be better to delay expanding the logic of this until
5847 pass_expand_omp. The expanded logic may make the job more difficult
5848 for a synchronization analysis pass. */
953ff289
DN
5849
5850static void
726a989a 5851lower_omp_single_copy (gimple single_stmt, gimple_seq *pre_p, omp_context *ctx)
953ff289 5852{
e79983f4 5853 tree ptr_type, t, l0, l1, l2, bfn_decl;
726a989a 5854 gimple_seq copyin_seq;
c2255bc4 5855 location_t loc = gimple_location (single_stmt);
953ff289
DN
5856
5857 ctx->sender_decl = create_tmp_var (ctx->record_type, ".omp_copy_o");
5858
5859 ptr_type = build_pointer_type (ctx->record_type);
5860 ctx->receiver_decl = create_tmp_var (ptr_type, ".omp_copy_i");
5861
c2255bc4
AH
5862 l0 = create_artificial_label (loc);
5863 l1 = create_artificial_label (loc);
5864 l2 = create_artificial_label (loc);
953ff289 5865
e79983f4
MM
5866 bfn_decl = builtin_decl_explicit (BUILT_IN_GOMP_SINGLE_COPY_START);
5867 t = build_call_expr_loc (loc, bfn_decl, 0);
db3927fb 5868 t = fold_convert_loc (loc, ptr_type, t);
726a989a 5869 gimplify_assign (ctx->receiver_decl, t, pre_p);
953ff289
DN
5870
5871 t = build2 (EQ_EXPR, boolean_type_node, ctx->receiver_decl,
5872 build_int_cst (ptr_type, 0));
5873 t = build3 (COND_EXPR, void_type_node, t,
5874 build_and_jump (&l0), build_and_jump (&l1));
5875 gimplify_and_add (t, pre_p);
5876
726a989a 5877 gimple_seq_add_stmt (pre_p, gimple_build_label (l0));
953ff289 5878
726a989a 5879 gimple_seq_add_seq (pre_p, gimple_omp_body (single_stmt));
953ff289
DN
5880
5881 copyin_seq = NULL;
726a989a 5882 lower_copyprivate_clauses (gimple_omp_single_clauses (single_stmt), pre_p,
953ff289
DN
5883 &copyin_seq, ctx);
5884
db3927fb 5885 t = build_fold_addr_expr_loc (loc, ctx->sender_decl);
e79983f4
MM
5886 bfn_decl = builtin_decl_explicit (BUILT_IN_GOMP_SINGLE_COPY_END);
5887 t = build_call_expr_loc (loc, bfn_decl, 1, t);
953ff289
DN
5888 gimplify_and_add (t, pre_p);
5889
5890 t = build_and_jump (&l2);
5891 gimplify_and_add (t, pre_p);
5892
726a989a 5893 gimple_seq_add_stmt (pre_p, gimple_build_label (l1));
953ff289 5894
726a989a 5895 gimple_seq_add_seq (pre_p, copyin_seq);
953ff289 5896
726a989a 5897 gimple_seq_add_stmt (pre_p, gimple_build_label (l2));
953ff289
DN
5898}
5899
50674e96 5900
953ff289
DN
5901/* Lower code for an OpenMP single directive. */
5902
5903static void
726a989a 5904lower_omp_single (gimple_stmt_iterator *gsi_p, omp_context *ctx)
953ff289 5905{
726a989a
RB
5906 tree block;
5907 gimple t, bind, single_stmt = gsi_stmt (*gsi_p);
5908 gimple_seq bind_body, dlist;
d406b663 5909 struct gimplify_ctx gctx;
953ff289 5910
d406b663 5911 push_gimplify_context (&gctx);
953ff289 5912
726a989a
RB
5913 bind_body = NULL;
5914 lower_rec_input_clauses (gimple_omp_single_clauses (single_stmt),
5915 &bind_body, &dlist, ctx);
5916 lower_omp (gimple_omp_body (single_stmt), ctx);
953ff289 5917
726a989a 5918 gimple_seq_add_stmt (&bind_body, single_stmt);
953ff289
DN
5919
5920 if (ctx->record_type)
726a989a 5921 lower_omp_single_copy (single_stmt, &bind_body, ctx);
953ff289 5922 else
726a989a
RB
5923 lower_omp_single_simple (single_stmt, &bind_body);
5924
5925 gimple_omp_set_body (single_stmt, NULL);
953ff289 5926
726a989a 5927 gimple_seq_add_seq (&bind_body, dlist);
777f7f9a 5928
726a989a 5929 bind_body = maybe_catch_exception (bind_body);
777f7f9a 5930
b8698a0f 5931 t = gimple_build_omp_return
726a989a
RB
5932 (!!find_omp_clause (gimple_omp_single_clauses (single_stmt),
5933 OMP_CLAUSE_NOWAIT));
5934 gimple_seq_add_stmt (&bind_body, t);
4a31b7ee 5935
726a989a
RB
5936 block = make_node (BLOCK);
5937 bind = gimple_build_bind (NULL, bind_body, block);
777f7f9a 5938
953ff289 5939 pop_gimplify_context (bind);
50674e96 5940
726a989a
RB
5941 gimple_bind_append_vars (bind, ctx->block_vars);
5942 BLOCK_VARS (block) = ctx->block_vars;
5943 gsi_replace (gsi_p, bind, true);
b357f682
JJ
5944 if (BLOCK_VARS (block))
5945 TREE_USED (block) = 1;
953ff289
DN
5946}
5947
50674e96 5948
953ff289
DN
5949/* Lower code for an OpenMP master directive. */
5950
5951static void
726a989a 5952lower_omp_master (gimple_stmt_iterator *gsi_p, omp_context *ctx)
953ff289 5953{
e79983f4 5954 tree block, lab = NULL, x, bfn_decl;
726a989a 5955 gimple stmt = gsi_stmt (*gsi_p), bind;
db3927fb 5956 location_t loc = gimple_location (stmt);
726a989a 5957 gimple_seq tseq;
d406b663 5958 struct gimplify_ctx gctx;
953ff289 5959
d406b663 5960 push_gimplify_context (&gctx);
953ff289
DN
5961
5962 block = make_node (BLOCK);
726a989a
RB
5963 bind = gimple_build_bind (NULL, gimple_seq_alloc_with_stmt (stmt),
5964 block);
777f7f9a 5965
e79983f4
MM
5966 bfn_decl = builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM);
5967 x = build_call_expr_loc (loc, bfn_decl, 0);
953ff289
DN
5968 x = build2 (EQ_EXPR, boolean_type_node, x, integer_zero_node);
5969 x = build3 (COND_EXPR, void_type_node, x, NULL, build_and_jump (&lab));
726a989a
RB
5970 tseq = NULL;
5971 gimplify_and_add (x, &tseq);
5972 gimple_bind_add_seq (bind, tseq);
953ff289 5973
726a989a
RB
5974 lower_omp (gimple_omp_body (stmt), ctx);
5975 gimple_omp_set_body (stmt, maybe_catch_exception (gimple_omp_body (stmt)));
5976 gimple_bind_add_seq (bind, gimple_omp_body (stmt));
5977 gimple_omp_set_body (stmt, NULL);
953ff289 5978
726a989a 5979 gimple_bind_add_stmt (bind, gimple_build_label (lab));
777f7f9a 5980
726a989a 5981 gimple_bind_add_stmt (bind, gimple_build_omp_return (true));
777f7f9a 5982
953ff289 5983 pop_gimplify_context (bind);
50674e96 5984
726a989a
RB
5985 gimple_bind_append_vars (bind, ctx->block_vars);
5986 BLOCK_VARS (block) = ctx->block_vars;
5987 gsi_replace (gsi_p, bind, true);
953ff289
DN
5988}
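/* Editorial illustration: the bind built above gates the body on the
   thread number, with no runtime lock involved:

	if (omp_get_thread_num () != 0)
	  goto lab;
	BODY;
      lab:

   followed by a GIMPLE_OMP_RETURN with the nowait flag set, since master
   has no implied barrier.  */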
5989
50674e96 5990
953ff289
DN
5991/* Lower code for an OpenMP ordered directive. */
5992
5993static void
726a989a 5994lower_omp_ordered (gimple_stmt_iterator *gsi_p, omp_context *ctx)
953ff289 5995{
726a989a
RB
5996 tree block;
5997 gimple stmt = gsi_stmt (*gsi_p), bind, x;
d406b663 5998 struct gimplify_ctx gctx;
953ff289 5999
d406b663 6000 push_gimplify_context (&gctx);
953ff289
DN
6001
6002 block = make_node (BLOCK);
726a989a
RB
6003 bind = gimple_build_bind (NULL, gimple_seq_alloc_with_stmt (stmt),
6004 block);
777f7f9a 6005
e79983f4
MM
6006 x = gimple_build_call (builtin_decl_explicit (BUILT_IN_GOMP_ORDERED_START),
6007 0);
726a989a 6008 gimple_bind_add_stmt (bind, x);
953ff289 6009
726a989a
RB
6010 lower_omp (gimple_omp_body (stmt), ctx);
6011 gimple_omp_set_body (stmt, maybe_catch_exception (gimple_omp_body (stmt)));
6012 gimple_bind_add_seq (bind, gimple_omp_body (stmt));
6013 gimple_omp_set_body (stmt, NULL);
953ff289 6014
e79983f4 6015 x = gimple_build_call (builtin_decl_explicit (BUILT_IN_GOMP_ORDERED_END), 0);
726a989a 6016 gimple_bind_add_stmt (bind, x);
777f7f9a 6017
726a989a 6018 gimple_bind_add_stmt (bind, gimple_build_omp_return (true));
777f7f9a 6019
953ff289 6020 pop_gimplify_context (bind);
50674e96 6021
726a989a
RB
6022 gimple_bind_append_vars (bind, ctx->block_vars);
6023 BLOCK_VARS (block) = gimple_bind_vars (bind);
6024 gsi_replace (gsi_p, bind, true);
953ff289
DN
6025}
6026
953ff289 6027
726a989a 6028/* Gimplify a GIMPLE_OMP_CRITICAL statement. This is a relatively simple
953ff289
DN
6029 substitution of a couple of function calls. But in the NAMED case, it
6030 requires that languages coordinate a symbol name. It is therefore
6031 best put here in common code. */
6032
6033static GTY((param1_is (tree), param2_is (tree)))
6034 splay_tree critical_name_mutexes;
6035
6036static void
726a989a 6037lower_omp_critical (gimple_stmt_iterator *gsi_p, omp_context *ctx)
953ff289 6038{
726a989a
RB
6039 tree block;
6040 tree name, lock, unlock;
6041 gimple stmt = gsi_stmt (*gsi_p), bind;
db3927fb 6042 location_t loc = gimple_location (stmt);
726a989a 6043 gimple_seq tbody;
d406b663 6044 struct gimplify_ctx gctx;
953ff289 6045
726a989a 6046 name = gimple_omp_critical_name (stmt);
953ff289
DN
6047 if (name)
6048 {
5039610b 6049 tree decl;
953ff289
DN
6050 splay_tree_node n;
6051
6052 if (!critical_name_mutexes)
6053 critical_name_mutexes
a9429e29
LB
6054 = splay_tree_new_ggc (splay_tree_compare_pointers,
6055 ggc_alloc_splay_tree_tree_node_tree_node_splay_tree_s,
6056 ggc_alloc_splay_tree_tree_node_tree_node_splay_tree_node_s);
953ff289
DN
6057
6058 n = splay_tree_lookup (critical_name_mutexes, (splay_tree_key) name);
6059 if (n == NULL)
6060 {
6061 char *new_str;
6062
6063 decl = create_tmp_var_raw (ptr_type_node, NULL);
6064
6065 new_str = ACONCAT ((".gomp_critical_user_",
6066 IDENTIFIER_POINTER (name), NULL));
6067 DECL_NAME (decl) = get_identifier (new_str);
6068 TREE_PUBLIC (decl) = 1;
6069 TREE_STATIC (decl) = 1;
6070 DECL_COMMON (decl) = 1;
6071 DECL_ARTIFICIAL (decl) = 1;
6072 DECL_IGNORED_P (decl) = 1;
8a4a83ed 6073 varpool_finalize_decl (decl);
953ff289
DN
6074
6075 splay_tree_insert (critical_name_mutexes, (splay_tree_key) name,
6076 (splay_tree_value) decl);
6077 }
6078 else
6079 decl = (tree) n->value;
6080
e79983f4 6081 lock = builtin_decl_explicit (BUILT_IN_GOMP_CRITICAL_NAME_START);
db3927fb 6082 lock = build_call_expr_loc (loc, lock, 1, build_fold_addr_expr_loc (loc, decl));
953ff289 6083
e79983f4 6084 unlock = builtin_decl_explicit (BUILT_IN_GOMP_CRITICAL_NAME_END);
db3927fb
AH
6085 unlock = build_call_expr_loc (loc, unlock, 1,
6086 build_fold_addr_expr_loc (loc, decl));
953ff289
DN
6087 }
6088 else
6089 {
e79983f4 6090 lock = builtin_decl_explicit (BUILT_IN_GOMP_CRITICAL_START);
db3927fb 6091 lock = build_call_expr_loc (loc, lock, 0);
953ff289 6092
e79983f4 6093 unlock = builtin_decl_explicit (BUILT_IN_GOMP_CRITICAL_END);
db3927fb 6094 unlock = build_call_expr_loc (loc, unlock, 0);
953ff289
DN
6095 }
6096
d406b663 6097 push_gimplify_context (&gctx);
953ff289
DN
6098
6099 block = make_node (BLOCK);
726a989a 6100 bind = gimple_build_bind (NULL, gimple_seq_alloc_with_stmt (stmt), block);
777f7f9a 6101
726a989a
RB
6102 tbody = gimple_bind_body (bind);
6103 gimplify_and_add (lock, &tbody);
6104 gimple_bind_set_body (bind, tbody);
953ff289 6105
726a989a
RB
6106 lower_omp (gimple_omp_body (stmt), ctx);
6107 gimple_omp_set_body (stmt, maybe_catch_exception (gimple_omp_body (stmt)));
6108 gimple_bind_add_seq (bind, gimple_omp_body (stmt));
6109 gimple_omp_set_body (stmt, NULL);
953ff289 6110
726a989a
RB
6111 tbody = gimple_bind_body (bind);
6112 gimplify_and_add (unlock, &tbody);
6113 gimple_bind_set_body (bind, tbody);
777f7f9a 6114
726a989a 6115 gimple_bind_add_stmt (bind, gimple_build_omp_return (true));
953ff289
DN
6116
6117 pop_gimplify_context (bind);
726a989a
RB
6118 gimple_bind_append_vars (bind, ctx->block_vars);
6119 BLOCK_VARS (block) = gimple_bind_vars (bind);
6120 gsi_replace (gsi_p, bind, true);
50674e96
DN
6121}
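/* Editorial illustration: for

	#pragma omp critical (xyzzy)
	  x++;

   the code built above is roughly

	GOMP_critical_name_start (&.gomp_critical_user_xyzzy);
	x++;
	GOMP_critical_name_end (&.gomp_critical_user_xyzzy);

   where .gomp_critical_user_xyzzy is the TREE_PUBLIC, DECL_COMMON pointer
   created above, so every translation unit that names the same critical
   region shares a single mutex.  */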
6122
6123
6124/* A subroutine of lower_omp_for. Generate code to emit the predicate
6125 for a lastprivate clause. Given a loop control predicate of (V
6126 cond N2), we gate the clause on (!(V cond N2)). The lowered form
3d55c64b
JJ
6127 is appended to *DLIST, iterator initialization is appended to
6128 *BODY_P. */
50674e96
DN
6129
6130static void
726a989a
RB
6131lower_omp_for_lastprivate (struct omp_for_data *fd, gimple_seq *body_p,
6132 gimple_seq *dlist, struct omp_context *ctx)
50674e96 6133{
726a989a 6134 tree clauses, cond, vinit;
50674e96 6135 enum tree_code cond_code;
726a989a 6136 gimple_seq stmts;
b8698a0f 6137
a68ab351 6138 cond_code = fd->loop.cond_code;
50674e96
DN
6139 cond_code = cond_code == LT_EXPR ? GE_EXPR : LE_EXPR;
6140
6141 /* When possible, use a strict equality expression. This can let VRP
6142 type optimizations deduce the value and remove a copy. */
a68ab351 6143 if (host_integerp (fd->loop.step, 0))
50674e96 6144 {
a68ab351 6145 HOST_WIDE_INT step = TREE_INT_CST_LOW (fd->loop.step);
50674e96
DN
6146 if (step == 1 || step == -1)
6147 cond_code = EQ_EXPR;
6148 }
6149
a68ab351 6150 cond = build2 (cond_code, boolean_type_node, fd->loop.v, fd->loop.n2);
50674e96 6151
726a989a 6152 clauses = gimple_omp_for_clauses (fd->for_stmt);
3d55c64b
JJ
6153 stmts = NULL;
6154 lower_lastprivate_clauses (clauses, cond, &stmts, ctx);
726a989a 6155 if (!gimple_seq_empty_p (stmts))
3d55c64b 6156 {
726a989a 6157 gimple_seq_add_seq (&stmts, *dlist);
a68ab351 6158 *dlist = stmts;
3d55c64b
JJ
6159
6160 /* Optimize: v = 0; is usually cheaper than v = some_other_constant. */
a68ab351 6161 vinit = fd->loop.n1;
3d55c64b 6162 if (cond_code == EQ_EXPR
a68ab351
JJ
6163 && host_integerp (fd->loop.n2, 0)
6164 && ! integer_zerop (fd->loop.n2))
6165 vinit = build_int_cst (TREE_TYPE (fd->loop.v), 0);
3d55c64b
JJ
6166
6167 /* Initialize the iterator variable, so that threads that don't execute
6168 any iterations don't execute the lastprivate clauses by accident. */
726a989a 6169 gimplify_assign (fd->loop.v, vinit, body_p);
3d55c64b 6170 }
50674e96
DN
6171}
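/* Editorial illustration: for "for (i = 0; i < n; i++)" the general gate
   is "i >= n", but with a known step of +1 or -1 the thread that executed
   the final iteration sees exactly i == n, so the cheaper equality test
   is used; the vinit assignment above guarantees that a thread which
   receives no iterations cannot satisfy the gate with a stale i.  */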
6172
6173
6174/* Lower code for an OpenMP loop directive. */
6175
6176static void
726a989a 6177lower_omp_for (gimple_stmt_iterator *gsi_p, omp_context *ctx)
50674e96 6178{
726a989a 6179 tree *rhs_p, block;
50674e96 6180 struct omp_for_data fd;
726a989a 6181 gimple stmt = gsi_stmt (*gsi_p), new_stmt;
0f900dfa 6182 gimple_seq omp_for_body, body, dlist;
726a989a 6183 size_t i;
d406b663 6184 struct gimplify_ctx gctx;
50674e96 6185
d406b663 6186 push_gimplify_context (&gctx);
50674e96 6187
726a989a
RB
6188 lower_omp (gimple_omp_for_pre_body (stmt), ctx);
6189 lower_omp (gimple_omp_body (stmt), ctx);
50674e96 6190
b357f682 6191 block = make_node (BLOCK);
726a989a 6192 new_stmt = gimple_build_bind (NULL, NULL, block);
b357f682 6193
50674e96
DN
6194 /* Move declaration of temporaries in the loop body before we make
6195 it go away. */
726a989a
RB
6196 omp_for_body = gimple_omp_body (stmt);
6197 if (!gimple_seq_empty_p (omp_for_body)
6198 && gimple_code (gimple_seq_first_stmt (omp_for_body)) == GIMPLE_BIND)
6199 {
6200 tree vars = gimple_bind_vars (gimple_seq_first_stmt (omp_for_body));
6201 gimple_bind_append_vars (new_stmt, vars);
6202 }
50674e96 6203
726a989a 6204 /* The pre-body and input clauses go before the lowered GIMPLE_OMP_FOR. */
50674e96 6205 dlist = NULL;
726a989a
RB
6206 body = NULL;
6207 lower_rec_input_clauses (gimple_omp_for_clauses (stmt), &body, &dlist, ctx);
6208 gimple_seq_add_seq (&body, gimple_omp_for_pre_body (stmt));
50674e96
DN
6209
6210 /* Lower the header expressions. At this point, we can assume that
6211 the header is of the form:
6212
6213 #pragma omp for (V = VAL1; V {<|>|<=|>=} VAL2; V = V [+-] VAL3)
6214
6215 We just need to make sure that VAL1, VAL2 and VAL3 are lowered
6216 using the .omp_data_s mapping, if needed. */
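  /* Editorial illustration: if the bound "n" is a shared variable, its use
     has been remapped to the .omp_data_i record, so the final value lowers
     to a formal temporary, roughly

	D.1234 = .omp_data_i->n;	// via get_formal_tmp_var
	#pragma omp for (i = 0; i < D.1234; i = i + 1)

     (D.1234 is a made-up temporary name, for illustration only).  */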
726a989a 6217 for (i = 0; i < gimple_omp_for_collapse (stmt); i++)
a68ab351 6218 {
726a989a 6219 rhs_p = gimple_omp_for_initial_ptr (stmt, i);
a68ab351 6220 if (!is_gimple_min_invariant (*rhs_p))
726a989a 6221 *rhs_p = get_formal_tmp_var (*rhs_p, &body);
a68ab351 6222
726a989a 6223 rhs_p = gimple_omp_for_final_ptr (stmt, i);
a68ab351 6224 if (!is_gimple_min_invariant (*rhs_p))
726a989a 6225 *rhs_p = get_formal_tmp_var (*rhs_p, &body);
a68ab351 6226
726a989a 6227 rhs_p = &TREE_OPERAND (gimple_omp_for_incr (stmt, i), 1);
a68ab351 6228 if (!is_gimple_min_invariant (*rhs_p))
726a989a 6229 *rhs_p = get_formal_tmp_var (*rhs_p, &body);
a68ab351 6230 }
50674e96
DN
6231
6232 /* Once lowered, extract the bounds and clauses. */
a68ab351 6233 extract_omp_for_data (stmt, &fd, NULL);
50674e96 6234
726a989a 6235 lower_omp_for_lastprivate (&fd, &body, &dlist, ctx);
50674e96 6236
726a989a
RB
6237 gimple_seq_add_stmt (&body, stmt);
6238 gimple_seq_add_seq (&body, gimple_omp_body (stmt));
777f7f9a 6239
726a989a
RB
6240 gimple_seq_add_stmt (&body, gimple_build_omp_continue (fd.loop.v,
6241 fd.loop.v));
777f7f9a 6242
50674e96 6243 /* After the loop, add exit clauses. */
726a989a
RB
6244 lower_reduction_clauses (gimple_omp_for_clauses (stmt), &body, ctx);
6245 gimple_seq_add_seq (&body, dlist);
50674e96 6246
726a989a 6247 body = maybe_catch_exception (body);
4a31b7ee 6248
777f7f9a 6249 /* Region exit marker goes at the end of the loop body. */
726a989a 6250 gimple_seq_add_stmt (&body, gimple_build_omp_return (fd.have_nowait));
50674e96 6251
b357f682 6252 pop_gimplify_context (new_stmt);
726a989a
RB
6253
6254 gimple_bind_append_vars (new_stmt, ctx->block_vars);
6255 BLOCK_VARS (block) = gimple_bind_vars (new_stmt);
b357f682
JJ
6256 if (BLOCK_VARS (block))
6257 TREE_USED (block) = 1;
50674e96 6258
726a989a
RB
6259 gimple_bind_set_body (new_stmt, body);
6260 gimple_omp_set_body (stmt, NULL);
6261 gimple_omp_for_set_pre_body (stmt, NULL);
6262 gsi_replace (gsi_p, new_stmt, true);
953ff289
DN
6263}
6264
b8698a0f 6265/* Callback for walk_stmts. Check if the current statement only contains
726a989a 6266 GIMPLE_OMP_FOR or GIMPLE_OMP_SECTIONS. */
69f1837b
JJ
6267
6268static tree
726a989a
RB
6269check_combined_parallel (gimple_stmt_iterator *gsi_p,
6270 bool *handled_ops_p,
6271 struct walk_stmt_info *wi)
69f1837b 6272{
d3bfe4de 6273 int *info = (int *) wi->info;
726a989a 6274 gimple stmt = gsi_stmt (*gsi_p);
69f1837b 6275
726a989a
RB
6276 *handled_ops_p = true;
6277 switch (gimple_code (stmt))
69f1837b 6278 {
726a989a
RB
6279 WALK_SUBSTMTS;
6280
6281 case GIMPLE_OMP_FOR:
6282 case GIMPLE_OMP_SECTIONS:
69f1837b
JJ
6283 *info = *info == 0 ? 1 : -1;
6284 break;
6285 default:
6286 *info = -1;
6287 break;
6288 }
6289 return NULL;
6290}
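/* Editorial illustration: *info ends up as 1 only for the canonical
   combined form, e.g.

	#pragma omp parallel
	#pragma omp for
	for (...) ...

   where the parallel body contains exactly one workshare construct; a
   second workshare, or any other statement, drives *info to -1.  */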
50674e96 6291
a68ab351
JJ
6292struct omp_taskcopy_context
6293{
6294 /* This field must be at the beginning, as we do "inheritance": Some
6295 callback functions for tree-inline.c (e.g., omp_copy_decl)
6296 receive a copy_body_data pointer that is up-casted to an
6297 omp_context pointer. */
6298 copy_body_data cb;
6299 omp_context *ctx;
6300};
6301
/* Callback for copy_body_data.  Variables that have a field in the
   sender record (sfield_map) get a fresh temporary in the copyfn;
   everything else is returned unchanged.  */
6302static tree
6303task_copyfn_copy_decl (tree var, copy_body_data *cb)
6304{
6305 struct omp_taskcopy_context *tcctx = (struct omp_taskcopy_context *) cb;
6306
6307 if (splay_tree_lookup (tcctx->ctx->sfield_map, (splay_tree_key) var))
6308 return create_tmp_var (TREE_TYPE (var), NULL);
6309
6310 return var;
6311}
6312
/* Return a copy of ORIG_TYPE whose field types, sizes and offsets have
   been remapped through TCCTX's copy_body_data callbacks; needed when the
   record contains variably modified types.  */
6313static tree
6314task_copyfn_remap_type (struct omp_taskcopy_context *tcctx, tree orig_type)
6315{
6316 tree name, new_fields = NULL, type, f;
6317
6318 type = lang_hooks.types.make_type (RECORD_TYPE);
6319 name = DECL_NAME (TYPE_NAME (orig_type));
c2255bc4
AH
6320 name = build_decl (gimple_location (tcctx->ctx->stmt),
6321 TYPE_DECL, name, type);
a68ab351
JJ
6322 TYPE_NAME (type) = name;
6323
6324 for (f = TYPE_FIELDS (orig_type); f ; f = TREE_CHAIN (f))
6325 {
6326 tree new_f = copy_node (f);
6327 DECL_CONTEXT (new_f) = type;
6328 TREE_TYPE (new_f) = remap_type (TREE_TYPE (f), &tcctx->cb);
6329 TREE_CHAIN (new_f) = new_fields;
726a989a
RB
6330 walk_tree (&DECL_SIZE (new_f), copy_tree_body_r, &tcctx->cb, NULL);
6331 walk_tree (&DECL_SIZE_UNIT (new_f), copy_tree_body_r, &tcctx->cb, NULL);
6332 walk_tree (&DECL_FIELD_OFFSET (new_f), copy_tree_body_r,
6333 &tcctx->cb, NULL);
a68ab351
JJ
6334 new_fields = new_f;
6335 *pointer_map_insert (tcctx->cb.decl_map, f) = new_f;
6336 }
6337 TYPE_FIELDS (type) = nreverse (new_fields);
6338 layout_type (type);
6339 return type;
6340}
6341
6342/* Create task copyfn, the function that copies shared variable pointers
     and copy constructs firstprivate variables from the encountering
     thread's record into the task's own record. */
6343
static void
create_task_copyfn (gimple task_stmt, omp_context *ctx)
{
  struct function *child_cfun;
  tree child_fn, t, c, src, dst, f, sf, arg, sarg, decl;
  tree record_type, srecord_type, bind, list;
  bool record_needs_remap = false, srecord_needs_remap = false;
  splay_tree_node n;
  struct omp_taskcopy_context tcctx;
  struct gimplify_ctx gctx;
  location_t loc = gimple_location (task_stmt);

  child_fn = gimple_omp_task_copy_fn (task_stmt);
  child_cfun = DECL_STRUCT_FUNCTION (child_fn);
  gcc_assert (child_cfun->cfg == NULL);
  DECL_SAVED_TREE (child_fn) = alloc_stmt_list ();

  /* Reset DECL_CONTEXT on function arguments.  */
  for (t = DECL_ARGUMENTS (child_fn); t; t = DECL_CHAIN (t))
    DECL_CONTEXT (t) = child_fn;

  /* Populate the function.  */
  push_gimplify_context (&gctx);
  current_function_decl = child_fn;

  bind = build3 (BIND_EXPR, void_type_node, NULL, NULL, NULL);
  TREE_SIDE_EFFECTS (bind) = 1;
  list = NULL;
  DECL_SAVED_TREE (child_fn) = bind;
  DECL_SOURCE_LOCATION (child_fn) = gimple_location (task_stmt);

  /* Remap src and dst argument types if needed.  */
  record_type = ctx->record_type;
  srecord_type = ctx->srecord_type;
  for (f = TYPE_FIELDS (record_type); f ; f = DECL_CHAIN (f))
    if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
      {
	record_needs_remap = true;
	break;
      }
  for (f = TYPE_FIELDS (srecord_type); f ; f = DECL_CHAIN (f))
    if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
      {
	srecord_needs_remap = true;
	break;
      }

  if (record_needs_remap || srecord_needs_remap)
    {
      memset (&tcctx, '\0', sizeof (tcctx));
      tcctx.cb.src_fn = ctx->cb.src_fn;
      tcctx.cb.dst_fn = child_fn;
      tcctx.cb.src_node = cgraph_get_node (tcctx.cb.src_fn);
      gcc_checking_assert (tcctx.cb.src_node);
      tcctx.cb.dst_node = tcctx.cb.src_node;
      tcctx.cb.src_cfun = ctx->cb.src_cfun;
      tcctx.cb.copy_decl = task_copyfn_copy_decl;
      tcctx.cb.eh_lp_nr = 0;
      tcctx.cb.transform_call_graph_edges = CB_CGE_MOVE;
      tcctx.cb.decl_map = pointer_map_create ();
      tcctx.ctx = ctx;

      if (record_needs_remap)
	record_type = task_copyfn_remap_type (&tcctx, record_type);
      if (srecord_needs_remap)
	srecord_type = task_copyfn_remap_type (&tcctx, srecord_type);
    }
  else
    tcctx.cb.decl_map = NULL;

  push_cfun (child_cfun);

  arg = DECL_ARGUMENTS (child_fn);
  TREE_TYPE (arg) = build_pointer_type (record_type);
  sarg = DECL_CHAIN (arg);
  TREE_TYPE (sarg) = build_pointer_type (srecord_type);
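
  /* ARG now points to the destination block (RECORD_TYPE) and SARG to
     the source block (SRECORD_TYPE); the passes below build assignments
     from SARG's fields into ARG's.  */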

  /* First pass: initialize temporaries used in record_type and srecord_type
     sizes and field offsets.  */
  if (tcctx.cb.decl_map)
    for (c = gimple_omp_task_clauses (task_stmt); c; c = OMP_CLAUSE_CHAIN (c))
      if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_FIRSTPRIVATE)
	{
	  tree *p;

	  decl = OMP_CLAUSE_DECL (c);
	  p = (tree *) pointer_map_contains (tcctx.cb.decl_map, decl);
	  if (p == NULL)
	    continue;
	  n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
	  sf = (tree) n->value;
	  sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
	  src = build_simple_mem_ref_loc (loc, sarg);
	  src = build3 (COMPONENT_REF, TREE_TYPE (sf), src, sf, NULL);
	  t = build2 (MODIFY_EXPR, TREE_TYPE (*p), *p, src);
	  append_to_statement_list (t, &list);
	}

  /* Second pass: copy shared var pointers and copy construct non-VLA
     firstprivate vars.  */
  for (c = gimple_omp_task_clauses (task_stmt); c; c = OMP_CLAUSE_CHAIN (c))
    switch (OMP_CLAUSE_CODE (c))
      {
      case OMP_CLAUSE_SHARED:
	decl = OMP_CLAUSE_DECL (c);
	n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
	if (n == NULL)
	  break;
	f = (tree) n->value;
	if (tcctx.cb.decl_map)
	  f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
	n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
	sf = (tree) n->value;
	if (tcctx.cb.decl_map)
	  sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
	src = build_simple_mem_ref_loc (loc, sarg);
	src = build3 (COMPONENT_REF, TREE_TYPE (sf), src, sf, NULL);
	dst = build_simple_mem_ref_loc (loc, arg);
	dst = build3 (COMPONENT_REF, TREE_TYPE (f), dst, f, NULL);
	t = build2 (MODIFY_EXPR, TREE_TYPE (dst), dst, src);
	append_to_statement_list (t, &list);
	break;
      case OMP_CLAUSE_FIRSTPRIVATE:
	decl = OMP_CLAUSE_DECL (c);
	if (is_variable_sized (decl))
	  break;
	n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
	if (n == NULL)
	  break;
	f = (tree) n->value;
	if (tcctx.cb.decl_map)
	  f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
	n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
	if (n != NULL)
	  {
	    sf = (tree) n->value;
	    if (tcctx.cb.decl_map)
	      sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
	    src = build_simple_mem_ref_loc (loc, sarg);
	    src = build3 (COMPONENT_REF, TREE_TYPE (sf), src, sf, NULL);
	    if (use_pointer_for_field (decl, NULL) || is_reference (decl))
	      src = build_simple_mem_ref_loc (loc, src);
	  }
	else
	  src = decl;
	dst = build_simple_mem_ref_loc (loc, arg);
	dst = build3 (COMPONENT_REF, TREE_TYPE (f), dst, f, NULL);
	t = lang_hooks.decls.omp_clause_copy_ctor (c, dst, src);
	append_to_statement_list (t, &list);
	break;
      case OMP_CLAUSE_PRIVATE:
	if (! OMP_CLAUSE_PRIVATE_OUTER_REF (c))
	  break;
	decl = OMP_CLAUSE_DECL (c);
	n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
	f = (tree) n->value;
	if (tcctx.cb.decl_map)
	  f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
	n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
	if (n != NULL)
	  {
	    sf = (tree) n->value;
	    if (tcctx.cb.decl_map)
	      sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
	    src = build_simple_mem_ref_loc (loc, sarg);
	    src = build3 (COMPONENT_REF, TREE_TYPE (sf), src, sf, NULL);
	    if (use_pointer_for_field (decl, NULL))
	      src = build_simple_mem_ref_loc (loc, src);
	  }
	else
	  src = decl;
	dst = build_simple_mem_ref_loc (loc, arg);
	dst = build3 (COMPONENT_REF, TREE_TYPE (f), dst, f, NULL);
	t = build2 (MODIFY_EXPR, TREE_TYPE (dst), dst, src);
	append_to_statement_list (t, &list);
	break;
      default:
	break;
      }

  /* Last pass: handle VLA firstprivates.  */
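  /* For a variable-sized DECL, DECL_VALUE_EXPR is of the form *PTR for
     some pointer decl PTR.  Copy-construct the array data through that
     indirection, then store the address of the destination copy into the
     matching pointer field of the destination record.  */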
  if (tcctx.cb.decl_map)
    for (c = gimple_omp_task_clauses (task_stmt); c; c = OMP_CLAUSE_CHAIN (c))
      if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_FIRSTPRIVATE)
	{
	  tree ind, ptr, df;

	  decl = OMP_CLAUSE_DECL (c);
	  if (!is_variable_sized (decl))
	    continue;
	  n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
	  if (n == NULL)
	    continue;
	  f = (tree) n->value;
	  f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
	  gcc_assert (DECL_HAS_VALUE_EXPR_P (decl));
	  ind = DECL_VALUE_EXPR (decl);
	  gcc_assert (TREE_CODE (ind) == INDIRECT_REF);
	  gcc_assert (DECL_P (TREE_OPERAND (ind, 0)));
	  n = splay_tree_lookup (ctx->sfield_map,
				 (splay_tree_key) TREE_OPERAND (ind, 0));
	  sf = (tree) n->value;
	  sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
	  src = build_simple_mem_ref_loc (loc, sarg);
	  src = build3 (COMPONENT_REF, TREE_TYPE (sf), src, sf, NULL);
	  src = build_simple_mem_ref_loc (loc, src);
	  dst = build_simple_mem_ref_loc (loc, arg);
	  dst = build3 (COMPONENT_REF, TREE_TYPE (f), dst, f, NULL);
	  t = lang_hooks.decls.omp_clause_copy_ctor (c, dst, src);
	  append_to_statement_list (t, &list);
	  n = splay_tree_lookup (ctx->field_map,
				 (splay_tree_key) TREE_OPERAND (ind, 0));
	  df = (tree) n->value;
	  df = *(tree *) pointer_map_contains (tcctx.cb.decl_map, df);
	  ptr = build_simple_mem_ref_loc (loc, arg);
	  ptr = build3 (COMPONENT_REF, TREE_TYPE (df), ptr, df, NULL);
	  t = build2 (MODIFY_EXPR, TREE_TYPE (ptr), ptr,
		      build_fold_addr_expr_loc (loc, dst));
	  append_to_statement_list (t, &list);
	}

  t = build1 (RETURN_EXPR, void_type_node, NULL);
  append_to_statement_list (t, &list);

  if (tcctx.cb.decl_map)
    pointer_map_destroy (tcctx.cb.decl_map);
  pop_gimplify_context (NULL);
  BIND_EXPR_BODY (bind) = list;
  pop_cfun ();
  current_function_decl = ctx->cb.src_fn;
}

/* Lower the OpenMP parallel or task directive in the current statement
   in GSI_P.  CTX holds context information for the directive.  */

static void
lower_omp_taskreg (gimple_stmt_iterator *gsi_p, omp_context *ctx)
{
  tree clauses;
  tree child_fn, t;
  gimple stmt = gsi_stmt (*gsi_p);
  gimple par_bind, bind;
  gimple_seq par_body, olist, ilist, par_olist, par_ilist, new_body;
  struct gimplify_ctx gctx;
  location_t loc = gimple_location (stmt);

  clauses = gimple_omp_taskreg_clauses (stmt);
  par_bind = gimple_seq_first_stmt (gimple_omp_body (stmt));
  par_body = gimple_bind_body (par_bind);
  child_fn = ctx->cb.dst_fn;
  if (gimple_code (stmt) == GIMPLE_OMP_PARALLEL
      && !gimple_omp_parallel_combined_p (stmt))
    {
      struct walk_stmt_info wi;
      int ws_num = 0;

      memset (&wi, 0, sizeof (wi));
      wi.info = &ws_num;
      wi.val_only = true;
      walk_gimple_seq (par_body, check_combined_parallel, NULL, &wi);
      if (ws_num == 1)
	gimple_omp_parallel_set_combined_p (stmt, true);
    }
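  /* Illustration: a body such as

	#pragma omp parallel
	#pragma omp for
	for (i = 0; i < n; i++)
	  ...

     contains exactly one worksharing construct (ws_num == 1), so the
     parallel is marked combined, letting later expansion use the
     combined parallel+workshare runtime entry points.  */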
  if (ctx->srecord_type)
    create_task_copyfn (stmt, ctx);

  push_gimplify_context (&gctx);

  par_olist = NULL;
  par_ilist = NULL;
  lower_rec_input_clauses (clauses, &par_ilist, &par_olist, ctx);
  lower_omp (par_body, ctx);
  if (gimple_code (stmt) == GIMPLE_OMP_PARALLEL)
    lower_reduction_clauses (clauses, &par_olist, ctx);

  /* Declare all the variables created by mapping and the variables
     declared in the scope of the parallel body.  */
  record_vars_into (ctx->block_vars, child_fn);
  record_vars_into (gimple_bind_vars (par_bind), child_fn);

  if (ctx->record_type)
    {
      ctx->sender_decl
	= create_tmp_var (ctx->srecord_type ? ctx->srecord_type
			  : ctx->record_type, ".omp_data_o");
      DECL_NAMELESS (ctx->sender_decl) = 1;
      TREE_ADDRESSABLE (ctx->sender_decl) = 1;
      gimple_omp_taskreg_set_data_arg (stmt, ctx->sender_decl);
    }
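  /* .omp_data_o is the outgoing data block in the encountering thread;
     the child function accesses the same data through CTX->receiver_decl,
     which is initialized from its address below.  */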

  olist = NULL;
  ilist = NULL;
  lower_send_clauses (clauses, &ilist, &olist, ctx);
  lower_send_shared_vars (&ilist, &olist, ctx);

  /* Once all the expansions are done, sequence all the different
     fragments inside gimple_omp_body.  */

  new_body = NULL;

  if (ctx->record_type)
    {
      t = build_fold_addr_expr_loc (loc, ctx->sender_decl);
      /* fixup_child_record_type might have changed receiver_decl's type.  */
      t = fold_convert_loc (loc, TREE_TYPE (ctx->receiver_decl), t);
      gimple_seq_add_stmt (&new_body,
			   gimple_build_assign (ctx->receiver_decl, t));
    }

  gimple_seq_add_seq (&new_body, par_ilist);
  gimple_seq_add_seq (&new_body, par_body);
  gimple_seq_add_seq (&new_body, par_olist);
  new_body = maybe_catch_exception (new_body);
  gimple_seq_add_stmt (&new_body, gimple_build_omp_return (false));
  gimple_omp_set_body (stmt, new_body);

  bind = gimple_build_bind (NULL, NULL, gimple_bind_block (par_bind));
  gimple_bind_add_stmt (bind, stmt);
  if (ilist || olist)
    {
      gimple_seq_add_stmt (&ilist, bind);
      gimple_seq_add_seq (&ilist, olist);
      bind = gimple_build_bind (NULL, ilist, NULL);
    }

  gsi_replace (gsi_p, bind, true);

  pop_gimplify_context (NULL);
}

/* Callback for lower_omp_1.  Return non-NULL if *tp needs to be
   regimplified.  If DATA is non-NULL, lower_omp_1 is outside
   of OpenMP context, but with task_shared_vars set.  */

static tree
lower_omp_regimplify_p (tree *tp, int *walk_subtrees,
			void *data)
{
  tree t = *tp;

  /* Any variable with DECL_VALUE_EXPR needs to be regimplified.  */
  if (TREE_CODE (t) == VAR_DECL && data == NULL && DECL_HAS_VALUE_EXPR_P (t))
    return t;

  if (task_shared_vars
      && DECL_P (t)
      && bitmap_bit_p (task_shared_vars, DECL_UID (t)))
    return t;

  /* If a global variable has been privatized, TREE_CONSTANT on
     ADDR_EXPR might be wrong.  */
  if (data == NULL && TREE_CODE (t) == ADDR_EXPR)
    recompute_tree_invariant_for_addr_expr (t);

  *walk_subtrees = !TYPE_P (t) && !DECL_P (t);
  return NULL_TREE;
}

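/* Lower the statement pointed to by GSI_P.  Dispatch OpenMP constructs
   to their specific lowering routines; for everything else, regimplify
   any operands that the privatization of shared variables may have left
   in a non-GIMPLE state.  CTX is the innermost enclosing OpenMP context,
   or NULL outside of any construct.  */
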
static void
lower_omp_1 (gimple_stmt_iterator *gsi_p, omp_context *ctx)
{
  gimple stmt = gsi_stmt (*gsi_p);
  struct walk_stmt_info wi;

  if (gimple_has_location (stmt))
    input_location = gimple_location (stmt);

  if (task_shared_vars)
    memset (&wi, '\0', sizeof (wi));

  /* If we have issued syntax errors, avoid doing any heavy lifting.
     Just replace the OpenMP directives with a NOP to avoid
     confusing RTL expansion.  */
  if (seen_error () && is_gimple_omp (stmt))
    {
      gsi_replace (gsi_p, gimple_build_nop (), true);
      return;
    }

  switch (gimple_code (stmt))
    {
    case GIMPLE_COND:
      if ((ctx || task_shared_vars)
	  && (walk_tree (gimple_cond_lhs_ptr (stmt), lower_omp_regimplify_p,
			 ctx ? NULL : &wi, NULL)
	      || walk_tree (gimple_cond_rhs_ptr (stmt), lower_omp_regimplify_p,
			    ctx ? NULL : &wi, NULL)))
	gimple_regimplify_operands (stmt, gsi_p);
      break;
    case GIMPLE_CATCH:
      lower_omp (gimple_catch_handler (stmt), ctx);
      break;
    case GIMPLE_EH_FILTER:
      lower_omp (gimple_eh_filter_failure (stmt), ctx);
      break;
    case GIMPLE_TRY:
      lower_omp (gimple_try_eval (stmt), ctx);
      lower_omp (gimple_try_cleanup (stmt), ctx);
      break;
    case GIMPLE_BIND:
      lower_omp (gimple_bind_body (stmt), ctx);
      break;
    case GIMPLE_OMP_PARALLEL:
    case GIMPLE_OMP_TASK:
      ctx = maybe_lookup_ctx (stmt);
      lower_omp_taskreg (gsi_p, ctx);
      break;
    case GIMPLE_OMP_FOR:
      ctx = maybe_lookup_ctx (stmt);
      gcc_assert (ctx);
      lower_omp_for (gsi_p, ctx);
      break;
    case GIMPLE_OMP_SECTIONS:
      ctx = maybe_lookup_ctx (stmt);
      gcc_assert (ctx);
      lower_omp_sections (gsi_p, ctx);
      break;
    case GIMPLE_OMP_SINGLE:
      ctx = maybe_lookup_ctx (stmt);
      gcc_assert (ctx);
      lower_omp_single (gsi_p, ctx);
      break;
    case GIMPLE_OMP_MASTER:
      ctx = maybe_lookup_ctx (stmt);
      gcc_assert (ctx);
      lower_omp_master (gsi_p, ctx);
      break;
    case GIMPLE_OMP_ORDERED:
      ctx = maybe_lookup_ctx (stmt);
      gcc_assert (ctx);
      lower_omp_ordered (gsi_p, ctx);
      break;
    case GIMPLE_OMP_CRITICAL:
      ctx = maybe_lookup_ctx (stmt);
      gcc_assert (ctx);
      lower_omp_critical (gsi_p, ctx);
      break;
    case GIMPLE_OMP_ATOMIC_LOAD:
      if ((ctx || task_shared_vars)
	  && walk_tree (gimple_omp_atomic_load_rhs_ptr (stmt),
			lower_omp_regimplify_p, ctx ? NULL : &wi, NULL))
	gimple_regimplify_operands (stmt, gsi_p);
      break;
    default:
      if ((ctx || task_shared_vars)
	  && walk_gimple_op (stmt, lower_omp_regimplify_p,
			     ctx ? NULL : &wi))
	gimple_regimplify_operands (stmt, gsi_p);
      break;
    }
}

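/* Lower all OpenMP constructs in the statement sequence BODY.  CTX is
   the context of the innermost enclosing construct, or NULL at the
   outermost level.  */
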
static void
lower_omp (gimple_seq body, omp_context *ctx)
{
  location_t saved_location = input_location;
  gimple_stmt_iterator gsi;
  for (gsi = gsi_start (body); !gsi_end_p (gsi); gsi_next (&gsi))
    lower_omp_1 (&gsi, ctx);
  input_location = saved_location;
}

/* Main entry point.  */

static unsigned int
execute_lower_omp (void)
{
  gimple_seq body;

  /* This pass always runs, to provide PROP_gimple_lomp.
     But there is nothing to do unless -fopenmp is given.  */
  if (flag_openmp == 0)
    return 0;

  all_contexts = splay_tree_new (splay_tree_compare_pointers, 0,
				 delete_omp_context);

  body = gimple_body (current_function_decl);
  scan_omp (body, NULL);
  gcc_assert (taskreg_nesting_level == 0);

  if (all_contexts->root)
    {
      struct gimplify_ctx gctx;

      if (task_shared_vars)
	push_gimplify_context (&gctx);
      lower_omp (body, NULL);
      if (task_shared_vars)
	pop_gimplify_context (NULL);
    }

  if (all_contexts)
    {
      splay_tree_delete (all_contexts);
      all_contexts = NULL;
    }
  BITMAP_FREE (task_shared_vars);
  return 0;
}

struct gimple_opt_pass pass_lower_omp =
{
 {
  GIMPLE_PASS,
  "omplower",				/* name */
  NULL,					/* gate */
  execute_lower_omp,			/* execute */
  NULL,					/* sub */
  NULL,					/* next */
  0,					/* static_pass_number */
  TV_NONE,				/* tv_id */
  PROP_gimple_any,			/* properties_required */
  PROP_gimple_lomp,			/* properties_provided */
  0,					/* properties_destroyed */
  0,					/* todo_flags_start */
  0					/* todo_flags_finish */
 }
};

/* The following is a utility to diagnose OpenMP structured block violations.
   It is not part of the "omplower" pass, as that's invoked too late.  It
   should be invoked by the respective front ends after gimplification.  */
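/* For example, the passes below reject

	#pragma omp parallel
	{
	  goto fail;
	}
      fail:;

   with "invalid branch to/from an OpenMP structured block", since the
   goto would leave the body of the parallel construct.  */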

static splay_tree all_labels;

/* Check for mismatched contexts and generate an error if needed.  Return
   true if an error is detected.  */

static bool
diagnose_sb_0 (gimple_stmt_iterator *gsi_p,
	       gimple branch_ctx, gimple label_ctx)
{
  if (label_ctx == branch_ctx)
    return false;

  /*
     Previously we kept track of the label's entire context in diagnose_sb_[12]
     so we could traverse it and issue a correct "exit" or "enter" error
     message upon a structured block violation.

     We built the context by building a list with tree_cons'ing, but there is
     no easy counterpart in gimple tuples.  It seems like far too much work
     for issuing exit/enter error messages.  If someone really misses the
     distinct error message... patches welcome.
   */

#if 0
  /* Try to avoid confusing the user by producing an error message
     with correct "exit" or "enter" verbiage.  We prefer "exit"
     unless we can show that LABEL_CTX is nested within BRANCH_CTX.  */
  if (branch_ctx == NULL)
    exit_p = false;
  else
    {
      while (label_ctx)
	{
	  if (TREE_VALUE (label_ctx) == branch_ctx)
	    {
	      exit_p = false;
	      break;
	    }
	  label_ctx = TREE_CHAIN (label_ctx);
	}
    }

  if (exit_p)
    error ("invalid exit from OpenMP structured block");
  else
    error ("invalid entry to OpenMP structured block");
#endif

  /* If it's obvious we have an invalid entry, be specific about the error.  */
  if (branch_ctx == NULL)
    error ("invalid entry to OpenMP structured block");
  else
    /* Otherwise, be vague and lazy, but efficient.  */
    error ("invalid branch to/from an OpenMP structured block");

  gsi_replace (gsi_p, gimple_build_nop (), false);
  return true;
}

/* Pass 1: Create a minimal tree of OpenMP structured blocks, and record
   where each label is found.  */

static tree
diagnose_sb_1 (gimple_stmt_iterator *gsi_p, bool *handled_ops_p,
	       struct walk_stmt_info *wi)
{
  gimple context = (gimple) wi->info;
  gimple inner_context;
  gimple stmt = gsi_stmt (*gsi_p);

  *handled_ops_p = true;

  switch (gimple_code (stmt))
    {
    WALK_SUBSTMTS;

    case GIMPLE_OMP_PARALLEL:
    case GIMPLE_OMP_TASK:
    case GIMPLE_OMP_SECTIONS:
    case GIMPLE_OMP_SINGLE:
    case GIMPLE_OMP_SECTION:
    case GIMPLE_OMP_MASTER:
    case GIMPLE_OMP_ORDERED:
    case GIMPLE_OMP_CRITICAL:
      /* The minimal context here is just the current OMP construct.  */
      inner_context = stmt;
      wi->info = inner_context;
      walk_gimple_seq (gimple_omp_body (stmt), diagnose_sb_1, NULL, wi);
      wi->info = context;
      break;

    case GIMPLE_OMP_FOR:
      inner_context = stmt;
      wi->info = inner_context;
      /* gimple_omp_for_{index,initial,final} are all DECLs; no need to
	 walk them.  */
      walk_gimple_seq (gimple_omp_for_pre_body (stmt),
		       diagnose_sb_1, NULL, wi);
      walk_gimple_seq (gimple_omp_body (stmt), diagnose_sb_1, NULL, wi);
      wi->info = context;
      break;

    case GIMPLE_LABEL:
      splay_tree_insert (all_labels, (splay_tree_key) gimple_label_label (stmt),
			 (splay_tree_value) context);
      break;

    default:
      break;
    }

  return NULL_TREE;
}

/* Pass 2: Check each branch and see if its context differs from that of
   the destination label's context.  */

static tree
diagnose_sb_2 (gimple_stmt_iterator *gsi_p, bool *handled_ops_p,
	       struct walk_stmt_info *wi)
{
  gimple context = (gimple) wi->info;
  splay_tree_node n;
  gimple stmt = gsi_stmt (*gsi_p);

  *handled_ops_p = true;

  switch (gimple_code (stmt))
    {
    WALK_SUBSTMTS;

    case GIMPLE_OMP_PARALLEL:
    case GIMPLE_OMP_TASK:
    case GIMPLE_OMP_SECTIONS:
    case GIMPLE_OMP_SINGLE:
    case GIMPLE_OMP_SECTION:
    case GIMPLE_OMP_MASTER:
    case GIMPLE_OMP_ORDERED:
    case GIMPLE_OMP_CRITICAL:
      wi->info = stmt;
      walk_gimple_seq (gimple_omp_body (stmt), diagnose_sb_2, NULL, wi);
      wi->info = context;
      break;

    case GIMPLE_OMP_FOR:
      wi->info = stmt;
      /* gimple_omp_for_{index,initial,final} are all DECLs; no need to
	 walk them.  */
      walk_gimple_seq (gimple_omp_for_pre_body (stmt),
		       diagnose_sb_2, NULL, wi);
      walk_gimple_seq (gimple_omp_body (stmt), diagnose_sb_2, NULL, wi);
      wi->info = context;
      break;

    case GIMPLE_COND:
      {
	tree lab = gimple_cond_true_label (stmt);
	if (lab)
	  {
	    n = splay_tree_lookup (all_labels,
				   (splay_tree_key) lab);
	    diagnose_sb_0 (gsi_p, context,
			   n ? (gimple) n->value : NULL);
	  }
	lab = gimple_cond_false_label (stmt);
	if (lab)
	  {
	    n = splay_tree_lookup (all_labels,
				   (splay_tree_key) lab);
	    diagnose_sb_0 (gsi_p, context,
			   n ? (gimple) n->value : NULL);
	  }
      }
      break;

    case GIMPLE_GOTO:
      {
	tree lab = gimple_goto_dest (stmt);
	if (TREE_CODE (lab) != LABEL_DECL)
	  break;

	n = splay_tree_lookup (all_labels, (splay_tree_key) lab);
	diagnose_sb_0 (gsi_p, context, n ? (gimple) n->value : NULL);
      }
      break;

    case GIMPLE_SWITCH:
      {
	unsigned int i;
	for (i = 0; i < gimple_switch_num_labels (stmt); ++i)
	  {
	    tree lab = CASE_LABEL (gimple_switch_label (stmt, i));
	    n = splay_tree_lookup (all_labels, (splay_tree_key) lab);
	    if (n && diagnose_sb_0 (gsi_p, context, (gimple) n->value))
	      break;
	  }
      }
      break;

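    /* A return transfers control out of every enclosing OpenMP
       construct, so any non-NULL branch context is a violation.  */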
    case GIMPLE_RETURN:
      diagnose_sb_0 (gsi_p, context, NULL);
      break;

    default:
      break;
    }

  return NULL_TREE;
}

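/* Walk the current function's body twice: pass 1 records the innermost
   OpenMP context of every label, pass 2 checks every branch against the
   context recorded for its destination label.  */
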
static unsigned int
diagnose_omp_structured_block_errors (void)
{
  struct walk_stmt_info wi;
  gimple_seq body = gimple_body (current_function_decl);

  all_labels = splay_tree_new (splay_tree_compare_pointers, 0, 0);

  memset (&wi, 0, sizeof (wi));
  walk_gimple_seq (body, diagnose_sb_1, NULL, &wi);

  memset (&wi, 0, sizeof (wi));
  wi.want_locations = true;
  walk_gimple_seq (body, diagnose_sb_2, NULL, &wi);

  splay_tree_delete (all_labels);
  all_labels = NULL;

  return 0;
}

static bool
gate_diagnose_omp_blocks (void)
{
  return flag_openmp != 0;
}

struct gimple_opt_pass pass_diagnose_omp_blocks =
{
 {
  GIMPLE_PASS,
  "*diagnose_omp_blocks",		/* name */
  gate_diagnose_omp_blocks,		/* gate */
  diagnose_omp_structured_block_errors,	/* execute */
  NULL,					/* sub */
  NULL,					/* next */
  0,					/* static_pass_number */
  TV_NONE,				/* tv_id */
  PROP_gimple_any,			/* properties_required */
  0,					/* properties_provided */
  0,					/* properties_destroyed */
  0,					/* todo_flags_start */
  0,					/* todo_flags_finish */
 }
};

#include "gt-omp-low.h"