/* Lowering pass for OpenMP directives.  Converts OpenMP directives
   into explicit calls to the runtime library (libgomp) and data
   marshalling to implement data sharing and copying clauses.
   Contributed by Diego Novillo <dnovillo@redhat.com>

   Copyright (C) 2005, 2006, 2007, 2008, 2009 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
#include "rtl.h"
#include "gimple.h"
#include "tree-iterator.h"
#include "tree-inline.h"
#include "langhooks.h"
#include "diagnostic.h"
#include "tree-flow.h"
#include "timevar.h"
#include "flags.h"
#include "function.h"
#include "expr.h"
#include "toplev.h"
#include "tree-pass.h"
#include "ggc.h"
#include "except.h"
#include "splay-tree.h"
#include "optabs.h"
#include "cfgloop.h"

/* Lowering of OpenMP parallel and workshare constructs proceeds in two
   phases.  The first phase scans the function looking for OMP statements
   and then for variables that must be replaced to satisfy data sharing
   clauses.  The second phase expands code for the constructs, as well as
   re-gimplifying things when variables have been replaced with complex
   expressions.

   Final code generation is done by pass_expand_omp.  The flowgraph is
   scanned for parallel regions which are then moved to a new
   function, to be invoked by the thread library.  */

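/* As a rough sketch (not the exact generated code), a directive such as

       #pragma omp parallel shared (x)
         body;

   is outlined into a child function that receives the shared variables
   through a ".omp_data_s" record, and the directive itself becomes,
   approximately,

       .omp_data_o.x = x;
       GOMP_parallel_start (foo.omp_fn.0, &.omp_data_o, 0);
       foo.omp_fn.0 (&.omp_data_o);
       GOMP_parallel_end ();  */
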
/* Context structure.  Used to store information about each parallel
   directive in the code.  */

typedef struct omp_context
{
  /* This field must be at the beginning, as we do "inheritance": Some
     callback functions for tree-inline.c (e.g., omp_copy_decl)
     receive a copy_body_data pointer that is up-casted to an
     omp_context pointer.  */
  copy_body_data cb;

  /* The tree of contexts corresponding to the encountered constructs.  */
  struct omp_context *outer;
  gimple stmt;

  /* Map variables to fields in a structure that allows communication
     between sending and receiving threads.  */
  splay_tree field_map;
  tree record_type;
  tree sender_decl;
  tree receiver_decl;

  /* These are used just by task contexts, if task firstprivate fn is
     needed.  srecord_type is used to communicate from the thread
     that encountered the task construct to task firstprivate fn,
     record_type is allocated by GOMP_task, initialized by task firstprivate
     fn and passed to the task body fn.  */
  splay_tree sfield_map;
  tree srecord_type;

  /* A chain of variables to add to the top-level block surrounding the
     construct.  In the case of a parallel, this is in the child function.  */
  tree block_vars;

  /* What to do with variables with implicitly determined sharing
     attributes.  */
  enum omp_clause_default_kind default_kind;

  /* Nesting depth of this context.  Used to beautify error messages re
     invalid gotos.  The outermost ctx is depth 1, with depth 0 being
     reserved for the main body of the function.  */
  int depth;

  /* True if this parallel directive is nested within another.  */
  bool is_nested;
} omp_context;


struct omp_for_data_loop
{
  tree v, n1, n2, step;
  enum tree_code cond_code;
};

/* A structure describing the main elements of a parallel loop.  */

struct omp_for_data
{
  struct omp_for_data_loop loop;
  tree chunk_size;
  gimple for_stmt;
  tree pre, iter_type;
  int collapse;
  bool have_nowait, have_ordered;
  enum omp_clause_schedule_kind sched_kind;
  struct omp_for_data_loop *loops;
};


static splay_tree all_contexts;
static int taskreg_nesting_level;
struct omp_region *root_omp_region;
static bitmap task_shared_vars;

static void scan_omp (gimple_seq, omp_context *);
static tree scan_omp_1_op (tree *, int *, void *);

#define WALK_SUBSTMTS  \
    case GIMPLE_BIND: \
    case GIMPLE_TRY: \
    case GIMPLE_CATCH: \
    case GIMPLE_EH_FILTER: \
      /* The sub-statements for these should be walked.  */ \
      *handled_ops_p = false; \
      break;

/* Convenience function for calling scan_omp_1_op on tree operands.  */

static inline tree
scan_omp_op (tree *tp, omp_context *ctx)
{
  struct walk_stmt_info wi;

  memset (&wi, 0, sizeof (wi));
  wi.info = ctx;
  wi.want_locations = true;

  return walk_tree (tp, scan_omp_1_op, &wi, NULL);
}

static void lower_omp (gimple_seq, omp_context *);
static tree lookup_decl_in_outer_ctx (tree, omp_context *);
static tree maybe_lookup_decl_in_outer_ctx (tree, omp_context *);

/* Find an OpenMP clause of type KIND within CLAUSES.  */

tree
find_omp_clause (tree clauses, enum omp_clause_code kind)
{
  for (; clauses ; clauses = OMP_CLAUSE_CHAIN (clauses))
    if (OMP_CLAUSE_CODE (clauses) == kind)
      return clauses;

  return NULL_TREE;
}

/* Return true if CTX is for an omp parallel.  */

static inline bool
is_parallel_ctx (omp_context *ctx)
{
  return gimple_code (ctx->stmt) == GIMPLE_OMP_PARALLEL;
}


/* Return true if CTX is for an omp task.  */

static inline bool
is_task_ctx (omp_context *ctx)
{
  return gimple_code (ctx->stmt) == GIMPLE_OMP_TASK;
}


/* Return true if CTX is for an omp parallel or omp task.  */

static inline bool
is_taskreg_ctx (omp_context *ctx)
{
  return gimple_code (ctx->stmt) == GIMPLE_OMP_PARALLEL
	 || gimple_code (ctx->stmt) == GIMPLE_OMP_TASK;
}


/* Return true if REGION is a combined parallel+workshare region.  */

static inline bool
is_combined_parallel (struct omp_region *region)
{
  return region->is_combined_parallel;
}


/* Extract the header elements of parallel loop FOR_STMT and store
   them into *FD.  */

static void
extract_omp_for_data (gimple for_stmt, struct omp_for_data *fd,
		      struct omp_for_data_loop *loops)
{
  tree t, var, *collapse_iter, *collapse_count;
  tree count = NULL_TREE, iter_type = long_integer_type_node;
  struct omp_for_data_loop *loop;
  int i;
  struct omp_for_data_loop dummy_loop;
  location_t loc = gimple_location (for_stmt);

  fd->for_stmt = for_stmt;
  fd->pre = NULL;
  fd->collapse = gimple_omp_for_collapse (for_stmt);
  if (fd->collapse > 1)
    fd->loops = loops;
  else
    fd->loops = &fd->loop;

  fd->have_nowait = fd->have_ordered = false;
  fd->sched_kind = OMP_CLAUSE_SCHEDULE_STATIC;
  fd->chunk_size = NULL_TREE;
  collapse_iter = NULL;
  collapse_count = NULL;

  for (t = gimple_omp_for_clauses (for_stmt); t ; t = OMP_CLAUSE_CHAIN (t))
    switch (OMP_CLAUSE_CODE (t))
      {
      case OMP_CLAUSE_NOWAIT:
	fd->have_nowait = true;
	break;
      case OMP_CLAUSE_ORDERED:
	fd->have_ordered = true;
	break;
      case OMP_CLAUSE_SCHEDULE:
	fd->sched_kind = OMP_CLAUSE_SCHEDULE_KIND (t);
	fd->chunk_size = OMP_CLAUSE_SCHEDULE_CHUNK_EXPR (t);
	break;
      case OMP_CLAUSE_COLLAPSE:
	if (fd->collapse > 1)
	  {
	    collapse_iter = &OMP_CLAUSE_COLLAPSE_ITERVAR (t);
	    collapse_count = &OMP_CLAUSE_COLLAPSE_COUNT (t);
	  }
      default:
	break;
      }

  /* FIXME: for now map schedule(auto) to schedule(static).
     There should be analysis to determine whether all iterations
     are approximately the same amount of work (then schedule(static)
     is best) or if it varies (then schedule(dynamic,N) is better).  */
  if (fd->sched_kind == OMP_CLAUSE_SCHEDULE_AUTO)
    {
      fd->sched_kind = OMP_CLAUSE_SCHEDULE_STATIC;
      gcc_assert (fd->chunk_size == NULL);
    }
  gcc_assert (fd->collapse == 1 || collapse_iter != NULL);
  if (fd->sched_kind == OMP_CLAUSE_SCHEDULE_RUNTIME)
    gcc_assert (fd->chunk_size == NULL);
  else if (fd->chunk_size == NULL)
    {
      /* We only need to compute a default chunk size for ordered
	 static loops and dynamic loops.  */
      if (fd->sched_kind != OMP_CLAUSE_SCHEDULE_STATIC
	  || fd->have_ordered
	  || fd->collapse > 1)
	fd->chunk_size = (fd->sched_kind == OMP_CLAUSE_SCHEDULE_STATIC)
			 ? integer_zero_node : integer_one_node;
    }

  for (i = 0; i < fd->collapse; i++)
    {
      if (fd->collapse == 1)
	loop = &fd->loop;
      else if (loops != NULL)
	loop = loops + i;
      else
	loop = &dummy_loop;

      loop->v = gimple_omp_for_index (for_stmt, i);
      gcc_assert (SSA_VAR_P (loop->v));
      gcc_assert (TREE_CODE (TREE_TYPE (loop->v)) == INTEGER_TYPE
		  || TREE_CODE (TREE_TYPE (loop->v)) == POINTER_TYPE);
      var = TREE_CODE (loop->v) == SSA_NAME ? SSA_NAME_VAR (loop->v) : loop->v;
      loop->n1 = gimple_omp_for_initial (for_stmt, i);

      loop->cond_code = gimple_omp_for_cond (for_stmt, i);
      loop->n2 = gimple_omp_for_final (for_stmt, i);
      switch (loop->cond_code)
	{
	case LT_EXPR:
	case GT_EXPR:
	  break;
	case LE_EXPR:
	  if (POINTER_TYPE_P (TREE_TYPE (loop->n2)))
	    loop->n2 = fold_build2_loc (loc,
					POINTER_PLUS_EXPR, TREE_TYPE (loop->n2),
					loop->n2, size_one_node);
	  else
	    loop->n2 = fold_build2_loc (loc,
					PLUS_EXPR, TREE_TYPE (loop->n2), loop->n2,
					build_int_cst (TREE_TYPE (loop->n2), 1));
	  loop->cond_code = LT_EXPR;
	  break;
	case GE_EXPR:
	  if (POINTER_TYPE_P (TREE_TYPE (loop->n2)))
	    loop->n2 = fold_build2_loc (loc,
					POINTER_PLUS_EXPR, TREE_TYPE (loop->n2),
					loop->n2, size_int (-1));
	  else
	    loop->n2 = fold_build2_loc (loc,
					MINUS_EXPR, TREE_TYPE (loop->n2), loop->n2,
					build_int_cst (TREE_TYPE (loop->n2), 1));
	  loop->cond_code = GT_EXPR;
	  break;
	default:
	  gcc_unreachable ();
	}

      t = gimple_omp_for_incr (for_stmt, i);
      gcc_assert (TREE_OPERAND (t, 0) == var);
      switch (TREE_CODE (t))
	{
	case PLUS_EXPR:
	case POINTER_PLUS_EXPR:
	  loop->step = TREE_OPERAND (t, 1);
	  break;
	case MINUS_EXPR:
	  loop->step = TREE_OPERAND (t, 1);
	  loop->step = fold_build1_loc (loc,
					NEGATE_EXPR, TREE_TYPE (loop->step),
					loop->step);
	  break;
	default:
	  gcc_unreachable ();
	}

      if (iter_type != long_long_unsigned_type_node)
	{
	  if (POINTER_TYPE_P (TREE_TYPE (loop->v)))
	    iter_type = long_long_unsigned_type_node;
	  else if (TYPE_UNSIGNED (TREE_TYPE (loop->v))
		   && TYPE_PRECISION (TREE_TYPE (loop->v))
		      >= TYPE_PRECISION (iter_type))
	    {
	      tree n;

	      if (loop->cond_code == LT_EXPR)
		n = fold_build2_loc (loc,
				     PLUS_EXPR, TREE_TYPE (loop->v),
				     loop->n2, loop->step);
	      else
		n = loop->n1;
	      if (TREE_CODE (n) != INTEGER_CST
		  || tree_int_cst_lt (TYPE_MAX_VALUE (iter_type), n))
		iter_type = long_long_unsigned_type_node;
	    }
	  else if (TYPE_PRECISION (TREE_TYPE (loop->v))
		   > TYPE_PRECISION (iter_type))
	    {
	      tree n1, n2;

	      if (loop->cond_code == LT_EXPR)
		{
		  n1 = loop->n1;
		  n2 = fold_build2_loc (loc,
					PLUS_EXPR, TREE_TYPE (loop->v),
					loop->n2, loop->step);
		}
	      else
		{
		  n1 = fold_build2_loc (loc,
					MINUS_EXPR, TREE_TYPE (loop->v),
					loop->n2, loop->step);
		  n2 = loop->n1;
		}
	      if (TREE_CODE (n1) != INTEGER_CST
		  || TREE_CODE (n2) != INTEGER_CST
		  || !tree_int_cst_lt (TYPE_MIN_VALUE (iter_type), n1)
		  || !tree_int_cst_lt (n2, TYPE_MAX_VALUE (iter_type)))
		iter_type = long_long_unsigned_type_node;
	    }
	}

      if (collapse_count && *collapse_count == NULL)
	{
	  if ((i == 0 || count != NULL_TREE)
	      && TREE_CODE (TREE_TYPE (loop->v)) == INTEGER_TYPE
	      && TREE_CONSTANT (loop->n1)
	      && TREE_CONSTANT (loop->n2)
	      && TREE_CODE (loop->step) == INTEGER_CST)
	    {
	      tree itype = TREE_TYPE (loop->v);

	      if (POINTER_TYPE_P (itype))
		itype
		  = lang_hooks.types.type_for_size (TYPE_PRECISION (itype), 0);
	      t = build_int_cst (itype, (loop->cond_code == LT_EXPR ? -1 : 1));
	      t = fold_build2_loc (loc,
				   PLUS_EXPR, itype,
				   fold_convert_loc (loc, itype, loop->step), t);
	      t = fold_build2_loc (loc, PLUS_EXPR, itype, t,
				   fold_convert_loc (loc, itype, loop->n2));
	      t = fold_build2_loc (loc, MINUS_EXPR, itype, t,
				   fold_convert_loc (loc, itype, loop->n1));
	      if (TYPE_UNSIGNED (itype) && loop->cond_code == GT_EXPR)
		t = fold_build2_loc (loc, TRUNC_DIV_EXPR, itype,
				     fold_build1_loc (loc, NEGATE_EXPR, itype, t),
				     fold_build1_loc (loc, NEGATE_EXPR, itype,
						      fold_convert_loc (loc, itype,
									loop->step)));
	      else
		t = fold_build2_loc (loc, TRUNC_DIV_EXPR, itype, t,
				     fold_convert_loc (loc, itype, loop->step));
	      t = fold_convert_loc (loc, long_long_unsigned_type_node, t);
	      if (count != NULL_TREE)
		count = fold_build2_loc (loc,
					 MULT_EXPR, long_long_unsigned_type_node,
					 count, t);
	      else
		count = t;
	      if (TREE_CODE (count) != INTEGER_CST)
		count = NULL_TREE;
	    }
	  else
	    count = NULL_TREE;
	}
    }

  if (count)
    {
      if (!tree_int_cst_lt (count, TYPE_MAX_VALUE (long_integer_type_node)))
	iter_type = long_long_unsigned_type_node;
      else
	iter_type = long_integer_type_node;
    }
  else if (collapse_iter && *collapse_iter != NULL)
    iter_type = TREE_TYPE (*collapse_iter);
  fd->iter_type = iter_type;
  if (collapse_iter && *collapse_iter == NULL)
    *collapse_iter = create_tmp_var (iter_type, ".iter");
  if (collapse_count && *collapse_count == NULL)
    {
      if (count)
	*collapse_count = fold_convert_loc (loc, iter_type, count);
      else
	*collapse_count = create_tmp_var (iter_type, ".count");
    }

  if (fd->collapse > 1)
    {
      fd->loop.v = *collapse_iter;
      fd->loop.n1 = build_int_cst (TREE_TYPE (fd->loop.v), 0);
      fd->loop.n2 = *collapse_count;
      fd->loop.step = build_int_cst (TREE_TYPE (fd->loop.v), 1);
      fd->loop.cond_code = LT_EXPR;
    }
}
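
/* For instance (a hand-worked example, not compiler output), for

       #pragma omp for collapse(2)
       for (i = 0; i < 8; i++)
	 for (j = 0; j < 4; j++)

   each omp_for_data_loop records v, n1 = 0, n2 = 8 (resp. 4), step = 1
   and cond_code = LT_EXPR, and the collapse count computed above is
   ((1 - 1) + 8 - 0) / 1 * ((1 - 1) + 4 - 0) / 1 = 32, which becomes
   the n2 bound of the single synthesized .iter loop.  */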


/* Given two blocks PAR_ENTRY_BB and WS_ENTRY_BB such that WS_ENTRY_BB
   is the immediate dominator of PAR_ENTRY_BB, return true if there
   are no data dependencies that would prevent expanding the parallel
   directive at PAR_ENTRY_BB as a combined parallel+workshare region.

   When expanding a combined parallel+workshare region, the call to
   the child function may need additional arguments in the case of
   GIMPLE_OMP_FOR regions.  In some cases, these arguments are
   computed out of variables passed in from the parent to the child
   via 'struct .omp_data_s'.  For instance:

	#pragma omp parallel for schedule (guided, i * 4)
	for (j ...)

   Is lowered into:

	# BLOCK 2 (PAR_ENTRY_BB)
	.omp_data_o.i = i;
	#pragma omp parallel [child fn: bar.omp_fn.0 ( ..., D.1598)

	# BLOCK 3 (WS_ENTRY_BB)
	.omp_data_i = &.omp_data_o;
	D.1667 = .omp_data_i->i;
	D.1598 = D.1667 * 4;
	#pragma omp for schedule (guided, D.1598)

   When we outline the parallel region, the call to the child function
   'bar.omp_fn.0' will need the value D.1598 in its argument list, but
   that value is computed *after* the call site.  So, in principle we
   cannot do the transformation.

   To see whether the code in WS_ENTRY_BB blocks the combined
   parallel+workshare call, we collect all the variables used in the
   GIMPLE_OMP_FOR header and check whether they appear on the LHS of
   any statement in WS_ENTRY_BB.  If so, then we cannot emit the
   combined call.

   FIXME.  If we had the SSA form built at this point, we could merely
   hoist the code in block 3 into block 2 and be done with it.  But at
   this point we don't have dataflow information and though we could
   hack something up here, it is really not worth the aggravation.  */

static bool
workshare_safe_to_combine_p (basic_block par_entry_bb, basic_block ws_entry_bb)
{
  struct omp_for_data fd;
  gimple par_stmt, ws_stmt;

  par_stmt = last_stmt (par_entry_bb);
  ws_stmt = last_stmt (ws_entry_bb);

  if (gimple_code (ws_stmt) == GIMPLE_OMP_SECTIONS)
    return true;

  gcc_assert (gimple_code (ws_stmt) == GIMPLE_OMP_FOR);

  extract_omp_for_data (ws_stmt, &fd, NULL);

  if (fd.collapse > 1 && TREE_CODE (fd.loop.n2) != INTEGER_CST)
    return false;
  if (fd.iter_type != long_integer_type_node)
    return false;

  /* FIXME.  We give up too easily here.  If any of these arguments
     are not constants, they will likely involve variables that have
     been mapped into fields of .omp_data_s for sharing with the child
     function.  With appropriate data flow, it would be possible to
     see through this.  */
  if (!is_gimple_min_invariant (fd.loop.n1)
      || !is_gimple_min_invariant (fd.loop.n2)
      || !is_gimple_min_invariant (fd.loop.step)
      || (fd.chunk_size && !is_gimple_min_invariant (fd.chunk_size)))
    return false;

  return true;
}


/* Collect additional arguments needed to emit a combined
   parallel+workshare call.  WS_STMT is the workshare directive being
   expanded.  */

static tree
get_ws_args_for (gimple ws_stmt)
{
  tree t;
  location_t loc = gimple_location (ws_stmt);

  if (gimple_code (ws_stmt) == GIMPLE_OMP_FOR)
    {
      struct omp_for_data fd;
      tree ws_args;

      extract_omp_for_data (ws_stmt, &fd, NULL);

      ws_args = NULL_TREE;
      if (fd.chunk_size)
	{
	  t = fold_convert_loc (loc, long_integer_type_node, fd.chunk_size);
	  ws_args = tree_cons (NULL, t, ws_args);
	}

      t = fold_convert_loc (loc, long_integer_type_node, fd.loop.step);
      ws_args = tree_cons (NULL, t, ws_args);

      t = fold_convert_loc (loc, long_integer_type_node, fd.loop.n2);
      ws_args = tree_cons (NULL, t, ws_args);

      t = fold_convert_loc (loc, long_integer_type_node, fd.loop.n1);
      ws_args = tree_cons (NULL, t, ws_args);

      return ws_args;
    }
  else if (gimple_code (ws_stmt) == GIMPLE_OMP_SECTIONS)
    {
      /* Number of sections is equal to the number of edges from the
	 GIMPLE_OMP_SECTIONS_SWITCH statement, except for the one to
	 the exit of the sections region.  */
      basic_block bb = single_succ (gimple_bb (ws_stmt));
      t = build_int_cst (unsigned_type_node, EDGE_COUNT (bb->succs) - 1);
      t = tree_cons (NULL, t, NULL);
      return t;
    }

  gcc_unreachable ();
}
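
/* Note that tree_cons prepends, so building the list above in the
   order chunk_size, step, n2, n1 leaves the final ws_args list as
   (n1, n2, step[, chunk_size]) -- presumably the trailing arguments
   that the combined GOMP_parallel_loop_* library entry points expect
   after the child function and data pointers.  */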


/* Discover whether REGION is a combined parallel+workshare region.  */

static void
determine_parallel_type (struct omp_region *region)
{
  basic_block par_entry_bb, par_exit_bb;
  basic_block ws_entry_bb, ws_exit_bb;

  if (region == NULL || region->inner == NULL
      || region->exit == NULL || region->inner->exit == NULL
      || region->inner->cont == NULL)
    return;

  /* We only support parallel+for and parallel+sections.  */
  if (region->type != GIMPLE_OMP_PARALLEL
      || (region->inner->type != GIMPLE_OMP_FOR
	  && region->inner->type != GIMPLE_OMP_SECTIONS))
    return;

  /* Check for perfect nesting PAR_ENTRY_BB -> WS_ENTRY_BB and
     WS_EXIT_BB -> PAR_EXIT_BB.  */
  par_entry_bb = region->entry;
  par_exit_bb = region->exit;
  ws_entry_bb = region->inner->entry;
  ws_exit_bb = region->inner->exit;

  if (single_succ (par_entry_bb) == ws_entry_bb
      && single_succ (ws_exit_bb) == par_exit_bb
      && workshare_safe_to_combine_p (par_entry_bb, ws_entry_bb)
      && (gimple_omp_parallel_combined_p (last_stmt (par_entry_bb))
	  || (last_and_only_stmt (ws_entry_bb)
	      && last_and_only_stmt (par_exit_bb))))
    {
      gimple ws_stmt = last_stmt (ws_entry_bb);

      if (region->inner->type == GIMPLE_OMP_FOR)
	{
	  /* If this is a combined parallel loop, we need to determine
	     whether or not to use the combined library calls.  There
	     are two cases where we do not apply the transformation:
	     static loops and any kind of ordered loop.  In the first
	     case, we already open code the loop so there is no need
	     to do anything else.  In the latter case, the combined
	     parallel loop call would still need extra synchronization
	     to implement ordered semantics, so there would not be any
	     gain in using the combined call.  */
	  tree clauses = gimple_omp_for_clauses (ws_stmt);
	  tree c = find_omp_clause (clauses, OMP_CLAUSE_SCHEDULE);
	  if (c == NULL
	      || OMP_CLAUSE_SCHEDULE_KIND (c) == OMP_CLAUSE_SCHEDULE_STATIC
	      || find_omp_clause (clauses, OMP_CLAUSE_ORDERED))
	    {
	      region->is_combined_parallel = false;
	      region->inner->is_combined_parallel = false;
	      return;
	    }
	}

      region->is_combined_parallel = true;
      region->inner->is_combined_parallel = true;
      region->ws_args = get_ws_args_for (ws_stmt);
    }
}


/* Return true if EXPR is variable sized.  */

static inline bool
is_variable_sized (const_tree expr)
{
  return !TREE_CONSTANT (TYPE_SIZE_UNIT (TREE_TYPE (expr)));
}

/* Return true if DECL is a reference type.  */

static inline bool
is_reference (tree decl)
{
  return lang_hooks.decls.omp_privatize_by_reference (decl);
}

/* Lookup variables in the decl or field splay trees.  The "maybe" form
   allows for the variable form to not have been entered, otherwise we
   assert that the variable must have been entered.  */

static inline tree
lookup_decl (tree var, omp_context *ctx)
{
  tree *n;
  n = (tree *) pointer_map_contains (ctx->cb.decl_map, var);
  return *n;
}

static inline tree
maybe_lookup_decl (const_tree var, omp_context *ctx)
{
  tree *n;
  n = (tree *) pointer_map_contains (ctx->cb.decl_map, var);
  return n ? *n : NULL_TREE;
}

static inline tree
lookup_field (tree var, omp_context *ctx)
{
  splay_tree_node n;
  n = splay_tree_lookup (ctx->field_map, (splay_tree_key) var);
  return (tree) n->value;
}

static inline tree
lookup_sfield (tree var, omp_context *ctx)
{
  splay_tree_node n;
  n = splay_tree_lookup (ctx->sfield_map
			 ? ctx->sfield_map : ctx->field_map,
			 (splay_tree_key) var);
  return (tree) n->value;
}

static inline tree
maybe_lookup_field (tree var, omp_context *ctx)
{
  splay_tree_node n;
  n = splay_tree_lookup (ctx->field_map, (splay_tree_key) var);
  return n ? (tree) n->value : NULL_TREE;
}

/* Return true if DECL should be copied by pointer.  SHARED_CTX is
   the parallel context if DECL is to be shared.  */

static bool
use_pointer_for_field (tree decl, omp_context *shared_ctx)
{
  if (AGGREGATE_TYPE_P (TREE_TYPE (decl)))
    return true;

  /* We can only use copy-in/copy-out semantics for shared variables
     when we know the value is not accessible from an outer scope.  */
  if (shared_ctx)
    {
      /* ??? Trivially accessible from anywhere.  But why would we even
	 be passing an address in this case?  Should we simply assert
	 this to be false, or should we have a cleanup pass that removes
	 these from the list of mappings?  */
      if (TREE_STATIC (decl) || DECL_EXTERNAL (decl))
	return true;

      /* For variables with DECL_HAS_VALUE_EXPR_P set, we cannot tell
	 without analyzing the expression whether or not its location
	 is accessible to anyone else.  In the case of nested parallel
	 regions it certainly may be.  */
      if (TREE_CODE (decl) != RESULT_DECL && DECL_HAS_VALUE_EXPR_P (decl))
	return true;

      /* Do not use copy-in/copy-out for variables that have their
	 address taken.  */
      if (TREE_ADDRESSABLE (decl))
	return true;

      /* Disallow copy-in/out in nested parallel if
	 decl is shared in outer parallel, otherwise
	 each thread could store the shared variable
	 in its own copy-in location, making the
	 variable no longer really shared.  */
      if (!TREE_READONLY (decl) && shared_ctx->is_nested)
	{
	  omp_context *up;

	  for (up = shared_ctx->outer; up; up = up->outer)
	    if (is_taskreg_ctx (up) && maybe_lookup_decl (decl, up))
	      break;

	  if (up)
	    {
	      tree c;

	      for (c = gimple_omp_taskreg_clauses (up->stmt);
		   c; c = OMP_CLAUSE_CHAIN (c))
		if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_SHARED
		    && OMP_CLAUSE_DECL (c) == decl)
		  break;

	      if (c)
		return true;
	    }
	}

      /* For tasks avoid using copy-in/out, unless they are readonly
	 (in which case just copy-in is used).  As tasks can be
	 deferred or executed in a different thread, when GOMP_task
	 returns, the task hasn't necessarily terminated.  */
      if (!TREE_READONLY (decl) && is_task_ctx (shared_ctx))
	{
	  tree outer = maybe_lookup_decl_in_outer_ctx (decl, shared_ctx);
	  if (is_gimple_reg (outer))
	    {
	      /* Taking address of OUTER in lower_send_shared_vars
		 might need regimplification of everything that uses the
		 variable.  */
	      if (!task_shared_vars)
		task_shared_vars = BITMAP_ALLOC (NULL);
	      bitmap_set_bit (task_shared_vars, DECL_UID (outer));
	      TREE_ADDRESSABLE (outer) = 1;
	    }
	  return true;
	}
    }

  return false;
}
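
/* Illustration (not from the sources): a non-addressable scalar that
   is shared can use copy-in/copy-out through the .omp_data_s record,
   so this returns false for it; once its address is taken, or the
   sharing context is a task whose execution may outlive the spawning
   code, the field must hold the variable's address instead and this
   returns true.  */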

/* Create a new VAR_DECL and copy information from VAR to it.  */

tree
copy_var_decl (tree var, tree name, tree type)
{
  tree copy = build_decl (DECL_SOURCE_LOCATION (var), VAR_DECL, name, type);

  TREE_ADDRESSABLE (copy) = TREE_ADDRESSABLE (var);
  TREE_THIS_VOLATILE (copy) = TREE_THIS_VOLATILE (var);
  DECL_GIMPLE_REG_P (copy) = DECL_GIMPLE_REG_P (var);
  DECL_ARTIFICIAL (copy) = DECL_ARTIFICIAL (var);
  DECL_IGNORED_P (copy) = DECL_IGNORED_P (var);
  DECL_CONTEXT (copy) = DECL_CONTEXT (var);
  TREE_USED (copy) = 1;
  DECL_SEEN_IN_BIND_EXPR_P (copy) = 1;

  return copy;
}

/* Construct a new automatic decl similar to VAR.  */

static tree
omp_copy_decl_2 (tree var, tree name, tree type, omp_context *ctx)
{
  tree copy = copy_var_decl (var, name, type);

  DECL_CONTEXT (copy) = current_function_decl;
  TREE_CHAIN (copy) = ctx->block_vars;
  ctx->block_vars = copy;

  return copy;
}

static tree
omp_copy_decl_1 (tree var, omp_context *ctx)
{
  return omp_copy_decl_2 (var, DECL_NAME (var), TREE_TYPE (var), ctx);
}

/* Build tree nodes to access the field for VAR on the receiver side.  */

static tree
build_receiver_ref (tree var, bool by_ref, omp_context *ctx)
{
  tree x, field = lookup_field (var, ctx);

  /* If the receiver record type was remapped in the child function,
     remap the field into the new record type.  */
  x = maybe_lookup_field (field, ctx);
  if (x != NULL)
    field = x;

  x = build_fold_indirect_ref (ctx->receiver_decl);
  x = build3 (COMPONENT_REF, TREE_TYPE (field), x, field, NULL);
  if (by_ref)
    x = build_fold_indirect_ref (x);

  return x;
}

/* Build tree nodes to access VAR in the scope outer to CTX.  In the case
   of a parallel, this is a component reference; for workshare constructs
   this is some variable.  */

static tree
build_outer_var_ref (tree var, omp_context *ctx)
{
  tree x;

  if (is_global_var (maybe_lookup_decl_in_outer_ctx (var, ctx)))
    x = var;
  else if (is_variable_sized (var))
    {
      x = TREE_OPERAND (DECL_VALUE_EXPR (var), 0);
      x = build_outer_var_ref (x, ctx);
      x = build_fold_indirect_ref (x);
    }
  else if (is_taskreg_ctx (ctx))
    {
      bool by_ref = use_pointer_for_field (var, NULL);
      x = build_receiver_ref (var, by_ref, ctx);
    }
  else if (ctx->outer)
    x = lookup_decl (var, ctx->outer);
  else if (is_reference (var))
    /* This can happen with orphaned constructs.  If var is reference, it is
       possible it is shared and as such valid.  */
    x = var;
  else
    gcc_unreachable ();

  if (is_reference (var))
    x = build_fold_indirect_ref (x);

  return x;
}

/* Build tree nodes to access the field for VAR on the sender side.  */

static tree
build_sender_ref (tree var, omp_context *ctx)
{
  tree field = lookup_sfield (var, ctx);
  return build3 (COMPONENT_REF, TREE_TYPE (field),
		 ctx->sender_decl, field, NULL);
}

/* Add a new field for VAR inside the structure CTX->SENDER_DECL.  */
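/* (MASK semantics, inferred from the uses below and in
   scan_sharing_clauses: bit 0 requests a field in the receiver-side
   record CTX->RECORD_TYPE, bit 1 one in the sender-side record
   CTX->SRECORD_TYPE used by the task copy function; ordinary parallel
   sharing passes 3 to get both.)  */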

static void
install_var_field (tree var, bool by_ref, int mask, omp_context *ctx)
{
  tree field, type, sfield = NULL_TREE;

  gcc_assert ((mask & 1) == 0
	      || !splay_tree_lookup (ctx->field_map, (splay_tree_key) var));
  gcc_assert ((mask & 2) == 0 || !ctx->sfield_map
	      || !splay_tree_lookup (ctx->sfield_map, (splay_tree_key) var));

  type = TREE_TYPE (var);
  if (by_ref)
    type = build_pointer_type (type);
  else if ((mask & 3) == 1 && is_reference (var))
    type = TREE_TYPE (type);

  field = build_decl (DECL_SOURCE_LOCATION (var),
		      FIELD_DECL, DECL_NAME (var), type);

  /* Remember what variable this field was created for.  This does have a
     side effect of making dwarf2out ignore this member, so for helpful
     debugging we clear it later in delete_omp_context.  */
  DECL_ABSTRACT_ORIGIN (field) = var;
  if (type == TREE_TYPE (var))
    {
      DECL_ALIGN (field) = DECL_ALIGN (var);
      DECL_USER_ALIGN (field) = DECL_USER_ALIGN (var);
      TREE_THIS_VOLATILE (field) = TREE_THIS_VOLATILE (var);
    }
  else
    DECL_ALIGN (field) = TYPE_ALIGN (type);

  if ((mask & 3) == 3)
    {
      insert_field_into_struct (ctx->record_type, field);
      if (ctx->srecord_type)
	{
	  sfield = build_decl (DECL_SOURCE_LOCATION (var),
			       FIELD_DECL, DECL_NAME (var), type);
	  DECL_ABSTRACT_ORIGIN (sfield) = var;
	  DECL_ALIGN (sfield) = DECL_ALIGN (field);
	  DECL_USER_ALIGN (sfield) = DECL_USER_ALIGN (field);
	  TREE_THIS_VOLATILE (sfield) = TREE_THIS_VOLATILE (field);
	  insert_field_into_struct (ctx->srecord_type, sfield);
	}
    }
  else
    {
      if (ctx->srecord_type == NULL_TREE)
	{
	  tree t;

	  ctx->srecord_type = lang_hooks.types.make_type (RECORD_TYPE);
	  ctx->sfield_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
	  for (t = TYPE_FIELDS (ctx->record_type); t ; t = TREE_CHAIN (t))
	    {
	      sfield = build_decl (DECL_SOURCE_LOCATION (var),
				   FIELD_DECL, DECL_NAME (t), TREE_TYPE (t));
	      DECL_ABSTRACT_ORIGIN (sfield) = DECL_ABSTRACT_ORIGIN (t);
	      insert_field_into_struct (ctx->srecord_type, sfield);
	      splay_tree_insert (ctx->sfield_map,
				 (splay_tree_key) DECL_ABSTRACT_ORIGIN (t),
				 (splay_tree_value) sfield);
	    }
	}
      sfield = field;
      insert_field_into_struct ((mask & 1) ? ctx->record_type
				: ctx->srecord_type, field);
    }

  if (mask & 1)
    splay_tree_insert (ctx->field_map, (splay_tree_key) var,
		       (splay_tree_value) field);
  if ((mask & 2) && ctx->sfield_map)
    splay_tree_insert (ctx->sfield_map, (splay_tree_key) var,
		       (splay_tree_value) sfield);
}

static tree
install_var_local (tree var, omp_context *ctx)
{
  tree new_var = omp_copy_decl_1 (var, ctx);
  insert_decl_map (&ctx->cb, var, new_var);
  return new_var;
}

/* Adjust the replacement for DECL in CTX for the new context.  This means
   copying the DECL_VALUE_EXPR, and fixing up the type.  */

static void
fixup_remapped_decl (tree decl, omp_context *ctx, bool private_debug)
{
  tree new_decl, size;

  new_decl = lookup_decl (decl, ctx);

  TREE_TYPE (new_decl) = remap_type (TREE_TYPE (decl), &ctx->cb);

  if ((!TREE_CONSTANT (DECL_SIZE (new_decl)) || private_debug)
      && DECL_HAS_VALUE_EXPR_P (decl))
    {
      tree ve = DECL_VALUE_EXPR (decl);
      walk_tree (&ve, copy_tree_body_r, &ctx->cb, NULL);
      SET_DECL_VALUE_EXPR (new_decl, ve);
      DECL_HAS_VALUE_EXPR_P (new_decl) = 1;
    }

  if (!TREE_CONSTANT (DECL_SIZE (new_decl)))
    {
      size = remap_decl (DECL_SIZE (decl), &ctx->cb);
      if (size == error_mark_node)
	size = TYPE_SIZE (TREE_TYPE (new_decl));
      DECL_SIZE (new_decl) = size;

      size = remap_decl (DECL_SIZE_UNIT (decl), &ctx->cb);
      if (size == error_mark_node)
	size = TYPE_SIZE_UNIT (TREE_TYPE (new_decl));
      DECL_SIZE_UNIT (new_decl) = size;
    }
}

/* The callback for remap_decl.  Search all containing contexts for a
   mapping of the variable; this avoids having to duplicate the splay
   tree ahead of time.  We know a mapping doesn't already exist in the
   given context.  Create new mappings to implement default semantics.  */

static tree
omp_copy_decl (tree var, copy_body_data *cb)
{
  omp_context *ctx = (omp_context *) cb;
  tree new_var;

  if (TREE_CODE (var) == LABEL_DECL)
    {
      new_var = create_artificial_label (DECL_SOURCE_LOCATION (var));
      DECL_CONTEXT (new_var) = current_function_decl;
      insert_decl_map (&ctx->cb, var, new_var);
      return new_var;
    }

  while (!is_taskreg_ctx (ctx))
    {
      ctx = ctx->outer;
      if (ctx == NULL)
	return var;
      new_var = maybe_lookup_decl (var, ctx);
      if (new_var)
	return new_var;
    }

  if (is_global_var (var) || decl_function_context (var) != ctx->cb.src_fn)
    return var;

  return error_mark_node;
}


/* Return the parallel region associated with STMT.  */

/* Debugging dumps for parallel regions.  */
void dump_omp_region (FILE *, struct omp_region *, int);
void debug_omp_region (struct omp_region *);
void debug_all_omp_regions (void);

/* Dump the parallel region tree rooted at REGION.  */

void
dump_omp_region (FILE *file, struct omp_region *region, int indent)
{
  fprintf (file, "%*sbb %d: %s\n", indent, "", region->entry->index,
	   gimple_code_name[region->type]);

  if (region->inner)
    dump_omp_region (file, region->inner, indent + 4);

  if (region->cont)
    {
      fprintf (file, "%*sbb %d: GIMPLE_OMP_CONTINUE\n", indent, "",
	       region->cont->index);
    }

  if (region->exit)
    fprintf (file, "%*sbb %d: GIMPLE_OMP_RETURN\n", indent, "",
	     region->exit->index);
  else
    fprintf (file, "%*s[no exit marker]\n", indent, "");

  if (region->next)
    dump_omp_region (file, region->next, indent);
}

void
debug_omp_region (struct omp_region *region)
{
  dump_omp_region (stderr, region, 0);
}

void
debug_all_omp_regions (void)
{
  dump_omp_region (stderr, root_omp_region, 0);
}


/* Create a new parallel region starting at STMT inside region PARENT.  */

struct omp_region *
new_omp_region (basic_block bb, enum gimple_code type,
		struct omp_region *parent)
{
  struct omp_region *region = XCNEW (struct omp_region);

  region->outer = parent;
  region->entry = bb;
  region->type = type;

  if (parent)
    {
      /* This is a nested region.  Add it to the list of inner
	 regions in PARENT.  */
      region->next = parent->inner;
      parent->inner = region;
    }
  else
    {
      /* This is a toplevel region.  Add it to the list of toplevel
	 regions in ROOT_OMP_REGION.  */
      region->next = root_omp_region;
      root_omp_region = region;
    }

  return region;
}

/* Release the memory associated with the region tree rooted at REGION.  */

static void
free_omp_region_1 (struct omp_region *region)
{
  struct omp_region *i, *n;

  for (i = region->inner; i ; i = n)
    {
      n = i->next;
      free_omp_region_1 (i);
    }

  free (region);
}

/* Release the memory for the entire omp region tree.  */

void
free_omp_regions (void)
{
  struct omp_region *r, *n;
  for (r = root_omp_region; r ; r = n)
    {
      n = r->next;
      free_omp_region_1 (r);
    }
  root_omp_region = NULL;
}


/* Create a new context, with OUTER_CTX being the surrounding context.  */

static omp_context *
new_omp_context (gimple stmt, omp_context *outer_ctx)
{
  omp_context *ctx = XCNEW (omp_context);

  splay_tree_insert (all_contexts, (splay_tree_key) stmt,
		     (splay_tree_value) ctx);
  ctx->stmt = stmt;

  if (outer_ctx)
    {
      ctx->outer = outer_ctx;
      ctx->cb = outer_ctx->cb;
      ctx->cb.block = NULL;
      ctx->depth = outer_ctx->depth + 1;
    }
  else
    {
      ctx->cb.src_fn = current_function_decl;
      ctx->cb.dst_fn = current_function_decl;
      ctx->cb.src_node = cgraph_node (current_function_decl);
      ctx->cb.dst_node = ctx->cb.src_node;
      ctx->cb.src_cfun = cfun;
      ctx->cb.copy_decl = omp_copy_decl;
      ctx->cb.eh_lp_nr = 0;
      ctx->cb.transform_call_graph_edges = CB_CGE_MOVE;
      ctx->depth = 1;
    }

  ctx->cb.decl_map = pointer_map_create ();

  return ctx;
}

static gimple_seq maybe_catch_exception (gimple_seq);

/* Finalize task copyfn.  */

static void
finalize_task_copyfn (gimple task_stmt)
{
  struct function *child_cfun;
  tree child_fn, old_fn;
  gimple_seq seq, new_seq;
  gimple bind;

  child_fn = gimple_omp_task_copy_fn (task_stmt);
  if (child_fn == NULL_TREE)
    return;

  child_cfun = DECL_STRUCT_FUNCTION (child_fn);

  /* Inform the callgraph about the new function.  */
  DECL_STRUCT_FUNCTION (child_fn)->curr_properties
    = cfun->curr_properties;

  old_fn = current_function_decl;
  push_cfun (child_cfun);
  current_function_decl = child_fn;
  bind = gimplify_body (&DECL_SAVED_TREE (child_fn), child_fn, false);
  seq = gimple_seq_alloc ();
  gimple_seq_add_stmt (&seq, bind);
  new_seq = maybe_catch_exception (seq);
  if (new_seq != seq)
    {
      bind = gimple_build_bind (NULL, new_seq, NULL);
      seq = gimple_seq_alloc ();
      gimple_seq_add_stmt (&seq, bind);
    }
  gimple_set_body (child_fn, seq);
  pop_cfun ();
  current_function_decl = old_fn;

  cgraph_add_new_function (child_fn, false);
}

/* Destroy an omp_context data structure.  Called through the splay tree
   value delete callback.  */

static void
delete_omp_context (splay_tree_value value)
{
  omp_context *ctx = (omp_context *) value;

  pointer_map_destroy (ctx->cb.decl_map);

  if (ctx->field_map)
    splay_tree_delete (ctx->field_map);
  if (ctx->sfield_map)
    splay_tree_delete (ctx->sfield_map);

  /* We hijacked DECL_ABSTRACT_ORIGIN earlier.  We need to clear it before
     it produces corrupt debug information.  */
  if (ctx->record_type)
    {
      tree t;
      for (t = TYPE_FIELDS (ctx->record_type); t ; t = TREE_CHAIN (t))
	DECL_ABSTRACT_ORIGIN (t) = NULL;
    }
  if (ctx->srecord_type)
    {
      tree t;
      for (t = TYPE_FIELDS (ctx->srecord_type); t ; t = TREE_CHAIN (t))
	DECL_ABSTRACT_ORIGIN (t) = NULL;
    }

  if (is_task_ctx (ctx))
    finalize_task_copyfn (ctx->stmt);

  XDELETE (ctx);
}

/* Fix up RECEIVER_DECL with a type that has been remapped to the child
   context.  */

static void
fixup_child_record_type (omp_context *ctx)
{
  tree f, type = ctx->record_type;

  /* ??? It isn't sufficient to just call remap_type here, because
     variably_modified_type_p doesn't work the way we expect for
     record types.  Testing each field for whether it needs remapping
     and creating a new record by hand works, however.  */
  for (f = TYPE_FIELDS (type); f ; f = TREE_CHAIN (f))
    if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
      break;
  if (f)
    {
      tree name, new_fields = NULL;

      type = lang_hooks.types.make_type (RECORD_TYPE);
      name = DECL_NAME (TYPE_NAME (ctx->record_type));
      name = build_decl (DECL_SOURCE_LOCATION (ctx->receiver_decl),
			 TYPE_DECL, name, type);
      TYPE_NAME (type) = name;

      for (f = TYPE_FIELDS (ctx->record_type); f ; f = TREE_CHAIN (f))
	{
	  tree new_f = copy_node (f);
	  DECL_CONTEXT (new_f) = type;
	  TREE_TYPE (new_f) = remap_type (TREE_TYPE (f), &ctx->cb);
	  TREE_CHAIN (new_f) = new_fields;
	  walk_tree (&DECL_SIZE (new_f), copy_tree_body_r, &ctx->cb, NULL);
	  walk_tree (&DECL_SIZE_UNIT (new_f), copy_tree_body_r,
		     &ctx->cb, NULL);
	  walk_tree (&DECL_FIELD_OFFSET (new_f), copy_tree_body_r,
		     &ctx->cb, NULL);
	  new_fields = new_f;

	  /* Arrange to be able to look up the receiver field
	     given the sender field.  */
	  splay_tree_insert (ctx->field_map, (splay_tree_key) f,
			     (splay_tree_value) new_f);
	}
      TYPE_FIELDS (type) = nreverse (new_fields);
      layout_type (type);
    }

  TREE_TYPE (ctx->receiver_decl) = build_pointer_type (type);
}

/* Instantiate decls as necessary in CTX to satisfy the data sharing
   specified by CLAUSES.  */
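/* For example (illustrative only), for

       #pragma omp parallel shared (a) firstprivate (b) private (c)

   the loops below install a communication field plus a local
   replacement for A and for B, but only a local replacement for C,
   since a private variable never travels through .omp_data_s.  */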

static void
scan_sharing_clauses (tree clauses, omp_context *ctx)
{
  tree c, decl;
  bool scan_array_reductions = false;

  for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
    {
      bool by_ref;

      switch (OMP_CLAUSE_CODE (c))
	{
	case OMP_CLAUSE_PRIVATE:
	  decl = OMP_CLAUSE_DECL (c);
	  if (OMP_CLAUSE_PRIVATE_OUTER_REF (c))
	    goto do_private;
	  else if (!is_variable_sized (decl))
	    install_var_local (decl, ctx);
	  break;

	case OMP_CLAUSE_SHARED:
	  gcc_assert (is_taskreg_ctx (ctx));
	  decl = OMP_CLAUSE_DECL (c);
	  gcc_assert (!COMPLETE_TYPE_P (TREE_TYPE (decl))
		      || !is_variable_sized (decl));
	  /* Global variables don't need to be copied,
	     the receiver side will use them directly.  */
	  if (is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx)))
	    break;
	  by_ref = use_pointer_for_field (decl, ctx);
	  if (! TREE_READONLY (decl)
	      || TREE_ADDRESSABLE (decl)
	      || by_ref
	      || is_reference (decl))
	    {
	      install_var_field (decl, by_ref, 3, ctx);
	      install_var_local (decl, ctx);
	      break;
	    }
	  /* We don't need to copy const scalar vars back.  */
	  OMP_CLAUSE_SET_CODE (c, OMP_CLAUSE_FIRSTPRIVATE);
	  goto do_private;

	case OMP_CLAUSE_LASTPRIVATE:
	  /* Let the corresponding firstprivate clause create
	     the variable.  */
	  if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
	    break;
	  /* FALLTHRU */

	case OMP_CLAUSE_FIRSTPRIVATE:
	case OMP_CLAUSE_REDUCTION:
	  decl = OMP_CLAUSE_DECL (c);
	do_private:
	  if (is_variable_sized (decl))
	    {
	      if (is_task_ctx (ctx))
		install_var_field (decl, false, 1, ctx);
	      break;
	    }
	  else if (is_taskreg_ctx (ctx))
	    {
	      bool global
		= is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx));
	      by_ref = use_pointer_for_field (decl, NULL);

	      if (is_task_ctx (ctx)
		  && (global || by_ref || is_reference (decl)))
		{
		  install_var_field (decl, false, 1, ctx);
		  if (!global)
		    install_var_field (decl, by_ref, 2, ctx);
		}
	      else if (!global)
		install_var_field (decl, by_ref, 3, ctx);
	    }
	  install_var_local (decl, ctx);
	  break;

	case OMP_CLAUSE_COPYPRIVATE:
	  if (ctx->outer)
	    scan_omp_op (&OMP_CLAUSE_DECL (c), ctx->outer);
	  /* FALLTHRU */

	case OMP_CLAUSE_COPYIN:
	  decl = OMP_CLAUSE_DECL (c);
	  by_ref = use_pointer_for_field (decl, NULL);
	  install_var_field (decl, by_ref, 3, ctx);
	  break;

	case OMP_CLAUSE_DEFAULT:
	  ctx->default_kind = OMP_CLAUSE_DEFAULT_KIND (c);
	  break;

	case OMP_CLAUSE_IF:
	case OMP_CLAUSE_NUM_THREADS:
	case OMP_CLAUSE_SCHEDULE:
	  if (ctx->outer)
	    scan_omp_op (&OMP_CLAUSE_OPERAND (c, 0), ctx->outer);
	  break;

	case OMP_CLAUSE_NOWAIT:
	case OMP_CLAUSE_ORDERED:
	case OMP_CLAUSE_COLLAPSE:
	case OMP_CLAUSE_UNTIED:
	  break;

	default:
	  gcc_unreachable ();
	}
    }

  for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
    {
      switch (OMP_CLAUSE_CODE (c))
	{
	case OMP_CLAUSE_LASTPRIVATE:
	  /* Let the corresponding firstprivate clause create
	     the variable.  */
	  if (OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c))
	    scan_array_reductions = true;
	  if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
	    break;
	  /* FALLTHRU */

	case OMP_CLAUSE_PRIVATE:
	case OMP_CLAUSE_FIRSTPRIVATE:
	case OMP_CLAUSE_REDUCTION:
	  decl = OMP_CLAUSE_DECL (c);
	  if (is_variable_sized (decl))
	    install_var_local (decl, ctx);
	  fixup_remapped_decl (decl, ctx,
			       OMP_CLAUSE_CODE (c) == OMP_CLAUSE_PRIVATE
			       && OMP_CLAUSE_PRIVATE_DEBUG (c));
	  if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
	      && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
	    scan_array_reductions = true;
	  break;

	case OMP_CLAUSE_SHARED:
	  decl = OMP_CLAUSE_DECL (c);
	  if (! is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx)))
	    fixup_remapped_decl (decl, ctx, false);
	  break;

	case OMP_CLAUSE_COPYPRIVATE:
	case OMP_CLAUSE_COPYIN:
	case OMP_CLAUSE_DEFAULT:
	case OMP_CLAUSE_IF:
	case OMP_CLAUSE_NUM_THREADS:
	case OMP_CLAUSE_SCHEDULE:
	case OMP_CLAUSE_NOWAIT:
	case OMP_CLAUSE_ORDERED:
	case OMP_CLAUSE_COLLAPSE:
	case OMP_CLAUSE_UNTIED:
	  break;

	default:
	  gcc_unreachable ();
	}
    }

  if (scan_array_reductions)
    for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
      if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
	  && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
	{
	  scan_omp (OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c), ctx);
	  scan_omp (OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c), ctx);
	}
      else if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE
	       && OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c))
	scan_omp (OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c), ctx);
}

/* Create a new name for the omp child function.  Returns an identifier.  */

static GTY(()) unsigned int tmp_ompfn_id_num;

static tree
create_omp_child_function_name (bool task_copy)
{
  tree name = DECL_ASSEMBLER_NAME (current_function_decl);
  size_t len = IDENTIFIER_LENGTH (name);
  char *tmp_name, *prefix;
  const char *suffix;

  suffix = task_copy ? "_omp_cpyfn" : "_omp_fn";
  prefix = XALLOCAVEC (char, len + strlen (suffix) + 1);
  memcpy (prefix, IDENTIFIER_POINTER (name), len);
  strcpy (prefix + len, suffix);
#ifndef NO_DOT_IN_LABEL
  prefix[len] = '.';
#elif !defined NO_DOLLAR_IN_LABEL
  prefix[len] = '$';
#endif
  ASM_FORMAT_PRIVATE_NAME (tmp_name, prefix, tmp_ompfn_id_num++);
  return get_identifier (tmp_name);
}
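
/* E.g. for a parent function "foo", the code above yields names such
   as "foo.omp_fn.0" on targets that allow dots in labels: the leading
   underscore of the suffix is overwritten by '.', and
   ASM_FORMAT_PRIVATE_NAME appends the sequence number (this matches
   the "bar.omp_fn.0" naming in the combined-parallel comment above).  */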

/* Build a decl for the omp child function.  It'll not contain a body
   yet, just the bare decl.  */

static void
create_omp_child_function (omp_context *ctx, bool task_copy)
{
  tree decl, type, name, t;

  name = create_omp_child_function_name (task_copy);
  if (task_copy)
    type = build_function_type_list (void_type_node, ptr_type_node,
				     ptr_type_node, NULL_TREE);
  else
    type = build_function_type_list (void_type_node, ptr_type_node, NULL_TREE);

  decl = build_decl (gimple_location (ctx->stmt),
		     FUNCTION_DECL, name, type);

  if (!task_copy)
    ctx->cb.dst_fn = decl;
  else
    gimple_omp_task_set_copy_fn (ctx->stmt, decl);

  TREE_STATIC (decl) = 1;
  TREE_USED (decl) = 1;
  DECL_ARTIFICIAL (decl) = 1;
  DECL_IGNORED_P (decl) = 0;
  TREE_PUBLIC (decl) = 0;
  DECL_UNINLINABLE (decl) = 1;
  DECL_EXTERNAL (decl) = 0;
  DECL_CONTEXT (decl) = NULL_TREE;
  DECL_INITIAL (decl) = make_node (BLOCK);

  t = build_decl (DECL_SOURCE_LOCATION (decl),
		  RESULT_DECL, NULL_TREE, void_type_node);
  DECL_ARTIFICIAL (t) = 1;
  DECL_IGNORED_P (t) = 1;
  DECL_CONTEXT (t) = decl;
  DECL_RESULT (decl) = t;

  t = build_decl (DECL_SOURCE_LOCATION (decl),
		  PARM_DECL, get_identifier (".omp_data_i"), ptr_type_node);
  DECL_ARTIFICIAL (t) = 1;
  DECL_ARG_TYPE (t) = ptr_type_node;
  DECL_CONTEXT (t) = current_function_decl;
  TREE_USED (t) = 1;
  DECL_ARGUMENTS (decl) = t;
  if (!task_copy)
    ctx->receiver_decl = t;
  else
    {
      t = build_decl (DECL_SOURCE_LOCATION (decl),
		      PARM_DECL, get_identifier (".omp_data_o"),
		      ptr_type_node);
      DECL_ARTIFICIAL (t) = 1;
      DECL_ARG_TYPE (t) = ptr_type_node;
      DECL_CONTEXT (t) = current_function_decl;
      TREE_USED (t) = 1;
      TREE_ADDRESSABLE (t) = 1;
      TREE_CHAIN (t) = DECL_ARGUMENTS (decl);
      DECL_ARGUMENTS (decl) = t;
    }

  /* Allocate memory for the function structure.  The call to
     allocate_struct_function clobbers CFUN, so we need to restore
     it afterward.  */
  push_struct_function (decl);
  cfun->function_end_locus = gimple_location (ctx->stmt);
  pop_cfun ();
}


/* Scan an OpenMP parallel directive.  */

static void
scan_omp_parallel (gimple_stmt_iterator *gsi, omp_context *outer_ctx)
{
  omp_context *ctx;
  tree name;
  gimple stmt = gsi_stmt (*gsi);

  /* Ignore parallel directives with empty bodies, unless there
     are copyin clauses.  */
  if (optimize > 0
      && empty_body_p (gimple_omp_body (stmt))
      && find_omp_clause (gimple_omp_parallel_clauses (stmt),
			  OMP_CLAUSE_COPYIN) == NULL)
    {
      gsi_replace (gsi, gimple_build_nop (), false);
      return;
    }

  ctx = new_omp_context (stmt, outer_ctx);
  if (taskreg_nesting_level > 1)
    ctx->is_nested = true;
  ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
  ctx->default_kind = OMP_CLAUSE_DEFAULT_SHARED;
  ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
  name = create_tmp_var_name (".omp_data_s");
  name = build_decl (gimple_location (stmt),
		     TYPE_DECL, name, ctx->record_type);
  TYPE_NAME (ctx->record_type) = name;
  create_omp_child_function (ctx, false);
  gimple_omp_parallel_set_child_fn (stmt, ctx->cb.dst_fn);

  scan_sharing_clauses (gimple_omp_parallel_clauses (stmt), ctx);
  scan_omp (gimple_omp_body (stmt), ctx);

  if (TYPE_FIELDS (ctx->record_type) == NULL)
    ctx->record_type = ctx->receiver_decl = NULL;
  else
    {
      layout_type (ctx->record_type);
      fixup_child_record_type (ctx);
    }
}
1674
a68ab351
JJ
1675/* Scan an OpenMP task directive. */
1676
1677static void
726a989a 1678scan_omp_task (gimple_stmt_iterator *gsi, omp_context *outer_ctx)
a68ab351
JJ
1679{
1680 omp_context *ctx;
726a989a
RB
1681 tree name, t;
1682 gimple stmt = gsi_stmt (*gsi);
db3927fb 1683 location_t loc = gimple_location (stmt);
a68ab351
JJ
1684
1685 /* Ignore task directives with empty bodies. */
1686 if (optimize > 0
726a989a 1687 && empty_body_p (gimple_omp_body (stmt)))
a68ab351 1688 {
726a989a 1689 gsi_replace (gsi, gimple_build_nop (), false);
a68ab351
JJ
1690 return;
1691 }
1692
726a989a 1693 ctx = new_omp_context (stmt, outer_ctx);
a68ab351
JJ
1694 if (taskreg_nesting_level > 1)
1695 ctx->is_nested = true;
1696 ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
1697 ctx->default_kind = OMP_CLAUSE_DEFAULT_SHARED;
1698 ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
1699 name = create_tmp_var_name (".omp_data_s");
c2255bc4
AH
1700 name = build_decl (gimple_location (stmt),
1701 TYPE_DECL, name, ctx->record_type);
a68ab351
JJ
1702 TYPE_NAME (ctx->record_type) = name;
1703 create_omp_child_function (ctx, false);
726a989a 1704 gimple_omp_task_set_child_fn (stmt, ctx->cb.dst_fn);
a68ab351 1705
726a989a 1706 scan_sharing_clauses (gimple_omp_task_clauses (stmt), ctx);
a68ab351
JJ
1707
1708 if (ctx->srecord_type)
1709 {
1710 name = create_tmp_var_name (".omp_data_a");
c2255bc4
AH
1711 name = build_decl (gimple_location (stmt),
1712 TYPE_DECL, name, ctx->srecord_type);
a68ab351
JJ
1713 TYPE_NAME (ctx->srecord_type) = name;
1714 create_omp_child_function (ctx, true);
1715 }
1716
726a989a 1717 scan_omp (gimple_omp_body (stmt), ctx);
a68ab351
JJ
1718
1719 if (TYPE_FIELDS (ctx->record_type) == NULL)
1720 {
1721 ctx->record_type = ctx->receiver_decl = NULL;
726a989a
RB
1722 t = build_int_cst (long_integer_type_node, 0);
1723 gimple_omp_task_set_arg_size (stmt, t);
1724 t = build_int_cst (long_integer_type_node, 1);
1725 gimple_omp_task_set_arg_align (stmt, t);
a68ab351
JJ
1726 }
1727 else
1728 {
1729 tree *p, vla_fields = NULL_TREE, *q = &vla_fields;
1730 /* Move VLA fields to the end. */
1731 p = &TYPE_FIELDS (ctx->record_type);
1732 while (*p)
1733 if (!TYPE_SIZE_UNIT (TREE_TYPE (*p))
1734 || ! TREE_CONSTANT (TYPE_SIZE_UNIT (TREE_TYPE (*p))))
1735 {
1736 *q = *p;
1737 *p = TREE_CHAIN (*p);
1738 TREE_CHAIN (*q) = NULL_TREE;
1739 q = &TREE_CHAIN (*q);
1740 }
1741 else
1742 p = &TREE_CHAIN (*p);
1743 *p = vla_fields;
1744 layout_type (ctx->record_type);
1745 fixup_child_record_type (ctx);
1746 if (ctx->srecord_type)
1747 layout_type (ctx->srecord_type);
db3927fb 1748 t = fold_convert_loc (loc, long_integer_type_node,
a68ab351 1749 TYPE_SIZE_UNIT (ctx->record_type));
726a989a
RB
1750 gimple_omp_task_set_arg_size (stmt, t);
1751 t = build_int_cst (long_integer_type_node,
a68ab351 1752 TYPE_ALIGN_UNIT (ctx->record_type));
726a989a 1753 gimple_omp_task_set_arg_align (stmt, t);
a68ab351
JJ
1754 }
1755}
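/* Editorial note (an assumption, not stated in the original): moving the
   variable-length fields behind all fixed-size ones keeps every constant
   field offset usable, e.g.

     struct .omp_data_s { int n; char vla[]; };   -- VLA field last

   so the arg_size/arg_align computed above describe a block that
   GOMP_task can allocate and copy, with the variable-sized tail laid
   out at the end.  */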
1756
953ff289 1757
50674e96 1758/* Scan an OpenMP loop directive. */
953ff289
DN
1759
1760static void
726a989a 1761scan_omp_for (gimple stmt, omp_context *outer_ctx)
953ff289 1762{
50674e96 1763 omp_context *ctx;
726a989a 1764 size_t i;
953ff289 1765
50674e96 1766 ctx = new_omp_context (stmt, outer_ctx);
953ff289 1767
726a989a 1768 scan_sharing_clauses (gimple_omp_for_clauses (stmt), ctx);
953ff289 1769
726a989a
RB
1770 scan_omp (gimple_omp_for_pre_body (stmt), ctx);
1771 for (i = 0; i < gimple_omp_for_collapse (stmt); i++)
a68ab351 1772 {
726a989a
RB
1773 scan_omp_op (gimple_omp_for_index_ptr (stmt, i), ctx);
1774 scan_omp_op (gimple_omp_for_initial_ptr (stmt, i), ctx);
1775 scan_omp_op (gimple_omp_for_final_ptr (stmt, i), ctx);
1776 scan_omp_op (gimple_omp_for_incr_ptr (stmt, i), ctx);
a68ab351 1777 }
726a989a 1778 scan_omp (gimple_omp_body (stmt), ctx);
953ff289
DN
1779}
1780
1781/* Scan an OpenMP sections directive. */
1782
1783static void
726a989a 1784scan_omp_sections (gimple stmt, omp_context *outer_ctx)
953ff289 1785{
953ff289
DN
1786 omp_context *ctx;
1787
1788 ctx = new_omp_context (stmt, outer_ctx);
726a989a
RB
1789 scan_sharing_clauses (gimple_omp_sections_clauses (stmt), ctx);
1790 scan_omp (gimple_omp_body (stmt), ctx);
953ff289
DN
1791}
1792
1793/* Scan an OpenMP single directive. */
1794
1795static void
726a989a 1796scan_omp_single (gimple stmt, omp_context *outer_ctx)
953ff289 1797{
953ff289
DN
1798 omp_context *ctx;
1799 tree name;
1800
1801 ctx = new_omp_context (stmt, outer_ctx);
1802 ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
1803 ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
1804 name = create_tmp_var_name (".omp_copy_s");
c2255bc4
AH
1805 name = build_decl (gimple_location (stmt),
1806 TYPE_DECL, name, ctx->record_type);
953ff289
DN
1807 TYPE_NAME (ctx->record_type) = name;
1808
726a989a
RB
1809 scan_sharing_clauses (gimple_omp_single_clauses (stmt), ctx);
1810 scan_omp (gimple_omp_body (stmt), ctx);
953ff289
DN
1811
1812 if (TYPE_FIELDS (ctx->record_type) == NULL)
1813 ctx->record_type = NULL;
1814 else
1815 layout_type (ctx->record_type);
1816}
1817
953ff289 1818
a6fc8e21
JJ
1819/* Check OpenMP nesting restrictions. */
1820static void
726a989a 1821check_omp_nesting_restrictions (gimple stmt, omp_context *ctx)
a6fc8e21 1822{
726a989a 1823 switch (gimple_code (stmt))
a6fc8e21 1824 {
726a989a
RB
1825 case GIMPLE_OMP_FOR:
1826 case GIMPLE_OMP_SECTIONS:
1827 case GIMPLE_OMP_SINGLE:
1828 case GIMPLE_CALL:
a6fc8e21 1829 for (; ctx != NULL; ctx = ctx->outer)
726a989a 1830 switch (gimple_code (ctx->stmt))
a6fc8e21 1831 {
726a989a
RB
1832 case GIMPLE_OMP_FOR:
1833 case GIMPLE_OMP_SECTIONS:
1834 case GIMPLE_OMP_SINGLE:
1835 case GIMPLE_OMP_ORDERED:
1836 case GIMPLE_OMP_MASTER:
1837 case GIMPLE_OMP_TASK:
1838 if (is_gimple_call (stmt))
a68ab351
JJ
1839 {
1840 warning (0, "barrier region may not be closely nested inside "
1841 "of work-sharing, critical, ordered, master or "
1842 "explicit task region");
1843 return;
1844 }
a6fc8e21 1845 warning (0, "work-sharing region may not be closely nested inside "
a68ab351
JJ
1846 "of work-sharing, critical, ordered, master or explicit "
1847 "task region");
a6fc8e21 1848 return;
726a989a 1849 case GIMPLE_OMP_PARALLEL:
a6fc8e21
JJ
1850 return;
1851 default:
1852 break;
1853 }
1854 break;
726a989a 1855 case GIMPLE_OMP_MASTER:
a6fc8e21 1856 for (; ctx != NULL; ctx = ctx->outer)
726a989a 1857 switch (gimple_code (ctx->stmt))
a6fc8e21 1858 {
726a989a
RB
1859 case GIMPLE_OMP_FOR:
1860 case GIMPLE_OMP_SECTIONS:
1861 case GIMPLE_OMP_SINGLE:
1862 case GIMPLE_OMP_TASK:
a6fc8e21 1863 warning (0, "master region may not be closely nested inside "
a68ab351 1864 "of work-sharing or explicit task region");
a6fc8e21 1865 return;
726a989a 1866 case GIMPLE_OMP_PARALLEL:
a6fc8e21
JJ
1867 return;
1868 default:
1869 break;
1870 }
1871 break;
726a989a 1872 case GIMPLE_OMP_ORDERED:
a6fc8e21 1873 for (; ctx != NULL; ctx = ctx->outer)
726a989a 1874 switch (gimple_code (ctx->stmt))
a6fc8e21 1875 {
726a989a
RB
1876 case GIMPLE_OMP_CRITICAL:
1877 case GIMPLE_OMP_TASK:
a6fc8e21 1878 warning (0, "ordered region may not be closely nested inside "
a68ab351 1879 "of critical or explicit task region");
a6fc8e21 1880 return;
726a989a
RB
1881 case GIMPLE_OMP_FOR:
1882 if (find_omp_clause (gimple_omp_for_clauses (ctx->stmt),
a6fc8e21
JJ
1883 OMP_CLAUSE_ORDERED) == NULL)
1884 warning (0, "ordered region must be closely nested inside "
1885 "a loop region with an ordered clause");
1886 return;
726a989a 1887 case GIMPLE_OMP_PARALLEL:
a6fc8e21
JJ
1888 return;
1889 default:
1890 break;
1891 }
1892 break;
726a989a 1893 case GIMPLE_OMP_CRITICAL:
a6fc8e21 1894 for (; ctx != NULL; ctx = ctx->outer)
726a989a
RB
1895 if (gimple_code (ctx->stmt) == GIMPLE_OMP_CRITICAL
1896 && (gimple_omp_critical_name (stmt)
1897 == gimple_omp_critical_name (ctx->stmt)))
a6fc8e21
JJ
1898 {
1899 warning (0, "critical region may not be nested inside a critical "
1900 "region with the same name");
1901 return;
1902 }
1903 break;
1904 default:
1905 break;
1906 }
1907}
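/* Editorial sketch (hypothetical user code, not from the original
   source): the shape of nesting the warnings above fire on.  The single
   construct below is closely nested in the for construct with no
   intervening parallel region:

     #pragma omp parallel
     #pragma omp for
       for (i = 0; i < n; i++)
         {
           #pragma omp single    -- work-sharing in work-sharing: warned
           a[i] = i;
         }

   Wrapping the single in its own "#pragma omp parallel" makes the
   nesting legal, which is why each context walk above stops as soon as
   it reaches a GIMPLE_OMP_PARALLEL.  */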
1908
1909
726a989a
RB
 1910/* Helper function for scan_omp.
1911
1912 Callback for walk_tree or operators in walk_gimple_stmt used to
1913 scan for OpenMP directives in TP. */
953ff289
DN
1914
1915static tree
726a989a 1916scan_omp_1_op (tree *tp, int *walk_subtrees, void *data)
953ff289 1917{
d3bfe4de
KG
1918 struct walk_stmt_info *wi = (struct walk_stmt_info *) data;
1919 omp_context *ctx = (omp_context *) wi->info;
953ff289
DN
1920 tree t = *tp;
1921
726a989a
RB
1922 switch (TREE_CODE (t))
1923 {
1924 case VAR_DECL:
1925 case PARM_DECL:
1926 case LABEL_DECL:
1927 case RESULT_DECL:
1928 if (ctx)
1929 *tp = remap_decl (t, &ctx->cb);
1930 break;
1931
1932 default:
1933 if (ctx && TYPE_P (t))
1934 *tp = remap_type (t, &ctx->cb);
1935 else if (!DECL_P (t))
a900ae6b
JJ
1936 {
1937 *walk_subtrees = 1;
1938 if (ctx)
1939 TREE_TYPE (t) = remap_type (TREE_TYPE (t), &ctx->cb);
1940 }
726a989a
RB
1941 break;
1942 }
1943
1944 return NULL_TREE;
1945}
1946
1947
1948/* Helper function for scan_omp.
1949
1950 Callback for walk_gimple_stmt used to scan for OpenMP directives in
1951 the current statement in GSI. */
1952
1953static tree
1954scan_omp_1_stmt (gimple_stmt_iterator *gsi, bool *handled_ops_p,
1955 struct walk_stmt_info *wi)
1956{
1957 gimple stmt = gsi_stmt (*gsi);
1958 omp_context *ctx = (omp_context *) wi->info;
1959
1960 if (gimple_has_location (stmt))
1961 input_location = gimple_location (stmt);
953ff289 1962
a6fc8e21 1963 /* Check the OpenMP nesting restrictions. */
a68ab351
JJ
1964 if (ctx != NULL)
1965 {
726a989a
RB
1966 if (is_gimple_omp (stmt))
1967 check_omp_nesting_restrictions (stmt, ctx);
1968 else if (is_gimple_call (stmt))
a68ab351 1969 {
726a989a 1970 tree fndecl = gimple_call_fndecl (stmt);
a68ab351
JJ
1971 if (fndecl && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL
1972 && DECL_FUNCTION_CODE (fndecl) == BUILT_IN_GOMP_BARRIER)
726a989a 1973 check_omp_nesting_restrictions (stmt, ctx);
a68ab351
JJ
1974 }
1975 }
a6fc8e21 1976
726a989a
RB
1977 *handled_ops_p = true;
1978
1979 switch (gimple_code (stmt))
953ff289 1980 {
726a989a 1981 case GIMPLE_OMP_PARALLEL:
a68ab351 1982 taskreg_nesting_level++;
726a989a 1983 scan_omp_parallel (gsi, ctx);
a68ab351
JJ
1984 taskreg_nesting_level--;
1985 break;
1986
726a989a 1987 case GIMPLE_OMP_TASK:
a68ab351 1988 taskreg_nesting_level++;
726a989a 1989 scan_omp_task (gsi, ctx);
a68ab351 1990 taskreg_nesting_level--;
953ff289
DN
1991 break;
1992
726a989a
RB
1993 case GIMPLE_OMP_FOR:
1994 scan_omp_for (stmt, ctx);
953ff289
DN
1995 break;
1996
726a989a
RB
1997 case GIMPLE_OMP_SECTIONS:
1998 scan_omp_sections (stmt, ctx);
953ff289
DN
1999 break;
2000
726a989a
RB
2001 case GIMPLE_OMP_SINGLE:
2002 scan_omp_single (stmt, ctx);
953ff289
DN
2003 break;
2004
726a989a
RB
2005 case GIMPLE_OMP_SECTION:
2006 case GIMPLE_OMP_MASTER:
2007 case GIMPLE_OMP_ORDERED:
2008 case GIMPLE_OMP_CRITICAL:
2009 ctx = new_omp_context (stmt, ctx);
2010 scan_omp (gimple_omp_body (stmt), ctx);
953ff289
DN
2011 break;
2012
726a989a 2013 case GIMPLE_BIND:
953ff289
DN
2014 {
2015 tree var;
953ff289 2016
726a989a
RB
2017 *handled_ops_p = false;
2018 if (ctx)
2019 for (var = gimple_bind_vars (stmt); var ; var = TREE_CHAIN (var))
2020 insert_decl_map (&ctx->cb, var, var);
953ff289
DN
2021 }
2022 break;
953ff289 2023 default:
726a989a 2024 *handled_ops_p = false;
953ff289
DN
2025 break;
2026 }
2027
2028 return NULL_TREE;
2029}
2030
2031
726a989a
RB
2032/* Scan all the statements starting at the current statement. CTX
2033 contains context information about the OpenMP directives and
2034 clauses found during the scan. */
953ff289
DN
2035
2036static void
726a989a 2037scan_omp (gimple_seq body, omp_context *ctx)
953ff289
DN
2038{
2039 location_t saved_location;
2040 struct walk_stmt_info wi;
2041
2042 memset (&wi, 0, sizeof (wi));
953ff289 2043 wi.info = ctx;
953ff289
DN
2044 wi.want_locations = true;
2045
2046 saved_location = input_location;
726a989a 2047 walk_gimple_seq (body, scan_omp_1_stmt, scan_omp_1_op, &wi);
953ff289
DN
2048 input_location = saved_location;
2049}
2050\f
2051/* Re-gimplification and code generation routines. */
2052
2053/* Build a call to GOMP_barrier. */
2054
917948d3
ZD
2055static tree
2056build_omp_barrier (void)
953ff289 2057{
917948d3 2058 return build_call_expr (built_in_decls[BUILT_IN_GOMP_BARRIER], 0);
953ff289
DN
2059}
2060
2061/* If a context was created for STMT when it was scanned, return it. */
2062
2063static omp_context *
726a989a 2064maybe_lookup_ctx (gimple stmt)
953ff289
DN
2065{
2066 splay_tree_node n;
2067 n = splay_tree_lookup (all_contexts, (splay_tree_key) stmt);
2068 return n ? (omp_context *) n->value : NULL;
2069}
2070
50674e96
DN
2071
2072/* Find the mapping for DECL in CTX or the immediately enclosing
2073 context that has a mapping for DECL.
2074
2075 If CTX is a nested parallel directive, we may have to use the decl
2076 mappings created in CTX's parent context. Suppose that we have the
 2077 following parallel nesting (variable UIDs shown for clarity):
2078
2079 iD.1562 = 0;
2080 #omp parallel shared(iD.1562) -> outer parallel
2081 iD.1562 = iD.1562 + 1;
2082
2083 #omp parallel shared (iD.1562) -> inner parallel
2084 iD.1562 = iD.1562 - 1;
2085
2086 Each parallel structure will create a distinct .omp_data_s structure
2087 for copying iD.1562 in/out of the directive:
2088
2089 outer parallel .omp_data_s.1.i -> iD.1562
2090 inner parallel .omp_data_s.2.i -> iD.1562
2091
2092 A shared variable mapping will produce a copy-out operation before
2093 the parallel directive and a copy-in operation after it. So, in
2094 this case we would have:
2095
2096 iD.1562 = 0;
2097 .omp_data_o.1.i = iD.1562;
2098 #omp parallel shared(iD.1562) -> outer parallel
2099 .omp_data_i.1 = &.omp_data_o.1
2100 .omp_data_i.1->i = .omp_data_i.1->i + 1;
2101
2102 .omp_data_o.2.i = iD.1562; -> **
2103 #omp parallel shared(iD.1562) -> inner parallel
2104 .omp_data_i.2 = &.omp_data_o.2
2105 .omp_data_i.2->i = .omp_data_i.2->i - 1;
2106
2107
2108 ** This is a problem. The symbol iD.1562 cannot be referenced
2109 inside the body of the outer parallel region. But since we are
2110 emitting this copy operation while expanding the inner parallel
2111 directive, we need to access the CTX structure of the outer
2112 parallel directive to get the correct mapping:
2113
2114 .omp_data_o.2.i = .omp_data_i.1->i
2115
2116 Since there may be other workshare or parallel directives enclosing
2117 the parallel directive, it may be necessary to walk up the context
2118 parent chain. This is not a problem in general because nested
2119 parallelism happens only rarely. */
2120
2121static tree
2122lookup_decl_in_outer_ctx (tree decl, omp_context *ctx)
2123{
2124 tree t;
2125 omp_context *up;
2126
50674e96
DN
2127 for (up = ctx->outer, t = NULL; up && t == NULL; up = up->outer)
2128 t = maybe_lookup_decl (decl, up);
2129
d2dda7fe 2130 gcc_assert (!ctx->is_nested || t || is_global_var (decl));
50674e96 2131
64964499 2132 return t ? t : decl;
50674e96
DN
2133}
2134
2135
8ca5b2a2
JJ
2136/* Similar to lookup_decl_in_outer_ctx, but return DECL if not found
2137 in outer contexts. */
2138
2139static tree
2140maybe_lookup_decl_in_outer_ctx (tree decl, omp_context *ctx)
2141{
2142 tree t = NULL;
2143 omp_context *up;
2144
d2dda7fe
JJ
2145 for (up = ctx->outer, t = NULL; up && t == NULL; up = up->outer)
2146 t = maybe_lookup_decl (decl, up);
8ca5b2a2
JJ
2147
2148 return t ? t : decl;
2149}
2150
2151
953ff289
DN
2152/* Construct the initialization value for reduction CLAUSE. */
2153
2154tree
2155omp_reduction_init (tree clause, tree type)
2156{
db3927fb 2157 location_t loc = OMP_CLAUSE_LOCATION (clause);
953ff289
DN
2158 switch (OMP_CLAUSE_REDUCTION_CODE (clause))
2159 {
2160 case PLUS_EXPR:
2161 case MINUS_EXPR:
2162 case BIT_IOR_EXPR:
2163 case BIT_XOR_EXPR:
2164 case TRUTH_OR_EXPR:
2165 case TRUTH_ORIF_EXPR:
2166 case TRUTH_XOR_EXPR:
2167 case NE_EXPR:
db3927fb 2168 return fold_convert_loc (loc, type, integer_zero_node);
953ff289
DN
2169
2170 case MULT_EXPR:
2171 case TRUTH_AND_EXPR:
2172 case TRUTH_ANDIF_EXPR:
2173 case EQ_EXPR:
db3927fb 2174 return fold_convert_loc (loc, type, integer_one_node);
953ff289
DN
2175
2176 case BIT_AND_EXPR:
db3927fb 2177 return fold_convert_loc (loc, type, integer_minus_one_node);
953ff289
DN
2178
2179 case MAX_EXPR:
2180 if (SCALAR_FLOAT_TYPE_P (type))
2181 {
2182 REAL_VALUE_TYPE max, min;
2183 if (HONOR_INFINITIES (TYPE_MODE (type)))
2184 {
2185 real_inf (&max);
2186 real_arithmetic (&min, NEGATE_EXPR, &max, NULL);
2187 }
2188 else
2189 real_maxval (&min, 1, TYPE_MODE (type));
2190 return build_real (type, min);
2191 }
2192 else
2193 {
2194 gcc_assert (INTEGRAL_TYPE_P (type));
2195 return TYPE_MIN_VALUE (type);
2196 }
2197
2198 case MIN_EXPR:
2199 if (SCALAR_FLOAT_TYPE_P (type))
2200 {
2201 REAL_VALUE_TYPE max;
2202 if (HONOR_INFINITIES (TYPE_MODE (type)))
2203 real_inf (&max);
2204 else
2205 real_maxval (&max, 0, TYPE_MODE (type));
2206 return build_real (type, max);
2207 }
2208 else
2209 {
2210 gcc_assert (INTEGRAL_TYPE_P (type));
2211 return TYPE_MAX_VALUE (type);
2212 }
2213
2214 default:
2215 gcc_unreachable ();
2216 }
2217}
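/* Editorial summary of the cases above (assuming a 32-bit int and an
   IEEE float with infinities honored), e.g.:

     +, -, |, ^, ||, != : 0
     *, &&, ==          : 1
     &                  : ~0 (all ones)
     max                : INT_MIN for int, -Inf for float
     min                : INT_MAX for int, +Inf for float

   With -ffinite-math-only the max/min identities fall back to the most
   negative/largest representable finite value via real_maxval.  */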
2218
2219/* Generate code to implement the input clauses, FIRSTPRIVATE and COPYIN,
2220 from the receiver (aka child) side and initializers for REFERENCE_TYPE
2221 private variables. Initialization statements go in ILIST, while calls
2222 to destructors go in DLIST. */
2223
2224static void
726a989a 2225lower_rec_input_clauses (tree clauses, gimple_seq *ilist, gimple_seq *dlist,
3d55c64b 2226 omp_context *ctx)
953ff289 2227{
726a989a 2228 gimple_stmt_iterator diter;
5039610b 2229 tree c, dtor, copyin_seq, x, ptr;
953ff289 2230 bool copyin_by_ref = false;
8ca5b2a2 2231 bool lastprivate_firstprivate = false;
953ff289
DN
2232 int pass;
2233
726a989a
RB
2234 *dlist = gimple_seq_alloc ();
2235 diter = gsi_start (*dlist);
953ff289
DN
2236 copyin_seq = NULL;
2237
2238 /* Do all the fixed sized types in the first pass, and the variable sized
2239 types in the second pass. This makes sure that the scalar arguments to
2240 the variable sized types are processed before we use them in the
2241 variable sized operations. */
2242 for (pass = 0; pass < 2; ++pass)
2243 {
2244 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
2245 {
aaf46ef9 2246 enum omp_clause_code c_kind = OMP_CLAUSE_CODE (c);
953ff289
DN
2247 tree var, new_var;
2248 bool by_ref;
db3927fb 2249 location_t clause_loc = OMP_CLAUSE_LOCATION (c);
953ff289
DN
2250
2251 switch (c_kind)
2252 {
2253 case OMP_CLAUSE_PRIVATE:
2254 if (OMP_CLAUSE_PRIVATE_DEBUG (c))
2255 continue;
2256 break;
2257 case OMP_CLAUSE_SHARED:
8ca5b2a2
JJ
2258 if (maybe_lookup_decl (OMP_CLAUSE_DECL (c), ctx) == NULL)
2259 {
2260 gcc_assert (is_global_var (OMP_CLAUSE_DECL (c)));
2261 continue;
2262 }
953ff289 2263 case OMP_CLAUSE_FIRSTPRIVATE:
953ff289
DN
2264 case OMP_CLAUSE_COPYIN:
2265 case OMP_CLAUSE_REDUCTION:
2266 break;
077b0dfb 2267 case OMP_CLAUSE_LASTPRIVATE:
8ca5b2a2
JJ
2268 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
2269 {
2270 lastprivate_firstprivate = true;
2271 if (pass != 0)
2272 continue;
2273 }
077b0dfb 2274 break;
953ff289
DN
2275 default:
2276 continue;
2277 }
2278
2279 new_var = var = OMP_CLAUSE_DECL (c);
2280 if (c_kind != OMP_CLAUSE_COPYIN)
2281 new_var = lookup_decl (var, ctx);
2282
2283 if (c_kind == OMP_CLAUSE_SHARED || c_kind == OMP_CLAUSE_COPYIN)
2284 {
2285 if (pass != 0)
2286 continue;
2287 }
953ff289
DN
2288 else if (is_variable_sized (var))
2289 {
50674e96
DN
2290 /* For variable sized types, we need to allocate the
2291 actual storage here. Call alloca and store the
2292 result in the pointer decl that we created elsewhere. */
953ff289
DN
2293 if (pass == 0)
2294 continue;
2295
a68ab351
JJ
2296 if (c_kind != OMP_CLAUSE_FIRSTPRIVATE || !is_task_ctx (ctx))
2297 {
726a989a
RB
2298 gimple stmt;
2299 tree tmp;
2300
a68ab351
JJ
2301 ptr = DECL_VALUE_EXPR (new_var);
2302 gcc_assert (TREE_CODE (ptr) == INDIRECT_REF);
2303 ptr = TREE_OPERAND (ptr, 0);
2304 gcc_assert (DECL_P (ptr));
2305 x = TYPE_SIZE_UNIT (TREE_TYPE (new_var));
726a989a
RB
2306
 2307 /* void *tmp = __builtin_alloca (x); */
2308 stmt
2309 = gimple_build_call (built_in_decls[BUILT_IN_ALLOCA], 1, x);
2310 tmp = create_tmp_var_raw (ptr_type_node, NULL);
2311 gimple_add_tmp_var (tmp);
2312 gimple_call_set_lhs (stmt, tmp);
2313
2314 gimple_seq_add_stmt (ilist, stmt);
2315
db3927fb 2316 x = fold_convert_loc (clause_loc, TREE_TYPE (ptr), tmp);
726a989a 2317 gimplify_assign (ptr, x, ilist);
a68ab351 2318 }
953ff289 2319 }
953ff289
DN
2320 else if (is_reference (var))
2321 {
50674e96
DN
2322 /* For references that are being privatized for Fortran,
2323 allocate new backing storage for the new pointer
2324 variable. This allows us to avoid changing all the
2325 code that expects a pointer to something that expects
2326 a direct variable. Note that this doesn't apply to
2327 C++, since reference types are disallowed in data
077b0dfb
JJ
2328 sharing clauses there, except for NRV optimized
2329 return values. */
953ff289
DN
2330 if (pass == 0)
2331 continue;
2332
2333 x = TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (new_var)));
a68ab351
JJ
2334 if (c_kind == OMP_CLAUSE_FIRSTPRIVATE && is_task_ctx (ctx))
2335 {
2336 x = build_receiver_ref (var, false, ctx);
db3927fb 2337 x = build_fold_addr_expr_loc (clause_loc, x);
a68ab351
JJ
2338 }
2339 else if (TREE_CONSTANT (x))
953ff289
DN
2340 {
2341 const char *name = NULL;
2342 if (DECL_NAME (var))
2343 name = IDENTIFIER_POINTER (DECL_NAME (new_var));
2344
077b0dfb
JJ
2345 x = create_tmp_var_raw (TREE_TYPE (TREE_TYPE (new_var)),
2346 name);
2347 gimple_add_tmp_var (x);
628c189e 2348 TREE_ADDRESSABLE (x) = 1;
db3927fb 2349 x = build_fold_addr_expr_loc (clause_loc, x);
953ff289
DN
2350 }
2351 else
2352 {
db3927fb
AH
2353 x = build_call_expr_loc (clause_loc,
2354 built_in_decls[BUILT_IN_ALLOCA], 1, x);
953ff289
DN
2355 }
2356
db3927fb 2357 x = fold_convert_loc (clause_loc, TREE_TYPE (new_var), x);
726a989a 2358 gimplify_assign (new_var, x, ilist);
953ff289 2359
db3927fb 2360 new_var = build_fold_indirect_ref_loc (clause_loc, new_var);
953ff289
DN
2361 }
2362 else if (c_kind == OMP_CLAUSE_REDUCTION
2363 && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
2364 {
2365 if (pass == 0)
2366 continue;
2367 }
2368 else if (pass != 0)
2369 continue;
2370
aaf46ef9 2371 switch (OMP_CLAUSE_CODE (c))
953ff289
DN
2372 {
2373 case OMP_CLAUSE_SHARED:
8ca5b2a2
JJ
2374 /* Shared global vars are just accessed directly. */
2375 if (is_global_var (new_var))
2376 break;
953ff289
DN
2377 /* Set up the DECL_VALUE_EXPR for shared variables now. This
2378 needs to be delayed until after fixup_child_record_type so
2379 that we get the correct type during the dereference. */
7c8f7639 2380 by_ref = use_pointer_for_field (var, ctx);
953ff289
DN
2381 x = build_receiver_ref (var, by_ref, ctx);
2382 SET_DECL_VALUE_EXPR (new_var, x);
2383 DECL_HAS_VALUE_EXPR_P (new_var) = 1;
2384
2385 /* ??? If VAR is not passed by reference, and the variable
2386 hasn't been initialized yet, then we'll get a warning for
2387 the store into the omp_data_s structure. Ideally, we'd be
2388 able to notice this and not store anything at all, but
2389 we're generating code too early. Suppress the warning. */
2390 if (!by_ref)
2391 TREE_NO_WARNING (var) = 1;
2392 break;
2393
2394 case OMP_CLAUSE_LASTPRIVATE:
2395 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
2396 break;
2397 /* FALLTHRU */
2398
2399 case OMP_CLAUSE_PRIVATE:
a68ab351
JJ
2400 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_PRIVATE)
2401 x = build_outer_var_ref (var, ctx);
2402 else if (OMP_CLAUSE_PRIVATE_OUTER_REF (c))
2403 {
2404 if (is_task_ctx (ctx))
2405 x = build_receiver_ref (var, false, ctx);
2406 else
2407 x = build_outer_var_ref (var, ctx);
2408 }
2409 else
2410 x = NULL;
2411 x = lang_hooks.decls.omp_clause_default_ctor (c, new_var, x);
953ff289
DN
2412 if (x)
2413 gimplify_and_add (x, ilist);
2414 /* FALLTHRU */
2415
2416 do_dtor:
2417 x = lang_hooks.decls.omp_clause_dtor (c, new_var);
2418 if (x)
2419 {
726a989a
RB
2420 gimple_seq tseq = NULL;
2421
953ff289 2422 dtor = x;
726a989a
RB
2423 gimplify_stmt (&dtor, &tseq);
2424 gsi_insert_seq_before (&diter, tseq, GSI_SAME_STMT);
953ff289
DN
2425 }
2426 break;
2427
2428 case OMP_CLAUSE_FIRSTPRIVATE:
a68ab351
JJ
2429 if (is_task_ctx (ctx))
2430 {
2431 if (is_reference (var) || is_variable_sized (var))
2432 goto do_dtor;
2433 else if (is_global_var (maybe_lookup_decl_in_outer_ctx (var,
2434 ctx))
2435 || use_pointer_for_field (var, NULL))
2436 {
2437 x = build_receiver_ref (var, false, ctx);
2438 SET_DECL_VALUE_EXPR (new_var, x);
2439 DECL_HAS_VALUE_EXPR_P (new_var) = 1;
2440 goto do_dtor;
2441 }
2442 }
953ff289
DN
2443 x = build_outer_var_ref (var, ctx);
2444 x = lang_hooks.decls.omp_clause_copy_ctor (c, new_var, x);
2445 gimplify_and_add (x, ilist);
2446 goto do_dtor;
2447 break;
2448
2449 case OMP_CLAUSE_COPYIN:
7c8f7639 2450 by_ref = use_pointer_for_field (var, NULL);
953ff289
DN
2451 x = build_receiver_ref (var, by_ref, ctx);
2452 x = lang_hooks.decls.omp_clause_assign_op (c, new_var, x);
2453 append_to_statement_list (x, &copyin_seq);
2454 copyin_by_ref |= by_ref;
2455 break;
2456
2457 case OMP_CLAUSE_REDUCTION:
2458 if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
2459 {
a68ab351
JJ
2460 tree placeholder = OMP_CLAUSE_REDUCTION_PLACEHOLDER (c);
2461 x = build_outer_var_ref (var, ctx);
2462
2463 if (is_reference (var))
db3927fb 2464 x = build_fold_addr_expr_loc (clause_loc, x);
a68ab351
JJ
2465 SET_DECL_VALUE_EXPR (placeholder, x);
2466 DECL_HAS_VALUE_EXPR_P (placeholder) = 1;
726a989a
RB
2467 lower_omp (OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c), ctx);
2468 gimple_seq_add_seq (ilist,
2469 OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c));
2470 OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c) = NULL;
a68ab351 2471 DECL_HAS_VALUE_EXPR_P (placeholder) = 0;
953ff289
DN
2472 }
2473 else
2474 {
2475 x = omp_reduction_init (c, TREE_TYPE (new_var));
2476 gcc_assert (TREE_CODE (TREE_TYPE (new_var)) != ARRAY_TYPE);
726a989a 2477 gimplify_assign (new_var, x, ilist);
953ff289
DN
2478 }
2479 break;
2480
2481 default:
2482 gcc_unreachable ();
2483 }
2484 }
2485 }
2486
2487 /* The copyin sequence is not to be executed by the main thread, since
2488 that would result in self-copies. Perhaps not visible to scalars,
2489 but it certainly is to C++ operator=. */
2490 if (copyin_seq)
2491 {
5039610b 2492 x = build_call_expr (built_in_decls[BUILT_IN_OMP_GET_THREAD_NUM], 0);
953ff289
DN
2493 x = build2 (NE_EXPR, boolean_type_node, x,
2494 build_int_cst (TREE_TYPE (x), 0));
2495 x = build3 (COND_EXPR, void_type_node, x, copyin_seq, NULL);
2496 gimplify_and_add (x, ilist);
2497 }
2498
2499 /* If any copyin variable is passed by reference, we must ensure the
2500 master thread doesn't modify it before it is copied over in all
8ca5b2a2
JJ
2501 threads. Similarly for variables in both firstprivate and
2502 lastprivate clauses we need to ensure the lastprivate copying
2503 happens after firstprivate copying in all threads. */
2504 if (copyin_by_ref || lastprivate_firstprivate)
917948d3 2505 gimplify_and_add (build_omp_barrier (), ilist);
953ff289
DN
2506}
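/* Editorial sketch (hypothetical names, not from the original source):
   receiver-side code built above for a few clause kinds over
   "int a; T b;":

     shared(a)        a rewired via DECL_VALUE_EXPR to .omp_data_i->a
     firstprivate(b)  b' = .omp_data_i->b;   -- copy ctor into ILIST
     private(b)       default-construct b';  -- dtor call into DLIST
     copyin(t)        if (omp_get_thread_num () != 0) t = master's t;

   The closing barrier covers by-reference copyin and the combined
   firstprivate/lastprivate case, so no thread reads or overwrites a
   value before every copy has been made.  */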
2507
50674e96 2508
953ff289
DN
2509/* Generate code to implement the LASTPRIVATE clauses. This is used for
2510 both parallel and workshare constructs. PREDICATE may be NULL if it's
2511 always true. */
2512
2513static void
726a989a
RB
2514lower_lastprivate_clauses (tree clauses, tree predicate, gimple_seq *stmt_list,
2515 omp_context *ctx)
953ff289 2516{
726a989a 2517 tree x, c, label = NULL;
a68ab351 2518 bool par_clauses = false;
953ff289
DN
2519
2520 /* Early exit if there are no lastprivate clauses. */
2521 clauses = find_omp_clause (clauses, OMP_CLAUSE_LASTPRIVATE);
2522 if (clauses == NULL)
2523 {
2524 /* If this was a workshare clause, see if it had been combined
2525 with its parallel. In that case, look for the clauses on the
2526 parallel statement itself. */
2527 if (is_parallel_ctx (ctx))
2528 return;
2529
2530 ctx = ctx->outer;
2531 if (ctx == NULL || !is_parallel_ctx (ctx))
2532 return;
2533
726a989a 2534 clauses = find_omp_clause (gimple_omp_parallel_clauses (ctx->stmt),
953ff289
DN
2535 OMP_CLAUSE_LASTPRIVATE);
2536 if (clauses == NULL)
2537 return;
a68ab351 2538 par_clauses = true;
953ff289
DN
2539 }
2540
726a989a
RB
2541 if (predicate)
2542 {
2543 gimple stmt;
2544 tree label_true, arm1, arm2;
2545
c2255bc4
AH
2546 label = create_artificial_label (UNKNOWN_LOCATION);
2547 label_true = create_artificial_label (UNKNOWN_LOCATION);
726a989a
RB
2548 arm1 = TREE_OPERAND (predicate, 0);
2549 arm2 = TREE_OPERAND (predicate, 1);
2550 gimplify_expr (&arm1, stmt_list, NULL, is_gimple_val, fb_rvalue);
2551 gimplify_expr (&arm2, stmt_list, NULL, is_gimple_val, fb_rvalue);
2552 stmt = gimple_build_cond (TREE_CODE (predicate), arm1, arm2,
2553 label_true, label);
2554 gimple_seq_add_stmt (stmt_list, stmt);
2555 gimple_seq_add_stmt (stmt_list, gimple_build_label (label_true));
2556 }
953ff289 2557
a68ab351 2558 for (c = clauses; c ;)
953ff289
DN
2559 {
2560 tree var, new_var;
db3927fb 2561 location_t clause_loc = OMP_CLAUSE_LOCATION (c);
953ff289 2562
a68ab351
JJ
2563 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE)
2564 {
2565 var = OMP_CLAUSE_DECL (c);
2566 new_var = lookup_decl (var, ctx);
953ff289 2567
726a989a
RB
2568 if (OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c))
2569 {
2570 lower_omp (OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c), ctx);
2571 gimple_seq_add_seq (stmt_list,
2572 OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c));
2573 }
2574 OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c) = NULL;
953ff289 2575
a68ab351
JJ
2576 x = build_outer_var_ref (var, ctx);
2577 if (is_reference (var))
db3927fb 2578 new_var = build_fold_indirect_ref_loc (clause_loc, new_var);
a68ab351 2579 x = lang_hooks.decls.omp_clause_assign_op (c, x, new_var);
726a989a 2580 gimplify_and_add (x, stmt_list);
a68ab351
JJ
2581 }
2582 c = OMP_CLAUSE_CHAIN (c);
2583 if (c == NULL && !par_clauses)
2584 {
2585 /* If this was a workshare clause, see if it had been combined
2586 with its parallel. In that case, continue looking for the
2587 clauses also on the parallel statement itself. */
2588 if (is_parallel_ctx (ctx))
2589 break;
2590
2591 ctx = ctx->outer;
2592 if (ctx == NULL || !is_parallel_ctx (ctx))
2593 break;
2594
726a989a 2595 c = find_omp_clause (gimple_omp_parallel_clauses (ctx->stmt),
a68ab351
JJ
2596 OMP_CLAUSE_LASTPRIVATE);
2597 par_clauses = true;
2598 }
953ff289
DN
2599 }
2600
726a989a
RB
2601 if (label)
2602 gimple_seq_add_stmt (stmt_list, gimple_build_label (label));
953ff289
DN
2603}
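/* Editorial sketch: with a non-NULL PREDICATE the sequence built above
   is roughly

       if (<predicate: this was the last iteration>) goto label_true;
       goto label;
     label_true:
       <shared x> = x';     -- one assignment per lastprivate clause
     label:

   so only the thread that executed the sequentially last iteration (or
   last section) publishes its private value.  */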
2604
50674e96 2605
953ff289
DN
2606/* Generate code to implement the REDUCTION clauses. */
2607
2608static void
726a989a 2609lower_reduction_clauses (tree clauses, gimple_seq *stmt_seqp, omp_context *ctx)
953ff289 2610{
726a989a
RB
2611 gimple_seq sub_seq = NULL;
2612 gimple stmt;
2613 tree x, c;
953ff289
DN
2614 int count = 0;
2615
2616 /* First see if there is exactly one reduction clause. Use OMP_ATOMIC
2617 update in that case, otherwise use a lock. */
2618 for (c = clauses; c && count < 2; c = OMP_CLAUSE_CHAIN (c))
aaf46ef9 2619 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION)
953ff289
DN
2620 {
2621 if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
2622 {
2623 /* Never use OMP_ATOMIC for array reductions. */
2624 count = -1;
2625 break;
2626 }
2627 count++;
2628 }
2629
2630 if (count == 0)
2631 return;
2632
2633 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
2634 {
2635 tree var, ref, new_var;
2636 enum tree_code code;
db3927fb 2637 location_t clause_loc = OMP_CLAUSE_LOCATION (c);
953ff289 2638
aaf46ef9 2639 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_REDUCTION)
953ff289
DN
2640 continue;
2641
2642 var = OMP_CLAUSE_DECL (c);
2643 new_var = lookup_decl (var, ctx);
2644 if (is_reference (var))
db3927fb 2645 new_var = build_fold_indirect_ref_loc (clause_loc, new_var);
953ff289
DN
2646 ref = build_outer_var_ref (var, ctx);
2647 code = OMP_CLAUSE_REDUCTION_CODE (c);
50674e96
DN
2648
2649 /* reduction(-:var) sums up the partial results, so it acts
2650 identically to reduction(+:var). */
953ff289
DN
2651 if (code == MINUS_EXPR)
2652 code = PLUS_EXPR;
2653
2654 if (count == 1)
2655 {
db3927fb 2656 tree addr = build_fold_addr_expr_loc (clause_loc, ref);
953ff289
DN
2657
2658 addr = save_expr (addr);
2659 ref = build1 (INDIRECT_REF, TREE_TYPE (TREE_TYPE (addr)), addr);
db3927fb 2660 x = fold_build2_loc (clause_loc, code, TREE_TYPE (ref), ref, new_var);
953ff289 2661 x = build2 (OMP_ATOMIC, void_type_node, addr, x);
726a989a 2662 gimplify_and_add (x, stmt_seqp);
953ff289
DN
2663 return;
2664 }
2665
2666 if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
2667 {
2668 tree placeholder = OMP_CLAUSE_REDUCTION_PLACEHOLDER (c);
2669
2670 if (is_reference (var))
db3927fb 2671 ref = build_fold_addr_expr_loc (clause_loc, ref);
953ff289
DN
2672 SET_DECL_VALUE_EXPR (placeholder, ref);
2673 DECL_HAS_VALUE_EXPR_P (placeholder) = 1;
726a989a
RB
2674 lower_omp (OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c), ctx);
2675 gimple_seq_add_seq (&sub_seq, OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c));
2676 OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c) = NULL;
953ff289
DN
2677 OMP_CLAUSE_REDUCTION_PLACEHOLDER (c) = NULL;
2678 }
2679 else
2680 {
2681 x = build2 (code, TREE_TYPE (ref), ref, new_var);
2682 ref = build_outer_var_ref (var, ctx);
726a989a 2683 gimplify_assign (ref, x, &sub_seq);
953ff289
DN
2684 }
2685 }
2686
726a989a
RB
2687 stmt = gimple_build_call (built_in_decls[BUILT_IN_GOMP_ATOMIC_START], 0);
2688 gimple_seq_add_stmt (stmt_seqp, stmt);
953ff289 2689
726a989a 2690 gimple_seq_add_seq (stmt_seqp, sub_seq);
953ff289 2691
726a989a
RB
2692 stmt = gimple_build_call (built_in_decls[BUILT_IN_GOMP_ATOMIC_END], 0);
2693 gimple_seq_add_stmt (stmt_seqp, stmt);
953ff289
DN
2694}
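/* Editorial sketch: with exactly one scalar reduction clause, say
   reduction(+:s), the merge above is a single atomic update,

     #pragma omp atomic
     s = s + s';

   while multiple clauses or an array/placeholder reduction are merged
   between GOMP_atomic_start () and GOMP_atomic_end (), i.e. under one
   runtime lock.  */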
2695
50674e96 2696
953ff289
DN
2697/* Generate code to implement the COPYPRIVATE clauses. */
2698
2699static void
726a989a 2700lower_copyprivate_clauses (tree clauses, gimple_seq *slist, gimple_seq *rlist,
953ff289
DN
2701 omp_context *ctx)
2702{
2703 tree c;
2704
2705 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
2706 {
2707 tree var, ref, x;
2708 bool by_ref;
db3927fb 2709 location_t clause_loc = OMP_CLAUSE_LOCATION (c);
953ff289 2710
aaf46ef9 2711 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_COPYPRIVATE)
953ff289
DN
2712 continue;
2713
2714 var = OMP_CLAUSE_DECL (c);
7c8f7639 2715 by_ref = use_pointer_for_field (var, NULL);
953ff289
DN
2716
2717 ref = build_sender_ref (var, ctx);
d2dda7fe 2718 x = lookup_decl_in_outer_ctx (var, ctx);
db3927fb 2719 x = by_ref ? build_fold_addr_expr_loc (clause_loc, x) : x;
726a989a 2720 gimplify_assign (ref, x, slist);
953ff289
DN
2721
2722 ref = build_receiver_ref (var, by_ref, ctx);
2723 if (is_reference (var))
2724 {
db3927fb
AH
2725 ref = build_fold_indirect_ref_loc (clause_loc, ref);
2726 var = build_fold_indirect_ref_loc (clause_loc, var);
953ff289
DN
2727 }
2728 x = lang_hooks.decls.omp_clause_assign_op (c, var, ref);
2729 gimplify_and_add (x, rlist);
2730 }
2731}
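/* Editorial sketch: for "#pragma omp single copyprivate(x)" the
   executing thread fills the copy-out record on the send side (SLIST),

     .omp_copy_s.x = &x;    -- or the value itself when not by reference

   and, after the runtime has broadcast the record, every other thread
   assigns from it on the receive side (RLIST), e.g. x = *.omp_copy_s.x.  */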
2732
50674e96 2733
953ff289
DN
2734/* Generate code to implement the clauses, FIRSTPRIVATE, COPYIN, LASTPRIVATE,
2735 and REDUCTION from the sender (aka parent) side. */
2736
2737static void
726a989a
RB
2738lower_send_clauses (tree clauses, gimple_seq *ilist, gimple_seq *olist,
2739 omp_context *ctx)
953ff289
DN
2740{
2741 tree c;
2742
2743 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
2744 {
50674e96 2745 tree val, ref, x, var;
953ff289 2746 bool by_ref, do_in = false, do_out = false;
db3927fb 2747 location_t clause_loc = OMP_CLAUSE_LOCATION (c);
953ff289 2748
aaf46ef9 2749 switch (OMP_CLAUSE_CODE (c))
953ff289 2750 {
a68ab351
JJ
2751 case OMP_CLAUSE_PRIVATE:
2752 if (OMP_CLAUSE_PRIVATE_OUTER_REF (c))
2753 break;
2754 continue;
953ff289
DN
2755 case OMP_CLAUSE_FIRSTPRIVATE:
2756 case OMP_CLAUSE_COPYIN:
2757 case OMP_CLAUSE_LASTPRIVATE:
2758 case OMP_CLAUSE_REDUCTION:
2759 break;
2760 default:
2761 continue;
2762 }
2763
d2dda7fe
JJ
2764 val = OMP_CLAUSE_DECL (c);
2765 var = lookup_decl_in_outer_ctx (val, ctx);
50674e96 2766
8ca5b2a2
JJ
2767 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_COPYIN
2768 && is_global_var (var))
2769 continue;
953ff289
DN
2770 if (is_variable_sized (val))
2771 continue;
7c8f7639 2772 by_ref = use_pointer_for_field (val, NULL);
953ff289 2773
aaf46ef9 2774 switch (OMP_CLAUSE_CODE (c))
953ff289 2775 {
a68ab351 2776 case OMP_CLAUSE_PRIVATE:
953ff289
DN
2777 case OMP_CLAUSE_FIRSTPRIVATE:
2778 case OMP_CLAUSE_COPYIN:
2779 do_in = true;
2780 break;
2781
2782 case OMP_CLAUSE_LASTPRIVATE:
2783 if (by_ref || is_reference (val))
2784 {
2785 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
2786 continue;
2787 do_in = true;
2788 }
2789 else
a68ab351
JJ
2790 {
2791 do_out = true;
2792 if (lang_hooks.decls.omp_private_outer_ref (val))
2793 do_in = true;
2794 }
953ff289
DN
2795 break;
2796
2797 case OMP_CLAUSE_REDUCTION:
2798 do_in = true;
2799 do_out = !(by_ref || is_reference (val));
2800 break;
2801
2802 default:
2803 gcc_unreachable ();
2804 }
2805
2806 if (do_in)
2807 {
2808 ref = build_sender_ref (val, ctx);
db3927fb 2809 x = by_ref ? build_fold_addr_expr_loc (clause_loc, var) : var;
726a989a 2810 gimplify_assign (ref, x, ilist);
a68ab351
JJ
2811 if (is_task_ctx (ctx))
2812 DECL_ABSTRACT_ORIGIN (TREE_OPERAND (ref, 1)) = NULL;
953ff289 2813 }
50674e96 2814
953ff289
DN
2815 if (do_out)
2816 {
2817 ref = build_sender_ref (val, ctx);
726a989a 2818 gimplify_assign (var, ref, olist);
953ff289
DN
2819 }
2820 }
2821}
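/* Editorial sketch (hypothetical field names): for "int x" the sender
   side built above amounts to

     .omp_data_o.x = x;     -- DO_IN: firstprivate/copyin/reduction input
     <child executes>
     x = .omp_data_o.x;     -- DO_OUT: lastprivate/reduction result

   By-reference fields store &x instead and skip the copy-out, since the
   child then writes through the pointer directly.  */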
2822
726a989a
RB
2823/* Generate code to implement SHARED from the sender (aka parent)
2824 side. This is trickier, since GIMPLE_OMP_PARALLEL_CLAUSES doesn't
2825 list things that got automatically shared. */
953ff289
DN
2826
2827static void
726a989a 2828lower_send_shared_vars (gimple_seq *ilist, gimple_seq *olist, omp_context *ctx)
953ff289 2829{
a68ab351 2830 tree var, ovar, nvar, f, x, record_type;
953ff289
DN
2831
2832 if (ctx->record_type == NULL)
2833 return;
50674e96 2834
a68ab351
JJ
2835 record_type = ctx->srecord_type ? ctx->srecord_type : ctx->record_type;
2836 for (f = TYPE_FIELDS (record_type); f ; f = TREE_CHAIN (f))
953ff289
DN
2837 {
2838 ovar = DECL_ABSTRACT_ORIGIN (f);
2839 nvar = maybe_lookup_decl (ovar, ctx);
2840 if (!nvar || !DECL_HAS_VALUE_EXPR_P (nvar))
2841 continue;
2842
50674e96
DN
 2843 /* If CTX is a nested parallel directive, find the immediately
2844 enclosing parallel or workshare construct that contains a
2845 mapping for OVAR. */
d2dda7fe 2846 var = lookup_decl_in_outer_ctx (ovar, ctx);
50674e96 2847
7c8f7639 2848 if (use_pointer_for_field (ovar, ctx))
953ff289
DN
2849 {
2850 x = build_sender_ref (ovar, ctx);
50674e96 2851 var = build_fold_addr_expr (var);
726a989a 2852 gimplify_assign (x, var, ilist);
953ff289
DN
2853 }
2854 else
2855 {
2856 x = build_sender_ref (ovar, ctx);
726a989a 2857 gimplify_assign (x, var, ilist);
953ff289 2858
14e5b285
RG
2859 if (!TREE_READONLY (var)
2860 /* We don't need to receive a new reference to a result
2861 or parm decl. In fact we may not store to it as we will
2862 invalidate any pending RSO and generate wrong gimple
2863 during inlining. */
2864 && !((TREE_CODE (var) == RESULT_DECL
2865 || TREE_CODE (var) == PARM_DECL)
2866 && DECL_BY_REFERENCE (var)))
a68ab351
JJ
2867 {
2868 x = build_sender_ref (ovar, ctx);
726a989a 2869 gimplify_assign (var, x, olist);
a68ab351 2870 }
953ff289
DN
2871 }
2872 }
2873}
2874
726a989a
RB
2875
2876/* A convenience function to build an empty GIMPLE_COND with just the
2877 condition. */
2878
2879static gimple
2880gimple_build_cond_empty (tree cond)
2881{
2882 enum tree_code pred_code;
2883 tree lhs, rhs;
2884
2885 gimple_cond_get_ops_from_tree (cond, &pred_code, &lhs, &rhs);
2886 return gimple_build_cond (pred_code, lhs, rhs, NULL_TREE, NULL_TREE);
2887}
2888
2889
953ff289 2890/* Build the function calls to GOMP_parallel_start etc to actually
50674e96
DN
2891 generate the parallel operation. REGION is the parallel region
2892 being expanded. BB is the block where to insert the code. WS_ARGS
2893 will be set if this is a call to a combined parallel+workshare
2894 construct, it contains the list of additional arguments needed by
2895 the workshare construct. */
953ff289
DN
2896
2897static void
777f7f9a 2898expand_parallel_call (struct omp_region *region, basic_block bb,
726a989a 2899 gimple entry_stmt, tree ws_args)
953ff289 2900{
917948d3 2901 tree t, t1, t2, val, cond, c, clauses;
726a989a
RB
2902 gimple_stmt_iterator gsi;
2903 gimple stmt;
50674e96 2904 int start_ix;
db3927fb 2905 location_t clause_loc;
50674e96 2906
726a989a 2907 clauses = gimple_omp_parallel_clauses (entry_stmt);
50674e96 2908
c0220ea4 2909 /* Determine what flavor of GOMP_parallel_start we will be
50674e96
DN
2910 emitting. */
2911 start_ix = BUILT_IN_GOMP_PARALLEL_START;
2912 if (is_combined_parallel (region))
2913 {
777f7f9a 2914 switch (region->inner->type)
50674e96 2915 {
726a989a 2916 case GIMPLE_OMP_FOR:
a68ab351 2917 gcc_assert (region->inner->sched_kind != OMP_CLAUSE_SCHEDULE_AUTO);
21a66e91 2918 start_ix = BUILT_IN_GOMP_PARALLEL_LOOP_STATIC_START
a68ab351
JJ
2919 + (region->inner->sched_kind
2920 == OMP_CLAUSE_SCHEDULE_RUNTIME
2921 ? 3 : region->inner->sched_kind);
777f7f9a 2922 break;
726a989a 2923 case GIMPLE_OMP_SECTIONS:
777f7f9a
RH
2924 start_ix = BUILT_IN_GOMP_PARALLEL_SECTIONS_START;
2925 break;
2926 default:
2927 gcc_unreachable ();
50674e96 2928 }
50674e96 2929 }
953ff289
DN
2930
2931 /* By default, the value of NUM_THREADS is zero (selected at run time)
2932 and there is no conditional. */
2933 cond = NULL_TREE;
2934 val = build_int_cst (unsigned_type_node, 0);
2935
2936 c = find_omp_clause (clauses, OMP_CLAUSE_IF);
2937 if (c)
2938 cond = OMP_CLAUSE_IF_EXPR (c);
2939
2940 c = find_omp_clause (clauses, OMP_CLAUSE_NUM_THREADS);
2941 if (c)
db3927fb
AH
2942 {
2943 val = OMP_CLAUSE_NUM_THREADS_EXPR (c);
2944 clause_loc = OMP_CLAUSE_LOCATION (c);
2945 }
2946 else
2947 clause_loc = gimple_location (entry_stmt);
953ff289
DN
2948
2949 /* Ensure 'val' is of the correct type. */
db3927fb 2950 val = fold_convert_loc (clause_loc, unsigned_type_node, val);
953ff289
DN
2951
2952 /* If we found the clause 'if (cond)', build either
2953 (cond != 0) or (cond ? val : 1u). */
2954 if (cond)
2955 {
726a989a 2956 gimple_stmt_iterator gsi;
50674e96
DN
2957
2958 cond = gimple_boolify (cond);
2959
953ff289 2960 if (integer_zerop (val))
db3927fb
AH
2961 val = fold_build2_loc (clause_loc,
2962 EQ_EXPR, unsigned_type_node, cond,
917948d3 2963 build_int_cst (TREE_TYPE (cond), 0));
953ff289 2964 else
50674e96
DN
2965 {
2966 basic_block cond_bb, then_bb, else_bb;
917948d3 2967 edge e, e_then, e_else;
726a989a 2968 tree tmp_then, tmp_else, tmp_join, tmp_var;
917948d3
ZD
2969
2970 tmp_var = create_tmp_var (TREE_TYPE (val), NULL);
2971 if (gimple_in_ssa_p (cfun))
2972 {
726a989a
RB
2973 tmp_then = make_ssa_name (tmp_var, NULL);
2974 tmp_else = make_ssa_name (tmp_var, NULL);
2975 tmp_join = make_ssa_name (tmp_var, NULL);
917948d3
ZD
2976 }
2977 else
2978 {
2979 tmp_then = tmp_var;
2980 tmp_else = tmp_var;
2981 tmp_join = tmp_var;
2982 }
50674e96 2983
50674e96
DN
2984 e = split_block (bb, NULL);
2985 cond_bb = e->src;
2986 bb = e->dest;
2987 remove_edge (e);
2988
2989 then_bb = create_empty_bb (cond_bb);
2990 else_bb = create_empty_bb (then_bb);
917948d3
ZD
2991 set_immediate_dominator (CDI_DOMINATORS, then_bb, cond_bb);
2992 set_immediate_dominator (CDI_DOMINATORS, else_bb, cond_bb);
50674e96 2993
726a989a
RB
2994 stmt = gimple_build_cond_empty (cond);
2995 gsi = gsi_start_bb (cond_bb);
2996 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
50674e96 2997
726a989a
RB
2998 gsi = gsi_start_bb (then_bb);
2999 stmt = gimple_build_assign (tmp_then, val);
3000 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
50674e96 3001
726a989a
RB
3002 gsi = gsi_start_bb (else_bb);
3003 stmt = gimple_build_assign
3004 (tmp_else, build_int_cst (unsigned_type_node, 1));
3005 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
50674e96
DN
3006
3007 make_edge (cond_bb, then_bb, EDGE_TRUE_VALUE);
3008 make_edge (cond_bb, else_bb, EDGE_FALSE_VALUE);
917948d3
ZD
3009 e_then = make_edge (then_bb, bb, EDGE_FALLTHRU);
3010 e_else = make_edge (else_bb, bb, EDGE_FALLTHRU);
50674e96 3011
917948d3
ZD
3012 if (gimple_in_ssa_p (cfun))
3013 {
726a989a 3014 gimple phi = create_phi_node (tmp_join, bb);
917948d3 3015 SSA_NAME_DEF_STMT (tmp_join) = phi;
f5045c96
AM
3016 add_phi_arg (phi, tmp_then, e_then, UNKNOWN_LOCATION);
3017 add_phi_arg (phi, tmp_else, e_else, UNKNOWN_LOCATION);
917948d3
ZD
3018 }
3019
3020 val = tmp_join;
50674e96
DN
3021 }
3022
726a989a
RB
3023 gsi = gsi_start_bb (bb);
3024 val = force_gimple_operand_gsi (&gsi, val, true, NULL_TREE,
3025 false, GSI_CONTINUE_LINKING);
953ff289
DN
3026 }
3027
726a989a
RB
3028 gsi = gsi_last_bb (bb);
3029 t = gimple_omp_parallel_data_arg (entry_stmt);
953ff289 3030 if (t == NULL)
5039610b 3031 t1 = null_pointer_node;
953ff289 3032 else
5039610b 3033 t1 = build_fold_addr_expr (t);
726a989a 3034 t2 = build_fold_addr_expr (gimple_omp_parallel_child_fn (entry_stmt));
50674e96
DN
3035
3036 if (ws_args)
5039610b
SL
3037 {
3038 tree args = tree_cons (NULL, t2,
3039 tree_cons (NULL, t1,
3040 tree_cons (NULL, val, ws_args)));
db3927fb
AH
3041 t = build_function_call_expr (UNKNOWN_LOCATION,
3042 built_in_decls[start_ix], args);
5039610b
SL
3043 }
3044 else
3045 t = build_call_expr (built_in_decls[start_ix], 3, t2, t1, val);
50674e96 3046
726a989a
RB
3047 force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3048 false, GSI_CONTINUE_LINKING);
953ff289 3049
726a989a 3050 t = gimple_omp_parallel_data_arg (entry_stmt);
953ff289
DN
3051 if (t == NULL)
3052 t = null_pointer_node;
3053 else
3054 t = build_fold_addr_expr (t);
db3927fb
AH
3055 t = build_call_expr_loc (gimple_location (entry_stmt),
3056 gimple_omp_parallel_child_fn (entry_stmt), 1, t);
726a989a
RB
3057 force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3058 false, GSI_CONTINUE_LINKING);
953ff289 3059
db3927fb
AH
3060 t = build_call_expr_loc (gimple_location (entry_stmt),
3061 built_in_decls[BUILT_IN_GOMP_PARALLEL_END], 0);
726a989a
RB
3062 force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3063 false, GSI_CONTINUE_LINKING);
953ff289
DN
3064}
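/* Editorial sketch (hand-written equivalent, not part of this file):
   for a plain parallel region the calls emitted above amount to

     GOMP_parallel_start (child_fn, &.omp_data_o, num_threads);
     child_fn (&.omp_data_o);   -- the encountering thread joins the team
     GOMP_parallel_end ();

   where num_threads 0 leaves the choice to the runtime, and combined
   parallel + for/sections regions call GOMP_parallel_loop_*_start or
   GOMP_parallel_sections_start with the extra WS_ARGS instead.  */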
3065
50674e96 3066
a68ab351
JJ
3067/* Build the function call to GOMP_task to actually
3068 generate the task operation. BB is the block where to insert the code. */
3069
3070static void
726a989a 3071expand_task_call (basic_block bb, gimple entry_stmt)
a68ab351
JJ
3072{
3073 tree t, t1, t2, t3, flags, cond, c, clauses;
726a989a 3074 gimple_stmt_iterator gsi;
db3927fb 3075 location_t loc = gimple_location (entry_stmt);
a68ab351 3076
726a989a 3077 clauses = gimple_omp_task_clauses (entry_stmt);
a68ab351 3078
a68ab351
JJ
3079 c = find_omp_clause (clauses, OMP_CLAUSE_IF);
3080 if (c)
3081 cond = gimple_boolify (OMP_CLAUSE_IF_EXPR (c));
3082 else
3083 cond = boolean_true_node;
3084
3085 c = find_omp_clause (clauses, OMP_CLAUSE_UNTIED);
3086 flags = build_int_cst (unsigned_type_node, (c ? 1 : 0));
3087
726a989a
RB
3088 gsi = gsi_last_bb (bb);
3089 t = gimple_omp_task_data_arg (entry_stmt);
a68ab351
JJ
3090 if (t == NULL)
3091 t2 = null_pointer_node;
3092 else
db3927fb
AH
3093 t2 = build_fold_addr_expr_loc (loc, t);
3094 t1 = build_fold_addr_expr_loc (loc, gimple_omp_task_child_fn (entry_stmt));
726a989a 3095 t = gimple_omp_task_copy_fn (entry_stmt);
a68ab351
JJ
3096 if (t == NULL)
3097 t3 = null_pointer_node;
3098 else
db3927fb 3099 t3 = build_fold_addr_expr_loc (loc, t);
a68ab351
JJ
3100
3101 t = build_call_expr (built_in_decls[BUILT_IN_GOMP_TASK], 7, t1, t2, t3,
726a989a
RB
3102 gimple_omp_task_arg_size (entry_stmt),
3103 gimple_omp_task_arg_align (entry_stmt), cond, flags);
a68ab351 3104
726a989a
RB
3105 force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3106 false, GSI_CONTINUE_LINKING);
a68ab351
JJ
3107}
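/* Editorial sketch: the single call built above has the shape

     GOMP_task (child_fn, &.omp_data_o, copy_fn_or_NULL,
                arg_size, arg_align, if_cond, flags);

   with flags currently carrying only bit 0 (set for an untied task) and
   if_cond the gimplified IF clause condition, defaulting to true when
   the clause is absent.  */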
3108
3109
726a989a
RB
3110/* If exceptions are enabled, wrap the statements in BODY in a MUST_NOT_THROW
3111 catch handler and return it. This prevents programs from violating the
3112 structured block semantics with throws. */
953ff289 3113
726a989a
RB
3114static gimple_seq
3115maybe_catch_exception (gimple_seq body)
953ff289 3116{
1d65f45c
RH
3117 gimple g;
3118 tree decl;
953ff289
DN
3119
3120 if (!flag_exceptions)
726a989a 3121 return body;
953ff289
DN
3122
3123 if (lang_protect_cleanup_actions)
1d65f45c 3124 decl = lang_protect_cleanup_actions ();
953ff289 3125 else
1d65f45c 3126 decl = built_in_decls[BUILT_IN_TRAP];
726a989a 3127
1d65f45c
RH
3128 g = gimple_build_eh_must_not_throw (decl);
3129 g = gimple_build_try (body, gimple_seq_alloc_with_stmt (g),
726a989a 3130 GIMPLE_TRY_CATCH);
953ff289 3131
1d65f45c 3132 return gimple_seq_alloc_with_stmt (g);
953ff289
DN
3133}
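/* Editorial sketch: in C++ terms the wrapping above corresponds to

     try { <body> }
     catch (...) { <MUST_NOT_THROW region: the language's cleanup
                    action, or __builtin_trap ()>; }

   so an exception can never escape the structured block.  */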
3134
50674e96 3135/* Chain all the DECLs in LIST by their TREE_CHAIN fields. */
953ff289 3136
50674e96
DN
3137static tree
3138list2chain (tree list)
953ff289 3139{
50674e96 3140 tree t;
953ff289 3141
50674e96
DN
3142 for (t = list; t; t = TREE_CHAIN (t))
3143 {
3144 tree var = TREE_VALUE (t);
3145 if (TREE_CHAIN (t))
3146 TREE_CHAIN (var) = TREE_VALUE (TREE_CHAIN (t));
3147 else
3148 TREE_CHAIN (var) = NULL_TREE;
3149 }
953ff289 3150
50674e96
DN
3151 return list ? TREE_VALUE (list) : NULL_TREE;
3152}
953ff289 3153
953ff289 3154
50674e96 3155/* Remove barriers in REGION->EXIT's block. Note that this is only
726a989a
RB
3156 valid for GIMPLE_OMP_PARALLEL regions. Since the end of a parallel region
3157 is an implicit barrier, any workshare inside the GIMPLE_OMP_PARALLEL that
3158 left a barrier at the end of the GIMPLE_OMP_PARALLEL region can now be
50674e96 3159 removed. */
953ff289 3160
50674e96
DN
3161static void
3162remove_exit_barrier (struct omp_region *region)
3163{
726a989a 3164 gimple_stmt_iterator gsi;
50674e96 3165 basic_block exit_bb;
777f7f9a
RH
3166 edge_iterator ei;
3167 edge e;
726a989a 3168 gimple stmt;
03742a9b 3169 int any_addressable_vars = -1;
953ff289 3170
777f7f9a 3171 exit_bb = region->exit;
953ff289 3172
2aee3e57
JJ
 3173 /* If the parallel region doesn't return, we don't have a REGION->EXIT
3174 block at all. */
3175 if (! exit_bb)
3176 return;
3177
726a989a
RB
3178 /* The last insn in the block will be the parallel's GIMPLE_OMP_RETURN. The
3179 workshare's GIMPLE_OMP_RETURN will be in a preceding block. The kinds of
777f7f9a
RH
3180 statements that can appear in between are extremely limited -- no
3181 memory operations at all. Here, we allow nothing at all, so the
726a989a
RB
3182 only thing we allow to precede this GIMPLE_OMP_RETURN is a label. */
3183 gsi = gsi_last_bb (exit_bb);
3184 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_RETURN);
3185 gsi_prev (&gsi);
3186 if (!gsi_end_p (gsi) && gimple_code (gsi_stmt (gsi)) != GIMPLE_LABEL)
50674e96 3187 return;
953ff289 3188
777f7f9a
RH
3189 FOR_EACH_EDGE (e, ei, exit_bb->preds)
3190 {
726a989a
RB
3191 gsi = gsi_last_bb (e->src);
3192 if (gsi_end_p (gsi))
777f7f9a 3193 continue;
726a989a 3194 stmt = gsi_stmt (gsi);
03742a9b
JJ
3195 if (gimple_code (stmt) == GIMPLE_OMP_RETURN
3196 && !gimple_omp_return_nowait_p (stmt))
3197 {
3198 /* OpenMP 3.0 tasks unfortunately prevent this optimization
3199 in many cases. If there could be tasks queued, the barrier
3200 might be needed to let the tasks run before some local
3201 variable of the parallel that the task uses as shared
3202 runs out of scope. The task can be spawned either
 3203 from within the current function (this would be easy to check)
3204 or from some function it calls and gets passed an address
3205 of such a variable. */
3206 if (any_addressable_vars < 0)
3207 {
3208 gimple parallel_stmt = last_stmt (region->entry);
3209 tree child_fun = gimple_omp_parallel_child_fn (parallel_stmt);
3210 tree local_decls = DECL_STRUCT_FUNCTION (child_fun)->local_decls;
3211 tree block;
3212
3213 any_addressable_vars = 0;
3214 for (; local_decls; local_decls = TREE_CHAIN (local_decls))
3215 if (TREE_ADDRESSABLE (TREE_VALUE (local_decls)))
3216 {
3217 any_addressable_vars = 1;
3218 break;
3219 }
3220 for (block = gimple_block (stmt);
3221 !any_addressable_vars
3222 && block
3223 && TREE_CODE (block) == BLOCK;
3224 block = BLOCK_SUPERCONTEXT (block))
3225 {
3226 for (local_decls = BLOCK_VARS (block);
3227 local_decls;
3228 local_decls = TREE_CHAIN (local_decls))
3229 if (TREE_ADDRESSABLE (local_decls))
3230 {
3231 any_addressable_vars = 1;
3232 break;
3233 }
3234 if (block == gimple_block (parallel_stmt))
3235 break;
3236 }
3237 }
3238 if (!any_addressable_vars)
3239 gimple_omp_return_set_nowait (stmt);
3240 }
777f7f9a 3241 }
953ff289
DN
3242}
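/* Editorial sketch: the nowait marking above relies on the parallel
   region's own closing barrier, e.g. in

     #pragma omp parallel
     {
       #pragma omp for    -- implicit barrier at the end of the loop
       ...
     }                    -- implicit barrier at the end of the parallel

   the loop's barrier is redundant, unless queued tasks might still
   touch addressable locals that would go out of scope, which is exactly
   what the scan above conservatively checks.  */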
3243
777f7f9a
RH
3244static void
3245remove_exit_barriers (struct omp_region *region)
3246{
726a989a 3247 if (region->type == GIMPLE_OMP_PARALLEL)
777f7f9a
RH
3248 remove_exit_barrier (region);
3249
3250 if (region->inner)
3251 {
3252 region = region->inner;
3253 remove_exit_barriers (region);
3254 while (region->next)
3255 {
3256 region = region->next;
3257 remove_exit_barriers (region);
3258 }
3259 }
3260}
50674e96 3261
2b4cf991
JJ
3262/* Optimize omp_get_thread_num () and omp_get_num_threads ()
3263 calls. These can't be declared as const functions, but
3264 within one parallel body they are constant, so they can be
3265 transformed there into __builtin_omp_get_{thread_num,num_threads} ()
a68ab351
JJ
 3266 which are declared const. Similarly for a task body, except
 3267 that in an untied task omp_get_thread_num () can change at any task
3268 scheduling point. */
2b4cf991
JJ
3269
3270static void
726a989a 3271optimize_omp_library_calls (gimple entry_stmt)
2b4cf991
JJ
3272{
3273 basic_block bb;
726a989a 3274 gimple_stmt_iterator gsi;
2b4cf991
JJ
3275 tree thr_num_id
3276 = DECL_ASSEMBLER_NAME (built_in_decls [BUILT_IN_OMP_GET_THREAD_NUM]);
3277 tree num_thr_id
3278 = DECL_ASSEMBLER_NAME (built_in_decls [BUILT_IN_OMP_GET_NUM_THREADS]);
726a989a
RB
3279 bool untied_task = (gimple_code (entry_stmt) == GIMPLE_OMP_TASK
3280 && find_omp_clause (gimple_omp_task_clauses (entry_stmt),
a68ab351 3281 OMP_CLAUSE_UNTIED) != NULL);
2b4cf991
JJ
3282
3283 FOR_EACH_BB (bb)
726a989a 3284 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
2b4cf991 3285 {
726a989a 3286 gimple call = gsi_stmt (gsi);
2b4cf991
JJ
3287 tree decl;
3288
726a989a
RB
3289 if (is_gimple_call (call)
3290 && (decl = gimple_call_fndecl (call))
2b4cf991
JJ
3291 && DECL_EXTERNAL (decl)
3292 && TREE_PUBLIC (decl)
3293 && DECL_INITIAL (decl) == NULL)
3294 {
3295 tree built_in;
3296
3297 if (DECL_NAME (decl) == thr_num_id)
a68ab351
JJ
3298 {
3299 /* In #pragma omp task untied omp_get_thread_num () can change
3300 during the execution of the task region. */
3301 if (untied_task)
3302 continue;
3303 built_in = built_in_decls [BUILT_IN_OMP_GET_THREAD_NUM];
3304 }
2b4cf991
JJ
3305 else if (DECL_NAME (decl) == num_thr_id)
3306 built_in = built_in_decls [BUILT_IN_OMP_GET_NUM_THREADS];
3307 else
3308 continue;
3309
3310 if (DECL_ASSEMBLER_NAME (decl) != DECL_ASSEMBLER_NAME (built_in)
726a989a 3311 || gimple_call_num_args (call) != 0)
2b4cf991
JJ
3312 continue;
3313
3314 if (flag_exceptions && !TREE_NOTHROW (decl))
3315 continue;
3316
3317 if (TREE_CODE (TREE_TYPE (decl)) != FUNCTION_TYPE
9600efe1
MM
3318 || !types_compatible_p (TREE_TYPE (TREE_TYPE (decl)),
3319 TREE_TYPE (TREE_TYPE (built_in))))
2b4cf991
JJ
3320 continue;
3321
7c9577be 3322 gimple_call_set_fndecl (call, built_in);
2b4cf991
JJ
3323 }
3324 }
3325}
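/* Editorial sketch: inside a parallel (or tied task) body the rewrite
   above turns

     n = omp_get_num_threads ();   -- opaque external call

   into a call to the const __builtin_omp_get_num_threads (), letting
   later passes CSE and hoist repeated queries.  */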
3326
a68ab351 3327/* Expand the OpenMP parallel or task directive starting at REGION. */
953ff289
DN
3328
3329static void
a68ab351 3330expand_omp_taskreg (struct omp_region *region)
953ff289 3331{
50674e96 3332 basic_block entry_bb, exit_bb, new_bb;
db2960f4 3333 struct function *child_cfun;
b357f682 3334 tree child_fn, block, t, ws_args, *tp;
133f9369 3335 tree save_current;
726a989a
RB
3336 gimple_stmt_iterator gsi;
3337 gimple entry_stmt, stmt;
50674e96
DN
3338 edge e;
3339
777f7f9a 3340 entry_stmt = last_stmt (region->entry);
726a989a 3341 child_fn = gimple_omp_taskreg_child_fn (entry_stmt);
50674e96 3342 child_cfun = DECL_STRUCT_FUNCTION (child_fn);
135a171d
JJ
3343 /* If this function has been already instrumented, make sure
3344 the child function isn't instrumented again. */
3345 child_cfun->after_tree_profile = cfun->after_tree_profile;
50674e96 3346
777f7f9a
RH
3347 entry_bb = region->entry;
3348 exit_bb = region->exit;
50674e96 3349
50674e96 3350 if (is_combined_parallel (region))
777f7f9a 3351 ws_args = region->ws_args;
50674e96
DN
3352 else
3353 ws_args = NULL_TREE;
953ff289 3354
777f7f9a 3355 if (child_cfun->cfg)
953ff289 3356 {
50674e96
DN
3357 /* Due to inlining, it may happen that we have already outlined
3358 the region, in which case all we need to do is make the
3359 sub-graph unreachable and emit the parallel call. */
3360 edge entry_succ_e, exit_succ_e;
726a989a 3361 gimple_stmt_iterator gsi;
50674e96
DN
3362
3363 entry_succ_e = single_succ_edge (entry_bb);
50674e96 3364
726a989a
RB
3365 gsi = gsi_last_bb (entry_bb);
3366 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_PARALLEL
3367 || gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_TASK);
3368 gsi_remove (&gsi, true);
50674e96
DN
3369
3370 new_bb = entry_bb;
d3c673c7
JJ
3371 if (exit_bb)
3372 {
3373 exit_succ_e = single_succ_edge (exit_bb);
3374 make_edge (new_bb, exit_succ_e->dest, EDGE_FALLTHRU);
3375 }
917948d3 3376 remove_edge_and_dominated_blocks (entry_succ_e);
953ff289 3377 }
50674e96
DN
3378 else
3379 {
3380 /* If the parallel region needs data sent from the parent
b570947c
JJ
3381 function, then the very first statement (except possible
3382 tree profile counter updates) of the parallel body
50674e96
DN
3383 is a copy assignment .OMP_DATA_I = &.OMP_DATA_O. Since
3384 &.OMP_DATA_O is passed as an argument to the child function,
3385 we need to replace it with the argument as seen by the child
3386 function.
3387
3388 In most cases, this will end up being the identity assignment
3389 .OMP_DATA_I = .OMP_DATA_I. However, if the parallel body had
3390 a function call that has been inlined, the original PARM_DECL
3391 .OMP_DATA_I may have been converted into a different local
 3392 variable, in which case we need to keep the assignment. */
726a989a 3393 if (gimple_omp_taskreg_data_arg (entry_stmt))
50674e96
DN
3394 {
3395 basic_block entry_succ_bb = single_succ (entry_bb);
726a989a
RB
3396 gimple_stmt_iterator gsi;
3397 tree arg, narg;
3398 gimple parcopy_stmt = NULL;
953ff289 3399
726a989a 3400 for (gsi = gsi_start_bb (entry_succ_bb); ; gsi_next (&gsi))
b570947c 3401 {
726a989a 3402 gimple stmt;
b570947c 3403
726a989a
RB
3404 gcc_assert (!gsi_end_p (gsi));
3405 stmt = gsi_stmt (gsi);
3406 if (gimple_code (stmt) != GIMPLE_ASSIGN)
018b899b
JJ
3407 continue;
3408
726a989a 3409 if (gimple_num_ops (stmt) == 2)
b570947c 3410 {
726a989a
RB
3411 tree arg = gimple_assign_rhs1 (stmt);
3412
3413 /* We're ignore the subcode because we're
3414 effectively doing a STRIP_NOPS. */
3415
3416 if (TREE_CODE (arg) == ADDR_EXPR
3417 && TREE_OPERAND (arg, 0)
3418 == gimple_omp_taskreg_data_arg (entry_stmt))
3419 {
3420 parcopy_stmt = stmt;
3421 break;
3422 }
b570947c
JJ
3423 }
3424 }
917948d3 3425
726a989a 3426 gcc_assert (parcopy_stmt != NULL);
917948d3
ZD
3427 arg = DECL_ARGUMENTS (child_fn);
3428
3429 if (!gimple_in_ssa_p (cfun))
3430 {
726a989a
RB
3431 if (gimple_assign_lhs (parcopy_stmt) == arg)
3432 gsi_remove (&gsi, true);
917948d3 3433 else
726a989a
RB
3434 {
3435 /* ?? Is setting the subcode really necessary ?? */
3436 gimple_omp_set_subcode (parcopy_stmt, TREE_CODE (arg));
3437 gimple_assign_set_rhs1 (parcopy_stmt, arg);
3438 }
917948d3
ZD
3439 }
3440 else
3441 {
3442 /* If we are in ssa form, we must load the value from the default
3443 definition of the argument. That should not be defined now,
3444 since the argument is not used uninitialized. */
3445 gcc_assert (gimple_default_def (cfun, arg) == NULL);
726a989a 3446 narg = make_ssa_name (arg, gimple_build_nop ());
917948d3 3447 set_default_def (arg, narg);
726a989a
RB
3448 /* ?? Is setting the subcode really necessary ?? */
3449 gimple_omp_set_subcode (parcopy_stmt, TREE_CODE (narg));
3450 gimple_assign_set_rhs1 (parcopy_stmt, narg);
917948d3
ZD
3451 update_stmt (parcopy_stmt);
3452 }
50674e96
DN
3453 }
3454
3455 /* Declare local variables needed in CHILD_CFUN. */
3456 block = DECL_INITIAL (child_fn);
cb91fab0 3457 BLOCK_VARS (block) = list2chain (child_cfun->local_decls);
4f0ae266
JJ
3458 /* The gimplifier could record temporaries in the parallel/task block
3459 rather than in the containing function's local_decls chain,
3460 which would cause cgraph to miss finalizing them. Do it now. */
3461 for (t = BLOCK_VARS (block); t; t = TREE_CHAIN (t))
3462 if (TREE_CODE (t) == VAR_DECL
3463 && TREE_STATIC (t)
3464 && !DECL_EXTERNAL (t))
3465 varpool_finalize_decl (t);
726a989a
RB
3466 DECL_SAVED_TREE (child_fn) = NULL;
3467 gimple_set_body (child_fn, bb_seq (single_succ (entry_bb)));
b357f682 3468 TREE_USED (block) = 1;
50674e96 3469
917948d3 3470 /* Reset DECL_CONTEXT on function arguments. */
50674e96
DN
3471 for (t = DECL_ARGUMENTS (child_fn); t; t = TREE_CHAIN (t))
3472 DECL_CONTEXT (t) = child_fn;
3473
726a989a
RB
3474 /* Split ENTRY_BB at GIMPLE_OMP_PARALLEL or GIMPLE_OMP_TASK,
3475 so that it can be moved to the child function. */
3476 gsi = gsi_last_bb (entry_bb);
3477 stmt = gsi_stmt (gsi);
3478 gcc_assert (stmt && (gimple_code (stmt) == GIMPLE_OMP_PARALLEL
3479 || gimple_code (stmt) == GIMPLE_OMP_TASK));
3480 gsi_remove (&gsi, true);
3481 e = split_block (entry_bb, stmt);
50674e96
DN
3482 entry_bb = e->dest;
3483 single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
3484
726a989a 3485 /* Convert GIMPLE_OMP_RETURN into a RETURN_EXPR. */
2aee3e57
JJ
3486 if (exit_bb)
3487 {
726a989a
RB
3488 gsi = gsi_last_bb (exit_bb);
3489 gcc_assert (!gsi_end_p (gsi)
3490 && gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_RETURN);
3491 stmt = gimple_build_return (NULL);
3492 gsi_insert_after (&gsi, stmt, GSI_SAME_STMT);
3493 gsi_remove (&gsi, true);
2aee3e57 3494 }
917948d3
ZD
3495
3496 /* Move the parallel region into CHILD_CFUN. */
3497
3498 if (gimple_in_ssa_p (cfun))
3499 {
3500 push_cfun (child_cfun);
5db9ba0c 3501 init_tree_ssa (child_cfun);
917948d3
ZD
3502 init_ssa_operands ();
3503 cfun->gimple_df->in_ssa_p = true;
3504 pop_cfun ();
b357f682 3505 block = NULL_TREE;
917948d3 3506 }
b357f682 3507 else
726a989a 3508 block = gimple_block (entry_stmt);
b357f682
JJ
3509
3510 new_bb = move_sese_region_to_fn (child_cfun, entry_bb, exit_bb, block);
917948d3
ZD
3511 if (exit_bb)
3512 single_succ_edge (new_bb)->flags = EDGE_FALLTHRU;
3513
b357f682
JJ
3514 /* Remove non-local VAR_DECLs from child_cfun->local_decls list. */
3515 for (tp = &child_cfun->local_decls; *tp; )
3516 if (DECL_CONTEXT (TREE_VALUE (*tp)) != cfun->decl)
3517 tp = &TREE_CHAIN (*tp);
3518 else
3519 *tp = TREE_CHAIN (*tp);
3520
917948d3
ZD
3521 /* Inform the callgraph about the new function. */
3522 DECL_STRUCT_FUNCTION (child_fn)->curr_properties
3523 = cfun->curr_properties;
3524 cgraph_add_new_function (child_fn, true);
3525
3526 /* Fix the callgraph edges for child_cfun. Those for cfun will be
3527 fixed in a following pass. */
3528 push_cfun (child_cfun);
133f9369
MJ
3529 save_current = current_function_decl;
3530 current_function_decl = child_fn;
2b4cf991 3531 if (optimize)
a68ab351 3532 optimize_omp_library_calls (entry_stmt);
917948d3 3533 rebuild_cgraph_edges ();
99819c63
JJ
3534
3535 /* Some EH regions might become dead, see PR34608. If
3536 pass_cleanup_cfg isn't the first pass to happen with the
3537 new child, these dead EH edges might cause problems.
3538 Clean them up now. */
3539 if (flag_exceptions)
3540 {
3541 basic_block bb;
99819c63
JJ
3542 bool changed = false;
3543
99819c63 3544 FOR_EACH_BB (bb)
726a989a 3545 changed |= gimple_purge_dead_eh_edges (bb);
99819c63
JJ
3546 if (changed)
3547 cleanup_tree_cfg ();
99819c63 3548 }
5006671f
RG
3549 if (gimple_in_ssa_p (cfun))
3550 update_ssa (TODO_update_ssa);
133f9369 3551 current_function_decl = save_current;
917948d3 3552 pop_cfun ();
50674e96 3553 }
a509ebb5 3554
50674e96 3555 /* Emit a library call to launch the children threads. */
726a989a 3556 if (gimple_code (entry_stmt) == GIMPLE_OMP_PARALLEL)
a68ab351
JJ
3557 expand_parallel_call (region, new_bb, entry_stmt, ws_args);
3558 else
3559 expand_task_call (new_bb, entry_stmt);
5f40b3cb 3560 update_ssa (TODO_update_ssa_only_virtuals);
953ff289
DN
3561}
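/* For illustration only -- a sketch of the overall transformation this
   function performs, assuming a simple combined region:

     #pragma omp parallel shared(a)
       use (a);

   becomes, roughly,

     parent:  .omp_data_o.1.a = a;
              GOMP_parallel_start (foo._omp_fn.0, &.omp_data_o.1, 0);
              foo._omp_fn.0 (&.omp_data_o.1);
              GOMP_parallel_end ();

     child:   foo._omp_fn.0 (.omp_data_i) { use ((*.omp_data_i).a); }

   The actual call sequence is emitted by expand_parallel_call (or
   expand_task_call); the names .omp_data_o.1 and foo._omp_fn.0 above
   are merely representative.  */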
3562
50674e96
DN
3563
3564/* A subroutine of expand_omp_for. Generate code for a parallel
953ff289
DN
3565 loop with any schedule. Given parameters:
3566
3567 for (V = N1; V cond N2; V += STEP) BODY;
3568
3569 where COND is "<" or ">", we generate pseudocode
3570
3571 more = GOMP_loop_foo_start (N1, N2, STEP, CHUNK, &istart0, &iend0);
50674e96 3572 if (more) goto L0; else goto L3;
953ff289
DN
3573 L0:
3574 V = istart0;
3575 iend = iend0;
3576 L1:
3577 BODY;
3578 V += STEP;
50674e96 3579 if (V cond iend) goto L1; else goto L2;
953ff289 3580 L2:
50674e96
DN
3581 if (GOMP_loop_foo_next (&istart0, &iend0)) goto L0; else goto L3;
3582 L3:
953ff289 3583
50674e96 3584 If this is a combined omp parallel loop, instead of the call to
a68ab351
JJ
3585 GOMP_loop_foo_start, we call GOMP_loop_foo_next.
3586
3587 For collapsed loops, given parameters:
3588 collapse(3)
3589 for (V1 = N11; V1 cond1 N12; V1 += STEP1)
3590 for (V2 = N21; V2 cond2 N22; V2 += STEP2)
3591 for (V3 = N31; V3 cond3 N32; V3 += STEP3)
3592 BODY;
3593
3594 we generate pseudocode
3595
3596 if (cond3 is <)
3597 adj = STEP3 - 1;
3598 else
3599 adj = STEP3 + 1;
3600 count3 = (adj + N32 - N31) / STEP3;
3601 if (cond2 is <)
3602 adj = STEP2 - 1;
3603 else
3604 adj = STEP2 + 1;
3605 count2 = (adj + N22 - N21) / STEP2;
3606 if (cond1 is <)
3607 adj = STEP1 - 1;
3608 else
3609 adj = STEP1 + 1;
3610 count1 = (adj + N12 - N11) / STEP1;
3611 count = count1 * count2 * count3;
3612 more = GOMP_loop_foo_start (0, count, 1, CHUNK, &istart0, &iend0);
3613 if (more) goto L0; else goto L3;
3614 L0:
3615 V = istart0;
3616 T = V;
3617 V3 = N31 + (T % count3) * STEP3;
3618 T = T / count3;
3619 V2 = N21 + (T % count2) * STEP2;
3620 T = T / count2;
3621 V1 = N11 + T * STEP1;
3622 iend = iend0;
3623 L1:
3624 BODY;
3625 V += 1;
3626 if (V < iend) goto L10; else goto L2;
3627 L10:
3628 V3 += STEP3;
3629 if (V3 cond3 N32) goto L1; else goto L11;
3630 L11:
3631 V3 = N31;
3632 V2 += STEP2;
3633 if (V2 cond2 N22) goto L1; else goto L12;
3634 L12:
3635 V2 = N21;
3636 V1 += STEP1;
3637 goto L1;
3638 L2:
3639 if (GOMP_loop_foo_next (&istart0, &iend0)) goto L0; else goto L3;
3640 L3:
3641
3642 */
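/* As a concrete (illustrative) instance of the pseudocode above:

     #pragma omp for schedule(dynamic, 4)
     for (i = 0; i < n; i++) ...

   maps START_FN/NEXT_FN to GOMP_loop_dynamic_start/GOMP_loop_dynamic_next,
   so each thread repeatedly claims a half-open chunk [istart0, iend0) of
   up to 4 iterations and runs the sequential loop over it until the next
   call reports no more work.  */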
953ff289 3643
777f7f9a 3644static void
50674e96
DN
3645expand_omp_for_generic (struct omp_region *region,
3646 struct omp_for_data *fd,
953ff289
DN
3647 enum built_in_function start_fn,
3648 enum built_in_function next_fn)
3649{
726a989a 3650 tree type, istart0, iend0, iend;
a68ab351
JJ
3651 tree t, vmain, vback, bias = NULL_TREE;
3652 basic_block entry_bb, cont_bb, exit_bb, l0_bb, l1_bb, collapse_bb;
d3c673c7 3653 basic_block l2_bb = NULL, l3_bb = NULL;
726a989a
RB
3654 gimple_stmt_iterator gsi;
3655 gimple stmt;
50674e96 3656 bool in_combined_parallel = is_combined_parallel (region);
e5c95afe 3657 bool broken_loop = region->cont == NULL;
917948d3 3658 edge e, ne;
a68ab351
JJ
3659 tree *counts = NULL;
3660 int i;
e5c95afe
ZD
3661
3662 gcc_assert (!broken_loop || !in_combined_parallel);
a68ab351
JJ
3663 gcc_assert (fd->iter_type == long_integer_type_node
3664 || !in_combined_parallel);
953ff289 3665
a68ab351
JJ
3666 type = TREE_TYPE (fd->loop.v);
3667 istart0 = create_tmp_var (fd->iter_type, ".istart0");
3668 iend0 = create_tmp_var (fd->iter_type, ".iend0");
5b4fc8fb
JJ
3669 TREE_ADDRESSABLE (istart0) = 1;
3670 TREE_ADDRESSABLE (iend0) = 1;
917948d3
ZD
3671 if (gimple_in_ssa_p (cfun))
3672 {
3673 add_referenced_var (istart0);
3674 add_referenced_var (iend0);
3675 }
953ff289 3676
a68ab351
JJ
3677 /* See if we need to bias by LLONG_MIN. */
3678 if (fd->iter_type == long_long_unsigned_type_node
3679 && TREE_CODE (type) == INTEGER_TYPE
3680 && !TYPE_UNSIGNED (type))
3681 {
3682 tree n1, n2;
3683
3684 if (fd->loop.cond_code == LT_EXPR)
3685 {
3686 n1 = fd->loop.n1;
3687 n2 = fold_build2 (PLUS_EXPR, type, fd->loop.n2, fd->loop.step);
3688 }
3689 else
3690 {
3691 n1 = fold_build2 (MINUS_EXPR, type, fd->loop.n2, fd->loop.step);
3692 n2 = fd->loop.n1;
3693 }
3694 if (TREE_CODE (n1) != INTEGER_CST
3695 || TREE_CODE (n2) != INTEGER_CST
3696 || ((tree_int_cst_sgn (n1) < 0) ^ (tree_int_cst_sgn (n2) < 0)))
3697 bias = fold_convert (fd->iter_type, TYPE_MIN_VALUE (type));
3698 }
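/* For example (illustrative): iterating a signed long long V over
   [-10, 10) with an unsigned long long iter_type biases the bounds to
   -10 + 0x8000000000000000 and 10 + 0x8000000000000000, which compare
   correctly as unsigned; the bias is subtracted again below when the
   runtime-provided istart0/iend0 are copied back into V.  */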
3699
777f7f9a 3700 entry_bb = region->entry;
d3c673c7 3701 cont_bb = region->cont;
a68ab351 3702 collapse_bb = NULL;
e5c95afe
ZD
3703 gcc_assert (EDGE_COUNT (entry_bb->succs) == 2);
3704 gcc_assert (broken_loop
3705 || BRANCH_EDGE (entry_bb)->dest == FALLTHRU_EDGE (cont_bb)->dest);
3706 l0_bb = split_edge (FALLTHRU_EDGE (entry_bb));
3707 l1_bb = single_succ (l0_bb);
3708 if (!broken_loop)
d3c673c7
JJ
3709 {
3710 l2_bb = create_empty_bb (cont_bb);
e5c95afe
ZD
3711 gcc_assert (BRANCH_EDGE (cont_bb)->dest == l1_bb);
3712 gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
d3c673c7 3713 }
e5c95afe
ZD
3714 else
3715 l2_bb = NULL;
3716 l3_bb = BRANCH_EDGE (entry_bb)->dest;
3717 exit_bb = region->exit;
50674e96 3718
726a989a 3719 gsi = gsi_last_bb (entry_bb);
a68ab351 3720
726a989a 3721 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
a68ab351
JJ
3722 if (fd->collapse > 1)
3723 {
3724 /* Collapsed loops need work for expansion in SSA form. */
3725 gcc_assert (!gimple_in_ssa_p (cfun));
3726 counts = (tree *) alloca (fd->collapse * sizeof (tree));
3727 for (i = 0; i < fd->collapse; i++)
3728 {
3729 tree itype = TREE_TYPE (fd->loops[i].v);
3730
3731 if (POINTER_TYPE_P (itype))
3732 itype = lang_hooks.types.type_for_size (TYPE_PRECISION (itype), 0);
3733 t = build_int_cst (itype, (fd->loops[i].cond_code == LT_EXPR
3734 ? -1 : 1));
3735 t = fold_build2 (PLUS_EXPR, itype,
3736 fold_convert (itype, fd->loops[i].step), t);
3737 t = fold_build2 (PLUS_EXPR, itype, t,
3738 fold_convert (itype, fd->loops[i].n2));
3739 t = fold_build2 (MINUS_EXPR, itype, t,
3740 fold_convert (itype, fd->loops[i].n1));
3741 if (TYPE_UNSIGNED (itype) && fd->loops[i].cond_code == GT_EXPR)
3742 t = fold_build2 (TRUNC_DIV_EXPR, itype,
3743 fold_build1 (NEGATE_EXPR, itype, t),
3744 fold_build1 (NEGATE_EXPR, itype,
3745 fold_convert (itype,
3746 fd->loops[i].step)));
3747 else
3748 t = fold_build2 (TRUNC_DIV_EXPR, itype, t,
3749 fold_convert (itype, fd->loops[i].step));
3750 t = fold_convert (type, t);
3751 if (TREE_CODE (t) == INTEGER_CST)
3752 counts[i] = t;
3753 else
3754 {
3755 counts[i] = create_tmp_var (type, ".count");
726a989a
RB
3756 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3757 true, GSI_SAME_STMT);
3758 stmt = gimple_build_assign (counts[i], t);
3759 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
a68ab351
JJ
3760 }
3761 if (SSA_VAR_P (fd->loop.n2))
3762 {
3763 if (i == 0)
726a989a 3764 t = counts[0];
a68ab351
JJ
3765 else
3766 {
3767 t = fold_build2 (MULT_EXPR, type, fd->loop.n2, counts[i]);
726a989a
RB
3768 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3769 true, GSI_SAME_STMT);
a68ab351 3770 }
726a989a
RB
3771 stmt = gimple_build_assign (fd->loop.n2, t);
3772 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
a68ab351
JJ
3773 }
3774 }
3775 }
917948d3
ZD
3776 if (in_combined_parallel)
3777 {
3778 /* In a combined parallel loop, emit a call to
3779 GOMP_loop_foo_next. */
3780 t = build_call_expr (built_in_decls[next_fn], 2,
3781 build_fold_addr_expr (istart0),
3782 build_fold_addr_expr (iend0));
3783 }
3784 else
953ff289 3785 {
5039610b 3786 tree t0, t1, t2, t3, t4;
50674e96
DN
3787 /* If this is not a combined parallel loop, emit a call to
3788 GOMP_loop_foo_start in ENTRY_BB. */
5039610b
SL
3789 t4 = build_fold_addr_expr (iend0);
3790 t3 = build_fold_addr_expr (istart0);
a68ab351 3791 t2 = fold_convert (fd->iter_type, fd->loop.step);
c6ff4493
SE
3792 if (POINTER_TYPE_P (type)
3793 && TYPE_PRECISION (type) != TYPE_PRECISION (fd->iter_type))
3794 {
3795 /* Avoid casting pointers to an integer of a different size. */
3796 tree itype
3797 = lang_hooks.types.type_for_size (TYPE_PRECISION (type), 0);
3798 t1 = fold_convert (fd->iter_type, fold_convert (itype, fd->loop.n2));
3799 t0 = fold_convert (fd->iter_type, fold_convert (itype, fd->loop.n1));
3800 }
3801 else
3802 {
3803 t1 = fold_convert (fd->iter_type, fd->loop.n2);
3804 t0 = fold_convert (fd->iter_type, fd->loop.n1);
3805 }
a68ab351 3806 if (bias)
953ff289 3807 {
a68ab351
JJ
3808 t1 = fold_build2 (PLUS_EXPR, fd->iter_type, t1, bias);
3809 t0 = fold_build2 (PLUS_EXPR, fd->iter_type, t0, bias);
3810 }
3811 if (fd->iter_type == long_integer_type_node)
3812 {
3813 if (fd->chunk_size)
3814 {
3815 t = fold_convert (fd->iter_type, fd->chunk_size);
3816 t = build_call_expr (built_in_decls[start_fn], 6,
3817 t0, t1, t2, t, t3, t4);
3818 }
3819 else
3820 t = build_call_expr (built_in_decls[start_fn], 5,
3821 t0, t1, t2, t3, t4);
953ff289 3822 }
5039610b 3823 else
a68ab351
JJ
3824 {
3825 tree t5;
3826 tree c_bool_type;
3827
3828 /* The GOMP_loop_ull_*start functions have an additional boolean
3829 argument, true for < loops and false for > loops.
3830 In Fortran, the C bool type can be different from
3831 boolean_type_node. */
3832 c_bool_type = TREE_TYPE (TREE_TYPE (built_in_decls[start_fn]));
3833 t5 = build_int_cst (c_bool_type,
3834 fd->loop.cond_code == LT_EXPR ? 1 : 0);
3835 if (fd->chunk_size)
3836 {
3837 t = fold_convert (fd->iter_type, fd->chunk_size);
3838 t = build_call_expr (built_in_decls[start_fn], 7,
3839 t5, t0, t1, t2, t, t3, t4);
3840 }
3841 else
3842 t = build_call_expr (built_in_decls[start_fn], 6,
3843 t5, t0, t1, t2, t3, t4);
3844 }
953ff289 3845 }
a68ab351
JJ
3846 if (TREE_TYPE (t) != boolean_type_node)
3847 t = fold_build2 (NE_EXPR, boolean_type_node,
3848 t, build_int_cst (TREE_TYPE (t), 0));
726a989a
RB
3849 t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3850 true, GSI_SAME_STMT);
3851 gsi_insert_after (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT);
917948d3 3852
726a989a
RB
3853 /* Remove the GIMPLE_OMP_FOR statement. */
3854 gsi_remove (&gsi, true);
953ff289 3855
50674e96 3856 /* Iteration setup for sequential loop goes in L0_BB. */
726a989a 3857 gsi = gsi_start_bb (l0_bb);
550918ca 3858 t = istart0;
a68ab351 3859 if (bias)
550918ca
RG
3860 t = fold_build2 (MINUS_EXPR, fd->iter_type, t, bias);
3861 if (POINTER_TYPE_P (type))
3862 t = fold_convert (lang_hooks.types.type_for_size (TYPE_PRECISION (type),
3863 0), t);
3864 t = fold_convert (type, t);
726a989a
RB
3865 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3866 false, GSI_CONTINUE_LINKING);
3867 stmt = gimple_build_assign (fd->loop.v, t);
3868 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
953ff289 3869
550918ca 3870 t = iend0;
a68ab351 3871 if (bias)
550918ca
RG
3872 t = fold_build2 (MINUS_EXPR, fd->iter_type, t, bias);
3873 if (POINTER_TYPE_P (type))
3874 t = fold_convert (lang_hooks.types.type_for_size (TYPE_PRECISION (type),
3875 0), t);
3876 t = fold_convert (type, t);
726a989a
RB
3877 iend = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3878 false, GSI_CONTINUE_LINKING);
a68ab351
JJ
3879 if (fd->collapse > 1)
3880 {
3881 tree tem = create_tmp_var (type, ".tem");
3882
726a989a
RB
3883 stmt = gimple_build_assign (tem, fd->loop.v);
3884 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
a68ab351
JJ
3885 for (i = fd->collapse - 1; i >= 0; i--)
3886 {
3887 tree vtype = TREE_TYPE (fd->loops[i].v), itype;
3888 itype = vtype;
3889 if (POINTER_TYPE_P (vtype))
3890 itype = lang_hooks.types.type_for_size (TYPE_PRECISION (vtype), 0);
3891 t = fold_build2 (TRUNC_MOD_EXPR, type, tem, counts[i]);
3892 t = fold_convert (itype, t);
ada39f0b
RG
3893 t = fold_build2 (MULT_EXPR, itype, t,
3894 fold_convert (itype, fd->loops[i].step));
a68ab351
JJ
3895 if (POINTER_TYPE_P (vtype))
3896 t = fold_build2 (POINTER_PLUS_EXPR, vtype,
3897 fd->loops[i].n1, fold_convert (sizetype, t));
3898 else
3899 t = fold_build2 (PLUS_EXPR, itype, fd->loops[i].n1, t);
726a989a
RB
3900 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3901 false, GSI_CONTINUE_LINKING);
3902 stmt = gimple_build_assign (fd->loops[i].v, t);
3903 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
a68ab351
JJ
3904 if (i != 0)
3905 {
3906 t = fold_build2 (TRUNC_DIV_EXPR, type, tem, counts[i]);
726a989a
RB
3907 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3908 false, GSI_CONTINUE_LINKING);
3909 stmt = gimple_build_assign (tem, t);
3910 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
a68ab351
JJ
3911 }
3912 }
3913 }
50674e96 3914
e5c95afe 3915 if (!broken_loop)
d3c673c7 3916 {
e5c95afe
ZD
3917 /* Code to control the increment and predicate for the sequential
3918 loop goes in the CONT_BB. */
726a989a
RB
3919 gsi = gsi_last_bb (cont_bb);
3920 stmt = gsi_stmt (gsi);
3921 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_CONTINUE);
3922 vmain = gimple_omp_continue_control_use (stmt);
3923 vback = gimple_omp_continue_control_def (stmt);
917948d3 3924
a68ab351
JJ
3925 if (POINTER_TYPE_P (type))
3926 t = fold_build2 (POINTER_PLUS_EXPR, type, vmain,
3927 fold_convert (sizetype, fd->loop.step));
3928 else
3929 t = fold_build2 (PLUS_EXPR, type, vmain, fd->loop.step);
726a989a
RB
3930 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3931 true, GSI_SAME_STMT);
3932 stmt = gimple_build_assign (vback, t);
3933 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
3934
a68ab351 3935 t = build2 (fd->loop.cond_code, boolean_type_node, vback, iend);
726a989a
RB
3936 stmt = gimple_build_cond_empty (t);
3937 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
50674e96 3938
726a989a
RB
3939 /* Remove GIMPLE_OMP_CONTINUE. */
3940 gsi_remove (&gsi, true);
50674e96 3941
a68ab351
JJ
3942 if (fd->collapse > 1)
3943 {
3944 basic_block last_bb, bb;
3945
3946 last_bb = cont_bb;
3947 for (i = fd->collapse - 1; i >= 0; i--)
3948 {
3949 tree vtype = TREE_TYPE (fd->loops[i].v);
3950
3951 bb = create_empty_bb (last_bb);
726a989a 3952 gsi = gsi_start_bb (bb);
a68ab351
JJ
3953
3954 if (i < fd->collapse - 1)
3955 {
3956 e = make_edge (last_bb, bb, EDGE_FALSE_VALUE);
3957 e->probability = REG_BR_PROB_BASE / 8;
3958
726a989a
RB
3959 t = fd->loops[i + 1].n1;
3960 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3961 false, GSI_CONTINUE_LINKING);
3962 stmt = gimple_build_assign (fd->loops[i + 1].v, t);
3963 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
a68ab351
JJ
3964 }
3965 else
3966 collapse_bb = bb;
3967
3968 set_immediate_dominator (CDI_DOMINATORS, bb, last_bb);
3969
3970 if (POINTER_TYPE_P (vtype))
3971 t = fold_build2 (POINTER_PLUS_EXPR, vtype,
3972 fd->loops[i].v,
3973 fold_convert (sizetype, fd->loops[i].step));
3974 else
3975 t = fold_build2 (PLUS_EXPR, vtype, fd->loops[i].v,
3976 fd->loops[i].step);
726a989a
RB
3977 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3978 false, GSI_CONTINUE_LINKING);
3979 stmt = gimple_build_assign (fd->loops[i].v, t);
3980 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
a68ab351
JJ
3981
3982 if (i > 0)
3983 {
726a989a
RB
3984 t = fd->loops[i].n2;
3985 t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3986 false, GSI_CONTINUE_LINKING);
a68ab351 3987 t = fold_build2 (fd->loops[i].cond_code, boolean_type_node,
726a989a
RB
3988 fd->loops[i].v, t);
3989 stmt = gimple_build_cond_empty (t);
3990 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
a68ab351
JJ
3991 e = make_edge (bb, l1_bb, EDGE_TRUE_VALUE);
3992 e->probability = REG_BR_PROB_BASE * 7 / 8;
3993 }
3994 else
3995 make_edge (bb, l1_bb, EDGE_FALLTHRU);
3996 last_bb = bb;
3997 }
3998 }
3999
e5c95afe 4000 /* Emit code to get the next parallel iteration in L2_BB. */
726a989a 4001 gsi = gsi_start_bb (l2_bb);
50674e96 4002
e5c95afe
ZD
4003 t = build_call_expr (built_in_decls[next_fn], 2,
4004 build_fold_addr_expr (istart0),
4005 build_fold_addr_expr (iend0));
726a989a
RB
4006 t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
4007 false, GSI_CONTINUE_LINKING);
a68ab351
JJ
4008 if (TREE_TYPE (t) != boolean_type_node)
4009 t = fold_build2 (NE_EXPR, boolean_type_node,
4010 t, build_int_cst (TREE_TYPE (t), 0));
726a989a
RB
4011 stmt = gimple_build_cond_empty (t);
4012 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
e5c95afe 4013 }
953ff289 4014
777f7f9a 4015 /* Add the loop cleanup function. */
726a989a
RB
4016 gsi = gsi_last_bb (exit_bb);
4017 if (gimple_omp_return_nowait_p (gsi_stmt (gsi)))
777f7f9a
RH
4018 t = built_in_decls[BUILT_IN_GOMP_LOOP_END_NOWAIT];
4019 else
4020 t = built_in_decls[BUILT_IN_GOMP_LOOP_END];
726a989a
RB
4021 stmt = gimple_build_call (t, 0);
4022 gsi_insert_after (&gsi, stmt, GSI_SAME_STMT);
4023 gsi_remove (&gsi, true);
50674e96
DN
4024
4025 /* Connect the new blocks. */
917948d3
ZD
4026 find_edge (entry_bb, l0_bb)->flags = EDGE_TRUE_VALUE;
4027 find_edge (entry_bb, l3_bb)->flags = EDGE_FALSE_VALUE;
953ff289 4028
e5c95afe
ZD
4029 if (!broken_loop)
4030 {
726a989a
RB
4031 gimple_seq phis;
4032
917948d3
ZD
4033 e = find_edge (cont_bb, l3_bb);
4034 ne = make_edge (l2_bb, l3_bb, EDGE_FALSE_VALUE);
4035
726a989a
RB
4036 phis = phi_nodes (l3_bb);
4037 for (gsi = gsi_start (phis); !gsi_end_p (gsi); gsi_next (&gsi))
4038 {
4039 gimple phi = gsi_stmt (gsi);
4040 SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, ne),
4041 PHI_ARG_DEF_FROM_EDGE (phi, e));
4042 }
917948d3
ZD
4043 remove_edge (e);
4044
e5c95afe 4045 make_edge (cont_bb, l2_bb, EDGE_FALSE_VALUE);
a68ab351
JJ
4046 if (fd->collapse > 1)
4047 {
4048 e = find_edge (cont_bb, l1_bb);
4049 remove_edge (e);
4050 e = make_edge (cont_bb, collapse_bb, EDGE_TRUE_VALUE);
4051 }
4052 else
4053 {
4054 e = find_edge (cont_bb, l1_bb);
4055 e->flags = EDGE_TRUE_VALUE;
4056 }
4057 e->probability = REG_BR_PROB_BASE * 7 / 8;
4058 find_edge (cont_bb, l2_bb)->probability = REG_BR_PROB_BASE / 8;
e5c95afe 4059 make_edge (l2_bb, l0_bb, EDGE_TRUE_VALUE);
917948d3
ZD
4060
4061 set_immediate_dominator (CDI_DOMINATORS, l2_bb,
4062 recompute_dominator (CDI_DOMINATORS, l2_bb));
4063 set_immediate_dominator (CDI_DOMINATORS, l3_bb,
4064 recompute_dominator (CDI_DOMINATORS, l3_bb));
4065 set_immediate_dominator (CDI_DOMINATORS, l0_bb,
4066 recompute_dominator (CDI_DOMINATORS, l0_bb));
4067 set_immediate_dominator (CDI_DOMINATORS, l1_bb,
4068 recompute_dominator (CDI_DOMINATORS, l1_bb));
e5c95afe 4069 }
953ff289
DN
4070}
4071
4072
50674e96
DN
4073/* A subroutine of expand_omp_for. Generate code for a parallel
4074 loop with static schedule and no specified chunk size. Given
4075 parameters:
953ff289
DN
4076
4077 for (V = N1; V cond N2; V += STEP) BODY;
4078
4079 where COND is "<" or ">", we generate pseudocode
4080
4081 if (cond is <)
4082 adj = STEP - 1;
4083 else
4084 adj = STEP + 1;
a68ab351
JJ
4085 if ((__typeof (V)) -1 > 0 && cond is >)
4086 n = -(adj + N2 - N1) / -STEP;
4087 else
4088 n = (adj + N2 - N1) / STEP;
953ff289
DN
4089 q = n / nthreads;
4090 q += (q * nthreads != n);
4091 s0 = q * threadid;
4092 e0 = min(s0 + q, n);
917948d3 4093 V = s0 * STEP + N1;
953ff289
DN
4094 if (s0 >= e0) goto L2; else goto L0;
4095 L0:
953ff289
DN
4096 e = e0 * STEP + N1;
4097 L1:
4098 BODY;
4099 V += STEP;
4100 if (V cond e) goto L1;
953ff289
DN
4101 L2:
4102*/
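/* Worked example (illustrative): n = 10 iterations on nthreads = 4.
   q = 10/4 = 2, and since 2*4 != 10, q becomes 3.  Thread 0 then runs
   s0..e0 = 0..3, thread 1 runs 3..6, thread 2 runs 6..9, and thread 3
   runs 9..10; any thread with s0 >= e0 skips the loop body entirely.  */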
4103
777f7f9a 4104static void
50674e96
DN
4105expand_omp_for_static_nochunk (struct omp_region *region,
4106 struct omp_for_data *fd)
953ff289 4107{
a9b77cd1 4108 tree n, q, s0, e0, e, t, nthreads, threadid;
a68ab351 4109 tree type, itype, vmain, vback;
777f7f9a
RH
4110 basic_block entry_bb, exit_bb, seq_start_bb, body_bb, cont_bb;
4111 basic_block fin_bb;
726a989a
RB
4112 gimple_stmt_iterator gsi;
4113 gimple stmt;
953ff289 4114
a68ab351
JJ
4115 itype = type = TREE_TYPE (fd->loop.v);
4116 if (POINTER_TYPE_P (type))
4117 itype = lang_hooks.types.type_for_size (TYPE_PRECISION (type), 0);
953ff289 4118
777f7f9a 4119 entry_bb = region->entry;
777f7f9a 4120 cont_bb = region->cont;
e5c95afe
ZD
4121 gcc_assert (EDGE_COUNT (entry_bb->succs) == 2);
4122 gcc_assert (BRANCH_EDGE (entry_bb)->dest == FALLTHRU_EDGE (cont_bb)->dest);
4123 seq_start_bb = split_edge (FALLTHRU_EDGE (entry_bb));
4124 body_bb = single_succ (seq_start_bb);
4125 gcc_assert (BRANCH_EDGE (cont_bb)->dest == body_bb);
4126 gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
4127 fin_bb = FALLTHRU_EDGE (cont_bb)->dest;
777f7f9a
RH
4128 exit_bb = region->exit;
4129
50674e96 4130 /* Iteration space partitioning goes in ENTRY_BB. */
726a989a
RB
4131 gsi = gsi_last_bb (entry_bb);
4132 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
777f7f9a 4133
5039610b 4134 t = build_call_expr (built_in_decls[BUILT_IN_OMP_GET_NUM_THREADS], 0);
a68ab351 4135 t = fold_convert (itype, t);
726a989a
RB
4136 nthreads = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
4137 true, GSI_SAME_STMT);
953ff289 4138
5039610b 4139 t = build_call_expr (built_in_decls[BUILT_IN_OMP_GET_THREAD_NUM], 0);
a68ab351 4140 t = fold_convert (itype, t);
726a989a
RB
4141 threadid = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
4142 true, GSI_SAME_STMT);
953ff289 4143
a68ab351 4144 fd->loop.n1
726a989a
RB
4145 = force_gimple_operand_gsi (&gsi, fold_convert (type, fd->loop.n1),
4146 true, NULL_TREE, true, GSI_SAME_STMT);
a68ab351 4147 fd->loop.n2
726a989a
RB
4148 = force_gimple_operand_gsi (&gsi, fold_convert (itype, fd->loop.n2),
4149 true, NULL_TREE, true, GSI_SAME_STMT);
a68ab351 4150 fd->loop.step
726a989a
RB
4151 = force_gimple_operand_gsi (&gsi, fold_convert (itype, fd->loop.step),
4152 true, NULL_TREE, true, GSI_SAME_STMT);
a68ab351
JJ
4153
4154 t = build_int_cst (itype, (fd->loop.cond_code == LT_EXPR ? -1 : 1));
4155 t = fold_build2 (PLUS_EXPR, itype, fd->loop.step, t);
4156 t = fold_build2 (PLUS_EXPR, itype, t, fd->loop.n2);
4157 t = fold_build2 (MINUS_EXPR, itype, t, fold_convert (itype, fd->loop.n1));
4158 if (TYPE_UNSIGNED (itype) && fd->loop.cond_code == GT_EXPR)
4159 t = fold_build2 (TRUNC_DIV_EXPR, itype,
4160 fold_build1 (NEGATE_EXPR, itype, t),
4161 fold_build1 (NEGATE_EXPR, itype, fd->loop.step));
4162 else
4163 t = fold_build2 (TRUNC_DIV_EXPR, itype, t, fd->loop.step);
4164 t = fold_convert (itype, t);
726a989a 4165 n = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);
953ff289 4166
a68ab351 4167 t = fold_build2 (TRUNC_DIV_EXPR, itype, n, nthreads);
726a989a 4168 q = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);
953ff289 4169
a68ab351
JJ
4170 t = fold_build2 (MULT_EXPR, itype, q, nthreads);
4171 t = fold_build2 (NE_EXPR, itype, t, n);
4172 t = fold_build2 (PLUS_EXPR, itype, q, t);
726a989a 4173 q = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);
953ff289 4174
a68ab351 4175 t = build2 (MULT_EXPR, itype, q, threadid);
726a989a 4176 s0 = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);
953ff289 4177
a68ab351
JJ
4178 t = fold_build2 (PLUS_EXPR, itype, s0, q);
4179 t = fold_build2 (MIN_EXPR, itype, t, n);
726a989a 4180 e0 = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);
917948d3 4181
953ff289 4182 t = build2 (GE_EXPR, boolean_type_node, s0, e0);
726a989a 4183 gsi_insert_before (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT);
50674e96 4184
726a989a
RB
4185 /* Remove the GIMPLE_OMP_FOR statement. */
4186 gsi_remove (&gsi, true);
50674e96
DN
4187
4188 /* Setup code for sequential iteration goes in SEQ_START_BB. */
726a989a 4189 gsi = gsi_start_bb (seq_start_bb);
953ff289 4190
a68ab351
JJ
4191 t = fold_convert (itype, s0);
4192 t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
4193 if (POINTER_TYPE_P (type))
4194 t = fold_build2 (POINTER_PLUS_EXPR, type, fd->loop.n1,
4195 fold_convert (sizetype, t));
4196 else
4197 t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
726a989a
RB
4198 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
4199 false, GSI_CONTINUE_LINKING);
4200 stmt = gimple_build_assign (fd->loop.v, t);
4201 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
4202
a68ab351
JJ
4203 t = fold_convert (itype, e0);
4204 t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
4205 if (POINTER_TYPE_P (type))
4206 t = fold_build2 (POINTER_PLUS_EXPR, type, fd->loop.n1,
4207 fold_convert (sizetype, t));
4208 else
4209 t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
726a989a
RB
4210 e = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
4211 false, GSI_CONTINUE_LINKING);
953ff289 4212
726a989a
RB
4213 /* The code controlling the sequential loop replaces the
4214 GIMPLE_OMP_CONTINUE. */
4215 gsi = gsi_last_bb (cont_bb);
4216 stmt = gsi_stmt (gsi);
4217 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_CONTINUE);
4218 vmain = gimple_omp_continue_control_use (stmt);
4219 vback = gimple_omp_continue_control_def (stmt);
917948d3 4220
a68ab351
JJ
4221 if (POINTER_TYPE_P (type))
4222 t = fold_build2 (POINTER_PLUS_EXPR, type, vmain,
4223 fold_convert (sizetype, fd->loop.step));
4224 else
4225 t = fold_build2 (PLUS_EXPR, type, vmain, fd->loop.step);
726a989a
RB
4226 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
4227 true, GSI_SAME_STMT);
4228 stmt = gimple_build_assign (vback, t);
4229 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
917948d3 4230
a68ab351 4231 t = build2 (fd->loop.cond_code, boolean_type_node, vback, e);
726a989a 4232 gsi_insert_before (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT);
953ff289 4233
726a989a
RB
4234 /* Remove the GIMPLE_OMP_CONTINUE statement. */
4235 gsi_remove (&gsi, true);
50674e96 4236
726a989a
RB
4237 /* Replace the GIMPLE_OMP_RETURN with a barrier, or nothing. */
4238 gsi = gsi_last_bb (exit_bb);
4239 if (!gimple_omp_return_nowait_p (gsi_stmt (gsi)))
4240 force_gimple_operand_gsi (&gsi, build_omp_barrier (), false, NULL_TREE,
4241 false, GSI_SAME_STMT);
4242 gsi_remove (&gsi, true);
50674e96
DN
4243
4244 /* Connect all the blocks. */
e5c95afe
ZD
4245 find_edge (entry_bb, seq_start_bb)->flags = EDGE_FALSE_VALUE;
4246 find_edge (entry_bb, fin_bb)->flags = EDGE_TRUE_VALUE;
917948d3 4247
e5c95afe 4248 find_edge (cont_bb, body_bb)->flags = EDGE_TRUE_VALUE;
777f7f9a 4249 find_edge (cont_bb, fin_bb)->flags = EDGE_FALSE_VALUE;
917948d3
ZD
4250
4251 set_immediate_dominator (CDI_DOMINATORS, seq_start_bb, entry_bb);
4252 set_immediate_dominator (CDI_DOMINATORS, body_bb,
4253 recompute_dominator (CDI_DOMINATORS, body_bb));
4254 set_immediate_dominator (CDI_DOMINATORS, fin_bb,
4255 recompute_dominator (CDI_DOMINATORS, fin_bb));
953ff289
DN
4256}
4257
50674e96
DN
4258
4259/* A subroutine of expand_omp_for. Generate code for a parallel
4260 loop with static schedule and a specified chunk size. Given
4261 parameters:
953ff289
DN
4262
4263 for (V = N1; V cond N2; V += STEP) BODY;
4264
4265 where COND is "<" or ">", we generate pseudocode
4266
4267 if (cond is <)
4268 adj = STEP - 1;
4269 else
4270 adj = STEP + 1;
a68ab351
JJ
4271 if ((__typeof (V)) -1 > 0 && cond is >)
4272 n = -(adj + N2 - N1) / -STEP;
4273 else
4274 n = (adj + N2 - N1) / STEP;
953ff289 4275 trip = 0;
917948d3
ZD
4276 V = threadid * CHUNK * STEP + N1; -- this extra definition of V is
4277 here so that V is defined
4278 if the loop is not entered
953ff289
DN
4279 L0:
4280 s0 = (trip * nthreads + threadid) * CHUNK;
4281 e0 = min(s0 + CHUNK, n);
4282 if (s0 < n) goto L1; else goto L4;
4283 L1:
4284 V = s0 * STEP + N1;
4285 e = e0 * STEP + N1;
4286 L2:
4287 BODY;
4288 V += STEP;
4289 if (V cond e) goto L2; else goto L3;
4290 L3:
4291 trip += 1;
4292 goto L0;
4293 L4:
953ff289
DN
4294*/
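/* Worked example (illustrative): n = 7, nthreads = 2, CHUNK = 2.
   Trip 0: thread 0 takes s0..e0 = 0..2, thread 1 takes 2..4.
   Trip 1: thread 0 takes 4..6, thread 1 takes 6..min(8,7) = 7.
   Trip 2: s0 = 8 >= n for both threads, so the test at L0 exits to L4.  */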
4295
777f7f9a 4296static void
726a989a 4297expand_omp_for_static_chunk (struct omp_region *region, struct omp_for_data *fd)
953ff289 4298{
726a989a 4299 tree n, s0, e0, e, t;
917948d3 4300 tree trip_var, trip_init, trip_main, trip_back, nthreads, threadid;
726a989a 4301 tree type, itype, v_main, v_back, v_extra;
50674e96 4302 basic_block entry_bb, exit_bb, body_bb, seq_start_bb, iter_part_bb;
777f7f9a 4303 basic_block trip_update_bb, cont_bb, fin_bb;
726a989a
RB
4304 gimple_stmt_iterator si;
4305 gimple stmt;
4306 edge se;
953ff289 4307
a68ab351
JJ
4308 itype = type = TREE_TYPE (fd->loop.v);
4309 if (POINTER_TYPE_P (type))
4310 itype = lang_hooks.types.type_for_size (TYPE_PRECISION (type), 0);
953ff289 4311
777f7f9a 4312 entry_bb = region->entry;
e5c95afe
ZD
4313 se = split_block (entry_bb, last_stmt (entry_bb));
4314 entry_bb = se->src;
4315 iter_part_bb = se->dest;
777f7f9a 4316 cont_bb = region->cont;
e5c95afe
ZD
4317 gcc_assert (EDGE_COUNT (iter_part_bb->succs) == 2);
4318 gcc_assert (BRANCH_EDGE (iter_part_bb)->dest
4319 == FALLTHRU_EDGE (cont_bb)->dest);
4320 seq_start_bb = split_edge (FALLTHRU_EDGE (iter_part_bb));
4321 body_bb = single_succ (seq_start_bb);
4322 gcc_assert (BRANCH_EDGE (cont_bb)->dest == body_bb);
4323 gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
4324 fin_bb = FALLTHRU_EDGE (cont_bb)->dest;
4325 trip_update_bb = split_edge (FALLTHRU_EDGE (cont_bb));
777f7f9a 4326 exit_bb = region->exit;
50674e96 4327
50674e96 4328 /* Trip and adjustment setup goes in ENTRY_BB. */
726a989a
RB
4329 si = gsi_last_bb (entry_bb);
4330 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_FOR);
50674e96 4331
5039610b 4332 t = build_call_expr (built_in_decls[BUILT_IN_OMP_GET_NUM_THREADS], 0);
a68ab351 4333 t = fold_convert (itype, t);
726a989a
RB
4334 nthreads = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4335 true, GSI_SAME_STMT);
953ff289 4336
5039610b 4337 t = build_call_expr (built_in_decls[BUILT_IN_OMP_GET_THREAD_NUM], 0);
a68ab351 4338 t = fold_convert (itype, t);
726a989a
RB
4339 threadid = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4340 true, GSI_SAME_STMT);
917948d3 4341
a68ab351 4342 fd->loop.n1
726a989a
RB
4343 = force_gimple_operand_gsi (&si, fold_convert (type, fd->loop.n1),
4344 true, NULL_TREE, true, GSI_SAME_STMT);
a68ab351 4345 fd->loop.n2
726a989a
RB
4346 = force_gimple_operand_gsi (&si, fold_convert (itype, fd->loop.n2),
4347 true, NULL_TREE, true, GSI_SAME_STMT);
a68ab351 4348 fd->loop.step
726a989a
RB
4349 = force_gimple_operand_gsi (&si, fold_convert (itype, fd->loop.step),
4350 true, NULL_TREE, true, GSI_SAME_STMT);
917948d3 4351 fd->chunk_size
726a989a
RB
4352 = force_gimple_operand_gsi (&si, fold_convert (itype, fd->chunk_size),
4353 true, NULL_TREE, true, GSI_SAME_STMT);
a68ab351
JJ
4354
4355 t = build_int_cst (itype, (fd->loop.cond_code == LT_EXPR ? -1 : 1));
4356 t = fold_build2 (PLUS_EXPR, itype, fd->loop.step, t);
4357 t = fold_build2 (PLUS_EXPR, itype, t, fd->loop.n2);
4358 t = fold_build2 (MINUS_EXPR, itype, t, fold_convert (itype, fd->loop.n1));
4359 if (TYPE_UNSIGNED (itype) && fd->loop.cond_code == GT_EXPR)
4360 t = fold_build2 (TRUNC_DIV_EXPR, itype,
4361 fold_build1 (NEGATE_EXPR, itype, t),
4362 fold_build1 (NEGATE_EXPR, itype, fd->loop.step));
4363 else
4364 t = fold_build2 (TRUNC_DIV_EXPR, itype, t, fd->loop.step);
4365 t = fold_convert (itype, t);
726a989a
RB
4366 n = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4367 true, GSI_SAME_STMT);
917948d3 4368
a68ab351 4369 trip_var = create_tmp_var (itype, ".trip");
917948d3
ZD
4370 if (gimple_in_ssa_p (cfun))
4371 {
4372 add_referenced_var (trip_var);
726a989a
RB
4373 trip_init = make_ssa_name (trip_var, NULL);
4374 trip_main = make_ssa_name (trip_var, NULL);
4375 trip_back = make_ssa_name (trip_var, NULL);
917948d3 4376 }
953ff289 4377 else
917948d3
ZD
4378 {
4379 trip_init = trip_var;
4380 trip_main = trip_var;
4381 trip_back = trip_var;
4382 }
953ff289 4383
726a989a
RB
4384 stmt = gimple_build_assign (trip_init, build_int_cst (itype, 0));
4385 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
50674e96 4386
a68ab351
JJ
4387 t = fold_build2 (MULT_EXPR, itype, threadid, fd->chunk_size);
4388 t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
4389 if (POINTER_TYPE_P (type))
4390 t = fold_build2 (POINTER_PLUS_EXPR, type, fd->loop.n1,
4391 fold_convert (sizetype, t));
4392 else
4393 t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
726a989a
RB
4394 v_extra = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4395 true, GSI_SAME_STMT);
917948d3 4396
726a989a
RB
4397 /* Remove the GIMPLE_OMP_FOR. */
4398 gsi_remove (&si, true);
50674e96
DN
4399
4400 /* Iteration space partitioning goes in ITER_PART_BB. */
726a989a 4401 si = gsi_last_bb (iter_part_bb);
953ff289 4402
a68ab351
JJ
4403 t = fold_build2 (MULT_EXPR, itype, trip_main, nthreads);
4404 t = fold_build2 (PLUS_EXPR, itype, t, threadid);
4405 t = fold_build2 (MULT_EXPR, itype, t, fd->chunk_size);
726a989a
RB
4406 s0 = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4407 false, GSI_CONTINUE_LINKING);
953ff289 4408
a68ab351
JJ
4409 t = fold_build2 (PLUS_EXPR, itype, s0, fd->chunk_size);
4410 t = fold_build2 (MIN_EXPR, itype, t, n);
726a989a
RB
4411 e0 = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4412 false, GSI_CONTINUE_LINKING);
953ff289
DN
4413
4414 t = build2 (LT_EXPR, boolean_type_node, s0, n);
726a989a 4415 gsi_insert_after (&si, gimple_build_cond_empty (t), GSI_CONTINUE_LINKING);
50674e96
DN
4416
4417 /* Setup code for sequential iteration goes in SEQ_START_BB. */
726a989a 4418 si = gsi_start_bb (seq_start_bb);
953ff289 4419
a68ab351
JJ
4420 t = fold_convert (itype, s0);
4421 t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
4422 if (POINTER_TYPE_P (type))
4423 t = fold_build2 (POINTER_PLUS_EXPR, type, fd->loop.n1,
4424 fold_convert (sizetype, t));
4425 else
4426 t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
726a989a
RB
4427 t = force_gimple_operand_gsi (&si, t, false, NULL_TREE,
4428 false, GSI_CONTINUE_LINKING);
4429 stmt = gimple_build_assign (fd->loop.v, t);
4430 gsi_insert_after (&si, stmt, GSI_CONTINUE_LINKING);
953ff289 4431
a68ab351
JJ
4432 t = fold_convert (itype, e0);
4433 t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
4434 if (POINTER_TYPE_P (type))
4435 t = fold_build2 (POINTER_PLUS_EXPR, type, fd->loop.n1,
4436 fold_convert (sizetype, t));
4437 else
4438 t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
726a989a
RB
4439 e = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4440 false, GSI_CONTINUE_LINKING);
953ff289 4441
777f7f9a 4442 /* The code controlling the sequential loop goes in CONT_BB,
726a989a
RB
4443 replacing the GIMPLE_OMP_CONTINUE. */
4444 si = gsi_last_bb (cont_bb);
4445 stmt = gsi_stmt (si);
4446 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_CONTINUE);
4447 v_main = gimple_omp_continue_control_use (stmt);
4448 v_back = gimple_omp_continue_control_def (stmt);
917948d3 4449
a68ab351
JJ
4450 if (POINTER_TYPE_P (type))
4451 t = fold_build2 (POINTER_PLUS_EXPR, type, v_main,
4452 fold_convert (sizetype, fd->loop.step));
4453 else
726a989a
RB
4454 t = fold_build2 (PLUS_EXPR, type, v_main, fd->loop.step);
4455 stmt = gimple_build_assign (v_back, t);
4456 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
917948d3 4457
a68ab351 4458 t = build2 (fd->loop.cond_code, boolean_type_node, v_back, e);
726a989a 4459 gsi_insert_before (&si, gimple_build_cond_empty (t), GSI_SAME_STMT);
50674e96 4460
726a989a
RB
4461 /* Remove GIMPLE_OMP_CONTINUE. */
4462 gsi_remove (&si, true);
50674e96
DN
4463
4464 /* Trip update code goes into TRIP_UPDATE_BB. */
726a989a 4465 si = gsi_start_bb (trip_update_bb);
953ff289 4466
a68ab351
JJ
4467 t = build_int_cst (itype, 1);
4468 t = build2 (PLUS_EXPR, itype, trip_main, t);
726a989a
RB
4469 stmt = gimple_build_assign (trip_back, t);
4470 gsi_insert_after (&si, stmt, GSI_CONTINUE_LINKING);
953ff289 4471
726a989a
RB
4472 /* Replace the GIMPLE_OMP_RETURN with a barrier, or nothing. */
4473 si = gsi_last_bb (exit_bb);
4474 if (!gimple_omp_return_nowait_p (gsi_stmt (si)))
4475 force_gimple_operand_gsi (&si, build_omp_barrier (), false, NULL_TREE,
4476 false, GSI_SAME_STMT);
4477 gsi_remove (&si, true);
953ff289 4478
50674e96 4479 /* Connect the new blocks. */
e5c95afe
ZD
4480 find_edge (iter_part_bb, seq_start_bb)->flags = EDGE_TRUE_VALUE;
4481 find_edge (iter_part_bb, fin_bb)->flags = EDGE_FALSE_VALUE;
917948d3 4482
e5c95afe
ZD
4483 find_edge (cont_bb, body_bb)->flags = EDGE_TRUE_VALUE;
4484 find_edge (cont_bb, trip_update_bb)->flags = EDGE_FALSE_VALUE;
917948d3 4485
e5c95afe 4486 redirect_edge_and_branch (single_succ_edge (trip_update_bb), iter_part_bb);
917948d3
ZD
4487
4488 if (gimple_in_ssa_p (cfun))
4489 {
726a989a
RB
4490 gimple_stmt_iterator psi;
4491 gimple phi;
4492 edge re, ene;
4493 edge_var_map_vector head;
4494 edge_var_map *vm;
4495 size_t i;
4496
917948d3
ZD
4497 /* When we redirect the edge from trip_update_bb to iter_part_bb, we
4498 remove arguments of the phi nodes in fin_bb. We need to create
4499 appropriate phi nodes in iter_part_bb instead. */
4500 se = single_pred_edge (fin_bb);
4501 re = single_succ_edge (trip_update_bb);
726a989a 4502 head = redirect_edge_var_map_vector (re);
917948d3
ZD
4503 ene = single_succ_edge (entry_bb);
4504
726a989a
RB
4505 psi = gsi_start_phis (fin_bb);
4506 for (i = 0; !gsi_end_p (psi) && VEC_iterate (edge_var_map, head, i, vm);
4507 gsi_next (&psi), ++i)
917948d3 4508 {
726a989a 4509 gimple nphi;
f5045c96 4510 source_location locus;
726a989a
RB
4511
4512 phi = gsi_stmt (psi);
4513 t = gimple_phi_result (phi);
4514 gcc_assert (t == redirect_edge_var_map_result (vm));
917948d3
ZD
4515 nphi = create_phi_node (t, iter_part_bb);
4516 SSA_NAME_DEF_STMT (t) = nphi;
4517
4518 t = PHI_ARG_DEF_FROM_EDGE (phi, se);
f5045c96
AM
4519 locus = gimple_phi_arg_location_from_edge (phi, se);
4520
a68ab351
JJ
4521 /* A special case -- fd->loop.v is not yet computed in
4522 iter_part_bb, we need to use v_extra instead. */
4523 if (t == fd->loop.v)
917948d3 4524 t = v_extra;
f5045c96
AM
4525 add_phi_arg (nphi, t, ene, locus);
4526 locus = redirect_edge_var_map_location (vm);
4527 add_phi_arg (nphi, redirect_edge_var_map_def (vm), re, locus);
726a989a
RB
4528 }
4529 gcc_assert (!gsi_end_p (psi) && i == VEC_length (edge_var_map, head));
4530 redirect_edge_var_map_clear (re);
4531 while (1)
4532 {
4533 psi = gsi_start_phis (fin_bb);
4534 if (gsi_end_p (psi))
4535 break;
4536 remove_phi_node (&psi, false);
917948d3 4537 }
917948d3
ZD
4538
4539 /* Make phi node for trip. */
4540 phi = create_phi_node (trip_main, iter_part_bb);
4541 SSA_NAME_DEF_STMT (trip_main) = phi;
f5045c96
AM
4542 add_phi_arg (phi, trip_back, single_succ_edge (trip_update_bb),
4543 UNKNOWN_LOCATION);
4544 add_phi_arg (phi, trip_init, single_succ_edge (entry_bb),
4545 UNKNOWN_LOCATION);
917948d3
ZD
4546 }
4547
4548 set_immediate_dominator (CDI_DOMINATORS, trip_update_bb, cont_bb);
4549 set_immediate_dominator (CDI_DOMINATORS, iter_part_bb,
4550 recompute_dominator (CDI_DOMINATORS, iter_part_bb));
4551 set_immediate_dominator (CDI_DOMINATORS, fin_bb,
4552 recompute_dominator (CDI_DOMINATORS, fin_bb));
4553 set_immediate_dominator (CDI_DOMINATORS, seq_start_bb,
4554 recompute_dominator (CDI_DOMINATORS, seq_start_bb));
4555 set_immediate_dominator (CDI_DOMINATORS, body_bb,
4556 recompute_dominator (CDI_DOMINATORS, body_bb));
953ff289
DN
4557}
4558
953ff289 4559
50674e96 4560/* Expand the OpenMP loop defined by REGION. */
953ff289 4561
50674e96
DN
4562static void
4563expand_omp_for (struct omp_region *region)
4564{
4565 struct omp_for_data fd;
a68ab351 4566 struct omp_for_data_loop *loops;
953ff289 4567
a68ab351
JJ
4568 loops
4569 = (struct omp_for_data_loop *)
726a989a 4570 alloca (gimple_omp_for_collapse (last_stmt (region->entry))
a68ab351 4571 * sizeof (struct omp_for_data_loop));
a68ab351 4572 extract_omp_for_data (last_stmt (region->entry), &fd, loops);
21a66e91 4573 region->sched_kind = fd.sched_kind;
953ff289 4574
135a171d
JJ
4575 gcc_assert (EDGE_COUNT (region->entry->succs) == 2);
4576 BRANCH_EDGE (region->entry)->flags &= ~EDGE_ABNORMAL;
4577 FALLTHRU_EDGE (region->entry)->flags &= ~EDGE_ABNORMAL;
4578 if (region->cont)
4579 {
4580 gcc_assert (EDGE_COUNT (region->cont->succs) == 2);
4581 BRANCH_EDGE (region->cont)->flags &= ~EDGE_ABNORMAL;
4582 FALLTHRU_EDGE (region->cont)->flags &= ~EDGE_ABNORMAL;
4583 }
4584
d3c673c7
JJ
4585 if (fd.sched_kind == OMP_CLAUSE_SCHEDULE_STATIC
4586 && !fd.have_ordered
a68ab351 4587 && fd.collapse == 1
e5c95afe 4588 && region->cont != NULL)
953ff289
DN
4589 {
4590 if (fd.chunk_size == NULL)
777f7f9a 4591 expand_omp_for_static_nochunk (region, &fd);
953ff289 4592 else
777f7f9a 4593 expand_omp_for_static_chunk (region, &fd);
953ff289
DN
4594 }
4595 else
4596 {
a68ab351
JJ
4597 int fn_index, start_ix, next_ix;
4598
4599 gcc_assert (fd.sched_kind != OMP_CLAUSE_SCHEDULE_AUTO);
4600 fn_index = (fd.sched_kind == OMP_CLAUSE_SCHEDULE_RUNTIME)
726a989a 4601 ? 3 : fd.sched_kind;
a68ab351
JJ
4602 fn_index += fd.have_ordered * 4;
4603 start_ix = BUILT_IN_GOMP_LOOP_STATIC_START + fn_index;
4604 next_ix = BUILT_IN_GOMP_LOOP_STATIC_NEXT + fn_index;
4605 if (fd.iter_type == long_long_unsigned_type_node)
4606 {
4607 start_ix += BUILT_IN_GOMP_LOOP_ULL_STATIC_START
4608 - BUILT_IN_GOMP_LOOP_STATIC_START;
4609 next_ix += BUILT_IN_GOMP_LOOP_ULL_STATIC_NEXT
4610 - BUILT_IN_GOMP_LOOP_STATIC_NEXT;
4611 }
bbbbb16a
ILT
4612 expand_omp_for_generic (region, &fd, (enum built_in_function) start_ix,
4613 (enum built_in_function) next_ix);
953ff289 4614 }
5f40b3cb
ZD
4615
4616 update_ssa (TODO_update_ssa_only_virtuals);
953ff289
DN
4617}
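/* For instance (illustrative): schedule(guided) combined with an
   ordered clause yields fn_index = 2 + 4 = 6, selecting
   GOMP_loop_ordered_guided_start / GOMP_loop_ordered_guided_next, given
   the consecutive layout of the GOMP_LOOP_* builtins that the offset
   arithmetic above assumes (static, dynamic, guided, runtime, then the
   four ordered variants); the final adjustment switches to the ULL
   variants when iter_type is unsigned long long.  */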
4618
953ff289
DN
4619
4620/* Expand code for an OpenMP sections directive. In pseudo code, we generate
4621
953ff289
DN
4622 v = GOMP_sections_start (n);
4623 L0:
4624 switch (v)
4625 {
4626 case 0:
4627 goto L2;
4628 case 1:
4629 section 1;
4630 goto L1;
4631 case 2:
4632 ...
4633 case n:
4634 ...
953ff289
DN
4635 default:
4636 abort ();
4637 }
4638 L1:
4639 v = GOMP_sections_next ();
4640 goto L0;
4641 L2:
4642 reduction;
4643
50674e96 4644 If this is a combined parallel sections, replace the call to
917948d3 4645 GOMP_sections_start with call to GOMP_sections_next. */
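/* Illustrative instance of the pseudocode above:

     #pragma omp sections
     {
       #pragma omp section
         work1 ();
       #pragma omp section
         work2 ();
     }

   becomes a switch over v with case 1 -> work1, case 2 -> work2,
   case 0 -> exit, and a trapping default, where v is refreshed by
   GOMP_sections_next after each section completes.  */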
953ff289
DN
4646
4647static void
50674e96 4648expand_omp_sections (struct omp_region *region)
953ff289 4649{
726a989a
RB
4650 tree t, u, vin = NULL, vmain, vnext, l1, l2;
4651 VEC (tree,heap) *label_vec;
4652 unsigned len;
e5c95afe 4653 basic_block entry_bb, l0_bb, l1_bb, l2_bb, default_bb;
726a989a
RB
4654 gimple_stmt_iterator si, switch_si;
4655 gimple sections_stmt, stmt, cont;
c34938a8
JJ
4656 edge_iterator ei;
4657 edge e;
777f7f9a 4658 struct omp_region *inner;
726a989a 4659 unsigned i, casei;
e5c95afe 4660 bool exit_reachable = region->cont != NULL;
953ff289 4661
e5c95afe 4662 gcc_assert (exit_reachable == (region->exit != NULL));
777f7f9a 4663 entry_bb = region->entry;
e5c95afe 4664 l0_bb = single_succ (entry_bb);
777f7f9a 4665 l1_bb = region->cont;
e5c95afe
ZD
4666 l2_bb = region->exit;
4667 if (exit_reachable)
d3c673c7 4668 {
c34938a8 4669 if (single_pred (l2_bb) == l0_bb)
726a989a 4670 l2 = gimple_block_label (l2_bb);
c34938a8
JJ
4671 else
4672 {
4673 /* This can happen if there are reductions. */
4674 len = EDGE_COUNT (l0_bb->succs);
4675 gcc_assert (len > 0);
4676 e = EDGE_SUCC (l0_bb, len - 1);
726a989a 4677 si = gsi_last_bb (e->dest);
3ac4c44a 4678 l2 = NULL_TREE;
726a989a
RB
4679 if (gsi_end_p (si)
4680 || gimple_code (gsi_stmt (si)) != GIMPLE_OMP_SECTION)
4681 l2 = gimple_block_label (e->dest);
c34938a8
JJ
4682 else
4683 FOR_EACH_EDGE (e, ei, l0_bb->succs)
4684 {
726a989a
RB
4685 si = gsi_last_bb (e->dest);
4686 if (gsi_end_p (si)
4687 || gimple_code (gsi_stmt (si)) != GIMPLE_OMP_SECTION)
c34938a8 4688 {
726a989a 4689 l2 = gimple_block_label (e->dest);
c34938a8
JJ
4690 break;
4691 }
4692 }
4693 }
d3c673c7 4694 default_bb = create_empty_bb (l1_bb->prev_bb);
726a989a 4695 l1 = gimple_block_label (l1_bb);
d3c673c7
JJ
4696 }
4697 else
4698 {
e5c95afe
ZD
4699 default_bb = create_empty_bb (l0_bb);
4700 l1 = NULL_TREE;
726a989a 4701 l2 = gimple_block_label (default_bb);
d3c673c7 4702 }
50674e96
DN
4703
4704 /* We will build a switch() with enough cases for all the
726a989a 4705 GIMPLE_OMP_SECTION regions, a '0' case to handle the end of more work
50674e96 4706 and a default case to abort if something goes wrong. */
e5c95afe 4707 len = EDGE_COUNT (l0_bb->succs);
726a989a
RB
4708
4709 /* Use VEC_quick_push on label_vec throughout, since we know the size
4710 in advance. */
4711 label_vec = VEC_alloc (tree, heap, len);
953ff289 4712
777f7f9a 4713 /* The call to GOMP_sections_start goes in ENTRY_BB, replacing the
726a989a
RB
4714 GIMPLE_OMP_SECTIONS statement. */
4715 si = gsi_last_bb (entry_bb);
4716 sections_stmt = gsi_stmt (si);
4717 gcc_assert (gimple_code (sections_stmt) == GIMPLE_OMP_SECTIONS);
4718 vin = gimple_omp_sections_control (sections_stmt);
50674e96 4719 if (!is_combined_parallel (region))
953ff289 4720 {
50674e96
DN
4721 /* If we are not inside a combined parallel+sections region,
4722 call GOMP_sections_start. */
e5c95afe
ZD
4723 t = build_int_cst (unsigned_type_node,
4724 exit_reachable ? len - 1 : len);
953ff289 4725 u = built_in_decls[BUILT_IN_GOMP_SECTIONS_START];
726a989a 4726 stmt = gimple_build_call (u, 1, t);
953ff289 4727 }
917948d3
ZD
4728 else
4729 {
4730 /* Otherwise, call GOMP_sections_next. */
4731 u = built_in_decls[BUILT_IN_GOMP_SECTIONS_NEXT];
726a989a 4732 stmt = gimple_build_call (u, 0);
917948d3 4733 }
726a989a
RB
4734 gimple_call_set_lhs (stmt, vin);
4735 gsi_insert_after (&si, stmt, GSI_SAME_STMT);
4736 gsi_remove (&si, true);
4737
4738 /* The switch() statement replacing GIMPLE_OMP_SECTIONS_SWITCH goes in
4739 L0_BB. */
4740 switch_si = gsi_last_bb (l0_bb);
4741 gcc_assert (gimple_code (gsi_stmt (switch_si)) == GIMPLE_OMP_SECTIONS_SWITCH);
917948d3
ZD
4742 if (exit_reachable)
4743 {
4744 cont = last_stmt (l1_bb);
726a989a
RB
4745 gcc_assert (gimple_code (cont) == GIMPLE_OMP_CONTINUE);
4746 vmain = gimple_omp_continue_control_use (cont);
4747 vnext = gimple_omp_continue_control_def (cont);
917948d3
ZD
4748 }
4749 else
4750 {
4751 vmain = vin;
4752 vnext = NULL_TREE;
4753 }
953ff289 4754
e5c95afe
ZD
4755 i = 0;
4756 if (exit_reachable)
4757 {
4758 t = build3 (CASE_LABEL_EXPR, void_type_node,
4759 build_int_cst (unsigned_type_node, 0), NULL, l2);
726a989a 4760 VEC_quick_push (tree, label_vec, t);
e5c95afe
ZD
4761 i++;
4762 }
d3c673c7 4763
726a989a 4764 /* Convert each GIMPLE_OMP_SECTION into a CASE_LABEL_EXPR. */
e5c95afe
ZD
4765 for (inner = region->inner, casei = 1;
4766 inner;
4767 inner = inner->next, i++, casei++)
953ff289 4768 {
50674e96
DN
4769 basic_block s_entry_bb, s_exit_bb;
4770
c34938a8 4771 /* Skip optional reduction region. */
726a989a 4772 if (inner->type == GIMPLE_OMP_ATOMIC_LOAD)
c34938a8
JJ
4773 {
4774 --i;
4775 --casei;
4776 continue;
4777 }
4778
777f7f9a
RH
4779 s_entry_bb = inner->entry;
4780 s_exit_bb = inner->exit;
953ff289 4781
726a989a 4782 t = gimple_block_label (s_entry_bb);
e5c95afe 4783 u = build_int_cst (unsigned_type_node, casei);
953ff289 4784 u = build3 (CASE_LABEL_EXPR, void_type_node, u, NULL, t);
726a989a 4785 VEC_quick_push (tree, label_vec, u);
777f7f9a 4786
726a989a
RB
4787 si = gsi_last_bb (s_entry_bb);
4788 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SECTION);
4789 gcc_assert (i < len || gimple_omp_section_last_p (gsi_stmt (si)));
4790 gsi_remove (&si, true);
777f7f9a 4791 single_succ_edge (s_entry_bb)->flags = EDGE_FALLTHRU;
d3c673c7
JJ
4792
4793 if (s_exit_bb == NULL)
4794 continue;
4795
726a989a
RB
4796 si = gsi_last_bb (s_exit_bb);
4797 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_RETURN);
4798 gsi_remove (&si, true);
d3c673c7 4799
50674e96 4800 single_succ_edge (s_exit_bb)->flags = EDGE_FALLTHRU;
953ff289
DN
4801 }
4802
50674e96 4803 /* Error handling code goes in DEFAULT_BB. */
726a989a 4804 t = gimple_block_label (default_bb);
953ff289 4805 u = build3 (CASE_LABEL_EXPR, void_type_node, NULL, NULL, t);
777f7f9a 4806 make_edge (l0_bb, default_bb, 0);
953ff289 4807
726a989a
RB
4808 stmt = gimple_build_switch_vec (vmain, u, label_vec);
4809 gsi_insert_after (&switch_si, stmt, GSI_SAME_STMT);
4810 gsi_remove (&switch_si, true);
4811 VEC_free (tree, heap, label_vec);
4812
4813 si = gsi_start_bb (default_bb);
4814 stmt = gimple_build_call (built_in_decls[BUILT_IN_TRAP], 0);
4815 gsi_insert_after (&si, stmt, GSI_CONTINUE_LINKING);
50674e96 4816
e5c95afe 4817 if (exit_reachable)
d3c673c7 4818 {
e5c95afe 4819 /* Code to get the next section goes in L1_BB. */
726a989a
RB
4820 si = gsi_last_bb (l1_bb);
4821 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_CONTINUE);
953ff289 4822
726a989a
RB
4823 stmt = gimple_build_call (built_in_decls[BUILT_IN_GOMP_SECTIONS_NEXT], 0);
4824 gimple_call_set_lhs (stmt, vnext);
4825 gsi_insert_after (&si, stmt, GSI_SAME_STMT);
4826 gsi_remove (&si, true);
50674e96 4827
e5c95afe
ZD
4828 single_succ_edge (l1_bb)->flags = EDGE_FALLTHRU;
4829
726a989a
RB
4830 /* Cleanup function replaces GIMPLE_OMP_RETURN in EXIT_BB. */
4831 si = gsi_last_bb (l2_bb);
4832 if (gimple_omp_return_nowait_p (gsi_stmt (si)))
d3c673c7
JJ
4833 t = built_in_decls[BUILT_IN_GOMP_SECTIONS_END_NOWAIT];
4834 else
4835 t = built_in_decls[BUILT_IN_GOMP_SECTIONS_END];
726a989a
RB
4836 stmt = gimple_build_call (t, 0);
4837 gsi_insert_after (&si, stmt, GSI_SAME_STMT);
4838 gsi_remove (&si, true);
d3c673c7 4839 }
50674e96 4840
917948d3 4841 set_immediate_dominator (CDI_DOMINATORS, default_bb, l0_bb);
50674e96 4842}
953ff289 4843
953ff289 4844
777f7f9a
RH
4845/* Expand code for an OpenMP single directive. We've already expanded
4846 much of the code; here we simply place the GOMP_barrier call. */
4847
4848static void
4849expand_omp_single (struct omp_region *region)
4850{
4851 basic_block entry_bb, exit_bb;
726a989a 4852 gimple_stmt_iterator si;
777f7f9a
RH
4853 bool need_barrier = false;
4854
4855 entry_bb = region->entry;
4856 exit_bb = region->exit;
4857
726a989a 4858 si = gsi_last_bb (entry_bb);
777f7f9a
RH
4859 /* The terminal barrier at the end of a GOMP_single_copy sequence cannot
4860 be removed. We need to ensure that the thread that entered the single
4861 does not exit before the data is copied out by the other threads. */
726a989a 4862 if (find_omp_clause (gimple_omp_single_clauses (gsi_stmt (si)),
777f7f9a
RH
4863 OMP_CLAUSE_COPYPRIVATE))
4864 need_barrier = true;
726a989a
RB
4865 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SINGLE);
4866 gsi_remove (&si, true);
777f7f9a
RH
4867 single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
4868
726a989a
RB
4869 si = gsi_last_bb (exit_bb);
4870 if (!gimple_omp_return_nowait_p (gsi_stmt (si)) || need_barrier)
4871 force_gimple_operand_gsi (&si, build_omp_barrier (), false, NULL_TREE,
4872 false, GSI_SAME_STMT);
4873 gsi_remove (&si, true);
777f7f9a
RH
4874 single_succ_edge (exit_bb)->flags = EDGE_FALLTHRU;
4875}
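/* E.g. (illustrative):

     #pragma omp single copyprivate(x)
       x = compute ();

   must keep the final barrier: the threads that skipped the single
   body may not run ahead before x has been broadcast into their
   copies, which is why a COPYPRIVATE clause forces need_barrier
   above even under nowait.  */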
4876
4877
4878/* Generic expansion for OpenMP synchronization directives: master,
4879 ordered and critical. All we need to do here is remove the entry
4880 and exit markers for REGION. */
50674e96
DN
4881
4882static void
4883expand_omp_synch (struct omp_region *region)
4884{
4885 basic_block entry_bb, exit_bb;
726a989a 4886 gimple_stmt_iterator si;
50674e96 4887
777f7f9a
RH
4888 entry_bb = region->entry;
4889 exit_bb = region->exit;
50674e96 4890
726a989a
RB
4891 si = gsi_last_bb (entry_bb);
4892 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SINGLE
4893 || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_MASTER
4894 || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ORDERED
4895 || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_CRITICAL);
4896 gsi_remove (&si, true);
50674e96
DN
4897 single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
4898
d3c673c7
JJ
4899 if (exit_bb)
4900 {
726a989a
RB
4901 si = gsi_last_bb (exit_bb);
4902 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_RETURN);
4903 gsi_remove (&si, true);
d3c673c7
JJ
4904 single_succ_edge (exit_bb)->flags = EDGE_FALLTHRU;
4905 }
50674e96 4906}
953ff289 4907
a509ebb5
RL
4908/* A subroutine of expand_omp_atomic. Attempt to implement the atomic
4909 operation as a __sync_fetch_and_op builtin. INDEX is log2 of the
4910 size of the data type, and thus usable to find the index of the builtin
4911 decl. Returns false if the expression is not of the proper form. */
4912
4913static bool
4914expand_omp_atomic_fetch_op (basic_block load_bb,
4915 tree addr, tree loaded_val,
4916 tree stored_val, int index)
4917{
4918 enum built_in_function base;
4919 tree decl, itype, call;
4920 enum insn_code *optab;
4921 tree rhs;
4922 basic_block store_bb = single_succ (load_bb);
726a989a
RB
4923 gimple_stmt_iterator gsi;
4924 gimple stmt;
db3927fb 4925 location_t loc;
a509ebb5
RL
4926
4927 /* We expect to find the following sequences:
4928
4929 load_bb:
726a989a 4930 GIMPLE_OMP_ATOMIC_LOAD (tmp, mem)
a509ebb5
RL
4931
4932 store_bb:
4933 val = tmp OP something; (or: something OP tmp)
726a989a 4934 GIMPLE_OMP_ATOMIC_STORE (val)
a509ebb5
RL
4935
4936 ???FIXME: Allow a more flexible sequence.
4937 Perhaps use data flow to pick the statements.
4938
4939 */
4940
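   /* As a concrete illustration (a sketch only; the temporary names are
      invented), for

	   int x;
	   #pragma omp atomic
	   x += 1;

      the two blocks contain roughly

	load_bb:
	  GIMPLE_OMP_ATOMIC_LOAD (D.tmp, &x)

	store_bb:
	  D.val = D.tmp + 1;
	  GIMPLE_OMP_ATOMIC_STORE (D.val)

      and, x being a naturally aligned 4-byte int, this function replaces
      the pair with a single __sync_fetch_and_add_4 (&x, 1) call whose
      result is discarded.  */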
726a989a
RB
4941 gsi = gsi_after_labels (store_bb);
4942 stmt = gsi_stmt (gsi);
db3927fb 4943 loc = gimple_location (stmt);
726a989a 4944 if (!is_gimple_assign (stmt))
a509ebb5 4945 return false;
726a989a
RB
4946 gsi_next (&gsi);
4947 if (gimple_code (gsi_stmt (gsi)) != GIMPLE_OMP_ATOMIC_STORE)
a509ebb5
RL
4948 return false;
4949
726a989a 4950 if (!operand_equal_p (gimple_assign_lhs (stmt), stored_val, 0))
a509ebb5
RL
4951 return false;
4952
a509ebb5 4953 /* Check for one of the supported fetch-op operations. */
726a989a 4954 switch (gimple_assign_rhs_code (stmt))
a509ebb5
RL
4955 {
4956 case PLUS_EXPR:
4957 case POINTER_PLUS_EXPR:
4958 base = BUILT_IN_FETCH_AND_ADD_N;
4959 optab = sync_add_optab;
4960 break;
4961 case MINUS_EXPR:
4962 base = BUILT_IN_FETCH_AND_SUB_N;
4963 optab = sync_add_optab;
4964 break;
4965 case BIT_AND_EXPR:
4966 base = BUILT_IN_FETCH_AND_AND_N;
4967 optab = sync_and_optab;
4968 break;
4969 case BIT_IOR_EXPR:
4970 base = BUILT_IN_FETCH_AND_OR_N;
4971 optab = sync_ior_optab;
4972 break;
4973 case BIT_XOR_EXPR:
4974 base = BUILT_IN_FETCH_AND_XOR_N;
4975 optab = sync_xor_optab;
4976 break;
4977 default:
4978 return false;
4979 }
4980 /* Make sure the expression is of the proper form. */
726a989a
RB
4981 if (operand_equal_p (gimple_assign_rhs1 (stmt), loaded_val, 0))
4982 rhs = gimple_assign_rhs2 (stmt);
4983 else if (commutative_tree_code (gimple_assign_rhs_code (stmt))
4984 && operand_equal_p (gimple_assign_rhs2 (stmt), loaded_val, 0))
4985 rhs = gimple_assign_rhs1 (stmt);
a509ebb5
RL
4986 else
4987 return false;
4988
4989 decl = built_in_decls[base + index + 1];
4990 itype = TREE_TYPE (TREE_TYPE (decl));
4991
4992 if (optab[TYPE_MODE (itype)] == CODE_FOR_nothing)
4993 return false;
4994
726a989a
RB
4995 gsi = gsi_last_bb (load_bb);
4996 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_ATOMIC_LOAD);
db3927fb
AH
4997 call = build_call_expr_loc (loc,
4998 decl, 2, addr,
4999 fold_convert_loc (loc, itype, rhs));
5000 call = fold_convert_loc (loc, void_type_node, call);
726a989a
RB
5001 force_gimple_operand_gsi (&gsi, call, true, NULL_TREE, true, GSI_SAME_STMT);
5002 gsi_remove (&gsi, true);
a509ebb5 5003
726a989a
RB
5004 gsi = gsi_last_bb (store_bb);
5005 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_ATOMIC_STORE);
5006 gsi_remove (&gsi, true);
5007 gsi = gsi_last_bb (store_bb);
5008 gsi_remove (&gsi, true);
a509ebb5
RL
5009
5010 if (gimple_in_ssa_p (cfun))
5011 update_ssa (TODO_update_ssa_no_phi);
5012
5013 return true;
5014}
5015
5016/* A subroutine of expand_omp_atomic. Implement the atomic operation as:
5017
5018 oldval = *addr;
5019 repeat:
5020 newval = rhs; // with oldval replacing *addr in rhs
5021 oldval = __sync_val_compare_and_swap (addr, oldval, newval);
5022 if (oldval != newval)
5023 goto repeat;
5024
5025 INDEX is log2 of the size of the data type, and thus usable to find the
5026 index of the builtin decl. */
5027
5028static bool
5029expand_omp_atomic_pipeline (basic_block load_bb, basic_block store_bb,
5030 tree addr, tree loaded_val, tree stored_val,
5031 int index)
5032{
c18c98c0 5033 tree loadedi, storedi, initial, new_storedi, old_vali;
a509ebb5 5034 tree type, itype, cmpxchg, iaddr;
726a989a 5035 gimple_stmt_iterator si;
a509ebb5 5036 basic_block loop_header = single_succ (load_bb);
726a989a 5037 gimple phi, stmt;
a509ebb5
RL
5038 edge e;
5039
5040 cmpxchg = built_in_decls[BUILT_IN_VAL_COMPARE_AND_SWAP_N + index + 1];
5041 type = TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (addr)));
5042 itype = TREE_TYPE (TREE_TYPE (cmpxchg));
5043
5044 if (sync_compare_and_swap[TYPE_MODE (itype)] == CODE_FOR_nothing)
5045 return false;
5046
726a989a
RB
5047 /* Load the initial value, replacing the GIMPLE_OMP_ATOMIC_LOAD. */
5048 si = gsi_last_bb (load_bb);
5049 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_LOAD);
5050
c18c98c0
JJ
5051 /* For floating-point values, we'll need to view-convert them to integers
5052 so that we can perform the atomic compare and swap. Simplify the
5053 following code by always setting up the "i"ntegral variables. */
5054 if (!INTEGRAL_TYPE_P (type) && !POINTER_TYPE_P (type))
5055 {
726a989a
RB
5056 tree iaddr_val;
5057
5b21f0f3
RG
5058 iaddr = create_tmp_var (build_pointer_type_for_mode (itype, ptr_mode,
5059 true), NULL);
726a989a
RB
5060 iaddr_val
5061 = force_gimple_operand_gsi (&si,
5062 fold_convert (TREE_TYPE (iaddr), addr),
5063 false, NULL_TREE, true, GSI_SAME_STMT);
5064 stmt = gimple_build_assign (iaddr, iaddr_val);
5065 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
c18c98c0
JJ
5066 loadedi = create_tmp_var (itype, NULL);
5067 if (gimple_in_ssa_p (cfun))
5068 {
5069 add_referenced_var (iaddr);
5070 add_referenced_var (loadedi);
5071 loadedi = make_ssa_name (loadedi, NULL);
5072 }
5073 }
5074 else
5075 {
5076 iaddr = addr;
5077 loadedi = loaded_val;
5078 }
726a989a
RB
5079
5080 initial = force_gimple_operand_gsi (&si, build_fold_indirect_ref (iaddr),
5081 true, NULL_TREE, true, GSI_SAME_STMT);
c18c98c0
JJ
5082
5083 /* Move the value to the LOADEDI temporary. */
a509ebb5
RL
5084 if (gimple_in_ssa_p (cfun))
5085 {
726a989a 5086 gcc_assert (gimple_seq_empty_p (phi_nodes (loop_header)));
c18c98c0
JJ
5087 phi = create_phi_node (loadedi, loop_header);
5088 SSA_NAME_DEF_STMT (loadedi) = phi;
a509ebb5
RL
5089 SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, single_succ_edge (load_bb)),
5090 initial);
5091 }
5092 else
726a989a
RB
5093 gsi_insert_before (&si,
5094 gimple_build_assign (loadedi, initial),
5095 GSI_SAME_STMT);
c18c98c0
JJ
5096 if (loadedi != loaded_val)
5097 {
726a989a
RB
5098 gimple_stmt_iterator gsi2;
5099 tree x;
c18c98c0
JJ
5100
5101 x = build1 (VIEW_CONVERT_EXPR, type, loadedi);
726a989a 5102 gsi2 = gsi_start_bb (loop_header);
c18c98c0
JJ
5103 if (gimple_in_ssa_p (cfun))
5104 {
726a989a
RB
5105 gimple stmt;
5106 x = force_gimple_operand_gsi (&gsi2, x, true, NULL_TREE,
5107 true, GSI_SAME_STMT);
5108 stmt = gimple_build_assign (loaded_val, x);
5109 gsi_insert_before (&gsi2, stmt, GSI_SAME_STMT);
c18c98c0
JJ
5110 }
5111 else
5112 {
726a989a
RB
5113 x = build2 (MODIFY_EXPR, TREE_TYPE (loaded_val), loaded_val, x);
5114 force_gimple_operand_gsi (&gsi2, x, true, NULL_TREE,
5115 true, GSI_SAME_STMT);
c18c98c0
JJ
5116 }
5117 }
726a989a 5118 gsi_remove (&si, true);
a509ebb5 5119
726a989a
RB
5120 si = gsi_last_bb (store_bb);
5121 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_STORE);
a509ebb5 5122
c18c98c0
JJ
5123 if (iaddr == addr)
5124 storedi = stored_val;
a509ebb5 5125 else
c18c98c0 5126 storedi =
726a989a 5127 force_gimple_operand_gsi (&si,
c18c98c0
JJ
5128 build1 (VIEW_CONVERT_EXPR, itype,
5129 stored_val), true, NULL_TREE, true,
726a989a 5130 GSI_SAME_STMT);
a509ebb5
RL
5131
5132 /* Build the compare&swap statement. */
5133 new_storedi = build_call_expr (cmpxchg, 3, iaddr, loadedi, storedi);
726a989a 5134 new_storedi = force_gimple_operand_gsi (&si,
587aa063
RG
5135 fold_convert (TREE_TYPE (loadedi),
5136 new_storedi),
a509ebb5 5137 true, NULL_TREE,
726a989a 5138 true, GSI_SAME_STMT);
a509ebb5
RL
5139
5140 if (gimple_in_ssa_p (cfun))
5141 old_vali = loadedi;
5142 else
5143 {
587aa063 5144 old_vali = create_tmp_var (TREE_TYPE (loadedi), NULL);
c18c98c0
JJ
5145 if (gimple_in_ssa_p (cfun))
5146 add_referenced_var (old_vali);
726a989a
RB
5147 stmt = gimple_build_assign (old_vali, loadedi);
5148 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
a509ebb5 5149
726a989a
RB
5150 stmt = gimple_build_assign (loadedi, new_storedi);
5151 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
a509ebb5
RL
5152 }
5153
5154 /* Note that we always perform the comparison as an integer, even for
5155 floating point. This allows the atomic operation to properly
5156 succeed even with NaNs and -0.0. */
726a989a
RB
5157 stmt = gimple_build_cond_empty
5158 (build2 (NE_EXPR, boolean_type_node,
5159 new_storedi, old_vali));
5160 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
a509ebb5
RL
5161
5162 /* Update cfg. */
5163 e = single_succ_edge (store_bb);
5164 e->flags &= ~EDGE_FALLTHRU;
5165 e->flags |= EDGE_FALSE_VALUE;
5166
5167 e = make_edge (store_bb, loop_header, EDGE_TRUE_VALUE);
5168
c18c98c0 5169 /* Copy the new value to loadedi (we already did that before the condition
a509ebb5
RL
5170 if we are not in SSA). */
5171 if (gimple_in_ssa_p (cfun))
5172 {
726a989a 5173 phi = gimple_seq_first_stmt (phi_nodes (loop_header));
c18c98c0 5174 SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, e), new_storedi);
a509ebb5
RL
5175 }
5176
726a989a
RB
5177 /* Remove GIMPLE_OMP_ATOMIC_STORE. */
5178 gsi_remove (&si, true);
a509ebb5
RL
5179
5180 if (gimple_in_ssa_p (cfun))
5181 update_ssa (TODO_update_ssa_no_phi);
5182
5183 return true;
5184}
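/* To make the shape above concrete, here is a C analogue of the loop
   this function builds for a 4-byte float (a sketch: atomic_scale and
   the local names are invented, the builtins are the ones actually
   used):

     float x;

     void
     atomic_scale (float s)
     {
       unsigned int old, cur, new_val;
       float f;

       __builtin_memcpy (&old, &x, sizeof old);
       for (;;)
	 {
	   __builtin_memcpy (&f, &old, sizeof f);
	   f *= s;
	   __builtin_memcpy (&new_val, &f, sizeof new_val);
	   cur = __sync_val_compare_and_swap ((unsigned int *) &x,
					      old, new_val);
	   if (cur == old)
	     break;
	   old = cur;
	 }
     }

   The memcpys stand in for the VIEW_CONVERT_EXPRs inserted when TYPE is
   neither integral nor a pointer, and the comparison is done on the
   integer images, which is why NaNs and -0.0 do not break termination.  */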
5185
5186/* A subroutine of expand_omp_atomic. Implement the atomic operation as:
5187
5188 GOMP_atomic_start ();
5189 *addr = rhs;
5190 GOMP_atomic_end ();
5191
 5192 The result is not globally atomic, but works so long as all parallel
 5193 references are within #pragma omp atomic directives. According to
 5194 responses received from omp@openmp.org, this appears to be within
 5195 spec; it makes sense, since that's how several other compilers handle
 5196 this situation as well.
726a989a
RB
5197 LOADED_VAL and ADDR are the operands of GIMPLE_OMP_ATOMIC_LOAD we're
5198 expanding. STORED_VAL is the operand of the matching
5199 GIMPLE_OMP_ATOMIC_STORE.
a509ebb5
RL
5200
5201 We replace
726a989a 5202 GIMPLE_OMP_ATOMIC_LOAD (loaded_val, addr) with
a509ebb5
RL
5203 loaded_val = *addr;
5204
5205 and replace
726a989a 5206 GIMPLE_OMP_ATOMIC_STORE (stored_val) with
a509ebb5
RL
5207 *addr = stored_val;
5208*/
5209
5210static bool
5211expand_omp_atomic_mutex (basic_block load_bb, basic_block store_bb,
5212 tree addr, tree loaded_val, tree stored_val)
5213{
726a989a
RB
5214 gimple_stmt_iterator si;
5215 gimple stmt;
a509ebb5
RL
5216 tree t;
5217
726a989a
RB
5218 si = gsi_last_bb (load_bb);
5219 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_LOAD);
a509ebb5
RL
5220
5221 t = built_in_decls[BUILT_IN_GOMP_ATOMIC_START];
db3927fb 5222 t = build_function_call_expr (UNKNOWN_LOCATION, t, 0);
726a989a 5223 force_gimple_operand_gsi (&si, t, true, NULL_TREE, true, GSI_SAME_STMT);
a509ebb5 5224
726a989a
RB
5225 stmt = gimple_build_assign (loaded_val, build_fold_indirect_ref (addr));
5226 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
5227 gsi_remove (&si, true);
a509ebb5 5228
726a989a
RB
5229 si = gsi_last_bb (store_bb);
5230 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_STORE);
a509ebb5 5231
726a989a 5232 stmt = gimple_build_assign (build_fold_indirect_ref (unshare_expr (addr)),
a509ebb5 5233 stored_val);
726a989a 5234 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
a509ebb5
RL
5235
5236 t = built_in_decls[BUILT_IN_GOMP_ATOMIC_END];
db3927fb 5237 t = build_function_call_expr (UNKNOWN_LOCATION, t, 0);
726a989a
RB
5238 force_gimple_operand_gsi (&si, t, true, NULL_TREE, true, GSI_SAME_STMT);
5239 gsi_remove (&si, true);
a509ebb5
RL
5240
5241 if (gimple_in_ssa_p (cfun))
5242 update_ssa (TODO_update_ssa_no_phi);
5243 return true;
5244}
5245
726a989a 5246/* Expand a GIMPLE_OMP_ATOMIC statement. We try to expand
a509ebb5
RL
 5247 using expand_omp_atomic_fetch_op. If that fails, we try to
 5248 call expand_omp_atomic_pipeline, and if that fails too, the
5249 ultimate fallback is wrapping the operation in a mutex
5250 (expand_omp_atomic_mutex). REGION is the atomic region built
5251 by build_omp_regions_1(). */
5252
5253static void
5254expand_omp_atomic (struct omp_region *region)
5255{
5256 basic_block load_bb = region->entry, store_bb = region->exit;
726a989a
RB
5257 gimple load = last_stmt (load_bb), store = last_stmt (store_bb);
5258 tree loaded_val = gimple_omp_atomic_load_lhs (load);
5259 tree addr = gimple_omp_atomic_load_rhs (load);
5260 tree stored_val = gimple_omp_atomic_store_val (store);
a509ebb5
RL
5261 tree type = TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (addr)));
5262 HOST_WIDE_INT index;
5263
5264 /* Make sure the type is one of the supported sizes. */
5265 index = tree_low_cst (TYPE_SIZE_UNIT (type), 1);
5266 index = exact_log2 (index);
5267 if (index >= 0 && index <= 4)
5268 {
5269 unsigned int align = TYPE_ALIGN_UNIT (type);
5270
5271 /* __sync builtins require strict data alignment. */
5272 if (exact_log2 (align) >= index)
5273 {
5274 /* When possible, use specialized atomic update functions. */
5275 if ((INTEGRAL_TYPE_P (type) || POINTER_TYPE_P (type))
5276 && store_bb == single_succ (load_bb))
5277 {
5278 if (expand_omp_atomic_fetch_op (load_bb, addr,
5279 loaded_val, stored_val, index))
5280 return;
5281 }
5282
5283 /* If we don't have specialized __sync builtins, try and implement
5284 as a compare and swap loop. */
5285 if (expand_omp_atomic_pipeline (load_bb, store_bb, addr,
5286 loaded_val, stored_val, index))
5287 return;
5288 }
5289 }
5290
5291 /* The ultimate fallback is wrapping the operation in a mutex. */
5292 expand_omp_atomic_mutex (load_bb, store_bb, addr, loaded_val, stored_val);
5293}
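/* A worked instance of the dispatch above: for a naturally aligned int,
   TYPE_SIZE_UNIT is 4, so INDEX = exact_log2 (4) = 2 and the "_4"
   variants of the __sync builtins are selected (the INDEX <= 4 bound
   admits sizes up to 16 bytes).  For a 12-byte long double, as on
   32-bit x86, exact_log2 (12) is -1, both specialized paths are
   skipped, and the GOMP_atomic_start/end mutex is used.  Likewise a
   4-byte type aligned to only 2 bytes fails exact_log2 (align) >= INDEX
   and falls back to the mutex.  */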
5294
953ff289 5295
50674e96
DN
5296/* Expand the parallel region tree rooted at REGION. Expansion
5297 proceeds in depth-first order. Innermost regions are expanded
5298 first. This way, parallel regions that require a new function to
726a989a 5299 be created (e.g., GIMPLE_OMP_PARALLEL) can be expanded without having any
50674e96
DN
5300 internal dependencies in their body. */
5301
5302static void
5303expand_omp (struct omp_region *region)
5304{
5305 while (region)
5306 {
b357f682
JJ
5307 location_t saved_location;
5308
068e1875
ZD
5309 /* First, determine whether this is a combined parallel+workshare
5310 region. */
726a989a 5311 if (region->type == GIMPLE_OMP_PARALLEL)
068e1875
ZD
5312 determine_parallel_type (region);
5313
50674e96
DN
5314 if (region->inner)
5315 expand_omp (region->inner);
5316
b357f682 5317 saved_location = input_location;
726a989a
RB
5318 if (gimple_has_location (last_stmt (region->entry)))
5319 input_location = gimple_location (last_stmt (region->entry));
b357f682 5320
777f7f9a 5321 switch (region->type)
50674e96 5322 {
726a989a
RB
5323 case GIMPLE_OMP_PARALLEL:
5324 case GIMPLE_OMP_TASK:
a68ab351
JJ
5325 expand_omp_taskreg (region);
5326 break;
5327
726a989a 5328 case GIMPLE_OMP_FOR:
777f7f9a
RH
5329 expand_omp_for (region);
5330 break;
50674e96 5331
726a989a 5332 case GIMPLE_OMP_SECTIONS:
777f7f9a
RH
5333 expand_omp_sections (region);
5334 break;
50674e96 5335
726a989a 5336 case GIMPLE_OMP_SECTION:
777f7f9a 5337 /* Individual omp sections are handled together with their
726a989a 5338 parent GIMPLE_OMP_SECTIONS region. */
777f7f9a 5339 break;
50674e96 5340
726a989a 5341 case GIMPLE_OMP_SINGLE:
777f7f9a
RH
5342 expand_omp_single (region);
5343 break;
50674e96 5344
726a989a
RB
5345 case GIMPLE_OMP_MASTER:
5346 case GIMPLE_OMP_ORDERED:
5347 case GIMPLE_OMP_CRITICAL:
777f7f9a
RH
5348 expand_omp_synch (region);
5349 break;
50674e96 5350
726a989a 5351 case GIMPLE_OMP_ATOMIC_LOAD:
a509ebb5
RL
5352 expand_omp_atomic (region);
5353 break;
5354
777f7f9a
RH
5355 default:
5356 gcc_unreachable ();
5357 }
8d9c1aec 5358
b357f682 5359 input_location = saved_location;
50674e96
DN
5360 region = region->next;
5361 }
5362}
5363
5364
5365/* Helper for build_omp_regions. Scan the dominator tree starting at
5f40b3cb
ZD
5366 block BB. PARENT is the region that contains BB. If SINGLE_TREE is
 5367 true, the function ends once a single tree is built (otherwise, a whole
5368 forest of OMP constructs may be built). */
50674e96
DN
5369
5370static void
5f40b3cb
ZD
5371build_omp_regions_1 (basic_block bb, struct omp_region *parent,
5372 bool single_tree)
50674e96 5373{
726a989a
RB
5374 gimple_stmt_iterator gsi;
5375 gimple stmt;
50674e96
DN
5376 basic_block son;
5377
726a989a
RB
5378 gsi = gsi_last_bb (bb);
5379 if (!gsi_end_p (gsi) && is_gimple_omp (gsi_stmt (gsi)))
50674e96
DN
5380 {
5381 struct omp_region *region;
726a989a 5382 enum gimple_code code;
50674e96 5383
726a989a
RB
5384 stmt = gsi_stmt (gsi);
5385 code = gimple_code (stmt);
5386 if (code == GIMPLE_OMP_RETURN)
50674e96
DN
5387 {
5388 /* STMT is the return point out of region PARENT. Mark it
5389 as the exit point and make PARENT the immediately
5390 enclosing region. */
5391 gcc_assert (parent);
5392 region = parent;
777f7f9a 5393 region->exit = bb;
50674e96 5394 parent = parent->outer;
50674e96 5395 }
726a989a 5396 else if (code == GIMPLE_OMP_ATOMIC_STORE)
a509ebb5 5397 {
726a989a
RB
 5398 /* GIMPLE_OMP_ATOMIC_STORE is analogous to
5399 GIMPLE_OMP_RETURN, but matches with
5400 GIMPLE_OMP_ATOMIC_LOAD. */
a509ebb5 5401 gcc_assert (parent);
726a989a 5402 gcc_assert (parent->type == GIMPLE_OMP_ATOMIC_LOAD);
a509ebb5
RL
5403 region = parent;
5404 region->exit = bb;
5405 parent = parent->outer;
5406 }
5407
726a989a 5408 else if (code == GIMPLE_OMP_CONTINUE)
777f7f9a
RH
5409 {
5410 gcc_assert (parent);
5411 parent->cont = bb;
5412 }
726a989a 5413 else if (code == GIMPLE_OMP_SECTIONS_SWITCH)
e5c95afe 5414 {
726a989a
RB
5415 /* GIMPLE_OMP_SECTIONS_SWITCH is part of
5416 GIMPLE_OMP_SECTIONS, and we do nothing for it. */
5417 ;
e5c95afe 5418 }
50674e96
DN
5419 else
5420 {
5421 /* Otherwise, this directive becomes the parent for a new
5422 region. */
777f7f9a 5423 region = new_omp_region (bb, code, parent);
50674e96
DN
5424 parent = region;
5425 }
50674e96
DN
5426 }
5427
5f40b3cb
ZD
5428 if (single_tree && !parent)
5429 return;
5430
50674e96
DN
5431 for (son = first_dom_son (CDI_DOMINATORS, bb);
5432 son;
5433 son = next_dom_son (CDI_DOMINATORS, son))
5f40b3cb
ZD
5434 build_omp_regions_1 (son, parent, single_tree);
5435}
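/* To illustrate the tree this walk builds:

     #pragma omp parallel
     {
       #pragma omp for
       for (...) ...
     }

   yields

     GIMPLE_OMP_PARALLEL region
       entry = block ending in GIMPLE_OMP_PARALLEL
       exit  = block with the matching GIMPLE_OMP_RETURN
       inner = GIMPLE_OMP_FOR region
		 entry = block ending in GIMPLE_OMP_FOR
		 cont  = block with GIMPLE_OMP_CONTINUE
		 exit  = block with its GIMPLE_OMP_RETURN

   which expand_omp then processes innermost-first.  */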
5436
5437/* Builds the tree of OMP regions rooted at ROOT, storing it to
5438 root_omp_region. */
5439
5440static void
5441build_omp_regions_root (basic_block root)
5442{
5443 gcc_assert (root_omp_region == NULL);
5444 build_omp_regions_1 (root, NULL, true);
5445 gcc_assert (root_omp_region != NULL);
50674e96
DN
5446}
5447
5f40b3cb
ZD
 5448/* Expand the OMP construct (and its subconstructs) starting in HEAD. */
5449
5450void
5451omp_expand_local (basic_block head)
5452{
5453 build_omp_regions_root (head);
5454 if (dump_file && (dump_flags & TDF_DETAILS))
5455 {
5456 fprintf (dump_file, "\nOMP region tree\n\n");
5457 dump_omp_region (dump_file, root_omp_region, 0);
5458 fprintf (dump_file, "\n");
5459 }
5460
5461 remove_exit_barriers (root_omp_region);
5462 expand_omp (root_omp_region);
5463
5464 free_omp_regions ();
5465}
50674e96
DN
5466
 5467/* Scan the CFG and build a tree of OMP regions, storing its root in
 5468 root_omp_region. */
5469
5470static void
5471build_omp_regions (void)
5472{
777f7f9a 5473 gcc_assert (root_omp_region == NULL);
50674e96 5474 calculate_dominance_info (CDI_DOMINATORS);
5f40b3cb 5475 build_omp_regions_1 (ENTRY_BLOCK_PTR, NULL, false);
50674e96
DN
5476}
5477
50674e96
DN
5478/* Main entry point for expanding OMP-GIMPLE into runtime calls. */
5479
c2924966 5480static unsigned int
50674e96
DN
5481execute_expand_omp (void)
5482{
5483 build_omp_regions ();
5484
777f7f9a
RH
5485 if (!root_omp_region)
5486 return 0;
50674e96 5487
777f7f9a
RH
5488 if (dump_file)
5489 {
5490 fprintf (dump_file, "\nOMP region tree\n\n");
5491 dump_omp_region (dump_file, root_omp_region, 0);
5492 fprintf (dump_file, "\n");
50674e96 5493 }
777f7f9a
RH
5494
5495 remove_exit_barriers (root_omp_region);
5496
5497 expand_omp (root_omp_region);
5498
777f7f9a
RH
5499 cleanup_tree_cfg ();
5500
5501 free_omp_regions ();
5502
c2924966 5503 return 0;
50674e96
DN
5504}
5505
917948d3
ZD
5506/* OMP expansion -- the default pass, run before creation of SSA form. */
5507
50674e96
DN
5508static bool
5509gate_expand_omp (void)
5510{
c63cac47 5511 return (flag_openmp != 0 && errorcount == 0);
50674e96
DN
5512}
5513
8ddbbcae 5514struct gimple_opt_pass pass_expand_omp =
50674e96 5515{
8ddbbcae
JH
5516 {
5517 GIMPLE_PASS,
50674e96
DN
5518 "ompexp", /* name */
5519 gate_expand_omp, /* gate */
5520 execute_expand_omp, /* execute */
5521 NULL, /* sub */
5522 NULL, /* next */
5523 0, /* static_pass_number */
7072a650 5524 TV_NONE, /* tv_id */
50674e96 5525 PROP_gimple_any, /* properties_required */
535b544a 5526 0, /* properties_provided */
50674e96
DN
5527 0, /* properties_destroyed */
5528 0, /* todo_flags_start */
8ddbbcae
JH
5529 TODO_dump_func /* todo_flags_finish */
5530 }
50674e96
DN
5531};
5532\f
5533/* Routines to lower OpenMP directives into OMP-GIMPLE. */
5534
726a989a
RB
5535/* Lower the OpenMP sections directive in the current statement in GSI_P.
5536 CTX is the enclosing OMP context for the current statement. */
50674e96
DN
5537
5538static void
726a989a 5539lower_omp_sections (gimple_stmt_iterator *gsi_p, omp_context *ctx)
50674e96 5540{
726a989a
RB
5541 tree block, control;
5542 gimple_stmt_iterator tgsi;
50674e96 5543 unsigned i, len;
726a989a
RB
5544 gimple stmt, new_stmt, bind, t;
5545 gimple_seq ilist, dlist, olist, new_body, body;
d406b663 5546 struct gimplify_ctx gctx;
50674e96 5547
726a989a 5548 stmt = gsi_stmt (*gsi_p);
50674e96 5549
d406b663 5550 push_gimplify_context (&gctx);
50674e96
DN
5551
5552 dlist = NULL;
5553 ilist = NULL;
726a989a
RB
5554 lower_rec_input_clauses (gimple_omp_sections_clauses (stmt),
5555 &ilist, &dlist, ctx);
50674e96 5556
726a989a
RB
5557 tgsi = gsi_start (gimple_omp_body (stmt));
5558 for (len = 0; !gsi_end_p (tgsi); len++, gsi_next (&tgsi))
50674e96
DN
5559 continue;
5560
726a989a
RB
5561 tgsi = gsi_start (gimple_omp_body (stmt));
5562 body = NULL;
5563 for (i = 0; i < len; i++, gsi_next (&tgsi))
50674e96
DN
5564 {
5565 omp_context *sctx;
726a989a 5566 gimple sec_start;
50674e96 5567
726a989a 5568 sec_start = gsi_stmt (tgsi);
50674e96
DN
5569 sctx = maybe_lookup_ctx (sec_start);
5570 gcc_assert (sctx);
5571
726a989a 5572 gimple_seq_add_stmt (&body, sec_start);
777f7f9a 5573
726a989a
RB
5574 lower_omp (gimple_omp_body (sec_start), sctx);
5575 gimple_seq_add_seq (&body, gimple_omp_body (sec_start));
5576 gimple_omp_set_body (sec_start, NULL);
50674e96
DN
5577
5578 if (i == len - 1)
5579 {
726a989a
RB
5580 gimple_seq l = NULL;
5581 lower_lastprivate_clauses (gimple_omp_sections_clauses (stmt), NULL,
50674e96 5582 &l, ctx);
726a989a
RB
5583 gimple_seq_add_seq (&body, l);
5584 gimple_omp_section_set_last (sec_start);
50674e96
DN
5585 }
5586
726a989a 5587 gimple_seq_add_stmt (&body, gimple_build_omp_return (false));
50674e96 5588 }
953ff289
DN
5589
5590 block = make_node (BLOCK);
726a989a 5591 bind = gimple_build_bind (NULL, body, block);
953ff289 5592
726a989a
RB
5593 olist = NULL;
5594 lower_reduction_clauses (gimple_omp_sections_clauses (stmt), &olist, ctx);
50674e96 5595
b357f682 5596 block = make_node (BLOCK);
726a989a 5597 new_stmt = gimple_build_bind (NULL, NULL, block);
50674e96 5598
b357f682 5599 pop_gimplify_context (new_stmt);
726a989a
RB
5600 gimple_bind_append_vars (new_stmt, ctx->block_vars);
5601 BLOCK_VARS (block) = gimple_bind_vars (bind);
b357f682
JJ
5602 if (BLOCK_VARS (block))
5603 TREE_USED (block) = 1;
5604
726a989a
RB
5605 new_body = NULL;
5606 gimple_seq_add_seq (&new_body, ilist);
5607 gimple_seq_add_stmt (&new_body, stmt);
5608 gimple_seq_add_stmt (&new_body, gimple_build_omp_sections_switch ());
5609 gimple_seq_add_stmt (&new_body, bind);
777f7f9a 5610
e5c95afe 5611 control = create_tmp_var (unsigned_type_node, ".section");
726a989a
RB
5612 t = gimple_build_omp_continue (control, control);
5613 gimple_omp_sections_set_control (stmt, control);
5614 gimple_seq_add_stmt (&new_body, t);
777f7f9a 5615
726a989a
RB
5616 gimple_seq_add_seq (&new_body, olist);
5617 gimple_seq_add_seq (&new_body, dlist);
50674e96 5618
726a989a 5619 new_body = maybe_catch_exception (new_body);
4a31b7ee 5620
726a989a
RB
5621 t = gimple_build_omp_return
5622 (!!find_omp_clause (gimple_omp_sections_clauses (stmt),
5623 OMP_CLAUSE_NOWAIT));
5624 gimple_seq_add_stmt (&new_body, t);
777f7f9a 5625
726a989a
RB
5626 gimple_bind_set_body (new_stmt, new_body);
5627 gimple_omp_set_body (stmt, NULL);
50674e96 5628
726a989a 5629 gsi_replace (gsi_p, new_stmt, true);
953ff289
DN
5630}
5631
5632
50674e96 5633/* A subroutine of lower_omp_single. Expand the simple form of
726a989a 5634 a GIMPLE_OMP_SINGLE, without a copyprivate clause:
953ff289
DN
5635
5636 if (GOMP_single_start ())
5637 BODY;
5638 [ GOMP_barrier (); ] -> unless 'nowait' is present.
50674e96
DN
5639
5640 FIXME. It may be better to delay expanding the logic of this until
5641 pass_expand_omp. The expanded logic may make the job more difficult
 5642 for a synchronization analysis pass. */
953ff289
DN
5643
5644static void
726a989a 5645lower_omp_single_simple (gimple single_stmt, gimple_seq *pre_p)
953ff289 5646{
c2255bc4
AH
5647 location_t loc = gimple_location (single_stmt);
5648 tree tlabel = create_artificial_label (loc);
5649 tree flabel = create_artificial_label (loc);
726a989a
RB
5650 gimple call, cond;
5651 tree lhs, decl;
5652
5653 decl = built_in_decls[BUILT_IN_GOMP_SINGLE_START];
5654 lhs = create_tmp_var (TREE_TYPE (TREE_TYPE (decl)), NULL);
5655 call = gimple_build_call (decl, 0);
5656 gimple_call_set_lhs (call, lhs);
5657 gimple_seq_add_stmt (pre_p, call);
5658
5659 cond = gimple_build_cond (EQ_EXPR, lhs,
db3927fb
AH
5660 fold_convert_loc (loc, TREE_TYPE (lhs),
5661 boolean_true_node),
726a989a
RB
5662 tlabel, flabel);
5663 gimple_seq_add_stmt (pre_p, cond);
5664 gimple_seq_add_stmt (pre_p, gimple_build_label (tlabel));
5665 gimple_seq_add_seq (pre_p, gimple_omp_body (single_stmt));
5666 gimple_seq_add_stmt (pre_p, gimple_build_label (flabel));
953ff289
DN
5667}
5668
50674e96
DN
5669
 5670/* A subroutine of lower_omp_single. Expand the form of
726a989a 5671 a GIMPLE_OMP_SINGLE that has a copyprivate clause:
953ff289
DN
5672
5673 #pragma omp single copyprivate (a, b, c)
5674
5675 Create a new structure to hold copies of 'a', 'b' and 'c' and emit:
5676
5677 {
5678 if ((copyout_p = GOMP_single_copy_start ()) == NULL)
5679 {
5680 BODY;
5681 copyout.a = a;
5682 copyout.b = b;
5683 copyout.c = c;
5684 GOMP_single_copy_end (&copyout);
5685 }
5686 else
5687 {
5688 a = copyout_p->a;
5689 b = copyout_p->b;
5690 c = copyout_p->c;
5691 }
5692 GOMP_barrier ();
5693 }
50674e96
DN
5694
5695 FIXME. It may be better to delay expanding the logic of this until
5696 pass_expand_omp. The expanded logic may make the job more difficult
 5697 for a synchronization analysis pass. */
953ff289
DN
5698
5699static void
726a989a 5700lower_omp_single_copy (gimple single_stmt, gimple_seq *pre_p, omp_context *ctx)
953ff289 5701{
726a989a
RB
5702 tree ptr_type, t, l0, l1, l2;
5703 gimple_seq copyin_seq;
c2255bc4 5704 location_t loc = gimple_location (single_stmt);
953ff289
DN
5705
5706 ctx->sender_decl = create_tmp_var (ctx->record_type, ".omp_copy_o");
5707
5708 ptr_type = build_pointer_type (ctx->record_type);
5709 ctx->receiver_decl = create_tmp_var (ptr_type, ".omp_copy_i");
5710
c2255bc4
AH
5711 l0 = create_artificial_label (loc);
5712 l1 = create_artificial_label (loc);
5713 l2 = create_artificial_label (loc);
953ff289 5714
db3927fb
AH
5715 t = build_call_expr_loc (loc, built_in_decls[BUILT_IN_GOMP_SINGLE_COPY_START], 0);
5716 t = fold_convert_loc (loc, ptr_type, t);
726a989a 5717 gimplify_assign (ctx->receiver_decl, t, pre_p);
953ff289
DN
5718
5719 t = build2 (EQ_EXPR, boolean_type_node, ctx->receiver_decl,
5720 build_int_cst (ptr_type, 0));
5721 t = build3 (COND_EXPR, void_type_node, t,
5722 build_and_jump (&l0), build_and_jump (&l1));
5723 gimplify_and_add (t, pre_p);
5724
726a989a 5725 gimple_seq_add_stmt (pre_p, gimple_build_label (l0));
953ff289 5726
726a989a 5727 gimple_seq_add_seq (pre_p, gimple_omp_body (single_stmt));
953ff289
DN
5728
5729 copyin_seq = NULL;
726a989a 5730 lower_copyprivate_clauses (gimple_omp_single_clauses (single_stmt), pre_p,
953ff289
DN
5731 &copyin_seq, ctx);
5732
db3927fb
AH
5733 t = build_fold_addr_expr_loc (loc, ctx->sender_decl);
5734 t = build_call_expr_loc (loc, built_in_decls[BUILT_IN_GOMP_SINGLE_COPY_END],
5735 1, t);
953ff289
DN
5736 gimplify_and_add (t, pre_p);
5737
5738 t = build_and_jump (&l2);
5739 gimplify_and_add (t, pre_p);
5740
726a989a 5741 gimple_seq_add_stmt (pre_p, gimple_build_label (l1));
953ff289 5742
726a989a 5743 gimple_seq_add_seq (pre_p, copyin_seq);
953ff289 5744
726a989a 5745 gimple_seq_add_stmt (pre_p, gimple_build_label (l2));
953ff289
DN
5746}
5747
50674e96 5748
953ff289
DN
 5749/* Lower code for an OpenMP single directive. */
5750
5751static void
726a989a 5752lower_omp_single (gimple_stmt_iterator *gsi_p, omp_context *ctx)
953ff289 5753{
726a989a
RB
5754 tree block;
5755 gimple t, bind, single_stmt = gsi_stmt (*gsi_p);
5756 gimple_seq bind_body, dlist;
d406b663 5757 struct gimplify_ctx gctx;
953ff289 5758
d406b663 5759 push_gimplify_context (&gctx);
953ff289 5760
726a989a
RB
5761 bind_body = NULL;
5762 lower_rec_input_clauses (gimple_omp_single_clauses (single_stmt),
5763 &bind_body, &dlist, ctx);
5764 lower_omp (gimple_omp_body (single_stmt), ctx);
953ff289 5765
726a989a 5766 gimple_seq_add_stmt (&bind_body, single_stmt);
953ff289
DN
5767
5768 if (ctx->record_type)
726a989a 5769 lower_omp_single_copy (single_stmt, &bind_body, ctx);
953ff289 5770 else
726a989a
RB
5771 lower_omp_single_simple (single_stmt, &bind_body);
5772
5773 gimple_omp_set_body (single_stmt, NULL);
953ff289 5774
726a989a 5775 gimple_seq_add_seq (&bind_body, dlist);
777f7f9a 5776
726a989a 5777 bind_body = maybe_catch_exception (bind_body);
777f7f9a 5778
726a989a
RB
5779 t = gimple_build_omp_return
5780 (!!find_omp_clause (gimple_omp_single_clauses (single_stmt),
5781 OMP_CLAUSE_NOWAIT));
5782 gimple_seq_add_stmt (&bind_body, t);
4a31b7ee 5783
726a989a
RB
5784 block = make_node (BLOCK);
5785 bind = gimple_build_bind (NULL, bind_body, block);
777f7f9a 5786
953ff289 5787 pop_gimplify_context (bind);
50674e96 5788
726a989a
RB
5789 gimple_bind_append_vars (bind, ctx->block_vars);
5790 BLOCK_VARS (block) = ctx->block_vars;
5791 gsi_replace (gsi_p, bind, true);
b357f682
JJ
5792 if (BLOCK_VARS (block))
5793 TREE_USED (block) = 1;
953ff289
DN
5794}
5795
50674e96 5796
953ff289
DN
 5797/* Lower code for an OpenMP master directive. */
5798
5799static void
726a989a 5800lower_omp_master (gimple_stmt_iterator *gsi_p, omp_context *ctx)
953ff289 5801{
726a989a
RB
5802 tree block, lab = NULL, x;
5803 gimple stmt = gsi_stmt (*gsi_p), bind;
db3927fb 5804 location_t loc = gimple_location (stmt);
726a989a 5805 gimple_seq tseq;
d406b663 5806 struct gimplify_ctx gctx;
953ff289 5807
d406b663 5808 push_gimplify_context (&gctx);
953ff289
DN
5809
5810 block = make_node (BLOCK);
726a989a
RB
5811 bind = gimple_build_bind (NULL, gimple_seq_alloc_with_stmt (stmt),
5812 block);
777f7f9a 5813
db3927fb 5814 x = build_call_expr_loc (loc, built_in_decls[BUILT_IN_OMP_GET_THREAD_NUM], 0);
953ff289
DN
5815 x = build2 (EQ_EXPR, boolean_type_node, x, integer_zero_node);
5816 x = build3 (COND_EXPR, void_type_node, x, NULL, build_and_jump (&lab));
726a989a
RB
5817 tseq = NULL;
5818 gimplify_and_add (x, &tseq);
5819 gimple_bind_add_seq (bind, tseq);
953ff289 5820
726a989a
RB
5821 lower_omp (gimple_omp_body (stmt), ctx);
5822 gimple_omp_set_body (stmt, maybe_catch_exception (gimple_omp_body (stmt)));
5823 gimple_bind_add_seq (bind, gimple_omp_body (stmt));
5824 gimple_omp_set_body (stmt, NULL);
953ff289 5825
726a989a 5826 gimple_bind_add_stmt (bind, gimple_build_label (lab));
777f7f9a 5827
726a989a 5828 gimple_bind_add_stmt (bind, gimple_build_omp_return (true));
777f7f9a 5829
953ff289 5830 pop_gimplify_context (bind);
50674e96 5831
726a989a
RB
5832 gimple_bind_append_vars (bind, ctx->block_vars);
5833 BLOCK_VARS (block) = ctx->block_vars;
5834 gsi_replace (gsi_p, bind, true);
953ff289
DN
5835}
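/* A sketch of the result (BODY stands for the lowered master body):

     if (omp_get_thread_num () != 0)
       goto lab;
     BODY;
    lab:

   followed by a nowait GIMPLE_OMP_RETURN, matching the absence of an
   implied barrier at the end of a master construct.  */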
5836
50674e96 5837
953ff289
DN
 5838/* Lower code for an OpenMP ordered directive. */
5839
5840static void
726a989a 5841lower_omp_ordered (gimple_stmt_iterator *gsi_p, omp_context *ctx)
953ff289 5842{
726a989a
RB
5843 tree block;
5844 gimple stmt = gsi_stmt (*gsi_p), bind, x;
d406b663 5845 struct gimplify_ctx gctx;
953ff289 5846
d406b663 5847 push_gimplify_context (&gctx);
953ff289
DN
5848
5849 block = make_node (BLOCK);
726a989a
RB
5850 bind = gimple_build_bind (NULL, gimple_seq_alloc_with_stmt (stmt),
5851 block);
777f7f9a 5852
726a989a
RB
5853 x = gimple_build_call (built_in_decls[BUILT_IN_GOMP_ORDERED_START], 0);
5854 gimple_bind_add_stmt (bind, x);
953ff289 5855
726a989a
RB
5856 lower_omp (gimple_omp_body (stmt), ctx);
5857 gimple_omp_set_body (stmt, maybe_catch_exception (gimple_omp_body (stmt)));
5858 gimple_bind_add_seq (bind, gimple_omp_body (stmt));
5859 gimple_omp_set_body (stmt, NULL);
953ff289 5860
726a989a
RB
5861 x = gimple_build_call (built_in_decls[BUILT_IN_GOMP_ORDERED_END], 0);
5862 gimple_bind_add_stmt (bind, x);
777f7f9a 5863
726a989a 5864 gimple_bind_add_stmt (bind, gimple_build_omp_return (true));
777f7f9a 5865
953ff289 5866 pop_gimplify_context (bind);
50674e96 5867
726a989a
RB
5868 gimple_bind_append_vars (bind, ctx->block_vars);
5869 BLOCK_VARS (block) = gimple_bind_vars (bind);
5870 gsi_replace (gsi_p, bind, true);
953ff289
DN
5871}
5872
953ff289 5873
726a989a 5874/* Gimplify a GIMPLE_OMP_CRITICAL statement. This is a relatively simple
953ff289
DN
 5875 substitution of a couple of function calls. The NAMED case, however,
 5876 requires that languages coordinate a symbol name. It is therefore
5877 best put here in common code. */
5878
5879static GTY((param1_is (tree), param2_is (tree)))
5880 splay_tree critical_name_mutexes;
5881
5882static void
726a989a 5883lower_omp_critical (gimple_stmt_iterator *gsi_p, omp_context *ctx)
953ff289 5884{
726a989a
RB
5885 tree block;
5886 tree name, lock, unlock;
5887 gimple stmt = gsi_stmt (*gsi_p), bind;
db3927fb 5888 location_t loc = gimple_location (stmt);
726a989a 5889 gimple_seq tbody;
d406b663 5890 struct gimplify_ctx gctx;
953ff289 5891
726a989a 5892 name = gimple_omp_critical_name (stmt);
953ff289
DN
5893 if (name)
5894 {
5039610b 5895 tree decl;
953ff289
DN
5896 splay_tree_node n;
5897
5898 if (!critical_name_mutexes)
5899 critical_name_mutexes
5900 = splay_tree_new_ggc (splay_tree_compare_pointers);
5901
5902 n = splay_tree_lookup (critical_name_mutexes, (splay_tree_key) name);
5903 if (n == NULL)
5904 {
5905 char *new_str;
5906
5907 decl = create_tmp_var_raw (ptr_type_node, NULL);
5908
5909 new_str = ACONCAT ((".gomp_critical_user_",
5910 IDENTIFIER_POINTER (name), NULL));
5911 DECL_NAME (decl) = get_identifier (new_str);
5912 TREE_PUBLIC (decl) = 1;
5913 TREE_STATIC (decl) = 1;
5914 DECL_COMMON (decl) = 1;
5915 DECL_ARTIFICIAL (decl) = 1;
5916 DECL_IGNORED_P (decl) = 1;
8a4a83ed 5917 varpool_finalize_decl (decl);
953ff289
DN
5918
5919 splay_tree_insert (critical_name_mutexes, (splay_tree_key) name,
5920 (splay_tree_value) decl);
5921 }
5922 else
5923 decl = (tree) n->value;
5924
953ff289 5925 lock = built_in_decls[BUILT_IN_GOMP_CRITICAL_NAME_START];
db3927fb 5926 lock = build_call_expr_loc (loc, lock, 1, build_fold_addr_expr_loc (loc, decl));
953ff289 5927
953ff289 5928 unlock = built_in_decls[BUILT_IN_GOMP_CRITICAL_NAME_END];
db3927fb
AH
5929 unlock = build_call_expr_loc (loc, unlock, 1,
5930 build_fold_addr_expr_loc (loc, decl));
953ff289
DN
5931 }
5932 else
5933 {
5934 lock = built_in_decls[BUILT_IN_GOMP_CRITICAL_START];
db3927fb 5935 lock = build_call_expr_loc (loc, lock, 0);
953ff289
DN
5936
5937 unlock = built_in_decls[BUILT_IN_GOMP_CRITICAL_END];
db3927fb 5938 unlock = build_call_expr_loc (loc, unlock, 0);
953ff289
DN
5939 }
5940
d406b663 5941 push_gimplify_context (&gctx);
953ff289
DN
5942
5943 block = make_node (BLOCK);
726a989a 5944 bind = gimple_build_bind (NULL, gimple_seq_alloc_with_stmt (stmt), block);
777f7f9a 5945
726a989a
RB
5946 tbody = gimple_bind_body (bind);
5947 gimplify_and_add (lock, &tbody);
5948 gimple_bind_set_body (bind, tbody);
953ff289 5949
726a989a
RB
5950 lower_omp (gimple_omp_body (stmt), ctx);
5951 gimple_omp_set_body (stmt, maybe_catch_exception (gimple_omp_body (stmt)));
5952 gimple_bind_add_seq (bind, gimple_omp_body (stmt));
5953 gimple_omp_set_body (stmt, NULL);
953ff289 5954
726a989a
RB
5955 tbody = gimple_bind_body (bind);
5956 gimplify_and_add (unlock, &tbody);
5957 gimple_bind_set_body (bind, tbody);
777f7f9a 5958
726a989a 5959 gimple_bind_add_stmt (bind, gimple_build_omp_return (true));
953ff289
DN
5960
5961 pop_gimplify_context (bind);
726a989a
RB
5962 gimple_bind_append_vars (bind, ctx->block_vars);
5963 BLOCK_VARS (block) = gimple_bind_vars (bind);
5964 gsi_replace (gsi_p, bind, true);
50674e96
DN
5965}
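/* A sketch of the lowered form (the real mutex decl uses the
   assembler-level name ".gomp_critical_user_foo", not writable in C;
   being TREE_PUBLIC and DECL_COMMON, every translation unit naming the
   same critical shares one mutex):

     void *gomp_critical_user_foo;

     #pragma omp critical (foo)
     x++;

   becomes roughly

     GOMP_critical_name_start (&gomp_critical_user_foo);
     x++;
     GOMP_critical_name_end (&gomp_critical_user_foo);

   while an unnamed critical calls GOMP_critical_start and
   GOMP_critical_end, which use a single default mutex inside libgomp.  */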
5966
5967
5968/* A subroutine of lower_omp_for. Generate code to emit the predicate
5969 for a lastprivate clause. Given a loop control predicate of (V
5970 cond N2), we gate the clause on (!(V cond N2)). The lowered form
3d55c64b
JJ
 5971 is appended to *DLIST; iterator initialization is appended to
5972 *BODY_P. */
50674e96
DN
5973
5974static void
726a989a
RB
5975lower_omp_for_lastprivate (struct omp_for_data *fd, gimple_seq *body_p,
5976 gimple_seq *dlist, struct omp_context *ctx)
50674e96 5977{
726a989a 5978 tree clauses, cond, vinit;
50674e96 5979 enum tree_code cond_code;
726a989a 5980 gimple_seq stmts;
50674e96 5981
a68ab351 5982 cond_code = fd->loop.cond_code;
50674e96
DN
5983 cond_code = cond_code == LT_EXPR ? GE_EXPR : LE_EXPR;
5984
5985 /* When possible, use a strict equality expression. This can let VRP
5986 type optimizations deduce the value and remove a copy. */
a68ab351 5987 if (host_integerp (fd->loop.step, 0))
50674e96 5988 {
a68ab351 5989 HOST_WIDE_INT step = TREE_INT_CST_LOW (fd->loop.step);
50674e96
DN
5990 if (step == 1 || step == -1)
5991 cond_code = EQ_EXPR;
5992 }
5993
a68ab351 5994 cond = build2 (cond_code, boolean_type_node, fd->loop.v, fd->loop.n2);
50674e96 5995
726a989a 5996 clauses = gimple_omp_for_clauses (fd->for_stmt);
3d55c64b
JJ
5997 stmts = NULL;
5998 lower_lastprivate_clauses (clauses, cond, &stmts, ctx);
726a989a 5999 if (!gimple_seq_empty_p (stmts))
3d55c64b 6000 {
726a989a 6001 gimple_seq_add_seq (&stmts, *dlist);
a68ab351 6002 *dlist = stmts;
3d55c64b
JJ
6003
6004 /* Optimize: v = 0; is usually cheaper than v = some_other_constant. */
a68ab351 6005 vinit = fd->loop.n1;
3d55c64b 6006 if (cond_code == EQ_EXPR
a68ab351
JJ
6007 && host_integerp (fd->loop.n2, 0)
6008 && ! integer_zerop (fd->loop.n2))
6009 vinit = build_int_cst (TREE_TYPE (fd->loop.v), 0);
3d55c64b
JJ
6010
6011 /* Initialize the iterator variable, so that threads that don't execute
6012 any iterations don't execute the lastprivate clauses by accident. */
726a989a 6013 gimplify_assign (fd->loop.v, vinit, body_p);
3d55c64b 6014 }
50674e96
DN
6015}
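/* A worked instance: with

     #pragma omp for lastprivate (x)
     for (i = 0; i < n; i++)
       ...

   the step is the literal 1, so the guard becomes the strict test
   i == n instead of i >= n, which VRP can use to deduce the value of i
   inside the guarded block.  The iterator is then pre-initialized (to
   N1, or to the cheaper constant 0 when N2 is a nonzero constant) so
   that a thread receiving no iterations cannot satisfy the guard with a
   stale i and run the lastprivate copy-out by accident.  */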
6016
6017
6018/* Lower code for an OpenMP loop directive. */
6019
6020static void
726a989a 6021lower_omp_for (gimple_stmt_iterator *gsi_p, omp_context *ctx)
50674e96 6022{
726a989a 6023 tree *rhs_p, block;
50674e96 6024 struct omp_for_data fd;
726a989a
RB
6025 gimple stmt = gsi_stmt (*gsi_p), new_stmt;
6026 gimple_seq omp_for_body, body, dlist, ilist;
6027 size_t i;
d406b663 6028 struct gimplify_ctx gctx;
50674e96 6029
d406b663 6030 push_gimplify_context (&gctx);
50674e96 6031
726a989a
RB
6032 lower_omp (gimple_omp_for_pre_body (stmt), ctx);
6033 lower_omp (gimple_omp_body (stmt), ctx);
50674e96 6034
b357f682 6035 block = make_node (BLOCK);
726a989a 6036 new_stmt = gimple_build_bind (NULL, NULL, block);
b357f682 6037
50674e96
DN
6038 /* Move declaration of temporaries in the loop body before we make
6039 it go away. */
726a989a
RB
6040 omp_for_body = gimple_omp_body (stmt);
6041 if (!gimple_seq_empty_p (omp_for_body)
6042 && gimple_code (gimple_seq_first_stmt (omp_for_body)) == GIMPLE_BIND)
6043 {
6044 tree vars = gimple_bind_vars (gimple_seq_first_stmt (omp_for_body));
6045 gimple_bind_append_vars (new_stmt, vars);
6046 }
50674e96 6047
726a989a 6048 /* The pre-body and input clauses go before the lowered GIMPLE_OMP_FOR. */
50674e96
DN
6049 ilist = NULL;
6050 dlist = NULL;
726a989a
RB
6051 body = NULL;
6052 lower_rec_input_clauses (gimple_omp_for_clauses (stmt), &body, &dlist, ctx);
6053 gimple_seq_add_seq (&body, gimple_omp_for_pre_body (stmt));
50674e96
DN
6054
6055 /* Lower the header expressions. At this point, we can assume that
6056 the header is of the form:
6057
6058 #pragma omp for (V = VAL1; V {<|>|<=|>=} VAL2; V = V [+-] VAL3)
6059
6060 We just need to make sure that VAL1, VAL2 and VAL3 are lowered
6061 using the .omp_data_s mapping, if needed. */
726a989a 6062 for (i = 0; i < gimple_omp_for_collapse (stmt); i++)
a68ab351 6063 {
726a989a 6064 rhs_p = gimple_omp_for_initial_ptr (stmt, i);
a68ab351 6065 if (!is_gimple_min_invariant (*rhs_p))
726a989a 6066 *rhs_p = get_formal_tmp_var (*rhs_p, &body);
a68ab351 6067
726a989a 6068 rhs_p = gimple_omp_for_final_ptr (stmt, i);
a68ab351 6069 if (!is_gimple_min_invariant (*rhs_p))
726a989a 6070 *rhs_p = get_formal_tmp_var (*rhs_p, &body);
a68ab351 6071
726a989a 6072 rhs_p = &TREE_OPERAND (gimple_omp_for_incr (stmt, i), 1);
a68ab351 6073 if (!is_gimple_min_invariant (*rhs_p))
726a989a 6074 *rhs_p = get_formal_tmp_var (*rhs_p, &body);
a68ab351 6075 }
50674e96
DN
6076
6077 /* Once lowered, extract the bounds and clauses. */
a68ab351 6078 extract_omp_for_data (stmt, &fd, NULL);
50674e96 6079
726a989a 6080 lower_omp_for_lastprivate (&fd, &body, &dlist, ctx);
50674e96 6081
726a989a
RB
6082 gimple_seq_add_stmt (&body, stmt);
6083 gimple_seq_add_seq (&body, gimple_omp_body (stmt));
777f7f9a 6084
726a989a
RB
6085 gimple_seq_add_stmt (&body, gimple_build_omp_continue (fd.loop.v,
6086 fd.loop.v));
777f7f9a 6087
50674e96 6088 /* After the loop, add exit clauses. */
726a989a
RB
6089 lower_reduction_clauses (gimple_omp_for_clauses (stmt), &body, ctx);
6090 gimple_seq_add_seq (&body, dlist);
50674e96 6091
726a989a 6092 body = maybe_catch_exception (body);
4a31b7ee 6093
777f7f9a 6094 /* Region exit marker goes at the end of the loop body. */
726a989a 6095 gimple_seq_add_stmt (&body, gimple_build_omp_return (fd.have_nowait));
50674e96 6096
b357f682 6097 pop_gimplify_context (new_stmt);
726a989a
RB
6098
6099 gimple_bind_append_vars (new_stmt, ctx->block_vars);
6100 BLOCK_VARS (block) = gimple_bind_vars (new_stmt);
b357f682
JJ
6101 if (BLOCK_VARS (block))
6102 TREE_USED (block) = 1;
50674e96 6103
726a989a
RB
6104 gimple_bind_set_body (new_stmt, body);
6105 gimple_omp_set_body (stmt, NULL);
6106 gimple_omp_for_set_pre_body (stmt, NULL);
6107 gsi_replace (gsi_p, new_stmt, true);
953ff289
DN
6108}
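/* A sketch of the header lowering done above: a non-invariant bound,
   as in

     #pragma omp for
     for (i = 0; i < p->n; i++) ...

   is replaced in front of the GIMPLE_OMP_FOR by a formal temporary
   (D.1 is an invented name),

     D.1 = p->n;
     #pragma omp for (i = 0; i < D.1; i += 1)

   so that extract_omp_for_data and the later expansion only see
   invariants or simple locals in the loop header.  */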
6109
726a989a
RB
6110/* Callback for walk_stmts. Check if the current statement only contains
 6111 GIMPLE_OMP_FOR or GIMPLE_OMP_SECTIONS. */
69f1837b
JJ
6112
6113static tree
726a989a
RB
6114check_combined_parallel (gimple_stmt_iterator *gsi_p,
6115 bool *handled_ops_p,
6116 struct walk_stmt_info *wi)
69f1837b 6117{
d3bfe4de 6118 int *info = (int *) wi->info;
726a989a 6119 gimple stmt = gsi_stmt (*gsi_p);
69f1837b 6120
726a989a
RB
6121 *handled_ops_p = true;
6122 switch (gimple_code (stmt))
69f1837b 6123 {
726a989a
RB
6124 WALK_SUBSTMTS;
6125
6126 case GIMPLE_OMP_FOR:
6127 case GIMPLE_OMP_SECTIONS:
69f1837b
JJ
6128 *info = *info == 0 ? 1 : -1;
6129 break;
6130 default:
6131 *info = -1;
6132 break;
6133 }
6134 return NULL;
6135}
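/* To illustrate: in

     #pragma omp parallel
     #pragma omp for
     for (i = 0; i < n; i++)
       a[i] = i;

   the still-unlowered parallel body holds exactly one GIMPLE_OMP_FOR and
   nothing else, so *INFO ends up 1 and the parallel is marked combined,
   letting expansion use the fused GOMP_parallel_loop_* entry points.
   Any other statement next to the workshare, or a second workshare,
   drives *INFO to -1.  */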
50674e96 6136
a68ab351
JJ
6137struct omp_taskcopy_context
6138{
6139 /* This field must be at the beginning, as we do "inheritance": Some
6140 callback functions for tree-inline.c (e.g., omp_copy_decl)
6141 receive a copy_body_data pointer that is up-casted to an
6142 omp_context pointer. */
6143 copy_body_data cb;
6144 omp_context *ctx;
6145};
6146
6147static tree
6148task_copyfn_copy_decl (tree var, copy_body_data *cb)
6149{
6150 struct omp_taskcopy_context *tcctx = (struct omp_taskcopy_context *) cb;
6151
6152 if (splay_tree_lookup (tcctx->ctx->sfield_map, (splay_tree_key) var))
6153 return create_tmp_var (TREE_TYPE (var), NULL);
6154
6155 return var;
6156}
6157
6158static tree
6159task_copyfn_remap_type (struct omp_taskcopy_context *tcctx, tree orig_type)
6160{
6161 tree name, new_fields = NULL, type, f;
6162
6163 type = lang_hooks.types.make_type (RECORD_TYPE);
6164 name = DECL_NAME (TYPE_NAME (orig_type));
c2255bc4
AH
6165 name = build_decl (gimple_location (tcctx->ctx->stmt),
6166 TYPE_DECL, name, type);
a68ab351
JJ
6167 TYPE_NAME (type) = name;
6168
6169 for (f = TYPE_FIELDS (orig_type); f ; f = TREE_CHAIN (f))
6170 {
6171 tree new_f = copy_node (f);
6172 DECL_CONTEXT (new_f) = type;
6173 TREE_TYPE (new_f) = remap_type (TREE_TYPE (f), &tcctx->cb);
6174 TREE_CHAIN (new_f) = new_fields;
726a989a
RB
6175 walk_tree (&DECL_SIZE (new_f), copy_tree_body_r, &tcctx->cb, NULL);
6176 walk_tree (&DECL_SIZE_UNIT (new_f), copy_tree_body_r, &tcctx->cb, NULL);
6177 walk_tree (&DECL_FIELD_OFFSET (new_f), copy_tree_body_r,
6178 &tcctx->cb, NULL);
a68ab351
JJ
6179 new_fields = new_f;
6180 *pointer_map_insert (tcctx->cb.decl_map, f) = new_f;
6181 }
6182 TYPE_FIELDS (type) = nreverse (new_fields);
6183 layout_type (type);
6184 return type;
6185}
6186
6187/* Create task copyfn. */
6188
6189static void
726a989a 6190create_task_copyfn (gimple task_stmt, omp_context *ctx)
a68ab351
JJ
6191{
6192 struct function *child_cfun;
6193 tree child_fn, t, c, src, dst, f, sf, arg, sarg, decl;
6194 tree record_type, srecord_type, bind, list;
6195 bool record_needs_remap = false, srecord_needs_remap = false;
6196 splay_tree_node n;
6197 struct omp_taskcopy_context tcctx;
d406b663 6198 struct gimplify_ctx gctx;
db3927fb 6199 location_t loc = gimple_location (task_stmt);
a68ab351 6200
726a989a 6201 child_fn = gimple_omp_task_copy_fn (task_stmt);
a68ab351
JJ
6202 child_cfun = DECL_STRUCT_FUNCTION (child_fn);
6203 gcc_assert (child_cfun->cfg == NULL);
6204 child_cfun->dont_save_pending_sizes_p = 1;
6205 DECL_SAVED_TREE (child_fn) = alloc_stmt_list ();
6206
6207 /* Reset DECL_CONTEXT on function arguments. */
6208 for (t = DECL_ARGUMENTS (child_fn); t; t = TREE_CHAIN (t))
6209 DECL_CONTEXT (t) = child_fn;
6210
6211 /* Populate the function. */
d406b663 6212 push_gimplify_context (&gctx);
a68ab351
JJ
6213 current_function_decl = child_fn;
6214
6215 bind = build3 (BIND_EXPR, void_type_node, NULL, NULL, NULL);
6216 TREE_SIDE_EFFECTS (bind) = 1;
6217 list = NULL;
6218 DECL_SAVED_TREE (child_fn) = bind;
726a989a 6219 DECL_SOURCE_LOCATION (child_fn) = gimple_location (task_stmt);
a68ab351
JJ
6220
6221 /* Remap src and dst argument types if needed. */
6222 record_type = ctx->record_type;
6223 srecord_type = ctx->srecord_type;
6224 for (f = TYPE_FIELDS (record_type); f ; f = TREE_CHAIN (f))
6225 if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
6226 {
6227 record_needs_remap = true;
6228 break;
6229 }
6230 for (f = TYPE_FIELDS (srecord_type); f ; f = TREE_CHAIN (f))
6231 if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
6232 {
6233 srecord_needs_remap = true;
6234 break;
6235 }
6236
6237 if (record_needs_remap || srecord_needs_remap)
6238 {
6239 memset (&tcctx, '\0', sizeof (tcctx));
6240 tcctx.cb.src_fn = ctx->cb.src_fn;
6241 tcctx.cb.dst_fn = child_fn;
6242 tcctx.cb.src_node = cgraph_node (tcctx.cb.src_fn);
6243 tcctx.cb.dst_node = tcctx.cb.src_node;
6244 tcctx.cb.src_cfun = ctx->cb.src_cfun;
6245 tcctx.cb.copy_decl = task_copyfn_copy_decl;
1d65f45c 6246 tcctx.cb.eh_lp_nr = 0;
a68ab351
JJ
6247 tcctx.cb.transform_call_graph_edges = CB_CGE_MOVE;
6248 tcctx.cb.decl_map = pointer_map_create ();
6249 tcctx.ctx = ctx;
6250
6251 if (record_needs_remap)
6252 record_type = task_copyfn_remap_type (&tcctx, record_type);
6253 if (srecord_needs_remap)
6254 srecord_type = task_copyfn_remap_type (&tcctx, srecord_type);
6255 }
6256 else
6257 tcctx.cb.decl_map = NULL;
6258
6259 push_cfun (child_cfun);
6260
6261 arg = DECL_ARGUMENTS (child_fn);
6262 TREE_TYPE (arg) = build_pointer_type (record_type);
6263 sarg = TREE_CHAIN (arg);
6264 TREE_TYPE (sarg) = build_pointer_type (srecord_type);
6265
6266 /* First pass: initialize temporaries used in record_type and srecord_type
6267 sizes and field offsets. */
6268 if (tcctx.cb.decl_map)
726a989a 6269 for (c = gimple_omp_task_clauses (task_stmt); c; c = OMP_CLAUSE_CHAIN (c))
a68ab351
JJ
6270 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_FIRSTPRIVATE)
6271 {
6272 tree *p;
6273
6274 decl = OMP_CLAUSE_DECL (c);
6275 p = (tree *) pointer_map_contains (tcctx.cb.decl_map, decl);
6276 if (p == NULL)
6277 continue;
6278 n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
6279 sf = (tree) n->value;
6280 sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
db3927fb 6281 src = build_fold_indirect_ref_loc (loc, sarg);
a68ab351 6282 src = build3 (COMPONENT_REF, TREE_TYPE (sf), src, sf, NULL);
726a989a 6283 t = build2 (MODIFY_EXPR, TREE_TYPE (*p), *p, src);
a68ab351
JJ
6284 append_to_statement_list (t, &list);
6285 }
6286
6287 /* Second pass: copy shared var pointers and copy construct non-VLA
6288 firstprivate vars. */
726a989a 6289 for (c = gimple_omp_task_clauses (task_stmt); c; c = OMP_CLAUSE_CHAIN (c))
a68ab351
JJ
6290 switch (OMP_CLAUSE_CODE (c))
6291 {
6292 case OMP_CLAUSE_SHARED:
6293 decl = OMP_CLAUSE_DECL (c);
6294 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
6295 if (n == NULL)
6296 break;
6297 f = (tree) n->value;
6298 if (tcctx.cb.decl_map)
6299 f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
6300 n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
6301 sf = (tree) n->value;
6302 if (tcctx.cb.decl_map)
6303 sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
db3927fb 6304 src = build_fold_indirect_ref_loc (loc, sarg);
a68ab351 6305 src = build3 (COMPONENT_REF, TREE_TYPE (sf), src, sf, NULL);
db3927fb 6306 dst = build_fold_indirect_ref_loc (loc, arg);
a68ab351 6307 dst = build3 (COMPONENT_REF, TREE_TYPE (f), dst, f, NULL);
726a989a 6308 t = build2 (MODIFY_EXPR, TREE_TYPE (dst), dst, src);
a68ab351
JJ
6309 append_to_statement_list (t, &list);
6310 break;
6311 case OMP_CLAUSE_FIRSTPRIVATE:
6312 decl = OMP_CLAUSE_DECL (c);
6313 if (is_variable_sized (decl))
6314 break;
6315 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
6316 if (n == NULL)
6317 break;
6318 f = (tree) n->value;
6319 if (tcctx.cb.decl_map)
6320 f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
6321 n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
6322 if (n != NULL)
6323 {
6324 sf = (tree) n->value;
6325 if (tcctx.cb.decl_map)
6326 sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
db3927fb 6327 src = build_fold_indirect_ref_loc (loc, sarg);
a68ab351
JJ
6328 src = build3 (COMPONENT_REF, TREE_TYPE (sf), src, sf, NULL);
6329 if (use_pointer_for_field (decl, NULL) || is_reference (decl))
db3927fb 6330 src = build_fold_indirect_ref_loc (loc, src);
a68ab351
JJ
6331 }
6332 else
6333 src = decl;
db3927fb 6334 dst = build_fold_indirect_ref_loc (loc, arg);
a68ab351
JJ
6335 dst = build3 (COMPONENT_REF, TREE_TYPE (f), dst, f, NULL);
6336 t = lang_hooks.decls.omp_clause_copy_ctor (c, dst, src);
6337 append_to_statement_list (t, &list);
6338 break;
6339 case OMP_CLAUSE_PRIVATE:
6340 if (! OMP_CLAUSE_PRIVATE_OUTER_REF (c))
6341 break;
6342 decl = OMP_CLAUSE_DECL (c);
6343 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
6344 f = (tree) n->value;
6345 if (tcctx.cb.decl_map)
6346 f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
6347 n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
6348 if (n != NULL)
6349 {
6350 sf = (tree) n->value;
6351 if (tcctx.cb.decl_map)
6352 sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
db3927fb 6353 src = build_fold_indirect_ref_loc (loc, sarg);
a68ab351
JJ
6354 src = build3 (COMPONENT_REF, TREE_TYPE (sf), src, sf, NULL);
6355 if (use_pointer_for_field (decl, NULL))
db3927fb 6356 src = build_fold_indirect_ref_loc (loc, src);
a68ab351
JJ
6357 }
6358 else
6359 src = decl;
db3927fb 6360 dst = build_fold_indirect_ref_loc (loc, arg);
a68ab351 6361 dst = build3 (COMPONENT_REF, TREE_TYPE (f), dst, f, NULL);
726a989a 6362 t = build2 (MODIFY_EXPR, TREE_TYPE (dst), dst, src);
a68ab351
JJ
6363 append_to_statement_list (t, &list);
6364 break;
6365 default:
6366 break;
6367 }
6368
6369 /* Last pass: handle VLA firstprivates. */
6370 if (tcctx.cb.decl_map)
726a989a 6371 for (c = gimple_omp_task_clauses (task_stmt); c; c = OMP_CLAUSE_CHAIN (c))
a68ab351
JJ
6372 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_FIRSTPRIVATE)
6373 {
6374 tree ind, ptr, df;
6375
6376 decl = OMP_CLAUSE_DECL (c);
6377 if (!is_variable_sized (decl))
6378 continue;
6379 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
6380 if (n == NULL)
6381 continue;
6382 f = (tree) n->value;
6383 f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
6384 gcc_assert (DECL_HAS_VALUE_EXPR_P (decl));
6385 ind = DECL_VALUE_EXPR (decl);
6386 gcc_assert (TREE_CODE (ind) == INDIRECT_REF);
6387 gcc_assert (DECL_P (TREE_OPERAND (ind, 0)));
6388 n = splay_tree_lookup (ctx->sfield_map,
6389 (splay_tree_key) TREE_OPERAND (ind, 0));
6390 sf = (tree) n->value;
6391 sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
db3927fb 6392 src = build_fold_indirect_ref_loc (loc, sarg);
a68ab351 6393 src = build3 (COMPONENT_REF, TREE_TYPE (sf), src, sf, NULL);
db3927fb
AH
6394 src = build_fold_indirect_ref_loc (loc, src);
6395 dst = build_fold_indirect_ref_loc (loc, arg);
a68ab351
JJ
6396 dst = build3 (COMPONENT_REF, TREE_TYPE (f), dst, f, NULL);
6397 t = lang_hooks.decls.omp_clause_copy_ctor (c, dst, src);
6398 append_to_statement_list (t, &list);
6399 n = splay_tree_lookup (ctx->field_map,
6400 (splay_tree_key) TREE_OPERAND (ind, 0));
6401 df = (tree) n->value;
6402 df = *(tree *) pointer_map_contains (tcctx.cb.decl_map, df);
db3927fb 6403 ptr = build_fold_indirect_ref_loc (loc, arg);
a68ab351 6404 ptr = build3 (COMPONENT_REF, TREE_TYPE (df), ptr, df, NULL);
726a989a 6405 t = build2 (MODIFY_EXPR, TREE_TYPE (ptr), ptr,
db3927fb 6406 build_fold_addr_expr_loc (loc, dst));
a68ab351
JJ
6407 append_to_statement_list (t, &list);
6408 }
6409
6410 t = build1 (RETURN_EXPR, void_type_node, NULL);
6411 append_to_statement_list (t, &list);
6412
6413 if (tcctx.cb.decl_map)
6414 pointer_map_destroy (tcctx.cb.decl_map);
6415 pop_gimplify_context (NULL);
6416 BIND_EXPR_BODY (bind) = list;
6417 pop_cfun ();
6418 current_function_decl = ctx->cb.src_fn;
6419}
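/* A sketch of what the generated copyfn amounts to (the record layouts
   and all names here are invented for illustration):

     int s, a;
     char b[16];
     #pragma omp task shared (s) firstprivate (a, b)
     ...

     struct omp_data_s  { int *s; int a; char b[16]; };
     struct omp_data_ss { int *s; int a; char b[16]; };

     void
     task_copyfn (struct omp_data_s *dst, struct omp_data_ss *src)
     {
       dst->s = src->s;
       dst->a = src->a;
       __builtin_memcpy (dst->b, src->b, sizeof dst->b);
     }

   The shared pointer is copied as-is, scalars and non-VLA aggregates are
   copy-constructed, and VLA firstprivates are handled by the last pass
   above, which also stores the address of the copied data back into the
   pointer field of the task record.  */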

/* Lower the OpenMP parallel or task directive in the current statement
   in GSI_P.  CTX holds context information for the directive.  */

static void
lower_omp_taskreg (gimple_stmt_iterator *gsi_p, omp_context *ctx)
{
  tree clauses;
  tree child_fn, t;
  gimple stmt = gsi_stmt (*gsi_p);
  gimple par_bind, bind;
  gimple_seq par_body, olist, ilist, par_olist, par_ilist, new_body;
  struct gimplify_ctx gctx;
  location_t loc = gimple_location (stmt);

  clauses = gimple_omp_taskreg_clauses (stmt);
  par_bind = gimple_seq_first_stmt (gimple_omp_body (stmt));
  par_body = gimple_bind_body (par_bind);
  child_fn = ctx->cb.dst_fn;
  if (gimple_code (stmt) == GIMPLE_OMP_PARALLEL
      && !gimple_omp_parallel_combined_p (stmt))
    {
      struct walk_stmt_info wi;
      int ws_num = 0;

      memset (&wi, 0, sizeof (wi));
      wi.info = &ws_num;
      wi.val_only = true;
      walk_gimple_seq (par_body, check_combined_parallel, NULL, &wi);
      if (ws_num == 1)
	gimple_omp_parallel_set_combined_p (stmt, true);
    }
  if (ctx->srecord_type)
    create_task_copyfn (stmt, ctx);

  push_gimplify_context (&gctx);

  par_olist = NULL;
  par_ilist = NULL;
  lower_rec_input_clauses (clauses, &par_ilist, &par_olist, ctx);
  lower_omp (par_body, ctx);
  if (gimple_code (stmt) == GIMPLE_OMP_PARALLEL)
    lower_reduction_clauses (clauses, &par_olist, ctx);

  /* Declare all the variables created by mapping and the variables
     declared in the scope of the parallel body.  */
  record_vars_into (ctx->block_vars, child_fn);
  record_vars_into (gimple_bind_vars (par_bind), child_fn);

  if (ctx->record_type)
    {
      ctx->sender_decl
	= create_tmp_var (ctx->srecord_type ? ctx->srecord_type
			  : ctx->record_type, ".omp_data_o");
      TREE_ADDRESSABLE (ctx->sender_decl) = 1;
      gimple_omp_taskreg_set_data_arg (stmt, ctx->sender_decl);
    }

  olist = NULL;
  ilist = NULL;
  lower_send_clauses (clauses, &ilist, &olist, ctx);
  lower_send_shared_vars (&ilist, &olist, ctx);

  /* Once all the expansions are done, sequence all the different
     fragments inside gimple_omp_body.  */

  new_body = NULL;

  if (ctx->record_type)
    {
      t = build_fold_addr_expr_loc (loc, ctx->sender_decl);
      /* fixup_child_record_type might have changed receiver_decl's type.  */
      t = fold_convert_loc (loc, TREE_TYPE (ctx->receiver_decl), t);
      gimple_seq_add_stmt (&new_body,
			   gimple_build_assign (ctx->receiver_decl, t));
    }

  gimple_seq_add_seq (&new_body, par_ilist);
  gimple_seq_add_seq (&new_body, par_body);
  gimple_seq_add_seq (&new_body, par_olist);
  new_body = maybe_catch_exception (new_body);
  gimple_seq_add_stmt (&new_body, gimple_build_omp_return (false));
  gimple_omp_set_body (stmt, new_body);

  bind = gimple_build_bind (NULL, NULL, gimple_bind_block (par_bind));
  gimple_bind_add_stmt (bind, stmt);
  if (ilist || olist)
    {
      gimple_seq_add_stmt (&ilist, bind);
      gimple_seq_add_seq (&ilist, olist);
      bind = gimple_build_bind (NULL, ilist, NULL);
    }

  gsi_replace (gsi_p, bind, true);

  pop_gimplify_context (NULL);
}
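
/* A minimal sketch of the overall effect, assuming hypothetical user code:

       int x = 0;
     #pragma omp parallel shared (x)
       x++;

   The body is outlined into a child function that receives the shared
   data through the generated sender/receiver record, conceptually

       struct .omp_data_s { int x; };

       void child_fn (struct .omp_data_s *.omp_data_i)
       { .omp_data_i->x++; }

   with pass_expand_omp later emitting the libgomp calls
   (GOMP_parallel_start/GOMP_parallel_end for parallel, GOMP_task for
   task).  The name child_fn above is illustrative.  */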

/* Callback for lower_omp_1.  Return non-NULL if *tp needs to be
   regimplified.  If DATA is non-NULL, lower_omp_1 is outside
   of OpenMP context, but with task_shared_vars set.  */

static tree
lower_omp_regimplify_p (tree *tp, int *walk_subtrees,
			void *data)
{
  tree t = *tp;

  /* Any variable with DECL_VALUE_EXPR needs to be regimplified.  */
  if (TREE_CODE (t) == VAR_DECL && data == NULL && DECL_HAS_VALUE_EXPR_P (t))
    return t;

  if (task_shared_vars
      && DECL_P (t)
      && bitmap_bit_p (task_shared_vars, DECL_UID (t)))
    return t;

  /* If a global variable has been privatized, TREE_CONSTANT on
     ADDR_EXPR might be wrong.  */
  if (data == NULL && TREE_CODE (t) == ADDR_EXPR)
    recompute_tree_invariant_for_addr_expr (t);

  *walk_subtrees = !TYPE_P (t) && !DECL_P (t);
  return NULL_TREE;
}

/* Lower the OpenMP directive in the statement pointed at by GSI_P in the
   context described by CTX, dispatching on the statement's GIMPLE code.  */

static void
lower_omp_1 (gimple_stmt_iterator *gsi_p, omp_context *ctx)
{
  gimple stmt = gsi_stmt (*gsi_p);
  struct walk_stmt_info wi;

  if (gimple_has_location (stmt))
    input_location = gimple_location (stmt);

  if (task_shared_vars)
    memset (&wi, '\0', sizeof (wi));

  /* If we have issued syntax errors, avoid doing any heavy lifting.
     Just replace the OpenMP directives with a NOP to avoid
     confusing RTL expansion.  */
  if (errorcount && is_gimple_omp (stmt))
    {
      gsi_replace (gsi_p, gimple_build_nop (), true);
      return;
    }

  switch (gimple_code (stmt))
    {
    case GIMPLE_COND:
      if ((ctx || task_shared_vars)
	  && (walk_tree (gimple_cond_lhs_ptr (stmt), lower_omp_regimplify_p,
			 ctx ? NULL : &wi, NULL)
	      || walk_tree (gimple_cond_rhs_ptr (stmt), lower_omp_regimplify_p,
			    ctx ? NULL : &wi, NULL)))
	gimple_regimplify_operands (stmt, gsi_p);
      break;
    case GIMPLE_CATCH:
      lower_omp (gimple_catch_handler (stmt), ctx);
      break;
    case GIMPLE_EH_FILTER:
      lower_omp (gimple_eh_filter_failure (stmt), ctx);
      break;
    case GIMPLE_TRY:
      lower_omp (gimple_try_eval (stmt), ctx);
      lower_omp (gimple_try_cleanup (stmt), ctx);
      break;
    case GIMPLE_BIND:
      lower_omp (gimple_bind_body (stmt), ctx);
      break;
    case GIMPLE_OMP_PARALLEL:
    case GIMPLE_OMP_TASK:
      ctx = maybe_lookup_ctx (stmt);
      lower_omp_taskreg (gsi_p, ctx);
      break;
    case GIMPLE_OMP_FOR:
      ctx = maybe_lookup_ctx (stmt);
      gcc_assert (ctx);
      lower_omp_for (gsi_p, ctx);
      break;
    case GIMPLE_OMP_SECTIONS:
      ctx = maybe_lookup_ctx (stmt);
      gcc_assert (ctx);
      lower_omp_sections (gsi_p, ctx);
      break;
    case GIMPLE_OMP_SINGLE:
      ctx = maybe_lookup_ctx (stmt);
      gcc_assert (ctx);
      lower_omp_single (gsi_p, ctx);
      break;
    case GIMPLE_OMP_MASTER:
      ctx = maybe_lookup_ctx (stmt);
      gcc_assert (ctx);
      lower_omp_master (gsi_p, ctx);
      break;
    case GIMPLE_OMP_ORDERED:
      ctx = maybe_lookup_ctx (stmt);
      gcc_assert (ctx);
      lower_omp_ordered (gsi_p, ctx);
      break;
    case GIMPLE_OMP_CRITICAL:
      ctx = maybe_lookup_ctx (stmt);
      gcc_assert (ctx);
      lower_omp_critical (gsi_p, ctx);
      break;
    case GIMPLE_OMP_ATOMIC_LOAD:
      if ((ctx || task_shared_vars)
	  && walk_tree (gimple_omp_atomic_load_rhs_ptr (stmt),
			lower_omp_regimplify_p, ctx ? NULL : &wi, NULL))
	gimple_regimplify_operands (stmt, gsi_p);
      break;
    default:
      if ((ctx || task_shared_vars)
	  && walk_gimple_op (stmt, lower_omp_regimplify_p,
			     ctx ? NULL : &wi))
	gimple_regimplify_operands (stmt, gsi_p);
      break;
    }
}

/* Lower all the OpenMP directives found in the statement sequence BODY,
   using context CTX.  */

static void
lower_omp (gimple_seq body, omp_context *ctx)
{
  location_t saved_location = input_location;
  gimple_stmt_iterator gsi;

  for (gsi = gsi_start (body); !gsi_end_p (gsi); gsi_next (&gsi))
    lower_omp_1 (&gsi, ctx);
  input_location = saved_location;
}
\f
/* Main entry point.  */

static unsigned int
execute_lower_omp (void)
{
  gimple_seq body;

  /* This pass always runs, to provide PROP_gimple_lomp.
     But there is nothing to do unless -fopenmp is given.  */
  if (flag_openmp == 0)
    return 0;

  all_contexts = splay_tree_new (splay_tree_compare_pointers, 0,
				 delete_omp_context);

  body = gimple_body (current_function_decl);
  scan_omp (body, NULL);
  gcc_assert (taskreg_nesting_level == 0);

  if (all_contexts->root)
    {
      struct gimplify_ctx gctx;

      if (task_shared_vars)
	push_gimplify_context (&gctx);
      lower_omp (body, NULL);
      if (task_shared_vars)
	pop_gimplify_context (NULL);
    }

  if (all_contexts)
    {
      splay_tree_delete (all_contexts);
      all_contexts = NULL;
    }
  BITMAP_FREE (task_shared_vars);
  return 0;
}

struct gimple_opt_pass pass_lower_omp =
{
 {
  GIMPLE_PASS,
  "omplower",				/* name */
  NULL,					/* gate */
  execute_lower_omp,			/* execute */
  NULL,					/* sub */
  NULL,					/* next */
  0,					/* static_pass_number */
  TV_NONE,				/* tv_id */
  PROP_gimple_any,			/* properties_required */
  PROP_gimple_lomp,			/* properties_provided */
  0,					/* properties_destroyed */
  0,					/* todo_flags_start */
  TODO_dump_func			/* todo_flags_finish */
 }
};
\f
/* The following is a utility to diagnose OpenMP structured block violations.
   It is not part of the "omplower" pass, as that's invoked too late.  It
   should be invoked by the respective front ends after gimplification.  */

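/* For instance (hypothetical user code), both jumps below are structured
   block violations that the two walks further down detect:

       #pragma omp parallel
       {
	 if (cond)
	   goto out;	   (branches out of the parallel body)
       lab:;
       }
       goto lab;	   (branches into the parallel body)
     out:;
*/
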
static splay_tree all_labels;

/* Check for mismatched contexts and generate an error if needed.  Return
   true if an error is detected.  */

static bool
diagnose_sb_0 (gimple_stmt_iterator *gsi_p,
	       gimple branch_ctx, gimple label_ctx)
{
  if (label_ctx == branch_ctx)
    return false;

  /*
     Previously we kept track of the label's entire context in diagnose_sb_[12]
     so we could traverse it and issue a correct "exit" or "enter" error
     message upon a structured block violation.

     We built the context by building a list with tree_cons'ing, but there is
     no easy counterpart in gimple tuples.  It seems like far too much work
     for issuing exit/enter error messages.  If someone really misses the
     distinct error message... patches welcome.
   */

#if 0
  /* Try to avoid confusing the user by producing an error message
     with correct "exit" or "enter" verbiage.  We prefer "exit"
     unless we can show that LABEL_CTX is nested within BRANCH_CTX.  */
  if (branch_ctx == NULL)
    exit_p = false;
  else
    {
      while (label_ctx)
	{
	  if (TREE_VALUE (label_ctx) == branch_ctx)
	    {
	      exit_p = false;
	      break;
	    }
	  label_ctx = TREE_CHAIN (label_ctx);
	}
    }

  if (exit_p)
    error ("invalid exit from OpenMP structured block");
  else
    error ("invalid entry to OpenMP structured block");
#endif

  /* If it's obvious we have an invalid entry, be specific about the error.  */
  if (branch_ctx == NULL)
    error ("invalid entry to OpenMP structured block");
  else
    /* Otherwise, be vague and lazy, but efficient.  */
    error ("invalid branch to/from an OpenMP structured block");

  gsi_replace (gsi_p, gimple_build_nop (), false);
  return true;
}

/* Pass 1: Create a minimal tree of OpenMP structured blocks, and record
   where each label is found.  */

static tree
diagnose_sb_1 (gimple_stmt_iterator *gsi_p, bool *handled_ops_p,
	       struct walk_stmt_info *wi)
{
  gimple context = (gimple) wi->info;
  gimple inner_context;
  gimple stmt = gsi_stmt (*gsi_p);

  *handled_ops_p = true;

  switch (gimple_code (stmt))
    {
    WALK_SUBSTMTS;

    case GIMPLE_OMP_PARALLEL:
    case GIMPLE_OMP_TASK:
    case GIMPLE_OMP_SECTIONS:
    case GIMPLE_OMP_SINGLE:
    case GIMPLE_OMP_SECTION:
    case GIMPLE_OMP_MASTER:
    case GIMPLE_OMP_ORDERED:
    case GIMPLE_OMP_CRITICAL:
      /* The minimal context here is just the current OMP construct.  */
      inner_context = stmt;
      wi->info = inner_context;
      walk_gimple_seq (gimple_omp_body (stmt), diagnose_sb_1, NULL, wi);
      wi->info = context;
      break;

    case GIMPLE_OMP_FOR:
      inner_context = stmt;
      wi->info = inner_context;
      /* gimple_omp_for_{index,initial,final} are all DECLs; no need to
	 walk them.  */
      walk_gimple_seq (gimple_omp_for_pre_body (stmt),
		       diagnose_sb_1, NULL, wi);
      walk_gimple_seq (gimple_omp_body (stmt), diagnose_sb_1, NULL, wi);
      wi->info = context;
      break;

    case GIMPLE_LABEL:
      splay_tree_insert (all_labels, (splay_tree_key) gimple_label_label (stmt),
			 (splay_tree_value) context);
      break;

    default:
      break;
    }

  return NULL_TREE;
}
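
/* As a concrete (hypothetical) example of what pass 1 records: given

       #pragma omp single
       {
       lab:;
       }

   all_labels maps the LABEL_DECL for "lab" to the enclosing
   GIMPLE_OMP_SINGLE statement, which pass 2 then compares against the
   context of every branch targeting "lab".  */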

/* Pass 2: Check each branch and see if its context differs from that of
   the destination label's context.  */

static tree
diagnose_sb_2 (gimple_stmt_iterator *gsi_p, bool *handled_ops_p,
	       struct walk_stmt_info *wi)
{
  gimple context = (gimple) wi->info;
  splay_tree_node n;
  gimple stmt = gsi_stmt (*gsi_p);

  *handled_ops_p = true;

  switch (gimple_code (stmt))
    {
    WALK_SUBSTMTS;

    case GIMPLE_OMP_PARALLEL:
    case GIMPLE_OMP_TASK:
    case GIMPLE_OMP_SECTIONS:
    case GIMPLE_OMP_SINGLE:
    case GIMPLE_OMP_SECTION:
    case GIMPLE_OMP_MASTER:
    case GIMPLE_OMP_ORDERED:
    case GIMPLE_OMP_CRITICAL:
      wi->info = stmt;
      walk_gimple_seq (gimple_omp_body (stmt), diagnose_sb_2, NULL, wi);
      wi->info = context;
      break;

    case GIMPLE_OMP_FOR:
      wi->info = stmt;
      /* gimple_omp_for_{index,initial,final} are all DECLs; no need to
	 walk them.  */
      walk_gimple_seq (gimple_omp_for_pre_body (stmt),
		       diagnose_sb_2, NULL, wi);
      walk_gimple_seq (gimple_omp_body (stmt), diagnose_sb_2, NULL, wi);
      wi->info = context;
      break;

    case GIMPLE_GOTO:
      {
	tree lab = gimple_goto_dest (stmt);
	if (TREE_CODE (lab) != LABEL_DECL)
	  break;

	n = splay_tree_lookup (all_labels, (splay_tree_key) lab);
	diagnose_sb_0 (gsi_p, context, n ? (gimple) n->value : NULL);
      }
      break;

    case GIMPLE_SWITCH:
      {
	unsigned int i;
	for (i = 0; i < gimple_switch_num_labels (stmt); ++i)
	  {
	    tree lab = CASE_LABEL (gimple_switch_label (stmt, i));
	    n = splay_tree_lookup (all_labels, (splay_tree_key) lab);
	    if (n && diagnose_sb_0 (gsi_p, context, (gimple) n->value))
	      break;
	  }
      }
      break;

    case GIMPLE_RETURN:
      diagnose_sb_0 (gsi_p, context, NULL);
      break;

    default:
      break;
    }

  return NULL_TREE;
}
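
/* Note that GIMPLE_RETURN is diagnosed with a NULL label context, so
   (hypothetically)

       #pragma omp parallel
       return;

   is reported as "invalid branch to/from an OpenMP structured block"
   through the same diagnose_sb_0 path as a goto whose destination label
   was never recorded inside any construct.  */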

/* Diagnose invalid branches into or out of OpenMP structured blocks in
   the current function: record every label's context, then check each
   branch against the context of its destination.  */

static unsigned int
diagnose_omp_structured_block_errors (void)
{
  struct walk_stmt_info wi;
  gimple_seq body = gimple_body (current_function_decl);

  all_labels = splay_tree_new (splay_tree_compare_pointers, 0, 0);

  memset (&wi, 0, sizeof (wi));
  walk_gimple_seq (body, diagnose_sb_1, NULL, &wi);

  memset (&wi, 0, sizeof (wi));
  wi.want_locations = true;
  walk_gimple_seq (body, diagnose_sb_2, NULL, &wi);

  splay_tree_delete (all_labels);
  all_labels = NULL;

  return 0;
}

static bool
gate_diagnose_omp_blocks (void)
{
  return flag_openmp != 0;
}

struct gimple_opt_pass pass_diagnose_omp_blocks =
{
 {
  GIMPLE_PASS,
  "*diagnose_omp_blocks",		/* name */
  gate_diagnose_omp_blocks,		/* gate */
  diagnose_omp_structured_block_errors,	/* execute */
  NULL,					/* sub */
  NULL,					/* next */
  0,					/* static_pass_number */
  TV_NONE,				/* tv_id */
  PROP_gimple_any,			/* properties_required */
  0,					/* properties_provided */
  0,					/* properties_destroyed */
  0,					/* todo_flags_start */
  0,					/* todo_flags_finish */
 }
};

#include "gt-omp-low.h"