/* Lowering pass for OpenMP directives.  Converts OpenMP directives
   into explicit calls to the runtime library (libgomp) and data
   marshalling to implement data sharing and copying clauses.
   Contributed by Diego Novillo <dnovillo@redhat.com>

   Copyright (C) 2005, 2006, 2007, 2008 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
#include "rtl.h"
#include "gimple.h"
#include "tree-iterator.h"
#include "tree-inline.h"
#include "langhooks.h"
#include "diagnostic.h"
#include "tree-flow.h"
#include "timevar.h"
#include "flags.h"
#include "function.h"
#include "expr.h"
#include "toplev.h"
#include "tree-pass.h"
#include "ggc.h"
#include "except.h"
#include "splay-tree.h"
#include "optabs.h"
#include "cfgloop.h"

/* Lowering of OpenMP parallel and workshare constructs proceeds in two
   phases.  The first phase scans the function looking for OMP statements
   and then for variables that must be replaced to satisfy data sharing
   clauses.  The second phase expands code for the constructs, as well as
   re-gimplifying things when variables have been replaced with complex
   expressions.

   Final code generation is done by pass_expand_omp.  The flowgraph is
   scanned for parallel regions which are then moved to a new
   function, to be invoked by the thread library.  */
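
/* As a rough illustration of the two phases (names such as .omp_data_o
   and foo.omp_fn.0 are just the conventions used in this file, not fixed
   interfaces): a construct like

	int i;
	#pragma omp parallel shared (i)
	  body;

   has its body outlined into a child function that receives a
   data-sharing struct, and the directive itself becomes libgomp calls
   along the lines of

	.omp_data_o.i = i;
	GOMP_parallel_start (foo.omp_fn.0, &.omp_data_o, 0);
	foo.omp_fn.0 (&.omp_data_o);
	GOMP_parallel_end ();

   The exact entry points emitted (e.g. the combined parallel+workshare
   ones) depend on the region, as determined later in this file.  */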

/* Context structure.  Used to store information about each parallel
   directive in the code.  */

typedef struct omp_context
{
  /* This field must be at the beginning, as we do "inheritance":  Some
     callback functions for tree-inline.c (e.g., omp_copy_decl)
     receive a copy_body_data pointer that is up-casted to an
     omp_context pointer.  */
  copy_body_data cb;

  /* The tree of contexts corresponding to the encountered constructs.  */
  struct omp_context *outer;
  gimple stmt;

  /* Map variables to fields in a structure that allows communication
     between sending and receiving threads.  */
  splay_tree field_map;
  tree record_type;
  tree sender_decl;
  tree receiver_decl;

  /* These are used just by task contexts, if task firstprivate fn is
     needed.  srecord_type is used to communicate from the thread
     that encountered the task construct to task firstprivate fn,
     record_type is allocated by GOMP_task, initialized by task firstprivate
     fn and passed to the task body fn.  */
  splay_tree sfield_map;
  tree srecord_type;

  /* A chain of variables to add to the top-level block surrounding the
     construct.  In the case of a parallel, this is in the child function.  */
  tree block_vars;

  /* What to do with variables with implicitly determined sharing
     attributes.  */
  enum omp_clause_default_kind default_kind;

  /* Nesting depth of this context.  Used to beautify error messages re
     invalid gotos.  The outermost ctx is depth 1, with depth 0 being
     reserved for the main body of the function.  */
  int depth;

  /* True if this parallel directive is nested within another.  */
  bool is_nested;
} omp_context;


struct omp_for_data_loop
{
  tree v, n1, n2, step;
  enum tree_code cond_code;
};

/* A structure describing the main elements of a parallel loop.  */

struct omp_for_data
{
  struct omp_for_data_loop loop;
  tree chunk_size;
  gimple for_stmt;
  tree pre, iter_type;
  int collapse;
  bool have_nowait, have_ordered;
  enum omp_clause_schedule_kind sched_kind;
  struct omp_for_data_loop *loops;
};


static splay_tree all_contexts;
static int taskreg_nesting_level;
struct omp_region *root_omp_region;
static bitmap task_shared_vars;

static void scan_omp (gimple_seq, omp_context *);
static tree scan_omp_1_op (tree *, int *, void *);

#define WALK_SUBSTMTS  \
    case GIMPLE_BIND: \
    case GIMPLE_TRY: \
    case GIMPLE_CATCH: \
    case GIMPLE_EH_FILTER: \
      /* The sub-statements for these should be walked.  */ \
      *handled_ops_p = false; \
      break;

/* Convenience function for calling scan_omp_1_op on tree operands.  */

static inline tree
scan_omp_op (tree *tp, omp_context *ctx)
{
  struct walk_stmt_info wi;

  memset (&wi, 0, sizeof (wi));
  wi.info = ctx;
  wi.want_locations = true;

  return walk_tree (tp, scan_omp_1_op, &wi, NULL);
}

static void lower_omp (gimple_seq, omp_context *);
static tree lookup_decl_in_outer_ctx (tree, omp_context *);
static tree maybe_lookup_decl_in_outer_ctx (tree, omp_context *);

/* Find an OpenMP clause of type KIND within CLAUSES.  */

tree
find_omp_clause (tree clauses, enum omp_clause_code kind)
{
  for (; clauses ; clauses = OMP_CLAUSE_CHAIN (clauses))
    if (OMP_CLAUSE_CODE (clauses) == kind)
      return clauses;

  return NULL_TREE;
}

/* Return true if CTX is for an omp parallel.  */

static inline bool
is_parallel_ctx (omp_context *ctx)
{
  return gimple_code (ctx->stmt) == GIMPLE_OMP_PARALLEL;
}


/* Return true if CTX is for an omp task.  */

static inline bool
is_task_ctx (omp_context *ctx)
{
  return gimple_code (ctx->stmt) == GIMPLE_OMP_TASK;
}


/* Return true if CTX is for an omp parallel or omp task.  */

static inline bool
is_taskreg_ctx (omp_context *ctx)
{
  return gimple_code (ctx->stmt) == GIMPLE_OMP_PARALLEL
	 || gimple_code (ctx->stmt) == GIMPLE_OMP_TASK;
}


/* Return true if REGION is a combined parallel+workshare region.  */

static inline bool
is_combined_parallel (struct omp_region *region)
{
  return region->is_combined_parallel;
}

/* Extract the header elements of parallel loop FOR_STMT and store
   them into *FD.  */

static void
extract_omp_for_data (gimple for_stmt, struct omp_for_data *fd,
		      struct omp_for_data_loop *loops)
{
  tree t, var, *collapse_iter, *collapse_count;
  tree count = NULL_TREE, iter_type = long_integer_type_node;
  struct omp_for_data_loop *loop;
  int i;
  struct omp_for_data_loop dummy_loop;

  fd->for_stmt = for_stmt;
  fd->pre = NULL;
  fd->collapse = gimple_omp_for_collapse (for_stmt);
  if (fd->collapse > 1)
    fd->loops = loops;
  else
    fd->loops = &fd->loop;

  fd->have_nowait = fd->have_ordered = false;
  fd->sched_kind = OMP_CLAUSE_SCHEDULE_STATIC;
  fd->chunk_size = NULL_TREE;
  collapse_iter = NULL;
  collapse_count = NULL;

  for (t = gimple_omp_for_clauses (for_stmt); t ; t = OMP_CLAUSE_CHAIN (t))
    switch (OMP_CLAUSE_CODE (t))
      {
      case OMP_CLAUSE_NOWAIT:
	fd->have_nowait = true;
	break;
      case OMP_CLAUSE_ORDERED:
	fd->have_ordered = true;
	break;
      case OMP_CLAUSE_SCHEDULE:
	fd->sched_kind = OMP_CLAUSE_SCHEDULE_KIND (t);
	fd->chunk_size = OMP_CLAUSE_SCHEDULE_CHUNK_EXPR (t);
	break;
      case OMP_CLAUSE_COLLAPSE:
	if (fd->collapse > 1)
	  {
	    collapse_iter = &OMP_CLAUSE_COLLAPSE_ITERVAR (t);
	    collapse_count = &OMP_CLAUSE_COLLAPSE_COUNT (t);
	  }
      default:
	break;
      }

  /* FIXME: for now map schedule(auto) to schedule(static).
     There should be analysis to determine whether all iterations
     are approximately the same amount of work (then schedule(static)
     is best) or if it varies (then schedule(dynamic,N) is better).  */
  if (fd->sched_kind == OMP_CLAUSE_SCHEDULE_AUTO)
    {
      fd->sched_kind = OMP_CLAUSE_SCHEDULE_STATIC;
      gcc_assert (fd->chunk_size == NULL);
    }
  gcc_assert (fd->collapse == 1 || collapse_iter != NULL);
  if (fd->sched_kind == OMP_CLAUSE_SCHEDULE_RUNTIME)
    gcc_assert (fd->chunk_size == NULL);
  else if (fd->chunk_size == NULL)
    {
      /* We only need to compute a default chunk size for ordered
	 static loops and dynamic loops.  */
      if (fd->sched_kind != OMP_CLAUSE_SCHEDULE_STATIC
	  || fd->have_ordered
	  || fd->collapse > 1)
	fd->chunk_size = (fd->sched_kind == OMP_CLAUSE_SCHEDULE_STATIC)
			 ? integer_zero_node : integer_one_node;
    }

  for (i = 0; i < fd->collapse; i++)
    {
      if (fd->collapse == 1)
	loop = &fd->loop;
      else if (loops != NULL)
	loop = loops + i;
      else
	loop = &dummy_loop;

      loop->v = gimple_omp_for_index (for_stmt, i);
      gcc_assert (SSA_VAR_P (loop->v));
      gcc_assert (TREE_CODE (TREE_TYPE (loop->v)) == INTEGER_TYPE
		  || TREE_CODE (TREE_TYPE (loop->v)) == POINTER_TYPE);
      var = TREE_CODE (loop->v) == SSA_NAME ? SSA_NAME_VAR (loop->v) : loop->v;
      loop->n1 = gimple_omp_for_initial (for_stmt, i);

      loop->cond_code = gimple_omp_for_cond (for_stmt, i);
      loop->n2 = gimple_omp_for_final (for_stmt, i);
      switch (loop->cond_code)
	{
	case LT_EXPR:
	case GT_EXPR:
	  break;
	case LE_EXPR:
	  if (POINTER_TYPE_P (TREE_TYPE (loop->n2)))
	    loop->n2 = fold_build2 (POINTER_PLUS_EXPR, TREE_TYPE (loop->n2),
				    loop->n2, size_one_node);
	  else
	    loop->n2 = fold_build2 (PLUS_EXPR, TREE_TYPE (loop->n2), loop->n2,
				    build_int_cst (TREE_TYPE (loop->n2), 1));
	  loop->cond_code = LT_EXPR;
	  break;
	case GE_EXPR:
	  if (POINTER_TYPE_P (TREE_TYPE (loop->n2)))
	    loop->n2 = fold_build2 (POINTER_PLUS_EXPR, TREE_TYPE (loop->n2),
				    loop->n2, size_int (-1));
	  else
	    loop->n2 = fold_build2 (MINUS_EXPR, TREE_TYPE (loop->n2), loop->n2,
				    build_int_cst (TREE_TYPE (loop->n2), 1));
	  loop->cond_code = GT_EXPR;
	  break;
	default:
	  gcc_unreachable ();
	}

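      /* After the normalization above, only LT_EXPR and GT_EXPR remain:
	 e.g. "i <= N" has been rewritten to use "i < N + 1" and "i >= N"
	 to use "i > N - 1" (a restatement of the switch for clarity).  */
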
      t = gimple_omp_for_incr (for_stmt, i);
      gcc_assert (TREE_OPERAND (t, 0) == var);
      switch (TREE_CODE (t))
	{
	case PLUS_EXPR:
	case POINTER_PLUS_EXPR:
	  loop->step = TREE_OPERAND (t, 1);
	  break;
	case MINUS_EXPR:
	  loop->step = TREE_OPERAND (t, 1);
	  loop->step = fold_build1 (NEGATE_EXPR, TREE_TYPE (loop->step),
				    loop->step);
	  break;
	default:
	  gcc_unreachable ();
	}

      if (iter_type != long_long_unsigned_type_node)
	{
	  if (POINTER_TYPE_P (TREE_TYPE (loop->v)))
	    iter_type = long_long_unsigned_type_node;
	  else if (TYPE_UNSIGNED (TREE_TYPE (loop->v))
		   && TYPE_PRECISION (TREE_TYPE (loop->v))
		      >= TYPE_PRECISION (iter_type))
	    {
	      tree n;

	      if (loop->cond_code == LT_EXPR)
		n = fold_build2 (PLUS_EXPR, TREE_TYPE (loop->v),
				 loop->n2, loop->step);
	      else
		n = loop->n1;
	      if (TREE_CODE (n) != INTEGER_CST
		  || tree_int_cst_lt (TYPE_MAX_VALUE (iter_type), n))
		iter_type = long_long_unsigned_type_node;
	    }
	  else if (TYPE_PRECISION (TREE_TYPE (loop->v))
		   > TYPE_PRECISION (iter_type))
	    {
	      tree n1, n2;

	      if (loop->cond_code == LT_EXPR)
		{
		  n1 = loop->n1;
		  n2 = fold_build2 (PLUS_EXPR, TREE_TYPE (loop->v),
				    loop->n2, loop->step);
		}
	      else
		{
		  n1 = fold_build2 (MINUS_EXPR, TREE_TYPE (loop->v),
				    loop->n2, loop->step);
		  n2 = loop->n1;
		}
	      if (TREE_CODE (n1) != INTEGER_CST
		  || TREE_CODE (n2) != INTEGER_CST
		  || !tree_int_cst_lt (TYPE_MIN_VALUE (iter_type), n1)
		  || !tree_int_cst_lt (n2, TYPE_MAX_VALUE (iter_type)))
		iter_type = long_long_unsigned_type_node;
	    }
	}

      if (collapse_count && *collapse_count == NULL)
	{
	  if ((i == 0 || count != NULL_TREE)
	      && TREE_CODE (TREE_TYPE (loop->v)) == INTEGER_TYPE
	      && TREE_CONSTANT (loop->n1)
	      && TREE_CONSTANT (loop->n2)
	      && TREE_CODE (loop->step) == INTEGER_CST)
	    {
	      tree itype = TREE_TYPE (loop->v);

	      if (POINTER_TYPE_P (itype))
		itype
		  = lang_hooks.types.type_for_size (TYPE_PRECISION (itype), 0);
	      t = build_int_cst (itype, (loop->cond_code == LT_EXPR ? -1 : 1));
	      t = fold_build2 (PLUS_EXPR, itype,
			       fold_convert (itype, loop->step), t);
	      t = fold_build2 (PLUS_EXPR, itype, t,
			       fold_convert (itype, loop->n2));
	      t = fold_build2 (MINUS_EXPR, itype, t,
			       fold_convert (itype, loop->n1));
	      if (TYPE_UNSIGNED (itype) && loop->cond_code == GT_EXPR)
		t = fold_build2 (TRUNC_DIV_EXPR, itype,
				 fold_build1 (NEGATE_EXPR, itype, t),
				 fold_build1 (NEGATE_EXPR, itype,
					      fold_convert (itype,
							    loop->step)));
	      else
		t = fold_build2 (TRUNC_DIV_EXPR, itype, t,
				 fold_convert (itype, loop->step));
	      t = fold_convert (long_long_unsigned_type_node, t);
	      if (count != NULL_TREE)
		count = fold_build2 (MULT_EXPR, long_long_unsigned_type_node,
				     count, t);
	      else
		count = t;
	      if (TREE_CODE (count) != INTEGER_CST)
		count = NULL_TREE;
	    }
	  else
	    count = NULL_TREE;
	}
    }
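
  /* A concrete instance of the count computation above (illustrative
     only): for a loop normalized to "i = 0; i < 10; i += 3" the code
     builds (step + -1 + n2 - n1) / step = (3 - 1 + 10 - 0) / 3 = 4,
     which is indeed the iteration count; for collapsed nests the
     per-loop counts are multiplied together.  */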

  if (count)
    {
      if (!tree_int_cst_lt (count, TYPE_MAX_VALUE (long_integer_type_node)))
	iter_type = long_long_unsigned_type_node;
      else
	iter_type = long_integer_type_node;
    }
  else if (collapse_iter && *collapse_iter != NULL)
    iter_type = TREE_TYPE (*collapse_iter);
  fd->iter_type = iter_type;
  if (collapse_iter && *collapse_iter == NULL)
    *collapse_iter = create_tmp_var (iter_type, ".iter");
  if (collapse_count && *collapse_count == NULL)
    {
      if (count)
	*collapse_count = fold_convert (iter_type, count);
      else
	*collapse_count = create_tmp_var (iter_type, ".count");
    }

  if (fd->collapse > 1)
    {
      fd->loop.v = *collapse_iter;
      fd->loop.n1 = build_int_cst (TREE_TYPE (fd->loop.v), 0);
      fd->loop.n2 = *collapse_count;
      fd->loop.step = build_int_cst (TREE_TYPE (fd->loop.v), 1);
      fd->loop.cond_code = LT_EXPR;
    }
}

/* Given two blocks PAR_ENTRY_BB and WS_ENTRY_BB such that WS_ENTRY_BB
   is the immediate dominator of PAR_ENTRY_BB, return true if there
   are no data dependencies that would prevent expanding the parallel
   directive at PAR_ENTRY_BB as a combined parallel+workshare region.

   When expanding a combined parallel+workshare region, the call to
   the child function may need additional arguments in the case of
   GIMPLE_OMP_FOR regions.  In some cases, these arguments are
   computed out of variables passed in from the parent to the child
   via 'struct .omp_data_s'.  For instance:

	#pragma omp parallel for schedule (guided, i * 4)
	for (j ...)

   Is lowered into:

	# BLOCK 2 (PAR_ENTRY_BB)
	.omp_data_o.i = i;
	#pragma omp parallel [child fn: bar.omp_fn.0 ( ..., D.1598)

	# BLOCK 3 (WS_ENTRY_BB)
	.omp_data_i = &.omp_data_o;
	D.1667 = .omp_data_i->i;
	D.1598 = D.1667 * 4;
	#pragma omp for schedule (guided, D.1598)

   When we outline the parallel region, the call to the child function
   'bar.omp_fn.0' will need the value D.1598 in its argument list, but
   that value is computed *after* the call site.  So, in principle we
   cannot do the transformation.

   To see whether the code in WS_ENTRY_BB blocks the combined
   parallel+workshare call, we collect all the variables used in the
   GIMPLE_OMP_FOR header and check whether they appear on the LHS of
   any statement in WS_ENTRY_BB.  If so, then we cannot emit the
   combined call.

   FIXME.  If we had the SSA form built at this point, we could merely
   hoist the code in block 3 into block 2 and be done with it.  But at
   this point we don't have dataflow information and though we could
   hack something up here, it is really not worth the aggravation.  */

static bool
workshare_safe_to_combine_p (basic_block par_entry_bb, basic_block ws_entry_bb)
{
  struct omp_for_data fd;
  gimple par_stmt, ws_stmt;

  par_stmt = last_stmt (par_entry_bb);
  ws_stmt = last_stmt (ws_entry_bb);

  if (gimple_code (ws_stmt) == GIMPLE_OMP_SECTIONS)
    return true;

  gcc_assert (gimple_code (ws_stmt) == GIMPLE_OMP_FOR);

  extract_omp_for_data (ws_stmt, &fd, NULL);

  if (fd.collapse > 1 && TREE_CODE (fd.loop.n2) != INTEGER_CST)
    return false;
  if (fd.iter_type != long_integer_type_node)
    return false;

  /* FIXME.  We give up too easily here.  If any of these arguments
     are not constants, they will likely involve variables that have
     been mapped into fields of .omp_data_s for sharing with the child
     function.  With appropriate data flow, it would be possible to
     see through this.  */
  if (!is_gimple_min_invariant (fd.loop.n1)
      || !is_gimple_min_invariant (fd.loop.n2)
      || !is_gimple_min_invariant (fd.loop.step)
      || (fd.chunk_size && !is_gimple_min_invariant (fd.chunk_size)))
    return false;

  return true;
}

/* Collect additional arguments needed to emit a combined
   parallel+workshare call.  WS_STMT is the workshare directive being
   expanded.  */

static tree
get_ws_args_for (gimple ws_stmt)
{
  tree t;

  if (gimple_code (ws_stmt) == GIMPLE_OMP_FOR)
    {
      struct omp_for_data fd;
      tree ws_args;

      extract_omp_for_data (ws_stmt, &fd, NULL);

      ws_args = NULL_TREE;
      if (fd.chunk_size)
	{
	  t = fold_convert (long_integer_type_node, fd.chunk_size);
	  ws_args = tree_cons (NULL, t, ws_args);
	}

      t = fold_convert (long_integer_type_node, fd.loop.step);
      ws_args = tree_cons (NULL, t, ws_args);

      t = fold_convert (long_integer_type_node, fd.loop.n2);
      ws_args = tree_cons (NULL, t, ws_args);

      t = fold_convert (long_integer_type_node, fd.loop.n1);
      ws_args = tree_cons (NULL, t, ws_args);

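      /* Because tree_cons prepends, the finished list reads n1, n2, step
	 and then chunk_size if present -- the order in which the combined
	 GOMP_parallel_loop_*_start entry points take their loop bounds,
	 increment and chunk (a descriptive note, not a change in
	 behavior).  */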
      return ws_args;
    }
  else if (gimple_code (ws_stmt) == GIMPLE_OMP_SECTIONS)
    {
      /* Number of sections is equal to the number of edges from the
	 GIMPLE_OMP_SECTIONS_SWITCH statement, except for the one to
	 the exit of the sections region.  */
      basic_block bb = single_succ (gimple_bb (ws_stmt));
      t = build_int_cst (unsigned_type_node, EDGE_COUNT (bb->succs) - 1);
      t = tree_cons (NULL, t, NULL);
      return t;
    }

  gcc_unreachable ();
}

/* Discover whether REGION is a combined parallel+workshare region.  */

static void
determine_parallel_type (struct omp_region *region)
{
  basic_block par_entry_bb, par_exit_bb;
  basic_block ws_entry_bb, ws_exit_bb;

  if (region == NULL || region->inner == NULL
      || region->exit == NULL || region->inner->exit == NULL
      || region->inner->cont == NULL)
    return;

  /* We only support parallel+for and parallel+sections.  */
  if (region->type != GIMPLE_OMP_PARALLEL
      || (region->inner->type != GIMPLE_OMP_FOR
	  && region->inner->type != GIMPLE_OMP_SECTIONS))
    return;

  /* Check for perfect nesting PAR_ENTRY_BB -> WS_ENTRY_BB and
     WS_EXIT_BB -> PAR_EXIT_BB.  */
  par_entry_bb = region->entry;
  par_exit_bb = region->exit;
  ws_entry_bb = region->inner->entry;
  ws_exit_bb = region->inner->exit;

  if (single_succ (par_entry_bb) == ws_entry_bb
      && single_succ (ws_exit_bb) == par_exit_bb
      && workshare_safe_to_combine_p (par_entry_bb, ws_entry_bb)
      && (gimple_omp_parallel_combined_p (last_stmt (par_entry_bb))
	  || (last_and_only_stmt (ws_entry_bb)
	      && last_and_only_stmt (par_exit_bb))))
    {
      gimple ws_stmt = last_stmt (ws_entry_bb);

      if (region->inner->type == GIMPLE_OMP_FOR)
	{
	  /* If this is a combined parallel loop, we need to determine
	     whether or not to use the combined library calls.  There
	     are two cases where we do not apply the transformation:
	     static loops and any kind of ordered loop.  In the first
	     case, we already open code the loop so there is no need
	     to do anything else.  In the latter case, the combined
	     parallel loop call would still need extra synchronization
	     to implement ordered semantics, so there would not be any
	     gain in using the combined call.  */
	  tree clauses = gimple_omp_for_clauses (ws_stmt);
	  tree c = find_omp_clause (clauses, OMP_CLAUSE_SCHEDULE);
	  if (c == NULL
	      || OMP_CLAUSE_SCHEDULE_KIND (c) == OMP_CLAUSE_SCHEDULE_STATIC
	      || find_omp_clause (clauses, OMP_CLAUSE_ORDERED))
	    {
	      region->is_combined_parallel = false;
	      region->inner->is_combined_parallel = false;
	      return;
	    }
	}

      region->is_combined_parallel = true;
      region->inner->is_combined_parallel = true;
      region->ws_args = get_ws_args_for (ws_stmt);
    }
}

/* Return true if EXPR is variable sized.  */

static inline bool
is_variable_sized (const_tree expr)
{
  return !TREE_CONSTANT (TYPE_SIZE_UNIT (TREE_TYPE (expr)));
}

/* Return true if DECL is a reference type.  */

static inline bool
is_reference (tree decl)
{
  return lang_hooks.decls.omp_privatize_by_reference (decl);
}

/* Lookup variables in the decl or field splay trees.  The "maybe" form
   allows for the variable form to not have been entered, otherwise we
   assert that the variable must have been entered.  */

static inline tree
lookup_decl (tree var, omp_context *ctx)
{
  tree *n;
  n = (tree *) pointer_map_contains (ctx->cb.decl_map, var);
  return *n;
}

static inline tree
maybe_lookup_decl (const_tree var, omp_context *ctx)
{
  tree *n;
  n = (tree *) pointer_map_contains (ctx->cb.decl_map, var);
  return n ? *n : NULL_TREE;
}

static inline tree
lookup_field (tree var, omp_context *ctx)
{
  splay_tree_node n;
  n = splay_tree_lookup (ctx->field_map, (splay_tree_key) var);
  return (tree) n->value;
}

static inline tree
lookup_sfield (tree var, omp_context *ctx)
{
  splay_tree_node n;
  n = splay_tree_lookup (ctx->sfield_map
			 ? ctx->sfield_map : ctx->field_map,
			 (splay_tree_key) var);
  return (tree) n->value;
}

static inline tree
maybe_lookup_field (tree var, omp_context *ctx)
{
  splay_tree_node n;
  n = splay_tree_lookup (ctx->field_map, (splay_tree_key) var);
  return n ? (tree) n->value : NULL_TREE;
}

/* Return true if DECL should be copied by pointer.  SHARED_CTX is
   the parallel context if DECL is to be shared.  */

static bool
use_pointer_for_field (tree decl, omp_context *shared_ctx)
{
  if (AGGREGATE_TYPE_P (TREE_TYPE (decl)))
    return true;

  /* We can only use copy-in/copy-out semantics for shared variables
     when we know the value is not accessible from an outer scope.  */
  if (shared_ctx)
    {
      /* ??? Trivially accessible from anywhere.  But why would we even
	 be passing an address in this case?  Should we simply assert
	 this to be false, or should we have a cleanup pass that removes
	 these from the list of mappings?  */
      if (TREE_STATIC (decl) || DECL_EXTERNAL (decl))
	return true;

      /* For variables with DECL_HAS_VALUE_EXPR_P set, we cannot tell
	 without analyzing the expression whether or not its location
	 is accessible to anyone else.  In the case of nested parallel
	 regions it certainly may be.  */
      if (TREE_CODE (decl) != RESULT_DECL && DECL_HAS_VALUE_EXPR_P (decl))
	return true;

      /* Do not use copy-in/copy-out for variables that have their
	 address taken.  */
      if (TREE_ADDRESSABLE (decl))
	return true;

      /* Disallow copy-in/out in nested parallel if
	 decl is shared in outer parallel, otherwise
	 each thread could store the shared variable
	 in its own copy-in location, making the
	 variable no longer really shared.  */
      if (!TREE_READONLY (decl) && shared_ctx->is_nested)
	{
	  omp_context *up;

	  for (up = shared_ctx->outer; up; up = up->outer)
	    if (is_taskreg_ctx (up) && maybe_lookup_decl (decl, up))
	      break;

	  if (up)
	    {
	      tree c;

	      for (c = gimple_omp_taskreg_clauses (up->stmt);
		   c; c = OMP_CLAUSE_CHAIN (c))
		if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_SHARED
		    && OMP_CLAUSE_DECL (c) == decl)
		  break;

	      if (c)
		return true;
	    }
	}
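
      /* To make the hazard above concrete (purely illustrative): given

	     int x;
	     #pragma omp parallel shared (x)	// outer, x shared here
	       #pragma omp parallel shared (x)	// inner, nested
		 x++;

	 copying X in and out of the inner region would give each inner
	 team its own location for X, so updates could be lost; passing
	 X by pointer preserves a single shared location.  */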

      /* For tasks avoid using copy-in/out, unless they are readonly
	 (in which case just copy-in is used).  As tasks can be
	 deferred or executed in a different thread, when GOMP_task
	 returns, the task hasn't necessarily terminated.  */
      if (!TREE_READONLY (decl) && is_task_ctx (shared_ctx))
	{
	  tree outer = maybe_lookup_decl_in_outer_ctx (decl, shared_ctx);
	  if (is_gimple_reg (outer))
	    {
	      /* Taking address of OUTER in lower_send_shared_vars
		 might need regimplification of everything that uses the
		 variable.  */
	      if (!task_shared_vars)
		task_shared_vars = BITMAP_ALLOC (NULL);
	      bitmap_set_bit (task_shared_vars, DECL_UID (outer));
	      TREE_ADDRESSABLE (outer) = 1;
	    }
	  return true;
	}
    }

  return false;
}

/* Create a new VAR_DECL and copy information from VAR to it.  */

tree
copy_var_decl (tree var, tree name, tree type)
{
  tree copy = build_decl (VAR_DECL, name, type);

  TREE_ADDRESSABLE (copy) = TREE_ADDRESSABLE (var);
  TREE_THIS_VOLATILE (copy) = TREE_THIS_VOLATILE (var);
  DECL_GIMPLE_REG_P (copy) = DECL_GIMPLE_REG_P (var);
  DECL_NO_TBAA_P (copy) = DECL_NO_TBAA_P (var);
  DECL_ARTIFICIAL (copy) = DECL_ARTIFICIAL (var);
  DECL_IGNORED_P (copy) = DECL_IGNORED_P (var);
  DECL_CONTEXT (copy) = DECL_CONTEXT (var);
  DECL_SOURCE_LOCATION (copy) = DECL_SOURCE_LOCATION (var);
  TREE_USED (copy) = 1;
  DECL_SEEN_IN_BIND_EXPR_P (copy) = 1;

  return copy;
}

/* Construct a new automatic decl similar to VAR.  */

static tree
omp_copy_decl_2 (tree var, tree name, tree type, omp_context *ctx)
{
  tree copy = copy_var_decl (var, name, type);

  DECL_CONTEXT (copy) = current_function_decl;
  TREE_CHAIN (copy) = ctx->block_vars;
  ctx->block_vars = copy;

  return copy;
}

static tree
omp_copy_decl_1 (tree var, omp_context *ctx)
{
  return omp_copy_decl_2 (var, DECL_NAME (var), TREE_TYPE (var), ctx);
}

/* Build tree nodes to access the field for VAR on the receiver side.  */
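/* Concretely, the reference built below is roughly .omp_data_i->var, or
   *.omp_data_i->var when the field holds VAR's address (BY_REF); the
   .omp_data_i name follows the receiver parameter created in
   create_omp_child_function (a descriptive gloss, not new behavior).  */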

static tree
build_receiver_ref (tree var, bool by_ref, omp_context *ctx)
{
  tree x, field = lookup_field (var, ctx);

  /* If the receiver record type was remapped in the child function,
     remap the field into the new record type.  */
  x = maybe_lookup_field (field, ctx);
  if (x != NULL)
    field = x;

  x = build_fold_indirect_ref (ctx->receiver_decl);
  x = build3 (COMPONENT_REF, TREE_TYPE (field), x, field, NULL);
  if (by_ref)
    x = build_fold_indirect_ref (x);

  return x;
}

/* Build tree nodes to access VAR in the scope outer to CTX.  In the case
   of a parallel, this is a component reference; for workshare constructs
   this is some variable.  */

static tree
build_outer_var_ref (tree var, omp_context *ctx)
{
  tree x;

  if (is_global_var (maybe_lookup_decl_in_outer_ctx (var, ctx)))
    x = var;
  else if (is_variable_sized (var))
    {
      x = TREE_OPERAND (DECL_VALUE_EXPR (var), 0);
      x = build_outer_var_ref (x, ctx);
      x = build_fold_indirect_ref (x);
    }
  else if (is_taskreg_ctx (ctx))
    {
      bool by_ref = use_pointer_for_field (var, NULL);
      x = build_receiver_ref (var, by_ref, ctx);
    }
  else if (ctx->outer)
    x = lookup_decl (var, ctx->outer);
  else if (is_reference (var))
    /* This can happen with orphaned constructs.  If var is reference, it is
       possible it is shared and as such valid.  */
    x = var;
  else
    gcc_unreachable ();

  if (is_reference (var))
    x = build_fold_indirect_ref (x);

  return x;
}

/* Build tree nodes to access the field for VAR on the sender side.  */

static tree
build_sender_ref (tree var, omp_context *ctx)
{
  tree field = lookup_sfield (var, ctx);
  return build3 (COMPONENT_REF, TREE_TYPE (field),
		 ctx->sender_decl, field, NULL);
}

/* Add a new field for VAR inside the structure CTX->SENDER_DECL.  */
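/* MASK is read as a bit pair (an editorial gloss matching the tests on
   MASK below): bit 0 requests a field in the receiver record
   CTX->RECORD_TYPE, tracked in CTX->FIELD_MAP; bit 1 requests one in the
   sender record CTX->SRECORD_TYPE, tracked in CTX->SFIELD_MAP; and
   MASK == 3 requests both.  */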

static void
install_var_field (tree var, bool by_ref, int mask, omp_context *ctx)
{
  tree field, type, sfield = NULL_TREE;

  gcc_assert ((mask & 1) == 0
	      || !splay_tree_lookup (ctx->field_map, (splay_tree_key) var));
  gcc_assert ((mask & 2) == 0 || !ctx->sfield_map
	      || !splay_tree_lookup (ctx->sfield_map, (splay_tree_key) var));

  type = TREE_TYPE (var);
  if (by_ref)
    type = build_pointer_type (type);
  else if ((mask & 3) == 1 && is_reference (var))
    type = TREE_TYPE (type);

  field = build_decl (FIELD_DECL, DECL_NAME (var), type);

  /* Remember what variable this field was created for.  This does have a
     side effect of making dwarf2out ignore this member, so for helpful
     debugging we clear it later in delete_omp_context.  */
  DECL_ABSTRACT_ORIGIN (field) = var;
  if (type == TREE_TYPE (var))
    {
      DECL_ALIGN (field) = DECL_ALIGN (var);
      DECL_USER_ALIGN (field) = DECL_USER_ALIGN (var);
      TREE_THIS_VOLATILE (field) = TREE_THIS_VOLATILE (var);
    }
  else
    DECL_ALIGN (field) = TYPE_ALIGN (type);

  if ((mask & 3) == 3)
    {
      insert_field_into_struct (ctx->record_type, field);
      if (ctx->srecord_type)
	{
	  sfield = build_decl (FIELD_DECL, DECL_NAME (var), type);
	  DECL_ABSTRACT_ORIGIN (sfield) = var;
	  DECL_ALIGN (sfield) = DECL_ALIGN (field);
	  DECL_USER_ALIGN (sfield) = DECL_USER_ALIGN (field);
	  TREE_THIS_VOLATILE (sfield) = TREE_THIS_VOLATILE (field);
	  insert_field_into_struct (ctx->srecord_type, sfield);
	}
    }
  else
    {
      if (ctx->srecord_type == NULL_TREE)
	{
	  tree t;

	  ctx->srecord_type = lang_hooks.types.make_type (RECORD_TYPE);
	  ctx->sfield_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
	  for (t = TYPE_FIELDS (ctx->record_type); t ; t = TREE_CHAIN (t))
	    {
	      sfield = build_decl (FIELD_DECL, DECL_NAME (t), TREE_TYPE (t));
	      DECL_ABSTRACT_ORIGIN (sfield) = DECL_ABSTRACT_ORIGIN (t);
	      insert_field_into_struct (ctx->srecord_type, sfield);
	      splay_tree_insert (ctx->sfield_map,
				 (splay_tree_key) DECL_ABSTRACT_ORIGIN (t),
				 (splay_tree_value) sfield);
	    }
	}
      sfield = field;
      insert_field_into_struct ((mask & 1) ? ctx->record_type
				: ctx->srecord_type, field);
    }

  if (mask & 1)
    splay_tree_insert (ctx->field_map, (splay_tree_key) var,
		       (splay_tree_value) field);
  if ((mask & 2) && ctx->sfield_map)
    splay_tree_insert (ctx->sfield_map, (splay_tree_key) var,
		       (splay_tree_value) sfield);
}

static tree
install_var_local (tree var, omp_context *ctx)
{
  tree new_var = omp_copy_decl_1 (var, ctx);
  insert_decl_map (&ctx->cb, var, new_var);
  return new_var;
}

/* Adjust the replacement for DECL in CTX for the new context.  This means
   copying the DECL_VALUE_EXPR, and fixing up the type.  */

static void
fixup_remapped_decl (tree decl, omp_context *ctx, bool private_debug)
{
  tree new_decl, size;

  new_decl = lookup_decl (decl, ctx);

  TREE_TYPE (new_decl) = remap_type (TREE_TYPE (decl), &ctx->cb);

  if ((!TREE_CONSTANT (DECL_SIZE (new_decl)) || private_debug)
      && DECL_HAS_VALUE_EXPR_P (decl))
    {
      tree ve = DECL_VALUE_EXPR (decl);
      walk_tree (&ve, copy_tree_body_r, &ctx->cb, NULL);
      SET_DECL_VALUE_EXPR (new_decl, ve);
      DECL_HAS_VALUE_EXPR_P (new_decl) = 1;
    }

  if (!TREE_CONSTANT (DECL_SIZE (new_decl)))
    {
      size = remap_decl (DECL_SIZE (decl), &ctx->cb);
      if (size == error_mark_node)
	size = TYPE_SIZE (TREE_TYPE (new_decl));
      DECL_SIZE (new_decl) = size;

      size = remap_decl (DECL_SIZE_UNIT (decl), &ctx->cb);
      if (size == error_mark_node)
	size = TYPE_SIZE_UNIT (TREE_TYPE (new_decl));
      DECL_SIZE_UNIT (new_decl) = size;
    }
}

/* The callback for remap_decl.  Search all containing contexts for a
   mapping of the variable; this avoids having to duplicate the splay
   tree ahead of time.  We know a mapping doesn't already exist in the
   given context.  Create new mappings to implement default semantics.  */

static tree
omp_copy_decl (tree var, copy_body_data *cb)
{
  omp_context *ctx = (omp_context *) cb;
  tree new_var;

  if (TREE_CODE (var) == LABEL_DECL)
    {
      new_var = create_artificial_label ();
      DECL_CONTEXT (new_var) = current_function_decl;
      insert_decl_map (&ctx->cb, var, new_var);
      return new_var;
    }

  while (!is_taskreg_ctx (ctx))
    {
      ctx = ctx->outer;
      if (ctx == NULL)
	return var;
      new_var = maybe_lookup_decl (var, ctx);
      if (new_var)
	return new_var;
    }

  if (is_global_var (var) || decl_function_context (var) != ctx->cb.src_fn)
    return var;

  return error_mark_node;
}


/* Debugging dumps for parallel regions.  */
void dump_omp_region (FILE *, struct omp_region *, int);
void debug_omp_region (struct omp_region *);
void debug_all_omp_regions (void);

/* Dump the parallel region tree rooted at REGION.  */

void
dump_omp_region (FILE *file, struct omp_region *region, int indent)
{
  fprintf (file, "%*sbb %d: %s\n", indent, "", region->entry->index,
	   gimple_code_name[region->type]);

  if (region->inner)
    dump_omp_region (file, region->inner, indent + 4);

  if (region->cont)
    {
      fprintf (file, "%*sbb %d: GIMPLE_OMP_CONTINUE\n", indent, "",
	       region->cont->index);
    }

  if (region->exit)
    fprintf (file, "%*sbb %d: GIMPLE_OMP_RETURN\n", indent, "",
	     region->exit->index);
  else
    fprintf (file, "%*s[no exit marker]\n", indent, "");

  if (region->next)
    dump_omp_region (file, region->next, indent);
}

void
debug_omp_region (struct omp_region *region)
{
  dump_omp_region (stderr, region, 0);
}

void
debug_all_omp_regions (void)
{
  dump_omp_region (stderr, root_omp_region, 0);
}


/* Create a new parallel region starting at STMT inside region PARENT.  */

struct omp_region *
new_omp_region (basic_block bb, enum gimple_code type,
		struct omp_region *parent)
{
  struct omp_region *region = XCNEW (struct omp_region);

  region->outer = parent;
  region->entry = bb;
  region->type = type;

  if (parent)
    {
      /* This is a nested region.  Add it to the list of inner
	 regions in PARENT.  */
      region->next = parent->inner;
      parent->inner = region;
    }
  else
    {
      /* This is a toplevel region.  Add it to the list of toplevel
	 regions in ROOT_OMP_REGION.  */
      region->next = root_omp_region;
      root_omp_region = region;
    }

  return region;
}

/* Release the memory associated with the region tree rooted at REGION.  */

static void
free_omp_region_1 (struct omp_region *region)
{
  struct omp_region *i, *n;

  for (i = region->inner; i ; i = n)
    {
      n = i->next;
      free_omp_region_1 (i);
    }

  free (region);
}

/* Release the memory for the entire omp region tree.  */

void
free_omp_regions (void)
{
  struct omp_region *r, *n;
  for (r = root_omp_region; r ; r = n)
    {
      n = r->next;
      free_omp_region_1 (r);
    }
  root_omp_region = NULL;
}

/* Create a new context, with OUTER_CTX being the surrounding context.  */

static omp_context *
new_omp_context (gimple stmt, omp_context *outer_ctx)
{
  omp_context *ctx = XCNEW (omp_context);

  splay_tree_insert (all_contexts, (splay_tree_key) stmt,
		     (splay_tree_value) ctx);
  ctx->stmt = stmt;

  if (outer_ctx)
    {
      ctx->outer = outer_ctx;
      ctx->cb = outer_ctx->cb;
      ctx->cb.block = NULL;
      ctx->depth = outer_ctx->depth + 1;
    }
  else
    {
      ctx->cb.src_fn = current_function_decl;
      ctx->cb.dst_fn = current_function_decl;
      ctx->cb.src_node = cgraph_node (current_function_decl);
      ctx->cb.dst_node = ctx->cb.src_node;
      ctx->cb.src_cfun = cfun;
      ctx->cb.copy_decl = omp_copy_decl;
      ctx->cb.eh_region = -1;
      ctx->cb.transform_call_graph_edges = CB_CGE_MOVE;
      ctx->depth = 1;
    }

  ctx->cb.decl_map = pointer_map_create ();

  return ctx;
}

static gimple_seq maybe_catch_exception (gimple_seq);

/* Finalize task copyfn.  */

static void
finalize_task_copyfn (gimple task_stmt)
{
  struct function *child_cfun;
  tree child_fn, old_fn;
  gimple_seq seq, new_seq;
  gimple bind;

  child_fn = gimple_omp_task_copy_fn (task_stmt);
  if (child_fn == NULL_TREE)
    return;

  child_cfun = DECL_STRUCT_FUNCTION (child_fn);

  /* Inform the callgraph about the new function.  */
  DECL_STRUCT_FUNCTION (child_fn)->curr_properties
    = cfun->curr_properties;

  old_fn = current_function_decl;
  push_cfun (child_cfun);
  current_function_decl = child_fn;
  bind = gimplify_body (&DECL_SAVED_TREE (child_fn), child_fn, false);
  seq = gimple_seq_alloc ();
  gimple_seq_add_stmt (&seq, bind);
  new_seq = maybe_catch_exception (seq);
  if (new_seq != seq)
    {
      bind = gimple_build_bind (NULL, new_seq, NULL);
      seq = gimple_seq_alloc ();
      gimple_seq_add_stmt (&seq, bind);
    }
  gimple_set_body (child_fn, seq);
  pop_cfun ();
  current_function_decl = old_fn;

  cgraph_add_new_function (child_fn, false);
}

/* Destroy an omp_context data structure.  Called through the splay tree
   value delete callback.  */

static void
delete_omp_context (splay_tree_value value)
{
  omp_context *ctx = (omp_context *) value;

  pointer_map_destroy (ctx->cb.decl_map);

  if (ctx->field_map)
    splay_tree_delete (ctx->field_map);
  if (ctx->sfield_map)
    splay_tree_delete (ctx->sfield_map);

  /* We hijacked DECL_ABSTRACT_ORIGIN earlier.  We need to clear it before
     it produces corrupt debug information.  */
  if (ctx->record_type)
    {
      tree t;
      for (t = TYPE_FIELDS (ctx->record_type); t ; t = TREE_CHAIN (t))
	DECL_ABSTRACT_ORIGIN (t) = NULL;
    }
  if (ctx->srecord_type)
    {
      tree t;
      for (t = TYPE_FIELDS (ctx->srecord_type); t ; t = TREE_CHAIN (t))
	DECL_ABSTRACT_ORIGIN (t) = NULL;
    }

  if (is_task_ctx (ctx))
    finalize_task_copyfn (ctx->stmt);

  XDELETE (ctx);
}

/* Fix up RECEIVER_DECL with a type that has been remapped to the child
   context.  */

static void
fixup_child_record_type (omp_context *ctx)
{
  tree f, type = ctx->record_type;

  /* ??? It isn't sufficient to just call remap_type here, because
     variably_modified_type_p doesn't work the way we expect for
     record types.  Testing each field for whether it needs remapping
     and creating a new record by hand works, however.  */
  for (f = TYPE_FIELDS (type); f ; f = TREE_CHAIN (f))
    if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
      break;
  if (f)
    {
      tree name, new_fields = NULL;

      type = lang_hooks.types.make_type (RECORD_TYPE);
      name = DECL_NAME (TYPE_NAME (ctx->record_type));
      name = build_decl (TYPE_DECL, name, type);
      TYPE_NAME (type) = name;

      for (f = TYPE_FIELDS (ctx->record_type); f ; f = TREE_CHAIN (f))
	{
	  tree new_f = copy_node (f);
	  DECL_CONTEXT (new_f) = type;
	  TREE_TYPE (new_f) = remap_type (TREE_TYPE (f), &ctx->cb);
	  TREE_CHAIN (new_f) = new_fields;
	  walk_tree (&DECL_SIZE (new_f), copy_tree_body_r, &ctx->cb, NULL);
	  walk_tree (&DECL_SIZE_UNIT (new_f), copy_tree_body_r,
		     &ctx->cb, NULL);
	  walk_tree (&DECL_FIELD_OFFSET (new_f), copy_tree_body_r,
		     &ctx->cb, NULL);
	  new_fields = new_f;

	  /* Arrange to be able to look up the receiver field
	     given the sender field.  */
	  splay_tree_insert (ctx->field_map, (splay_tree_key) f,
			     (splay_tree_value) new_f);
	}
      TYPE_FIELDS (type) = nreverse (new_fields);
      layout_type (type);
    }

  TREE_TYPE (ctx->receiver_decl) = build_pointer_type (type);
}

/* Instantiate decls as necessary in CTX to satisfy the data sharing
   specified by CLAUSES.  */

static void
scan_sharing_clauses (tree clauses, omp_context *ctx)
{
  tree c, decl;
  bool scan_array_reductions = false;

  for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
    {
      bool by_ref;

      switch (OMP_CLAUSE_CODE (c))
	{
	case OMP_CLAUSE_PRIVATE:
	  decl = OMP_CLAUSE_DECL (c);
	  if (OMP_CLAUSE_PRIVATE_OUTER_REF (c))
	    goto do_private;
	  else if (!is_variable_sized (decl))
	    install_var_local (decl, ctx);
	  break;

	case OMP_CLAUSE_SHARED:
	  gcc_assert (is_taskreg_ctx (ctx));
	  decl = OMP_CLAUSE_DECL (c);
	  gcc_assert (!COMPLETE_TYPE_P (TREE_TYPE (decl))
		      || !is_variable_sized (decl));
	  /* Global variables don't need to be copied,
	     the receiver side will use them directly.  */
	  if (is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx)))
	    break;
	  by_ref = use_pointer_for_field (decl, ctx);
	  if (! TREE_READONLY (decl)
	      || TREE_ADDRESSABLE (decl)
	      || by_ref
	      || is_reference (decl))
	    {
	      install_var_field (decl, by_ref, 3, ctx);
	      install_var_local (decl, ctx);
	      break;
	    }
	  /* We don't need to copy const scalar vars back.  */
	  OMP_CLAUSE_SET_CODE (c, OMP_CLAUSE_FIRSTPRIVATE);
	  goto do_private;

	case OMP_CLAUSE_LASTPRIVATE:
	  /* Let the corresponding firstprivate clause create
	     the variable.  */
	  if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
	    break;
	  /* FALLTHRU */

	case OMP_CLAUSE_FIRSTPRIVATE:
	case OMP_CLAUSE_REDUCTION:
	  decl = OMP_CLAUSE_DECL (c);
	do_private:
	  if (is_variable_sized (decl))
	    {
	      if (is_task_ctx (ctx))
		install_var_field (decl, false, 1, ctx);
	      break;
	    }
	  else if (is_taskreg_ctx (ctx))
	    {
	      bool global
		= is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx));
	      by_ref = use_pointer_for_field (decl, NULL);

	      if (is_task_ctx (ctx)
		  && (global || by_ref || is_reference (decl)))
		{
		  install_var_field (decl, false, 1, ctx);
		  if (!global)
		    install_var_field (decl, by_ref, 2, ctx);
		}
	      else if (!global)
		install_var_field (decl, by_ref, 3, ctx);
	    }
	  install_var_local (decl, ctx);
	  break;

	case OMP_CLAUSE_COPYPRIVATE:
	  if (ctx->outer)
	    scan_omp_op (&OMP_CLAUSE_DECL (c), ctx->outer);
	  /* FALLTHRU */

	case OMP_CLAUSE_COPYIN:
	  decl = OMP_CLAUSE_DECL (c);
	  by_ref = use_pointer_for_field (decl, NULL);
	  install_var_field (decl, by_ref, 3, ctx);
	  break;

	case OMP_CLAUSE_DEFAULT:
	  ctx->default_kind = OMP_CLAUSE_DEFAULT_KIND (c);
	  break;

	case OMP_CLAUSE_IF:
	case OMP_CLAUSE_NUM_THREADS:
	case OMP_CLAUSE_SCHEDULE:
	  if (ctx->outer)
	    scan_omp_op (&OMP_CLAUSE_OPERAND (c, 0), ctx->outer);
	  break;

	case OMP_CLAUSE_NOWAIT:
	case OMP_CLAUSE_ORDERED:
	case OMP_CLAUSE_COLLAPSE:
	case OMP_CLAUSE_UNTIED:
	  break;

	default:
	  gcc_unreachable ();
	}
    }

  for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
    {
      switch (OMP_CLAUSE_CODE (c))
	{
	case OMP_CLAUSE_LASTPRIVATE:
	  /* Let the corresponding firstprivate clause create
	     the variable.  */
	  if (OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c))
	    scan_array_reductions = true;
	  if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
	    break;
	  /* FALLTHRU */

	case OMP_CLAUSE_PRIVATE:
	case OMP_CLAUSE_FIRSTPRIVATE:
	case OMP_CLAUSE_REDUCTION:
	  decl = OMP_CLAUSE_DECL (c);
	  if (is_variable_sized (decl))
	    install_var_local (decl, ctx);
	  fixup_remapped_decl (decl, ctx,
			       OMP_CLAUSE_CODE (c) == OMP_CLAUSE_PRIVATE
			       && OMP_CLAUSE_PRIVATE_DEBUG (c));
	  if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
	      && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
	    scan_array_reductions = true;
	  break;

	case OMP_CLAUSE_SHARED:
	  decl = OMP_CLAUSE_DECL (c);
	  if (! is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx)))
	    fixup_remapped_decl (decl, ctx, false);
	  break;

	case OMP_CLAUSE_COPYPRIVATE:
	case OMP_CLAUSE_COPYIN:
	case OMP_CLAUSE_DEFAULT:
	case OMP_CLAUSE_IF:
	case OMP_CLAUSE_NUM_THREADS:
	case OMP_CLAUSE_SCHEDULE:
	case OMP_CLAUSE_NOWAIT:
	case OMP_CLAUSE_ORDERED:
	case OMP_CLAUSE_COLLAPSE:
	case OMP_CLAUSE_UNTIED:
	  break;

	default:
	  gcc_unreachable ();
	}
    }

  if (scan_array_reductions)
    for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
      if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
	  && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
	{
	  scan_omp (OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c), ctx);
	  scan_omp (OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c), ctx);
	}
      else if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE
	       && OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c))
	scan_omp (OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c), ctx);
}

/* Create a new name for omp child function.  Returns an identifier.  */
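/* For example (assuming the target allows dots in labels): lowering a
   parallel region inside function foo produces identifiers along the
   lines of foo.omp_fn.0, and a task copy function foo.omp_cpyfn.1,
   via the suffix handling and ASM_FORMAT_PRIVATE_NAME counter below.  */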

static GTY(()) unsigned int tmp_ompfn_id_num;

static tree
create_omp_child_function_name (bool task_copy)
{
  tree name = DECL_ASSEMBLER_NAME (current_function_decl);
  size_t len = IDENTIFIER_LENGTH (name);
  char *tmp_name, *prefix;
  const char *suffix;

  suffix = task_copy ? "_omp_cpyfn" : "_omp_fn";
  prefix = XALLOCAVEC (char, len + strlen (suffix) + 1);
  memcpy (prefix, IDENTIFIER_POINTER (name), len);
  strcpy (prefix + len, suffix);
#ifndef NO_DOT_IN_LABEL
  prefix[len] = '.';
#elif !defined NO_DOLLAR_IN_LABEL
  prefix[len] = '$';
#endif
  ASM_FORMAT_PRIVATE_NAME (tmp_name, prefix, tmp_ompfn_id_num++);
  return get_identifier (tmp_name);
}

/* Build a decl for the omp child function.  It'll not contain a body
   yet, just the bare decl.  */

static void
create_omp_child_function (omp_context *ctx, bool task_copy)
{
  tree decl, type, name, t;

  name = create_omp_child_function_name (task_copy);
  if (task_copy)
    type = build_function_type_list (void_type_node, ptr_type_node,
				     ptr_type_node, NULL_TREE);
  else
    type = build_function_type_list (void_type_node, ptr_type_node, NULL_TREE);

  decl = build_decl (FUNCTION_DECL, name, type);
  decl = lang_hooks.decls.pushdecl (decl);

  if (!task_copy)
    ctx->cb.dst_fn = decl;
  else
    gimple_omp_task_set_copy_fn (ctx->stmt, decl);

  TREE_STATIC (decl) = 1;
  TREE_USED (decl) = 1;
  DECL_ARTIFICIAL (decl) = 1;
  DECL_IGNORED_P (decl) = 0;
  TREE_PUBLIC (decl) = 0;
  DECL_UNINLINABLE (decl) = 1;
  DECL_EXTERNAL (decl) = 0;
  DECL_CONTEXT (decl) = NULL_TREE;
  DECL_INITIAL (decl) = make_node (BLOCK);

  t = build_decl (RESULT_DECL, NULL_TREE, void_type_node);
  DECL_ARTIFICIAL (t) = 1;
  DECL_IGNORED_P (t) = 1;
  DECL_RESULT (decl) = t;

  t = build_decl (PARM_DECL, get_identifier (".omp_data_i"), ptr_type_node);
  DECL_ARTIFICIAL (t) = 1;
  DECL_ARG_TYPE (t) = ptr_type_node;
  DECL_CONTEXT (t) = current_function_decl;
  TREE_USED (t) = 1;
  DECL_ARGUMENTS (decl) = t;
  if (!task_copy)
    ctx->receiver_decl = t;
  else
    {
      t = build_decl (PARM_DECL, get_identifier (".omp_data_o"),
		      ptr_type_node);
      DECL_ARTIFICIAL (t) = 1;
      DECL_ARG_TYPE (t) = ptr_type_node;
      DECL_CONTEXT (t) = current_function_decl;
      TREE_USED (t) = 1;
      TREE_CHAIN (t) = DECL_ARGUMENTS (decl);
      DECL_ARGUMENTS (decl) = t;
    }

  /* Allocate memory for the function structure.  The call to
     allocate_struct_function clobbers CFUN, so we need to restore
     it afterward.  */
  push_struct_function (decl);
  DECL_SOURCE_LOCATION (decl) = gimple_location (ctx->stmt);
  cfun->function_end_locus = gimple_location (ctx->stmt);
  pop_cfun ();
}

/* Scan an OpenMP parallel directive.  */

static void
scan_omp_parallel (gimple_stmt_iterator *gsi, omp_context *outer_ctx)
{
  omp_context *ctx;
  tree name;
  gimple stmt = gsi_stmt (*gsi);

  /* Ignore parallel directives with empty bodies, unless there
     are copyin clauses.  */
  if (optimize > 0
      && empty_body_p (gimple_omp_body (stmt))
      && find_omp_clause (gimple_omp_parallel_clauses (stmt),
			  OMP_CLAUSE_COPYIN) == NULL)
    {
      gsi_replace (gsi, gimple_build_nop (), false);
      return;
    }

  ctx = new_omp_context (stmt, outer_ctx);
  if (taskreg_nesting_level > 1)
    ctx->is_nested = true;
  ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
  ctx->default_kind = OMP_CLAUSE_DEFAULT_SHARED;
  ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
  name = create_tmp_var_name (".omp_data_s");
  name = build_decl (TYPE_DECL, name, ctx->record_type);
  TYPE_NAME (ctx->record_type) = name;
  create_omp_child_function (ctx, false);
  gimple_omp_parallel_set_child_fn (stmt, ctx->cb.dst_fn);

  scan_sharing_clauses (gimple_omp_parallel_clauses (stmt), ctx);
  scan_omp (gimple_omp_body (stmt), ctx);

  if (TYPE_FIELDS (ctx->record_type) == NULL)
    ctx->record_type = ctx->receiver_decl = NULL;
  else
    {
      layout_type (ctx->record_type);
      fixup_child_record_type (ctx);
    }
}

/* Scan an OpenMP task directive.  */

static void
scan_omp_task (gimple_stmt_iterator *gsi, omp_context *outer_ctx)
{
  omp_context *ctx;
  tree name, t;
  gimple stmt = gsi_stmt (*gsi);

  /* Ignore task directives with empty bodies.  */
  if (optimize > 0
      && empty_body_p (gimple_omp_body (stmt)))
    {
      gsi_replace (gsi, gimple_build_nop (), false);
      return;
    }

  ctx = new_omp_context (stmt, outer_ctx);
  if (taskreg_nesting_level > 1)
    ctx->is_nested = true;
  ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
  ctx->default_kind = OMP_CLAUSE_DEFAULT_SHARED;
  ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
  name = create_tmp_var_name (".omp_data_s");
  name = build_decl (TYPE_DECL, name, ctx->record_type);
  TYPE_NAME (ctx->record_type) = name;
  create_omp_child_function (ctx, false);
  gimple_omp_task_set_child_fn (stmt, ctx->cb.dst_fn);

  scan_sharing_clauses (gimple_omp_task_clauses (stmt), ctx);

  if (ctx->srecord_type)
    {
      name = create_tmp_var_name (".omp_data_a");
      name = build_decl (TYPE_DECL, name, ctx->srecord_type);
      TYPE_NAME (ctx->srecord_type) = name;
      create_omp_child_function (ctx, true);
    }

  scan_omp (gimple_omp_body (stmt), ctx);

  if (TYPE_FIELDS (ctx->record_type) == NULL)
    {
      ctx->record_type = ctx->receiver_decl = NULL;
      t = build_int_cst (long_integer_type_node, 0);
      gimple_omp_task_set_arg_size (stmt, t);
      t = build_int_cst (long_integer_type_node, 1);
      gimple_omp_task_set_arg_align (stmt, t);
    }
  else
    {
      tree *p, vla_fields = NULL_TREE, *q = &vla_fields;
      /* Move VLA fields to the end.  */
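      /* Illustratively, a record such as { int n; char a[n]; int m; }
	 becomes { n; m; a[n]; }, so every constant-size field keeps a
	 compile-time offset (an editorial example, not extra semantics).  */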
      p = &TYPE_FIELDS (ctx->record_type);
      while (*p)
	if (!TYPE_SIZE_UNIT (TREE_TYPE (*p))
	    || ! TREE_CONSTANT (TYPE_SIZE_UNIT (TREE_TYPE (*p))))
	  {
	    *q = *p;
	    *p = TREE_CHAIN (*p);
	    TREE_CHAIN (*q) = NULL_TREE;
	    q = &TREE_CHAIN (*q);
	  }
	else
	  p = &TREE_CHAIN (*p);
      *p = vla_fields;
      layout_type (ctx->record_type);
      fixup_child_record_type (ctx);
      if (ctx->srecord_type)
	layout_type (ctx->srecord_type);
      t = fold_convert (long_integer_type_node,
			TYPE_SIZE_UNIT (ctx->record_type));
      gimple_omp_task_set_arg_size (stmt, t);
      t = build_int_cst (long_integer_type_node,
			 TYPE_ALIGN_UNIT (ctx->record_type));
      gimple_omp_task_set_arg_align (stmt, t);
    }
}

/* Scan an OpenMP loop directive.  */

static void
scan_omp_for (gimple stmt, omp_context *outer_ctx)
{
  omp_context *ctx;
  size_t i;

  ctx = new_omp_context (stmt, outer_ctx);

  scan_sharing_clauses (gimple_omp_for_clauses (stmt), ctx);

  scan_omp (gimple_omp_for_pre_body (stmt), ctx);
  for (i = 0; i < gimple_omp_for_collapse (stmt); i++)
    {
      scan_omp_op (gimple_omp_for_index_ptr (stmt, i), ctx);
      scan_omp_op (gimple_omp_for_initial_ptr (stmt, i), ctx);
      scan_omp_op (gimple_omp_for_final_ptr (stmt, i), ctx);
      scan_omp_op (gimple_omp_for_incr_ptr (stmt, i), ctx);
    }
  scan_omp (gimple_omp_body (stmt), ctx);
}

/* Scan an OpenMP sections directive.  */

static void
scan_omp_sections (gimple stmt, omp_context *outer_ctx)
{
  omp_context *ctx;

  ctx = new_omp_context (stmt, outer_ctx);
  scan_sharing_clauses (gimple_omp_sections_clauses (stmt), ctx);
  scan_omp (gimple_omp_body (stmt), ctx);
}

/* Scan an OpenMP single directive.  */

static void
scan_omp_single (gimple stmt, omp_context *outer_ctx)
{
  omp_context *ctx;
  tree name;

  ctx = new_omp_context (stmt, outer_ctx);
  ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
  ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
  name = create_tmp_var_name (".omp_copy_s");
  name = build_decl (TYPE_DECL, name, ctx->record_type);
  TYPE_NAME (ctx->record_type) = name;

  scan_sharing_clauses (gimple_omp_single_clauses (stmt), ctx);
  scan_omp (gimple_omp_body (stmt), ctx);

  if (TYPE_FIELDS (ctx->record_type) == NULL)
    ctx->record_type = NULL;
  else
    layout_type (ctx->record_type);
}

c1d127dd 1796/* Check OpenMP nesting restrictions. */
1797static void
75a70cf9 1798check_omp_nesting_restrictions (gimple stmt, omp_context *ctx)
c1d127dd 1799{
75a70cf9 1800 switch (gimple_code (stmt))
c1d127dd 1801 {
75a70cf9 1802 case GIMPLE_OMP_FOR:
1803 case GIMPLE_OMP_SECTIONS:
1804 case GIMPLE_OMP_SINGLE:
1805 case GIMPLE_CALL:
c1d127dd 1806 for (; ctx != NULL; ctx = ctx->outer)
75a70cf9 1807 switch (gimple_code (ctx->stmt))
c1d127dd 1808 {
75a70cf9 1809 case GIMPLE_OMP_FOR:
1810 case GIMPLE_OMP_SECTIONS:
1811 case GIMPLE_OMP_SINGLE:
1812 case GIMPLE_OMP_ORDERED:
1813 case GIMPLE_OMP_MASTER:
1814 case GIMPLE_OMP_TASK:
1815 if (is_gimple_call (stmt))
fd6481cf 1816 {
1817 warning (0, "barrier region may not be closely nested inside "
1818 "of work-sharing, critical, ordered, master or "
1819 "explicit task region");
1820 return;
1821 }
c1d127dd 1822 warning (0, "work-sharing region may not be closely nested inside "
fd6481cf 1823 "of work-sharing, critical, ordered, master or explicit "
1824 "task region");
c1d127dd 1825 return;
75a70cf9 1826 case GIMPLE_OMP_PARALLEL:
c1d127dd 1827 return;
1828 default:
1829 break;
1830 }
1831 break;
75a70cf9 1832 case GIMPLE_OMP_MASTER:
c1d127dd 1833 for (; ctx != NULL; ctx = ctx->outer)
75a70cf9 1834 switch (gimple_code (ctx->stmt))
c1d127dd 1835 {
75a70cf9 1836 case GIMPLE_OMP_FOR:
1837 case GIMPLE_OMP_SECTIONS:
1838 case GIMPLE_OMP_SINGLE:
1839 case GIMPLE_OMP_TASK:
c1d127dd 1840 warning (0, "master region may not be closely nested inside "
fd6481cf 1841 "of work-sharing or explicit task region");
c1d127dd 1842 return;
75a70cf9 1843 case GIMPLE_OMP_PARALLEL:
c1d127dd 1844 return;
1845 default:
1846 break;
1847 }
1848 break;
75a70cf9 1849 case GIMPLE_OMP_ORDERED:
c1d127dd 1850 for (; ctx != NULL; ctx = ctx->outer)
75a70cf9 1851 switch (gimple_code (ctx->stmt))
c1d127dd 1852 {
75a70cf9 1853 case GIMPLE_OMP_CRITICAL:
1854 case GIMPLE_OMP_TASK:
c1d127dd 1855 warning (0, "ordered region may not be closely nested inside "
fd6481cf 1856 "of critical or explicit task region");
c1d127dd 1857 return;
75a70cf9 1858 case GIMPLE_OMP_FOR:
1859 if (find_omp_clause (gimple_omp_for_clauses (ctx->stmt),
c1d127dd 1860 OMP_CLAUSE_ORDERED) == NULL)
1861 warning (0, "ordered region must be closely nested inside "
1862 "a loop region with an ordered clause");
1863 return;
75a70cf9 1864 case GIMPLE_OMP_PARALLEL:
c1d127dd 1865 return;
1866 default:
1867 break;
1868 }
1869 break;
75a70cf9 1870 case GIMPLE_OMP_CRITICAL:
c1d127dd 1871 for (; ctx != NULL; ctx = ctx->outer)
75a70cf9 1872 if (gimple_code (ctx->stmt) == GIMPLE_OMP_CRITICAL
1873 && (gimple_omp_critical_name (stmt)
1874 == gimple_omp_critical_name (ctx->stmt)))
c1d127dd 1875 {
1876 warning (0, "critical region may not be nested inside a critical "
1877 "region with the same name");
1878 return;
1879 }
1880 break;
1881 default:
1882 break;
1883 }
1884}
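
/* As an illustration, the warnings in this function fire on code
   such as

       #pragma omp for
       for (i = 0; i < n; i++)
         {
           #pragma omp single
           f (i);
         }

   since a work-sharing region may not be closely nested inside
   another work-sharing region.  */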
1885
1886
75a70cf9 1887/* Helper function for scan_omp.
1888
1889 Callback for walk_tree, via the operand walk in walk_gimple_stmt,
1890 used to scan for OpenMP directives in TP. */
1e8e9920 1891
1892static tree
75a70cf9 1893scan_omp_1_op (tree *tp, int *walk_subtrees, void *data)
1e8e9920 1894{
4077bf7a 1895 struct walk_stmt_info *wi = (struct walk_stmt_info *) data;
1896 omp_context *ctx = (omp_context *) wi->info;
1e8e9920 1897 tree t = *tp;
1898
75a70cf9 1899 switch (TREE_CODE (t))
1900 {
1901 case VAR_DECL:
1902 case PARM_DECL:
1903 case LABEL_DECL:
1904 case RESULT_DECL:
1905 if (ctx)
1906 *tp = remap_decl (t, &ctx->cb);
1907 break;
1908
1909 default:
1910 if (ctx && TYPE_P (t))
1911 *tp = remap_type (t, &ctx->cb);
1912 else if (!DECL_P (t))
1913 *walk_subtrees = 1;
1914 break;
1915 }
1916
1917 return NULL_TREE;
1918}
1919
1920
1921/* Helper function for scan_omp.
1922
1923 Callback for walk_gimple_stmt used to scan for OpenMP directives in
1924 the current statement in GSI. */
1925
1926static tree
1927scan_omp_1_stmt (gimple_stmt_iterator *gsi, bool *handled_ops_p,
1928 struct walk_stmt_info *wi)
1929{
1930 gimple stmt = gsi_stmt (*gsi);
1931 omp_context *ctx = (omp_context *) wi->info;
1932
1933 if (gimple_has_location (stmt))
1934 input_location = gimple_location (stmt);
1e8e9920 1935
c1d127dd 1936 /* Check the OpenMP nesting restrictions. */
fd6481cf 1937 if (ctx != NULL)
1938 {
75a70cf9 1939 if (is_gimple_omp (stmt))
1940 check_omp_nesting_restrictions (stmt, ctx);
1941 else if (is_gimple_call (stmt))
fd6481cf 1942 {
75a70cf9 1943 tree fndecl = gimple_call_fndecl (stmt);
fd6481cf 1944 if (fndecl && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL
1945 && DECL_FUNCTION_CODE (fndecl) == BUILT_IN_GOMP_BARRIER)
75a70cf9 1946 check_omp_nesting_restrictions (stmt, ctx);
fd6481cf 1947 }
1948 }
c1d127dd 1949
75a70cf9 1950 *handled_ops_p = true;
1951
1952 switch (gimple_code (stmt))
1e8e9920 1953 {
75a70cf9 1954 case GIMPLE_OMP_PARALLEL:
fd6481cf 1955 taskreg_nesting_level++;
75a70cf9 1956 scan_omp_parallel (gsi, ctx);
fd6481cf 1957 taskreg_nesting_level--;
1958 break;
1959
75a70cf9 1960 case GIMPLE_OMP_TASK:
fd6481cf 1961 taskreg_nesting_level++;
75a70cf9 1962 scan_omp_task (gsi, ctx);
fd6481cf 1963 taskreg_nesting_level--;
1e8e9920 1964 break;
1965
75a70cf9 1966 case GIMPLE_OMP_FOR:
1967 scan_omp_for (stmt, ctx);
1e8e9920 1968 break;
1969
75a70cf9 1970 case GIMPLE_OMP_SECTIONS:
1971 scan_omp_sections (stmt, ctx);
1e8e9920 1972 break;
1973
75a70cf9 1974 case GIMPLE_OMP_SINGLE:
1975 scan_omp_single (stmt, ctx);
1e8e9920 1976 break;
1977
75a70cf9 1978 case GIMPLE_OMP_SECTION:
1979 case GIMPLE_OMP_MASTER:
1980 case GIMPLE_OMP_ORDERED:
1981 case GIMPLE_OMP_CRITICAL:
1982 ctx = new_omp_context (stmt, ctx);
1983 scan_omp (gimple_omp_body (stmt), ctx);
1e8e9920 1984 break;
1985
75a70cf9 1986 case GIMPLE_BIND:
1e8e9920 1987 {
1988 tree var;
1e8e9920 1989
75a70cf9 1990 *handled_ops_p = false;
1991 if (ctx)
1992 for (var = gimple_bind_vars (stmt); var ; var = TREE_CHAIN (var))
1993 insert_decl_map (&ctx->cb, var, var);
1e8e9920 1994 }
1995 break;
1e8e9920 1996 default:
75a70cf9 1997 *handled_ops_p = false;
1e8e9920 1998 break;
1999 }
2000
2001 return NULL_TREE;
2002}
2003
2004
75a70cf9 2005/* Scan all the statements in the sequence BODY. CTX
2006 contains context information about the OpenMP directives and
2007 clauses found during the scan. */
1e8e9920 2008
2009static void
75a70cf9 2010scan_omp (gimple_seq body, omp_context *ctx)
1e8e9920 2011{
2012 location_t saved_location;
2013 struct walk_stmt_info wi;
2014
2015 memset (&wi, 0, sizeof (wi));
1e8e9920 2016 wi.info = ctx;
1e8e9920 2017 wi.want_locations = true;
2018
2019 saved_location = input_location;
75a70cf9 2020 walk_gimple_seq (body, scan_omp_1_stmt, scan_omp_1_op, &wi);
1e8e9920 2021 input_location = saved_location;
2022}
2023\f
2024/* Re-gimplification and code generation routines. */
2025
2026/* Build a call to GOMP_barrier. */
2027
79acaae1 2028static tree
2029build_omp_barrier (void)
1e8e9920 2030{
79acaae1 2031 return build_call_expr (built_in_decls[BUILT_IN_GOMP_BARRIER], 0);
1e8e9920 2032}
2033
2034/* If a context was created for STMT when it was scanned, return it. */
2035
2036static omp_context *
75a70cf9 2037maybe_lookup_ctx (gimple stmt)
1e8e9920 2038{
2039 splay_tree_node n;
2040 n = splay_tree_lookup (all_contexts, (splay_tree_key) stmt);
2041 return n ? (omp_context *) n->value : NULL;
2042}
2043
773c5ba7 2044
2045/* Find the mapping for DECL in CTX or the immediately enclosing
2046 context that has a mapping for DECL.
2047
2048 If CTX is a nested parallel directive, we may have to use the decl
2049 mappings created in CTX's parent context. Suppose that we have the
 2050 following parallel nesting (variable UIDs shown for clarity):
2051
2052 iD.1562 = 0;
2053 #omp parallel shared(iD.1562) -> outer parallel
2054 iD.1562 = iD.1562 + 1;
2055
2056 #omp parallel shared (iD.1562) -> inner parallel
2057 iD.1562 = iD.1562 - 1;
2058
2059 Each parallel structure will create a distinct .omp_data_s structure
2060 for copying iD.1562 in/out of the directive:
2061
2062 outer parallel .omp_data_s.1.i -> iD.1562
2063 inner parallel .omp_data_s.2.i -> iD.1562
2064
2065 A shared variable mapping will produce a copy-out operation before
2066 the parallel directive and a copy-in operation after it. So, in
2067 this case we would have:
2068
2069 iD.1562 = 0;
2070 .omp_data_o.1.i = iD.1562;
2071 #omp parallel shared(iD.1562) -> outer parallel
2072 .omp_data_i.1 = &.omp_data_o.1
2073 .omp_data_i.1->i = .omp_data_i.1->i + 1;
2074
2075 .omp_data_o.2.i = iD.1562; -> **
2076 #omp parallel shared(iD.1562) -> inner parallel
2077 .omp_data_i.2 = &.omp_data_o.2
2078 .omp_data_i.2->i = .omp_data_i.2->i - 1;
2079
2080
2081 ** This is a problem. The symbol iD.1562 cannot be referenced
2082 inside the body of the outer parallel region. But since we are
2083 emitting this copy operation while expanding the inner parallel
2084 directive, we need to access the CTX structure of the outer
2085 parallel directive to get the correct mapping:
2086
2087 .omp_data_o.2.i = .omp_data_i.1->i
2088
2089 Since there may be other workshare or parallel directives enclosing
2090 the parallel directive, it may be necessary to walk up the context
2091 parent chain. This is not a problem in general because nested
2092 parallelism happens only rarely. */
2093
2094static tree
2095lookup_decl_in_outer_ctx (tree decl, omp_context *ctx)
2096{
2097 tree t;
2098 omp_context *up;
2099
773c5ba7 2100 for (up = ctx->outer, t = NULL; up && t == NULL; up = up->outer)
2101 t = maybe_lookup_decl (decl, up);
2102
87b31375 2103 gcc_assert (!ctx->is_nested || t || is_global_var (decl));
773c5ba7 2104
c37594c7 2105 return t ? t : decl;
773c5ba7 2106}
2107
2108
f49d7bb5 2109/* Similar to lookup_decl_in_outer_ctx, but return DECL if not found
2110 in outer contexts. */
2111
2112static tree
2113maybe_lookup_decl_in_outer_ctx (tree decl, omp_context *ctx)
2114{
2115 tree t = NULL;
2116 omp_context *up;
2117
87b31375 2118 for (up = ctx->outer, t = NULL; up && t == NULL; up = up->outer)
2119 t = maybe_lookup_decl (decl, up);
f49d7bb5 2120
2121 return t ? t : decl;
2122}
2123
2124
1e8e9920 2125/* Construct the initialization value for reduction CLAUSE. */
2126
2127tree
2128omp_reduction_init (tree clause, tree type)
2129{
2130 switch (OMP_CLAUSE_REDUCTION_CODE (clause))
2131 {
2132 case PLUS_EXPR:
2133 case MINUS_EXPR:
2134 case BIT_IOR_EXPR:
2135 case BIT_XOR_EXPR:
2136 case TRUTH_OR_EXPR:
2137 case TRUTH_ORIF_EXPR:
2138 case TRUTH_XOR_EXPR:
2139 case NE_EXPR:
2140 return fold_convert (type, integer_zero_node);
2141
2142 case MULT_EXPR:
2143 case TRUTH_AND_EXPR:
2144 case TRUTH_ANDIF_EXPR:
2145 case EQ_EXPR:
2146 return fold_convert (type, integer_one_node);
2147
2148 case BIT_AND_EXPR:
2149 return fold_convert (type, integer_minus_one_node);
2150
2151 case MAX_EXPR:
2152 if (SCALAR_FLOAT_TYPE_P (type))
2153 {
2154 REAL_VALUE_TYPE max, min;
2155 if (HONOR_INFINITIES (TYPE_MODE (type)))
2156 {
2157 real_inf (&max);
2158 real_arithmetic (&min, NEGATE_EXPR, &max, NULL);
2159 }
2160 else
2161 real_maxval (&min, 1, TYPE_MODE (type));
2162 return build_real (type, min);
2163 }
2164 else
2165 {
2166 gcc_assert (INTEGRAL_TYPE_P (type));
2167 return TYPE_MIN_VALUE (type);
2168 }
2169
2170 case MIN_EXPR:
2171 if (SCALAR_FLOAT_TYPE_P (type))
2172 {
2173 REAL_VALUE_TYPE max;
2174 if (HONOR_INFINITIES (TYPE_MODE (type)))
2175 real_inf (&max);
2176 else
2177 real_maxval (&max, 0, TYPE_MODE (type));
2178 return build_real (type, max);
2179 }
2180 else
2181 {
2182 gcc_assert (INTEGRAL_TYPE_P (type));
2183 return TYPE_MAX_VALUE (type);
2184 }
2185
2186 default:
2187 gcc_unreachable ();
2188 }
2189}
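
/* Illustratively: for an int operand omp_reduction_init returns 0
   for +, -, |, ^, || and != reductions, 1 for *, && and ==, -1 for
   &, INT_MIN for max and INT_MAX for min.  For floats with
   infinities honored, max yields -Inf and min yields +Inf.  */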
2190
2191/* Generate code to implement the input clauses, FIRSTPRIVATE and COPYIN,
2192 from the receiver (aka child) side and initializers for REFERENCE_TYPE
2193 private variables. Initialization statements go in ILIST, while calls
2194 to destructors go in DLIST. */
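
/* A sketch of the receiver-side result (variable names invented):
   for `#pragma omp parallel firstprivate(x)', ILIST ends up holding
   roughly

       x.1 = .omp_data_i->x;

   while DLIST collects whatever destructor calls must run when the
   region body finishes.  */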
2195
2196static void
75a70cf9 2197lower_rec_input_clauses (tree clauses, gimple_seq *ilist, gimple_seq *dlist,
1e4afe3c 2198 omp_context *ctx)
1e8e9920 2199{
75a70cf9 2200 gimple_stmt_iterator diter;
c2f47e15 2201 tree c, dtor, copyin_seq, x, ptr;
1e8e9920 2202 bool copyin_by_ref = false;
f49d7bb5 2203 bool lastprivate_firstprivate = false;
1e8e9920 2204 int pass;
2205
75a70cf9 2206 *dlist = gimple_seq_alloc ();
2207 diter = gsi_start (*dlist);
1e8e9920 2208 copyin_seq = NULL;
2209
2210 /* Do all the fixed sized types in the first pass, and the variable sized
2211 types in the second pass. This makes sure that the scalar arguments to
2212 the variable sized types are processed before we use them in the
2213 variable sized operations. */
2214 for (pass = 0; pass < 2; ++pass)
2215 {
2216 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
2217 {
55d6e7cd 2218 enum omp_clause_code c_kind = OMP_CLAUSE_CODE (c);
1e8e9920 2219 tree var, new_var;
2220 bool by_ref;
2221
2222 switch (c_kind)
2223 {
2224 case OMP_CLAUSE_PRIVATE:
2225 if (OMP_CLAUSE_PRIVATE_DEBUG (c))
2226 continue;
2227 break;
2228 case OMP_CLAUSE_SHARED:
f49d7bb5 2229 if (maybe_lookup_decl (OMP_CLAUSE_DECL (c), ctx) == NULL)
2230 {
2231 gcc_assert (is_global_var (OMP_CLAUSE_DECL (c)));
2232 continue;
2233 }
1e8e9920 2234 case OMP_CLAUSE_FIRSTPRIVATE:
1e8e9920 2235 case OMP_CLAUSE_COPYIN:
2236 case OMP_CLAUSE_REDUCTION:
2237 break;
df2c34fc 2238 case OMP_CLAUSE_LASTPRIVATE:
f49d7bb5 2239 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
2240 {
2241 lastprivate_firstprivate = true;
2242 if (pass != 0)
2243 continue;
2244 }
df2c34fc 2245 break;
1e8e9920 2246 default:
2247 continue;
2248 }
2249
2250 new_var = var = OMP_CLAUSE_DECL (c);
2251 if (c_kind != OMP_CLAUSE_COPYIN)
2252 new_var = lookup_decl (var, ctx);
2253
2254 if (c_kind == OMP_CLAUSE_SHARED || c_kind == OMP_CLAUSE_COPYIN)
2255 {
2256 if (pass != 0)
2257 continue;
2258 }
1e8e9920 2259 else if (is_variable_sized (var))
2260 {
773c5ba7 2261 /* For variable sized types, we need to allocate the
2262 actual storage here. Call alloca and store the
2263 result in the pointer decl that we created elsewhere. */
1e8e9920 2264 if (pass == 0)
2265 continue;
2266
fd6481cf 2267 if (c_kind != OMP_CLAUSE_FIRSTPRIVATE || !is_task_ctx (ctx))
2268 {
75a70cf9 2269 gimple stmt;
2270 tree tmp;
2271
fd6481cf 2272 ptr = DECL_VALUE_EXPR (new_var);
2273 gcc_assert (TREE_CODE (ptr) == INDIRECT_REF);
2274 ptr = TREE_OPERAND (ptr, 0);
2275 gcc_assert (DECL_P (ptr));
2276 x = TYPE_SIZE_UNIT (TREE_TYPE (new_var));
75a70cf9 2277
 2278		  /* void *tmp = __builtin_alloca (x); */
2279 stmt
2280 = gimple_build_call (built_in_decls[BUILT_IN_ALLOCA], 1, x);
2281 tmp = create_tmp_var_raw (ptr_type_node, NULL);
2282 gimple_add_tmp_var (tmp);
2283 gimple_call_set_lhs (stmt, tmp);
2284
2285 gimple_seq_add_stmt (ilist, stmt);
2286
2287 x = fold_convert (TREE_TYPE (ptr), tmp);
2288 gimplify_assign (ptr, x, ilist);
fd6481cf 2289 }
1e8e9920 2290 }
1e8e9920 2291 else if (is_reference (var))
2292 {
773c5ba7 2293 /* For references that are being privatized for Fortran,
2294 allocate new backing storage for the new pointer
 2295	     variable. This allows us to avoid having to change all
 2296	     the code that expects a pointer into code that expects
2297 a direct variable. Note that this doesn't apply to
2298 C++, since reference types are disallowed in data
df2c34fc 2299 sharing clauses there, except for NRV optimized
2300 return values. */
1e8e9920 2301 if (pass == 0)
2302 continue;
2303
2304 x = TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (new_var)));
fd6481cf 2305 if (c_kind == OMP_CLAUSE_FIRSTPRIVATE && is_task_ctx (ctx))
2306 {
2307 x = build_receiver_ref (var, false, ctx);
2308 x = build_fold_addr_expr (x);
2309 }
2310 else if (TREE_CONSTANT (x))
1e8e9920 2311 {
2312 const char *name = NULL;
2313 if (DECL_NAME (var))
2314 name = IDENTIFIER_POINTER (DECL_NAME (new_var));
2315
df2c34fc 2316 x = create_tmp_var_raw (TREE_TYPE (TREE_TYPE (new_var)),
2317 name);
2318 gimple_add_tmp_var (x);
1e8e9920 2319 x = build_fold_addr_expr_with_type (x, TREE_TYPE (new_var));
2320 }
2321 else
2322 {
c2f47e15 2323 x = build_call_expr (built_in_decls[BUILT_IN_ALLOCA], 1, x);
1e8e9920 2324 x = fold_convert (TREE_TYPE (new_var), x);
2325 }
2326
75a70cf9 2327 gimplify_assign (new_var, x, ilist);
1e8e9920 2328
2329 new_var = build_fold_indirect_ref (new_var);
2330 }
2331 else if (c_kind == OMP_CLAUSE_REDUCTION
2332 && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
2333 {
2334 if (pass == 0)
2335 continue;
2336 }
2337 else if (pass != 0)
2338 continue;
2339
55d6e7cd 2340 switch (OMP_CLAUSE_CODE (c))
1e8e9920 2341 {
2342 case OMP_CLAUSE_SHARED:
f49d7bb5 2343 /* Shared global vars are just accessed directly. */
2344 if (is_global_var (new_var))
2345 break;
1e8e9920 2346 /* Set up the DECL_VALUE_EXPR for shared variables now. This
2347 needs to be delayed until after fixup_child_record_type so
2348 that we get the correct type during the dereference. */
e8a588af 2349 by_ref = use_pointer_for_field (var, ctx);
1e8e9920 2350 x = build_receiver_ref (var, by_ref, ctx);
2351 SET_DECL_VALUE_EXPR (new_var, x);
2352 DECL_HAS_VALUE_EXPR_P (new_var) = 1;
2353
2354 /* ??? If VAR is not passed by reference, and the variable
2355 hasn't been initialized yet, then we'll get a warning for
2356 the store into the omp_data_s structure. Ideally, we'd be
2357 able to notice this and not store anything at all, but
2358 we're generating code too early. Suppress the warning. */
2359 if (!by_ref)
2360 TREE_NO_WARNING (var) = 1;
2361 break;
2362
2363 case OMP_CLAUSE_LASTPRIVATE:
2364 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
2365 break;
2366 /* FALLTHRU */
2367
2368 case OMP_CLAUSE_PRIVATE:
fd6481cf 2369 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_PRIVATE)
2370 x = build_outer_var_ref (var, ctx);
2371 else if (OMP_CLAUSE_PRIVATE_OUTER_REF (c))
2372 {
2373 if (is_task_ctx (ctx))
2374 x = build_receiver_ref (var, false, ctx);
2375 else
2376 x = build_outer_var_ref (var, ctx);
2377 }
2378 else
2379 x = NULL;
2380 x = lang_hooks.decls.omp_clause_default_ctor (c, new_var, x);
1e8e9920 2381 if (x)
2382 gimplify_and_add (x, ilist);
2383 /* FALLTHRU */
2384
2385 do_dtor:
2386 x = lang_hooks.decls.omp_clause_dtor (c, new_var);
2387 if (x)
2388 {
75a70cf9 2389 gimple_seq tseq = NULL;
2390
1e8e9920 2391 dtor = x;
75a70cf9 2392 gimplify_stmt (&dtor, &tseq);
2393 gsi_insert_seq_before (&diter, tseq, GSI_SAME_STMT);
1e8e9920 2394 }
2395 break;
2396
2397 case OMP_CLAUSE_FIRSTPRIVATE:
fd6481cf 2398 if (is_task_ctx (ctx))
2399 {
2400 if (is_reference (var) || is_variable_sized (var))
2401 goto do_dtor;
2402 else if (is_global_var (maybe_lookup_decl_in_outer_ctx (var,
2403 ctx))
2404 || use_pointer_for_field (var, NULL))
2405 {
2406 x = build_receiver_ref (var, false, ctx);
2407 SET_DECL_VALUE_EXPR (new_var, x);
2408 DECL_HAS_VALUE_EXPR_P (new_var) = 1;
2409 goto do_dtor;
2410 }
2411 }
1e8e9920 2412 x = build_outer_var_ref (var, ctx);
2413 x = lang_hooks.decls.omp_clause_copy_ctor (c, new_var, x);
2414 gimplify_and_add (x, ilist);
2415 goto do_dtor;
2416 break;
2417
2418 case OMP_CLAUSE_COPYIN:
e8a588af 2419 by_ref = use_pointer_for_field (var, NULL);
1e8e9920 2420 x = build_receiver_ref (var, by_ref, ctx);
2421 x = lang_hooks.decls.omp_clause_assign_op (c, new_var, x);
2422 append_to_statement_list (x, &copyin_seq);
2423 copyin_by_ref |= by_ref;
2424 break;
2425
2426 case OMP_CLAUSE_REDUCTION:
2427 if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
2428 {
fd6481cf 2429 tree placeholder = OMP_CLAUSE_REDUCTION_PLACEHOLDER (c);
2430 x = build_outer_var_ref (var, ctx);
2431
2432 if (is_reference (var))
2433 x = build_fold_addr_expr (x);
2434 SET_DECL_VALUE_EXPR (placeholder, x);
2435 DECL_HAS_VALUE_EXPR_P (placeholder) = 1;
75a70cf9 2436 lower_omp (OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c), ctx);
2437 gimple_seq_add_seq (ilist,
2438 OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c));
2439 OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c) = NULL;
fd6481cf 2440 DECL_HAS_VALUE_EXPR_P (placeholder) = 0;
1e8e9920 2441 }
2442 else
2443 {
2444 x = omp_reduction_init (c, TREE_TYPE (new_var));
2445 gcc_assert (TREE_CODE (TREE_TYPE (new_var)) != ARRAY_TYPE);
75a70cf9 2446 gimplify_assign (new_var, x, ilist);
1e8e9920 2447 }
2448 break;
2449
2450 default:
2451 gcc_unreachable ();
2452 }
2453 }
2454 }
2455
2456 /* The copyin sequence is not to be executed by the main thread, since
 2457	 that would result in self-copies. That may not be visible for
 2458	 scalars, but it certainly is for C++ operator=. */
2459 if (copyin_seq)
2460 {
c2f47e15 2461 x = build_call_expr (built_in_decls[BUILT_IN_OMP_GET_THREAD_NUM], 0);
1e8e9920 2462 x = build2 (NE_EXPR, boolean_type_node, x,
2463 build_int_cst (TREE_TYPE (x), 0));
2464 x = build3 (COND_EXPR, void_type_node, x, copyin_seq, NULL);
2465 gimplify_and_add (x, ilist);
2466 }
2467
2468 /* If any copyin variable is passed by reference, we must ensure the
2469 master thread doesn't modify it before it is copied over in all
f49d7bb5 2470 threads. Similarly for variables in both firstprivate and
2471 lastprivate clauses we need to ensure the lastprivate copying
2472 happens after firstprivate copying in all threads. */
2473 if (copyin_by_ref || lastprivate_firstprivate)
79acaae1 2474 gimplify_and_add (build_omp_barrier (), ilist);
1e8e9920 2475}
2476
773c5ba7 2477
1e8e9920 2478/* Generate code to implement the LASTPRIVATE clauses. This is used for
2479 both parallel and workshare constructs. PREDICATE may be NULL if it's
2480 always true. */
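
/* When PREDICATE is non-NULL, the copy-back assignments are guarded
   so that only the thread that ran the sequentially last iteration
   performs them; schematically (label names invented):

       if (PREDICATE) goto label_true; else goto label;
       label_true:
         <assign the private copies back to the outer variables>
       label: ;  */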
2481
2482static void
75a70cf9 2483lower_lastprivate_clauses (tree clauses, tree predicate, gimple_seq *stmt_list,
2484 omp_context *ctx)
1e8e9920 2485{
75a70cf9 2486 tree x, c, label = NULL;
fd6481cf 2487 bool par_clauses = false;
1e8e9920 2488
2489 /* Early exit if there are no lastprivate clauses. */
2490 clauses = find_omp_clause (clauses, OMP_CLAUSE_LASTPRIVATE);
2491 if (clauses == NULL)
2492 {
2493 /* If this was a workshare clause, see if it had been combined
2494 with its parallel. In that case, look for the clauses on the
2495 parallel statement itself. */
2496 if (is_parallel_ctx (ctx))
2497 return;
2498
2499 ctx = ctx->outer;
2500 if (ctx == NULL || !is_parallel_ctx (ctx))
2501 return;
2502
75a70cf9 2503 clauses = find_omp_clause (gimple_omp_parallel_clauses (ctx->stmt),
1e8e9920 2504 OMP_CLAUSE_LASTPRIVATE);
2505 if (clauses == NULL)
2506 return;
fd6481cf 2507 par_clauses = true;
1e8e9920 2508 }
2509
75a70cf9 2510 if (predicate)
2511 {
2512 gimple stmt;
2513 tree label_true, arm1, arm2;
2514
2515 label = create_artificial_label ();
2516 label_true = create_artificial_label ();
2517 arm1 = TREE_OPERAND (predicate, 0);
2518 arm2 = TREE_OPERAND (predicate, 1);
2519 gimplify_expr (&arm1, stmt_list, NULL, is_gimple_val, fb_rvalue);
2520 gimplify_expr (&arm2, stmt_list, NULL, is_gimple_val, fb_rvalue);
2521 stmt = gimple_build_cond (TREE_CODE (predicate), arm1, arm2,
2522 label_true, label);
2523 gimple_seq_add_stmt (stmt_list, stmt);
2524 gimple_seq_add_stmt (stmt_list, gimple_build_label (label_true));
2525 }
1e8e9920 2526
fd6481cf 2527 for (c = clauses; c ;)
1e8e9920 2528 {
2529 tree var, new_var;
2530
fd6481cf 2531 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE)
2532 {
2533 var = OMP_CLAUSE_DECL (c);
2534 new_var = lookup_decl (var, ctx);
1e8e9920 2535
75a70cf9 2536 if (OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c))
2537 {
2538 lower_omp (OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c), ctx);
2539 gimple_seq_add_seq (stmt_list,
2540 OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c));
2541 }
2542 OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c) = NULL;
1e8e9920 2543
fd6481cf 2544 x = build_outer_var_ref (var, ctx);
2545 if (is_reference (var))
2546 new_var = build_fold_indirect_ref (new_var);
2547 x = lang_hooks.decls.omp_clause_assign_op (c, x, new_var);
75a70cf9 2548 gimplify_and_add (x, stmt_list);
fd6481cf 2549 }
2550 c = OMP_CLAUSE_CHAIN (c);
2551 if (c == NULL && !par_clauses)
2552 {
2553 /* If this was a workshare clause, see if it had been combined
2554 with its parallel. In that case, continue looking for the
2555 clauses also on the parallel statement itself. */
2556 if (is_parallel_ctx (ctx))
2557 break;
2558
2559 ctx = ctx->outer;
2560 if (ctx == NULL || !is_parallel_ctx (ctx))
2561 break;
2562
75a70cf9 2563 c = find_omp_clause (gimple_omp_parallel_clauses (ctx->stmt),
fd6481cf 2564 OMP_CLAUSE_LASTPRIVATE);
2565 par_clauses = true;
2566 }
1e8e9920 2567 }
2568
75a70cf9 2569 if (label)
2570 gimple_seq_add_stmt (stmt_list, gimple_build_label (label));
1e8e9920 2571}
2572
773c5ba7 2573
1e8e9920 2574/* Generate code to implement the REDUCTION clauses. */
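
/* Two shapes come out of this function.  With exactly one scalar
   reduction clause the update becomes a single atomic operation,
   roughly

       OMP_ATOMIC: *&var = *&var OP var.1;

   otherwise all the updates are wrapped in a lock:

       GOMP_atomic_start ();
       var = var OP var.1;
       ...
       GOMP_atomic_end ();  */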
2575
2576static void
75a70cf9 2577lower_reduction_clauses (tree clauses, gimple_seq *stmt_seqp, omp_context *ctx)
1e8e9920 2578{
75a70cf9 2579 gimple_seq sub_seq = NULL;
2580 gimple stmt;
2581 tree x, c;
1e8e9920 2582 int count = 0;
2583
2584 /* First see if there is exactly one reduction clause. Use OMP_ATOMIC
2585 update in that case, otherwise use a lock. */
2586 for (c = clauses; c && count < 2; c = OMP_CLAUSE_CHAIN (c))
55d6e7cd 2587 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION)
1e8e9920 2588 {
2589 if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
2590 {
2591 /* Never use OMP_ATOMIC for array reductions. */
2592 count = -1;
2593 break;
2594 }
2595 count++;
2596 }
2597
2598 if (count == 0)
2599 return;
2600
2601 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
2602 {
2603 tree var, ref, new_var;
2604 enum tree_code code;
2605
55d6e7cd 2606 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_REDUCTION)
1e8e9920 2607 continue;
2608
2609 var = OMP_CLAUSE_DECL (c);
2610 new_var = lookup_decl (var, ctx);
2611 if (is_reference (var))
2612 new_var = build_fold_indirect_ref (new_var);
2613 ref = build_outer_var_ref (var, ctx);
2614 code = OMP_CLAUSE_REDUCTION_CODE (c);
773c5ba7 2615
2616 /* reduction(-:var) sums up the partial results, so it acts
2617 identically to reduction(+:var). */
1e8e9920 2618 if (code == MINUS_EXPR)
2619 code = PLUS_EXPR;
2620
2621 if (count == 1)
2622 {
2623 tree addr = build_fold_addr_expr (ref);
2624
2625 addr = save_expr (addr);
2626 ref = build1 (INDIRECT_REF, TREE_TYPE (TREE_TYPE (addr)), addr);
2627 x = fold_build2 (code, TREE_TYPE (ref), ref, new_var);
2628 x = build2 (OMP_ATOMIC, void_type_node, addr, x);
75a70cf9 2629 gimplify_and_add (x, stmt_seqp);
1e8e9920 2630 return;
2631 }
2632
2633 if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
2634 {
2635 tree placeholder = OMP_CLAUSE_REDUCTION_PLACEHOLDER (c);
2636
2637 if (is_reference (var))
2638 ref = build_fold_addr_expr (ref);
2639 SET_DECL_VALUE_EXPR (placeholder, ref);
2640 DECL_HAS_VALUE_EXPR_P (placeholder) = 1;
75a70cf9 2641 lower_omp (OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c), ctx);
2642 gimple_seq_add_seq (&sub_seq, OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c));
2643 OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c) = NULL;
1e8e9920 2644 OMP_CLAUSE_REDUCTION_PLACEHOLDER (c) = NULL;
2645 }
2646 else
2647 {
2648 x = build2 (code, TREE_TYPE (ref), ref, new_var);
2649 ref = build_outer_var_ref (var, ctx);
75a70cf9 2650 gimplify_assign (ref, x, &sub_seq);
1e8e9920 2651 }
2652 }
2653
75a70cf9 2654 stmt = gimple_build_call (built_in_decls[BUILT_IN_GOMP_ATOMIC_START], 0);
2655 gimple_seq_add_stmt (stmt_seqp, stmt);
1e8e9920 2656
75a70cf9 2657 gimple_seq_add_seq (stmt_seqp, sub_seq);
1e8e9920 2658
75a70cf9 2659 stmt = gimple_build_call (built_in_decls[BUILT_IN_GOMP_ATOMIC_END], 0);
2660 gimple_seq_add_stmt (stmt_seqp, stmt);
1e8e9920 2661}
2662
773c5ba7 2663
1e8e9920 2664/* Generate code to implement the COPYPRIVATE clauses. */
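
/* A sketch for `#pragma omp single copyprivate(x)': the thread that
   executed the single region fills in the copy structure (SLIST),

       .omp_copy_s.x = x;   (or = &x, when X is passed by reference)

   and the other threads copy it back out (RLIST):

       x = .omp_copy_s.x;  */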
2665
2666static void
75a70cf9 2667lower_copyprivate_clauses (tree clauses, gimple_seq *slist, gimple_seq *rlist,
1e8e9920 2668 omp_context *ctx)
2669{
2670 tree c;
2671
2672 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
2673 {
2674 tree var, ref, x;
2675 bool by_ref;
2676
55d6e7cd 2677 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_COPYPRIVATE)
1e8e9920 2678 continue;
2679
2680 var = OMP_CLAUSE_DECL (c);
e8a588af 2681 by_ref = use_pointer_for_field (var, NULL);
1e8e9920 2682
2683 ref = build_sender_ref (var, ctx);
87b31375 2684 x = lookup_decl_in_outer_ctx (var, ctx);
773c5ba7 2685 x = by_ref ? build_fold_addr_expr (x) : x;
75a70cf9 2686 gimplify_assign (ref, x, slist);
1e8e9920 2687
2688 ref = build_receiver_ref (var, by_ref, ctx);
2689 if (is_reference (var))
2690 {
2691 ref = build_fold_indirect_ref (ref);
2692 var = build_fold_indirect_ref (var);
2693 }
2694 x = lang_hooks.decls.omp_clause_assign_op (c, var, ref);
2695 gimplify_and_add (x, rlist);
2696 }
2697}
2698
773c5ba7 2699
1e8e9920 2700/* Generate code to implement the clauses, FIRSTPRIVATE, COPYIN, LASTPRIVATE,
2701 and REDUCTION from the sender (aka parent) side. */
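
/* For example (a sketch): firstprivate(a) makes the parent store
   `.omp_data_o.a = a' into ILIST before the region starts, while
   lastprivate(b), when B is not passed by reference, appends
   `b = .omp_data_o.b' to OLIST for after the region completes.  */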
2702
2703static void
75a70cf9 2704lower_send_clauses (tree clauses, gimple_seq *ilist, gimple_seq *olist,
2705 omp_context *ctx)
1e8e9920 2706{
2707 tree c;
2708
2709 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
2710 {
773c5ba7 2711 tree val, ref, x, var;
1e8e9920 2712 bool by_ref, do_in = false, do_out = false;
2713
55d6e7cd 2714 switch (OMP_CLAUSE_CODE (c))
1e8e9920 2715 {
fd6481cf 2716 case OMP_CLAUSE_PRIVATE:
2717 if (OMP_CLAUSE_PRIVATE_OUTER_REF (c))
2718 break;
2719 continue;
1e8e9920 2720 case OMP_CLAUSE_FIRSTPRIVATE:
2721 case OMP_CLAUSE_COPYIN:
2722 case OMP_CLAUSE_LASTPRIVATE:
2723 case OMP_CLAUSE_REDUCTION:
2724 break;
2725 default:
2726 continue;
2727 }
2728
87b31375 2729 val = OMP_CLAUSE_DECL (c);
2730 var = lookup_decl_in_outer_ctx (val, ctx);
773c5ba7 2731
f49d7bb5 2732 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_COPYIN
2733 && is_global_var (var))
2734 continue;
1e8e9920 2735 if (is_variable_sized (val))
2736 continue;
e8a588af 2737 by_ref = use_pointer_for_field (val, NULL);
1e8e9920 2738
55d6e7cd 2739 switch (OMP_CLAUSE_CODE (c))
1e8e9920 2740 {
fd6481cf 2741 case OMP_CLAUSE_PRIVATE:
1e8e9920 2742 case OMP_CLAUSE_FIRSTPRIVATE:
2743 case OMP_CLAUSE_COPYIN:
2744 do_in = true;
2745 break;
2746
2747 case OMP_CLAUSE_LASTPRIVATE:
2748 if (by_ref || is_reference (val))
2749 {
2750 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
2751 continue;
2752 do_in = true;
2753 }
2754 else
fd6481cf 2755 {
2756 do_out = true;
2757 if (lang_hooks.decls.omp_private_outer_ref (val))
2758 do_in = true;
2759 }
1e8e9920 2760 break;
2761
2762 case OMP_CLAUSE_REDUCTION:
2763 do_in = true;
2764 do_out = !(by_ref || is_reference (val));
2765 break;
2766
2767 default:
2768 gcc_unreachable ();
2769 }
2770
2771 if (do_in)
2772 {
2773 ref = build_sender_ref (val, ctx);
773c5ba7 2774 x = by_ref ? build_fold_addr_expr (var) : var;
75a70cf9 2775 gimplify_assign (ref, x, ilist);
fd6481cf 2776 if (is_task_ctx (ctx))
2777 DECL_ABSTRACT_ORIGIN (TREE_OPERAND (ref, 1)) = NULL;
1e8e9920 2778 }
773c5ba7 2779
1e8e9920 2780 if (do_out)
2781 {
2782 ref = build_sender_ref (val, ctx);
75a70cf9 2783 gimplify_assign (var, ref, olist);
1e8e9920 2784 }
2785 }
2786}
2787
75a70cf9 2788/* Generate code to implement SHARED from the sender (aka parent)
2789 side. This is trickier, since GIMPLE_OMP_PARALLEL_CLAUSES doesn't
2790 list things that got automatically shared. */
1e8e9920 2791
2792static void
75a70cf9 2793lower_send_shared_vars (gimple_seq *ilist, gimple_seq *olist, omp_context *ctx)
1e8e9920 2794{
fd6481cf 2795 tree var, ovar, nvar, f, x, record_type;
1e8e9920 2796
2797 if (ctx->record_type == NULL)
2798 return;
773c5ba7 2799
fd6481cf 2800 record_type = ctx->srecord_type ? ctx->srecord_type : ctx->record_type;
2801 for (f = TYPE_FIELDS (record_type); f ; f = TREE_CHAIN (f))
1e8e9920 2802 {
2803 ovar = DECL_ABSTRACT_ORIGIN (f);
2804 nvar = maybe_lookup_decl (ovar, ctx);
2805 if (!nvar || !DECL_HAS_VALUE_EXPR_P (nvar))
2806 continue;
2807
773c5ba7 2808 /* If CTX is a nested parallel directive, find the immediately
2809 enclosing parallel or workshare construct that contains a
2810 mapping for OVAR. */
87b31375 2811 var = lookup_decl_in_outer_ctx (ovar, ctx);
773c5ba7 2812
e8a588af 2813 if (use_pointer_for_field (ovar, ctx))
1e8e9920 2814 {
2815 x = build_sender_ref (ovar, ctx);
773c5ba7 2816 var = build_fold_addr_expr (var);
75a70cf9 2817 gimplify_assign (x, var, ilist);
1e8e9920 2818 }
2819 else
2820 {
2821 x = build_sender_ref (ovar, ctx);
75a70cf9 2822 gimplify_assign (x, var, ilist);
1e8e9920 2823
fd6481cf 2824 if (!TREE_READONLY (var))
2825 {
2826 x = build_sender_ref (ovar, ctx);
75a70cf9 2827 gimplify_assign (var, x, olist);
fd6481cf 2828 }
1e8e9920 2829 }
2830 }
2831}
2832
75a70cf9 2833
2834/* A convenience function to build an empty GIMPLE_COND with just the
2835 condition. */
2836
2837static gimple
2838gimple_build_cond_empty (tree cond)
2839{
2840 enum tree_code pred_code;
2841 tree lhs, rhs;
2842
2843 gimple_cond_get_ops_from_tree (cond, &pred_code, &lhs, &rhs);
2844 return gimple_build_cond (pred_code, lhs, rhs, NULL_TREE, NULL_TREE);
2845}
2846
2847
1e8e9920 2848/* Build the function calls to GOMP_parallel_start etc to actually
773c5ba7 2849 generate the parallel operation. REGION is the parallel region
2850 being expanded. BB is the block where to insert the code. WS_ARGS
2851 will be set if this is a call to a combined parallel+workshare
2852 construct, it contains the list of additional arguments needed by
2853 the workshare construct. */
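
/* Schematically, the sequence emitted below is

       GOMP_parallel_start (child_fn, &.omp_data_o, nthreads);
       child_fn (&.omp_data_o);
       GOMP_parallel_end ();

   with GOMP_parallel_start replaced by the matching combined
   parallel+workshare entry point, taking the extra WS_ARGS, when one
   applies.  */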
1e8e9920 2854
2855static void
61e47ac8 2856expand_parallel_call (struct omp_region *region, basic_block bb,
75a70cf9 2857 gimple entry_stmt, tree ws_args)
1e8e9920 2858{
79acaae1 2859 tree t, t1, t2, val, cond, c, clauses;
75a70cf9 2860 gimple_stmt_iterator gsi;
2861 gimple stmt;
773c5ba7 2862 int start_ix;
2863
75a70cf9 2864 clauses = gimple_omp_parallel_clauses (entry_stmt);
773c5ba7 2865
334ec2d8 2866 /* Determine what flavor of GOMP_parallel_start we will be
773c5ba7 2867 emitting. */
2868 start_ix = BUILT_IN_GOMP_PARALLEL_START;
2869 if (is_combined_parallel (region))
2870 {
61e47ac8 2871 switch (region->inner->type)
773c5ba7 2872 {
75a70cf9 2873 case GIMPLE_OMP_FOR:
fd6481cf 2874 gcc_assert (region->inner->sched_kind != OMP_CLAUSE_SCHEDULE_AUTO);
f77459c5 2875 start_ix = BUILT_IN_GOMP_PARALLEL_LOOP_STATIC_START
fd6481cf 2876 + (region->inner->sched_kind
2877 == OMP_CLAUSE_SCHEDULE_RUNTIME
2878 ? 3 : region->inner->sched_kind);
61e47ac8 2879 break;
75a70cf9 2880 case GIMPLE_OMP_SECTIONS:
61e47ac8 2881 start_ix = BUILT_IN_GOMP_PARALLEL_SECTIONS_START;
2882 break;
2883 default:
2884 gcc_unreachable ();
773c5ba7 2885 }
773c5ba7 2886 }
1e8e9920 2887
2888 /* By default, the value of NUM_THREADS is zero (selected at run time)
2889 and there is no conditional. */
2890 cond = NULL_TREE;
2891 val = build_int_cst (unsigned_type_node, 0);
2892
2893 c = find_omp_clause (clauses, OMP_CLAUSE_IF);
2894 if (c)
2895 cond = OMP_CLAUSE_IF_EXPR (c);
2896
2897 c = find_omp_clause (clauses, OMP_CLAUSE_NUM_THREADS);
2898 if (c)
2899 val = OMP_CLAUSE_NUM_THREADS_EXPR (c);
2900
2901 /* Ensure 'val' is of the correct type. */
2902 val = fold_convert (unsigned_type_node, val);
2903
2904 /* If we found the clause 'if (cond)', build either
2905 (cond != 0) or (cond ? val : 1u). */
2906 if (cond)
2907 {
75a70cf9 2908 gimple_stmt_iterator gsi;
773c5ba7 2909
2910 cond = gimple_boolify (cond);
2911
1e8e9920 2912 if (integer_zerop (val))
79acaae1 2913 val = fold_build2 (EQ_EXPR, unsigned_type_node, cond,
2914 build_int_cst (TREE_TYPE (cond), 0));
1e8e9920 2915 else
773c5ba7 2916 {
2917 basic_block cond_bb, then_bb, else_bb;
79acaae1 2918 edge e, e_then, e_else;
75a70cf9 2919 tree tmp_then, tmp_else, tmp_join, tmp_var;
79acaae1 2920
2921 tmp_var = create_tmp_var (TREE_TYPE (val), NULL);
2922 if (gimple_in_ssa_p (cfun))
2923 {
75a70cf9 2924 tmp_then = make_ssa_name (tmp_var, NULL);
2925 tmp_else = make_ssa_name (tmp_var, NULL);
2926 tmp_join = make_ssa_name (tmp_var, NULL);
79acaae1 2927 }
2928 else
2929 {
2930 tmp_then = tmp_var;
2931 tmp_else = tmp_var;
2932 tmp_join = tmp_var;
2933 }
773c5ba7 2934
773c5ba7 2935 e = split_block (bb, NULL);
2936 cond_bb = e->src;
2937 bb = e->dest;
2938 remove_edge (e);
2939
2940 then_bb = create_empty_bb (cond_bb);
2941 else_bb = create_empty_bb (then_bb);
79acaae1 2942 set_immediate_dominator (CDI_DOMINATORS, then_bb, cond_bb);
2943 set_immediate_dominator (CDI_DOMINATORS, else_bb, cond_bb);
773c5ba7 2944
75a70cf9 2945 stmt = gimple_build_cond_empty (cond);
2946 gsi = gsi_start_bb (cond_bb);
2947 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
773c5ba7 2948
75a70cf9 2949 gsi = gsi_start_bb (then_bb);
2950 stmt = gimple_build_assign (tmp_then, val);
2951 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
773c5ba7 2952
75a70cf9 2953 gsi = gsi_start_bb (else_bb);
2954 stmt = gimple_build_assign
2955 (tmp_else, build_int_cst (unsigned_type_node, 1));
2956 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
773c5ba7 2957
2958 make_edge (cond_bb, then_bb, EDGE_TRUE_VALUE);
2959 make_edge (cond_bb, else_bb, EDGE_FALSE_VALUE);
79acaae1 2960 e_then = make_edge (then_bb, bb, EDGE_FALLTHRU);
2961 e_else = make_edge (else_bb, bb, EDGE_FALLTHRU);
773c5ba7 2962
79acaae1 2963 if (gimple_in_ssa_p (cfun))
2964 {
75a70cf9 2965 gimple phi = create_phi_node (tmp_join, bb);
79acaae1 2966 SSA_NAME_DEF_STMT (tmp_join) = phi;
2967 add_phi_arg (phi, tmp_then, e_then);
2968 add_phi_arg (phi, tmp_else, e_else);
2969 }
2970
2971 val = tmp_join;
773c5ba7 2972 }
2973
75a70cf9 2974 gsi = gsi_start_bb (bb);
2975 val = force_gimple_operand_gsi (&gsi, val, true, NULL_TREE,
2976 false, GSI_CONTINUE_LINKING);
1e8e9920 2977 }
2978
75a70cf9 2979 gsi = gsi_last_bb (bb);
2980 t = gimple_omp_parallel_data_arg (entry_stmt);
1e8e9920 2981 if (t == NULL)
c2f47e15 2982 t1 = null_pointer_node;
1e8e9920 2983 else
c2f47e15 2984 t1 = build_fold_addr_expr (t);
75a70cf9 2985 t2 = build_fold_addr_expr (gimple_omp_parallel_child_fn (entry_stmt));
773c5ba7 2986
2987 if (ws_args)
c2f47e15 2988 {
2989 tree args = tree_cons (NULL, t2,
2990 tree_cons (NULL, t1,
2991 tree_cons (NULL, val, ws_args)));
2992 t = build_function_call_expr (built_in_decls[start_ix], args);
2993 }
2994 else
2995 t = build_call_expr (built_in_decls[start_ix], 3, t2, t1, val);
773c5ba7 2996
75a70cf9 2997 force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
2998 false, GSI_CONTINUE_LINKING);
1e8e9920 2999
75a70cf9 3000 t = gimple_omp_parallel_data_arg (entry_stmt);
1e8e9920 3001 if (t == NULL)
3002 t = null_pointer_node;
3003 else
3004 t = build_fold_addr_expr (t);
75a70cf9 3005 t = build_call_expr (gimple_omp_parallel_child_fn (entry_stmt), 1, t);
3006 force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3007 false, GSI_CONTINUE_LINKING);
1e8e9920 3008
c2f47e15 3009 t = build_call_expr (built_in_decls[BUILT_IN_GOMP_PARALLEL_END], 0);
75a70cf9 3010 force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3011 false, GSI_CONTINUE_LINKING);
1e8e9920 3012}
3013
773c5ba7 3014
fd6481cf 3015/* Build the function call to GOMP_task to actually
3016 generate the task operation. BB is the block where to insert the code. */
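
/* Schematically, the call built here is

       GOMP_task (child_fn, &.omp_data_o, cpyfn, arg_size, arg_align,
                  if_cond, flags);

   where CPYFN may be NULL and FLAGS currently only encodes the
   untied clause.  */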
3017
3018static void
75a70cf9 3019expand_task_call (basic_block bb, gimple entry_stmt)
fd6481cf 3020{
3021 tree t, t1, t2, t3, flags, cond, c, clauses;
75a70cf9 3022 gimple_stmt_iterator gsi;
fd6481cf 3023
75a70cf9 3024 clauses = gimple_omp_task_clauses (entry_stmt);
fd6481cf 3025
fd6481cf 3026 c = find_omp_clause (clauses, OMP_CLAUSE_IF);
3027 if (c)
3028 cond = gimple_boolify (OMP_CLAUSE_IF_EXPR (c));
3029 else
3030 cond = boolean_true_node;
3031
3032 c = find_omp_clause (clauses, OMP_CLAUSE_UNTIED);
3033 flags = build_int_cst (unsigned_type_node, (c ? 1 : 0));
3034
75a70cf9 3035 gsi = gsi_last_bb (bb);
3036 t = gimple_omp_task_data_arg (entry_stmt);
fd6481cf 3037 if (t == NULL)
3038 t2 = null_pointer_node;
3039 else
3040 t2 = build_fold_addr_expr (t);
75a70cf9 3041 t1 = build_fold_addr_expr (gimple_omp_task_child_fn (entry_stmt));
3042 t = gimple_omp_task_copy_fn (entry_stmt);
fd6481cf 3043 if (t == NULL)
3044 t3 = null_pointer_node;
3045 else
3046 t3 = build_fold_addr_expr (t);
3047
3048 t = build_call_expr (built_in_decls[BUILT_IN_GOMP_TASK], 7, t1, t2, t3,
75a70cf9 3049 gimple_omp_task_arg_size (entry_stmt),
3050 gimple_omp_task_arg_align (entry_stmt), cond, flags);
fd6481cf 3051
75a70cf9 3052 force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3053 false, GSI_CONTINUE_LINKING);
fd6481cf 3054}
3055
3056
75a70cf9 3057/* If exceptions are enabled, wrap the statements in BODY in a MUST_NOT_THROW
3058 catch handler and return it. This prevents programs from violating the
3059 structured block semantics with throws. */
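
/* The resulting sequence is, roughly,

       try
         { BODY }
       catch
         { <must-not-throw filter: language cleanup action or trap> }  */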
1e8e9920 3060
75a70cf9 3061static gimple_seq
3062maybe_catch_exception (gimple_seq body)
1e8e9920 3063{
75a70cf9 3064 gimple f, t;
1e8e9920 3065
3066 if (!flag_exceptions)
75a70cf9 3067 return body;
1e8e9920 3068
3069 if (lang_protect_cleanup_actions)
3070 t = lang_protect_cleanup_actions ();
3071 else
75a70cf9 3072 t = gimple_build_call (built_in_decls[BUILT_IN_TRAP], 0);
3073
3074 f = gimple_build_eh_filter (NULL, gimple_seq_alloc_with_stmt (t));
3075 gimple_eh_filter_set_must_not_throw (f, true);
3076
3077 t = gimple_build_try (body, gimple_seq_alloc_with_stmt (f),
3078 GIMPLE_TRY_CATCH);
1e8e9920 3079
75a70cf9 3080 return gimple_seq_alloc_with_stmt (t);
1e8e9920 3081}
3082
773c5ba7 3083/* Chain all the DECLs in LIST by their TREE_CHAIN fields. */
1e8e9920 3084
773c5ba7 3085static tree
3086list2chain (tree list)
1e8e9920 3087{
773c5ba7 3088 tree t;
1e8e9920 3089
773c5ba7 3090 for (t = list; t; t = TREE_CHAIN (t))
3091 {
3092 tree var = TREE_VALUE (t);
3093 if (TREE_CHAIN (t))
3094 TREE_CHAIN (var) = TREE_VALUE (TREE_CHAIN (t));
3095 else
3096 TREE_CHAIN (var) = NULL_TREE;
3097 }
1e8e9920 3098
773c5ba7 3099 return list ? TREE_VALUE (list) : NULL_TREE;
3100}
1e8e9920 3101
1e8e9920 3102
773c5ba7 3103/* Remove barriers in REGION->EXIT's block. Note that this is only
75a70cf9 3104 valid for GIMPLE_OMP_PARALLEL regions. Since the end of a parallel region
3105 is an implicit barrier, any workshare inside the GIMPLE_OMP_PARALLEL that
3106 left a barrier at the end of the GIMPLE_OMP_PARALLEL region can now be
773c5ba7 3107 removed. */
1e8e9920 3108
773c5ba7 3109static void
3110remove_exit_barrier (struct omp_region *region)
3111{
75a70cf9 3112 gimple_stmt_iterator gsi;
773c5ba7 3113 basic_block exit_bb;
61e47ac8 3114 edge_iterator ei;
3115 edge e;
75a70cf9 3116 gimple stmt;
1e8e9920 3117
61e47ac8 3118 exit_bb = region->exit;
1e8e9920 3119
5056ba1a 3120 /* If the parallel region doesn't return, we don't have REGION->EXIT
3121 block at all. */
3122 if (! exit_bb)
3123 return;
3124
75a70cf9 3125 /* The last insn in the block will be the parallel's GIMPLE_OMP_RETURN. The
3126 workshare's GIMPLE_OMP_RETURN will be in a preceding block. The kinds of
61e47ac8 3127 statements that can appear in between are extremely limited -- no
3128 memory operations at all. Here, we allow nothing at all, so the
75a70cf9 3129 only thing we allow to precede this GIMPLE_OMP_RETURN is a label. */
3130 gsi = gsi_last_bb (exit_bb);
3131 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_RETURN);
3132 gsi_prev (&gsi);
3133 if (!gsi_end_p (gsi) && gimple_code (gsi_stmt (gsi)) != GIMPLE_LABEL)
773c5ba7 3134 return;
1e8e9920 3135
61e47ac8 3136 FOR_EACH_EDGE (e, ei, exit_bb->preds)
3137 {
75a70cf9 3138 gsi = gsi_last_bb (e->src);
3139 if (gsi_end_p (gsi))
61e47ac8 3140 continue;
75a70cf9 3141 stmt = gsi_stmt (gsi);
3142 if (gimple_code (stmt) == GIMPLE_OMP_RETURN)
3143 gimple_omp_return_set_nowait (stmt);
61e47ac8 3144 }
1e8e9920 3145}
3146
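/* Walk the region tree rooted at REGION, removing the exit barrier
   from every parallel region found.  */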
61e47ac8 3147static void
3148remove_exit_barriers (struct omp_region *region)
3149{
75a70cf9 3150 if (region->type == GIMPLE_OMP_PARALLEL)
61e47ac8 3151 remove_exit_barrier (region);
3152
3153 if (region->inner)
3154 {
3155 region = region->inner;
3156 remove_exit_barriers (region);
3157 while (region->next)
3158 {
3159 region = region->next;
3160 remove_exit_barriers (region);
3161 }
3162 }
3163}
773c5ba7 3164
658b4427 3165/* Optimize omp_get_thread_num () and omp_get_num_threads ()
3166 calls. These can't be declared as const functions, but
3167 within one parallel body they are constant, so they can be
3168 transformed there into __builtin_omp_get_{thread_num,num_threads} ()
fd6481cf 3169 which are declared const. Similarly for a task body, except
 3170 that in an untied task omp_get_thread_num () can change at any task
3171 scheduling point. */
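
/* E.g., within a parallel body,

       i = omp_get_thread_num ();

   becomes

       i = __builtin_omp_get_thread_num ();

   which later passes can then CSE, the builtin being declared
   const.  */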
658b4427 3172
3173static void
75a70cf9 3174optimize_omp_library_calls (gimple entry_stmt)
658b4427 3175{
3176 basic_block bb;
75a70cf9 3177 gimple_stmt_iterator gsi;
658b4427 3178 tree thr_num_id
3179 = DECL_ASSEMBLER_NAME (built_in_decls [BUILT_IN_OMP_GET_THREAD_NUM]);
3180 tree num_thr_id
3181 = DECL_ASSEMBLER_NAME (built_in_decls [BUILT_IN_OMP_GET_NUM_THREADS]);
75a70cf9 3182 bool untied_task = (gimple_code (entry_stmt) == GIMPLE_OMP_TASK
3183 && find_omp_clause (gimple_omp_task_clauses (entry_stmt),
fd6481cf 3184 OMP_CLAUSE_UNTIED) != NULL);
658b4427 3185
3186 FOR_EACH_BB (bb)
75a70cf9 3187 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
658b4427 3188 {
75a70cf9 3189 gimple call = gsi_stmt (gsi);
658b4427 3190 tree decl;
3191
75a70cf9 3192 if (is_gimple_call (call)
3193 && (decl = gimple_call_fndecl (call))
658b4427 3194 && DECL_EXTERNAL (decl)
3195 && TREE_PUBLIC (decl)
3196 && DECL_INITIAL (decl) == NULL)
3197 {
3198 tree built_in;
3199
3200 if (DECL_NAME (decl) == thr_num_id)
fd6481cf 3201 {
3202 /* In #pragma omp task untied omp_get_thread_num () can change
3203 during the execution of the task region. */
3204 if (untied_task)
3205 continue;
3206 built_in = built_in_decls [BUILT_IN_OMP_GET_THREAD_NUM];
3207 }
658b4427 3208 else if (DECL_NAME (decl) == num_thr_id)
3209 built_in = built_in_decls [BUILT_IN_OMP_GET_NUM_THREADS];
3210 else
3211 continue;
3212
3213 if (DECL_ASSEMBLER_NAME (decl) != DECL_ASSEMBLER_NAME (built_in)
75a70cf9 3214 || gimple_call_num_args (call) != 0)
658b4427 3215 continue;
3216
3217 if (flag_exceptions && !TREE_NOTHROW (decl))
3218 continue;
3219
3220 if (TREE_CODE (TREE_TYPE (decl)) != FUNCTION_TYPE
3221 || TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (decl)))
3222 != TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (built_in))))
3223 continue;
3224
0acacf9e 3225 gimple_call_set_fndecl (call, built_in);
658b4427 3226 }
3227 }
3228}
3229
fd6481cf 3230/* Expand the OpenMP parallel or task directive starting at REGION. */
1e8e9920 3231
3232static void
fd6481cf 3233expand_omp_taskreg (struct omp_region *region)
1e8e9920 3234{
773c5ba7 3235 basic_block entry_bb, exit_bb, new_bb;
87d4aa85 3236 struct function *child_cfun;
1d22f541 3237 tree child_fn, block, t, ws_args, *tp;
75a70cf9 3238 gimple_stmt_iterator gsi;
3239 gimple entry_stmt, stmt;
773c5ba7 3240 edge e;
3241
61e47ac8 3242 entry_stmt = last_stmt (region->entry);
75a70cf9 3243 child_fn = gimple_omp_taskreg_child_fn (entry_stmt);
773c5ba7 3244 child_cfun = DECL_STRUCT_FUNCTION (child_fn);
b3a3ddec 3245 /* If this function has already been instrumented, make sure
3246 the child function isn't instrumented again. */
3247 child_cfun->after_tree_profile = cfun->after_tree_profile;
773c5ba7 3248
61e47ac8 3249 entry_bb = region->entry;
3250 exit_bb = region->exit;
773c5ba7 3251
773c5ba7 3252 if (is_combined_parallel (region))
61e47ac8 3253 ws_args = region->ws_args;
773c5ba7 3254 else
3255 ws_args = NULL_TREE;
1e8e9920 3256
61e47ac8 3257 if (child_cfun->cfg)
1e8e9920 3258 {
773c5ba7 3259 /* Due to inlining, it may happen that we have already outlined
3260 the region, in which case all we need to do is make the
3261 sub-graph unreachable and emit the parallel call. */
3262 edge entry_succ_e, exit_succ_e;
75a70cf9 3263 gimple_stmt_iterator gsi;
773c5ba7 3264
3265 entry_succ_e = single_succ_edge (entry_bb);
773c5ba7 3266
75a70cf9 3267 gsi = gsi_last_bb (entry_bb);
3268 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_PARALLEL
3269 || gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_TASK);
3270 gsi_remove (&gsi, true);
773c5ba7 3271
3272 new_bb = entry_bb;
03ed154b 3273 if (exit_bb)
3274 {
3275 exit_succ_e = single_succ_edge (exit_bb);
3276 make_edge (new_bb, exit_succ_e->dest, EDGE_FALLTHRU);
3277 }
79acaae1 3278 remove_edge_and_dominated_blocks (entry_succ_e);
1e8e9920 3279 }
773c5ba7 3280 else
3281 {
3282 /* If the parallel region needs data sent from the parent
3480139d 3283 function, then the very first statement (except possible
3284 tree profile counter updates) of the parallel body
773c5ba7 3285 is a copy assignment .OMP_DATA_I = &.OMP_DATA_O. Since
3286 &.OMP_DATA_O is passed as an argument to the child function,
3287 we need to replace it with the argument as seen by the child
3288 function.
3289
3290 In most cases, this will end up being the identity assignment
3291 .OMP_DATA_I = .OMP_DATA_I. However, if the parallel body had
3292 a function call that has been inlined, the original PARM_DECL
3293 .OMP_DATA_I may have been converted into a different local
 3294	 variable, in which case we need to keep the assignment. */
75a70cf9 3295 if (gimple_omp_taskreg_data_arg (entry_stmt))
773c5ba7 3296 {
3297 basic_block entry_succ_bb = single_succ (entry_bb);
75a70cf9 3298 gimple_stmt_iterator gsi;
3299 tree arg, narg;
3300 gimple parcopy_stmt = NULL;
1e8e9920 3301
75a70cf9 3302 for (gsi = gsi_start_bb (entry_succ_bb); ; gsi_next (&gsi))
3480139d 3303 {
75a70cf9 3304 gimple stmt;
3480139d 3305
75a70cf9 3306 gcc_assert (!gsi_end_p (gsi));
3307 stmt = gsi_stmt (gsi);
3308 if (gimple_code (stmt) != GIMPLE_ASSIGN)
cc6b725b 3309 continue;
3310
75a70cf9 3311 if (gimple_num_ops (stmt) == 2)
3480139d 3312 {
75a70cf9 3313 tree arg = gimple_assign_rhs1 (stmt);
3314
 3315		    /* We ignore the subcode because we're
3316 effectively doing a STRIP_NOPS. */
3317
3318 if (TREE_CODE (arg) == ADDR_EXPR
3319 && TREE_OPERAND (arg, 0)
3320 == gimple_omp_taskreg_data_arg (entry_stmt))
3321 {
3322 parcopy_stmt = stmt;
3323 break;
3324 }
3480139d 3325 }
3326 }
79acaae1 3327
75a70cf9 3328 gcc_assert (parcopy_stmt != NULL);
79acaae1 3329 arg = DECL_ARGUMENTS (child_fn);
3330
3331 if (!gimple_in_ssa_p (cfun))
3332 {
75a70cf9 3333 if (gimple_assign_lhs (parcopy_stmt) == arg)
3334 gsi_remove (&gsi, true);
79acaae1 3335 else
75a70cf9 3336 {
3337 /* ?? Is setting the subcode really necessary ?? */
3338 gimple_omp_set_subcode (parcopy_stmt, TREE_CODE (arg));
3339 gimple_assign_set_rhs1 (parcopy_stmt, arg);
3340 }
79acaae1 3341 }
3342 else
3343 {
3344 /* If we are in ssa form, we must load the value from the default
 3345	     definition of the argument. That definition should not exist
 3346	     yet, since the argument is never used uninitialized. */
3347 gcc_assert (gimple_default_def (cfun, arg) == NULL);
75a70cf9 3348 narg = make_ssa_name (arg, gimple_build_nop ());
79acaae1 3349 set_default_def (arg, narg);
75a70cf9 3350 /* ?? Is setting the subcode really necessary ?? */
3351 gimple_omp_set_subcode (parcopy_stmt, TREE_CODE (narg));
3352 gimple_assign_set_rhs1 (parcopy_stmt, narg);
79acaae1 3353 update_stmt (parcopy_stmt);
3354 }
773c5ba7 3355 }
3356
3357 /* Declare local variables needed in CHILD_CFUN. */
3358 block = DECL_INITIAL (child_fn);
edb7afe8 3359 BLOCK_VARS (block) = list2chain (child_cfun->local_decls);
75a70cf9 3360 DECL_SAVED_TREE (child_fn) = NULL;
3361 gimple_set_body (child_fn, bb_seq (single_succ (entry_bb)));
1d22f541 3362 TREE_USED (block) = 1;
773c5ba7 3363
79acaae1 3364 /* Reset DECL_CONTEXT on function arguments. */
773c5ba7 3365 for (t = DECL_ARGUMENTS (child_fn); t; t = TREE_CHAIN (t))
3366 DECL_CONTEXT (t) = child_fn;
3367
75a70cf9 3368 /* Split ENTRY_BB at GIMPLE_OMP_PARALLEL or GIMPLE_OMP_TASK,
3369 so that it can be moved to the child function. */
3370 gsi = gsi_last_bb (entry_bb);
3371 stmt = gsi_stmt (gsi);
3372 gcc_assert (stmt && (gimple_code (stmt) == GIMPLE_OMP_PARALLEL
3373 || gimple_code (stmt) == GIMPLE_OMP_TASK));
3374 gsi_remove (&gsi, true);
3375 e = split_block (entry_bb, stmt);
773c5ba7 3376 entry_bb = e->dest;
3377 single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
3378
75a70cf9 3379 /* Convert GIMPLE_OMP_RETURN into a RETURN_EXPR. */
5056ba1a 3380 if (exit_bb)
3381 {
75a70cf9 3382 gsi = gsi_last_bb (exit_bb);
3383 gcc_assert (!gsi_end_p (gsi)
3384 && gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_RETURN);
3385 stmt = gimple_build_return (NULL);
3386 gsi_insert_after (&gsi, stmt, GSI_SAME_STMT);
3387 gsi_remove (&gsi, true);
5056ba1a 3388 }
79acaae1 3389
3390 /* Move the parallel region into CHILD_CFUN. */
3391
3392 if (gimple_in_ssa_p (cfun))
3393 {
3394 push_cfun (child_cfun);
bcaa2770 3395 init_tree_ssa (child_cfun);
79acaae1 3396 init_ssa_operands ();
3397 cfun->gimple_df->in_ssa_p = true;
3398 pop_cfun ();
1d22f541 3399 block = NULL_TREE;
79acaae1 3400 }
1d22f541 3401 else
75a70cf9 3402 block = gimple_block (entry_stmt);
1d22f541 3403
3404 new_bb = move_sese_region_to_fn (child_cfun, entry_bb, exit_bb, block);
79acaae1 3405 if (exit_bb)
3406 single_succ_edge (new_bb)->flags = EDGE_FALLTHRU;
3407
1d22f541 3408 /* Remove non-local VAR_DECLs from child_cfun->local_decls list. */
3409 for (tp = &child_cfun->local_decls; *tp; )
3410 if (DECL_CONTEXT (TREE_VALUE (*tp)) != cfun->decl)
3411 tp = &TREE_CHAIN (*tp);
3412 else
3413 *tp = TREE_CHAIN (*tp);
3414
79acaae1 3415 /* Inform the callgraph about the new function. */
3416 DECL_STRUCT_FUNCTION (child_fn)->curr_properties
3417 = cfun->curr_properties;
3418 cgraph_add_new_function (child_fn, true);
3419
3420 /* Fix the callgraph edges for child_cfun. Those for cfun will be
3421 fixed in a following pass. */
3422 push_cfun (child_cfun);
658b4427 3423 if (optimize)
fd6481cf 3424 optimize_omp_library_calls (entry_stmt);
79acaae1 3425 rebuild_cgraph_edges ();
fbe86b1b 3426
3427 /* Some EH regions might become dead, see PR34608. If
3428 pass_cleanup_cfg isn't the first pass to happen with the
3429 new child, these dead EH edges might cause problems.
3430 Clean them up now. */
3431 if (flag_exceptions)
3432 {
3433 basic_block bb;
3434 tree save_current = current_function_decl;
3435 bool changed = false;
3436
3437 current_function_decl = child_fn;
3438 FOR_EACH_BB (bb)
75a70cf9 3439 changed |= gimple_purge_dead_eh_edges (bb);
fbe86b1b 3440 if (changed)
3441 cleanup_tree_cfg ();
3442 current_function_decl = save_current;
3443 }
79acaae1 3444 pop_cfun ();
773c5ba7 3445 }
cb7f680b 3446
773c5ba7 3447 /* Emit a library call to launch the children threads. */
75a70cf9 3448 if (gimple_code (entry_stmt) == GIMPLE_OMP_PARALLEL)
fd6481cf 3449 expand_parallel_call (region, new_bb, entry_stmt, ws_args);
3450 else
3451 expand_task_call (new_bb, entry_stmt);
28c92cbb 3452 update_ssa (TODO_update_ssa_only_virtuals);
1e8e9920 3453}
3454
773c5ba7 3455
3456/* A subroutine of expand_omp_for. Generate code for a parallel
1e8e9920 3457 loop with any schedule. Given parameters:
3458
3459 for (V = N1; V cond N2; V += STEP) BODY;
3460
3461 where COND is "<" or ">", we generate pseudocode
3462
3463 more = GOMP_loop_foo_start (N1, N2, STEP, CHUNK, &istart0, &iend0);
773c5ba7 3464 if (more) goto L0; else goto L3;
1e8e9920 3465 L0:
3466 V = istart0;
3467 iend = iend0;
3468 L1:
3469 BODY;
3470 V += STEP;
773c5ba7 3471 if (V cond iend) goto L1; else goto L2;
1e8e9920 3472 L2:
773c5ba7 3473 if (GOMP_loop_foo_next (&istart0, &iend0)) goto L0; else goto L3;
3474 L3:
1e8e9920 3475
773c5ba7 3476 If this is a combined omp parallel loop, instead of the call to
fd6481cf 3477 GOMP_loop_foo_start, we call GOMP_loop_foo_next.
3478
3479 For collapsed loops, given parameters:
3480 collapse(3)
3481 for (V1 = N11; V1 cond1 N12; V1 += STEP1)
3482 for (V2 = N21; V2 cond2 N22; V2 += STEP2)
3483 for (V3 = N31; V3 cond3 N32; V3 += STEP3)
3484 BODY;
3485
3486 we generate pseudocode
3487
3488 if (cond3 is <)
3489 adj = STEP3 - 1;
3490 else
3491 adj = STEP3 + 1;
3492 count3 = (adj + N32 - N31) / STEP3;
3493 if (cond2 is <)
3494 adj = STEP2 - 1;
3495 else
3496 adj = STEP2 + 1;
3497 count2 = (adj + N22 - N21) / STEP2;
3498 if (cond1 is <)
3499 adj = STEP1 - 1;
3500 else
3501 adj = STEP1 + 1;
3502 count1 = (adj + N12 - N11) / STEP1;
3503 count = count1 * count2 * count3;
3504 more = GOMP_loop_foo_start (0, count, 1, CHUNK, &istart0, &iend0);
3505 if (more) goto L0; else goto L3;
3506 L0:
3507 V = istart0;
3508 T = V;
3509 V3 = N31 + (T % count3) * STEP3;
3510 T = T / count3;
3511 V2 = N21 + (T % count2) * STEP2;
3512 T = T / count2;
3513 V1 = N11 + T * STEP1;
3514 iend = iend0;
3515 L1:
3516 BODY;
3517 V += 1;
3518 if (V < iend) goto L10; else goto L2;
3519 L10:
3520 V3 += STEP3;
3521 if (V3 cond3 N32) goto L1; else goto L11;
3522 L11:
3523 V3 = N31;
3524 V2 += STEP2;
3525 if (V2 cond2 N22) goto L1; else goto L12;
3526 L12:
3527 V2 = N21;
3528 V1 += STEP1;
3529 goto L1;
3530 L2:
3531 if (GOMP_loop_foo_next (&istart0, &iend0)) goto L0; else goto L3;
3532 L3:
3533
3534 */
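/* A user-level sketch of the protocol above (not compiler code; kept
   under "#if 0").  It assumes the libgomp entry points for the dynamic
   schedule -- GOMP_loop_dynamic_start, GOMP_loop_dynamic_next and
   GOMP_loop_end -- and a hypothetical "body" placeholder; in the
   combined parallel case the first call would be GOMP_loop_dynamic_next
   instead, as noted above.  */
#if 0
#include <stdbool.h>

extern bool GOMP_loop_dynamic_start (long start, long end, long incr,
                                     long chunk_size, long *istart,
                                     long *iend);
extern bool GOMP_loop_dynamic_next (long *istart, long *iend);
extern void GOMP_loop_end (void);
extern void body (long);

static void
worker (long n1, long n2, long step, long chunk)
{
  long istart0, iend0, v;

  if (GOMP_loop_dynamic_start (n1, n2, step, chunk, &istart0, &iend0))
    do
      {
        /* L0/L1: run the chunk [istart0, iend0) just handed out.  */
        for (v = istart0; v < iend0; v += step)
          body (v);
      }
    while (GOMP_loop_dynamic_next (&istart0, &iend0));  /* L2 */
  GOMP_loop_end ();  /* L3: emitted by the loop cleanup code below.  */
}
#endif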
1e8e9920 3535
61e47ac8 3536static void
773c5ba7 3537expand_omp_for_generic (struct omp_region *region,
3538 struct omp_for_data *fd,
1e8e9920 3539 enum built_in_function start_fn,
3540 enum built_in_function next_fn)
3541{
75a70cf9 3542 tree type, istart0, iend0, iend;
fd6481cf 3543 tree t, vmain, vback, bias = NULL_TREE;
3544 basic_block entry_bb, cont_bb, exit_bb, l0_bb, l1_bb, collapse_bb;
03ed154b 3545 basic_block l2_bb = NULL, l3_bb = NULL;
75a70cf9 3546 gimple_stmt_iterator gsi;
3547 gimple stmt;
773c5ba7 3548 bool in_combined_parallel = is_combined_parallel (region);
ac6e3339 3549 bool broken_loop = region->cont == NULL;
79acaae1 3550 edge e, ne;
fd6481cf 3551 tree *counts = NULL;
3552 int i;
ac6e3339 3553
3554 gcc_assert (!broken_loop || !in_combined_parallel);
fd6481cf 3555 gcc_assert (fd->iter_type == long_integer_type_node
3556 || !in_combined_parallel);
1e8e9920 3557
fd6481cf 3558 type = TREE_TYPE (fd->loop.v);
3559 istart0 = create_tmp_var (fd->iter_type, ".istart0");
3560 iend0 = create_tmp_var (fd->iter_type, ".iend0");
6d63fc03 3561 TREE_ADDRESSABLE (istart0) = 1;
3562 TREE_ADDRESSABLE (iend0) = 1;
79acaae1 3563 if (gimple_in_ssa_p (cfun))
3564 {
3565 add_referenced_var (istart0);
3566 add_referenced_var (iend0);
3567 }
1e8e9920 3568
fd6481cf 3569 /* See if we need to bias by LLONG_MIN. */
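  /* That is: a signed long long iteration space driven through the
     unsigned long long runtime interface is shifted by adding
     0x8000000000000000 to both bounds, so the signed range maps
     monotonically onto the unsigned range; the same bias is
     subtracted again when istart0/iend0 are copied back into V.  */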
3570 if (fd->iter_type == long_long_unsigned_type_node
3571 && TREE_CODE (type) == INTEGER_TYPE
3572 && !TYPE_UNSIGNED (type))
3573 {
3574 tree n1, n2;
3575
3576 if (fd->loop.cond_code == LT_EXPR)
3577 {
3578 n1 = fd->loop.n1;
3579 n2 = fold_build2 (PLUS_EXPR, type, fd->loop.n2, fd->loop.step);
3580 }
3581 else
3582 {
3583 n1 = fold_build2 (MINUS_EXPR, type, fd->loop.n2, fd->loop.step);
3584 n2 = fd->loop.n1;
3585 }
3586 if (TREE_CODE (n1) != INTEGER_CST
3587 || TREE_CODE (n2) != INTEGER_CST
3588 || ((tree_int_cst_sgn (n1) < 0) ^ (tree_int_cst_sgn (n2) < 0)))
3589 bias = fold_convert (fd->iter_type, TYPE_MIN_VALUE (type));
3590 }
3591
61e47ac8 3592 entry_bb = region->entry;
03ed154b 3593 cont_bb = region->cont;
fd6481cf 3594 collapse_bb = NULL;
ac6e3339 3595 gcc_assert (EDGE_COUNT (entry_bb->succs) == 2);
3596 gcc_assert (broken_loop
3597 || BRANCH_EDGE (entry_bb)->dest == FALLTHRU_EDGE (cont_bb)->dest);
3598 l0_bb = split_edge (FALLTHRU_EDGE (entry_bb));
3599 l1_bb = single_succ (l0_bb);
3600 if (!broken_loop)
03ed154b 3601 {
3602 l2_bb = create_empty_bb (cont_bb);
ac6e3339 3603 gcc_assert (BRANCH_EDGE (cont_bb)->dest == l1_bb);
3604 gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
03ed154b 3605 }
ac6e3339 3606 else
3607 l2_bb = NULL;
3608 l3_bb = BRANCH_EDGE (entry_bb)->dest;
3609 exit_bb = region->exit;
773c5ba7 3610
75a70cf9 3611 gsi = gsi_last_bb (entry_bb);
fd6481cf 3612
75a70cf9 3613 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
fd6481cf 3614 if (fd->collapse > 1)
3615 {
 3616 /* Collapsed loops need work for expansion in SSA form. */
3617 gcc_assert (!gimple_in_ssa_p (cfun));
3618 counts = (tree *) alloca (fd->collapse * sizeof (tree));
3619 for (i = 0; i < fd->collapse; i++)
3620 {
3621 tree itype = TREE_TYPE (fd->loops[i].v);
3622
3623 if (POINTER_TYPE_P (itype))
3624 itype = lang_hooks.types.type_for_size (TYPE_PRECISION (itype), 0);
3625 t = build_int_cst (itype, (fd->loops[i].cond_code == LT_EXPR
3626 ? -1 : 1));
3627 t = fold_build2 (PLUS_EXPR, itype,
3628 fold_convert (itype, fd->loops[i].step), t);
3629 t = fold_build2 (PLUS_EXPR, itype, t,
3630 fold_convert (itype, fd->loops[i].n2));
3631 t = fold_build2 (MINUS_EXPR, itype, t,
3632 fold_convert (itype, fd->loops[i].n1));
3633 if (TYPE_UNSIGNED (itype) && fd->loops[i].cond_code == GT_EXPR)
3634 t = fold_build2 (TRUNC_DIV_EXPR, itype,
3635 fold_build1 (NEGATE_EXPR, itype, t),
3636 fold_build1 (NEGATE_EXPR, itype,
3637 fold_convert (itype,
3638 fd->loops[i].step)));
3639 else
3640 t = fold_build2 (TRUNC_DIV_EXPR, itype, t,
3641 fold_convert (itype, fd->loops[i].step));
3642 t = fold_convert (type, t);
3643 if (TREE_CODE (t) == INTEGER_CST)
3644 counts[i] = t;
3645 else
3646 {
3647 counts[i] = create_tmp_var (type, ".count");
75a70cf9 3648 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3649 true, GSI_SAME_STMT);
3650 stmt = gimple_build_assign (counts[i], t);
3651 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
fd6481cf 3652 }
3653 if (SSA_VAR_P (fd->loop.n2))
3654 {
3655 if (i == 0)
75a70cf9 3656 t = counts[0];
fd6481cf 3657 else
3658 {
3659 t = fold_build2 (MULT_EXPR, type, fd->loop.n2, counts[i]);
75a70cf9 3660 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3661 true, GSI_SAME_STMT);
fd6481cf 3662 }
75a70cf9 3663 stmt = gimple_build_assign (fd->loop.n2, t);
3664 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
fd6481cf 3665 }
3666 }
3667 }
79acaae1 3668 if (in_combined_parallel)
3669 {
3670 /* In a combined parallel loop, emit a call to
3671 GOMP_loop_foo_next. */
3672 t = build_call_expr (built_in_decls[next_fn], 2,
3673 build_fold_addr_expr (istart0),
3674 build_fold_addr_expr (iend0));
3675 }
3676 else
1e8e9920 3677 {
c2f47e15 3678 tree t0, t1, t2, t3, t4;
773c5ba7 3679 /* If this is not a combined parallel loop, emit a call to
3680 GOMP_loop_foo_start in ENTRY_BB. */
c2f47e15 3681 t4 = build_fold_addr_expr (iend0);
3682 t3 = build_fold_addr_expr (istart0);
fd6481cf 3683 t2 = fold_convert (fd->iter_type, fd->loop.step);
c799f233 3684 if (POINTER_TYPE_P (type)
3685 && TYPE_PRECISION (type) != TYPE_PRECISION (fd->iter_type))
3686 {
 3687 /* Avoid casting pointers to an integer of a different size. */
3688 tree itype
3689 = lang_hooks.types.type_for_size (TYPE_PRECISION (type), 0);
3690 t1 = fold_convert (fd->iter_type, fold_convert (itype, fd->loop.n2));
3691 t0 = fold_convert (fd->iter_type, fold_convert (itype, fd->loop.n1));
3692 }
3693 else
3694 {
3695 t1 = fold_convert (fd->iter_type, fd->loop.n2);
3696 t0 = fold_convert (fd->iter_type, fd->loop.n1);
3697 }
fd6481cf 3698 if (bias)
1e8e9920 3699 {
fd6481cf 3700 t1 = fold_build2 (PLUS_EXPR, fd->iter_type, t1, bias);
3701 t0 = fold_build2 (PLUS_EXPR, fd->iter_type, t0, bias);
3702 }
3703 if (fd->iter_type == long_integer_type_node)
3704 {
3705 if (fd->chunk_size)
3706 {
3707 t = fold_convert (fd->iter_type, fd->chunk_size);
3708 t = build_call_expr (built_in_decls[start_fn], 6,
3709 t0, t1, t2, t, t3, t4);
3710 }
3711 else
3712 t = build_call_expr (built_in_decls[start_fn], 5,
3713 t0, t1, t2, t3, t4);
1e8e9920 3714 }
c2f47e15 3715 else
fd6481cf 3716 {
3717 tree t5;
3718 tree c_bool_type;
3719
 3720 /* The GOMP_loop_ull_*start functions have an additional boolean
3721 argument, true for < loops and false for > loops.
3722 In Fortran, the C bool type can be different from
3723 boolean_type_node. */
3724 c_bool_type = TREE_TYPE (TREE_TYPE (built_in_decls[start_fn]));
3725 t5 = build_int_cst (c_bool_type,
3726 fd->loop.cond_code == LT_EXPR ? 1 : 0);
3727 if (fd->chunk_size)
3728 {
3729 t = fold_convert (fd->iter_type, fd->chunk_size);
3730 t = build_call_expr (built_in_decls[start_fn], 7,
3731 t5, t0, t1, t2, t, t3, t4);
3732 }
3733 else
3734 t = build_call_expr (built_in_decls[start_fn], 6,
3735 t5, t0, t1, t2, t3, t4);
3736 }
1e8e9920 3737 }
fd6481cf 3738 if (TREE_TYPE (t) != boolean_type_node)
3739 t = fold_build2 (NE_EXPR, boolean_type_node,
3740 t, build_int_cst (TREE_TYPE (t), 0));
75a70cf9 3741 t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3742 true, GSI_SAME_STMT);
3743 gsi_insert_after (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT);
79acaae1 3744
75a70cf9 3745 /* Remove the GIMPLE_OMP_FOR statement. */
3746 gsi_remove (&gsi, true);
1e8e9920 3747
773c5ba7 3748 /* Iteration setup for sequential loop goes in L0_BB. */
75a70cf9 3749 gsi = gsi_start_bb (l0_bb);
fd6481cf 3750 if (bias)
3751 t = fold_convert (type, fold_build2 (MINUS_EXPR, fd->iter_type,
3752 istart0, bias));
3753 else
3754 t = fold_convert (type, istart0);
75a70cf9 3755 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3756 false, GSI_CONTINUE_LINKING);
3757 stmt = gimple_build_assign (fd->loop.v, t);
3758 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
1e8e9920 3759
fd6481cf 3760 if (bias)
3761 t = fold_convert (type, fold_build2 (MINUS_EXPR, fd->iter_type,
3762 iend0, bias));
3763 else
3764 t = fold_convert (type, iend0);
75a70cf9 3765 iend = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3766 false, GSI_CONTINUE_LINKING);
fd6481cf 3767 if (fd->collapse > 1)
3768 {
3769 tree tem = create_tmp_var (type, ".tem");
3770
75a70cf9 3771 stmt = gimple_build_assign (tem, fd->loop.v);
3772 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
fd6481cf 3773 for (i = fd->collapse - 1; i >= 0; i--)
3774 {
3775 tree vtype = TREE_TYPE (fd->loops[i].v), itype;
3776 itype = vtype;
3777 if (POINTER_TYPE_P (vtype))
3778 itype = lang_hooks.types.type_for_size (TYPE_PRECISION (vtype), 0);
3779 t = fold_build2 (TRUNC_MOD_EXPR, type, tem, counts[i]);
3780 t = fold_convert (itype, t);
3781 t = fold_build2 (MULT_EXPR, itype, t, fd->loops[i].step);
3782 if (POINTER_TYPE_P (vtype))
3783 t = fold_build2 (POINTER_PLUS_EXPR, vtype,
3784 fd->loops[i].n1, fold_convert (sizetype, t));
3785 else
3786 t = fold_build2 (PLUS_EXPR, itype, fd->loops[i].n1, t);
75a70cf9 3787 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3788 false, GSI_CONTINUE_LINKING);
3789 stmt = gimple_build_assign (fd->loops[i].v, t);
3790 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
fd6481cf 3791 if (i != 0)
3792 {
3793 t = fold_build2 (TRUNC_DIV_EXPR, type, tem, counts[i]);
75a70cf9 3794 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3795 false, GSI_CONTINUE_LINKING);
3796 stmt = gimple_build_assign (tem, t);
3797 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
fd6481cf 3798 }
3799 }
3800 }
773c5ba7 3801
ac6e3339 3802 if (!broken_loop)
03ed154b 3803 {
ac6e3339 3804 /* Code to control the increment and predicate for the sequential
3805 loop goes in the CONT_BB. */
75a70cf9 3806 gsi = gsi_last_bb (cont_bb);
3807 stmt = gsi_stmt (gsi);
3808 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_CONTINUE);
3809 vmain = gimple_omp_continue_control_use (stmt);
3810 vback = gimple_omp_continue_control_def (stmt);
79acaae1 3811
fd6481cf 3812 if (POINTER_TYPE_P (type))
3813 t = fold_build2 (POINTER_PLUS_EXPR, type, vmain,
3814 fold_convert (sizetype, fd->loop.step));
3815 else
3816 t = fold_build2 (PLUS_EXPR, type, vmain, fd->loop.step);
75a70cf9 3817 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3818 true, GSI_SAME_STMT);
3819 stmt = gimple_build_assign (vback, t);
3820 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
3821
fd6481cf 3822 t = build2 (fd->loop.cond_code, boolean_type_node, vback, iend);
75a70cf9 3823 stmt = gimple_build_cond_empty (t);
3824 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
773c5ba7 3825
75a70cf9 3826 /* Remove GIMPLE_OMP_CONTINUE. */
3827 gsi_remove (&gsi, true);
773c5ba7 3828
fd6481cf 3829 if (fd->collapse > 1)
3830 {
3831 basic_block last_bb, bb;
3832
3833 last_bb = cont_bb;
3834 for (i = fd->collapse - 1; i >= 0; i--)
3835 {
3836 tree vtype = TREE_TYPE (fd->loops[i].v);
3837
3838 bb = create_empty_bb (last_bb);
75a70cf9 3839 gsi = gsi_start_bb (bb);
fd6481cf 3840
3841 if (i < fd->collapse - 1)
3842 {
3843 e = make_edge (last_bb, bb, EDGE_FALSE_VALUE);
3844 e->probability = REG_BR_PROB_BASE / 8;
3845
75a70cf9 3846 t = fd->loops[i + 1].n1;
3847 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3848 false, GSI_CONTINUE_LINKING);
3849 stmt = gimple_build_assign (fd->loops[i + 1].v, t);
3850 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
fd6481cf 3851 }
3852 else
3853 collapse_bb = bb;
3854
3855 set_immediate_dominator (CDI_DOMINATORS, bb, last_bb);
3856
3857 if (POINTER_TYPE_P (vtype))
3858 t = fold_build2 (POINTER_PLUS_EXPR, vtype,
3859 fd->loops[i].v,
3860 fold_convert (sizetype, fd->loops[i].step));
3861 else
3862 t = fold_build2 (PLUS_EXPR, vtype, fd->loops[i].v,
3863 fd->loops[i].step);
75a70cf9 3864 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3865 false, GSI_CONTINUE_LINKING);
3866 stmt = gimple_build_assign (fd->loops[i].v, t);
3867 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
fd6481cf 3868
3869 if (i > 0)
3870 {
75a70cf9 3871 t = fd->loops[i].n2;
3872 t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3873 false, GSI_CONTINUE_LINKING);
fd6481cf 3874 t = fold_build2 (fd->loops[i].cond_code, boolean_type_node,
75a70cf9 3875 fd->loops[i].v, t);
3876 stmt = gimple_build_cond_empty (t);
3877 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
fd6481cf 3878 e = make_edge (bb, l1_bb, EDGE_TRUE_VALUE);
3879 e->probability = REG_BR_PROB_BASE * 7 / 8;
3880 }
3881 else
3882 make_edge (bb, l1_bb, EDGE_FALLTHRU);
3883 last_bb = bb;
3884 }
3885 }
3886
ac6e3339 3887 /* Emit code to get the next parallel iteration in L2_BB. */
75a70cf9 3888 gsi = gsi_start_bb (l2_bb);
773c5ba7 3889
ac6e3339 3890 t = build_call_expr (built_in_decls[next_fn], 2,
3891 build_fold_addr_expr (istart0),
3892 build_fold_addr_expr (iend0));
75a70cf9 3893 t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3894 false, GSI_CONTINUE_LINKING);
fd6481cf 3895 if (TREE_TYPE (t) != boolean_type_node)
3896 t = fold_build2 (NE_EXPR, boolean_type_node,
3897 t, build_int_cst (TREE_TYPE (t), 0));
75a70cf9 3898 stmt = gimple_build_cond_empty (t);
3899 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
ac6e3339 3900 }
1e8e9920 3901
61e47ac8 3902 /* Add the loop cleanup function. */
75a70cf9 3903 gsi = gsi_last_bb (exit_bb);
3904 if (gimple_omp_return_nowait_p (gsi_stmt (gsi)))
61e47ac8 3905 t = built_in_decls[BUILT_IN_GOMP_LOOP_END_NOWAIT];
3906 else
3907 t = built_in_decls[BUILT_IN_GOMP_LOOP_END];
75a70cf9 3908 stmt = gimple_build_call (t, 0);
3909 gsi_insert_after (&gsi, stmt, GSI_SAME_STMT);
3910 gsi_remove (&gsi, true);
773c5ba7 3911
3912 /* Connect the new blocks. */
79acaae1 3913 find_edge (entry_bb, l0_bb)->flags = EDGE_TRUE_VALUE;
3914 find_edge (entry_bb, l3_bb)->flags = EDGE_FALSE_VALUE;
1e8e9920 3915
ac6e3339 3916 if (!broken_loop)
3917 {
75a70cf9 3918 gimple_seq phis;
3919
79acaae1 3920 e = find_edge (cont_bb, l3_bb);
3921 ne = make_edge (l2_bb, l3_bb, EDGE_FALSE_VALUE);
3922
75a70cf9 3923 phis = phi_nodes (l3_bb);
3924 for (gsi = gsi_start (phis); !gsi_end_p (gsi); gsi_next (&gsi))
3925 {
3926 gimple phi = gsi_stmt (gsi);
3927 SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, ne),
3928 PHI_ARG_DEF_FROM_EDGE (phi, e));
3929 }
79acaae1 3930 remove_edge (e);
3931
ac6e3339 3932 make_edge (cont_bb, l2_bb, EDGE_FALSE_VALUE);
fd6481cf 3933 if (fd->collapse > 1)
3934 {
3935 e = find_edge (cont_bb, l1_bb);
3936 remove_edge (e);
3937 e = make_edge (cont_bb, collapse_bb, EDGE_TRUE_VALUE);
3938 }
3939 else
3940 {
3941 e = find_edge (cont_bb, l1_bb);
3942 e->flags = EDGE_TRUE_VALUE;
3943 }
3944 e->probability = REG_BR_PROB_BASE * 7 / 8;
3945 find_edge (cont_bb, l2_bb)->probability = REG_BR_PROB_BASE / 8;
ac6e3339 3946 make_edge (l2_bb, l0_bb, EDGE_TRUE_VALUE);
79acaae1 3947
3948 set_immediate_dominator (CDI_DOMINATORS, l2_bb,
3949 recompute_dominator (CDI_DOMINATORS, l2_bb));
3950 set_immediate_dominator (CDI_DOMINATORS, l3_bb,
3951 recompute_dominator (CDI_DOMINATORS, l3_bb));
3952 set_immediate_dominator (CDI_DOMINATORS, l0_bb,
3953 recompute_dominator (CDI_DOMINATORS, l0_bb));
3954 set_immediate_dominator (CDI_DOMINATORS, l1_bb,
3955 recompute_dominator (CDI_DOMINATORS, l1_bb));
ac6e3339 3956 }
1e8e9920 3957}
3958
3959
773c5ba7 3960/* A subroutine of expand_omp_for. Generate code for a parallel
3961 loop with static schedule and no specified chunk size. Given
3962 parameters:
1e8e9920 3963
3964 for (V = N1; V cond N2; V += STEP) BODY;
3965
3966 where COND is "<" or ">", we generate pseudocode
3967
3968 if (cond is <)
3969 adj = STEP - 1;
3970 else
3971 adj = STEP + 1;
fd6481cf 3972 if ((__typeof (V)) -1 > 0 && cond is >)
3973 n = -(adj + N2 - N1) / -STEP;
3974 else
3975 n = (adj + N2 - N1) / STEP;
1e8e9920 3976 q = n / nthreads;
3977 q += (q * nthreads != n);
3978 s0 = q * threadid;
3979 e0 = min(s0 + q, n);
79acaae1 3980 V = s0 * STEP + N1;
1e8e9920 3981 if (s0 >= e0) goto L2; else goto L0;
3982 L0:
1e8e9920 3983 e = e0 * STEP + N1;
3984 L1:
3985 BODY;
3986 V += STEP;
3987 if (V cond e) goto L1;
1e8e9920 3988 L2:
3989*/
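/* A minimal sketch of the partitioning above (not compiler code; the
   function name is hypothetical).  Each thread receives one contiguous
   block of roughly n/nthreads iterations.  */
#if 0
static void
static_nochunk_range (unsigned long n, unsigned long nthreads,
                      unsigned long threadid,
                      unsigned long *s0, unsigned long *e0)
{
  unsigned long q = n / nthreads;

  q += (q * nthreads != n);     /* Round up when n % nthreads != 0.  */
  *s0 = q * threadid;
  *e0 = *s0 + q;
  if (*e0 > n)
    *e0 = n;                    /* e0 = min (s0 + q, n).  */
}
#endif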
3990
61e47ac8 3991static void
773c5ba7 3992expand_omp_for_static_nochunk (struct omp_region *region,
3993 struct omp_for_data *fd)
1e8e9920 3994{
63f88450 3995 tree n, q, s0, e0, e, t, nthreads, threadid;
fd6481cf 3996 tree type, itype, vmain, vback;
61e47ac8 3997 basic_block entry_bb, exit_bb, seq_start_bb, body_bb, cont_bb;
3998 basic_block fin_bb;
75a70cf9 3999 gimple_stmt_iterator gsi;
4000 gimple stmt;
1e8e9920 4001
fd6481cf 4002 itype = type = TREE_TYPE (fd->loop.v);
4003 if (POINTER_TYPE_P (type))
4004 itype = lang_hooks.types.type_for_size (TYPE_PRECISION (type), 0);
1e8e9920 4005
61e47ac8 4006 entry_bb = region->entry;
61e47ac8 4007 cont_bb = region->cont;
ac6e3339 4008 gcc_assert (EDGE_COUNT (entry_bb->succs) == 2);
4009 gcc_assert (BRANCH_EDGE (entry_bb)->dest == FALLTHRU_EDGE (cont_bb)->dest);
4010 seq_start_bb = split_edge (FALLTHRU_EDGE (entry_bb));
4011 body_bb = single_succ (seq_start_bb);
4012 gcc_assert (BRANCH_EDGE (cont_bb)->dest == body_bb);
4013 gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
4014 fin_bb = FALLTHRU_EDGE (cont_bb)->dest;
61e47ac8 4015 exit_bb = region->exit;
4016
773c5ba7 4017 /* Iteration space partitioning goes in ENTRY_BB. */
75a70cf9 4018 gsi = gsi_last_bb (entry_bb);
4019 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
61e47ac8 4020
c2f47e15 4021 t = build_call_expr (built_in_decls[BUILT_IN_OMP_GET_NUM_THREADS], 0);
fd6481cf 4022 t = fold_convert (itype, t);
75a70cf9 4023 nthreads = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
4024 true, GSI_SAME_STMT);
1e8e9920 4025
c2f47e15 4026 t = build_call_expr (built_in_decls[BUILT_IN_OMP_GET_THREAD_NUM], 0);
fd6481cf 4027 t = fold_convert (itype, t);
75a70cf9 4028 threadid = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
4029 true, GSI_SAME_STMT);
1e8e9920 4030
fd6481cf 4031 fd->loop.n1
75a70cf9 4032 = force_gimple_operand_gsi (&gsi, fold_convert (type, fd->loop.n1),
4033 true, NULL_TREE, true, GSI_SAME_STMT);
fd6481cf 4034 fd->loop.n2
75a70cf9 4035 = force_gimple_operand_gsi (&gsi, fold_convert (itype, fd->loop.n2),
4036 true, NULL_TREE, true, GSI_SAME_STMT);
fd6481cf 4037 fd->loop.step
75a70cf9 4038 = force_gimple_operand_gsi (&gsi, fold_convert (itype, fd->loop.step),
4039 true, NULL_TREE, true, GSI_SAME_STMT);
fd6481cf 4040
4041 t = build_int_cst (itype, (fd->loop.cond_code == LT_EXPR ? -1 : 1));
4042 t = fold_build2 (PLUS_EXPR, itype, fd->loop.step, t);
4043 t = fold_build2 (PLUS_EXPR, itype, t, fd->loop.n2);
4044 t = fold_build2 (MINUS_EXPR, itype, t, fold_convert (itype, fd->loop.n1));
4045 if (TYPE_UNSIGNED (itype) && fd->loop.cond_code == GT_EXPR)
4046 t = fold_build2 (TRUNC_DIV_EXPR, itype,
4047 fold_build1 (NEGATE_EXPR, itype, t),
4048 fold_build1 (NEGATE_EXPR, itype, fd->loop.step));
4049 else
4050 t = fold_build2 (TRUNC_DIV_EXPR, itype, t, fd->loop.step);
4051 t = fold_convert (itype, t);
75a70cf9 4052 n = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);
1e8e9920 4053
fd6481cf 4054 t = fold_build2 (TRUNC_DIV_EXPR, itype, n, nthreads);
75a70cf9 4055 q = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);
1e8e9920 4056
fd6481cf 4057 t = fold_build2 (MULT_EXPR, itype, q, nthreads);
4058 t = fold_build2 (NE_EXPR, itype, t, n);
4059 t = fold_build2 (PLUS_EXPR, itype, q, t);
75a70cf9 4060 q = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);
1e8e9920 4061
fd6481cf 4062 t = build2 (MULT_EXPR, itype, q, threadid);
75a70cf9 4063 s0 = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);
1e8e9920 4064
fd6481cf 4065 t = fold_build2 (PLUS_EXPR, itype, s0, q);
4066 t = fold_build2 (MIN_EXPR, itype, t, n);
75a70cf9 4067 e0 = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);
79acaae1 4068
1e8e9920 4069 t = build2 (GE_EXPR, boolean_type_node, s0, e0);
75a70cf9 4070 gsi_insert_before (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT);
773c5ba7 4071
75a70cf9 4072 /* Remove the GIMPLE_OMP_FOR statement. */
4073 gsi_remove (&gsi, true);
773c5ba7 4074
4075 /* Setup code for sequential iteration goes in SEQ_START_BB. */
75a70cf9 4076 gsi = gsi_start_bb (seq_start_bb);
1e8e9920 4077
fd6481cf 4078 t = fold_convert (itype, s0);
4079 t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
4080 if (POINTER_TYPE_P (type))
4081 t = fold_build2 (POINTER_PLUS_EXPR, type, fd->loop.n1,
4082 fold_convert (sizetype, t));
4083 else
4084 t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
75a70cf9 4085 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
4086 false, GSI_CONTINUE_LINKING);
4087 stmt = gimple_build_assign (fd->loop.v, t);
4088 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
4089
fd6481cf 4090 t = fold_convert (itype, e0);
4091 t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
4092 if (POINTER_TYPE_P (type))
4093 t = fold_build2 (POINTER_PLUS_EXPR, type, fd->loop.n1,
4094 fold_convert (sizetype, t));
4095 else
4096 t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
75a70cf9 4097 e = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
4098 false, GSI_CONTINUE_LINKING);
1e8e9920 4099
75a70cf9 4100 /* The code controlling the sequential loop replaces the
4101 GIMPLE_OMP_CONTINUE. */
4102 gsi = gsi_last_bb (cont_bb);
4103 stmt = gsi_stmt (gsi);
4104 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_CONTINUE);
4105 vmain = gimple_omp_continue_control_use (stmt);
4106 vback = gimple_omp_continue_control_def (stmt);
79acaae1 4107
fd6481cf 4108 if (POINTER_TYPE_P (type))
4109 t = fold_build2 (POINTER_PLUS_EXPR, type, vmain,
4110 fold_convert (sizetype, fd->loop.step));
4111 else
4112 t = fold_build2 (PLUS_EXPR, type, vmain, fd->loop.step);
75a70cf9 4113 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
4114 true, GSI_SAME_STMT);
4115 stmt = gimple_build_assign (vback, t);
4116 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
79acaae1 4117
fd6481cf 4118 t = build2 (fd->loop.cond_code, boolean_type_node, vback, e);
75a70cf9 4119 gsi_insert_before (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT);
1e8e9920 4120
75a70cf9 4121 /* Remove the GIMPLE_OMP_CONTINUE statement. */
4122 gsi_remove (&gsi, true);
773c5ba7 4123
75a70cf9 4124 /* Replace the GIMPLE_OMP_RETURN with a barrier, or nothing. */
4125 gsi = gsi_last_bb (exit_bb);
4126 if (!gimple_omp_return_nowait_p (gsi_stmt (gsi)))
4127 force_gimple_operand_gsi (&gsi, build_omp_barrier (), false, NULL_TREE,
4128 false, GSI_SAME_STMT);
4129 gsi_remove (&gsi, true);
773c5ba7 4130
4131 /* Connect all the blocks. */
ac6e3339 4132 find_edge (entry_bb, seq_start_bb)->flags = EDGE_FALSE_VALUE;
4133 find_edge (entry_bb, fin_bb)->flags = EDGE_TRUE_VALUE;
79acaae1 4134
ac6e3339 4135 find_edge (cont_bb, body_bb)->flags = EDGE_TRUE_VALUE;
61e47ac8 4136 find_edge (cont_bb, fin_bb)->flags = EDGE_FALSE_VALUE;
79acaae1 4137
4138 set_immediate_dominator (CDI_DOMINATORS, seq_start_bb, entry_bb);
4139 set_immediate_dominator (CDI_DOMINATORS, body_bb,
4140 recompute_dominator (CDI_DOMINATORS, body_bb));
4141 set_immediate_dominator (CDI_DOMINATORS, fin_bb,
4142 recompute_dominator (CDI_DOMINATORS, fin_bb));
1e8e9920 4143}
4144
773c5ba7 4145
4146/* A subroutine of expand_omp_for. Generate code for a parallel
4147 loop with static schedule and a specified chunk size. Given
4148 parameters:
1e8e9920 4149
4150 for (V = N1; V cond N2; V += STEP) BODY;
4151
4152 where COND is "<" or ">", we generate pseudocode
4153
4154 if (cond is <)
4155 adj = STEP - 1;
4156 else
4157 adj = STEP + 1;
fd6481cf 4158 if ((__typeof (V)) -1 > 0 && cond is >)
4159 n = -(adj + N2 - N1) / -STEP;
4160 else
4161 n = (adj + N2 - N1) / STEP;
1e8e9920 4162 trip = 0;
79acaae1 4163 V = threadid * CHUNK * STEP + N1; -- this extra definition of V is
4164 here so that V is defined
4165 if the loop is not entered
1e8e9920 4166 L0:
4167 s0 = (trip * nthreads + threadid) * CHUNK;
4168 e0 = min(s0 + CHUNK, n);
4169 if (s0 < n) goto L1; else goto L4;
4170 L1:
4171 V = s0 * STEP + N1;
4172 e = e0 * STEP + N1;
4173 L2:
4174 BODY;
4175 V += STEP;
4176 if (V cond e) goto L2; else goto L3;
4177 L3:
4178 trip += 1;
4179 goto L0;
4180 L4:
1e8e9920 4181*/
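/* A minimal sketch of the chunked partitioning above (not compiler
   code; the function name is hypothetical).  Chunks are handed out
   round-robin: on trip T, thread TID owns chunk T * nthreads + TID,
   and the caller loops while s0 < n, bumping the trip count.  */
#if 0
static void
static_chunk_range (unsigned long n, unsigned long nthreads,
                    unsigned long threadid, unsigned long chunk,
                    unsigned long trip,
                    unsigned long *s0, unsigned long *e0)
{
  *s0 = (trip * nthreads + threadid) * chunk;
  *e0 = *s0 + chunk;
  if (*e0 > n)
    *e0 = n;                    /* e0 = min (s0 + CHUNK, n).  */
}
#endif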
4182
61e47ac8 4183static void
75a70cf9 4184expand_omp_for_static_chunk (struct omp_region *region, struct omp_for_data *fd)
1e8e9920 4185{
75a70cf9 4186 tree n, s0, e0, e, t;
79acaae1 4187 tree trip_var, trip_init, trip_main, trip_back, nthreads, threadid;
75a70cf9 4188 tree type, itype, v_main, v_back, v_extra;
773c5ba7 4189 basic_block entry_bb, exit_bb, body_bb, seq_start_bb, iter_part_bb;
61e47ac8 4190 basic_block trip_update_bb, cont_bb, fin_bb;
75a70cf9 4191 gimple_stmt_iterator si;
4192 gimple stmt;
4193 edge se;
1e8e9920 4194
fd6481cf 4195 itype = type = TREE_TYPE (fd->loop.v);
4196 if (POINTER_TYPE_P (type))
4197 itype = lang_hooks.types.type_for_size (TYPE_PRECISION (type), 0);
1e8e9920 4198
61e47ac8 4199 entry_bb = region->entry;
ac6e3339 4200 se = split_block (entry_bb, last_stmt (entry_bb));
4201 entry_bb = se->src;
4202 iter_part_bb = se->dest;
61e47ac8 4203 cont_bb = region->cont;
ac6e3339 4204 gcc_assert (EDGE_COUNT (iter_part_bb->succs) == 2);
4205 gcc_assert (BRANCH_EDGE (iter_part_bb)->dest
4206 == FALLTHRU_EDGE (cont_bb)->dest);
4207 seq_start_bb = split_edge (FALLTHRU_EDGE (iter_part_bb));
4208 body_bb = single_succ (seq_start_bb);
4209 gcc_assert (BRANCH_EDGE (cont_bb)->dest == body_bb);
4210 gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
4211 fin_bb = FALLTHRU_EDGE (cont_bb)->dest;
4212 trip_update_bb = split_edge (FALLTHRU_EDGE (cont_bb));
61e47ac8 4213 exit_bb = region->exit;
773c5ba7 4214
773c5ba7 4215 /* Trip and adjustment setup goes in ENTRY_BB. */
75a70cf9 4216 si = gsi_last_bb (entry_bb);
4217 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_FOR);
773c5ba7 4218
c2f47e15 4219 t = build_call_expr (built_in_decls[BUILT_IN_OMP_GET_NUM_THREADS], 0);
fd6481cf 4220 t = fold_convert (itype, t);
75a70cf9 4221 nthreads = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4222 true, GSI_SAME_STMT);
1e8e9920 4223
c2f47e15 4224 t = build_call_expr (built_in_decls[BUILT_IN_OMP_GET_THREAD_NUM], 0);
fd6481cf 4225 t = fold_convert (itype, t);
75a70cf9 4226 threadid = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4227 true, GSI_SAME_STMT);
79acaae1 4228
fd6481cf 4229 fd->loop.n1
75a70cf9 4230 = force_gimple_operand_gsi (&si, fold_convert (type, fd->loop.n1),
4231 true, NULL_TREE, true, GSI_SAME_STMT);
fd6481cf 4232 fd->loop.n2
75a70cf9 4233 = force_gimple_operand_gsi (&si, fold_convert (itype, fd->loop.n2),
4234 true, NULL_TREE, true, GSI_SAME_STMT);
fd6481cf 4235 fd->loop.step
75a70cf9 4236 = force_gimple_operand_gsi (&si, fold_convert (itype, fd->loop.step),
4237 true, NULL_TREE, true, GSI_SAME_STMT);
79acaae1 4238 fd->chunk_size
75a70cf9 4239 = force_gimple_operand_gsi (&si, fold_convert (itype, fd->chunk_size),
4240 true, NULL_TREE, true, GSI_SAME_STMT);
fd6481cf 4241
4242 t = build_int_cst (itype, (fd->loop.cond_code == LT_EXPR ? -1 : 1));
4243 t = fold_build2 (PLUS_EXPR, itype, fd->loop.step, t);
4244 t = fold_build2 (PLUS_EXPR, itype, t, fd->loop.n2);
4245 t = fold_build2 (MINUS_EXPR, itype, t, fold_convert (itype, fd->loop.n1));
4246 if (TYPE_UNSIGNED (itype) && fd->loop.cond_code == GT_EXPR)
4247 t = fold_build2 (TRUNC_DIV_EXPR, itype,
4248 fold_build1 (NEGATE_EXPR, itype, t),
4249 fold_build1 (NEGATE_EXPR, itype, fd->loop.step));
4250 else
4251 t = fold_build2 (TRUNC_DIV_EXPR, itype, t, fd->loop.step);
4252 t = fold_convert (itype, t);
75a70cf9 4253 n = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4254 true, GSI_SAME_STMT);
79acaae1 4255
fd6481cf 4256 trip_var = create_tmp_var (itype, ".trip");
79acaae1 4257 if (gimple_in_ssa_p (cfun))
4258 {
4259 add_referenced_var (trip_var);
75a70cf9 4260 trip_init = make_ssa_name (trip_var, NULL);
4261 trip_main = make_ssa_name (trip_var, NULL);
4262 trip_back = make_ssa_name (trip_var, NULL);
79acaae1 4263 }
1e8e9920 4264 else
79acaae1 4265 {
4266 trip_init = trip_var;
4267 trip_main = trip_var;
4268 trip_back = trip_var;
4269 }
1e8e9920 4270
75a70cf9 4271 stmt = gimple_build_assign (trip_init, build_int_cst (itype, 0));
4272 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
773c5ba7 4273
fd6481cf 4274 t = fold_build2 (MULT_EXPR, itype, threadid, fd->chunk_size);
4275 t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
4276 if (POINTER_TYPE_P (type))
4277 t = fold_build2 (POINTER_PLUS_EXPR, type, fd->loop.n1,
4278 fold_convert (sizetype, t));
4279 else
4280 t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
75a70cf9 4281 v_extra = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4282 true, GSI_SAME_STMT);
79acaae1 4283
75a70cf9 4284 /* Remove the GIMPLE_OMP_FOR. */
4285 gsi_remove (&si, true);
773c5ba7 4286
4287 /* Iteration space partitioning goes in ITER_PART_BB. */
75a70cf9 4288 si = gsi_last_bb (iter_part_bb);
1e8e9920 4289
fd6481cf 4290 t = fold_build2 (MULT_EXPR, itype, trip_main, nthreads);
4291 t = fold_build2 (PLUS_EXPR, itype, t, threadid);
4292 t = fold_build2 (MULT_EXPR, itype, t, fd->chunk_size);
75a70cf9 4293 s0 = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4294 false, GSI_CONTINUE_LINKING);
1e8e9920 4295
fd6481cf 4296 t = fold_build2 (PLUS_EXPR, itype, s0, fd->chunk_size);
4297 t = fold_build2 (MIN_EXPR, itype, t, n);
75a70cf9 4298 e0 = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4299 false, GSI_CONTINUE_LINKING);
1e8e9920 4300
4301 t = build2 (LT_EXPR, boolean_type_node, s0, n);
75a70cf9 4302 gsi_insert_after (&si, gimple_build_cond_empty (t), GSI_CONTINUE_LINKING);
773c5ba7 4303
4304 /* Setup code for sequential iteration goes in SEQ_START_BB. */
75a70cf9 4305 si = gsi_start_bb (seq_start_bb);
1e8e9920 4306
fd6481cf 4307 t = fold_convert (itype, s0);
4308 t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
4309 if (POINTER_TYPE_P (type))
4310 t = fold_build2 (POINTER_PLUS_EXPR, type, fd->loop.n1,
4311 fold_convert (sizetype, t));
4312 else
4313 t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
75a70cf9 4314 t = force_gimple_operand_gsi (&si, t, false, NULL_TREE,
4315 false, GSI_CONTINUE_LINKING);
4316 stmt = gimple_build_assign (fd->loop.v, t);
4317 gsi_insert_after (&si, stmt, GSI_CONTINUE_LINKING);
1e8e9920 4318
fd6481cf 4319 t = fold_convert (itype, e0);
4320 t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
4321 if (POINTER_TYPE_P (type))
4322 t = fold_build2 (POINTER_PLUS_EXPR, type, fd->loop.n1,
4323 fold_convert (sizetype, t));
4324 else
4325 t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
75a70cf9 4326 e = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4327 false, GSI_CONTINUE_LINKING);
1e8e9920 4328
61e47ac8 4329 /* The code controlling the sequential loop goes in CONT_BB,
75a70cf9 4330 replacing the GIMPLE_OMP_CONTINUE. */
4331 si = gsi_last_bb (cont_bb);
4332 stmt = gsi_stmt (si);
4333 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_CONTINUE);
4334 v_main = gimple_omp_continue_control_use (stmt);
4335 v_back = gimple_omp_continue_control_def (stmt);
79acaae1 4336
fd6481cf 4337 if (POINTER_TYPE_P (type))
4338 t = fold_build2 (POINTER_PLUS_EXPR, type, v_main,
4339 fold_convert (sizetype, fd->loop.step));
4340 else
75a70cf9 4341 t = fold_build2 (PLUS_EXPR, type, v_main, fd->loop.step);
4342 stmt = gimple_build_assign (v_back, t);
4343 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
79acaae1 4344
fd6481cf 4345 t = build2 (fd->loop.cond_code, boolean_type_node, v_back, e);
75a70cf9 4346 gsi_insert_before (&si, gimple_build_cond_empty (t), GSI_SAME_STMT);
773c5ba7 4347
75a70cf9 4348 /* Remove GIMPLE_OMP_CONTINUE. */
4349 gsi_remove (&si, true);
773c5ba7 4350
4351 /* Trip update code goes into TRIP_UPDATE_BB. */
75a70cf9 4352 si = gsi_start_bb (trip_update_bb);
1e8e9920 4353
fd6481cf 4354 t = build_int_cst (itype, 1);
4355 t = build2 (PLUS_EXPR, itype, trip_main, t);
75a70cf9 4356 stmt = gimple_build_assign (trip_back, t);
4357 gsi_insert_after (&si, stmt, GSI_CONTINUE_LINKING);
1e8e9920 4358
75a70cf9 4359 /* Replace the GIMPLE_OMP_RETURN with a barrier, or nothing. */
4360 si = gsi_last_bb (exit_bb);
4361 if (!gimple_omp_return_nowait_p (gsi_stmt (si)))
4362 force_gimple_operand_gsi (&si, build_omp_barrier (), false, NULL_TREE,
4363 false, GSI_SAME_STMT);
4364 gsi_remove (&si, true);
1e8e9920 4365
773c5ba7 4366 /* Connect the new blocks. */
ac6e3339 4367 find_edge (iter_part_bb, seq_start_bb)->flags = EDGE_TRUE_VALUE;
4368 find_edge (iter_part_bb, fin_bb)->flags = EDGE_FALSE_VALUE;
79acaae1 4369
ac6e3339 4370 find_edge (cont_bb, body_bb)->flags = EDGE_TRUE_VALUE;
4371 find_edge (cont_bb, trip_update_bb)->flags = EDGE_FALSE_VALUE;
79acaae1 4372
ac6e3339 4373 redirect_edge_and_branch (single_succ_edge (trip_update_bb), iter_part_bb);
79acaae1 4374
4375 if (gimple_in_ssa_p (cfun))
4376 {
75a70cf9 4377 gimple_stmt_iterator psi;
4378 gimple phi;
4379 edge re, ene;
4380 edge_var_map_vector head;
4381 edge_var_map *vm;
4382 size_t i;
4383
79acaae1 4384 /* When we redirect the edge from trip_update_bb to iter_part_bb, we
4385 remove arguments of the phi nodes in fin_bb. We need to create
4386 appropriate phi nodes in iter_part_bb instead. */
4387 se = single_pred_edge (fin_bb);
4388 re = single_succ_edge (trip_update_bb);
75a70cf9 4389 head = redirect_edge_var_map_vector (re);
79acaae1 4390 ene = single_succ_edge (entry_bb);
4391
75a70cf9 4392 psi = gsi_start_phis (fin_bb);
4393 for (i = 0; !gsi_end_p (psi) && VEC_iterate (edge_var_map, head, i, vm);
4394 gsi_next (&psi), ++i)
79acaae1 4395 {
75a70cf9 4396 gimple nphi;
4397
4398 phi = gsi_stmt (psi);
4399 t = gimple_phi_result (phi);
4400 gcc_assert (t == redirect_edge_var_map_result (vm));
79acaae1 4401 nphi = create_phi_node (t, iter_part_bb);
4402 SSA_NAME_DEF_STMT (t) = nphi;
4403
4404 t = PHI_ARG_DEF_FROM_EDGE (phi, se);
fd6481cf 4405 /* A special case -- fd->loop.v is not yet computed in
4406 iter_part_bb, we need to use v_extra instead. */
4407 if (t == fd->loop.v)
79acaae1 4408 t = v_extra;
4409 add_phi_arg (nphi, t, ene);
75a70cf9 4410 add_phi_arg (nphi, redirect_edge_var_map_def (vm), re);
4411 }
4412 gcc_assert (!gsi_end_p (psi) && i == VEC_length (edge_var_map, head));
4413 redirect_edge_var_map_clear (re);
4414 while (1)
4415 {
4416 psi = gsi_start_phis (fin_bb);
4417 if (gsi_end_p (psi))
4418 break;
4419 remove_phi_node (&psi, false);
79acaae1 4420 }
79acaae1 4421
4422 /* Make phi node for trip. */
4423 phi = create_phi_node (trip_main, iter_part_bb);
4424 SSA_NAME_DEF_STMT (trip_main) = phi;
4425 add_phi_arg (phi, trip_back, single_succ_edge (trip_update_bb));
4426 add_phi_arg (phi, trip_init, single_succ_edge (entry_bb));
4427 }
4428
4429 set_immediate_dominator (CDI_DOMINATORS, trip_update_bb, cont_bb);
4430 set_immediate_dominator (CDI_DOMINATORS, iter_part_bb,
4431 recompute_dominator (CDI_DOMINATORS, iter_part_bb));
4432 set_immediate_dominator (CDI_DOMINATORS, fin_bb,
4433 recompute_dominator (CDI_DOMINATORS, fin_bb));
4434 set_immediate_dominator (CDI_DOMINATORS, seq_start_bb,
4435 recompute_dominator (CDI_DOMINATORS, seq_start_bb));
4436 set_immediate_dominator (CDI_DOMINATORS, body_bb,
4437 recompute_dominator (CDI_DOMINATORS, body_bb));
1e8e9920 4438}
4439
1e8e9920 4440
773c5ba7 4441/* Expand the OpenMP loop defined by REGION. */
1e8e9920 4442
773c5ba7 4443static void
4444expand_omp_for (struct omp_region *region)
4445{
4446 struct omp_for_data fd;
fd6481cf 4447 struct omp_for_data_loop *loops;
1e8e9920 4448
fd6481cf 4449 loops
4450 = (struct omp_for_data_loop *)
75a70cf9 4451 alloca (gimple_omp_for_collapse (last_stmt (region->entry))
fd6481cf 4452 * sizeof (struct omp_for_data_loop));
fd6481cf 4453 extract_omp_for_data (last_stmt (region->entry), &fd, loops);
f77459c5 4454 region->sched_kind = fd.sched_kind;
1e8e9920 4455
b3a3ddec 4456 gcc_assert (EDGE_COUNT (region->entry->succs) == 2);
4457 BRANCH_EDGE (region->entry)->flags &= ~EDGE_ABNORMAL;
4458 FALLTHRU_EDGE (region->entry)->flags &= ~EDGE_ABNORMAL;
4459 if (region->cont)
4460 {
4461 gcc_assert (EDGE_COUNT (region->cont->succs) == 2);
4462 BRANCH_EDGE (region->cont)->flags &= ~EDGE_ABNORMAL;
4463 FALLTHRU_EDGE (region->cont)->flags &= ~EDGE_ABNORMAL;
4464 }
4465
03ed154b 4466 if (fd.sched_kind == OMP_CLAUSE_SCHEDULE_STATIC
4467 && !fd.have_ordered
fd6481cf 4468 && fd.collapse == 1
ac6e3339 4469 && region->cont != NULL)
1e8e9920 4470 {
4471 if (fd.chunk_size == NULL)
61e47ac8 4472 expand_omp_for_static_nochunk (region, &fd);
1e8e9920 4473 else
61e47ac8 4474 expand_omp_for_static_chunk (region, &fd);
1e8e9920 4475 }
4476 else
4477 {
fd6481cf 4478 int fn_index, start_ix, next_ix;
4479
4480 gcc_assert (fd.sched_kind != OMP_CLAUSE_SCHEDULE_AUTO);
4481 fn_index = (fd.sched_kind == OMP_CLAUSE_SCHEDULE_RUNTIME)
75a70cf9 4482 ? 3 : fd.sched_kind;
fd6481cf 4483 fn_index += fd.have_ordered * 4;
4484 start_ix = BUILT_IN_GOMP_LOOP_STATIC_START + fn_index;
4485 next_ix = BUILT_IN_GOMP_LOOP_STATIC_NEXT + fn_index;
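      /* For example, schedule(dynamic) gives fn_index 1 and thus
         GOMP_loop_dynamic_start/_next; an ordered clause shifts the
         pair four slots down to the _ordered_ variants.  */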
4486 if (fd.iter_type == long_long_unsigned_type_node)
4487 {
4488 start_ix += BUILT_IN_GOMP_LOOP_ULL_STATIC_START
4489 - BUILT_IN_GOMP_LOOP_STATIC_START;
4490 next_ix += BUILT_IN_GOMP_LOOP_ULL_STATIC_NEXT
4491 - BUILT_IN_GOMP_LOOP_STATIC_NEXT;
4492 }
61e47ac8 4493 expand_omp_for_generic (region, &fd, start_ix, next_ix);
1e8e9920 4494 }
28c92cbb 4495
4496 update_ssa (TODO_update_ssa_only_virtuals);
1e8e9920 4497}
4498
1e8e9920 4499
4500/* Expand code for an OpenMP sections directive. In pseudo code, we generate
4501
1e8e9920 4502 v = GOMP_sections_start (n);
4503 L0:
4504 switch (v)
4505 {
4506 case 0:
4507 goto L2;
4508 case 1:
4509 section 1;
4510 goto L1;
4511 case 2:
4512 ...
4513 case n:
4514 ...
1e8e9920 4515 default:
4516 abort ();
4517 }
4518 L1:
4519 v = GOMP_sections_next ();
4520 goto L0;
4521 L2:
4522 reduction;
4523
773c5ba7 4524 If this is a combined parallel sections region, replace the call to
79acaae1 4525 GOMP_sections_start with a call to GOMP_sections_next. */
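/* A user-level sketch of the dispatch above (not compiler code).
   GOMP_sections_start, GOMP_sections_next and GOMP_sections_end are
   the libgomp entry points; section_1 and section_2 stand in for two
   hypothetical section bodies.  */
#if 0
extern unsigned GOMP_sections_start (unsigned count);
extern unsigned GOMP_sections_next (void);
extern void GOMP_sections_end (void);
extern void section_1 (void), section_2 (void);

static void
worker (void)
{
  unsigned v;

  /* v == 0 plays the role of "case 0: goto L2" above.  */
  for (v = GOMP_sections_start (2); v != 0; v = GOMP_sections_next ())
    switch (v)
      {
      case 1: section_1 (); break;
      case 2: section_2 (); break;
      default: __builtin_trap ();   /* The DEFAULT_BB error path.  */
      }
  GOMP_sections_end ();             /* L2: barrier/reduction point.  */
}
#endif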
1e8e9920 4526
4527static void
773c5ba7 4528expand_omp_sections (struct omp_region *region)
1e8e9920 4529{
75a70cf9 4530 tree t, u, vin = NULL, vmain, vnext, l1, l2;
4531 VEC (tree,heap) *label_vec;
4532 unsigned len;
ac6e3339 4533 basic_block entry_bb, l0_bb, l1_bb, l2_bb, default_bb;
75a70cf9 4534 gimple_stmt_iterator si, switch_si;
4535 gimple sections_stmt, stmt, cont;
9884aaf8 4536 edge_iterator ei;
4537 edge e;
61e47ac8 4538 struct omp_region *inner;
75a70cf9 4539 unsigned i, casei;
ac6e3339 4540 bool exit_reachable = region->cont != NULL;
1e8e9920 4541
ac6e3339 4542 gcc_assert (exit_reachable == (region->exit != NULL));
61e47ac8 4543 entry_bb = region->entry;
ac6e3339 4544 l0_bb = single_succ (entry_bb);
61e47ac8 4545 l1_bb = region->cont;
ac6e3339 4546 l2_bb = region->exit;
4547 if (exit_reachable)
03ed154b 4548 {
9884aaf8 4549 if (single_pred (l2_bb) == l0_bb)
75a70cf9 4550 l2 = gimple_block_label (l2_bb);
9884aaf8 4551 else
4552 {
4553 /* This can happen if there are reductions. */
4554 len = EDGE_COUNT (l0_bb->succs);
4555 gcc_assert (len > 0);
4556 e = EDGE_SUCC (l0_bb, len - 1);
75a70cf9 4557 si = gsi_last_bb (e->dest);
6d5a0fbe 4558 l2 = NULL_TREE;
75a70cf9 4559 if (gsi_end_p (si)
4560 || gimple_code (gsi_stmt (si)) != GIMPLE_OMP_SECTION)
4561 l2 = gimple_block_label (e->dest);
9884aaf8 4562 else
4563 FOR_EACH_EDGE (e, ei, l0_bb->succs)
4564 {
75a70cf9 4565 si = gsi_last_bb (e->dest);
4566 if (gsi_end_p (si)
4567 || gimple_code (gsi_stmt (si)) != GIMPLE_OMP_SECTION)
9884aaf8 4568 {
75a70cf9 4569 l2 = gimple_block_label (e->dest);
9884aaf8 4570 break;
4571 }
4572 }
4573 }
03ed154b 4574 default_bb = create_empty_bb (l1_bb->prev_bb);
75a70cf9 4575 l1 = gimple_block_label (l1_bb);
03ed154b 4576 }
4577 else
4578 {
ac6e3339 4579 default_bb = create_empty_bb (l0_bb);
4580 l1 = NULL_TREE;
75a70cf9 4581 l2 = gimple_block_label (default_bb);
03ed154b 4582 }
773c5ba7 4583
4584 /* We will build a switch() with enough cases for all the
75a70cf9 4585 GIMPLE_OMP_SECTION regions, a '0' case to handle running out of work
773c5ba7 4586 and a default case to abort if something goes wrong. */
ac6e3339 4587 len = EDGE_COUNT (l0_bb->succs);
75a70cf9 4588
4589 /* Use VEC_quick_push on label_vec throughout, since we know the size
4590 in advance. */
4591 label_vec = VEC_alloc (tree, heap, len);
1e8e9920 4592
61e47ac8 4593 /* The call to GOMP_sections_start goes in ENTRY_BB, replacing the
75a70cf9 4594 GIMPLE_OMP_SECTIONS statement. */
4595 si = gsi_last_bb (entry_bb);
4596 sections_stmt = gsi_stmt (si);
4597 gcc_assert (gimple_code (sections_stmt) == GIMPLE_OMP_SECTIONS);
4598 vin = gimple_omp_sections_control (sections_stmt);
773c5ba7 4599 if (!is_combined_parallel (region))
1e8e9920 4600 {
773c5ba7 4601 /* If we are not inside a combined parallel+sections region,
4602 call GOMP_sections_start. */
ac6e3339 4603 t = build_int_cst (unsigned_type_node,
4604 exit_reachable ? len - 1 : len);
1e8e9920 4605 u = built_in_decls[BUILT_IN_GOMP_SECTIONS_START];
75a70cf9 4606 stmt = gimple_build_call (u, 1, t);
1e8e9920 4607 }
79acaae1 4608 else
4609 {
4610 /* Otherwise, call GOMP_sections_next. */
4611 u = built_in_decls[BUILT_IN_GOMP_SECTIONS_NEXT];
75a70cf9 4612 stmt = gimple_build_call (u, 0);
79acaae1 4613 }
75a70cf9 4614 gimple_call_set_lhs (stmt, vin);
4615 gsi_insert_after (&si, stmt, GSI_SAME_STMT);
4616 gsi_remove (&si, true);
4617
4618 /* The switch() statement replacing GIMPLE_OMP_SECTIONS_SWITCH goes in
4619 L0_BB. */
4620 switch_si = gsi_last_bb (l0_bb);
4621 gcc_assert (gimple_code (gsi_stmt (switch_si)) == GIMPLE_OMP_SECTIONS_SWITCH);
79acaae1 4622 if (exit_reachable)
4623 {
4624 cont = last_stmt (l1_bb);
75a70cf9 4625 gcc_assert (gimple_code (cont) == GIMPLE_OMP_CONTINUE);
4626 vmain = gimple_omp_continue_control_use (cont);
4627 vnext = gimple_omp_continue_control_def (cont);
79acaae1 4628 }
4629 else
4630 {
4631 vmain = vin;
4632 vnext = NULL_TREE;
4633 }
1e8e9920 4634
ac6e3339 4635 i = 0;
4636 if (exit_reachable)
4637 {
4638 t = build3 (CASE_LABEL_EXPR, void_type_node,
4639 build_int_cst (unsigned_type_node, 0), NULL, l2);
75a70cf9 4640 VEC_quick_push (tree, label_vec, t);
ac6e3339 4641 i++;
4642 }
03ed154b 4643
75a70cf9 4644 /* Convert each GIMPLE_OMP_SECTION into a CASE_LABEL_EXPR. */
ac6e3339 4645 for (inner = region->inner, casei = 1;
4646 inner;
4647 inner = inner->next, i++, casei++)
1e8e9920 4648 {
773c5ba7 4649 basic_block s_entry_bb, s_exit_bb;
4650
9884aaf8 4651 /* Skip optional reduction region. */
75a70cf9 4652 if (inner->type == GIMPLE_OMP_ATOMIC_LOAD)
9884aaf8 4653 {
4654 --i;
4655 --casei;
4656 continue;
4657 }
4658
61e47ac8 4659 s_entry_bb = inner->entry;
4660 s_exit_bb = inner->exit;
1e8e9920 4661
75a70cf9 4662 t = gimple_block_label (s_entry_bb);
ac6e3339 4663 u = build_int_cst (unsigned_type_node, casei);
1e8e9920 4664 u = build3 (CASE_LABEL_EXPR, void_type_node, u, NULL, t);
75a70cf9 4665 VEC_quick_push (tree, label_vec, u);
61e47ac8 4666
75a70cf9 4667 si = gsi_last_bb (s_entry_bb);
4668 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SECTION);
4669 gcc_assert (i < len || gimple_omp_section_last_p (gsi_stmt (si)));
4670 gsi_remove (&si, true);
61e47ac8 4671 single_succ_edge (s_entry_bb)->flags = EDGE_FALLTHRU;
03ed154b 4672
4673 if (s_exit_bb == NULL)
4674 continue;
4675
75a70cf9 4676 si = gsi_last_bb (s_exit_bb);
4677 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_RETURN);
4678 gsi_remove (&si, true);
03ed154b 4679
773c5ba7 4680 single_succ_edge (s_exit_bb)->flags = EDGE_FALLTHRU;
1e8e9920 4681 }
4682
773c5ba7 4683 /* Error handling code goes in DEFAULT_BB. */
75a70cf9 4684 t = gimple_block_label (default_bb);
1e8e9920 4685 u = build3 (CASE_LABEL_EXPR, void_type_node, NULL, NULL, t);
61e47ac8 4686 make_edge (l0_bb, default_bb, 0);
1e8e9920 4687
75a70cf9 4688 stmt = gimple_build_switch_vec (vmain, u, label_vec);
4689 gsi_insert_after (&switch_si, stmt, GSI_SAME_STMT);
4690 gsi_remove (&switch_si, true);
4691 VEC_free (tree, heap, label_vec);
4692
4693 si = gsi_start_bb (default_bb);
4694 stmt = gimple_build_call (built_in_decls[BUILT_IN_TRAP], 0);
4695 gsi_insert_after (&si, stmt, GSI_CONTINUE_LINKING);
773c5ba7 4696
ac6e3339 4697 if (exit_reachable)
03ed154b 4698 {
ac6e3339 4699 /* Code to get the next section goes in L1_BB. */
75a70cf9 4700 si = gsi_last_bb (l1_bb);
4701 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_CONTINUE);
1e8e9920 4702
75a70cf9 4703 stmt = gimple_build_call (built_in_decls[BUILT_IN_GOMP_SECTIONS_NEXT], 0);
4704 gimple_call_set_lhs (stmt, vnext);
4705 gsi_insert_after (&si, stmt, GSI_SAME_STMT);
4706 gsi_remove (&si, true);
773c5ba7 4707
ac6e3339 4708 single_succ_edge (l1_bb)->flags = EDGE_FALLTHRU;
4709
75a70cf9 4710 /* Cleanup function replaces GIMPLE_OMP_RETURN in EXIT_BB. */
4711 si = gsi_last_bb (l2_bb);
4712 if (gimple_omp_return_nowait_p (gsi_stmt (si)))
03ed154b 4713 t = built_in_decls[BUILT_IN_GOMP_SECTIONS_END_NOWAIT];
4714 else
4715 t = built_in_decls[BUILT_IN_GOMP_SECTIONS_END];
75a70cf9 4716 stmt = gimple_build_call (t, 0);
4717 gsi_insert_after (&si, stmt, GSI_SAME_STMT);
4718 gsi_remove (&si, true);
03ed154b 4719 }
773c5ba7 4720
79acaae1 4721 set_immediate_dominator (CDI_DOMINATORS, default_bb, l0_bb);
773c5ba7 4722}
1e8e9920 4723
1e8e9920 4724
61e47ac8 4725/* Expand code for an OpenMP single directive. We've already expanded
 4726 much of the code; here we simply place the GOMP_barrier call. */
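/* A user-level sketch of the result (not compiler code).
   GOMP_single_start and GOMP_barrier are the libgomp entry points;
   single_body is a hypothetical placeholder.  */
#if 0
#include <stdbool.h>

extern bool GOMP_single_start (void);
extern void GOMP_barrier (void);
extern void single_body (void);

static void
worker (void)
{
  if (GOMP_single_start ())     /* True in exactly one thread.  */
    single_body ();
  GOMP_barrier ();              /* Omitted only for nowait without
                                   copyprivate, as explained below.  */
}
#endif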
4727
4728static void
4729expand_omp_single (struct omp_region *region)
4730{
4731 basic_block entry_bb, exit_bb;
75a70cf9 4732 gimple_stmt_iterator si;
61e47ac8 4733 bool need_barrier = false;
4734
4735 entry_bb = region->entry;
4736 exit_bb = region->exit;
4737
75a70cf9 4738 si = gsi_last_bb (entry_bb);
61e47ac8 4739 /* The terminal barrier at the end of a GOMP_single_copy sequence cannot
4740 be removed. We need to ensure that the thread that entered the single
4741 does not exit before the data is copied out by the other threads. */
75a70cf9 4742 if (find_omp_clause (gimple_omp_single_clauses (gsi_stmt (si)),
61e47ac8 4743 OMP_CLAUSE_COPYPRIVATE))
4744 need_barrier = true;
75a70cf9 4745 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SINGLE);
4746 gsi_remove (&si, true);
61e47ac8 4747 single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
4748
75a70cf9 4749 si = gsi_last_bb (exit_bb);
4750 if (!gimple_omp_return_nowait_p (gsi_stmt (si)) || need_barrier)
4751 force_gimple_operand_gsi (&si, build_omp_barrier (), false, NULL_TREE,
4752 false, GSI_SAME_STMT);
4753 gsi_remove (&si, true);
61e47ac8 4754 single_succ_edge (exit_bb)->flags = EDGE_FALLTHRU;
4755}
4756
4757
4758/* Generic expansion for OpenMP synchronization directives: master,
4759 ordered and critical. All we need to do here is remove the entry
4760 and exit markers for REGION. */
773c5ba7 4761
4762static void
4763expand_omp_synch (struct omp_region *region)
4764{
4765 basic_block entry_bb, exit_bb;
75a70cf9 4766 gimple_stmt_iterator si;
773c5ba7 4767
61e47ac8 4768 entry_bb = region->entry;
4769 exit_bb = region->exit;
773c5ba7 4770
75a70cf9 4771 si = gsi_last_bb (entry_bb);
4772 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SINGLE
4773 || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_MASTER
4774 || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ORDERED
4775 || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_CRITICAL);
4776 gsi_remove (&si, true);
773c5ba7 4777 single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
4778
03ed154b 4779 if (exit_bb)
4780 {
75a70cf9 4781 si = gsi_last_bb (exit_bb);
4782 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_RETURN);
4783 gsi_remove (&si, true);
03ed154b 4784 single_succ_edge (exit_bb)->flags = EDGE_FALLTHRU;
4785 }
773c5ba7 4786}
1e8e9920 4787
cb7f680b 4788/* A subroutine of expand_omp_atomic. Attempt to implement the atomic
4789 operation as a __sync_fetch_and_op builtin. INDEX is log2 of the
4790 size of the data type, and thus usable to find the index of the builtin
4791 decl. Returns false if the expression is not of the proper form. */
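/* For instance (a sketch, not compiler code): with this optimization,
   "#pragma omp atomic  x += val;" on an int amounts to the real GCC
   builtin __sync_fetch_and_add with its result discarded.  */
#if 0
static void
atomic_add (int *x, int val)
{
  (void) __sync_fetch_and_add (x, val);  /* Fetched value unused.  */
}
#endif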
4792
4793static bool
4794expand_omp_atomic_fetch_op (basic_block load_bb,
4795 tree addr, tree loaded_val,
4796 tree stored_val, int index)
4797{
4798 enum built_in_function base;
4799 tree decl, itype, call;
4800 enum insn_code *optab;
4801 tree rhs;
4802 basic_block store_bb = single_succ (load_bb);
75a70cf9 4803 gimple_stmt_iterator gsi;
4804 gimple stmt;
cb7f680b 4805
4806 /* We expect to find the following sequences:
4807
4808 load_bb:
75a70cf9 4809 GIMPLE_OMP_ATOMIC_LOAD (tmp, mem)
cb7f680b 4810
4811 store_bb:
4812 val = tmp OP something; (or: something OP tmp)
75a70cf9 4813 GIMPLE_OMP_ATOMIC_STORE (val)
cb7f680b 4814
4815 ???FIXME: Allow a more flexible sequence.
4816 Perhaps use data flow to pick the statements.
4817
4818 */
4819
75a70cf9 4820 gsi = gsi_after_labels (store_bb);
4821 stmt = gsi_stmt (gsi);
4822 if (!is_gimple_assign (stmt))
cb7f680b 4823 return false;
75a70cf9 4824 gsi_next (&gsi);
4825 if (gimple_code (gsi_stmt (gsi)) != GIMPLE_OMP_ATOMIC_STORE)
cb7f680b 4826 return false;
4827
75a70cf9 4828 if (!operand_equal_p (gimple_assign_lhs (stmt), stored_val, 0))
cb7f680b 4829 return false;
4830
cb7f680b 4831 /* Check for one of the supported fetch-op operations. */
75a70cf9 4832 switch (gimple_assign_rhs_code (stmt))
cb7f680b 4833 {
4834 case PLUS_EXPR:
4835 case POINTER_PLUS_EXPR:
4836 base = BUILT_IN_FETCH_AND_ADD_N;
4837 optab = sync_add_optab;
4838 break;
4839 case MINUS_EXPR:
4840 base = BUILT_IN_FETCH_AND_SUB_N;
4841 optab = sync_add_optab;
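      /* Presumably intentional (an assumption, not stated here): a
         fetch-and-sub can be expanded as a fetch-and-add of the
         negated operand, so the add optab is the one that must exist.  */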
4842 break;
4843 case BIT_AND_EXPR:
4844 base = BUILT_IN_FETCH_AND_AND_N;
4845 optab = sync_and_optab;
4846 break;
4847 case BIT_IOR_EXPR:
4848 base = BUILT_IN_FETCH_AND_OR_N;
4849 optab = sync_ior_optab;
4850 break;
4851 case BIT_XOR_EXPR:
4852 base = BUILT_IN_FETCH_AND_XOR_N;
4853 optab = sync_xor_optab;
4854 break;
4855 default:
4856 return false;
4857 }
4858 /* Make sure the expression is of the proper form. */
75a70cf9 4859 if (operand_equal_p (gimple_assign_rhs1 (stmt), loaded_val, 0))
4860 rhs = gimple_assign_rhs2 (stmt);
4861 else if (commutative_tree_code (gimple_assign_rhs_code (stmt))
4862 && operand_equal_p (gimple_assign_rhs2 (stmt), loaded_val, 0))
4863 rhs = gimple_assign_rhs1 (stmt);
cb7f680b 4864 else
4865 return false;
4866
4867 decl = built_in_decls[base + index + 1];
4868 itype = TREE_TYPE (TREE_TYPE (decl));
4869
4870 if (optab[TYPE_MODE (itype)] == CODE_FOR_nothing)
4871 return false;
4872
75a70cf9 4873 gsi = gsi_last_bb (load_bb);
4874 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_ATOMIC_LOAD);
cb7f680b 4875 call = build_call_expr (decl, 2, addr, fold_convert (itype, rhs));
ae9150f2 4876 call = fold_convert (void_type_node, call);
75a70cf9 4877 force_gimple_operand_gsi (&gsi, call, true, NULL_TREE, true, GSI_SAME_STMT);
4878 gsi_remove (&gsi, true);
cb7f680b 4879
75a70cf9 4880 gsi = gsi_last_bb (store_bb);
4881 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_ATOMIC_STORE);
4882 gsi_remove (&gsi, true);
4883 gsi = gsi_last_bb (store_bb);
4884 gsi_remove (&gsi, true);
cb7f680b 4885
4886 if (gimple_in_ssa_p (cfun))
4887 update_ssa (TODO_update_ssa_no_phi);
4888
4889 return true;
4890}
4891
4892/* A subroutine of expand_omp_atomic. Implement the atomic operation as:
4893
4894 oldval = *addr;
 4895 repeat:
 4896 newval = rhs; // with oldval replacing *addr in rhs
 4897 witnessed = __sync_val_compare_and_swap (addr, oldval, newval);
 4898 if (witnessed != oldval)
 4899 { oldval = witnessed; goto repeat; }
4900
4901 INDEX is log2 of the size of the data type, and thus usable to find the
4902 index of the builtin decl. */
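/* A sketch of that loop for a 4-byte float (not compiler code; the
   function name is hypothetical).  __sync_val_compare_and_swap is the
   real GCC builtin; the memcpy calls stand in for the
   VIEW_CONVERT_EXPRs set up below, and the pointer cast is why IADDR
   is given alias set zero.  */
#if 0
#include <string.h>

static void
atomic_add_float (float *addr, float term)
{
  unsigned int oldi, newi, prev;
  float oldval, newval;

  memcpy (&oldi, addr, sizeof oldi);            /* Initial load.  */
  for (;;)
    {
      memcpy (&oldval, &oldi, sizeof oldval);
      newval = oldval + term;                   /* RHS using oldval.  */
      memcpy (&newi, &newval, sizeof newi);
      prev = __sync_val_compare_and_swap ((unsigned int *) addr,
                                          oldi, newi);
      /* Compare the integer images, so NaNs and -0.0 still let the
         loop terminate.  */
      if (prev == oldi)
        break;
      oldi = prev;
    }
}
#endif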
4903
4904static bool
4905expand_omp_atomic_pipeline (basic_block load_bb, basic_block store_bb,
4906 tree addr, tree loaded_val, tree stored_val,
4907 int index)
4908{
790368c5 4909 tree loadedi, storedi, initial, new_storedi, old_vali;
cb7f680b 4910 tree type, itype, cmpxchg, iaddr;
75a70cf9 4911 gimple_stmt_iterator si;
cb7f680b 4912 basic_block loop_header = single_succ (load_bb);
75a70cf9 4913 gimple phi, stmt;
cb7f680b 4914 edge e;
4915
4916 cmpxchg = built_in_decls[BUILT_IN_VAL_COMPARE_AND_SWAP_N + index + 1];
4917 type = TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (addr)));
4918 itype = TREE_TYPE (TREE_TYPE (cmpxchg));
4919
4920 if (sync_compare_and_swap[TYPE_MODE (itype)] == CODE_FOR_nothing)
4921 return false;
4922
75a70cf9 4923 /* Load the initial value, replacing the GIMPLE_OMP_ATOMIC_LOAD. */
4924 si = gsi_last_bb (load_bb);
4925 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_LOAD);
4926
790368c5 4927 /* For floating-point values, we'll need to view-convert them to integers
4928 so that we can perform the atomic compare and swap. Simplify the
4929 following code by always setting up the "i"ntegral variables. */
4930 if (!INTEGRAL_TYPE_P (type) && !POINTER_TYPE_P (type))
4931 {
75a70cf9 4932 tree iaddr_val;
4933
790368c5 4934 iaddr = create_tmp_var (build_pointer_type (itype), NULL);
75a70cf9 4935 iaddr_val
4936 = force_gimple_operand_gsi (&si,
4937 fold_convert (TREE_TYPE (iaddr), addr),
4938 false, NULL_TREE, true, GSI_SAME_STMT);
4939 stmt = gimple_build_assign (iaddr, iaddr_val);
4940 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
790368c5 4941 DECL_NO_TBAA_P (iaddr) = 1;
4942 DECL_POINTER_ALIAS_SET (iaddr) = 0;
4943 loadedi = create_tmp_var (itype, NULL);
4944 if (gimple_in_ssa_p (cfun))
4945 {
4946 add_referenced_var (iaddr);
4947 add_referenced_var (loadedi);
4948 loadedi = make_ssa_name (loadedi, NULL);
4949 }
4950 }
4951 else
4952 {
4953 iaddr = addr;
4954 loadedi = loaded_val;
4955 }
75a70cf9 4956
4957 initial = force_gimple_operand_gsi (&si, build_fold_indirect_ref (iaddr),
4958 true, NULL_TREE, true, GSI_SAME_STMT);
790368c5 4959
4960 /* Move the value to the LOADEDI temporary. */
cb7f680b 4961 if (gimple_in_ssa_p (cfun))
4962 {
75a70cf9 4963 gcc_assert (gimple_seq_empty_p (phi_nodes (loop_header)));
790368c5 4964 phi = create_phi_node (loadedi, loop_header);
4965 SSA_NAME_DEF_STMT (loadedi) = phi;
cb7f680b 4966 SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, single_succ_edge (load_bb)),
4967 initial);
4968 }
4969 else
75a70cf9 4970 gsi_insert_before (&si,
4971 gimple_build_assign (loadedi, initial),
4972 GSI_SAME_STMT);
790368c5 4973 if (loadedi != loaded_val)
4974 {
75a70cf9 4975 gimple_stmt_iterator gsi2;
4976 tree x;
790368c5 4977
4978 x = build1 (VIEW_CONVERT_EXPR, type, loadedi);
75a70cf9 4979 gsi2 = gsi_start_bb (loop_header);
790368c5 4980 if (gimple_in_ssa_p (cfun))
4981 {
75a70cf9 4982 gimple stmt;
4983 x = force_gimple_operand_gsi (&gsi2, x, true, NULL_TREE,
4984 true, GSI_SAME_STMT);
4985 stmt = gimple_build_assign (loaded_val, x);
4986 gsi_insert_before (&gsi2, stmt, GSI_SAME_STMT);
790368c5 4987 }
4988 else
4989 {
75a70cf9 4990 x = build2 (MODIFY_EXPR, TREE_TYPE (loaded_val), loaded_val, x);
4991 force_gimple_operand_gsi (&gsi2, x, true, NULL_TREE,
4992 true, GSI_SAME_STMT);
790368c5 4993 }
4994 }
75a70cf9 4995 gsi_remove (&si, true);
cb7f680b 4996
75a70cf9 4997 si = gsi_last_bb (store_bb);
4998 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_STORE);
cb7f680b 4999
790368c5 5000 if (iaddr == addr)
5001 storedi = stored_val;
cb7f680b 5002 else
790368c5 5003 storedi =
75a70cf9 5004 force_gimple_operand_gsi (&si,
790368c5 5005 build1 (VIEW_CONVERT_EXPR, itype,
5006 stored_val), true, NULL_TREE, true,
75a70cf9 5007 GSI_SAME_STMT);
cb7f680b 5008
5009 /* Build the compare&swap statement. */
5010 new_storedi = build_call_expr (cmpxchg, 3, iaddr, loadedi, storedi);
75a70cf9 5011 new_storedi = force_gimple_operand_gsi (&si,
cb7f680b 5012 fold_convert (itype, new_storedi),
5013 true, NULL_TREE,
75a70cf9 5014 true, GSI_SAME_STMT);
cb7f680b 5015
5016 if (gimple_in_ssa_p (cfun))
5017 old_vali = loadedi;
5018 else
5019 {
5020 old_vali = create_tmp_var (itype, NULL);
790368c5 5021 if (gimple_in_ssa_p (cfun))
5022 add_referenced_var (old_vali);
75a70cf9 5023 stmt = gimple_build_assign (old_vali, loadedi);
5024 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
cb7f680b 5025
75a70cf9 5026 stmt = gimple_build_assign (loadedi, new_storedi);
5027 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
cb7f680b 5028 }
5029
5030 /* Note that we always perform the comparison as an integer, even for
5031 floating point. This allows the atomic operation to properly
5032 succeed even with NaNs and -0.0. */
75a70cf9 5033 stmt = gimple_build_cond_empty
5034 (build2 (NE_EXPR, boolean_type_node,
5035 new_storedi, old_vali));
5036 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
cb7f680b 5037
5038 /* Update cfg. */
5039 e = single_succ_edge (store_bb);
5040 e->flags &= ~EDGE_FALLTHRU;
5041 e->flags |= EDGE_FALSE_VALUE;
5042
5043 e = make_edge (store_bb, loop_header, EDGE_TRUE_VALUE);
5044
790368c5 5045 /* Copy the new value to loadedi (we already did that before the condition
cb7f680b 5046 if we are not in SSA). */
5047 if (gimple_in_ssa_p (cfun))
5048 {
75a70cf9 5049 phi = gimple_seq_first_stmt (phi_nodes (loop_header));
790368c5 5050 SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, e), new_storedi);
cb7f680b 5051 }
5052
75a70cf9 5053 /* Remove GIMPLE_OMP_ATOMIC_STORE. */
5054 gsi_remove (&si, true);
cb7f680b 5055
5056 if (gimple_in_ssa_p (cfun))
5057 update_ssa (TODO_update_ssa_no_phi);
5058
5059 return true;
5060}
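
/* Illustrative sketch only (not exact GIMPLE): for a 4-byte float LHS
   the loop built above corresponds roughly to

	unsigned int *iaddr = (unsigned int *) addr;
	unsigned int loadedi = *iaddr, storedi, new_storedi, old_vali;
	do
	  {
	    loaded_val = VIEW_CONVERT_EXPR<float> (loadedi);
	    ... compute stored_val from loaded_val ...
	    storedi = VIEW_CONVERT_EXPR<unsigned int> (stored_val);
	    new_storedi = __sync_val_compare_and_swap (iaddr, loadedi,
						       storedi);
	    old_vali = loadedi;
	    loadedi = new_storedi;
	  }
	while (new_storedi != old_vali);

   Comparing the integer views lets the loop terminate correctly even
   for NaNs and -0.0.  */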
5061
5062/* A subroutine of expand_omp_atomic. Implement the atomic operation as:
5063
5064 GOMP_atomic_start ();
5065 *addr = rhs;
5066 GOMP_atomic_end ();
5067
5068 The result is not globally atomic, but works so long as all parallel
5069 references are within #pragma omp atomic directives. According to
5070 responses received from omp@openmp.org, this appears to be within
5071 spec, which makes sense, since that's how several other compilers
5072 handle this situation as well.
75a70cf9 5073 LOADED_VAL and ADDR are the operands of GIMPLE_OMP_ATOMIC_LOAD we're
5074 expanding. STORED_VAL is the operand of the matching
5075 GIMPLE_OMP_ATOMIC_STORE.
cb7f680b 5076
5077 We replace
75a70cf9 5078 GIMPLE_OMP_ATOMIC_LOAD (loaded_val, addr) with
cb7f680b 5079 loaded_val = *addr;
5080
5081 and replace
75a70cf9 5082 GIMPLE_OMP_ATOMIC_STORE (stored_val) with
cb7f680b 5083 *addr = stored_val;
5084*/
5085
5086static bool
5087expand_omp_atomic_mutex (basic_block load_bb, basic_block store_bb,
5088 tree addr, tree loaded_val, tree stored_val)
5089{
75a70cf9 5090 gimple_stmt_iterator si;
5091 gimple stmt;
cb7f680b 5092 tree t;
5093
75a70cf9 5094 si = gsi_last_bb (load_bb);
5095 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_LOAD);
cb7f680b 5096
5097 t = built_in_decls[BUILT_IN_GOMP_ATOMIC_START];
5098 t = build_function_call_expr (t, 0);
75a70cf9 5099 force_gimple_operand_gsi (&si, t, true, NULL_TREE, true, GSI_SAME_STMT);
cb7f680b 5100
75a70cf9 5101 stmt = gimple_build_assign (loaded_val, build_fold_indirect_ref (addr));
5102 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
5103 gsi_remove (&si, true);
cb7f680b 5104
75a70cf9 5105 si = gsi_last_bb (store_bb);
5106 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_STORE);
cb7f680b 5107
75a70cf9 5108 stmt = gimple_build_assign (build_fold_indirect_ref (unshare_expr (addr)),
cb7f680b 5109 stored_val);
75a70cf9 5110 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
cb7f680b 5111
5112 t = built_in_decls[BUILT_IN_GOMP_ATOMIC_END];
5113 t = build_function_call_expr (t, 0);
75a70cf9 5114 force_gimple_operand_gsi (&si, t, true, NULL_TREE, true, GSI_SAME_STMT);
5115 gsi_remove (&si, true);
cb7f680b 5116
5117 if (gimple_in_ssa_p (cfun))
5118 update_ssa (TODO_update_ssa_no_phi);
5119 return true;
5120}
5121
75a70cf9 5122/* Expand a GIMPLE_OMP_ATOMIC statement.  We first try to expand it
cb7f680b 5123 using expand_omp_atomic_fetch_op.  If that fails, we try to
5124 call expand_omp_atomic_pipeline, and if that fails too, the
5125 ultimate fallback is wrapping the operation in a mutex
5126 (expand_omp_atomic_mutex). REGION is the atomic region built
5127 by build_omp_regions_1(). */
5128
5129static void
5130expand_omp_atomic (struct omp_region *region)
5131{
5132 basic_block load_bb = region->entry, store_bb = region->exit;
75a70cf9 5133 gimple load = last_stmt (load_bb), store = last_stmt (store_bb);
5134 tree loaded_val = gimple_omp_atomic_load_lhs (load);
5135 tree addr = gimple_omp_atomic_load_rhs (load);
5136 tree stored_val = gimple_omp_atomic_store_val (store);
cb7f680b 5137 tree type = TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (addr)));
5138 HOST_WIDE_INT index;
5139
5140 /* Make sure the type is one of the supported sizes. */
5141 index = tree_low_cst (TYPE_SIZE_UNIT (type), 1);
5142 index = exact_log2 (index);
5143 if (index >= 0 && index <= 4)
5144 {
5145 unsigned int align = TYPE_ALIGN_UNIT (type);
5146
5147 /* __sync builtins require strict data alignment. */
5148 if (exact_log2 (align) >= index)
5149 {
5150 /* When possible, use specialized atomic update functions. */
5151 if ((INTEGRAL_TYPE_P (type) || POINTER_TYPE_P (type))
5152 && store_bb == single_succ (load_bb))
5153 {
5154 if (expand_omp_atomic_fetch_op (load_bb, addr,
5155 loaded_val, stored_val, index))
5156 return;
5157 }
5158
5159 /* If we don't have specialized __sync builtins, try to implement
5160 this as a compare-and-swap loop. */
5161 if (expand_omp_atomic_pipeline (load_bb, store_bb, addr,
5162 loaded_val, stored_val, index))
5163 return;
5164 }
5165 }
5166
5167 /* The ultimate fallback is wrapping the operation in a mutex. */
5168 expand_omp_atomic_mutex (load_bb, store_bb, addr, loaded_val, stored_val);
5169}
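
/* Dispatch example (illustrative): a 4-byte int gives
   index = exact_log2 (4) = 2; if the location is at least 4-byte
   aligned (exact_log2 (align) >= index), we first try a __sync
   fetch-and-op builtin and then the compare-and-swap loop.  A 16-byte
   type on a 4-byte-aligned address fails the alignment check and
   takes the GOMP_atomic_start/end mutex path instead.  */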
5170
1e8e9920 5171
773c5ba7 5172/* Expand the parallel region tree rooted at REGION. Expansion
5173 proceeds in depth-first order. Innermost regions are expanded
5174 first. This way, parallel regions that require a new function to
75a70cf9 5175 be created (e.g., GIMPLE_OMP_PARALLEL) can be expanded without having any
773c5ba7 5176 internal dependencies in their body. */
5177
5178static void
5179expand_omp (struct omp_region *region)
5180{
5181 while (region)
5182 {
1d22f541 5183 location_t saved_location;
5184
d1d5b012 5185 /* First, determine whether this is a combined parallel+workshare
5186 region. */
75a70cf9 5187 if (region->type == GIMPLE_OMP_PARALLEL)
d1d5b012 5188 determine_parallel_type (region);
5189
773c5ba7 5190 if (region->inner)
5191 expand_omp (region->inner);
5192
1d22f541 5193 saved_location = input_location;
75a70cf9 5194 if (gimple_has_location (last_stmt (region->entry)))
5195 input_location = gimple_location (last_stmt (region->entry));
1d22f541 5196
61e47ac8 5197 switch (region->type)
773c5ba7 5198 {
75a70cf9 5199 case GIMPLE_OMP_PARALLEL:
5200 case GIMPLE_OMP_TASK:
fd6481cf 5201 expand_omp_taskreg (region);
5202 break;
5203
75a70cf9 5204 case GIMPLE_OMP_FOR:
61e47ac8 5205 expand_omp_for (region);
5206 break;
773c5ba7 5207
75a70cf9 5208 case GIMPLE_OMP_SECTIONS:
61e47ac8 5209 expand_omp_sections (region);
5210 break;
773c5ba7 5211
75a70cf9 5212 case GIMPLE_OMP_SECTION:
61e47ac8 5213 /* Individual omp sections are handled together with their
75a70cf9 5214 parent GIMPLE_OMP_SECTIONS region. */
61e47ac8 5215 break;
773c5ba7 5216
75a70cf9 5217 case GIMPLE_OMP_SINGLE:
61e47ac8 5218 expand_omp_single (region);
5219 break;
773c5ba7 5220
75a70cf9 5221 case GIMPLE_OMP_MASTER:
5222 case GIMPLE_OMP_ORDERED:
5223 case GIMPLE_OMP_CRITICAL:
61e47ac8 5224 expand_omp_synch (region);
5225 break;
773c5ba7 5226
75a70cf9 5227 case GIMPLE_OMP_ATOMIC_LOAD:
cb7f680b 5228 expand_omp_atomic (region);
5229 break;
5230
61e47ac8 5231 default:
5232 gcc_unreachable ();
5233 }
cc5982dc 5234
1d22f541 5235 input_location = saved_location;
773c5ba7 5236 region = region->next;
5237 }
5238}
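
/* For instance (illustrative), for

	#pragma omp parallel
	#pragma omp for
	for (...) ...

   the GIMPLE_OMP_FOR region hangs off region->inner of the
   GIMPLE_OMP_PARALLEL region; expand_omp recurses into the inner FOR
   region first, so the parallel body is fully expanded before it is
   outlined into its child function.  */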
5239
5240
5241/* Helper for build_omp_regions. Scan the dominator tree starting at
28c92cbb 5242 block BB. PARENT is the region that contains BB. If SINGLE_TREE is
5243 true, the function ends once a single tree is built (otherwise, a
5244 whole forest of OMP constructs may be built). */
773c5ba7 5245
5246static void
28c92cbb 5247build_omp_regions_1 (basic_block bb, struct omp_region *parent,
5248 bool single_tree)
773c5ba7 5249{
75a70cf9 5250 gimple_stmt_iterator gsi;
5251 gimple stmt;
773c5ba7 5252 basic_block son;
5253
75a70cf9 5254 gsi = gsi_last_bb (bb);
5255 if (!gsi_end_p (gsi) && is_gimple_omp (gsi_stmt (gsi)))
773c5ba7 5256 {
5257 struct omp_region *region;
75a70cf9 5258 enum gimple_code code;
773c5ba7 5259
75a70cf9 5260 stmt = gsi_stmt (gsi);
5261 code = gimple_code (stmt);
5262 if (code == GIMPLE_OMP_RETURN)
773c5ba7 5263 {
5264 /* STMT is the return point out of region PARENT. Mark it
5265 as the exit point and make PARENT the immediately
5266 enclosing region. */
5267 gcc_assert (parent);
5268 region = parent;
61e47ac8 5269 region->exit = bb;
773c5ba7 5270 parent = parent->outer;
773c5ba7 5271 }
75a70cf9 5272 else if (code == GIMPLE_OMP_ATOMIC_STORE)
cb7f680b 5273 {
75a70cf9 5274 /* GIMPLE_OMP_ATOMIC_STORE is analogous to
5275 GIMPLE_OMP_RETURN, but matches with
5276 GIMPLE_OMP_ATOMIC_LOAD. */
cb7f680b 5277 gcc_assert (parent);
75a70cf9 5278 gcc_assert (parent->type == GIMPLE_OMP_ATOMIC_LOAD);
cb7f680b 5279 region = parent;
5280 region->exit = bb;
5281 parent = parent->outer;
5282 }
5283
75a70cf9 5284 else if (code == GIMPLE_OMP_CONTINUE)
61e47ac8 5285 {
5286 gcc_assert (parent);
5287 parent->cont = bb;
5288 }
75a70cf9 5289 else if (code == GIMPLE_OMP_SECTIONS_SWITCH)
ac6e3339 5290 {
75a70cf9 5291 /* GIMPLE_OMP_SECTIONS_SWITCH is part of
5292 GIMPLE_OMP_SECTIONS, and we do nothing for it. */
5293 ;
ac6e3339 5294 }
773c5ba7 5295 else
5296 {
5297 /* Otherwise, this directive becomes the parent for a new
5298 region. */
61e47ac8 5299 region = new_omp_region (bb, code, parent);
773c5ba7 5300 parent = region;
5301 }
773c5ba7 5302 }
5303
28c92cbb 5304 if (single_tree && !parent)
5305 return;
5306
773c5ba7 5307 for (son = first_dom_son (CDI_DOMINATORS, bb);
5308 son;
5309 son = next_dom_son (CDI_DOMINATORS, son))
28c92cbb 5310 build_omp_regions_1 (son, parent, single_tree);
5311}
5312
5313/* Builds the tree of OMP regions rooted at ROOT, storing it in
5314 root_omp_region. */
5315
5316static void
5317build_omp_regions_root (basic_block root)
5318{
5319 gcc_assert (root_omp_region == NULL);
5320 build_omp_regions_1 (root, NULL, true);
5321 gcc_assert (root_omp_region != NULL);
773c5ba7 5322}
5323
28c92cbb 5324/* Expands omp construct (and its subconstructs) starting in HEAD. */
5325
5326void
5327omp_expand_local (basic_block head)
5328{
5329 build_omp_regions_root (head);
5330 if (dump_file && (dump_flags & TDF_DETAILS))
5331 {
5332 fprintf (dump_file, "\nOMP region tree\n\n");
5333 dump_omp_region (dump_file, root_omp_region, 0);
5334 fprintf (dump_file, "\n");
5335 }
5336
5337 remove_exit_barriers (root_omp_region);
5338 expand_omp (root_omp_region);
5339
5340 free_omp_regions ();
5341}
773c5ba7 5342
5343/* Scan the CFG and build a tree of OMP regions, storing the root
5344 of the tree in root_omp_region. */
5345
5346static void
5347build_omp_regions (void)
5348{
61e47ac8 5349 gcc_assert (root_omp_region == NULL);
773c5ba7 5350 calculate_dominance_info (CDI_DOMINATORS);
28c92cbb 5351 build_omp_regions_1 (ENTRY_BLOCK_PTR, NULL, false);
773c5ba7 5352}
5353
773c5ba7 5354/* Main entry point for expanding OMP-GIMPLE into runtime calls. */
5355
2a1990e9 5356static unsigned int
773c5ba7 5357execute_expand_omp (void)
5358{
5359 build_omp_regions ();
5360
61e47ac8 5361 if (!root_omp_region)
5362 return 0;
773c5ba7 5363
61e47ac8 5364 if (dump_file)
5365 {
5366 fprintf (dump_file, "\nOMP region tree\n\n");
5367 dump_omp_region (dump_file, root_omp_region, 0);
5368 fprintf (dump_file, "\n");
773c5ba7 5369 }
61e47ac8 5370
5371 remove_exit_barriers (root_omp_region);
5372
5373 expand_omp (root_omp_region);
5374
61e47ac8 5375 cleanup_tree_cfg ();
5376
5377 free_omp_regions ();
5378
2a1990e9 5379 return 0;
773c5ba7 5380}
5381
79acaae1 5382/* OMP expansion -- the default pass, run before creation of SSA form. */
5383
773c5ba7 5384static bool
5385gate_expand_omp (void)
5386{
945e498c 5387 return (flag_openmp != 0 && errorcount == 0);
773c5ba7 5388}
5389
20099e35 5390struct gimple_opt_pass pass_expand_omp =
773c5ba7 5391{
20099e35 5392 {
5393 GIMPLE_PASS,
773c5ba7 5394 "ompexp", /* name */
5395 gate_expand_omp, /* gate */
5396 execute_expand_omp, /* execute */
5397 NULL, /* sub */
5398 NULL, /* next */
5399 0, /* static_pass_number */
5400 0, /* tv_id */
5401 PROP_gimple_any, /* properties_required */
5402 PROP_gimple_lomp, /* properties_provided */
5403 0, /* properties_destroyed */
5404 0, /* todo_flags_start */
20099e35 5405 TODO_dump_func /* todo_flags_finish */
5406 }
773c5ba7 5407};
5408\f
5409/* Routines to lower OpenMP directives into OMP-GIMPLE. */
5410
75a70cf9 5411/* Lower the OpenMP sections directive in the current statement in GSI_P.
5412 CTX is the enclosing OMP context for the current statement. */
773c5ba7 5413
5414static void
75a70cf9 5415lower_omp_sections (gimple_stmt_iterator *gsi_p, omp_context *ctx)
773c5ba7 5416{
75a70cf9 5417 tree block, control;
5418 gimple_stmt_iterator tgsi;
773c5ba7 5419 unsigned i, len;
75a70cf9 5420 gimple stmt, new_stmt, bind, t;
5421 gimple_seq ilist, dlist, olist, new_body, body;
dac18d1a 5422 struct gimplify_ctx gctx;
773c5ba7 5423
75a70cf9 5424 stmt = gsi_stmt (*gsi_p);
773c5ba7 5425
dac18d1a 5426 push_gimplify_context (&gctx);
773c5ba7 5427
5428 dlist = NULL;
5429 ilist = NULL;
75a70cf9 5430 lower_rec_input_clauses (gimple_omp_sections_clauses (stmt),
5431 &ilist, &dlist, ctx);
773c5ba7 5432
75a70cf9 5433 tgsi = gsi_start (gimple_omp_body (stmt));
5434 for (len = 0; !gsi_end_p (tgsi); len++, gsi_next (&tgsi))
773c5ba7 5435 continue;
5436
75a70cf9 5437 tgsi = gsi_start (gimple_omp_body (stmt));
5438 body = NULL;
5439 for (i = 0; i < len; i++, gsi_next (&tgsi))
773c5ba7 5440 {
5441 omp_context *sctx;
75a70cf9 5442 gimple sec_start;
773c5ba7 5443
75a70cf9 5444 sec_start = gsi_stmt (tgsi);
773c5ba7 5445 sctx = maybe_lookup_ctx (sec_start);
5446 gcc_assert (sctx);
5447
75a70cf9 5448 gimple_seq_add_stmt (&body, sec_start);
61e47ac8 5449
75a70cf9 5450 lower_omp (gimple_omp_body (sec_start), sctx);
5451 gimple_seq_add_seq (&body, gimple_omp_body (sec_start));
5452 gimple_omp_set_body (sec_start, NULL);
773c5ba7 5453
5454 if (i == len - 1)
5455 {
75a70cf9 5456 gimple_seq l = NULL;
5457 lower_lastprivate_clauses (gimple_omp_sections_clauses (stmt), NULL,
773c5ba7 5458 &l, ctx);
75a70cf9 5459 gimple_seq_add_seq (&body, l);
5460 gimple_omp_section_set_last (sec_start);
773c5ba7 5461 }
5462
75a70cf9 5463 gimple_seq_add_stmt (&body, gimple_build_omp_return (false));
773c5ba7 5464 }
1e8e9920 5465
5466 block = make_node (BLOCK);
75a70cf9 5467 bind = gimple_build_bind (NULL, body, block);
1e8e9920 5468
75a70cf9 5469 olist = NULL;
5470 lower_reduction_clauses (gimple_omp_sections_clauses (stmt), &olist, ctx);
773c5ba7 5471
1d22f541 5472 block = make_node (BLOCK);
75a70cf9 5473 new_stmt = gimple_build_bind (NULL, NULL, block);
773c5ba7 5474
1d22f541 5475 pop_gimplify_context (new_stmt);
75a70cf9 5476 gimple_bind_append_vars (new_stmt, ctx->block_vars);
5477 BLOCK_VARS (block) = gimple_bind_vars (bind);
1d22f541 5478 if (BLOCK_VARS (block))
5479 TREE_USED (block) = 1;
5480
75a70cf9 5481 new_body = NULL;
5482 gimple_seq_add_seq (&new_body, ilist);
5483 gimple_seq_add_stmt (&new_body, stmt);
5484 gimple_seq_add_stmt (&new_body, gimple_build_omp_sections_switch ());
5485 gimple_seq_add_stmt (&new_body, bind);
61e47ac8 5486
ac6e3339 5487 control = create_tmp_var (unsigned_type_node, ".section");
75a70cf9 5488 t = gimple_build_omp_continue (control, control);
5489 gimple_omp_sections_set_control (stmt, control);
5490 gimple_seq_add_stmt (&new_body, t);
61e47ac8 5491
75a70cf9 5492 gimple_seq_add_seq (&new_body, olist);
5493 gimple_seq_add_seq (&new_body, dlist);
773c5ba7 5494
75a70cf9 5495 new_body = maybe_catch_exception (new_body);
aade31a0 5496
75a70cf9 5497 t = gimple_build_omp_return
5498 (!!find_omp_clause (gimple_omp_sections_clauses (stmt),
5499 OMP_CLAUSE_NOWAIT));
5500 gimple_seq_add_stmt (&new_body, t);
61e47ac8 5501
75a70cf9 5502 gimple_bind_set_body (new_stmt, new_body);
5503 gimple_omp_set_body (stmt, NULL);
773c5ba7 5504
75a70cf9 5505 gsi_replace (gsi_p, new_stmt, true);
1e8e9920 5506}
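
/* Schematically (a sketch, not exact GIMPLE), the sequence built
   above for a sections directive is

	<input clause setup (ilist)>
	GIMPLE_OMP_SECTIONS <.section>
	GIMPLE_OMP_SECTIONS_SWITCH
	<bind: the section bodies, the last followed by lastprivate
	       code, each terminated by GIMPLE_OMP_RETURN>
	GIMPLE_OMP_CONTINUE (.section, .section)
	<reduction code (olist)> <destructor code (dlist)>
	GIMPLE_OMP_RETURN (nowait?)  */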
5507
5508
773c5ba7 5509/* A subroutine of lower_omp_single. Expand the simple form of
75a70cf9 5510 a GIMPLE_OMP_SINGLE, without a copyprivate clause:
1e8e9920 5511
5512 if (GOMP_single_start ())
5513 BODY;
5514 [ GOMP_barrier (); ] -> unless 'nowait' is present.
773c5ba7 5515
5516 FIXME. It may be better to delay expanding the logic of this until
5517 pass_expand_omp. The expanded logic may make the job more difficult
5518 for a synchronization analysis pass. */
1e8e9920 5519
5520static void
75a70cf9 5521lower_omp_single_simple (gimple single_stmt, gimple_seq *pre_p)
1e8e9920 5522{
75a70cf9 5523 tree tlabel = create_artificial_label ();
5524 tree flabel = create_artificial_label ();
5525 gimple call, cond;
5526 tree lhs, decl;
5527
5528 decl = built_in_decls[BUILT_IN_GOMP_SINGLE_START];
5529 lhs = create_tmp_var (TREE_TYPE (TREE_TYPE (decl)), NULL);
5530 call = gimple_build_call (decl, 0);
5531 gimple_call_set_lhs (call, lhs);
5532 gimple_seq_add_stmt (pre_p, call);
5533
5534 cond = gimple_build_cond (EQ_EXPR, lhs,
5535 fold_convert (TREE_TYPE (lhs), boolean_true_node),
5536 tlabel, flabel);
5537 gimple_seq_add_stmt (pre_p, cond);
5538 gimple_seq_add_stmt (pre_p, gimple_build_label (tlabel));
5539 gimple_seq_add_seq (pre_p, gimple_omp_body (single_stmt));
5540 gimple_seq_add_stmt (pre_p, gimple_build_label (flabel));
1e8e9920 5541}
5542
773c5ba7 5543
5544/* A subroutine of lower_omp_single. Expand the form of
75a70cf9 5545 a GIMPLE_OMP_SINGLE that has a copyprivate clause:
1e8e9920 5546
5547 #pragma omp single copyprivate (a, b, c)
5548
5549 Create a new structure to hold copies of 'a', 'b' and 'c' and emit:
5550
5551 {
5552 if ((copyout_p = GOMP_single_copy_start ()) == NULL)
5553 {
5554 BODY;
5555 copyout.a = a;
5556 copyout.b = b;
5557 copyout.c = c;
5558 GOMP_single_copy_end (&copyout);
5559 }
5560 else
5561 {
5562 a = copyout_p->a;
5563 b = copyout_p->b;
5564 c = copyout_p->c;
5565 }
5566 GOMP_barrier ();
5567 }
773c5ba7 5568
5569 FIXME. It may be better to delay expanding the logic of this until
5570 pass_expand_omp. The expanded logic may make the job more difficult
5571 for a synchronization analysis pass. */
1e8e9920 5572
5573static void
75a70cf9 5574lower_omp_single_copy (gimple single_stmt, gimple_seq *pre_p, omp_context *ctx)
1e8e9920 5575{
75a70cf9 5576 tree ptr_type, t, l0, l1, l2;
5577 gimple_seq copyin_seq;
1e8e9920 5578
5579 ctx->sender_decl = create_tmp_var (ctx->record_type, ".omp_copy_o");
5580
5581 ptr_type = build_pointer_type (ctx->record_type);
5582 ctx->receiver_decl = create_tmp_var (ptr_type, ".omp_copy_i");
5583
5584 l0 = create_artificial_label ();
5585 l1 = create_artificial_label ();
5586 l2 = create_artificial_label ();
5587
c2f47e15 5588 t = build_call_expr (built_in_decls[BUILT_IN_GOMP_SINGLE_COPY_START], 0);
1e8e9920 5589 t = fold_convert (ptr_type, t);
75a70cf9 5590 gimplify_assign (ctx->receiver_decl, t, pre_p);
1e8e9920 5591
5592 t = build2 (EQ_EXPR, boolean_type_node, ctx->receiver_decl,
5593 build_int_cst (ptr_type, 0));
5594 t = build3 (COND_EXPR, void_type_node, t,
5595 build_and_jump (&l0), build_and_jump (&l1));
5596 gimplify_and_add (t, pre_p);
5597
75a70cf9 5598 gimple_seq_add_stmt (pre_p, gimple_build_label (l0));
1e8e9920 5599
75a70cf9 5600 gimple_seq_add_seq (pre_p, gimple_omp_body (single_stmt));
1e8e9920 5601
5602 copyin_seq = NULL;
75a70cf9 5603 lower_copyprivate_clauses (gimple_omp_single_clauses (single_stmt), pre_p,
1e8e9920 5604 &copyin_seq, ctx);
5605
5606 t = build_fold_addr_expr (ctx->sender_decl);
c2f47e15 5607 t = build_call_expr (built_in_decls[BUILT_IN_GOMP_SINGLE_COPY_END], 1, t);
1e8e9920 5608 gimplify_and_add (t, pre_p);
5609
5610 t = build_and_jump (&l2);
5611 gimplify_and_add (t, pre_p);
5612
75a70cf9 5613 gimple_seq_add_stmt (pre_p, gimple_build_label (l1));
1e8e9920 5614
75a70cf9 5615 gimple_seq_add_seq (pre_p, copyin_seq);
1e8e9920 5616
75a70cf9 5617 gimple_seq_add_stmt (pre_p, gimple_build_label (l2));
1e8e9920 5618}
5619
773c5ba7 5620
1e8e9920 5621/* Expand code for an OpenMP single directive. */
5622
5623static void
75a70cf9 5624lower_omp_single (gimple_stmt_iterator *gsi_p, omp_context *ctx)
1e8e9920 5625{
75a70cf9 5626 tree block;
5627 gimple t, bind, single_stmt = gsi_stmt (*gsi_p);
5628 gimple_seq bind_body, dlist;
dac18d1a 5629 struct gimplify_ctx gctx;
1e8e9920 5630
dac18d1a 5631 push_gimplify_context (&gctx);
1e8e9920 5632
75a70cf9 5633 bind_body = NULL;
5634 lower_rec_input_clauses (gimple_omp_single_clauses (single_stmt),
5635 &bind_body, &dlist, ctx);
5636 lower_omp (gimple_omp_body (single_stmt), ctx);
1e8e9920 5637
75a70cf9 5638 gimple_seq_add_stmt (&bind_body, single_stmt);
1e8e9920 5639
5640 if (ctx->record_type)
75a70cf9 5641 lower_omp_single_copy (single_stmt, &bind_body, ctx);
1e8e9920 5642 else
75a70cf9 5643 lower_omp_single_simple (single_stmt, &bind_body);
5644
5645 gimple_omp_set_body (single_stmt, NULL);
1e8e9920 5646
75a70cf9 5647 gimple_seq_add_seq (&bind_body, dlist);
61e47ac8 5648
75a70cf9 5649 bind_body = maybe_catch_exception (bind_body);
61e47ac8 5650
75a70cf9 5651 t = gimple_build_omp_return
5652 (!!find_omp_clause (gimple_omp_single_clauses (single_stmt),
5653 OMP_CLAUSE_NOWAIT));
5654 gimple_seq_add_stmt (&bind_body, t);
aade31a0 5655
75a70cf9 5656 block = make_node (BLOCK);
5657 bind = gimple_build_bind (NULL, bind_body, block);
61e47ac8 5658
1e8e9920 5659 pop_gimplify_context (bind);
773c5ba7 5660
75a70cf9 5661 gimple_bind_append_vars (bind, ctx->block_vars);
5662 BLOCK_VARS (block) = ctx->block_vars;
5663 gsi_replace (gsi_p, bind, true);
1d22f541 5664 if (BLOCK_VARS (block))
5665 TREE_USED (block) = 1;
1e8e9920 5666}
5667
773c5ba7 5668
1e8e9920 5669/* Expand code for an OpenMP master directive. */
5670
5671static void
75a70cf9 5672lower_omp_master (gimple_stmt_iterator *gsi_p, omp_context *ctx)
1e8e9920 5673{
75a70cf9 5674 tree block, lab = NULL, x;
5675 gimple stmt = gsi_stmt (*gsi_p), bind;
5676 gimple_seq tseq;
dac18d1a 5677 struct gimplify_ctx gctx;
1e8e9920 5678
dac18d1a 5679 push_gimplify_context (&gctx);
1e8e9920 5680
5681 block = make_node (BLOCK);
75a70cf9 5682 bind = gimple_build_bind (NULL, gimple_seq_alloc_with_stmt (stmt),
5683 block);
61e47ac8 5684
c2f47e15 5685 x = build_call_expr (built_in_decls[BUILT_IN_OMP_GET_THREAD_NUM], 0);
1e8e9920 5686 x = build2 (EQ_EXPR, boolean_type_node, x, integer_zero_node);
5687 x = build3 (COND_EXPR, void_type_node, x, NULL, build_and_jump (&lab));
75a70cf9 5688 tseq = NULL;
5689 gimplify_and_add (x, &tseq);
5690 gimple_bind_add_seq (bind, tseq);
1e8e9920 5691
75a70cf9 5692 lower_omp (gimple_omp_body (stmt), ctx);
5693 gimple_omp_set_body (stmt, maybe_catch_exception (gimple_omp_body (stmt)));
5694 gimple_bind_add_seq (bind, gimple_omp_body (stmt));
5695 gimple_omp_set_body (stmt, NULL);
1e8e9920 5696
75a70cf9 5697 gimple_bind_add_stmt (bind, gimple_build_label (lab));
61e47ac8 5698
75a70cf9 5699 gimple_bind_add_stmt (bind, gimple_build_omp_return (true));
61e47ac8 5700
1e8e9920 5701 pop_gimplify_context (bind);
773c5ba7 5702
75a70cf9 5703 gimple_bind_append_vars (bind, ctx->block_vars);
5704 BLOCK_VARS (block) = ctx->block_vars;
5705 gsi_replace (gsi_p, bind, true);
1e8e9920 5706}
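
/* The code built above corresponds roughly to

	if (omp_get_thread_num () != 0)
	  goto lab;
	<body>;
     lab:

   Only the master thread runs the body; GIMPLE_OMP_RETURN is built
   with nowait set, since master has no implied barrier.  */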
5707
773c5ba7 5708
1e8e9920 5709/* Expand code for an OpenMP ordered directive. */
5710
5711static void
75a70cf9 5712lower_omp_ordered (gimple_stmt_iterator *gsi_p, omp_context *ctx)
1e8e9920 5713{
75a70cf9 5714 tree block;
5715 gimple stmt = gsi_stmt (*gsi_p), bind, x;
dac18d1a 5716 struct gimplify_ctx gctx;
1e8e9920 5717
dac18d1a 5718 push_gimplify_context (&gctx);
1e8e9920 5719
5720 block = make_node (BLOCK);
75a70cf9 5721 bind = gimple_build_bind (NULL, gimple_seq_alloc_with_stmt (stmt),
5722 block);
61e47ac8 5723
75a70cf9 5724 x = gimple_build_call (built_in_decls[BUILT_IN_GOMP_ORDERED_START], 0);
5725 gimple_bind_add_stmt (bind, x);
1e8e9920 5726
75a70cf9 5727 lower_omp (gimple_omp_body (stmt), ctx);
5728 gimple_omp_set_body (stmt, maybe_catch_exception (gimple_omp_body (stmt)));
5729 gimple_bind_add_seq (bind, gimple_omp_body (stmt));
5730 gimple_omp_set_body (stmt, NULL);
1e8e9920 5731
75a70cf9 5732 x = gimple_build_call (built_in_decls[BUILT_IN_GOMP_ORDERED_END], 0);
5733 gimple_bind_add_stmt (bind, x);
61e47ac8 5734
75a70cf9 5735 gimple_bind_add_stmt (bind, gimple_build_omp_return (true));
61e47ac8 5736
1e8e9920 5737 pop_gimplify_context (bind);
773c5ba7 5738
75a70cf9 5739 gimple_bind_append_vars (bind, ctx->block_vars);
5740 BLOCK_VARS (block) = gimple_bind_vars (bind);
5741 gsi_replace (gsi_p, bind, true);
1e8e9920 5742}
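
/* Roughly, the ordered body becomes

	GOMP_ordered_start ();
	<body>;
	GOMP_ordered_end ();

   again followed by a nowait GIMPLE_OMP_RETURN, since an ordered
   region carries no implied barrier of its own.  */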
5743
1e8e9920 5744
75a70cf9 5745/* Lower a GIMPLE_OMP_CRITICAL statement. This is a relatively simple
1e8e9920 5746 substitution of a couple of function calls. But the NAMED case
5747 requires that languages coordinate a symbol name. It is therefore
5748 best put here in common code. */
5749
5750static GTY((param1_is (tree), param2_is (tree)))
5751 splay_tree critical_name_mutexes;
5752
5753static void
75a70cf9 5754lower_omp_critical (gimple_stmt_iterator *gsi_p, omp_context *ctx)
1e8e9920 5755{
75a70cf9 5756 tree block;
5757 tree name, lock, unlock;
5758 gimple stmt = gsi_stmt (*gsi_p), bind;
5759 gimple_seq tbody;
dac18d1a 5760 struct gimplify_ctx gctx;
1e8e9920 5761
75a70cf9 5762 name = gimple_omp_critical_name (stmt);
1e8e9920 5763 if (name)
5764 {
c2f47e15 5765 tree decl;
1e8e9920 5766 splay_tree_node n;
5767
5768 if (!critical_name_mutexes)
5769 critical_name_mutexes
5770 = splay_tree_new_ggc (splay_tree_compare_pointers);
5771
5772 n = splay_tree_lookup (critical_name_mutexes, (splay_tree_key) name);
5773 if (n == NULL)
5774 {
5775 char *new_str;
5776
5777 decl = create_tmp_var_raw (ptr_type_node, NULL);
5778
5779 new_str = ACONCAT ((".gomp_critical_user_",
5780 IDENTIFIER_POINTER (name), NULL));
5781 DECL_NAME (decl) = get_identifier (new_str);
5782 TREE_PUBLIC (decl) = 1;
5783 TREE_STATIC (decl) = 1;
5784 DECL_COMMON (decl) = 1;
5785 DECL_ARTIFICIAL (decl) = 1;
5786 DECL_IGNORED_P (decl) = 1;
1d416bd7 5787 varpool_finalize_decl (decl);
1e8e9920 5788
5789 splay_tree_insert (critical_name_mutexes, (splay_tree_key) name,
5790 (splay_tree_value) decl);
5791 }
5792 else
5793 decl = (tree) n->value;
5794
1e8e9920 5795 lock = built_in_decls[BUILT_IN_GOMP_CRITICAL_NAME_START];
c2f47e15 5796 lock = build_call_expr (lock, 1, build_fold_addr_expr (decl));
1e8e9920 5797
1e8e9920 5798 unlock = built_in_decls[BUILT_IN_GOMP_CRITICAL_NAME_END];
c2f47e15 5799 unlock = build_call_expr (unlock, 1, build_fold_addr_expr (decl));
1e8e9920 5800 }
5801 else
5802 {
5803 lock = built_in_decls[BUILT_IN_GOMP_CRITICAL_START];
c2f47e15 5804 lock = build_call_expr (lock, 0);
1e8e9920 5805
5806 unlock = built_in_decls[BUILT_IN_GOMP_CRITICAL_END];
c2f47e15 5807 unlock = build_call_expr (unlock, 0);
1e8e9920 5808 }
5809
dac18d1a 5810 push_gimplify_context (&gctx);
1e8e9920 5811
5812 block = make_node (BLOCK);
75a70cf9 5813 bind = gimple_build_bind (NULL, gimple_seq_alloc_with_stmt (stmt), block);
61e47ac8 5814
75a70cf9 5815 tbody = gimple_bind_body (bind);
5816 gimplify_and_add (lock, &tbody);
5817 gimple_bind_set_body (bind, tbody);
1e8e9920 5818
75a70cf9 5819 lower_omp (gimple_omp_body (stmt), ctx);
5820 gimple_omp_set_body (stmt, maybe_catch_exception (gimple_omp_body (stmt)));
5821 gimple_bind_add_seq (bind, gimple_omp_body (stmt));
5822 gimple_omp_set_body (stmt, NULL);
1e8e9920 5823
75a70cf9 5824 tbody = gimple_bind_body (bind);
5825 gimplify_and_add (unlock, &tbody);
5826 gimple_bind_set_body (bind, tbody);
61e47ac8 5827
75a70cf9 5828 gimple_bind_add_stmt (bind, gimple_build_omp_return (true));
1e8e9920 5829
5830 pop_gimplify_context (bind);
75a70cf9 5831 gimple_bind_append_vars (bind, ctx->block_vars);
5832 BLOCK_VARS (block) = gimple_bind_vars (bind);
5833 gsi_replace (gsi_p, bind, true);
773c5ba7 5834}
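
/* Schematically, "#pragma omp critical (foo)" is lowered to

	GOMP_critical_name_start (&.gomp_critical_user_foo);
	<body>;
	GOMP_critical_name_end (&.gomp_critical_user_foo);

   where .gomp_critical_user_foo is the common, public variable
   created above, so all translation units that use the name "foo"
   share one lock.  The unnamed form calls GOMP_critical_start/end,
   which use a single lock internal to libgomp.  */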
5835
5836
5837/* A subroutine of lower_omp_for. Generate code to emit the predicate
5838 for a lastprivate clause. Given a loop control predicate of (V
5839 cond N2), we gate the clause on (!(V cond N2)). The lowered form
1e4afe3c 5840 is appended to *DLIST, iterator initialization is appended to
5841 *BODY_P. */
773c5ba7 5842
5843static void
75a70cf9 5844lower_omp_for_lastprivate (struct omp_for_data *fd, gimple_seq *body_p,
5845 gimple_seq *dlist, struct omp_context *ctx)
773c5ba7 5846{
75a70cf9 5847 tree clauses, cond, vinit;
773c5ba7 5848 enum tree_code cond_code;
75a70cf9 5849 gimple_seq stmts;
773c5ba7 5850
fd6481cf 5851 cond_code = fd->loop.cond_code;
773c5ba7 5852 cond_code = cond_code == LT_EXPR ? GE_EXPR : LE_EXPR;
5853
5854 /* When possible, use a strict equality expression. This can let
5855 VRP-type optimizations deduce the value and remove a copy. */
fd6481cf 5856 if (host_integerp (fd->loop.step, 0))
773c5ba7 5857 {
fd6481cf 5858 HOST_WIDE_INT step = TREE_INT_CST_LOW (fd->loop.step);
773c5ba7 5859 if (step == 1 || step == -1)
5860 cond_code = EQ_EXPR;
5861 }
5862
fd6481cf 5863 cond = build2 (cond_code, boolean_type_node, fd->loop.v, fd->loop.n2);
773c5ba7 5864
75a70cf9 5865 clauses = gimple_omp_for_clauses (fd->for_stmt);
1e4afe3c 5866 stmts = NULL;
5867 lower_lastprivate_clauses (clauses, cond, &stmts, ctx);
75a70cf9 5868 if (!gimple_seq_empty_p (stmts))
1e4afe3c 5869 {
75a70cf9 5870 gimple_seq_add_seq (&stmts, *dlist);
fd6481cf 5871 *dlist = stmts;
1e4afe3c 5872
5873 /* Optimize: v = 0; is usually cheaper than v = some_other_constant. */
fd6481cf 5874 vinit = fd->loop.n1;
1e4afe3c 5875 if (cond_code == EQ_EXPR
fd6481cf 5876 && host_integerp (fd->loop.n2, 0)
5877 && ! integer_zerop (fd->loop.n2))
5878 vinit = build_int_cst (TREE_TYPE (fd->loop.v), 0);
1e4afe3c 5879
5880 /* Initialize the iterator variable, so that threads that don't execute
5881 any iterations don't execute the lastprivate clauses by accident. */
75a70cf9 5882 gimplify_assign (fd->loop.v, vinit, body_p);
1e4afe3c 5883 }
773c5ba7 5884}
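
/* Example (illustrative): for "for (i = 0; i < n; i++)" the gating
   predicate starts out as (i >= n) and, because the step is 1, is
   strengthened to the strict test (i == n), which VRP-type
   optimizations can often fold away after the loop exit.  */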
5885
5886
5887/* Lower code for an OpenMP loop directive. */
5888
5889static void
75a70cf9 5890lower_omp_for (gimple_stmt_iterator *gsi_p, omp_context *ctx)
773c5ba7 5891{
75a70cf9 5892 tree *rhs_p, block;
773c5ba7 5893 struct omp_for_data fd;
75a70cf9 5894 gimple stmt = gsi_stmt (*gsi_p), new_stmt;
5895 gimple_seq omp_for_body, body, dlist, ilist;
5896 size_t i;
dac18d1a 5897 struct gimplify_ctx gctx;
773c5ba7 5898
dac18d1a 5899 push_gimplify_context (&gctx);
773c5ba7 5900
75a70cf9 5901 lower_omp (gimple_omp_for_pre_body (stmt), ctx);
5902 lower_omp (gimple_omp_body (stmt), ctx);
773c5ba7 5903
1d22f541 5904 block = make_node (BLOCK);
75a70cf9 5905 new_stmt = gimple_build_bind (NULL, NULL, block);
1d22f541 5906
773c5ba7 5907 /* Move the declarations of temporaries out of the loop body before
5908 we make it go away. */
75a70cf9 5909 omp_for_body = gimple_omp_body (stmt);
5910 if (!gimple_seq_empty_p (omp_for_body)
5911 && gimple_code (gimple_seq_first_stmt (omp_for_body)) == GIMPLE_BIND)
5912 {
5913 tree vars = gimple_bind_vars (gimple_seq_first_stmt (omp_for_body));
5914 gimple_bind_append_vars (new_stmt, vars);
5915 }
773c5ba7 5916
75a70cf9 5917 /* The pre-body and input clauses go before the lowered GIMPLE_OMP_FOR. */
773c5ba7 5918 ilist = NULL;
5919 dlist = NULL;
75a70cf9 5920 body = NULL;
5921 lower_rec_input_clauses (gimple_omp_for_clauses (stmt), &body, &dlist, ctx);
5922 gimple_seq_add_seq (&body, gimple_omp_for_pre_body (stmt));
773c5ba7 5923
5924 /* Lower the header expressions. At this point, we can assume that
5925 the header is of the form:
5926
5927 #pragma omp for (V = VAL1; V {<|>|<=|>=} VAL2; V = V [+-] VAL3)
5928
5929 We just need to make sure that VAL1, VAL2 and VAL3 are lowered
5930 using the .omp_data_s mapping, if needed. */
75a70cf9 5931 for (i = 0; i < gimple_omp_for_collapse (stmt); i++)
fd6481cf 5932 {
75a70cf9 5933 rhs_p = gimple_omp_for_initial_ptr (stmt, i);
fd6481cf 5934 if (!is_gimple_min_invariant (*rhs_p))
75a70cf9 5935 *rhs_p = get_formal_tmp_var (*rhs_p, &body);
fd6481cf 5936
75a70cf9 5937 rhs_p = gimple_omp_for_final_ptr (stmt, i);
fd6481cf 5938 if (!is_gimple_min_invariant (*rhs_p))
75a70cf9 5939 *rhs_p = get_formal_tmp_var (*rhs_p, &body);
fd6481cf 5940
75a70cf9 5941 rhs_p = &TREE_OPERAND (gimple_omp_for_incr (stmt, i), 1);
fd6481cf 5942 if (!is_gimple_min_invariant (*rhs_p))
75a70cf9 5943 *rhs_p = get_formal_tmp_var (*rhs_p, &body);
fd6481cf 5944 }
773c5ba7 5945
5946 /* Once lowered, extract the bounds and clauses. */
fd6481cf 5947 extract_omp_for_data (stmt, &fd, NULL);
773c5ba7 5948
75a70cf9 5949 lower_omp_for_lastprivate (&fd, &body, &dlist, ctx);
773c5ba7 5950
75a70cf9 5951 gimple_seq_add_stmt (&body, stmt);
5952 gimple_seq_add_seq (&body, gimple_omp_body (stmt));
61e47ac8 5953
75a70cf9 5954 gimple_seq_add_stmt (&body, gimple_build_omp_continue (fd.loop.v,
5955 fd.loop.v));
61e47ac8 5956
773c5ba7 5957 /* After the loop, add exit clauses. */
75a70cf9 5958 lower_reduction_clauses (gimple_omp_for_clauses (stmt), &body, ctx);
5959 gimple_seq_add_seq (&body, dlist);
773c5ba7 5960
75a70cf9 5961 body = maybe_catch_exception (body);
aade31a0 5962
61e47ac8 5963 /* Region exit marker goes at the end of the loop body. */
75a70cf9 5964 gimple_seq_add_stmt (&body, gimple_build_omp_return (fd.have_nowait));
773c5ba7 5965
1d22f541 5966 pop_gimplify_context (new_stmt);
75a70cf9 5967
5968 gimple_bind_append_vars (new_stmt, ctx->block_vars);
5969 BLOCK_VARS (block) = gimple_bind_vars (new_stmt);
1d22f541 5970 if (BLOCK_VARS (block))
5971 TREE_USED (block) = 1;
773c5ba7 5972
75a70cf9 5973 gimple_bind_set_body (new_stmt, body);
5974 gimple_omp_set_body (stmt, NULL);
5975 gimple_omp_for_set_pre_body (stmt, NULL);
5976 gsi_replace (gsi_p, new_stmt, true);
1e8e9920 5977}
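
/* Putting it together (a sketch, not exact GIMPLE), the lowered
   sequence for a worksharing loop is

	<input clauses> <pre-body> <lastprivate iterator init>
	GIMPLE_OMP_FOR (V = N1; V cond N2; V += STEP)
	  <loop body>
	GIMPLE_OMP_CONTINUE (V, V)
	<reduction clauses> <destructor code (dlist)>
	GIMPLE_OMP_RETURN (nowait?)

   all wrapped in a new GIMPLE_BIND that holds the mapped
   temporaries.  */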
5978
75a70cf9 5979/* Callback for walk_stmts. Check if the current statement only contains
5980 a single GIMPLE_OMP_FOR or GIMPLE_OMP_SECTIONS. */
de7ef844 5981
5982static tree
75a70cf9 5983check_combined_parallel (gimple_stmt_iterator *gsi_p,
5984 bool *handled_ops_p,
5985 struct walk_stmt_info *wi)
de7ef844 5986{
4077bf7a 5987 int *info = (int *) wi->info;
75a70cf9 5988 gimple stmt = gsi_stmt (*gsi_p);
de7ef844 5989
75a70cf9 5990 *handled_ops_p = true;
5991 switch (gimple_code (stmt))
de7ef844 5992 {
75a70cf9 5993 WALK_SUBSTMTS;
5994
5995 case GIMPLE_OMP_FOR:
5996 case GIMPLE_OMP_SECTIONS:
de7ef844 5997 *info = *info == 0 ? 1 : -1;
5998 break;
5999 default:
6000 *info = -1;
6001 break;
6002 }
6003 return NULL;
6004}
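
/* For example, a parallel whose body is exactly one worksharing
   construct, as in

	#pragma omp parallel
	#pragma omp for
	for (...) ...

   leaves *INFO == 1; lower_omp_taskreg then marks the parallel as
   combined, so that expansion can use the combined parallel +
   workshare entry points in libgomp.  */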
773c5ba7 6005
fd6481cf 6006struct omp_taskcopy_context
6007{
6008 /* This field must be at the beginning, as we do "inheritance": Some
6009 callback functions for tree-inline.c (e.g., omp_copy_decl)
6010 receive a copy_body_data pointer that is up-casted to an
6011 omp_context pointer. */
6012 copy_body_data cb;
6013 omp_context *ctx;
6014};
6015
6016static tree
6017task_copyfn_copy_decl (tree var, copy_body_data *cb)
6018{
6019 struct omp_taskcopy_context *tcctx = (struct omp_taskcopy_context *) cb;
6020
6021 if (splay_tree_lookup (tcctx->ctx->sfield_map, (splay_tree_key) var))
6022 return create_tmp_var (TREE_TYPE (var), NULL);
6023
6024 return var;
6025}
6026
6027static tree
6028task_copyfn_remap_type (struct omp_taskcopy_context *tcctx, tree orig_type)
6029{
6030 tree name, new_fields = NULL, type, f;
6031
6032 type = lang_hooks.types.make_type (RECORD_TYPE);
6033 name = DECL_NAME (TYPE_NAME (orig_type));
6034 name = build_decl (TYPE_DECL, name, type);
6035 TYPE_NAME (type) = name;
6036
6037 for (f = TYPE_FIELDS (orig_type); f ; f = TREE_CHAIN (f))
6038 {
6039 tree new_f = copy_node (f);
6040 DECL_CONTEXT (new_f) = type;
6041 TREE_TYPE (new_f) = remap_type (TREE_TYPE (f), &tcctx->cb);
6042 TREE_CHAIN (new_f) = new_fields;
75a70cf9 6043 walk_tree (&DECL_SIZE (new_f), copy_tree_body_r, &tcctx->cb, NULL);
6044 walk_tree (&DECL_SIZE_UNIT (new_f), copy_tree_body_r, &tcctx->cb, NULL);
6045 walk_tree (&DECL_FIELD_OFFSET (new_f), copy_tree_body_r,
6046 &tcctx->cb, NULL);
fd6481cf 6047 new_fields = new_f;
6048 *pointer_map_insert (tcctx->cb.decl_map, f) = new_f;
6049 }
6050 TYPE_FIELDS (type) = nreverse (new_fields);
6051 layout_type (type);
6052 return type;
6053}
6054
6055/* Create task copyfn. */
6056
6057static void
75a70cf9 6058create_task_copyfn (gimple task_stmt, omp_context *ctx)
fd6481cf 6059{
6060 struct function *child_cfun;
6061 tree child_fn, t, c, src, dst, f, sf, arg, sarg, decl;
6062 tree record_type, srecord_type, bind, list;
6063 bool record_needs_remap = false, srecord_needs_remap = false;
6064 splay_tree_node n;
6065 struct omp_taskcopy_context tcctx;
dac18d1a 6066 struct gimplify_ctx gctx;
fd6481cf 6067
75a70cf9 6068 child_fn = gimple_omp_task_copy_fn (task_stmt);
fd6481cf 6069 child_cfun = DECL_STRUCT_FUNCTION (child_fn);
6070 gcc_assert (child_cfun->cfg == NULL);
6071 child_cfun->dont_save_pending_sizes_p = 1;
6072 DECL_SAVED_TREE (child_fn) = alloc_stmt_list ();
6073
6074 /* Reset DECL_CONTEXT on function arguments. */
6075 for (t = DECL_ARGUMENTS (child_fn); t; t = TREE_CHAIN (t))
6076 DECL_CONTEXT (t) = child_fn;
6077
6078 /* Populate the function. */
dac18d1a 6079 push_gimplify_context (&gctx);
fd6481cf 6080 current_function_decl = child_fn;
6081
6082 bind = build3 (BIND_EXPR, void_type_node, NULL, NULL, NULL);
6083 TREE_SIDE_EFFECTS (bind) = 1;
6084 list = NULL;
6085 DECL_SAVED_TREE (child_fn) = bind;
75a70cf9 6086 DECL_SOURCE_LOCATION (child_fn) = gimple_location (task_stmt);
fd6481cf 6087
6088 /* Remap src and dst argument types if needed. */
6089 record_type = ctx->record_type;
6090 srecord_type = ctx->srecord_type;
6091 for (f = TYPE_FIELDS (record_type); f ; f = TREE_CHAIN (f))
6092 if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
6093 {
6094 record_needs_remap = true;
6095 break;
6096 }
6097 for (f = TYPE_FIELDS (srecord_type); f ; f = TREE_CHAIN (f))
6098 if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
6099 {
6100 srecord_needs_remap = true;
6101 break;
6102 }
6103
6104 if (record_needs_remap || srecord_needs_remap)
6105 {
6106 memset (&tcctx, '\0', sizeof (tcctx));
6107 tcctx.cb.src_fn = ctx->cb.src_fn;
6108 tcctx.cb.dst_fn = child_fn;
6109 tcctx.cb.src_node = cgraph_node (tcctx.cb.src_fn);
6110 tcctx.cb.dst_node = tcctx.cb.src_node;
6111 tcctx.cb.src_cfun = ctx->cb.src_cfun;
6112 tcctx.cb.copy_decl = task_copyfn_copy_decl;
6113 tcctx.cb.eh_region = -1;
6114 tcctx.cb.transform_call_graph_edges = CB_CGE_MOVE;
6115 tcctx.cb.decl_map = pointer_map_create ();
6116 tcctx.ctx = ctx;
6117
6118 if (record_needs_remap)
6119 record_type = task_copyfn_remap_type (&tcctx, record_type);
6120 if (srecord_needs_remap)
6121 srecord_type = task_copyfn_remap_type (&tcctx, srecord_type);
6122 }
6123 else
6124 tcctx.cb.decl_map = NULL;
6125
6126 push_cfun (child_cfun);
6127
6128 arg = DECL_ARGUMENTS (child_fn);
6129 TREE_TYPE (arg) = build_pointer_type (record_type);
6130 sarg = TREE_CHAIN (arg);
6131 TREE_TYPE (sarg) = build_pointer_type (srecord_type);
6132
6133 /* First pass: initialize temporaries used in record_type and srecord_type
6134 sizes and field offsets. */
6135 if (tcctx.cb.decl_map)
75a70cf9 6136 for (c = gimple_omp_task_clauses (task_stmt); c; c = OMP_CLAUSE_CHAIN (c))
fd6481cf 6137 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_FIRSTPRIVATE)
6138 {
6139 tree *p;
6140
6141 decl = OMP_CLAUSE_DECL (c);
6142 p = (tree *) pointer_map_contains (tcctx.cb.decl_map, decl);
6143 if (p == NULL)
6144 continue;
6145 n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
6146 sf = (tree) n->value;
6147 sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
6148 src = build_fold_indirect_ref (sarg);
6149 src = build3 (COMPONENT_REF, TREE_TYPE (sf), src, sf, NULL);
75a70cf9 6150 t = build2 (MODIFY_EXPR, TREE_TYPE (*p), *p, src);
fd6481cf 6151 append_to_statement_list (t, &list);
6152 }
6153
6154 /* Second pass: copy shared var pointers and copy construct non-VLA
6155 firstprivate vars. */
75a70cf9 6156 for (c = gimple_omp_task_clauses (task_stmt); c; c = OMP_CLAUSE_CHAIN (c))
fd6481cf 6157 switch (OMP_CLAUSE_CODE (c))
6158 {
6159 case OMP_CLAUSE_SHARED:
6160 decl = OMP_CLAUSE_DECL (c);
6161 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
6162 if (n == NULL)
6163 break;
6164 f = (tree) n->value;
6165 if (tcctx.cb.decl_map)
6166 f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
6167 n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
6168 sf = (tree) n->value;
6169 if (tcctx.cb.decl_map)
6170 sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
6171 src = build_fold_indirect_ref (sarg);
6172 src = build3 (COMPONENT_REF, TREE_TYPE (sf), src, sf, NULL);
6173 dst = build_fold_indirect_ref (arg);
6174 dst = build3 (COMPONENT_REF, TREE_TYPE (f), dst, f, NULL);
75a70cf9 6175 t = build2 (MODIFY_EXPR, TREE_TYPE (dst), dst, src);
fd6481cf 6176 append_to_statement_list (t, &list);
6177 break;
6178 case OMP_CLAUSE_FIRSTPRIVATE:
6179 decl = OMP_CLAUSE_DECL (c);
6180 if (is_variable_sized (decl))
6181 break;
6182 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
6183 if (n == NULL)
6184 break;
6185 f = (tree) n->value;
6186 if (tcctx.cb.decl_map)
6187 f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
6188 n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
6189 if (n != NULL)
6190 {
6191 sf = (tree) n->value;
6192 if (tcctx.cb.decl_map)
6193 sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
6194 src = build_fold_indirect_ref (sarg);
6195 src = build3 (COMPONENT_REF, TREE_TYPE (sf), src, sf, NULL);
6196 if (use_pointer_for_field (decl, NULL) || is_reference (decl))
6197 src = build_fold_indirect_ref (src);
6198 }
6199 else
6200 src = decl;
6201 dst = build_fold_indirect_ref (arg);
6202 dst = build3 (COMPONENT_REF, TREE_TYPE (f), dst, f, NULL);
6203 t = lang_hooks.decls.omp_clause_copy_ctor (c, dst, src);
6204 append_to_statement_list (t, &list);
6205 break;
6206 case OMP_CLAUSE_PRIVATE:
6207 if (! OMP_CLAUSE_PRIVATE_OUTER_REF (c))
6208 break;
6209 decl = OMP_CLAUSE_DECL (c);
6210 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
6211 f = (tree) n->value;
6212 if (tcctx.cb.decl_map)
6213 f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
6214 n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
6215 if (n != NULL)
6216 {
6217 sf = (tree) n->value;
6218 if (tcctx.cb.decl_map)
6219 sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
6220 src = build_fold_indirect_ref (sarg);
6221 src = build3 (COMPONENT_REF, TREE_TYPE (sf), src, sf, NULL);
6222 if (use_pointer_for_field (decl, NULL))
6223 src = build_fold_indirect_ref (src);
6224 }
6225 else
6226 src = decl;
6227 dst = build_fold_indirect_ref (arg);
6228 dst = build3 (COMPONENT_REF, TREE_TYPE (f), dst, f, NULL);
75a70cf9 6229 t = build2 (MODIFY_EXPR, TREE_TYPE (dst), dst, src);
fd6481cf 6230 append_to_statement_list (t, &list);
6231 break;
6232 default:
6233 break;
6234 }
6235
6236 /* Last pass: handle VLA firstprivates. */
6237 if (tcctx.cb.decl_map)
75a70cf9 6238 for (c = gimple_omp_task_clauses (task_stmt); c; c = OMP_CLAUSE_CHAIN (c))
fd6481cf 6239 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_FIRSTPRIVATE)
6240 {
6241 tree ind, ptr, df;
6242
6243 decl = OMP_CLAUSE_DECL (c);
6244 if (!is_variable_sized (decl))
6245 continue;
6246 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
6247 if (n == NULL)
6248 continue;
6249 f = (tree) n->value;
6250 f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
6251 gcc_assert (DECL_HAS_VALUE_EXPR_P (decl));
6252 ind = DECL_VALUE_EXPR (decl);
6253 gcc_assert (TREE_CODE (ind) == INDIRECT_REF);
6254 gcc_assert (DECL_P (TREE_OPERAND (ind, 0)));
6255 n = splay_tree_lookup (ctx->sfield_map,
6256 (splay_tree_key) TREE_OPERAND (ind, 0));
6257 sf = (tree) n->value;
6258 sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
6259 src = build_fold_indirect_ref (sarg);
6260 src = build3 (COMPONENT_REF, TREE_TYPE (sf), src, sf, NULL);
6261 src = build_fold_indirect_ref (src);
6262 dst = build_fold_indirect_ref (arg);
6263 dst = build3 (COMPONENT_REF, TREE_TYPE (f), dst, f, NULL);
6264 t = lang_hooks.decls.omp_clause_copy_ctor (c, dst, src);
6265 append_to_statement_list (t, &list);
6266 n = splay_tree_lookup (ctx->field_map,
6267 (splay_tree_key) TREE_OPERAND (ind, 0));
6268 df = (tree) n->value;
6269 df = *(tree *) pointer_map_contains (tcctx.cb.decl_map, df);
6270 ptr = build_fold_indirect_ref (arg);
6271 ptr = build3 (COMPONENT_REF, TREE_TYPE (df), ptr, df, NULL);
75a70cf9 6272 t = build2 (MODIFY_EXPR, TREE_TYPE (ptr), ptr,
6273 build_fold_addr_expr (dst));
fd6481cf 6274 append_to_statement_list (t, &list);
6275 }
6276
6277 t = build1 (RETURN_EXPR, void_type_node, NULL);
6278 append_to_statement_list (t, &list);
6279
6280 if (tcctx.cb.decl_map)
6281 pointer_map_destroy (tcctx.cb.decl_map);
6282 pop_gimplify_context (NULL);
6283 BIND_EXPR_BODY (bind) = list;
6284 pop_cfun ();
6285 current_function_decl = ctx->cb.src_fn;
6286}
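
/* Schematically (a sketch), the generated copyfn receives pointers
   to the destination and source records and, for
   "#pragma omp task shared (s) firstprivate (x)", performs roughly

	dst->s = src->s;
	<copy-construct> (&dst->x, src->x);

   i.e. shared fields are copied bitwise while firstprivate fields go
   through the language's omp_clause_copy_ctor hook; the extra passes
   above handle variably sized types and VLA firstprivates.  */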
6287
75a70cf9 6288/* Lower the OpenMP parallel or task directive in the current statement
6289 in GSI_P. CTX holds context information for the directive. */
773c5ba7 6290
6291static void
75a70cf9 6292lower_omp_taskreg (gimple_stmt_iterator *gsi_p, omp_context *ctx)
773c5ba7 6293{
75a70cf9 6294 tree clauses;
6295 tree child_fn, t;
6296 gimple stmt = gsi_stmt (*gsi_p);
6297 gimple par_bind, bind;
6298 gimple_seq par_body, olist, ilist, par_olist, par_ilist, new_body;
dac18d1a 6299 struct gimplify_ctx gctx;
773c5ba7 6300
75a70cf9 6301 clauses = gimple_omp_taskreg_clauses (stmt);
6302 par_bind = gimple_seq_first_stmt (gimple_omp_body (stmt));
6303 par_body = gimple_bind_body (par_bind);
773c5ba7 6304 child_fn = ctx->cb.dst_fn;
75a70cf9 6305 if (gimple_code (stmt) == GIMPLE_OMP_PARALLEL
6306 && !gimple_omp_parallel_combined_p (stmt))
de7ef844 6307 {
6308 struct walk_stmt_info wi;
6309 int ws_num = 0;
6310
6311 memset (&wi, 0, sizeof (wi));
de7ef844 6312 wi.info = &ws_num;
6313 wi.val_only = true;
75a70cf9 6314 walk_gimple_seq (par_body, check_combined_parallel, NULL, &wi);
de7ef844 6315 if (ws_num == 1)
75a70cf9 6316 gimple_omp_parallel_set_combined_p (stmt, true);
de7ef844 6317 }
fd6481cf 6318 if (ctx->srecord_type)
6319 create_task_copyfn (stmt, ctx);
773c5ba7 6320
dac18d1a 6321 push_gimplify_context (&gctx);
773c5ba7 6322
75a70cf9 6323 par_olist = NULL;
6324 par_ilist = NULL;
773c5ba7 6325 lower_rec_input_clauses (clauses, &par_ilist, &par_olist, ctx);
75a70cf9 6326 lower_omp (par_body, ctx);
6327 if (gimple_code (stmt) == GIMPLE_OMP_PARALLEL)
fd6481cf 6328 lower_reduction_clauses (clauses, &par_olist, ctx);
773c5ba7 6329
6330 /* Declare all the variables created by mapping and the variables
6331 declared in the scope of the parallel body. */
6332 record_vars_into (ctx->block_vars, child_fn);
75a70cf9 6333 record_vars_into (gimple_bind_vars (par_bind), child_fn);
773c5ba7 6334
6335 if (ctx->record_type)
6336 {
fd6481cf 6337 ctx->sender_decl
6338 = create_tmp_var (ctx->srecord_type ? ctx->srecord_type
6339 : ctx->record_type, ".omp_data_o");
75a70cf9 6340 gimple_omp_taskreg_set_data_arg (stmt, ctx->sender_decl);
773c5ba7 6341 }
6342
75a70cf9 6343 olist = NULL;
6344 ilist = NULL;
773c5ba7 6345 lower_send_clauses (clauses, &ilist, &olist, ctx);
6346 lower_send_shared_vars (&ilist, &olist, ctx);
6347
6348 /* Once all the expansions are done, sequence all the different
75a70cf9 6349 fragments inside gimple_omp_body. */
773c5ba7 6350
75a70cf9 6351 new_body = NULL;
773c5ba7 6352
6353 if (ctx->record_type)
6354 {
6355 t = build_fold_addr_expr (ctx->sender_decl);
cc6b725b 6356 /* fixup_child_record_type might have changed receiver_decl's type. */
6357 t = fold_convert (TREE_TYPE (ctx->receiver_decl), t);
75a70cf9 6358 gimple_seq_add_stmt (&new_body,
6359 gimple_build_assign (ctx->receiver_decl, t));
773c5ba7 6360 }
6361
75a70cf9 6362 gimple_seq_add_seq (&new_body, par_ilist);
6363 gimple_seq_add_seq (&new_body, par_body);
6364 gimple_seq_add_seq (&new_body, par_olist);
6365 new_body = maybe_catch_exception (new_body);
6366 gimple_seq_add_stmt (&new_body, gimple_build_omp_return (false));
6367 gimple_omp_set_body (stmt, new_body);
773c5ba7 6368
75a70cf9 6369 bind = gimple_build_bind (NULL, NULL, gimple_bind_block (par_bind));
6370 gimple_bind_add_stmt (bind, stmt);
1d22f541 6371 if (ilist || olist)
6372 {
75a70cf9 6373 gimple_seq_add_stmt (&ilist, bind);
6374 gimple_seq_add_seq (&ilist, olist);
6375 bind = gimple_build_bind (NULL, ilist, NULL);
1d22f541 6376 }
773c5ba7 6377
75a70cf9 6378 gsi_replace (gsi_p, bind, true);
773c5ba7 6379
75a70cf9 6380 pop_gimplify_context (NULL);
773c5ba7 6381}
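
/* Schematically (a sketch; details vary with use_pointer_for_field),
   "#pragma omp parallel shared (a)" lowers to

	.omp_data_o.a = &a;
	GIMPLE_OMP_PARALLEL <child fn, &.omp_data_o>
	  .omp_data_i = &.omp_data_o;
	  <body, with uses of 'a' rewritten via .omp_data_i->a>
	  GIMPLE_OMP_RETURN
	<copy-back and cleanup code (olist)>  */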
6382
a4890dc9 6383/* Callback for lower_omp_1. Return non-NULL if *tp needs to be
75a70cf9 6384 regimplified. If DATA is non-NULL, lower_omp_1 is being invoked
6385 outside of an OpenMP context, but with task_shared_vars set. */
46515c92 6386
6387static tree
75a70cf9 6388lower_omp_regimplify_p (tree *tp, int *walk_subtrees,
6389 void *data)
46515c92 6390{
a4890dc9 6391 tree t = *tp;
46515c92 6392
a4890dc9 6393 /* Any variable with DECL_VALUE_EXPR needs to be regimplified. */
75a70cf9 6394 if (TREE_CODE (t) == VAR_DECL && data == NULL && DECL_HAS_VALUE_EXPR_P (t))
9f49e155 6395 return t;
6396
6397 if (task_shared_vars
6398 && DECL_P (t)
6399 && bitmap_bit_p (task_shared_vars, DECL_UID (t)))
a4890dc9 6400 return t;
46515c92 6401
a4890dc9 6402 /* If a global variable has been privatized, TREE_CONSTANT on
6403 ADDR_EXPR might be wrong. */
75a70cf9 6404 if (data == NULL && TREE_CODE (t) == ADDR_EXPR)
a4890dc9 6405 recompute_tree_invariant_for_addr_expr (t);
46515c92 6406
a4890dc9 6407 *walk_subtrees = !TYPE_P (t) && !DECL_P (t);
6408 return NULL_TREE;
46515c92 6409}
773c5ba7 6410
a4890dc9 6411static void
75a70cf9 6412lower_omp_1 (gimple_stmt_iterator *gsi_p, omp_context *ctx)
1e8e9920 6413{
75a70cf9 6414 gimple stmt = gsi_stmt (*gsi_p);
6415 struct walk_stmt_info wi;
1e8e9920 6416
75a70cf9 6417 if (gimple_has_location (stmt))
6418 input_location = gimple_location (stmt);
a4890dc9 6419
75a70cf9 6420 if (task_shared_vars)
6421 memset (&wi, '\0', sizeof (wi));
a4890dc9 6422
773c5ba7 6423 /* If we have issued syntax errors, avoid doing any heavy lifting.
6424 Just replace the OpenMP directives with a NOP to avoid
6425 confusing RTL expansion. */
75a70cf9 6426 if (errorcount && is_gimple_omp (stmt))
773c5ba7 6427 {
75a70cf9 6428 gsi_replace (gsi_p, gimple_build_nop (), true);
a4890dc9 6429 return;
773c5ba7 6430 }
6431
75a70cf9 6432 switch (gimple_code (stmt))
1e8e9920 6433 {
75a70cf9 6434 case GIMPLE_COND:
fd6481cf 6435 if ((ctx || task_shared_vars)
75a70cf9 6436 && (walk_tree (gimple_cond_lhs_ptr (stmt), lower_omp_regimplify_p,
6437 ctx ? NULL : &wi, NULL)
6438 || walk_tree (gimple_cond_rhs_ptr (stmt), lower_omp_regimplify_p,
6439 ctx ? NULL : &wi, NULL)))
6440 gimple_regimplify_operands (stmt, gsi_p);
a4890dc9 6441 break;
75a70cf9 6442 case GIMPLE_CATCH:
6443 lower_omp (gimple_catch_handler (stmt), ctx);
a4890dc9 6444 break;
75a70cf9 6445 case GIMPLE_EH_FILTER:
6446 lower_omp (gimple_eh_filter_failure (stmt), ctx);
a4890dc9 6447 break;
75a70cf9 6448 case GIMPLE_TRY:
6449 lower_omp (gimple_try_eval (stmt), ctx);
6450 lower_omp (gimple_try_cleanup (stmt), ctx);
a4890dc9 6451 break;
75a70cf9 6452 case GIMPLE_BIND:
6453 lower_omp (gimple_bind_body (stmt), ctx);
a4890dc9 6454 break;
75a70cf9 6455 case GIMPLE_OMP_PARALLEL:
6456 case GIMPLE_OMP_TASK:
6457 ctx = maybe_lookup_ctx (stmt);
6458 lower_omp_taskreg (gsi_p, ctx);
a4890dc9 6459 break;
75a70cf9 6460 case GIMPLE_OMP_FOR:
6461 ctx = maybe_lookup_ctx (stmt);
1e8e9920 6462 gcc_assert (ctx);
75a70cf9 6463 lower_omp_for (gsi_p, ctx);
1e8e9920 6464 break;
75a70cf9 6465 case GIMPLE_OMP_SECTIONS:
6466 ctx = maybe_lookup_ctx (stmt);
1e8e9920 6467 gcc_assert (ctx);
75a70cf9 6468 lower_omp_sections (gsi_p, ctx);
1e8e9920 6469 break;
75a70cf9 6470 case GIMPLE_OMP_SINGLE:
6471 ctx = maybe_lookup_ctx (stmt);
1e8e9920 6472 gcc_assert (ctx);
75a70cf9 6473 lower_omp_single (gsi_p, ctx);
1e8e9920 6474 break;
75a70cf9 6475 case GIMPLE_OMP_MASTER:
6476 ctx = maybe_lookup_ctx (stmt);
1e8e9920 6477 gcc_assert (ctx);
75a70cf9 6478 lower_omp_master (gsi_p, ctx);
1e8e9920 6479 break;
75a70cf9 6480 case GIMPLE_OMP_ORDERED:
6481 ctx = maybe_lookup_ctx (stmt);
1e8e9920 6482 gcc_assert (ctx);
75a70cf9 6483 lower_omp_ordered (gsi_p, ctx);
1e8e9920 6484 break;
75a70cf9 6485 case GIMPLE_OMP_CRITICAL:
6486 ctx = maybe_lookup_ctx (stmt);
1e8e9920 6487 gcc_assert (ctx);
75a70cf9 6488 lower_omp_critical (gsi_p, ctx);
6489 break;
6490 case GIMPLE_OMP_ATOMIC_LOAD:
6491 if ((ctx || task_shared_vars)
6492 && walk_tree (gimple_omp_atomic_load_rhs_ptr (stmt),
6493 lower_omp_regimplify_p, ctx ? NULL : &wi, NULL))
6494 gimple_regimplify_operands (stmt, gsi_p);
1e8e9920 6495 break;
a4890dc9 6496 default:
fd6481cf 6497 if ((ctx || task_shared_vars)
75a70cf9 6498 && walk_gimple_op (stmt, lower_omp_regimplify_p,
6499 ctx ? NULL : &wi))
6500 gimple_regimplify_operands (stmt, gsi_p);
1e8e9920 6501 break;
1e8e9920 6502 }
1e8e9920 6503}
6504
6505static void
75a70cf9 6506lower_omp (gimple_seq body, omp_context *ctx)
1e8e9920 6507{
1d22f541 6508 location_t saved_location = input_location;
75a70cf9 6509 gimple_stmt_iterator gsi;
6510 for (gsi = gsi_start (body); !gsi_end_p (gsi); gsi_next (&gsi))
6511 lower_omp_1 (&gsi, ctx);
1d22f541 6512 input_location = saved_location;
1e8e9920 6513}
6514\f
6515/* Main entry point. */
6516
2a1990e9 6517static unsigned int
1e8e9920 6518execute_lower_omp (void)
6519{
75a70cf9 6520 gimple_seq body;
6521
1e8e9920 6522 all_contexts = splay_tree_new (splay_tree_compare_pointers, 0,
6523 delete_omp_context);
6524
75a70cf9 6525 body = gimple_body (current_function_decl);
6526 scan_omp (body, NULL);
fd6481cf 6527 gcc_assert (taskreg_nesting_level == 0);
1e8e9920 6528
6529 if (all_contexts->root)
fd6481cf 6530 {
dac18d1a 6531 struct gimplify_ctx gctx;
6532
fd6481cf 6533 if (task_shared_vars)
dac18d1a 6534 push_gimplify_context (&gctx);
75a70cf9 6535 lower_omp (body, NULL);
fd6481cf 6536 if (task_shared_vars)
6537 pop_gimplify_context (NULL);
6538 }
1e8e9920 6539
773c5ba7 6540 if (all_contexts)
6541 {
6542 splay_tree_delete (all_contexts);
6543 all_contexts = NULL;
6544 }
fd6481cf 6545 BITMAP_FREE (task_shared_vars);
2a1990e9 6546 return 0;
1e8e9920 6547}
6548
6549static bool
6550gate_lower_omp (void)
6551{
6552 return flag_openmp != 0;
6553}
6554
20099e35 6555struct gimple_opt_pass pass_lower_omp =
1e8e9920 6556{
20099e35 6557 {
6558 GIMPLE_PASS,
1e8e9920 6559 "omplower", /* name */
6560 gate_lower_omp, /* gate */
6561 execute_lower_omp, /* execute */
6562 NULL, /* sub */
6563 NULL, /* next */
6564 0, /* static_pass_number */
6565 0, /* tv_id */
6566 PROP_gimple_any, /* properties_required */
6567 PROP_gimple_lomp, /* properties_provided */
6568 0, /* properties_destroyed */
6569 0, /* todo_flags_start */
20099e35 6570 TODO_dump_func /* todo_flags_finish */
6571 }
1e8e9920 6572};
1e8e9920 6573\f
6574/* The following is a utility to diagnose OpenMP structured block violations.
61e47ac8 6575 It is not part of the "omplower" pass, as that's invoked too late. It
6576 should be invoked by the respective front ends after gimplification. */
1e8e9920 6577
6578static splay_tree all_labels;
6579
6580/* Check for mismatched contexts and generate an error if needed. Return
6581 true if an error is detected. */
6582
6583static bool
75a70cf9 6584diagnose_sb_0 (gimple_stmt_iterator *gsi_p,
6585 gimple branch_ctx, gimple label_ctx)
1e8e9920 6586{
75a70cf9 6587 if (label_ctx == branch_ctx)
1e8e9920 6588 return false;
6589
75a70cf9 6590
6591 /*
6592 Previously we kept track of the label's entire context in diagnose_sb_[12]
6593 so we could traverse it and issue a correct "exit" or "enter" error
6594 message upon a structured block violation.
6595
6596 We built the context by chaining a list together with tree_cons, but there is
6597 no easy counterpart in gimple tuples. It seems like far too much work
6598 for issuing exit/enter error messages. If someone really misses the
6599 distinct error message... patches welcome.
6600 */
6601
6602#if 0
1e8e9920 6603 /* Try to avoid confusing the user by producing an error message
f0b5f617 6604 with correct "exit" or "enter" verbiage. We prefer "exit"
1e8e9920 6605 unless we can show that LABEL_CTX is nested within BRANCH_CTX. */
6606 if (branch_ctx == NULL)
6607 exit_p = false;
6608 else
6609 {
6610 while (label_ctx)
6611 {
6612 if (TREE_VALUE (label_ctx) == branch_ctx)
6613 {
6614 exit_p = false;
6615 break;
6616 }
6617 label_ctx = TREE_CHAIN (label_ctx);
6618 }
6619 }
6620
6621 if (exit_p)
6622 error ("invalid exit from OpenMP structured block");
6623 else
6624 error ("invalid entry to OpenMP structured block");
75a70cf9 6625#endif

  /* If it's obvious we have an invalid entry, be specific about the error.  */
  if (branch_ctx == NULL)
    error ("invalid entry to OpenMP structured block");
  else
    /* Otherwise, be vague and lazy, but efficient.  */
    error ("invalid branch to/from an OpenMP structured block");

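  /* The offending branch is replaced with a no-op so that the invalid
     control transfer does not survive in the IL.  */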
  gsi_replace (gsi_p, gimple_build_nop (), false);
  return true;
}

/* Pass 1: Create a minimal tree of OpenMP structured blocks, and record
   where each label is found.  */

static tree
diagnose_sb_1 (gimple_stmt_iterator *gsi_p, bool *handled_ops_p,
	       struct walk_stmt_info *wi)
{
  gimple context = (gimple) wi->info;
  gimple inner_context;
  gimple stmt = gsi_stmt (*gsi_p);

  *handled_ops_p = true;

  switch (gimple_code (stmt))
    {
    WALK_SUBSTMTS;

    case GIMPLE_OMP_PARALLEL:
    case GIMPLE_OMP_TASK:
    case GIMPLE_OMP_SECTIONS:
    case GIMPLE_OMP_SINGLE:
    case GIMPLE_OMP_SECTION:
    case GIMPLE_OMP_MASTER:
    case GIMPLE_OMP_ORDERED:
    case GIMPLE_OMP_CRITICAL:
      /* The minimal context here is just the current OMP construct.  */
      inner_context = stmt;
      wi->info = inner_context;
      walk_gimple_seq (gimple_omp_body (stmt), diagnose_sb_1, NULL, wi);
      wi->info = context;
      break;

    case GIMPLE_OMP_FOR:
      inner_context = stmt;
      wi->info = inner_context;
      /* gimple_omp_for_{index,initial,final} are all DECLs; no need to
	 walk them.  */
      walk_gimple_seq (gimple_omp_for_pre_body (stmt),
		       diagnose_sb_1, NULL, wi);
      walk_gimple_seq (gimple_omp_body (stmt), diagnose_sb_1, NULL, wi);
      wi->info = context;
      break;

    case GIMPLE_LABEL:
      splay_tree_insert (all_labels, (splay_tree_key) gimple_label_label (stmt),
			 (splay_tree_value) context);
      break;

    default:
      break;
    }

  return NULL_TREE;
}

/* Pass 2: Check each branch and see if its context differs from the
   destination label's context.  */

static tree
diagnose_sb_2 (gimple_stmt_iterator *gsi_p, bool *handled_ops_p,
	       struct walk_stmt_info *wi)
{
  gimple context = (gimple) wi->info;
  splay_tree_node n;
  gimple stmt = gsi_stmt (*gsi_p);

  *handled_ops_p = true;

  switch (gimple_code (stmt))
    {
    WALK_SUBSTMTS;

    case GIMPLE_OMP_PARALLEL:
    case GIMPLE_OMP_TASK:
    case GIMPLE_OMP_SECTIONS:
    case GIMPLE_OMP_SINGLE:
    case GIMPLE_OMP_SECTION:
    case GIMPLE_OMP_MASTER:
    case GIMPLE_OMP_ORDERED:
    case GIMPLE_OMP_CRITICAL:
      wi->info = stmt;
      walk_gimple_seq (gimple_omp_body (stmt), diagnose_sb_2, NULL, wi);
      wi->info = context;
      break;

    case GIMPLE_OMP_FOR:
      wi->info = stmt;
      /* gimple_omp_for_{index,initial,final} are all DECLs; no need to
	 walk them.  */
      walk_gimple_seq (gimple_omp_for_pre_body (stmt),
		       diagnose_sb_2, NULL, wi);
      walk_gimple_seq (gimple_omp_body (stmt), diagnose_sb_2, NULL, wi);
      wi->info = context;
      break;

    case GIMPLE_GOTO:
      {
	tree lab = gimple_goto_dest (stmt);
	if (TREE_CODE (lab) != LABEL_DECL)
	  break;

	n = splay_tree_lookup (all_labels, (splay_tree_key) lab);
	diagnose_sb_0 (gsi_p, context, n ? (gimple) n->value : NULL);
      }
      break;

    case GIMPLE_SWITCH:
      {
	unsigned int i;
	for (i = 0; i < gimple_switch_num_labels (stmt); ++i)
	  {
	    tree lab = CASE_LABEL (gimple_switch_label (stmt, i));
	    n = splay_tree_lookup (all_labels, (splay_tree_key) lab);
	    if (n && diagnose_sb_0 (gsi_p, context, (gimple) n->value))
	      break;
	  }
      }
      break;

    case GIMPLE_RETURN:
      diagnose_sb_0 (gsi_p, context, NULL);
      break;

    default:
      break;
    }

  return NULL_TREE;
}
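
/* A made-up example of how the two walks above cooperate: given

	#pragma omp parallel
	{
	l1:;
	  goto l2;
	}
      l2:;

   pass 1 maps l1 to the GIMPLE_OMP_PARALLEL and l2 to a NULL context;
   pass 2 then reaches the goto with the parallel as its context, looks
   l2 up in all_labels, and diagnose_sb_0 reports the mismatch.  */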
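
/* Entry point for the check: walk FNDECL's gimple body with the two
   passes above, then discard the label map.  */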
void
diagnose_omp_structured_block_errors (tree fndecl)
{
  tree save_current = current_function_decl;
  struct walk_stmt_info wi;
  struct function *old_cfun = cfun;
  gimple_seq body = gimple_body (fndecl);

  current_function_decl = fndecl;
  set_cfun (DECL_STRUCT_FUNCTION (fndecl));

  all_labels = splay_tree_new (splay_tree_compare_pointers, 0, 0);

  memset (&wi, 0, sizeof (wi));
  walk_gimple_seq (body, diagnose_sb_1, NULL, &wi);

  memset (&wi, 0, sizeof (wi));
  wi.want_locations = true;
  walk_gimple_seq (body, diagnose_sb_2, NULL, &wi);

  splay_tree_delete (all_labels);
  all_labels = NULL;

  set_cfun (old_cfun);
  current_function_decl = save_current;
}
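
/* A front end is expected to invoke this right after gimplifying a
   function, guarded on OpenMP being enabled; a sketch (the exact call
   sites live in the front ends, not in this file):

	if (flag_openmp)
	  diagnose_omp_structured_block_errors (fndecl);

   where fndecl is the FUNCTION_DECL just gimplified.  */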

#include "gt-omp-low.h"