gcc/omp-low.c
/* Lowering pass for OpenMP directives. Converts OpenMP directives
   into explicit calls to the runtime library (libgomp) and data
   marshalling to implement data sharing and copying clauses.
   Contributed by Diego Novillo <dnovillo@redhat.com>

   Copyright (C) 2005, 2006, 2007, 2008, 2009, 2010
   Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3. If not see
<http://www.gnu.org/licenses/>. */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
#include "rtl.h"
#include "gimple.h"
#include "tree-iterator.h"
#include "tree-inline.h"
#include "langhooks.h"
#include "diagnostic-core.h"
#include "tree-flow.h"
#include "timevar.h"
#include "flags.h"
#include "function.h"
#include "expr.h"
#include "tree-pass.h"
#include "ggc.h"
#include "except.h"
#include "splay-tree.h"
#include "optabs.h"
#include "cfgloop.h"


/* Lowering of OpenMP parallel and workshare constructs proceeds in two
   phases. The first phase scans the function looking for OMP statements
   and then for variables that must be replaced to satisfy data sharing
   clauses. The second phase expands code for the constructs, as well as
   re-gimplifying things when variables have been replaced with complex
   expressions.

   Final code generation is done by pass_expand_omp. The flowgraph is
   scanned for parallel regions which are then moved to a new
   function, to be invoked by the thread library. */
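
/* For illustration, a minimal sketch of the overall transformation
   (schematic, not literal compiler output; the child function name
   follows the "_omp_fn" naming used by create_omp_child_function below):

     #pragma omp parallel shared (n)
       body;

   becomes, roughly,

     struct .omp_data_s { int n; } .omp_data_o;
     .omp_data_o.n = n;
     GOMP_parallel_start (foo._omp_fn.0, &.omp_data_o, 0);
     foo._omp_fn.0 (&.omp_data_o);
     GOMP_parallel_end ();

   where foo._omp_fn.0 is the outlined child function that reaches the
   shared data block through its .omp_data_i argument. */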

/* Context structure. Used to store information about each parallel
   directive in the code. */

typedef struct omp_context
{
  /* This field must be at the beginning, as we do "inheritance": Some
     callback functions for tree-inline.c (e.g., omp_copy_decl)
     receive a copy_body_data pointer that is up-casted to an
     omp_context pointer. */
  copy_body_data cb;

  /* The tree of contexts corresponding to the encountered constructs. */
  struct omp_context *outer;
  gimple stmt;

  /* Map variables to fields in a structure that allows communication
     between sending and receiving threads. */
  splay_tree field_map;
  tree record_type;
  tree sender_decl;
  tree receiver_decl;

  /* These are used just by task contexts, if task firstprivate fn is
     needed. srecord_type is used to communicate from the thread
     that encountered the task construct to task firstprivate fn,
     record_type is allocated by GOMP_task, initialized by task firstprivate
     fn and passed to the task body fn. */
  splay_tree sfield_map;
  tree srecord_type;

  /* A chain of variables to add to the top-level block surrounding the
     construct. In the case of a parallel, this is in the child function. */
  tree block_vars;

  /* What to do with variables with implicitly determined sharing
     attributes. */
  enum omp_clause_default_kind default_kind;

  /* Nesting depth of this context. Used to beautify error messages re
     invalid gotos. The outermost ctx is depth 1, with depth 0 being
     reserved for the main body of the function. */
  int depth;

  /* True if this parallel directive is nested within another. */
  bool is_nested;
} omp_context;


struct omp_for_data_loop
{
  tree v, n1, n2, step;
  enum tree_code cond_code;
};

/* A structure describing the main elements of a parallel loop. */

struct omp_for_data
{
  struct omp_for_data_loop loop;
  tree chunk_size;
  gimple for_stmt;
  tree pre, iter_type;
  int collapse;
  bool have_nowait, have_ordered;
  enum omp_clause_schedule_kind sched_kind;
  struct omp_for_data_loop *loops;
};
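
/* As a rough example of how these fields get filled in (a sketch based
   on extract_omp_for_data below, not compiler output), the loop

     #pragma omp for schedule (dynamic, 4)
     for (i = 0; i < n; i++)

   yields loop.v == i, loop.n1 == 0, loop.n2 == n, loop.step == 1,
   loop.cond_code == LT_EXPR, sched_kind == OMP_CLAUSE_SCHEDULE_DYNAMIC
   and chunk_size == 4. */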

static splay_tree all_contexts;
static int taskreg_nesting_level;
struct omp_region *root_omp_region;
static bitmap task_shared_vars;

static void scan_omp (gimple_seq, omp_context *);
static tree scan_omp_1_op (tree *, int *, void *);

#define WALK_SUBSTMTS \
    case GIMPLE_BIND: \
    case GIMPLE_TRY: \
    case GIMPLE_CATCH: \
    case GIMPLE_EH_FILTER: \
      /* The sub-statements for these should be walked. */ \
      *handled_ops_p = false; \
      break;
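
/* A sketch of the intended use (illustrative; example_callback is a
   hypothetical name, but the walk_gimple_stmt callbacks later in this
   file follow exactly this shape): the macro expands to the case labels
   for the container statements whose sub-statements must be walked.

     static tree
     example_callback (gimple_stmt_iterator *gsi, bool *handled_ops_p,
                       struct walk_stmt_info *wi)
     {
       switch (gimple_code (gsi_stmt (*gsi)))
         {
         WALK_SUBSTMTS;

         default:
           *handled_ops_p = false;
           break;
         }
       return NULL_TREE;
     }
*/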

/* Convenience function for calling scan_omp_1_op on tree operands. */

static inline tree
scan_omp_op (tree *tp, omp_context *ctx)
{
  struct walk_stmt_info wi;

  memset (&wi, 0, sizeof (wi));
  wi.info = ctx;
  wi.want_locations = true;

  return walk_tree (tp, scan_omp_1_op, &wi, NULL);
}

static void lower_omp (gimple_seq, omp_context *);
static tree lookup_decl_in_outer_ctx (tree, omp_context *);
static tree maybe_lookup_decl_in_outer_ctx (tree, omp_context *);

/* Find an OpenMP clause of type KIND within CLAUSES. */

tree
find_omp_clause (tree clauses, enum omp_clause_code kind)
{
  for (; clauses ; clauses = OMP_CLAUSE_CHAIN (clauses))
    if (OMP_CLAUSE_CODE (clauses) == kind)
      return clauses;

  return NULL_TREE;
}
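
/* For example (a hypothetical caller; real uses appear throughout this
   file), checking a loop directive for an ordered clause looks like:

     tree c = find_omp_clause (gimple_omp_for_clauses (stmt),
                               OMP_CLAUSE_ORDERED);
     if (c)
       ... the loop carries an ordered clause ...
*/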

/* Return true if CTX is for an omp parallel. */

static inline bool
is_parallel_ctx (omp_context *ctx)
{
  return gimple_code (ctx->stmt) == GIMPLE_OMP_PARALLEL;
}


/* Return true if CTX is for an omp task. */

static inline bool
is_task_ctx (omp_context *ctx)
{
  return gimple_code (ctx->stmt) == GIMPLE_OMP_TASK;
}


/* Return true if CTX is for an omp parallel or omp task. */

static inline bool
is_taskreg_ctx (omp_context *ctx)
{
  return gimple_code (ctx->stmt) == GIMPLE_OMP_PARALLEL
         || gimple_code (ctx->stmt) == GIMPLE_OMP_TASK;
}


/* Return true if REGION is a combined parallel+workshare region. */

static inline bool
is_combined_parallel (struct omp_region *region)
{
  return region->is_combined_parallel;
}


/* Extract the header elements of parallel loop FOR_STMT and store
   them into *FD. */

static void
extract_omp_for_data (gimple for_stmt, struct omp_for_data *fd,
                      struct omp_for_data_loop *loops)
{
  tree t, var, *collapse_iter, *collapse_count;
  tree count = NULL_TREE, iter_type = long_integer_type_node;
  struct omp_for_data_loop *loop;
  int i;
  struct omp_for_data_loop dummy_loop;
  location_t loc = gimple_location (for_stmt);

  fd->for_stmt = for_stmt;
  fd->pre = NULL;
  fd->collapse = gimple_omp_for_collapse (for_stmt);
  if (fd->collapse > 1)
    fd->loops = loops;
  else
    fd->loops = &fd->loop;

  fd->have_nowait = fd->have_ordered = false;
  fd->sched_kind = OMP_CLAUSE_SCHEDULE_STATIC;
  fd->chunk_size = NULL_TREE;
  collapse_iter = NULL;
  collapse_count = NULL;

  for (t = gimple_omp_for_clauses (for_stmt); t ; t = OMP_CLAUSE_CHAIN (t))
    switch (OMP_CLAUSE_CODE (t))
      {
      case OMP_CLAUSE_NOWAIT:
        fd->have_nowait = true;
        break;
      case OMP_CLAUSE_ORDERED:
        fd->have_ordered = true;
        break;
      case OMP_CLAUSE_SCHEDULE:
        fd->sched_kind = OMP_CLAUSE_SCHEDULE_KIND (t);
        fd->chunk_size = OMP_CLAUSE_SCHEDULE_CHUNK_EXPR (t);
        break;
      case OMP_CLAUSE_COLLAPSE:
        if (fd->collapse > 1)
          {
            collapse_iter = &OMP_CLAUSE_COLLAPSE_ITERVAR (t);
            collapse_count = &OMP_CLAUSE_COLLAPSE_COUNT (t);
          }
      default:
        break;
      }

  /* FIXME: for now map schedule(auto) to schedule(static).
     There should be analysis to determine whether all iterations
     are approximately the same amount of work (then schedule(static)
     is best) or if it varies (then schedule(dynamic,N) is better). */
  if (fd->sched_kind == OMP_CLAUSE_SCHEDULE_AUTO)
    {
      fd->sched_kind = OMP_CLAUSE_SCHEDULE_STATIC;
      gcc_assert (fd->chunk_size == NULL);
    }
  gcc_assert (fd->collapse == 1 || collapse_iter != NULL);
  if (fd->sched_kind == OMP_CLAUSE_SCHEDULE_RUNTIME)
    gcc_assert (fd->chunk_size == NULL);
  else if (fd->chunk_size == NULL)
    {
      /* We only need to compute a default chunk size for ordered
         static loops and dynamic loops. */
      if (fd->sched_kind != OMP_CLAUSE_SCHEDULE_STATIC
          || fd->have_ordered
          || fd->collapse > 1)
        fd->chunk_size = (fd->sched_kind == OMP_CLAUSE_SCHEDULE_STATIC)
                         ? integer_zero_node : integer_one_node;
    }

  for (i = 0; i < fd->collapse; i++)
    {
      if (fd->collapse == 1)
        loop = &fd->loop;
      else if (loops != NULL)
        loop = loops + i;
      else
        loop = &dummy_loop;

      loop->v = gimple_omp_for_index (for_stmt, i);
      gcc_assert (SSA_VAR_P (loop->v));
      gcc_assert (TREE_CODE (TREE_TYPE (loop->v)) == INTEGER_TYPE
                  || TREE_CODE (TREE_TYPE (loop->v)) == POINTER_TYPE);
      var = TREE_CODE (loop->v) == SSA_NAME ? SSA_NAME_VAR (loop->v) : loop->v;
      loop->n1 = gimple_omp_for_initial (for_stmt, i);

      loop->cond_code = gimple_omp_for_cond (for_stmt, i);
      loop->n2 = gimple_omp_for_final (for_stmt, i);
      switch (loop->cond_code)
        {
        case LT_EXPR:
        case GT_EXPR:
          break;
        case LE_EXPR:
          if (POINTER_TYPE_P (TREE_TYPE (loop->n2)))
            loop->n2 = fold_build2_loc (loc,
                                        POINTER_PLUS_EXPR, TREE_TYPE (loop->n2),
                                        loop->n2, size_one_node);
          else
            loop->n2 = fold_build2_loc (loc,
                                        PLUS_EXPR, TREE_TYPE (loop->n2), loop->n2,
                                        build_int_cst (TREE_TYPE (loop->n2), 1));
          loop->cond_code = LT_EXPR;
          break;
        case GE_EXPR:
          if (POINTER_TYPE_P (TREE_TYPE (loop->n2)))
            loop->n2 = fold_build2_loc (loc,
                                        POINTER_PLUS_EXPR, TREE_TYPE (loop->n2),
                                        loop->n2, size_int (-1));
          else
            loop->n2 = fold_build2_loc (loc,
                                        MINUS_EXPR, TREE_TYPE (loop->n2), loop->n2,
                                        build_int_cst (TREE_TYPE (loop->n2), 1));
          loop->cond_code = GT_EXPR;
          break;
        default:
          gcc_unreachable ();
        }

      t = gimple_omp_for_incr (for_stmt, i);
      gcc_assert (TREE_OPERAND (t, 0) == var);
      switch (TREE_CODE (t))
        {
        case PLUS_EXPR:
        case POINTER_PLUS_EXPR:
          loop->step = TREE_OPERAND (t, 1);
          break;
        case MINUS_EXPR:
          loop->step = TREE_OPERAND (t, 1);
          loop->step = fold_build1_loc (loc,
                                        NEGATE_EXPR, TREE_TYPE (loop->step),
                                        loop->step);
          break;
        default:
          gcc_unreachable ();
        }

      if (iter_type != long_long_unsigned_type_node)
        {
          if (POINTER_TYPE_P (TREE_TYPE (loop->v)))
            iter_type = long_long_unsigned_type_node;
          else if (TYPE_UNSIGNED (TREE_TYPE (loop->v))
                   && TYPE_PRECISION (TREE_TYPE (loop->v))
                      >= TYPE_PRECISION (iter_type))
            {
              tree n;

              if (loop->cond_code == LT_EXPR)
                n = fold_build2_loc (loc,
                                     PLUS_EXPR, TREE_TYPE (loop->v),
                                     loop->n2, loop->step);
              else
                n = loop->n1;
              if (TREE_CODE (n) != INTEGER_CST
                  || tree_int_cst_lt (TYPE_MAX_VALUE (iter_type), n))
                iter_type = long_long_unsigned_type_node;
            }
          else if (TYPE_PRECISION (TREE_TYPE (loop->v))
                   > TYPE_PRECISION (iter_type))
            {
              tree n1, n2;

              if (loop->cond_code == LT_EXPR)
                {
                  n1 = loop->n1;
                  n2 = fold_build2_loc (loc,
                                        PLUS_EXPR, TREE_TYPE (loop->v),
                                        loop->n2, loop->step);
                }
              else
                {
                  n1 = fold_build2_loc (loc,
                                        MINUS_EXPR, TREE_TYPE (loop->v),
                                        loop->n2, loop->step);
                  n2 = loop->n1;
                }
              if (TREE_CODE (n1) != INTEGER_CST
                  || TREE_CODE (n2) != INTEGER_CST
                  || !tree_int_cst_lt (TYPE_MIN_VALUE (iter_type), n1)
                  || !tree_int_cst_lt (n2, TYPE_MAX_VALUE (iter_type)))
                iter_type = long_long_unsigned_type_node;
            }
        }

      if (collapse_count && *collapse_count == NULL)
        {
          if ((i == 0 || count != NULL_TREE)
              && TREE_CODE (TREE_TYPE (loop->v)) == INTEGER_TYPE
              && TREE_CONSTANT (loop->n1)
              && TREE_CONSTANT (loop->n2)
              && TREE_CODE (loop->step) == INTEGER_CST)
            {
              tree itype = TREE_TYPE (loop->v);

              if (POINTER_TYPE_P (itype))
                itype
                  = lang_hooks.types.type_for_size (TYPE_PRECISION (itype), 0);
              t = build_int_cst (itype, (loop->cond_code == LT_EXPR ? -1 : 1));
              t = fold_build2_loc (loc,
                                   PLUS_EXPR, itype,
                                   fold_convert_loc (loc, itype, loop->step), t);
              t = fold_build2_loc (loc, PLUS_EXPR, itype, t,
                                   fold_convert_loc (loc, itype, loop->n2));
              t = fold_build2_loc (loc, MINUS_EXPR, itype, t,
                                   fold_convert_loc (loc, itype, loop->n1));
              if (TYPE_UNSIGNED (itype) && loop->cond_code == GT_EXPR)
                t = fold_build2_loc (loc, TRUNC_DIV_EXPR, itype,
                                     fold_build1_loc (loc, NEGATE_EXPR, itype, t),
                                     fold_build1_loc (loc, NEGATE_EXPR, itype,
                                                      fold_convert_loc (loc, itype,
                                                                        loop->step)));
              else
                t = fold_build2_loc (loc, TRUNC_DIV_EXPR, itype, t,
                                     fold_convert_loc (loc, itype, loop->step));
              t = fold_convert_loc (loc, long_long_unsigned_type_node, t);
              if (count != NULL_TREE)
                count = fold_build2_loc (loc,
                                         MULT_EXPR, long_long_unsigned_type_node,
                                         count, t);
              else
                count = t;
              if (TREE_CODE (count) != INTEGER_CST)
                count = NULL_TREE;
            }
          else
            count = NULL_TREE;
        }
    }

  if (count)
    {
      if (!tree_int_cst_lt (count, TYPE_MAX_VALUE (long_integer_type_node)))
        iter_type = long_long_unsigned_type_node;
      else
        iter_type = long_integer_type_node;
    }
  else if (collapse_iter && *collapse_iter != NULL)
    iter_type = TREE_TYPE (*collapse_iter);
  fd->iter_type = iter_type;
  if (collapse_iter && *collapse_iter == NULL)
    *collapse_iter = create_tmp_var (iter_type, ".iter");
  if (collapse_count && *collapse_count == NULL)
    {
      if (count)
        *collapse_count = fold_convert_loc (loc, iter_type, count);
      else
        *collapse_count = create_tmp_var (iter_type, ".count");
    }

  if (fd->collapse > 1)
    {
      fd->loop.v = *collapse_iter;
      fd->loop.n1 = build_int_cst (TREE_TYPE (fd->loop.v), 0);
      fd->loop.n2 = *collapse_count;
      fd->loop.step = build_int_cst (TREE_TYPE (fd->loop.v), 1);
      fd->loop.cond_code = LT_EXPR;
    }
}
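
/* For example (a sketch of the condition normalization above, not
   compiler output), a loop written as

     for (i = 0; i <= n; i++)

   is canonicalized so that loop->cond_code == LT_EXPR with
   loop->n2 == n + 1; likewise i >= 0 with i-- becomes GT_EXPR with
   n2 == -1 and step == -1. */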

/* Given two blocks PAR_ENTRY_BB and WS_ENTRY_BB such that WS_ENTRY_BB
   is the immediate dominator of PAR_ENTRY_BB, return true if there
   are no data dependencies that would prevent expanding the parallel
   directive at PAR_ENTRY_BB as a combined parallel+workshare region.

   When expanding a combined parallel+workshare region, the call to
   the child function may need additional arguments in the case of
   GIMPLE_OMP_FOR regions. In some cases, these arguments are
   computed out of variables passed in from the parent to the child
   via 'struct .omp_data_s'. For instance:

        #pragma omp parallel for schedule (guided, i * 4)
        for (j ...)

   Is lowered into:

        # BLOCK 2 (PAR_ENTRY_BB)
        .omp_data_o.i = i;
        #pragma omp parallel [child fn: bar.omp_fn.0 ( ..., D.1598)

        # BLOCK 3 (WS_ENTRY_BB)
        .omp_data_i = &.omp_data_o;
        D.1667 = .omp_data_i->i;
        D.1598 = D.1667 * 4;
        #pragma omp for schedule (guided, D.1598)

   When we outline the parallel region, the call to the child function
   'bar.omp_fn.0' will need the value D.1598 in its argument list, but
   that value is computed *after* the call site. So, in principle we
   cannot do the transformation.
   To see whether the code in WS_ENTRY_BB blocks the combined
   parallel+workshare call, we collect all the variables used in the
   GIMPLE_OMP_FOR header and check whether they appear on the LHS of
   any statement in WS_ENTRY_BB. If so, then we cannot emit the
   combined call.

   FIXME. If we had the SSA form built at this point, we could merely
   hoist the code in block 3 into block 2 and be done with it. But at
   this point we don't have dataflow information and though we could
   hack something up here, it is really not worth the aggravation. */

static bool
workshare_safe_to_combine_p (basic_block ws_entry_bb)
{
  struct omp_for_data fd;
  gimple ws_stmt = last_stmt (ws_entry_bb);

  if (gimple_code (ws_stmt) == GIMPLE_OMP_SECTIONS)
    return true;

  gcc_assert (gimple_code (ws_stmt) == GIMPLE_OMP_FOR);

  extract_omp_for_data (ws_stmt, &fd, NULL);

  if (fd.collapse > 1 && TREE_CODE (fd.loop.n2) != INTEGER_CST)
    return false;
  if (fd.iter_type != long_integer_type_node)
    return false;

  /* FIXME. We give up too easily here. If any of these arguments
     are not constants, they will likely involve variables that have
     been mapped into fields of .omp_data_s for sharing with the child
     function. With appropriate data flow, it would be possible to
     see through this. */
  if (!is_gimple_min_invariant (fd.loop.n1)
      || !is_gimple_min_invariant (fd.loop.n2)
      || !is_gimple_min_invariant (fd.loop.step)
      || (fd.chunk_size && !is_gimple_min_invariant (fd.chunk_size)))
    return false;

  return true;
}

/* Collect additional arguments needed to emit a combined
   parallel+workshare call. WS_STMT is the workshare directive being
   expanded. */

static VEC(tree,gc) *
get_ws_args_for (gimple ws_stmt)
{
  tree t;
  location_t loc = gimple_location (ws_stmt);
  VEC(tree,gc) *ws_args;

  if (gimple_code (ws_stmt) == GIMPLE_OMP_FOR)
    {
      struct omp_for_data fd;

      extract_omp_for_data (ws_stmt, &fd, NULL);

      ws_args = VEC_alloc (tree, gc, 3 + (fd.chunk_size != 0));

      t = fold_convert_loc (loc, long_integer_type_node, fd.loop.n1);
      VEC_quick_push (tree, ws_args, t);

      t = fold_convert_loc (loc, long_integer_type_node, fd.loop.n2);
      VEC_quick_push (tree, ws_args, t);

      t = fold_convert_loc (loc, long_integer_type_node, fd.loop.step);
      VEC_quick_push (tree, ws_args, t);

      if (fd.chunk_size)
        {
          t = fold_convert_loc (loc, long_integer_type_node, fd.chunk_size);
          VEC_quick_push (tree, ws_args, t);
        }

      return ws_args;
    }
  else if (gimple_code (ws_stmt) == GIMPLE_OMP_SECTIONS)
    {
      /* Number of sections is equal to the number of edges from the
         GIMPLE_OMP_SECTIONS_SWITCH statement, except for the one to
         the exit of the sections region. */
      basic_block bb = single_succ (gimple_bb (ws_stmt));
      t = build_int_cst (unsigned_type_node, EDGE_COUNT (bb->succs) - 1);
      ws_args = VEC_alloc (tree, gc, 1);
      VEC_quick_push (tree, ws_args, t);
      return ws_args;
    }

  gcc_unreachable ();
}
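
/* So for the schedule (dynamic, 4) example given earlier, the vector
   built here would hold { (long) n1, (long) n2, (long) step, (long) 4 },
   matching the extra parameters of the combined GOMP_parallel_loop_*
   entry points (a sketch of intent; the actual call emission happens
   later, in pass_expand_omp). */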

/* Discover whether REGION is a combined parallel+workshare region. */

static void
determine_parallel_type (struct omp_region *region)
{
  basic_block par_entry_bb, par_exit_bb;
  basic_block ws_entry_bb, ws_exit_bb;

  if (region == NULL || region->inner == NULL
      || region->exit == NULL || region->inner->exit == NULL
      || region->inner->cont == NULL)
    return;

  /* We only support parallel+for and parallel+sections. */
  if (region->type != GIMPLE_OMP_PARALLEL
      || (region->inner->type != GIMPLE_OMP_FOR
          && region->inner->type != GIMPLE_OMP_SECTIONS))
    return;

  /* Check for perfect nesting PAR_ENTRY_BB -> WS_ENTRY_BB and
     WS_EXIT_BB -> PAR_EXIT_BB. */
  par_entry_bb = region->entry;
  par_exit_bb = region->exit;
  ws_entry_bb = region->inner->entry;
  ws_exit_bb = region->inner->exit;

  if (single_succ (par_entry_bb) == ws_entry_bb
      && single_succ (ws_exit_bb) == par_exit_bb
      && workshare_safe_to_combine_p (ws_entry_bb)
      && (gimple_omp_parallel_combined_p (last_stmt (par_entry_bb))
          || (last_and_only_stmt (ws_entry_bb)
              && last_and_only_stmt (par_exit_bb))))
    {
      gimple ws_stmt = last_stmt (ws_entry_bb);

      if (region->inner->type == GIMPLE_OMP_FOR)
        {
          /* If this is a combined parallel loop, we need to determine
             whether or not to use the combined library calls. There
             are two cases where we do not apply the transformation:
             static loops and any kind of ordered loop. In the first
             case, we already open code the loop so there is no need
             to do anything else. In the latter case, the combined
             parallel loop call would still need extra synchronization
             to implement ordered semantics, so there would not be any
             gain in using the combined call. */
          tree clauses = gimple_omp_for_clauses (ws_stmt);
          tree c = find_omp_clause (clauses, OMP_CLAUSE_SCHEDULE);
          if (c == NULL
              || OMP_CLAUSE_SCHEDULE_KIND (c) == OMP_CLAUSE_SCHEDULE_STATIC
              || find_omp_clause (clauses, OMP_CLAUSE_ORDERED))
            {
              region->is_combined_parallel = false;
              region->inner->is_combined_parallel = false;
              return;
            }
        }

      region->is_combined_parallel = true;
      region->inner->is_combined_parallel = true;
      region->ws_args = get_ws_args_for (ws_stmt);
    }
}


/* Return true if EXPR is variable sized. */

static inline bool
is_variable_sized (const_tree expr)
{
  return !TREE_CONSTANT (TYPE_SIZE_UNIT (TREE_TYPE (expr)));
}

/* Return true if DECL is a reference type. */

static inline bool
is_reference (tree decl)
{
  return lang_hooks.decls.omp_privatize_by_reference (decl);
}
/* Lookup variables in the decl or field splay trees. The "maybe" form
   allows the variable not to have been entered; the plain form asserts
   that the variable must have been entered. */

static inline tree
lookup_decl (tree var, omp_context *ctx)
{
  tree *n;
  n = (tree *) pointer_map_contains (ctx->cb.decl_map, var);
  return *n;
}

static inline tree
maybe_lookup_decl (const_tree var, omp_context *ctx)
{
  tree *n;
  n = (tree *) pointer_map_contains (ctx->cb.decl_map, var);
  return n ? *n : NULL_TREE;
}

static inline tree
lookup_field (tree var, omp_context *ctx)
{
  splay_tree_node n;
  n = splay_tree_lookup (ctx->field_map, (splay_tree_key) var);
  return (tree) n->value;
}

static inline tree
lookup_sfield (tree var, omp_context *ctx)
{
  splay_tree_node n;
  n = splay_tree_lookup (ctx->sfield_map
                         ? ctx->sfield_map : ctx->field_map,
                         (splay_tree_key) var);
  return (tree) n->value;
}

static inline tree
maybe_lookup_field (tree var, omp_context *ctx)
{
  splay_tree_node n;
  n = splay_tree_lookup (ctx->field_map, (splay_tree_key) var);
  return n ? (tree) n->value : NULL_TREE;
}

/* Return true if DECL should be copied by pointer. SHARED_CTX is
   the parallel context if DECL is to be shared. */

static bool
use_pointer_for_field (tree decl, omp_context *shared_ctx)
{
  if (AGGREGATE_TYPE_P (TREE_TYPE (decl)))
    return true;

  /* We can only use copy-in/copy-out semantics for shared variables
     when we know the value is not accessible from an outer scope. */
  if (shared_ctx)
    {
      /* ??? Trivially accessible from anywhere. But why would we even
         be passing an address in this case? Should we simply assert
         this to be false, or should we have a cleanup pass that removes
         these from the list of mappings? */
      if (TREE_STATIC (decl) || DECL_EXTERNAL (decl))
        return true;

      /* For variables with DECL_HAS_VALUE_EXPR_P set, we cannot tell
         without analyzing the expression whether or not its location
         is accessible to anyone else. In the case of nested parallel
         regions it certainly may be. */
      if (TREE_CODE (decl) != RESULT_DECL && DECL_HAS_VALUE_EXPR_P (decl))
        return true;

      /* Do not use copy-in/copy-out for variables that have their
         address taken. */
      if (TREE_ADDRESSABLE (decl))
        return true;

      /* Disallow copy-in/out in nested parallel if
         decl is shared in outer parallel, otherwise
         each thread could store the shared variable
         in its own copy-in location, making the
         variable no longer really shared. */
      if (!TREE_READONLY (decl) && shared_ctx->is_nested)
        {
          omp_context *up;

          for (up = shared_ctx->outer; up; up = up->outer)
            if (is_taskreg_ctx (up) && maybe_lookup_decl (decl, up))
              break;

          if (up)
            {
              tree c;

              for (c = gimple_omp_taskreg_clauses (up->stmt);
                   c; c = OMP_CLAUSE_CHAIN (c))
                if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_SHARED
                    && OMP_CLAUSE_DECL (c) == decl)
                  break;

              if (c)
                return true;
            }
        }
      /* For tasks avoid using copy-in/out, unless they are readonly
         (in which case just copy-in is used). As tasks can be
         deferred or executed in a different thread, when GOMP_task
         returns, the task hasn't necessarily terminated. */
      if (!TREE_READONLY (decl) && is_task_ctx (shared_ctx))
        {
          tree outer = maybe_lookup_decl_in_outer_ctx (decl, shared_ctx);
          if (is_gimple_reg (outer))
            {
              /* Taking address of OUTER in lower_send_shared_vars
                 might need regimplification of everything that uses the
                 variable. */
              if (!task_shared_vars)
                task_shared_vars = BITMAP_ALLOC (NULL);
              bitmap_set_bit (task_shared_vars, DECL_UID (outer));
              TREE_ADDRESSABLE (outer) = 1;
            }
          return true;
        }
    }

  return false;
}
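
/* As a quick illustration of the distinction (a sketch, not compiler
   output): a scalar like int x shared in a non-nested parallel with its
   address never taken can be copied in and out through the .omp_data_s
   field by value; an array, a static, or an addressable variable instead
   gets a pointer field, and the child accesses the original storage
   through it. */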

/* Create a new VAR_DECL and copy information from VAR to it. */

tree
copy_var_decl (tree var, tree name, tree type)
{
  tree copy = build_decl (DECL_SOURCE_LOCATION (var), VAR_DECL, name, type);

  TREE_ADDRESSABLE (copy) = TREE_ADDRESSABLE (var);
  TREE_THIS_VOLATILE (copy) = TREE_THIS_VOLATILE (var);
  DECL_GIMPLE_REG_P (copy) = DECL_GIMPLE_REG_P (var);
  DECL_ARTIFICIAL (copy) = DECL_ARTIFICIAL (var);
  DECL_IGNORED_P (copy) = DECL_IGNORED_P (var);
  DECL_CONTEXT (copy) = DECL_CONTEXT (var);
  TREE_USED (copy) = 1;
  DECL_SEEN_IN_BIND_EXPR_P (copy) = 1;

  return copy;
}

/* Construct a new automatic decl similar to VAR. */

static tree
omp_copy_decl_2 (tree var, tree name, tree type, omp_context *ctx)
{
  tree copy = copy_var_decl (var, name, type);

  DECL_CONTEXT (copy) = current_function_decl;
  DECL_CHAIN (copy) = ctx->block_vars;
  ctx->block_vars = copy;

  return copy;
}

static tree
omp_copy_decl_1 (tree var, omp_context *ctx)
{
  return omp_copy_decl_2 (var, DECL_NAME (var), TREE_TYPE (var), ctx);
}

/* Build tree nodes to access the field for VAR on the receiver side. */

static tree
build_receiver_ref (tree var, bool by_ref, omp_context *ctx)
{
  tree x, field = lookup_field (var, ctx);

  /* If the receiver record type was remapped in the child function,
     remap the field into the new record type. */
  x = maybe_lookup_field (field, ctx);
  if (x != NULL)
    field = x;

  x = build_simple_mem_ref (ctx->receiver_decl);
  x = build3 (COMPONENT_REF, TREE_TYPE (field), x, field, NULL);
  if (by_ref)
    x = build_simple_mem_ref (x);

  return x;
}

/* Build tree nodes to access VAR in the scope outer to CTX. In the case
   of a parallel, this is a component reference; for workshare constructs
   this is some variable. */

static tree
build_outer_var_ref (tree var, omp_context *ctx)
{
  tree x;

  if (is_global_var (maybe_lookup_decl_in_outer_ctx (var, ctx)))
    x = var;
  else if (is_variable_sized (var))
    {
      x = TREE_OPERAND (DECL_VALUE_EXPR (var), 0);
      x = build_outer_var_ref (x, ctx);
      x = build_simple_mem_ref (x);
    }
  else if (is_taskreg_ctx (ctx))
    {
      bool by_ref = use_pointer_for_field (var, NULL);
      x = build_receiver_ref (var, by_ref, ctx);
    }
  else if (ctx->outer)
    x = lookup_decl (var, ctx->outer);
  else if (is_reference (var))
    /* This can happen with orphaned constructs. If var is reference, it is
       possible it is shared and as such valid. */
    x = var;
  else
    gcc_unreachable ();

  if (is_reference (var))
    x = build_simple_mem_ref (x);

  return x;
}

/* Build tree nodes to access the field for VAR on the sender side. */

static tree
build_sender_ref (tree var, omp_context *ctx)
{
  tree field = lookup_sfield (var, ctx);
  return build3 (COMPONENT_REF, TREE_TYPE (field),
                 ctx->sender_decl, field, NULL);
}

/* Add a new field for VAR inside the structure CTX->SENDER_DECL. */

static void
install_var_field (tree var, bool by_ref, int mask, omp_context *ctx)
{
  tree field, type, sfield = NULL_TREE;

  gcc_assert ((mask & 1) == 0
              || !splay_tree_lookup (ctx->field_map, (splay_tree_key) var));
  gcc_assert ((mask & 2) == 0 || !ctx->sfield_map
              || !splay_tree_lookup (ctx->sfield_map, (splay_tree_key) var));

  type = TREE_TYPE (var);
  if (by_ref)
    type = build_pointer_type (type);
  else if ((mask & 3) == 1 && is_reference (var))
    type = TREE_TYPE (type);

  field = build_decl (DECL_SOURCE_LOCATION (var),
                      FIELD_DECL, DECL_NAME (var), type);

  /* Remember what variable this field was created for. This does have a
     side effect of making dwarf2out ignore this member, so for helpful
     debugging we clear it later in delete_omp_context. */
  DECL_ABSTRACT_ORIGIN (field) = var;
  if (type == TREE_TYPE (var))
    {
      DECL_ALIGN (field) = DECL_ALIGN (var);
      DECL_USER_ALIGN (field) = DECL_USER_ALIGN (var);
      TREE_THIS_VOLATILE (field) = TREE_THIS_VOLATILE (var);
    }
  else
    DECL_ALIGN (field) = TYPE_ALIGN (type);

  if ((mask & 3) == 3)
    {
      insert_field_into_struct (ctx->record_type, field);
      if (ctx->srecord_type)
        {
          sfield = build_decl (DECL_SOURCE_LOCATION (var),
                               FIELD_DECL, DECL_NAME (var), type);
          DECL_ABSTRACT_ORIGIN (sfield) = var;
          DECL_ALIGN (sfield) = DECL_ALIGN (field);
          DECL_USER_ALIGN (sfield) = DECL_USER_ALIGN (field);
          TREE_THIS_VOLATILE (sfield) = TREE_THIS_VOLATILE (field);
          insert_field_into_struct (ctx->srecord_type, sfield);
        }
    }
  else
    {
      if (ctx->srecord_type == NULL_TREE)
        {
          tree t;

          ctx->srecord_type = lang_hooks.types.make_type (RECORD_TYPE);
          ctx->sfield_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
          for (t = TYPE_FIELDS (ctx->record_type); t ; t = TREE_CHAIN (t))
            {
              sfield = build_decl (DECL_SOURCE_LOCATION (var),
                                   FIELD_DECL, DECL_NAME (t), TREE_TYPE (t));
              DECL_ABSTRACT_ORIGIN (sfield) = DECL_ABSTRACT_ORIGIN (t);
              insert_field_into_struct (ctx->srecord_type, sfield);
              splay_tree_insert (ctx->sfield_map,
                                 (splay_tree_key) DECL_ABSTRACT_ORIGIN (t),
                                 (splay_tree_value) sfield);
            }
        }
      sfield = field;
      insert_field_into_struct ((mask & 1) ? ctx->record_type
                                : ctx->srecord_type, field);
    }

  if (mask & 1)
    splay_tree_insert (ctx->field_map, (splay_tree_key) var,
                       (splay_tree_value) field);
  if ((mask & 2) && ctx->sfield_map)
    splay_tree_insert (ctx->sfield_map, (splay_tree_key) var,
                       (splay_tree_value) sfield);
}
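
/* Reading the code above, MASK appears to select where the new field
   lands: bit 0 installs it in record_type/field_map (the block the
   parallel or task body reads), bit 1 in srecord_type/sfield_map (the
   block the encountering thread fills in), and 3 in both. For a plain
   shared variable the callers below pass 3. */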

static tree
install_var_local (tree var, omp_context *ctx)
{
  tree new_var = omp_copy_decl_1 (var, ctx);
  insert_decl_map (&ctx->cb, var, new_var);
  return new_var;
}

/* Adjust the replacement for DECL in CTX for the new context. This means
   copying the DECL_VALUE_EXPR, and fixing up the type. */

static void
fixup_remapped_decl (tree decl, omp_context *ctx, bool private_debug)
{
  tree new_decl, size;

  new_decl = lookup_decl (decl, ctx);

  TREE_TYPE (new_decl) = remap_type (TREE_TYPE (decl), &ctx->cb);

  if ((!TREE_CONSTANT (DECL_SIZE (new_decl)) || private_debug)
      && DECL_HAS_VALUE_EXPR_P (decl))
    {
      tree ve = DECL_VALUE_EXPR (decl);
      walk_tree (&ve, copy_tree_body_r, &ctx->cb, NULL);
      SET_DECL_VALUE_EXPR (new_decl, ve);
      DECL_HAS_VALUE_EXPR_P (new_decl) = 1;
    }

  if (!TREE_CONSTANT (DECL_SIZE (new_decl)))
    {
      size = remap_decl (DECL_SIZE (decl), &ctx->cb);
      if (size == error_mark_node)
        size = TYPE_SIZE (TREE_TYPE (new_decl));
      DECL_SIZE (new_decl) = size;

      size = remap_decl (DECL_SIZE_UNIT (decl), &ctx->cb);
      if (size == error_mark_node)
        size = TYPE_SIZE_UNIT (TREE_TYPE (new_decl));
      DECL_SIZE_UNIT (new_decl) = size;
    }
}

/* The callback for remap_decl. Search all containing contexts for a
   mapping of the variable; this avoids having to duplicate the splay
   tree ahead of time. We know a mapping doesn't already exist in the
   given context. Create new mappings to implement default semantics. */

static tree
omp_copy_decl (tree var, copy_body_data *cb)
{
  omp_context *ctx = (omp_context *) cb;
  tree new_var;

  if (TREE_CODE (var) == LABEL_DECL)
    {
      new_var = create_artificial_label (DECL_SOURCE_LOCATION (var));
      DECL_CONTEXT (new_var) = current_function_decl;
      insert_decl_map (&ctx->cb, var, new_var);
      return new_var;
    }

  while (!is_taskreg_ctx (ctx))
    {
      ctx = ctx->outer;
      if (ctx == NULL)
        return var;
      new_var = maybe_lookup_decl (var, ctx);
      if (new_var)
        return new_var;
    }

  if (is_global_var (var) || decl_function_context (var) != ctx->cb.src_fn)
    return var;

  return error_mark_node;
}


/* Return the parallel region associated with STMT. */

/* Debugging dumps for parallel regions. */
void dump_omp_region (FILE *, struct omp_region *, int);
void debug_omp_region (struct omp_region *);
void debug_all_omp_regions (void);

/* Dump the parallel region tree rooted at REGION. */

void
dump_omp_region (FILE *file, struct omp_region *region, int indent)
{
  fprintf (file, "%*sbb %d: %s\n", indent, "", region->entry->index,
           gimple_code_name[region->type]);

  if (region->inner)
    dump_omp_region (file, region->inner, indent + 4);

  if (region->cont)
    {
      fprintf (file, "%*sbb %d: GIMPLE_OMP_CONTINUE\n", indent, "",
               region->cont->index);
    }

  if (region->exit)
    fprintf (file, "%*sbb %d: GIMPLE_OMP_RETURN\n", indent, "",
             region->exit->index);
  else
    fprintf (file, "%*s[no exit marker]\n", indent, "");

  if (region->next)
    dump_omp_region (file, region->next, indent);
}

DEBUG_FUNCTION void
debug_omp_region (struct omp_region *region)
{
  dump_omp_region (stderr, region, 0);
}

DEBUG_FUNCTION void
debug_all_omp_regions (void)
{
  dump_omp_region (stderr, root_omp_region, 0);
}


/* Create a new parallel region starting at STMT inside region PARENT. */

struct omp_region *
new_omp_region (basic_block bb, enum gimple_code type,
                struct omp_region *parent)
{
  struct omp_region *region = XCNEW (struct omp_region);

  region->outer = parent;
  region->entry = bb;
  region->type = type;

  if (parent)
    {
      /* This is a nested region. Add it to the list of inner
         regions in PARENT. */
      region->next = parent->inner;
      parent->inner = region;
    }
  else
    {
      /* This is a toplevel region. Add it to the list of toplevel
         regions in ROOT_OMP_REGION. */
      region->next = root_omp_region;
      root_omp_region = region;
    }

  return region;
}

/* Release the memory associated with the region tree rooted at REGION. */

static void
free_omp_region_1 (struct omp_region *region)
{
  struct omp_region *i, *n;

  for (i = region->inner; i ; i = n)
    {
      n = i->next;
      free_omp_region_1 (i);
    }

  free (region);
}

/* Release the memory for the entire omp region tree. */

void
free_omp_regions (void)
{
  struct omp_region *r, *n;
  for (r = root_omp_region; r ; r = n)
    {
      n = r->next;
      free_omp_region_1 (r);
    }
  root_omp_region = NULL;
}


/* Create a new context, with OUTER_CTX being the surrounding context. */

static omp_context *
new_omp_context (gimple stmt, omp_context *outer_ctx)
{
  omp_context *ctx = XCNEW (omp_context);

  splay_tree_insert (all_contexts, (splay_tree_key) stmt,
                     (splay_tree_value) ctx);
  ctx->stmt = stmt;

  if (outer_ctx)
    {
      ctx->outer = outer_ctx;
      ctx->cb = outer_ctx->cb;
      ctx->cb.block = NULL;
      ctx->depth = outer_ctx->depth + 1;
    }
  else
    {
      ctx->cb.src_fn = current_function_decl;
      ctx->cb.dst_fn = current_function_decl;
      ctx->cb.src_node = cgraph_get_node (current_function_decl);
      gcc_checking_assert (ctx->cb.src_node);
      ctx->cb.dst_node = ctx->cb.src_node;
      ctx->cb.src_cfun = cfun;
      ctx->cb.copy_decl = omp_copy_decl;
      ctx->cb.eh_lp_nr = 0;
      ctx->cb.transform_call_graph_edges = CB_CGE_MOVE;
      ctx->depth = 1;
    }

  ctx->cb.decl_map = pointer_map_create ();

  return ctx;
}

static gimple_seq maybe_catch_exception (gimple_seq);

/* Finalize task copyfn. */

static void
finalize_task_copyfn (gimple task_stmt)
{
  struct function *child_cfun;
  tree child_fn, old_fn;
  gimple_seq seq, new_seq;
  gimple bind;

  child_fn = gimple_omp_task_copy_fn (task_stmt);
  if (child_fn == NULL_TREE)
    return;

  child_cfun = DECL_STRUCT_FUNCTION (child_fn);

  /* Inform the callgraph about the new function. */
  DECL_STRUCT_FUNCTION (child_fn)->curr_properties
    = cfun->curr_properties;

  old_fn = current_function_decl;
  push_cfun (child_cfun);
  current_function_decl = child_fn;
  bind = gimplify_body (&DECL_SAVED_TREE (child_fn), child_fn, false);
  seq = gimple_seq_alloc ();
  gimple_seq_add_stmt (&seq, bind);
  new_seq = maybe_catch_exception (seq);
  if (new_seq != seq)
    {
      bind = gimple_build_bind (NULL, new_seq, NULL);
      seq = gimple_seq_alloc ();
      gimple_seq_add_stmt (&seq, bind);
    }
  gimple_set_body (child_fn, seq);
  pop_cfun ();
  current_function_decl = old_fn;

  cgraph_add_new_function (child_fn, false);
}
/* Destroy an omp_context data structure. Called through the splay tree
   value delete callback. */

static void
delete_omp_context (splay_tree_value value)
{
  omp_context *ctx = (omp_context *) value;

  pointer_map_destroy (ctx->cb.decl_map);

  if (ctx->field_map)
    splay_tree_delete (ctx->field_map);
  if (ctx->sfield_map)
    splay_tree_delete (ctx->sfield_map);

  /* We hijacked DECL_ABSTRACT_ORIGIN earlier. We need to clear it before
     it produces corrupt debug information. */
  if (ctx->record_type)
    {
      tree t;
      for (t = TYPE_FIELDS (ctx->record_type); t ; t = DECL_CHAIN (t))
        DECL_ABSTRACT_ORIGIN (t) = NULL;
    }
  if (ctx->srecord_type)
    {
      tree t;
      for (t = TYPE_FIELDS (ctx->srecord_type); t ; t = DECL_CHAIN (t))
        DECL_ABSTRACT_ORIGIN (t) = NULL;
    }

  if (is_task_ctx (ctx))
    finalize_task_copyfn (ctx->stmt);

  XDELETE (ctx);
}

/* Fix up RECEIVER_DECL with a type that has been remapped to the child
   context. */

static void
fixup_child_record_type (omp_context *ctx)
{
  tree f, type = ctx->record_type;

  /* ??? It isn't sufficient to just call remap_type here, because
     variably_modified_type_p doesn't work the way we expect for
     record types. Testing each field for whether it needs remapping
     and creating a new record by hand works, however. */
  for (f = TYPE_FIELDS (type); f ; f = DECL_CHAIN (f))
    if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
      break;
  if (f)
    {
      tree name, new_fields = NULL;

      type = lang_hooks.types.make_type (RECORD_TYPE);
      name = DECL_NAME (TYPE_NAME (ctx->record_type));
      name = build_decl (DECL_SOURCE_LOCATION (ctx->receiver_decl),
                         TYPE_DECL, name, type);
      TYPE_NAME (type) = name;

      for (f = TYPE_FIELDS (ctx->record_type); f ; f = DECL_CHAIN (f))
        {
          tree new_f = copy_node (f);
          DECL_CONTEXT (new_f) = type;
          TREE_TYPE (new_f) = remap_type (TREE_TYPE (f), &ctx->cb);
          DECL_CHAIN (new_f) = new_fields;
          walk_tree (&DECL_SIZE (new_f), copy_tree_body_r, &ctx->cb, NULL);
          walk_tree (&DECL_SIZE_UNIT (new_f), copy_tree_body_r,
                     &ctx->cb, NULL);
          walk_tree (&DECL_FIELD_OFFSET (new_f), copy_tree_body_r,
                     &ctx->cb, NULL);
          new_fields = new_f;

          /* Arrange to be able to look up the receiver field
             given the sender field. */
          splay_tree_insert (ctx->field_map, (splay_tree_key) f,
                             (splay_tree_value) new_f);
        }
      TYPE_FIELDS (type) = nreverse (new_fields);
      layout_type (type);
    }

  TREE_TYPE (ctx->receiver_decl) = build_pointer_type (type);
}

/* Instantiate decls as necessary in CTX to satisfy the data sharing
   specified by CLAUSES. */

static void
scan_sharing_clauses (tree clauses, omp_context *ctx)
{
  tree c, decl;
  bool scan_array_reductions = false;

  for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
    {
      bool by_ref;

      switch (OMP_CLAUSE_CODE (c))
        {
        case OMP_CLAUSE_PRIVATE:
          decl = OMP_CLAUSE_DECL (c);
          if (OMP_CLAUSE_PRIVATE_OUTER_REF (c))
            goto do_private;
          else if (!is_variable_sized (decl))
            install_var_local (decl, ctx);
          break;

        case OMP_CLAUSE_SHARED:
          gcc_assert (is_taskreg_ctx (ctx));
          decl = OMP_CLAUSE_DECL (c);
          gcc_assert (!COMPLETE_TYPE_P (TREE_TYPE (decl))
                      || !is_variable_sized (decl));
          /* Global variables don't need to be copied,
             the receiver side will use them directly. */
          if (is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx)))
            break;
          by_ref = use_pointer_for_field (decl, ctx);
          if (! TREE_READONLY (decl)
              || TREE_ADDRESSABLE (decl)
              || by_ref
              || is_reference (decl))
            {
              install_var_field (decl, by_ref, 3, ctx);
              install_var_local (decl, ctx);
              break;
            }
          /* We don't need to copy const scalar vars back. */
          OMP_CLAUSE_SET_CODE (c, OMP_CLAUSE_FIRSTPRIVATE);
          goto do_private;

        case OMP_CLAUSE_LASTPRIVATE:
          /* Let the corresponding firstprivate clause create
             the variable. */
          if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
            break;
          /* FALLTHRU */

        case OMP_CLAUSE_FIRSTPRIVATE:
        case OMP_CLAUSE_REDUCTION:
          decl = OMP_CLAUSE_DECL (c);
        do_private:
          if (is_variable_sized (decl))
            {
              if (is_task_ctx (ctx))
                install_var_field (decl, false, 1, ctx);
              break;
            }
          else if (is_taskreg_ctx (ctx))
            {
              bool global
                = is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx));
              by_ref = use_pointer_for_field (decl, NULL);

              if (is_task_ctx (ctx)
                  && (global || by_ref || is_reference (decl)))
                {
                  install_var_field (decl, false, 1, ctx);
                  if (!global)
                    install_var_field (decl, by_ref, 2, ctx);
                }
              else if (!global)
                install_var_field (decl, by_ref, 3, ctx);
            }
          install_var_local (decl, ctx);
          break;

        case OMP_CLAUSE_COPYPRIVATE:
        case OMP_CLAUSE_COPYIN:
          decl = OMP_CLAUSE_DECL (c);
          by_ref = use_pointer_for_field (decl, NULL);
          install_var_field (decl, by_ref, 3, ctx);
          break;

        case OMP_CLAUSE_DEFAULT:
          ctx->default_kind = OMP_CLAUSE_DEFAULT_KIND (c);
          break;

        case OMP_CLAUSE_IF:
        case OMP_CLAUSE_NUM_THREADS:
        case OMP_CLAUSE_SCHEDULE:
          if (ctx->outer)
            scan_omp_op (&OMP_CLAUSE_OPERAND (c, 0), ctx->outer);
          break;

        case OMP_CLAUSE_NOWAIT:
        case OMP_CLAUSE_ORDERED:
        case OMP_CLAUSE_COLLAPSE:
        case OMP_CLAUSE_UNTIED:
          break;

        default:
          gcc_unreachable ();
        }
    }

  for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
    {
      switch (OMP_CLAUSE_CODE (c))
        {
        case OMP_CLAUSE_LASTPRIVATE:
          /* Let the corresponding firstprivate clause create
             the variable. */
          if (OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c))
            scan_array_reductions = true;
          if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
            break;
          /* FALLTHRU */

        case OMP_CLAUSE_PRIVATE:
        case OMP_CLAUSE_FIRSTPRIVATE:
        case OMP_CLAUSE_REDUCTION:
          decl = OMP_CLAUSE_DECL (c);
          if (is_variable_sized (decl))
            install_var_local (decl, ctx);
          fixup_remapped_decl (decl, ctx,
                               OMP_CLAUSE_CODE (c) == OMP_CLAUSE_PRIVATE
                               && OMP_CLAUSE_PRIVATE_DEBUG (c));
          if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
              && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
            scan_array_reductions = true;
          break;

        case OMP_CLAUSE_SHARED:
          decl = OMP_CLAUSE_DECL (c);
          if (! is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx)))
            fixup_remapped_decl (decl, ctx, false);
          break;

        case OMP_CLAUSE_COPYPRIVATE:
        case OMP_CLAUSE_COPYIN:
        case OMP_CLAUSE_DEFAULT:
        case OMP_CLAUSE_IF:
        case OMP_CLAUSE_NUM_THREADS:
        case OMP_CLAUSE_SCHEDULE:
        case OMP_CLAUSE_NOWAIT:
        case OMP_CLAUSE_ORDERED:
        case OMP_CLAUSE_COLLAPSE:
        case OMP_CLAUSE_UNTIED:
          break;

        default:
          gcc_unreachable ();
        }
    }

  if (scan_array_reductions)
    for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
      if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
          && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
        {
          scan_omp (OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c), ctx);
          scan_omp (OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c), ctx);
        }
      else if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE
               && OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c))
        scan_omp (OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c), ctx);
}
/* Create a new name for the omp child function. Returns an identifier. */

static GTY(()) unsigned int tmp_ompfn_id_num;

static tree
create_omp_child_function_name (bool task_copy)
{
  return (clone_function_name (current_function_decl,
                               task_copy ? "_omp_cpyfn" : "_omp_fn"));
}

/* Build a decl for the omp child function. It'll not contain a body
   yet, just the bare decl. */

static void
create_omp_child_function (omp_context *ctx, bool task_copy)
{
  tree decl, type, name, t;

  name = create_omp_child_function_name (task_copy);
  if (task_copy)
    type = build_function_type_list (void_type_node, ptr_type_node,
                                     ptr_type_node, NULL_TREE);
  else
    type = build_function_type_list (void_type_node, ptr_type_node, NULL_TREE);

  decl = build_decl (gimple_location (ctx->stmt),
                     FUNCTION_DECL, name, type);

  if (!task_copy)
    ctx->cb.dst_fn = decl;
  else
    gimple_omp_task_set_copy_fn (ctx->stmt, decl);

  TREE_STATIC (decl) = 1;
  TREE_USED (decl) = 1;
  DECL_ARTIFICIAL (decl) = 1;
  DECL_NAMELESS (decl) = 1;
  DECL_IGNORED_P (decl) = 0;
  TREE_PUBLIC (decl) = 0;
  DECL_UNINLINABLE (decl) = 1;
  DECL_EXTERNAL (decl) = 0;
  DECL_CONTEXT (decl) = NULL_TREE;
  DECL_INITIAL (decl) = make_node (BLOCK);

  t = build_decl (DECL_SOURCE_LOCATION (decl),
                  RESULT_DECL, NULL_TREE, void_type_node);
  DECL_ARTIFICIAL (t) = 1;
  DECL_IGNORED_P (t) = 1;
  DECL_CONTEXT (t) = decl;
  DECL_RESULT (decl) = t;

  t = build_decl (DECL_SOURCE_LOCATION (decl),
                  PARM_DECL, get_identifier (".omp_data_i"), ptr_type_node);
  DECL_ARTIFICIAL (t) = 1;
  DECL_NAMELESS (t) = 1;
  DECL_ARG_TYPE (t) = ptr_type_node;
  DECL_CONTEXT (t) = current_function_decl;
  TREE_USED (t) = 1;
  DECL_ARGUMENTS (decl) = t;
  if (!task_copy)
    ctx->receiver_decl = t;
  else
    {
      t = build_decl (DECL_SOURCE_LOCATION (decl),
                      PARM_DECL, get_identifier (".omp_data_o"),
                      ptr_type_node);
      DECL_ARTIFICIAL (t) = 1;
      DECL_NAMELESS (t) = 1;
      DECL_ARG_TYPE (t) = ptr_type_node;
      DECL_CONTEXT (t) = current_function_decl;
      TREE_USED (t) = 1;
      TREE_ADDRESSABLE (t) = 1;
      DECL_CHAIN (t) = DECL_ARGUMENTS (decl);
      DECL_ARGUMENTS (decl) = t;
    }

  /* Allocate memory for the function structure. The call to
     allocate_struct_function clobbers CFUN, so we need to restore
     it afterward. */
  push_struct_function (decl);
  cfun->function_end_locus = gimple_location (ctx->stmt);
  pop_cfun ();
}
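
/* In C terms, the decl built above corresponds to one of the following
   signatures (a sketch; the dotted names come from clone_function_name
   and are not valid C identifiers):

     static void foo._omp_fn.0 (void *.omp_data_i);
     static void foo._omp_cpyfn.1 (void *.omp_data_o, void *.omp_data_i);

   for the parallel/task body function and the task copy function,
   respectively. */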
1613
1614
1615 /* Scan an OpenMP parallel directive. */
1616
1617 static void
1618 scan_omp_parallel (gimple_stmt_iterator *gsi, omp_context *outer_ctx)
1619 {
1620 omp_context *ctx;
1621 tree name;
1622 gimple stmt = gsi_stmt (*gsi);
1623
1624 /* Ignore parallel directives with empty bodies, unless there
1625 are copyin clauses. */
1626 if (optimize > 0
1627 && empty_body_p (gimple_omp_body (stmt))
1628 && find_omp_clause (gimple_omp_parallel_clauses (stmt),
1629 OMP_CLAUSE_COPYIN) == NULL)
1630 {
1631 gsi_replace (gsi, gimple_build_nop (), false);
1632 return;
1633 }
1634
1635 ctx = new_omp_context (stmt, outer_ctx);
1636 if (taskreg_nesting_level > 1)
1637 ctx->is_nested = true;
1638 ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
1639 ctx->default_kind = OMP_CLAUSE_DEFAULT_SHARED;
1640 ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
1641 name = create_tmp_var_name (".omp_data_s");
1642 name = build_decl (gimple_location (stmt),
1643 TYPE_DECL, name, ctx->record_type);
1644 DECL_ARTIFICIAL (name) = 1;
1645 DECL_NAMELESS (name) = 1;
1646 TYPE_NAME (ctx->record_type) = name;
1647 create_omp_child_function (ctx, false);
1648 gimple_omp_parallel_set_child_fn (stmt, ctx->cb.dst_fn);
1649
1650 scan_sharing_clauses (gimple_omp_parallel_clauses (stmt), ctx);
1651 scan_omp (gimple_omp_body (stmt), ctx);
1652
1653 if (TYPE_FIELDS (ctx->record_type) == NULL)
1654 ctx->record_type = ctx->receiver_decl = NULL;
1655 else
1656 {
1657 layout_type (ctx->record_type);
1658 fixup_child_record_type (ctx);
1659 }
1660 }
1661
1662 /* Scan an OpenMP task directive. */
1663
1664 static void
1665 scan_omp_task (gimple_stmt_iterator *gsi, omp_context *outer_ctx)
1666 {
1667 omp_context *ctx;
1668 tree name, t;
1669 gimple stmt = gsi_stmt (*gsi);
1670 location_t loc = gimple_location (stmt);
1671
1672 /* Ignore task directives with empty bodies. */
1673 if (optimize > 0
1674 && empty_body_p (gimple_omp_body (stmt)))
1675 {
1676 gsi_replace (gsi, gimple_build_nop (), false);
1677 return;
1678 }
1679
1680 ctx = new_omp_context (stmt, outer_ctx);
1681 if (taskreg_nesting_level > 1)
1682 ctx->is_nested = true;
1683 ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
1684 ctx->default_kind = OMP_CLAUSE_DEFAULT_SHARED;
1685 ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
1686 name = create_tmp_var_name (".omp_data_s");
1687 name = build_decl (gimple_location (stmt),
1688 TYPE_DECL, name, ctx->record_type);
1689 DECL_ARTIFICIAL (name) = 1;
1690 DECL_NAMELESS (name) = 1;
1691 TYPE_NAME (ctx->record_type) = name;
1692 create_omp_child_function (ctx, false);
1693 gimple_omp_task_set_child_fn (stmt, ctx->cb.dst_fn);
1694
1695 scan_sharing_clauses (gimple_omp_task_clauses (stmt), ctx);
1696
1697 if (ctx->srecord_type)
1698 {
1699 name = create_tmp_var_name (".omp_data_a");
1700 name = build_decl (gimple_location (stmt),
1701 TYPE_DECL, name, ctx->srecord_type);
1702 DECL_ARTIFICIAL (name) = 1;
1703 DECL_NAMELESS (name) = 1;
1704 TYPE_NAME (ctx->srecord_type) = name;
1705 create_omp_child_function (ctx, true);
1706 }
1707
1708 scan_omp (gimple_omp_body (stmt), ctx);
1709
1710 if (TYPE_FIELDS (ctx->record_type) == NULL)
1711 {
1712 ctx->record_type = ctx->receiver_decl = NULL;
1713 t = build_int_cst (long_integer_type_node, 0);
1714 gimple_omp_task_set_arg_size (stmt, t);
1715 t = build_int_cst (long_integer_type_node, 1);
1716 gimple_omp_task_set_arg_align (stmt, t);
1717 }
1718 else
1719 {
1720 tree *p, vla_fields = NULL_TREE, *q = &vla_fields;
1721 /* Move VLA fields to the end. */
1722 p = &TYPE_FIELDS (ctx->record_type);
1723 while (*p)
1724 if (!TYPE_SIZE_UNIT (TREE_TYPE (*p))
1725 || ! TREE_CONSTANT (TYPE_SIZE_UNIT (TREE_TYPE (*p))))
1726 {
1727 *q = *p;
1728 *p = TREE_CHAIN (*p);
1729 TREE_CHAIN (*q) = NULL_TREE;
1730 q = &TREE_CHAIN (*q);
1731 }
1732 else
1733 p = &DECL_CHAIN (*p);
1734 *p = vla_fields;
1735 layout_type (ctx->record_type);
1736 fixup_child_record_type (ctx);
1737 if (ctx->srecord_type)
1738 layout_type (ctx->srecord_type);
1739 t = fold_convert_loc (loc, long_integer_type_node,
1740 TYPE_SIZE_UNIT (ctx->record_type));
1741 gimple_omp_task_set_arg_size (stmt, t);
1742 t = build_int_cst (long_integer_type_node,
1743 TYPE_ALIGN_UNIT (ctx->record_type));
1744 gimple_omp_task_set_arg_align (stmt, t);
1745 }
1746 }
1747
1748
1749 /* Scan an OpenMP loop directive. */
1750
1751 static void
1752 scan_omp_for (gimple stmt, omp_context *outer_ctx)
1753 {
1754 omp_context *ctx;
1755 size_t i;
1756
1757 ctx = new_omp_context (stmt, outer_ctx);
1758
1759 scan_sharing_clauses (gimple_omp_for_clauses (stmt), ctx);
1760
1761 scan_omp (gimple_omp_for_pre_body (stmt), ctx);
1762 for (i = 0; i < gimple_omp_for_collapse (stmt); i++)
1763 {
1764 scan_omp_op (gimple_omp_for_index_ptr (stmt, i), ctx);
1765 scan_omp_op (gimple_omp_for_initial_ptr (stmt, i), ctx);
1766 scan_omp_op (gimple_omp_for_final_ptr (stmt, i), ctx);
1767 scan_omp_op (gimple_omp_for_incr_ptr (stmt, i), ctx);
1768 }
1769 scan_omp (gimple_omp_body (stmt), ctx);
1770 }
1771
1772 /* Scan an OpenMP sections directive. */
1773
1774 static void
1775 scan_omp_sections (gimple stmt, omp_context *outer_ctx)
1776 {
1777 omp_context *ctx;
1778
1779 ctx = new_omp_context (stmt, outer_ctx);
1780 scan_sharing_clauses (gimple_omp_sections_clauses (stmt), ctx);
1781 scan_omp (gimple_omp_body (stmt), ctx);
1782 }
1783
1784 /* Scan an OpenMP single directive. */
1785
1786 static void
1787 scan_omp_single (gimple stmt, omp_context *outer_ctx)
1788 {
1789 omp_context *ctx;
1790 tree name;
1791
1792 ctx = new_omp_context (stmt, outer_ctx);
1793 ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
1794 ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
1795 name = create_tmp_var_name (".omp_copy_s");
1796 name = build_decl (gimple_location (stmt),
1797 TYPE_DECL, name, ctx->record_type);
1798 TYPE_NAME (ctx->record_type) = name;
1799
1800 scan_sharing_clauses (gimple_omp_single_clauses (stmt), ctx);
1801 scan_omp (gimple_omp_body (stmt), ctx);
1802
1803 if (TYPE_FIELDS (ctx->record_type) == NULL)
1804 ctx->record_type = NULL;
1805 else
1806 layout_type (ctx->record_type);
1807 }
1808
1809
1810 /* Check OpenMP nesting restrictions. */
1811 static void
1812 check_omp_nesting_restrictions (gimple stmt, omp_context *ctx)
1813 {
1814 switch (gimple_code (stmt))
1815 {
1816 case GIMPLE_OMP_FOR:
1817 case GIMPLE_OMP_SECTIONS:
1818 case GIMPLE_OMP_SINGLE:
1819 case GIMPLE_CALL:
1820 for (; ctx != NULL; ctx = ctx->outer)
1821 switch (gimple_code (ctx->stmt))
1822 {
1823 case GIMPLE_OMP_FOR:
1824 case GIMPLE_OMP_SECTIONS:
1825 case GIMPLE_OMP_SINGLE:
1826 case GIMPLE_OMP_ORDERED:
1827 case GIMPLE_OMP_MASTER:
1828 case GIMPLE_OMP_TASK:
1829 if (is_gimple_call (stmt))
1830 {
1831 warning (0, "barrier region may not be closely nested inside "
1832 "of work-sharing, critical, ordered, master or "
1833 "explicit task region");
1834 return;
1835 }
1836 warning (0, "work-sharing region may not be closely nested inside "
1837 "of work-sharing, critical, ordered, master or explicit "
1838 "task region");
1839 return;
1840 case GIMPLE_OMP_PARALLEL:
1841 return;
1842 default:
1843 break;
1844 }
1845 break;
1846 case GIMPLE_OMP_MASTER:
1847 for (; ctx != NULL; ctx = ctx->outer)
1848 switch (gimple_code (ctx->stmt))
1849 {
1850 case GIMPLE_OMP_FOR:
1851 case GIMPLE_OMP_SECTIONS:
1852 case GIMPLE_OMP_SINGLE:
1853 case GIMPLE_OMP_TASK:
1854 warning (0, "master region may not be closely nested inside "
1855 "of work-sharing or explicit task region");
1856 return;
1857 case GIMPLE_OMP_PARALLEL:
1858 return;
1859 default:
1860 break;
1861 }
1862 break;
1863 case GIMPLE_OMP_ORDERED:
1864 for (; ctx != NULL; ctx = ctx->outer)
1865 switch (gimple_code (ctx->stmt))
1866 {
1867 case GIMPLE_OMP_CRITICAL:
1868 case GIMPLE_OMP_TASK:
1869 warning (0, "ordered region may not be closely nested inside "
1870 "of critical or explicit task region");
1871 return;
1872 case GIMPLE_OMP_FOR:
1873 if (find_omp_clause (gimple_omp_for_clauses (ctx->stmt),
1874 OMP_CLAUSE_ORDERED) == NULL)
1875 warning (0, "ordered region must be closely nested inside "
1876 "a loop region with an ordered clause");
1877 return;
1878 case GIMPLE_OMP_PARALLEL:
1879 return;
1880 default:
1881 break;
1882 }
1883 break;
1884 case GIMPLE_OMP_CRITICAL:
1885 for (; ctx != NULL; ctx = ctx->outer)
1886 if (gimple_code (ctx->stmt) == GIMPLE_OMP_CRITICAL
1887 && (gimple_omp_critical_name (stmt)
1888 == gimple_omp_critical_name (ctx->stmt)))
1889 {
1890 warning (0, "critical region may not be nested inside a critical "
1891 "region with the same name");
1892 return;
1893 }
1894 break;
1895 default:
1896 break;
1897 }
1898 }
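
/* For illustration (a sketch, not part of the pass): one shape of user
   code rejected by the checks above is a worksharing construct closely
   nested inside another worksharing construct:

       #pragma omp parallel
       #pragma omp single
       {
         #pragma omp for
         for (i = 0; i < n; i++)
           body (i);
       }

   Scanning the GIMPLE_OMP_FOR finds GIMPLE_OMP_SINGLE in an enclosing
   context and emits the "work-sharing region may not be closely nested"
   warning; a "#pragma omp barrier" (a GIMPLE_CALL to GOMP_barrier) in
   the same spot draws the analogous "barrier region" warning.  */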
1899
1900
1901 /* Helper function for scan_omp.
1902
1903    Callback for walk_tree, invoked on statement operands by
1904    walk_gimple_stmt, used to scan for OpenMP directives in *TP.  */
1905
1906 static tree
1907 scan_omp_1_op (tree *tp, int *walk_subtrees, void *data)
1908 {
1909 struct walk_stmt_info *wi = (struct walk_stmt_info *) data;
1910 omp_context *ctx = (omp_context *) wi->info;
1911 tree t = *tp;
1912
1913 switch (TREE_CODE (t))
1914 {
1915 case VAR_DECL:
1916 case PARM_DECL:
1917 case LABEL_DECL:
1918 case RESULT_DECL:
1919 if (ctx)
1920 *tp = remap_decl (t, &ctx->cb);
1921 break;
1922
1923 default:
1924 if (ctx && TYPE_P (t))
1925 *tp = remap_type (t, &ctx->cb);
1926 else if (!DECL_P (t))
1927 {
1928 *walk_subtrees = 1;
1929 if (ctx)
1930 {
1931 tree tem = remap_type (TREE_TYPE (t), &ctx->cb);
1932 if (tem != TREE_TYPE (t))
1933 {
1934 if (TREE_CODE (t) == INTEGER_CST)
1935 *tp = build_int_cst_wide (tem,
1936 TREE_INT_CST_LOW (t),
1937 TREE_INT_CST_HIGH (t));
1938 else
1939 TREE_TYPE (t) = tem;
1940 }
1941 }
1942 }
1943 break;
1944 }
1945
1946 return NULL_TREE;
1947 }
1948
1949
1950 /* Helper function for scan_omp.
1951
1952 Callback for walk_gimple_stmt used to scan for OpenMP directives in
1953 the current statement in GSI. */
1954
1955 static tree
1956 scan_omp_1_stmt (gimple_stmt_iterator *gsi, bool *handled_ops_p,
1957 struct walk_stmt_info *wi)
1958 {
1959 gimple stmt = gsi_stmt (*gsi);
1960 omp_context *ctx = (omp_context *) wi->info;
1961
1962 if (gimple_has_location (stmt))
1963 input_location = gimple_location (stmt);
1964
1965 /* Check the OpenMP nesting restrictions. */
1966 if (ctx != NULL)
1967 {
1968 if (is_gimple_omp (stmt))
1969 check_omp_nesting_restrictions (stmt, ctx);
1970 else if (is_gimple_call (stmt))
1971 {
1972 tree fndecl = gimple_call_fndecl (stmt);
1973 if (fndecl && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL
1974 && DECL_FUNCTION_CODE (fndecl) == BUILT_IN_GOMP_BARRIER)
1975 check_omp_nesting_restrictions (stmt, ctx);
1976 }
1977 }
1978
1979 *handled_ops_p = true;
1980
1981 switch (gimple_code (stmt))
1982 {
1983 case GIMPLE_OMP_PARALLEL:
1984 taskreg_nesting_level++;
1985 scan_omp_parallel (gsi, ctx);
1986 taskreg_nesting_level--;
1987 break;
1988
1989 case GIMPLE_OMP_TASK:
1990 taskreg_nesting_level++;
1991 scan_omp_task (gsi, ctx);
1992 taskreg_nesting_level--;
1993 break;
1994
1995 case GIMPLE_OMP_FOR:
1996 scan_omp_for (stmt, ctx);
1997 break;
1998
1999 case GIMPLE_OMP_SECTIONS:
2000 scan_omp_sections (stmt, ctx);
2001 break;
2002
2003 case GIMPLE_OMP_SINGLE:
2004 scan_omp_single (stmt, ctx);
2005 break;
2006
2007 case GIMPLE_OMP_SECTION:
2008 case GIMPLE_OMP_MASTER:
2009 case GIMPLE_OMP_ORDERED:
2010 case GIMPLE_OMP_CRITICAL:
2011 ctx = new_omp_context (stmt, ctx);
2012 scan_omp (gimple_omp_body (stmt), ctx);
2013 break;
2014
2015 case GIMPLE_BIND:
2016 {
2017 tree var;
2018
2019 *handled_ops_p = false;
2020 if (ctx)
2021 for (var = gimple_bind_vars (stmt); var ; var = DECL_CHAIN (var))
2022 insert_decl_map (&ctx->cb, var, var);
2023 }
2024 break;
2025 default:
2026 *handled_ops_p = false;
2027 break;
2028 }
2029
2030 return NULL_TREE;
2031 }
2032
2033
2034 /* Scan all the statements starting at the current statement. CTX
2035 contains context information about the OpenMP directives and
2036 clauses found during the scan. */
2037
2038 static void
2039 scan_omp (gimple_seq body, omp_context *ctx)
2040 {
2041 location_t saved_location;
2042 struct walk_stmt_info wi;
2043
2044 memset (&wi, 0, sizeof (wi));
2045 wi.info = ctx;
2046 wi.want_locations = true;
2047
2048 saved_location = input_location;
2049 walk_gimple_seq (body, scan_omp_1_stmt, scan_omp_1_op, &wi);
2050 input_location = saved_location;
2051 }
2052 \f
2053 /* Re-gimplification and code generation routines. */
2054
2055 /* Build a call to GOMP_barrier. */
2056
2057 static tree
2058 build_omp_barrier (void)
2059 {
2060 return build_call_expr (built_in_decls[BUILT_IN_GOMP_BARRIER], 0);
2061 }
2062
2063 /* If a context was created for STMT when it was scanned, return it. */
2064
2065 static omp_context *
2066 maybe_lookup_ctx (gimple stmt)
2067 {
2068 splay_tree_node n;
2069 n = splay_tree_lookup (all_contexts, (splay_tree_key) stmt);
2070 return n ? (omp_context *) n->value : NULL;
2071 }
2072
2073
2074 /* Find the mapping for DECL in CTX or the immediately enclosing
2075 context that has a mapping for DECL.
2076
2077 If CTX is a nested parallel directive, we may have to use the decl
2078 mappings created in CTX's parent context. Suppose that we have the
2079    following parallel nesting (variable UIDs shown for clarity):
2080
2081 iD.1562 = 0;
2082 #omp parallel shared(iD.1562) -> outer parallel
2083 iD.1562 = iD.1562 + 1;
2084
2085 #omp parallel shared (iD.1562) -> inner parallel
2086 iD.1562 = iD.1562 - 1;
2087
2088 Each parallel structure will create a distinct .omp_data_s structure
2089 for copying iD.1562 in/out of the directive:
2090
2091 outer parallel .omp_data_s.1.i -> iD.1562
2092 inner parallel .omp_data_s.2.i -> iD.1562
2093
2094 A shared variable mapping will produce a copy-out operation before
2095 the parallel directive and a copy-in operation after it. So, in
2096 this case we would have:
2097
2098 iD.1562 = 0;
2099 .omp_data_o.1.i = iD.1562;
2100 #omp parallel shared(iD.1562) -> outer parallel
2101 .omp_data_i.1 = &.omp_data_o.1
2102 .omp_data_i.1->i = .omp_data_i.1->i + 1;
2103
2104 .omp_data_o.2.i = iD.1562; -> **
2105 #omp parallel shared(iD.1562) -> inner parallel
2106 .omp_data_i.2 = &.omp_data_o.2
2107 .omp_data_i.2->i = .omp_data_i.2->i - 1;
2108
2109
2110 ** This is a problem. The symbol iD.1562 cannot be referenced
2111 inside the body of the outer parallel region. But since we are
2112 emitting this copy operation while expanding the inner parallel
2113 directive, we need to access the CTX structure of the outer
2114 parallel directive to get the correct mapping:
2115
2116 .omp_data_o.2.i = .omp_data_i.1->i
2117
2118 Since there may be other workshare or parallel directives enclosing
2119 the parallel directive, it may be necessary to walk up the context
2120 parent chain. This is not a problem in general because nested
2121 parallelism happens only rarely. */
2122
2123 static tree
2124 lookup_decl_in_outer_ctx (tree decl, omp_context *ctx)
2125 {
2126 tree t;
2127 omp_context *up;
2128
2129 for (up = ctx->outer, t = NULL; up && t == NULL; up = up->outer)
2130 t = maybe_lookup_decl (decl, up);
2131
2132 gcc_assert (!ctx->is_nested || t || is_global_var (decl));
2133
2134 return t ? t : decl;
2135 }
2136
2137
2138 /* Similar to lookup_decl_in_outer_ctx, but return DECL if not found
2139    in outer contexts, without asserting that a mapping must exist.  */
2140
2141 static tree
2142 maybe_lookup_decl_in_outer_ctx (tree decl, omp_context *ctx)
2143 {
2144   tree t;
2145 omp_context *up;
2146
2147 for (up = ctx->outer, t = NULL; up && t == NULL; up = up->outer)
2148 t = maybe_lookup_decl (decl, up);
2149
2150 return t ? t : decl;
2151 }
2152
2153
2154 /* Construct the initialization value for reduction CLAUSE. */
2155
2156 tree
2157 omp_reduction_init (tree clause, tree type)
2158 {
2159 location_t loc = OMP_CLAUSE_LOCATION (clause);
2160 switch (OMP_CLAUSE_REDUCTION_CODE (clause))
2161 {
2162 case PLUS_EXPR:
2163 case MINUS_EXPR:
2164 case BIT_IOR_EXPR:
2165 case BIT_XOR_EXPR:
2166 case TRUTH_OR_EXPR:
2167 case TRUTH_ORIF_EXPR:
2168 case TRUTH_XOR_EXPR:
2169 case NE_EXPR:
2170 return build_zero_cst (type);
2171
2172 case MULT_EXPR:
2173 case TRUTH_AND_EXPR:
2174 case TRUTH_ANDIF_EXPR:
2175 case EQ_EXPR:
2176 return fold_convert_loc (loc, type, integer_one_node);
2177
2178 case BIT_AND_EXPR:
2179 return fold_convert_loc (loc, type, integer_minus_one_node);
2180
2181 case MAX_EXPR:
2182 if (SCALAR_FLOAT_TYPE_P (type))
2183 {
2184 REAL_VALUE_TYPE max, min;
2185 if (HONOR_INFINITIES (TYPE_MODE (type)))
2186 {
2187 real_inf (&max);
2188 real_arithmetic (&min, NEGATE_EXPR, &max, NULL);
2189 }
2190 else
2191 real_maxval (&min, 1, TYPE_MODE (type));
2192 return build_real (type, min);
2193 }
2194 else
2195 {
2196 gcc_assert (INTEGRAL_TYPE_P (type));
2197 return TYPE_MIN_VALUE (type);
2198 }
2199
2200 case MIN_EXPR:
2201 if (SCALAR_FLOAT_TYPE_P (type))
2202 {
2203 REAL_VALUE_TYPE max;
2204 if (HONOR_INFINITIES (TYPE_MODE (type)))
2205 real_inf (&max);
2206 else
2207 real_maxval (&max, 0, TYPE_MODE (type));
2208 return build_real (type, max);
2209 }
2210 else
2211 {
2212 gcc_assert (INTEGRAL_TYPE_P (type));
2213 return TYPE_MAX_VALUE (type);
2214 }
2215
2216 default:
2217 gcc_unreachable ();
2218 }
2219 }
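
/* As a sketch of the mapping implemented above, the private copy of a
   reduction variable starts at the identity element of its operation:

       reduction (+:x)    0        (likewise -, |, ^, ||, !=)
       reduction (*:x)    1        (likewise &&, ==)
       reduction (&:x)    ~0, i.e. all bits set
       reduction (max:x)  -INF for floats honoring infinities, else the
                          most negative finite value; TYPE_MIN_VALUE
                          for integers (and symmetrically for min)

   so that merging the first real value into the private copy leaves
   that value unchanged.  */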
2220
2221 /* Generate code to implement the input clauses, FIRSTPRIVATE and COPYIN,
2222 from the receiver (aka child) side and initializers for REFERENCE_TYPE
2223 private variables. Initialization statements go in ILIST, while calls
2224 to destructors go in DLIST. */
2225
2226 static void
2227 lower_rec_input_clauses (tree clauses, gimple_seq *ilist, gimple_seq *dlist,
2228 omp_context *ctx)
2229 {
2230 gimple_stmt_iterator diter;
2231 tree c, dtor, copyin_seq, x, ptr;
2232 bool copyin_by_ref = false;
2233 bool lastprivate_firstprivate = false;
2234 int pass;
2235
2236 *dlist = gimple_seq_alloc ();
2237 diter = gsi_start (*dlist);
2238 copyin_seq = NULL;
2239
2240 /* Do all the fixed sized types in the first pass, and the variable sized
2241 types in the second pass. This makes sure that the scalar arguments to
2242 the variable sized types are processed before we use them in the
2243 variable sized operations. */
2244 for (pass = 0; pass < 2; ++pass)
2245 {
2246 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
2247 {
2248 enum omp_clause_code c_kind = OMP_CLAUSE_CODE (c);
2249 tree var, new_var;
2250 bool by_ref;
2251 location_t clause_loc = OMP_CLAUSE_LOCATION (c);
2252
2253 switch (c_kind)
2254 {
2255 case OMP_CLAUSE_PRIVATE:
2256 if (OMP_CLAUSE_PRIVATE_DEBUG (c))
2257 continue;
2258 break;
2259 case OMP_CLAUSE_SHARED:
2260 if (maybe_lookup_decl (OMP_CLAUSE_DECL (c), ctx) == NULL)
2261 {
2262 gcc_assert (is_global_var (OMP_CLAUSE_DECL (c)));
2263 continue;
2264 }
2265 case OMP_CLAUSE_FIRSTPRIVATE:
2266 case OMP_CLAUSE_COPYIN:
2267 case OMP_CLAUSE_REDUCTION:
2268 break;
2269 case OMP_CLAUSE_LASTPRIVATE:
2270 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
2271 {
2272 lastprivate_firstprivate = true;
2273 if (pass != 0)
2274 continue;
2275 }
2276 break;
2277 default:
2278 continue;
2279 }
2280
2281 new_var = var = OMP_CLAUSE_DECL (c);
2282 if (c_kind != OMP_CLAUSE_COPYIN)
2283 new_var = lookup_decl (var, ctx);
2284
2285 if (c_kind == OMP_CLAUSE_SHARED || c_kind == OMP_CLAUSE_COPYIN)
2286 {
2287 if (pass != 0)
2288 continue;
2289 }
2290 else if (is_variable_sized (var))
2291 {
2292 /* For variable sized types, we need to allocate the
2293 actual storage here. Call alloca and store the
2294 result in the pointer decl that we created elsewhere. */
2295 if (pass == 0)
2296 continue;
2297
2298 if (c_kind != OMP_CLAUSE_FIRSTPRIVATE || !is_task_ctx (ctx))
2299 {
2300 gimple stmt;
2301 tree tmp;
2302
2303 ptr = DECL_VALUE_EXPR (new_var);
2304 gcc_assert (TREE_CODE (ptr) == INDIRECT_REF);
2305 ptr = TREE_OPERAND (ptr, 0);
2306 gcc_assert (DECL_P (ptr));
2307 x = TYPE_SIZE_UNIT (TREE_TYPE (new_var));
2308
2309                 /* void *tmp = __builtin_alloca (x); */
2310 stmt
2311 = gimple_build_call (built_in_decls[BUILT_IN_ALLOCA], 1, x);
2312 tmp = create_tmp_var_raw (ptr_type_node, NULL);
2313 gimple_add_tmp_var (tmp);
2314 gimple_call_set_lhs (stmt, tmp);
2315
2316 gimple_seq_add_stmt (ilist, stmt);
2317
2318 x = fold_convert_loc (clause_loc, TREE_TYPE (ptr), tmp);
2319 gimplify_assign (ptr, x, ilist);
2320 }
2321 }
2322 else if (is_reference (var))
2323 {
2324 /* For references that are being privatized for Fortran,
2325 allocate new backing storage for the new pointer
2326 variable. This allows us to avoid changing all the
2327 code that expects a pointer to something that expects
2328 a direct variable. Note that this doesn't apply to
2329 C++, since reference types are disallowed in data
2330 sharing clauses there, except for NRV optimized
2331 return values. */
2332 if (pass == 0)
2333 continue;
2334
2335 x = TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (new_var)));
2336 if (c_kind == OMP_CLAUSE_FIRSTPRIVATE && is_task_ctx (ctx))
2337 {
2338 x = build_receiver_ref (var, false, ctx);
2339 x = build_fold_addr_expr_loc (clause_loc, x);
2340 }
2341 else if (TREE_CONSTANT (x))
2342 {
2343 const char *name = NULL;
2344 if (DECL_NAME (var))
2345 name = IDENTIFIER_POINTER (DECL_NAME (new_var));
2346
2347 x = create_tmp_var_raw (TREE_TYPE (TREE_TYPE (new_var)),
2348 name);
2349 gimple_add_tmp_var (x);
2350 TREE_ADDRESSABLE (x) = 1;
2351 x = build_fold_addr_expr_loc (clause_loc, x);
2352 }
2353 else
2354 {
2355 x = build_call_expr_loc (clause_loc,
2356 built_in_decls[BUILT_IN_ALLOCA], 1, x);
2357 }
2358
2359 x = fold_convert_loc (clause_loc, TREE_TYPE (new_var), x);
2360 gimplify_assign (new_var, x, ilist);
2361
2362 new_var = build_simple_mem_ref_loc (clause_loc, new_var);
2363 }
2364 else if (c_kind == OMP_CLAUSE_REDUCTION
2365 && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
2366 {
2367 if (pass == 0)
2368 continue;
2369 }
2370 else if (pass != 0)
2371 continue;
2372
2373 switch (OMP_CLAUSE_CODE (c))
2374 {
2375 case OMP_CLAUSE_SHARED:
2376 /* Shared global vars are just accessed directly. */
2377 if (is_global_var (new_var))
2378 break;
2379 /* Set up the DECL_VALUE_EXPR for shared variables now. This
2380 needs to be delayed until after fixup_child_record_type so
2381 that we get the correct type during the dereference. */
2382 by_ref = use_pointer_for_field (var, ctx);
2383 x = build_receiver_ref (var, by_ref, ctx);
2384 SET_DECL_VALUE_EXPR (new_var, x);
2385 DECL_HAS_VALUE_EXPR_P (new_var) = 1;
2386
2387 /* ??? If VAR is not passed by reference, and the variable
2388 hasn't been initialized yet, then we'll get a warning for
2389 the store into the omp_data_s structure. Ideally, we'd be
2390 able to notice this and not store anything at all, but
2391 we're generating code too early. Suppress the warning. */
2392 if (!by_ref)
2393 TREE_NO_WARNING (var) = 1;
2394 break;
2395
2396 case OMP_CLAUSE_LASTPRIVATE:
2397 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
2398 break;
2399 /* FALLTHRU */
2400
2401 case OMP_CLAUSE_PRIVATE:
2402 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_PRIVATE)
2403 x = build_outer_var_ref (var, ctx);
2404 else if (OMP_CLAUSE_PRIVATE_OUTER_REF (c))
2405 {
2406 if (is_task_ctx (ctx))
2407 x = build_receiver_ref (var, false, ctx);
2408 else
2409 x = build_outer_var_ref (var, ctx);
2410 }
2411 else
2412 x = NULL;
2413 x = lang_hooks.decls.omp_clause_default_ctor (c, new_var, x);
2414 if (x)
2415 gimplify_and_add (x, ilist);
2416 /* FALLTHRU */
2417
2418 do_dtor:
2419 x = lang_hooks.decls.omp_clause_dtor (c, new_var);
2420 if (x)
2421 {
2422 gimple_seq tseq = NULL;
2423
2424 dtor = x;
2425 gimplify_stmt (&dtor, &tseq);
2426 gsi_insert_seq_before (&diter, tseq, GSI_SAME_STMT);
2427 }
2428 break;
2429
2430 case OMP_CLAUSE_FIRSTPRIVATE:
2431 if (is_task_ctx (ctx))
2432 {
2433 if (is_reference (var) || is_variable_sized (var))
2434 goto do_dtor;
2435 else if (is_global_var (maybe_lookup_decl_in_outer_ctx (var,
2436 ctx))
2437 || use_pointer_for_field (var, NULL))
2438 {
2439 x = build_receiver_ref (var, false, ctx);
2440 SET_DECL_VALUE_EXPR (new_var, x);
2441 DECL_HAS_VALUE_EXPR_P (new_var) = 1;
2442 goto do_dtor;
2443 }
2444 }
2445 x = build_outer_var_ref (var, ctx);
2446 x = lang_hooks.decls.omp_clause_copy_ctor (c, new_var, x);
2447 gimplify_and_add (x, ilist);
2448 goto do_dtor;
2449 break;
2450
2451 case OMP_CLAUSE_COPYIN:
2452 by_ref = use_pointer_for_field (var, NULL);
2453 x = build_receiver_ref (var, by_ref, ctx);
2454 x = lang_hooks.decls.omp_clause_assign_op (c, new_var, x);
2455 append_to_statement_list (x, &copyin_seq);
2456 copyin_by_ref |= by_ref;
2457 break;
2458
2459 case OMP_CLAUSE_REDUCTION:
2460 if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
2461 {
2462 tree placeholder = OMP_CLAUSE_REDUCTION_PLACEHOLDER (c);
2463 x = build_outer_var_ref (var, ctx);
2464
2465 if (is_reference (var))
2466 x = build_fold_addr_expr_loc (clause_loc, x);
2467 SET_DECL_VALUE_EXPR (placeholder, x);
2468 DECL_HAS_VALUE_EXPR_P (placeholder) = 1;
2469 lower_omp (OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c), ctx);
2470 gimple_seq_add_seq (ilist,
2471 OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c));
2472 OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c) = NULL;
2473 DECL_HAS_VALUE_EXPR_P (placeholder) = 0;
2474 }
2475 else
2476 {
2477 x = omp_reduction_init (c, TREE_TYPE (new_var));
2478 gcc_assert (TREE_CODE (TREE_TYPE (new_var)) != ARRAY_TYPE);
2479 gimplify_assign (new_var, x, ilist);
2480 }
2481 break;
2482
2483 default:
2484 gcc_unreachable ();
2485 }
2486 }
2487 }
2488
2489   /* The copyin sequence is not to be executed by the main thread, since
2490      that would result in self-copies.  That may be harmless for scalar
2491      assignments, but it certainly is not for C++ operator=.  */
2492 if (copyin_seq)
2493 {
2494 x = build_call_expr (built_in_decls[BUILT_IN_OMP_GET_THREAD_NUM], 0);
2495 x = build2 (NE_EXPR, boolean_type_node, x,
2496 build_int_cst (TREE_TYPE (x), 0));
2497 x = build3 (COND_EXPR, void_type_node, x, copyin_seq, NULL);
2498 gimplify_and_add (x, ilist);
2499 }
2500
2501 /* If any copyin variable is passed by reference, we must ensure the
2502 master thread doesn't modify it before it is copied over in all
2503 threads. Similarly for variables in both firstprivate and
2504 lastprivate clauses we need to ensure the lastprivate copying
2505 happens after firstprivate copying in all threads. */
2506 if (copyin_by_ref || lastprivate_firstprivate)
2507 gimplify_and_add (build_omp_barrier (), ilist);
2508 }
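
/* A sketch (with illustrative names) of the receiver-side code the
   function above appends to ILIST for

       #pragma omp parallel firstprivate (a) copyin (t)

   is:

       a.1 = .omp_data_i->a;                       firstprivate copy-in
       if (__builtin_omp_get_thread_num () != 0)
         t = .omp_data_i->t;                       copyin, skipped by the
                                                   master to avoid a
                                                   self-copy
       GOMP_barrier ();                            only when a copyin var
                                                   is passed by reference
                                                   or lastprivate follows
                                                   firstprivate

   while any destructor calls for the private copies go into DLIST.  */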
2509
2510
2511 /* Generate code to implement the LASTPRIVATE clauses. This is used for
2512 both parallel and workshare constructs. PREDICATE may be NULL if it's
2513 always true. */
2514
2515 static void
2516 lower_lastprivate_clauses (tree clauses, tree predicate, gimple_seq *stmt_list,
2517 omp_context *ctx)
2518 {
2519 tree x, c, label = NULL;
2520 bool par_clauses = false;
2521
2522 /* Early exit if there are no lastprivate clauses. */
2523 clauses = find_omp_clause (clauses, OMP_CLAUSE_LASTPRIVATE);
2524 if (clauses == NULL)
2525 {
2526       /* If this was a workshare construct, see if it had been combined
2527          with its parallel.  In that case, look for the clauses on the
2528          parallel statement itself.  */
2529 if (is_parallel_ctx (ctx))
2530 return;
2531
2532 ctx = ctx->outer;
2533 if (ctx == NULL || !is_parallel_ctx (ctx))
2534 return;
2535
2536 clauses = find_omp_clause (gimple_omp_parallel_clauses (ctx->stmt),
2537 OMP_CLAUSE_LASTPRIVATE);
2538 if (clauses == NULL)
2539 return;
2540 par_clauses = true;
2541 }
2542
2543 if (predicate)
2544 {
2545 gimple stmt;
2546 tree label_true, arm1, arm2;
2547
2548 label = create_artificial_label (UNKNOWN_LOCATION);
2549 label_true = create_artificial_label (UNKNOWN_LOCATION);
2550 arm1 = TREE_OPERAND (predicate, 0);
2551 arm2 = TREE_OPERAND (predicate, 1);
2552 gimplify_expr (&arm1, stmt_list, NULL, is_gimple_val, fb_rvalue);
2553 gimplify_expr (&arm2, stmt_list, NULL, is_gimple_val, fb_rvalue);
2554 stmt = gimple_build_cond (TREE_CODE (predicate), arm1, arm2,
2555 label_true, label);
2556 gimple_seq_add_stmt (stmt_list, stmt);
2557 gimple_seq_add_stmt (stmt_list, gimple_build_label (label_true));
2558 }
2559
2560 for (c = clauses; c ;)
2561 {
2562 tree var, new_var;
2563 location_t clause_loc = OMP_CLAUSE_LOCATION (c);
2564
2565 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE)
2566 {
2567 var = OMP_CLAUSE_DECL (c);
2568 new_var = lookup_decl (var, ctx);
2569
2570 if (OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c))
2571 {
2572 lower_omp (OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c), ctx);
2573 gimple_seq_add_seq (stmt_list,
2574 OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c));
2575 }
2576 OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c) = NULL;
2577
2578 x = build_outer_var_ref (var, ctx);
2579 if (is_reference (var))
2580 new_var = build_simple_mem_ref_loc (clause_loc, new_var);
2581 x = lang_hooks.decls.omp_clause_assign_op (c, x, new_var);
2582 gimplify_and_add (x, stmt_list);
2583 }
2584 c = OMP_CLAUSE_CHAIN (c);
2585 if (c == NULL && !par_clauses)
2586 {
2587           /* If this was a workshare construct, see if it had been combined
2588              with its parallel.  In that case, continue looking for the
2589              clauses also on the parallel statement itself.  */
2590 if (is_parallel_ctx (ctx))
2591 break;
2592
2593 ctx = ctx->outer;
2594 if (ctx == NULL || !is_parallel_ctx (ctx))
2595 break;
2596
2597 c = find_omp_clause (gimple_omp_parallel_clauses (ctx->stmt),
2598 OMP_CLAUSE_LASTPRIVATE);
2599 par_clauses = true;
2600 }
2601 }
2602
2603 if (label)
2604 gimple_seq_add_stmt (stmt_list, gimple_build_label (label));
2605 }
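
/* Sketch: for "#pragma omp for lastprivate (x)" with PREDICATE testing
   whether this thread ran the sequentially last iteration, the sequence
   appended to STMT_LIST is roughly

       if (iv == last) goto lab_true; else goto lab;
     lab_true:
       x = x.1;
     lab:

   so exactly one thread copies its private value x.1 back into the
   original variable.  */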
2606
2607
2608 /* Generate code to implement the REDUCTION clauses. */
2609
2610 static void
2611 lower_reduction_clauses (tree clauses, gimple_seq *stmt_seqp, omp_context *ctx)
2612 {
2613 gimple_seq sub_seq = NULL;
2614 gimple stmt;
2615 tree x, c;
2616 int count = 0;
2617
2618 /* First see if there is exactly one reduction clause. Use OMP_ATOMIC
2619 update in that case, otherwise use a lock. */
2620 for (c = clauses; c && count < 2; c = OMP_CLAUSE_CHAIN (c))
2621 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION)
2622 {
2623 if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
2624 {
2625 /* Never use OMP_ATOMIC for array reductions. */
2626 count = -1;
2627 break;
2628 }
2629 count++;
2630 }
2631
2632 if (count == 0)
2633 return;
2634
2635 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
2636 {
2637 tree var, ref, new_var;
2638 enum tree_code code;
2639 location_t clause_loc = OMP_CLAUSE_LOCATION (c);
2640
2641 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_REDUCTION)
2642 continue;
2643
2644 var = OMP_CLAUSE_DECL (c);
2645 new_var = lookup_decl (var, ctx);
2646 if (is_reference (var))
2647 new_var = build_simple_mem_ref_loc (clause_loc, new_var);
2648 ref = build_outer_var_ref (var, ctx);
2649 code = OMP_CLAUSE_REDUCTION_CODE (c);
2650
2651 /* reduction(-:var) sums up the partial results, so it acts
2652 identically to reduction(+:var). */
2653 if (code == MINUS_EXPR)
2654 code = PLUS_EXPR;
2655
2656 if (count == 1)
2657 {
2658 tree addr = build_fold_addr_expr_loc (clause_loc, ref);
2659
2660 addr = save_expr (addr);
2661 ref = build1 (INDIRECT_REF, TREE_TYPE (TREE_TYPE (addr)), addr);
2662 x = fold_build2_loc (clause_loc, code, TREE_TYPE (ref), ref, new_var);
2663 x = build2 (OMP_ATOMIC, void_type_node, addr, x);
2664 gimplify_and_add (x, stmt_seqp);
2665 return;
2666 }
2667
2668 if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
2669 {
2670 tree placeholder = OMP_CLAUSE_REDUCTION_PLACEHOLDER (c);
2671
2672 if (is_reference (var))
2673 ref = build_fold_addr_expr_loc (clause_loc, ref);
2674 SET_DECL_VALUE_EXPR (placeholder, ref);
2675 DECL_HAS_VALUE_EXPR_P (placeholder) = 1;
2676 lower_omp (OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c), ctx);
2677 gimple_seq_add_seq (&sub_seq, OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c));
2678 OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c) = NULL;
2679 OMP_CLAUSE_REDUCTION_PLACEHOLDER (c) = NULL;
2680 }
2681 else
2682 {
2683 x = build2 (code, TREE_TYPE (ref), ref, new_var);
2684 ref = build_outer_var_ref (var, ctx);
2685 gimplify_assign (ref, x, &sub_seq);
2686 }
2687 }
2688
2689 stmt = gimple_build_call (built_in_decls[BUILT_IN_GOMP_ATOMIC_START], 0);
2690 gimple_seq_add_stmt (stmt_seqp, stmt);
2691
2692 gimple_seq_add_seq (stmt_seqp, sub_seq);
2693
2694 stmt = gimple_build_call (built_in_decls[BUILT_IN_GOMP_ATOMIC_END], 0);
2695 gimple_seq_add_stmt (stmt_seqp, stmt);
2696 }
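
/* Sketch of the two strategies above: a single "reduction (+:s)"
   clause becomes an atomic update of the shared copy, roughly

       #pragma omp atomic
       s = s + s.1;

   while several reduction clauses (or a placeholder/array reduction)
   are merged under one lock pair:

       GOMP_atomic_start ();
       s = s + s.1;
       p = p * p.1;
       GOMP_atomic_end ();  */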
2697
2698
2699 /* Generate code to implement the COPYPRIVATE clauses. */
2700
2701 static void
2702 lower_copyprivate_clauses (tree clauses, gimple_seq *slist, gimple_seq *rlist,
2703 omp_context *ctx)
2704 {
2705 tree c;
2706
2707 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
2708 {
2709 tree var, new_var, ref, x;
2710 bool by_ref;
2711 location_t clause_loc = OMP_CLAUSE_LOCATION (c);
2712
2713 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_COPYPRIVATE)
2714 continue;
2715
2716 var = OMP_CLAUSE_DECL (c);
2717 by_ref = use_pointer_for_field (var, NULL);
2718
2719 ref = build_sender_ref (var, ctx);
2720 x = new_var = lookup_decl_in_outer_ctx (var, ctx);
2721 if (by_ref)
2722 {
2723 x = build_fold_addr_expr_loc (clause_loc, new_var);
2724 x = fold_convert_loc (clause_loc, TREE_TYPE (ref), x);
2725 }
2726 gimplify_assign (ref, x, slist);
2727
2728 ref = build_receiver_ref (var, false, ctx);
2729 if (by_ref)
2730 {
2731 ref = fold_convert_loc (clause_loc,
2732 build_pointer_type (TREE_TYPE (new_var)),
2733 ref);
2734 ref = build_fold_indirect_ref_loc (clause_loc, ref);
2735 }
2736 if (is_reference (var))
2737 {
2738 ref = fold_convert_loc (clause_loc, TREE_TYPE (new_var), ref);
2739 ref = build_simple_mem_ref_loc (clause_loc, ref);
2740 new_var = build_simple_mem_ref_loc (clause_loc, new_var);
2741 }
2742 x = lang_hooks.decls.omp_clause_assign_op (c, new_var, ref);
2743 gimplify_and_add (x, rlist);
2744 }
2745 }
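
/* Sketch for "#pragma omp single copyprivate (x)" (illustrative
   names): the thread that executed the single region publishes its
   value through the copy record (SLIST),

       .omp_copy_s.x = x;        or &x when passed by reference

   and the other threads read it back out (RLIST):

       x = .omp_copy_s.x;        dereferenced first when by reference  */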
2746
2747
2748 /* Generate code to implement the clauses, FIRSTPRIVATE, COPYIN, LASTPRIVATE,
2749 and REDUCTION from the sender (aka parent) side. */
2750
2751 static void
2752 lower_send_clauses (tree clauses, gimple_seq *ilist, gimple_seq *olist,
2753 omp_context *ctx)
2754 {
2755 tree c;
2756
2757 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
2758 {
2759 tree val, ref, x, var;
2760 bool by_ref, do_in = false, do_out = false;
2761 location_t clause_loc = OMP_CLAUSE_LOCATION (c);
2762
2763 switch (OMP_CLAUSE_CODE (c))
2764 {
2765 case OMP_CLAUSE_PRIVATE:
2766 if (OMP_CLAUSE_PRIVATE_OUTER_REF (c))
2767 break;
2768 continue;
2769 case OMP_CLAUSE_FIRSTPRIVATE:
2770 case OMP_CLAUSE_COPYIN:
2771 case OMP_CLAUSE_LASTPRIVATE:
2772 case OMP_CLAUSE_REDUCTION:
2773 break;
2774 default:
2775 continue;
2776 }
2777
2778 val = OMP_CLAUSE_DECL (c);
2779 var = lookup_decl_in_outer_ctx (val, ctx);
2780
2781 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_COPYIN
2782 && is_global_var (var))
2783 continue;
2784 if (is_variable_sized (val))
2785 continue;
2786 by_ref = use_pointer_for_field (val, NULL);
2787
2788 switch (OMP_CLAUSE_CODE (c))
2789 {
2790 case OMP_CLAUSE_PRIVATE:
2791 case OMP_CLAUSE_FIRSTPRIVATE:
2792 case OMP_CLAUSE_COPYIN:
2793 do_in = true;
2794 break;
2795
2796 case OMP_CLAUSE_LASTPRIVATE:
2797 if (by_ref || is_reference (val))
2798 {
2799 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
2800 continue;
2801 do_in = true;
2802 }
2803 else
2804 {
2805 do_out = true;
2806 if (lang_hooks.decls.omp_private_outer_ref (val))
2807 do_in = true;
2808 }
2809 break;
2810
2811 case OMP_CLAUSE_REDUCTION:
2812 do_in = true;
2813 do_out = !(by_ref || is_reference (val));
2814 break;
2815
2816 default:
2817 gcc_unreachable ();
2818 }
2819
2820 if (do_in)
2821 {
2822 ref = build_sender_ref (val, ctx);
2823 x = by_ref ? build_fold_addr_expr_loc (clause_loc, var) : var;
2824 gimplify_assign (ref, x, ilist);
2825 if (is_task_ctx (ctx))
2826 DECL_ABSTRACT_ORIGIN (TREE_OPERAND (ref, 1)) = NULL;
2827 }
2828
2829 if (do_out)
2830 {
2831 ref = build_sender_ref (val, ctx);
2832 gimplify_assign (var, ref, olist);
2833 }
2834 }
2835 }
2836
2837 /* Generate code to implement SHARED from the sender (aka parent)
2838 side. This is trickier, since GIMPLE_OMP_PARALLEL_CLAUSES doesn't
2839 list things that got automatically shared. */
2840
2841 static void
2842 lower_send_shared_vars (gimple_seq *ilist, gimple_seq *olist, omp_context *ctx)
2843 {
2844 tree var, ovar, nvar, f, x, record_type;
2845
2846 if (ctx->record_type == NULL)
2847 return;
2848
2849 record_type = ctx->srecord_type ? ctx->srecord_type : ctx->record_type;
2850 for (f = TYPE_FIELDS (record_type); f ; f = DECL_CHAIN (f))
2851 {
2852 ovar = DECL_ABSTRACT_ORIGIN (f);
2853 nvar = maybe_lookup_decl (ovar, ctx);
2854 if (!nvar || !DECL_HAS_VALUE_EXPR_P (nvar))
2855 continue;
2856
2857       /* If CTX is a nested parallel directive, find the immediately
2858 enclosing parallel or workshare construct that contains a
2859 mapping for OVAR. */
2860 var = lookup_decl_in_outer_ctx (ovar, ctx);
2861
2862 if (use_pointer_for_field (ovar, ctx))
2863 {
2864 x = build_sender_ref (ovar, ctx);
2865 var = build_fold_addr_expr (var);
2866 gimplify_assign (x, var, ilist);
2867 }
2868 else
2869 {
2870 x = build_sender_ref (ovar, ctx);
2871 gimplify_assign (x, var, ilist);
2872
2873 if (!TREE_READONLY (var)
2874 /* We don't need to receive a new reference to a result
2875 or parm decl. In fact we may not store to it as we will
2876 invalidate any pending RSO and generate wrong gimple
2877 during inlining. */
2878 && !((TREE_CODE (var) == RESULT_DECL
2879 || TREE_CODE (var) == PARM_DECL)
2880 && DECL_BY_REFERENCE (var)))
2881 {
2882 x = build_sender_ref (ovar, ctx);
2883 gimplify_assign (var, x, olist);
2884 }
2885 }
2886 }
2887 }
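
/* Sketch for an automatically shared variable I: the sender stores it
   into the communication record before the region (ILIST),

       .omp_data_o.i = i;        or .omp_data_o.i = &i when the field
                                 is a pointer

   and, unless I is read-only or was sent by address, copies it back
   afterwards (OLIST):

       i = .omp_data_o.i;  */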
2888
2889
2890 /* A convenience function to build an empty GIMPLE_COND with just the
2891 condition. */
2892
2893 static gimple
2894 gimple_build_cond_empty (tree cond)
2895 {
2896 enum tree_code pred_code;
2897 tree lhs, rhs;
2898
2899 gimple_cond_get_ops_from_tree (cond, &pred_code, &lhs, &rhs);
2900 return gimple_build_cond (pred_code, lhs, rhs, NULL_TREE, NULL_TREE);
2901 }
2902
2903
2904 /* Build the function calls to GOMP_parallel_start etc to actually
2905 generate the parallel operation. REGION is the parallel region
2906    being expanded.  BB is the block into which to insert the code.  WS_ARGS
2907    will be set if this is a call to a combined parallel+workshare
2908    construct; it contains the list of additional arguments needed by
2909 the workshare construct. */
2910
2911 static void
2912 expand_parallel_call (struct omp_region *region, basic_block bb,
2913 gimple entry_stmt, VEC(tree,gc) *ws_args)
2914 {
2915 tree t, t1, t2, val, cond, c, clauses;
2916 gimple_stmt_iterator gsi;
2917 gimple stmt;
2918 int start_ix;
2919 location_t clause_loc;
2920 VEC(tree,gc) *args;
2921
2922 clauses = gimple_omp_parallel_clauses (entry_stmt);
2923
2924 /* Determine what flavor of GOMP_parallel_start we will be
2925 emitting. */
2926 start_ix = BUILT_IN_GOMP_PARALLEL_START;
2927 if (is_combined_parallel (region))
2928 {
2929 switch (region->inner->type)
2930 {
2931 case GIMPLE_OMP_FOR:
2932 gcc_assert (region->inner->sched_kind != OMP_CLAUSE_SCHEDULE_AUTO);
2933 start_ix = BUILT_IN_GOMP_PARALLEL_LOOP_STATIC_START
2934 + (region->inner->sched_kind
2935 == OMP_CLAUSE_SCHEDULE_RUNTIME
2936 ? 3 : region->inner->sched_kind);
2937 break;
2938 case GIMPLE_OMP_SECTIONS:
2939 start_ix = BUILT_IN_GOMP_PARALLEL_SECTIONS_START;
2940 break;
2941 default:
2942 gcc_unreachable ();
2943 }
2944 }
2945
2946 /* By default, the value of NUM_THREADS is zero (selected at run time)
2947 and there is no conditional. */
2948 cond = NULL_TREE;
2949 val = build_int_cst (unsigned_type_node, 0);
2950
2951 c = find_omp_clause (clauses, OMP_CLAUSE_IF);
2952 if (c)
2953 cond = OMP_CLAUSE_IF_EXPR (c);
2954
2955 c = find_omp_clause (clauses, OMP_CLAUSE_NUM_THREADS);
2956 if (c)
2957 {
2958 val = OMP_CLAUSE_NUM_THREADS_EXPR (c);
2959 clause_loc = OMP_CLAUSE_LOCATION (c);
2960 }
2961 else
2962 clause_loc = gimple_location (entry_stmt);
2963
2964 /* Ensure 'val' is of the correct type. */
2965 val = fold_convert_loc (clause_loc, unsigned_type_node, val);
2966
2967   /* If we found the clause 'if (cond)', build either
2968      (cond == 0), i.e. one thread when COND is false, or (cond ? val : 1u).  */
2969 if (cond)
2970 {
2971 gimple_stmt_iterator gsi;
2972
2973 cond = gimple_boolify (cond);
2974
2975 if (integer_zerop (val))
2976 val = fold_build2_loc (clause_loc,
2977 EQ_EXPR, unsigned_type_node, cond,
2978 build_int_cst (TREE_TYPE (cond), 0));
2979 else
2980 {
2981 basic_block cond_bb, then_bb, else_bb;
2982 edge e, e_then, e_else;
2983 tree tmp_then, tmp_else, tmp_join, tmp_var;
2984
2985 tmp_var = create_tmp_var (TREE_TYPE (val), NULL);
2986 if (gimple_in_ssa_p (cfun))
2987 {
2988 tmp_then = make_ssa_name (tmp_var, NULL);
2989 tmp_else = make_ssa_name (tmp_var, NULL);
2990 tmp_join = make_ssa_name (tmp_var, NULL);
2991 }
2992 else
2993 {
2994 tmp_then = tmp_var;
2995 tmp_else = tmp_var;
2996 tmp_join = tmp_var;
2997 }
2998
2999 e = split_block (bb, NULL);
3000 cond_bb = e->src;
3001 bb = e->dest;
3002 remove_edge (e);
3003
3004 then_bb = create_empty_bb (cond_bb);
3005 else_bb = create_empty_bb (then_bb);
3006 set_immediate_dominator (CDI_DOMINATORS, then_bb, cond_bb);
3007 set_immediate_dominator (CDI_DOMINATORS, else_bb, cond_bb);
3008
3009 stmt = gimple_build_cond_empty (cond);
3010 gsi = gsi_start_bb (cond_bb);
3011 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3012
3013 gsi = gsi_start_bb (then_bb);
3014 stmt = gimple_build_assign (tmp_then, val);
3015 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3016
3017 gsi = gsi_start_bb (else_bb);
3018 stmt = gimple_build_assign
3019 (tmp_else, build_int_cst (unsigned_type_node, 1));
3020 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3021
3022 make_edge (cond_bb, then_bb, EDGE_TRUE_VALUE);
3023 make_edge (cond_bb, else_bb, EDGE_FALSE_VALUE);
3024 e_then = make_edge (then_bb, bb, EDGE_FALLTHRU);
3025 e_else = make_edge (else_bb, bb, EDGE_FALLTHRU);
3026
3027 if (gimple_in_ssa_p (cfun))
3028 {
3029 gimple phi = create_phi_node (tmp_join, bb);
3030 SSA_NAME_DEF_STMT (tmp_join) = phi;
3031 add_phi_arg (phi, tmp_then, e_then, UNKNOWN_LOCATION);
3032 add_phi_arg (phi, tmp_else, e_else, UNKNOWN_LOCATION);
3033 }
3034
3035 val = tmp_join;
3036 }
3037
3038 gsi = gsi_start_bb (bb);
3039 val = force_gimple_operand_gsi (&gsi, val, true, NULL_TREE,
3040 false, GSI_CONTINUE_LINKING);
3041 }
3042
3043 gsi = gsi_last_bb (bb);
3044 t = gimple_omp_parallel_data_arg (entry_stmt);
3045 if (t == NULL)
3046 t1 = null_pointer_node;
3047 else
3048 t1 = build_fold_addr_expr (t);
3049 t2 = build_fold_addr_expr (gimple_omp_parallel_child_fn (entry_stmt));
3050
3051 args = VEC_alloc (tree, gc, 3 + VEC_length (tree, ws_args));
3052 VEC_quick_push (tree, args, t2);
3053 VEC_quick_push (tree, args, t1);
3054 VEC_quick_push (tree, args, val);
3055 VEC_splice (tree, args, ws_args);
3056
3057 t = build_call_expr_loc_vec (UNKNOWN_LOCATION,
3058 built_in_decls[start_ix], args);
3059
3060 force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3061 false, GSI_CONTINUE_LINKING);
3062
3063 t = gimple_omp_parallel_data_arg (entry_stmt);
3064 if (t == NULL)
3065 t = null_pointer_node;
3066 else
3067 t = build_fold_addr_expr (t);
3068 t = build_call_expr_loc (gimple_location (entry_stmt),
3069 gimple_omp_parallel_child_fn (entry_stmt), 1, t);
3070 force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3071 false, GSI_CONTINUE_LINKING);
3072
3073 t = build_call_expr_loc (gimple_location (entry_stmt),
3074 built_in_decls[BUILT_IN_GOMP_PARALLEL_END], 0);
3075 force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3076 false, GSI_CONTINUE_LINKING);
3077 }
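
/* Schematically, the sequence materialized above is

       GOMP_parallel_start (child_fn, &.omp_data_o, num_threads);
       child_fn (&.omp_data_o);
       GOMP_parallel_end ();

   where NUM_THREADS is 0 (let the runtime choose) unless a num_threads
   clause supplied a value, and an if (cond) clause folds into
   "cond ? num_threads : 1".  For a combined parallel+workshare region,
   GOMP_parallel_start is replaced by the matching
   GOMP_parallel_loop_*_start or GOMP_parallel_sections_start entry
   point, which receives the extra WS_ARGS arguments.  */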
3078
3079
3080 /* Build the function call to GOMP_task to actually
3081 generate the task operation. BB is the block where to insert the code. */
3082
3083 static void
3084 expand_task_call (basic_block bb, gimple entry_stmt)
3085 {
3086 tree t, t1, t2, t3, flags, cond, c, clauses;
3087 gimple_stmt_iterator gsi;
3088 location_t loc = gimple_location (entry_stmt);
3089
3090 clauses = gimple_omp_task_clauses (entry_stmt);
3091
3092 c = find_omp_clause (clauses, OMP_CLAUSE_IF);
3093 if (c)
3094 cond = gimple_boolify (OMP_CLAUSE_IF_EXPR (c));
3095 else
3096 cond = boolean_true_node;
3097
3098 c = find_omp_clause (clauses, OMP_CLAUSE_UNTIED);
3099 flags = build_int_cst (unsigned_type_node, (c ? 1 : 0));
3100
3101 gsi = gsi_last_bb (bb);
3102 t = gimple_omp_task_data_arg (entry_stmt);
3103 if (t == NULL)
3104 t2 = null_pointer_node;
3105 else
3106 t2 = build_fold_addr_expr_loc (loc, t);
3107 t1 = build_fold_addr_expr_loc (loc, gimple_omp_task_child_fn (entry_stmt));
3108 t = gimple_omp_task_copy_fn (entry_stmt);
3109 if (t == NULL)
3110 t3 = null_pointer_node;
3111 else
3112 t3 = build_fold_addr_expr_loc (loc, t);
3113
3114 t = build_call_expr (built_in_decls[BUILT_IN_GOMP_TASK], 7, t1, t2, t3,
3115 gimple_omp_task_arg_size (entry_stmt),
3116 gimple_omp_task_arg_align (entry_stmt), cond, flags);
3117
3118 force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3119 false, GSI_CONTINUE_LINKING);
3120 }
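
/* Schematically, the call built above is

       GOMP_task (child_fn, &.omp_data_o, cpyfn, arg_size, arg_align,
                  if_cond, flags);

   where CPYFN may be a null pointer, IF_COND defaults to true when no
   if clause is present, and FLAGS currently carries only bit 0, set
   for untied tasks.  */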
3121
3122
3123 /* If exceptions are enabled, wrap the statements in BODY in a MUST_NOT_THROW
3124 catch handler and return it. This prevents programs from violating the
3125 structured block semantics with throws. */
3126
3127 static gimple_seq
3128 maybe_catch_exception (gimple_seq body)
3129 {
3130 gimple g;
3131 tree decl;
3132
3133 if (!flag_exceptions)
3134 return body;
3135
3136 if (lang_hooks.eh_protect_cleanup_actions != NULL)
3137 decl = lang_hooks.eh_protect_cleanup_actions ();
3138 else
3139 decl = built_in_decls[BUILT_IN_TRAP];
3140
3141 g = gimple_build_eh_must_not_throw (decl);
3142 g = gimple_build_try (body, gimple_seq_alloc_with_stmt (g),
3143 GIMPLE_TRY_CATCH);
3144
3145 return gimple_seq_alloc_with_stmt (g);
3146 }
3147
3148 /* Chain all the DECLs in vector V by their DECL_CHAIN fields.  */
3149
3150 static tree
3151 vec2chain (VEC(tree,gc) *v)
3152 {
3153 tree chain = NULL_TREE, t;
3154 unsigned ix;
3155
3156 FOR_EACH_VEC_ELT_REVERSE (tree, v, ix, t)
3157 {
3158 DECL_CHAIN (t) = chain;
3159 chain = t;
3160 }
3161
3162 return chain;
3163 }
3164
3165
3166 /* Remove barriers in REGION->EXIT's block.  Note that this is only
3167    valid for GIMPLE_OMP_PARALLEL regions.  Since the end of a parallel region
3168    is an implicit barrier, the barrier left at the end of the
3169    GIMPLE_OMP_PARALLEL region by any workshare inside it can now be
3170    removed.  */
3171
3172 static void
3173 remove_exit_barrier (struct omp_region *region)
3174 {
3175 gimple_stmt_iterator gsi;
3176 basic_block exit_bb;
3177 edge_iterator ei;
3178 edge e;
3179 gimple stmt;
3180 int any_addressable_vars = -1;
3181
3182 exit_bb = region->exit;
3183
3184 /* If the parallel region doesn't return, we don't have REGION->EXIT
3185 block at all. */
3186 if (! exit_bb)
3187 return;
3188
3189   /* The last statement in the block will be the parallel's GIMPLE_OMP_RETURN.  The
3190 workshare's GIMPLE_OMP_RETURN will be in a preceding block. The kinds of
3191 statements that can appear in between are extremely limited -- no
3192 memory operations at all. Here, we allow nothing at all, so the
3193 only thing we allow to precede this GIMPLE_OMP_RETURN is a label. */
3194 gsi = gsi_last_bb (exit_bb);
3195 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_RETURN);
3196 gsi_prev (&gsi);
3197 if (!gsi_end_p (gsi) && gimple_code (gsi_stmt (gsi)) != GIMPLE_LABEL)
3198 return;
3199
3200 FOR_EACH_EDGE (e, ei, exit_bb->preds)
3201 {
3202 gsi = gsi_last_bb (e->src);
3203 if (gsi_end_p (gsi))
3204 continue;
3205 stmt = gsi_stmt (gsi);
3206 if (gimple_code (stmt) == GIMPLE_OMP_RETURN
3207 && !gimple_omp_return_nowait_p (stmt))
3208 {
3209 /* OpenMP 3.0 tasks unfortunately prevent this optimization
3210 in many cases. If there could be tasks queued, the barrier
3211 might be needed to let the tasks run before some local
3212 variable of the parallel that the task uses as shared
3213 runs out of scope. The task can be spawned either
3214              from within the current function (this would be easy to check)
3215 or from some function it calls and gets passed an address
3216 of such a variable. */
3217 if (any_addressable_vars < 0)
3218 {
3219 gimple parallel_stmt = last_stmt (region->entry);
3220 tree child_fun = gimple_omp_parallel_child_fn (parallel_stmt);
3221 tree local_decls, block, decl;
3222 unsigned ix;
3223
3224 any_addressable_vars = 0;
3225 FOR_EACH_LOCAL_DECL (DECL_STRUCT_FUNCTION (child_fun), ix, decl)
3226 if (TREE_ADDRESSABLE (decl))
3227 {
3228 any_addressable_vars = 1;
3229 break;
3230 }
3231 for (block = gimple_block (stmt);
3232 !any_addressable_vars
3233 && block
3234 && TREE_CODE (block) == BLOCK;
3235 block = BLOCK_SUPERCONTEXT (block))
3236 {
3237 for (local_decls = BLOCK_VARS (block);
3238 local_decls;
3239 local_decls = DECL_CHAIN (local_decls))
3240 if (TREE_ADDRESSABLE (local_decls))
3241 {
3242 any_addressable_vars = 1;
3243 break;
3244 }
3245 if (block == gimple_block (parallel_stmt))
3246 break;
3247 }
3248 }
3249 if (!any_addressable_vars)
3250 gimple_omp_return_set_nowait (stmt);
3251 }
3252 }
3253 }
3254
3255 static void
3256 remove_exit_barriers (struct omp_region *region)
3257 {
3258 if (region->type == GIMPLE_OMP_PARALLEL)
3259 remove_exit_barrier (region);
3260
3261 if (region->inner)
3262 {
3263 region = region->inner;
3264 remove_exit_barriers (region);
3265 while (region->next)
3266 {
3267 region = region->next;
3268 remove_exit_barriers (region);
3269 }
3270 }
3271 }
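
/* Sketch of the effect: in

       #pragma omp parallel
       {
         #pragma omp for
         for (i = 0; i < n; i++)
           a[i] = f (i);
       }

   the loop's implicit barrier immediately precedes the implicit
   barrier of the parallel region itself, so it is marked nowait,
   unless addressable locals might still be shared with queued tasks,
   as explained above.  */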
3272
3273 /* Optimize omp_get_thread_num () and omp_get_num_threads ()
3274 calls. These can't be declared as const functions, but
3275 within one parallel body they are constant, so they can be
3276 transformed there into __builtin_omp_get_{thread_num,num_threads} ()
3277    which are declared const.  The same applies to a task body, except
3278    that in an untied task omp_get_thread_num () can change at any task
3279    scheduling point.  */
3280
3281 static void
3282 optimize_omp_library_calls (gimple entry_stmt)
3283 {
3284 basic_block bb;
3285 gimple_stmt_iterator gsi;
3286 tree thr_num_id
3287 = DECL_ASSEMBLER_NAME (built_in_decls [BUILT_IN_OMP_GET_THREAD_NUM]);
3288 tree num_thr_id
3289 = DECL_ASSEMBLER_NAME (built_in_decls [BUILT_IN_OMP_GET_NUM_THREADS]);
3290 bool untied_task = (gimple_code (entry_stmt) == GIMPLE_OMP_TASK
3291 && find_omp_clause (gimple_omp_task_clauses (entry_stmt),
3292 OMP_CLAUSE_UNTIED) != NULL);
3293
3294 FOR_EACH_BB (bb)
3295 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
3296 {
3297 gimple call = gsi_stmt (gsi);
3298 tree decl;
3299
3300 if (is_gimple_call (call)
3301 && (decl = gimple_call_fndecl (call))
3302 && DECL_EXTERNAL (decl)
3303 && TREE_PUBLIC (decl)
3304 && DECL_INITIAL (decl) == NULL)
3305 {
3306 tree built_in;
3307
3308 if (DECL_NAME (decl) == thr_num_id)
3309 {
3310 /* In #pragma omp task untied omp_get_thread_num () can change
3311 during the execution of the task region. */
3312 if (untied_task)
3313 continue;
3314 built_in = built_in_decls [BUILT_IN_OMP_GET_THREAD_NUM];
3315 }
3316 else if (DECL_NAME (decl) == num_thr_id)
3317 built_in = built_in_decls [BUILT_IN_OMP_GET_NUM_THREADS];
3318 else
3319 continue;
3320
3321 if (DECL_ASSEMBLER_NAME (decl) != DECL_ASSEMBLER_NAME (built_in)
3322 || gimple_call_num_args (call) != 0)
3323 continue;
3324
3325 if (flag_exceptions && !TREE_NOTHROW (decl))
3326 continue;
3327
3328 if (TREE_CODE (TREE_TYPE (decl)) != FUNCTION_TYPE
3329 || !types_compatible_p (TREE_TYPE (TREE_TYPE (decl)),
3330 TREE_TYPE (TREE_TYPE (built_in))))
3331 continue;
3332
3333 gimple_call_set_fndecl (call, built_in);
3334 }
3335 }
3336 }
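
/* Sketch: inside an outlined parallel body,

       tid = omp_get_thread_num ();

   is redirected to __builtin_omp_get_thread_num, whose const attribute
   lets later passes CSE repeated queries of the thread number within
   one region.  */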
3337
3338 /* Expand the OpenMP parallel or task directive starting at REGION. */
3339
3340 static void
3341 expand_omp_taskreg (struct omp_region *region)
3342 {
3343 basic_block entry_bb, exit_bb, new_bb;
3344 struct function *child_cfun;
3345 tree child_fn, block, t;
3346 tree save_current;
3347 gimple_stmt_iterator gsi;
3348 gimple entry_stmt, stmt;
3349 edge e;
3350 VEC(tree,gc) *ws_args;
3351
3352 entry_stmt = last_stmt (region->entry);
3353 child_fn = gimple_omp_taskreg_child_fn (entry_stmt);
3354 child_cfun = DECL_STRUCT_FUNCTION (child_fn);
3355   /* If this function has already been instrumented, make sure
3356 the child function isn't instrumented again. */
3357 child_cfun->after_tree_profile = cfun->after_tree_profile;
3358
3359 entry_bb = region->entry;
3360 exit_bb = region->exit;
3361
3362 if (is_combined_parallel (region))
3363 ws_args = region->ws_args;
3364 else
3365 ws_args = NULL;
3366
3367 if (child_cfun->cfg)
3368 {
3369 /* Due to inlining, it may happen that we have already outlined
3370 the region, in which case all we need to do is make the
3371 sub-graph unreachable and emit the parallel call. */
3372 edge entry_succ_e, exit_succ_e;
3373 gimple_stmt_iterator gsi;
3374
3375 entry_succ_e = single_succ_edge (entry_bb);
3376
3377 gsi = gsi_last_bb (entry_bb);
3378 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_PARALLEL
3379 || gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_TASK);
3380 gsi_remove (&gsi, true);
3381
3382 new_bb = entry_bb;
3383 if (exit_bb)
3384 {
3385 exit_succ_e = single_succ_edge (exit_bb);
3386 make_edge (new_bb, exit_succ_e->dest, EDGE_FALLTHRU);
3387 }
3388 remove_edge_and_dominated_blocks (entry_succ_e);
3389 }
3390 else
3391 {
3392 unsigned srcidx, dstidx, num;
3393
3394 /* If the parallel region needs data sent from the parent
3395 function, then the very first statement (except possible
3396 tree profile counter updates) of the parallel body
3397 is a copy assignment .OMP_DATA_I = &.OMP_DATA_O. Since
3398 &.OMP_DATA_O is passed as an argument to the child function,
3399 we need to replace it with the argument as seen by the child
3400 function.
3401
3402 In most cases, this will end up being the identity assignment
3403 .OMP_DATA_I = .OMP_DATA_I. However, if the parallel body had
3404 a function call that has been inlined, the original PARM_DECL
3405 .OMP_DATA_I may have been converted into a different local
3406          variable, in which case we need to keep the assignment.  */
3407 if (gimple_omp_taskreg_data_arg (entry_stmt))
3408 {
3409 basic_block entry_succ_bb = single_succ (entry_bb);
3410 gimple_stmt_iterator gsi;
3411 tree arg, narg;
3412 gimple parcopy_stmt = NULL;
3413
3414 for (gsi = gsi_start_bb (entry_succ_bb); ; gsi_next (&gsi))
3415 {
3416 gimple stmt;
3417
3418 gcc_assert (!gsi_end_p (gsi));
3419 stmt = gsi_stmt (gsi);
3420 if (gimple_code (stmt) != GIMPLE_ASSIGN)
3421 continue;
3422
3423 if (gimple_num_ops (stmt) == 2)
3424 {
3425 tree arg = gimple_assign_rhs1 (stmt);
3426
3427                  /* We're ignoring the subcode because we're
3428 effectively doing a STRIP_NOPS. */
3429
3430 if (TREE_CODE (arg) == ADDR_EXPR
3431 && TREE_OPERAND (arg, 0)
3432 == gimple_omp_taskreg_data_arg (entry_stmt))
3433 {
3434 parcopy_stmt = stmt;
3435 break;
3436 }
3437 }
3438 }
3439
3440 gcc_assert (parcopy_stmt != NULL);
3441 arg = DECL_ARGUMENTS (child_fn);
3442
3443 if (!gimple_in_ssa_p (cfun))
3444 {
3445 if (gimple_assign_lhs (parcopy_stmt) == arg)
3446 gsi_remove (&gsi, true);
3447 else
3448 {
3449 /* ?? Is setting the subcode really necessary ?? */
3450 gimple_omp_set_subcode (parcopy_stmt, TREE_CODE (arg));
3451 gimple_assign_set_rhs1 (parcopy_stmt, arg);
3452 }
3453 }
3454 else
3455 {
3456           /* If we are in SSA form, we must load the value from the default
3457              definition of the argument.  That definition should not exist yet,
3458              since the argument is never used uninitialized.  */
3459 gcc_assert (gimple_default_def (cfun, arg) == NULL);
3460 narg = make_ssa_name (arg, gimple_build_nop ());
3461 set_default_def (arg, narg);
3462 /* ?? Is setting the subcode really necessary ?? */
3463 gimple_omp_set_subcode (parcopy_stmt, TREE_CODE (narg));
3464 gimple_assign_set_rhs1 (parcopy_stmt, narg);
3465 update_stmt (parcopy_stmt);
3466 }
3467 }
3468
3469 /* Declare local variables needed in CHILD_CFUN. */
3470 block = DECL_INITIAL (child_fn);
3471 BLOCK_VARS (block) = vec2chain (child_cfun->local_decls);
3472 /* The gimplifier could record temporaries in parallel/task block
3473          rather than in the containing function's local_decls chain,
3474 which would mean cgraph missed finalizing them. Do it now. */
3475 for (t = BLOCK_VARS (block); t; t = DECL_CHAIN (t))
3476 if (TREE_CODE (t) == VAR_DECL
3477 && TREE_STATIC (t)
3478 && !DECL_EXTERNAL (t))
3479 varpool_finalize_decl (t);
3480 DECL_SAVED_TREE (child_fn) = NULL;
3481 gimple_set_body (child_fn, bb_seq (single_succ (entry_bb)));
3482 TREE_USED (block) = 1;
3483
3484 /* Reset DECL_CONTEXT on function arguments. */
3485 for (t = DECL_ARGUMENTS (child_fn); t; t = DECL_CHAIN (t))
3486 DECL_CONTEXT (t) = child_fn;
3487
3488 /* Split ENTRY_BB at GIMPLE_OMP_PARALLEL or GIMPLE_OMP_TASK,
3489 so that it can be moved to the child function. */
3490 gsi = gsi_last_bb (entry_bb);
3491 stmt = gsi_stmt (gsi);
3492 gcc_assert (stmt && (gimple_code (stmt) == GIMPLE_OMP_PARALLEL
3493 || gimple_code (stmt) == GIMPLE_OMP_TASK));
3494 gsi_remove (&gsi, true);
3495 e = split_block (entry_bb, stmt);
3496 entry_bb = e->dest;
3497 single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
3498
3499 /* Convert GIMPLE_OMP_RETURN into a RETURN_EXPR. */
3500 if (exit_bb)
3501 {
3502 gsi = gsi_last_bb (exit_bb);
3503 gcc_assert (!gsi_end_p (gsi)
3504 && gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_RETURN);
3505 stmt = gimple_build_return (NULL);
3506 gsi_insert_after (&gsi, stmt, GSI_SAME_STMT);
3507 gsi_remove (&gsi, true);
3508 }
3509
3510 /* Move the parallel region into CHILD_CFUN. */
3511
3512 if (gimple_in_ssa_p (cfun))
3513 {
3514 push_cfun (child_cfun);
3515 init_tree_ssa (child_cfun);
3516 init_ssa_operands ();
3517 cfun->gimple_df->in_ssa_p = true;
3518 pop_cfun ();
3519 block = NULL_TREE;
3520 }
3521 else
3522 block = gimple_block (entry_stmt);
3523
3524 new_bb = move_sese_region_to_fn (child_cfun, entry_bb, exit_bb, block);
3525 if (exit_bb)
3526 single_succ_edge (new_bb)->flags = EDGE_FALLTHRU;
3527
3528 /* Remove non-local VAR_DECLs from child_cfun->local_decls list. */
3529 num = VEC_length (tree, child_cfun->local_decls);
3530 for (srcidx = 0, dstidx = 0; srcidx < num; srcidx++)
3531 {
3532 t = VEC_index (tree, child_cfun->local_decls, srcidx);
3533 if (DECL_CONTEXT (t) == cfun->decl)
3534 continue;
3535 if (srcidx != dstidx)
3536 VEC_replace (tree, child_cfun->local_decls, dstidx, t);
3537 dstidx++;
3538 }
3539 if (dstidx != num)
3540 VEC_truncate (tree, child_cfun->local_decls, dstidx);
3541
3542 /* Inform the callgraph about the new function. */
3543 DECL_STRUCT_FUNCTION (child_fn)->curr_properties
3544 = cfun->curr_properties;
3545 cgraph_add_new_function (child_fn, true);
3546
3547 /* Fix the callgraph edges for child_cfun. Those for cfun will be
3548 fixed in a following pass. */
3549 push_cfun (child_cfun);
3550 save_current = current_function_decl;
3551 current_function_decl = child_fn;
3552 if (optimize)
3553 optimize_omp_library_calls (entry_stmt);
3554 rebuild_cgraph_edges ();
3555
3556 /* Some EH regions might become dead, see PR34608. If
3557 pass_cleanup_cfg isn't the first pass to happen with the
3558 new child, these dead EH edges might cause problems.
3559 Clean them up now. */
3560 if (flag_exceptions)
3561 {
3562 basic_block bb;
3563 bool changed = false;
3564
3565 FOR_EACH_BB (bb)
3566 changed |= gimple_purge_dead_eh_edges (bb);
3567 if (changed)
3568 cleanup_tree_cfg ();
3569 }
3570 if (gimple_in_ssa_p (cfun))
3571 update_ssa (TODO_update_ssa);
3572 current_function_decl = save_current;
3573 pop_cfun ();
3574 }
3575
3576 /* Emit a library call to launch the children threads. */
3577 if (gimple_code (entry_stmt) == GIMPLE_OMP_PARALLEL)
3578 expand_parallel_call (region, new_bb, entry_stmt, ws_args);
3579 else
3580 expand_task_call (new_bb, entry_stmt);
3581 update_ssa (TODO_update_ssa_only_virtuals);
3582 }
3583
3584
3585 /* A subroutine of expand_omp_for. Generate code for a parallel
3586 loop with any schedule. Given parameters:
3587
3588 for (V = N1; V cond N2; V += STEP) BODY;
3589
3590 where COND is "<" or ">", we generate pseudocode
3591
3592 more = GOMP_loop_foo_start (N1, N2, STEP, CHUNK, &istart0, &iend0);
3593 if (more) goto L0; else goto L3;
3594 L0:
3595 V = istart0;
3596 iend = iend0;
3597 L1:
3598 BODY;
3599 V += STEP;
3600 if (V cond iend) goto L1; else goto L2;
3601 L2:
3602 if (GOMP_loop_foo_next (&istart0, &iend0)) goto L0; else goto L3;
3603 L3:
3604
3605 If this is a combined omp parallel loop, instead of the call to
3606 GOMP_loop_foo_start, we call GOMP_loop_foo_next.
3607
3608 For collapsed loops, given parameters:
3609 collapse(3)
3610 for (V1 = N11; V1 cond1 N12; V1 += STEP1)
3611 for (V2 = N21; V2 cond2 N22; V2 += STEP2)
3612 for (V3 = N31; V3 cond3 N32; V3 += STEP3)
3613 BODY;
3614
3615 we generate pseudocode
3616
3617 if (cond3 is <)
3618 adj = STEP3 - 1;
3619 else
3620 adj = STEP3 + 1;
3621 count3 = (adj + N32 - N31) / STEP3;
3622 if (cond2 is <)
3623 adj = STEP2 - 1;
3624 else
3625 adj = STEP2 + 1;
3626 count2 = (adj + N22 - N21) / STEP2;
3627 if (cond1 is <)
3628 adj = STEP1 - 1;
3629 else
3630 adj = STEP1 + 1;
3631 count1 = (adj + N12 - N11) / STEP1;
3632 count = count1 * count2 * count3;
3633 more = GOMP_loop_foo_start (0, count, 1, CHUNK, &istart0, &iend0);
3634 if (more) goto L0; else goto L3;
3635 L0:
3636 V = istart0;
3637 T = V;
3638 V3 = N31 + (T % count3) * STEP3;
3639 T = T / count3;
3640 V2 = N21 + (T % count2) * STEP2;
3641 T = T / count2;
3642 V1 = N11 + T * STEP1;
3643 iend = iend0;
3644 L1:
3645 BODY;
3646 V += 1;
3647 if (V < iend) goto L10; else goto L2;
3648 L10:
3649 V3 += STEP3;
3650 if (V3 cond3 N32) goto L1; else goto L11;
3651 L11:
3652 V3 = N31;
3653 V2 += STEP2;
3654 if (V2 cond2 N22) goto L1; else goto L12;
3655 L12:
3656 V2 = N21;
3657 V1 += STEP1;
3658 goto L1;
3659 L2:
3660 if (GOMP_loop_foo_next (&istart0, &iend0)) goto L0; else goto L3;
3661 L3:
3662
3663 */
3664
3665 static void
3666 expand_omp_for_generic (struct omp_region *region,
3667 struct omp_for_data *fd,
3668 enum built_in_function start_fn,
3669 enum built_in_function next_fn)
3670 {
3671 tree type, istart0, iend0, iend;
3672 tree t, vmain, vback, bias = NULL_TREE;
3673 basic_block entry_bb, cont_bb, exit_bb, l0_bb, l1_bb, collapse_bb;
3674 basic_block l2_bb = NULL, l3_bb = NULL;
3675 gimple_stmt_iterator gsi;
3676 gimple stmt;
3677 bool in_combined_parallel = is_combined_parallel (region);
3678 bool broken_loop = region->cont == NULL;
3679 edge e, ne;
3680 tree *counts = NULL;
3681 int i;
3682
3683 gcc_assert (!broken_loop || !in_combined_parallel);
3684 gcc_assert (fd->iter_type == long_integer_type_node
3685 || !in_combined_parallel);
3686
3687 type = TREE_TYPE (fd->loop.v);
3688 istart0 = create_tmp_var (fd->iter_type, ".istart0");
3689 iend0 = create_tmp_var (fd->iter_type, ".iend0");
3690 TREE_ADDRESSABLE (istart0) = 1;
3691 TREE_ADDRESSABLE (iend0) = 1;
3692 if (gimple_in_ssa_p (cfun))
3693 {
3694 add_referenced_var (istart0);
3695 add_referenced_var (iend0);
3696 }
3697
3698 /* See if we need to bias by LLONG_MIN. */
3699 if (fd->iter_type == long_long_unsigned_type_node
3700 && TREE_CODE (type) == INTEGER_TYPE
3701 && !TYPE_UNSIGNED (type))
3702 {
3703 tree n1, n2;
3704
3705 if (fd->loop.cond_code == LT_EXPR)
3706 {
3707 n1 = fd->loop.n1;
3708 n2 = fold_build2 (PLUS_EXPR, type, fd->loop.n2, fd->loop.step);
3709 }
3710 else
3711 {
3712 n1 = fold_build2 (MINUS_EXPR, type, fd->loop.n2, fd->loop.step);
3713 n2 = fd->loop.n1;
3714 }
3715 if (TREE_CODE (n1) != INTEGER_CST
3716 || TREE_CODE (n2) != INTEGER_CST
3717 || ((tree_int_cst_sgn (n1) < 0) ^ (tree_int_cst_sgn (n2) < 0)))
3718 bias = fold_convert (fd->iter_type, TYPE_MIN_VALUE (type));
3719 }
3720
3721 entry_bb = region->entry;
3722 cont_bb = region->cont;
3723 collapse_bb = NULL;
3724 gcc_assert (EDGE_COUNT (entry_bb->succs) == 2);
3725 gcc_assert (broken_loop
3726 || BRANCH_EDGE (entry_bb)->dest == FALLTHRU_EDGE (cont_bb)->dest);
3727 l0_bb = split_edge (FALLTHRU_EDGE (entry_bb));
3728 l1_bb = single_succ (l0_bb);
3729 if (!broken_loop)
3730 {
3731 l2_bb = create_empty_bb (cont_bb);
3732 gcc_assert (BRANCH_EDGE (cont_bb)->dest == l1_bb);
3733 gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
3734 }
3735 else
3736 l2_bb = NULL;
3737 l3_bb = BRANCH_EDGE (entry_bb)->dest;
3738 exit_bb = region->exit;
3739
3740 gsi = gsi_last_bb (entry_bb);
3741
3742 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
3743 if (fd->collapse > 1)
3744 {
3745       /* Collapsed loops need work for expansion in SSA form.  */
3746 gcc_assert (!gimple_in_ssa_p (cfun));
3747 counts = (tree *) alloca (fd->collapse * sizeof (tree));
3748 for (i = 0; i < fd->collapse; i++)
3749 {
3750 tree itype = TREE_TYPE (fd->loops[i].v);
3751
3752 if (POINTER_TYPE_P (itype))
3753 itype = lang_hooks.types.type_for_size (TYPE_PRECISION (itype), 0);
3754 t = build_int_cst (itype, (fd->loops[i].cond_code == LT_EXPR
3755 ? -1 : 1));
3756 t = fold_build2 (PLUS_EXPR, itype,
3757 fold_convert (itype, fd->loops[i].step), t);
3758 t = fold_build2 (PLUS_EXPR, itype, t,
3759 fold_convert (itype, fd->loops[i].n2));
3760 t = fold_build2 (MINUS_EXPR, itype, t,
3761 fold_convert (itype, fd->loops[i].n1));
3762 if (TYPE_UNSIGNED (itype) && fd->loops[i].cond_code == GT_EXPR)
3763 t = fold_build2 (TRUNC_DIV_EXPR, itype,
3764 fold_build1 (NEGATE_EXPR, itype, t),
3765 fold_build1 (NEGATE_EXPR, itype,
3766 fold_convert (itype,
3767 fd->loops[i].step)));
3768 else
3769 t = fold_build2 (TRUNC_DIV_EXPR, itype, t,
3770 fold_convert (itype, fd->loops[i].step));
3771 t = fold_convert (type, t);
3772 if (TREE_CODE (t) == INTEGER_CST)
3773 counts[i] = t;
3774 else
3775 {
3776 counts[i] = create_tmp_var (type, ".count");
3777 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3778 true, GSI_SAME_STMT);
3779 stmt = gimple_build_assign (counts[i], t);
3780 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
3781 }
3782 if (SSA_VAR_P (fd->loop.n2))
3783 {
3784 if (i == 0)
3785 t = counts[0];
3786 else
3787 {
3788 t = fold_build2 (MULT_EXPR, type, fd->loop.n2, counts[i]);
3789 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3790 true, GSI_SAME_STMT);
3791 }
3792 stmt = gimple_build_assign (fd->loop.n2, t);
3793 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
3794 }
3795 }
3796 }
3797 if (in_combined_parallel)
3798 {
3799 /* In a combined parallel loop, emit a call to
3800 GOMP_loop_foo_next. */
3801 t = build_call_expr (built_in_decls[next_fn], 2,
3802 build_fold_addr_expr (istart0),
3803 build_fold_addr_expr (iend0));
3804 }
3805 else
3806 {
3807 tree t0, t1, t2, t3, t4;
3808 /* If this is not a combined parallel loop, emit a call to
3809 GOMP_loop_foo_start in ENTRY_BB. */
3810 t4 = build_fold_addr_expr (iend0);
3811 t3 = build_fold_addr_expr (istart0);
3812 t2 = fold_convert (fd->iter_type, fd->loop.step);
3813 if (POINTER_TYPE_P (type)
3814 && TYPE_PRECISION (type) != TYPE_PRECISION (fd->iter_type))
3815 {
3816 /* Avoid casting pointers to integer of a different size. */
3817 tree itype
3818 = lang_hooks.types.type_for_size (TYPE_PRECISION (type), 0);
3819 t1 = fold_convert (fd->iter_type, fold_convert (itype, fd->loop.n2));
3820 t0 = fold_convert (fd->iter_type, fold_convert (itype, fd->loop.n1));
3821 }
3822 else
3823 {
3824 t1 = fold_convert (fd->iter_type, fd->loop.n2);
3825 t0 = fold_convert (fd->iter_type, fd->loop.n1);
3826 }
3827 if (bias)
3828 {
3829 t1 = fold_build2 (PLUS_EXPR, fd->iter_type, t1, bias);
3830 t0 = fold_build2 (PLUS_EXPR, fd->iter_type, t0, bias);
3831 }
3832 if (fd->iter_type == long_integer_type_node)
3833 {
3834 if (fd->chunk_size)
3835 {
3836 t = fold_convert (fd->iter_type, fd->chunk_size);
3837 t = build_call_expr (built_in_decls[start_fn], 6,
3838 t0, t1, t2, t, t3, t4);
3839 }
3840 else
3841 t = build_call_expr (built_in_decls[start_fn], 5,
3842 t0, t1, t2, t3, t4);
3843 }
3844 else
3845 {
3846 tree t5;
3847 tree c_bool_type;
3848
3849 /* The GOMP_loop_ull_*start functions have additional boolean
3850 argument, true for < loops and false for > loops.
3851 In Fortran, the C bool type can be different from
3852 boolean_type_node. */
3853 c_bool_type = TREE_TYPE (TREE_TYPE (built_in_decls[start_fn]));
3854 t5 = build_int_cst (c_bool_type,
3855 fd->loop.cond_code == LT_EXPR ? 1 : 0);
3856 if (fd->chunk_size)
3857 {
3858 t = fold_convert (fd->iter_type, fd->chunk_size);
3859 t = build_call_expr (built_in_decls[start_fn], 7,
3860 t5, t0, t1, t2, t, t3, t4);
3861 }
3862 else
3863 t = build_call_expr (built_in_decls[start_fn], 6,
3864 t5, t0, t1, t2, t3, t4);
3865 }
3866 }
3867 if (TREE_TYPE (t) != boolean_type_node)
3868 t = fold_build2 (NE_EXPR, boolean_type_node,
3869 t, build_int_cst (TREE_TYPE (t), 0));
3870 t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3871 true, GSI_SAME_STMT);
3872 gsi_insert_after (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT);
3873
3874 /* Remove the GIMPLE_OMP_FOR statement. */
3875 gsi_remove (&gsi, true);
3876
3877 /* Iteration setup for sequential loop goes in L0_BB. */
3878 gsi = gsi_start_bb (l0_bb);
3879 t = istart0;
3880 if (bias)
3881 t = fold_build2 (MINUS_EXPR, fd->iter_type, t, bias);
3882 if (POINTER_TYPE_P (type))
3883 t = fold_convert (lang_hooks.types.type_for_size (TYPE_PRECISION (type),
3884 0), t);
3885 t = fold_convert (type, t);
3886 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3887 false, GSI_CONTINUE_LINKING);
3888 stmt = gimple_build_assign (fd->loop.v, t);
3889 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3890
3891 t = iend0;
3892 if (bias)
3893 t = fold_build2 (MINUS_EXPR, fd->iter_type, t, bias);
3894 if (POINTER_TYPE_P (type))
3895 t = fold_convert (lang_hooks.types.type_for_size (TYPE_PRECISION (type),
3896 0), t);
3897 t = fold_convert (type, t);
3898 iend = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3899 false, GSI_CONTINUE_LINKING);
3900 if (fd->collapse > 1)
3901 {
3902 tree tem = create_tmp_var (type, ".tem");
3903
3904 stmt = gimple_build_assign (tem, fd->loop.v);
3905 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3906 for (i = fd->collapse - 1; i >= 0; i--)
3907 {
3908 tree vtype = TREE_TYPE (fd->loops[i].v), itype;
3909 itype = vtype;
3910 if (POINTER_TYPE_P (vtype))
3911 itype = lang_hooks.types.type_for_size (TYPE_PRECISION (vtype), 0);
3912 t = fold_build2 (TRUNC_MOD_EXPR, type, tem, counts[i]);
3913 t = fold_convert (itype, t);
3914 t = fold_build2 (MULT_EXPR, itype, t,
3915 fold_convert (itype, fd->loops[i].step));
3916 if (POINTER_TYPE_P (vtype))
3917 t = fold_build2 (POINTER_PLUS_EXPR, vtype,
3918 fd->loops[i].n1, fold_convert (sizetype, t));
3919 else
3920 t = fold_build2 (PLUS_EXPR, itype, fd->loops[i].n1, t);
3921 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3922 false, GSI_CONTINUE_LINKING);
3923 stmt = gimple_build_assign (fd->loops[i].v, t);
3924 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3925 if (i != 0)
3926 {
3927 t = fold_build2 (TRUNC_DIV_EXPR, type, tem, counts[i]);
3928 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3929 false, GSI_CONTINUE_LINKING);
3930 stmt = gimple_build_assign (tem, t);
3931 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3932 }
3933 }
3934 }
3935
3936 if (!broken_loop)
3937 {
3938 /* Code to control the increment and predicate for the sequential
3939 loop goes in the CONT_BB. */
3940 gsi = gsi_last_bb (cont_bb);
3941 stmt = gsi_stmt (gsi);
3942 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_CONTINUE);
3943 vmain = gimple_omp_continue_control_use (stmt);
3944 vback = gimple_omp_continue_control_def (stmt);
3945
3946 if (POINTER_TYPE_P (type))
3947 t = fold_build2 (POINTER_PLUS_EXPR, type, vmain,
3948 fold_convert (sizetype, fd->loop.step));
3949 else
3950 t = fold_build2 (PLUS_EXPR, type, vmain, fd->loop.step);
3951 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3952 true, GSI_SAME_STMT);
3953 stmt = gimple_build_assign (vback, t);
3954 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
3955
3956 t = build2 (fd->loop.cond_code, boolean_type_node, vback, iend);
3957 stmt = gimple_build_cond_empty (t);
3958 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
3959
3960 /* Remove GIMPLE_OMP_CONTINUE. */
3961 gsi_remove (&gsi, true);
3962
3963 if (fd->collapse > 1)
3964 {
3965 basic_block last_bb, bb;
3966
3967 last_bb = cont_bb;
3968 for (i = fd->collapse - 1; i >= 0; i--)
3969 {
3970 tree vtype = TREE_TYPE (fd->loops[i].v);
3971
3972 bb = create_empty_bb (last_bb);
3973 gsi = gsi_start_bb (bb);
3974
3975 if (i < fd->collapse - 1)
3976 {
3977 e = make_edge (last_bb, bb, EDGE_FALSE_VALUE);
3978 e->probability = REG_BR_PROB_BASE / 8;
3979
3980 t = fd->loops[i + 1].n1;
3981 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3982 false, GSI_CONTINUE_LINKING);
3983 stmt = gimple_build_assign (fd->loops[i + 1].v, t);
3984 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3985 }
3986 else
3987 collapse_bb = bb;
3988
3989 set_immediate_dominator (CDI_DOMINATORS, bb, last_bb);
3990
3991 if (POINTER_TYPE_P (vtype))
3992 t = fold_build2 (POINTER_PLUS_EXPR, vtype,
3993 fd->loops[i].v,
3994 fold_convert (sizetype, fd->loops[i].step));
3995 else
3996 t = fold_build2 (PLUS_EXPR, vtype, fd->loops[i].v,
3997 fd->loops[i].step);
3998 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3999 false, GSI_CONTINUE_LINKING);
4000 stmt = gimple_build_assign (fd->loops[i].v, t);
4001 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
4002
4003 if (i > 0)
4004 {
4005 t = fd->loops[i].n2;
4006 t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
4007 false, GSI_CONTINUE_LINKING);
4008 t = fold_build2 (fd->loops[i].cond_code, boolean_type_node,
4009 fd->loops[i].v, t);
4010 stmt = gimple_build_cond_empty (t);
4011 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
4012 e = make_edge (bb, l1_bb, EDGE_TRUE_VALUE);
4013 e->probability = REG_BR_PROB_BASE * 7 / 8;
4014 }
4015 else
4016 make_edge (bb, l1_bb, EDGE_FALLTHRU);
4017 last_bb = bb;
4018 }
4019 }
4020
4021 /* Emit code to get the next parallel iteration in L2_BB. */
4022 gsi = gsi_start_bb (l2_bb);
4023
4024 t = build_call_expr (built_in_decls[next_fn], 2,
4025 build_fold_addr_expr (istart0),
4026 build_fold_addr_expr (iend0));
4027 t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
4028 false, GSI_CONTINUE_LINKING);
4029 if (TREE_TYPE (t) != boolean_type_node)
4030 t = fold_build2 (NE_EXPR, boolean_type_node,
4031 t, build_int_cst (TREE_TYPE (t), 0));
4032 stmt = gimple_build_cond_empty (t);
4033 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
4034 }
4035
4036 /* Add the loop cleanup function. */
4037 gsi = gsi_last_bb (exit_bb);
4038 if (gimple_omp_return_nowait_p (gsi_stmt (gsi)))
4039 t = built_in_decls[BUILT_IN_GOMP_LOOP_END_NOWAIT];
4040 else
4041 t = built_in_decls[BUILT_IN_GOMP_LOOP_END];
4042 stmt = gimple_build_call (t, 0);
4043 gsi_insert_after (&gsi, stmt, GSI_SAME_STMT);
4044 gsi_remove (&gsi, true);
4045
4046 /* Connect the new blocks. */
4047 find_edge (entry_bb, l0_bb)->flags = EDGE_TRUE_VALUE;
4048 find_edge (entry_bb, l3_bb)->flags = EDGE_FALSE_VALUE;
4049
4050 if (!broken_loop)
4051 {
4052 gimple_seq phis;
4053
4054 e = find_edge (cont_bb, l3_bb);
4055 ne = make_edge (l2_bb, l3_bb, EDGE_FALSE_VALUE);
4056
4057 phis = phi_nodes (l3_bb);
4058 for (gsi = gsi_start (phis); !gsi_end_p (gsi); gsi_next (&gsi))
4059 {
4060 gimple phi = gsi_stmt (gsi);
4061 SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, ne),
4062 PHI_ARG_DEF_FROM_EDGE (phi, e));
4063 }
4064 remove_edge (e);
4065
4066 make_edge (cont_bb, l2_bb, EDGE_FALSE_VALUE);
4067 if (fd->collapse > 1)
4068 {
4069 e = find_edge (cont_bb, l1_bb);
4070 remove_edge (e);
4071 e = make_edge (cont_bb, collapse_bb, EDGE_TRUE_VALUE);
4072 }
4073 else
4074 {
4075 e = find_edge (cont_bb, l1_bb);
4076 e->flags = EDGE_TRUE_VALUE;
4077 }
4078 e->probability = REG_BR_PROB_BASE * 7 / 8;
4079 find_edge (cont_bb, l2_bb)->probability = REG_BR_PROB_BASE / 8;
4080 make_edge (l2_bb, l0_bb, EDGE_TRUE_VALUE);
4081
4082 set_immediate_dominator (CDI_DOMINATORS, l2_bb,
4083 recompute_dominator (CDI_DOMINATORS, l2_bb));
4084 set_immediate_dominator (CDI_DOMINATORS, l3_bb,
4085 recompute_dominator (CDI_DOMINATORS, l3_bb));
4086 set_immediate_dominator (CDI_DOMINATORS, l0_bb,
4087 recompute_dominator (CDI_DOMINATORS, l0_bb));
4088 set_immediate_dominator (CDI_DOMINATORS, l1_bb,
4089 recompute_dominator (CDI_DOMINATORS, l1_bb));
4090 }
4091 }
4092
4093
4094 /* A subroutine of expand_omp_for. Generate code for a parallel
4095 loop with static schedule and no specified chunk size. Given
4096 parameters:
4097
4098 for (V = N1; V cond N2; V += STEP) BODY;
4099
4100 where COND is "<" or ">", we generate pseudocode
4101
4102 if (cond is <)
4103 adj = STEP - 1;
4104 else
4105 adj = STEP + 1;
4106 if ((__typeof (V)) -1 > 0 && cond is >)
4107 n = -(adj + N2 - N1) / -STEP;
4108 else
4109 n = (adj + N2 - N1) / STEP;
4110 q = n / nthreads;
4111 q += (q * nthreads != n);
4112 s0 = q * threadid;
4113 e0 = min(s0 + q, n);
4114 V = s0 * STEP + N1;
4115 if (s0 >= e0) goto L2; else goto L0;
4116 L0:
4117 e = e0 * STEP + N1;
4118 L1:
4119 BODY;
4120 V += STEP;
4121 if (V cond e) goto L1;
4122 L2:
4123 */
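/* For example, with n = 10 iterations and nthreads = 4 the above
   computes q = 10 / 4 = 2, then q += (2 * 4 != 10) gives q = 3, so
   thread 0 runs [0,3), thread 1 [3,6), thread 2 [6,9) and thread 3
   only [9,10), the min(s0 + q, n) clipping the last block.  */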
4124
4125 static void
4126 expand_omp_for_static_nochunk (struct omp_region *region,
4127 struct omp_for_data *fd)
4128 {
4129 tree n, q, s0, e0, e, t, nthreads, threadid;
4130 tree type, itype, vmain, vback;
4131 basic_block entry_bb, exit_bb, seq_start_bb, body_bb, cont_bb;
4132 basic_block fin_bb;
4133 gimple_stmt_iterator gsi;
4134 gimple stmt;
4135
4136 itype = type = TREE_TYPE (fd->loop.v);
4137 if (POINTER_TYPE_P (type))
4138 itype = lang_hooks.types.type_for_size (TYPE_PRECISION (type), 0);
4139
4140 entry_bb = region->entry;
4141 cont_bb = region->cont;
4142 gcc_assert (EDGE_COUNT (entry_bb->succs) == 2);
4143 gcc_assert (BRANCH_EDGE (entry_bb)->dest == FALLTHRU_EDGE (cont_bb)->dest);
4144 seq_start_bb = split_edge (FALLTHRU_EDGE (entry_bb));
4145 body_bb = single_succ (seq_start_bb);
4146 gcc_assert (BRANCH_EDGE (cont_bb)->dest == body_bb);
4147 gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
4148 fin_bb = FALLTHRU_EDGE (cont_bb)->dest;
4149 exit_bb = region->exit;
4150
4151 /* Iteration space partitioning goes in ENTRY_BB. */
4152 gsi = gsi_last_bb (entry_bb);
4153 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
4154
4155 t = build_call_expr (built_in_decls[BUILT_IN_OMP_GET_NUM_THREADS], 0);
4156 t = fold_convert (itype, t);
4157 nthreads = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
4158 true, GSI_SAME_STMT);
4159
4160 t = build_call_expr (built_in_decls[BUILT_IN_OMP_GET_THREAD_NUM], 0);
4161 t = fold_convert (itype, t);
4162 threadid = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
4163 true, GSI_SAME_STMT);
4164
4165 fd->loop.n1
4166 = force_gimple_operand_gsi (&gsi, fold_convert (type, fd->loop.n1),
4167 true, NULL_TREE, true, GSI_SAME_STMT);
4168 fd->loop.n2
4169 = force_gimple_operand_gsi (&gsi, fold_convert (itype, fd->loop.n2),
4170 true, NULL_TREE, true, GSI_SAME_STMT);
4171 fd->loop.step
4172 = force_gimple_operand_gsi (&gsi, fold_convert (itype, fd->loop.step),
4173 true, NULL_TREE, true, GSI_SAME_STMT);
4174
4175 t = build_int_cst (itype, (fd->loop.cond_code == LT_EXPR ? -1 : 1));
4176 t = fold_build2 (PLUS_EXPR, itype, fd->loop.step, t);
4177 t = fold_build2 (PLUS_EXPR, itype, t, fd->loop.n2);
4178 t = fold_build2 (MINUS_EXPR, itype, t, fold_convert (itype, fd->loop.n1));
4179 if (TYPE_UNSIGNED (itype) && fd->loop.cond_code == GT_EXPR)
4180 t = fold_build2 (TRUNC_DIV_EXPR, itype,
4181 fold_build1 (NEGATE_EXPR, itype, t),
4182 fold_build1 (NEGATE_EXPR, itype, fd->loop.step));
4183 else
4184 t = fold_build2 (TRUNC_DIV_EXPR, itype, t, fd->loop.step);
4185 t = fold_convert (itype, t);
4186 n = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);
4187
4188 t = fold_build2 (TRUNC_DIV_EXPR, itype, n, nthreads);
4189 q = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);
4190
4191 t = fold_build2 (MULT_EXPR, itype, q, nthreads);
4192 t = fold_build2 (NE_EXPR, itype, t, n);
4193 t = fold_build2 (PLUS_EXPR, itype, q, t);
4194 q = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);
4195
4196 t = build2 (MULT_EXPR, itype, q, threadid);
4197 s0 = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);
4198
4199 t = fold_build2 (PLUS_EXPR, itype, s0, q);
4200 t = fold_build2 (MIN_EXPR, itype, t, n);
4201 e0 = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);
4202
4203 t = build2 (GE_EXPR, boolean_type_node, s0, e0);
4204 gsi_insert_before (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT);
4205
4206 /* Remove the GIMPLE_OMP_FOR statement. */
4207 gsi_remove (&gsi, true);
4208
4209 /* Setup code for sequential iteration goes in SEQ_START_BB. */
4210 gsi = gsi_start_bb (seq_start_bb);
4211
4212 t = fold_convert (itype, s0);
4213 t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
4214 if (POINTER_TYPE_P (type))
4215 t = fold_build2 (POINTER_PLUS_EXPR, type, fd->loop.n1,
4216 fold_convert (sizetype, t));
4217 else
4218 t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
4219 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
4220 false, GSI_CONTINUE_LINKING);
4221 stmt = gimple_build_assign (fd->loop.v, t);
4222 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
4223
4224 t = fold_convert (itype, e0);
4225 t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
4226 if (POINTER_TYPE_P (type))
4227 t = fold_build2 (POINTER_PLUS_EXPR, type, fd->loop.n1,
4228 fold_convert (sizetype, t));
4229 else
4230 t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
4231 e = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
4232 false, GSI_CONTINUE_LINKING);
4233
4234 /* The code controlling the sequential loop replaces the
4235 GIMPLE_OMP_CONTINUE. */
4236 gsi = gsi_last_bb (cont_bb);
4237 stmt = gsi_stmt (gsi);
4238 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_CONTINUE);
4239 vmain = gimple_omp_continue_control_use (stmt);
4240 vback = gimple_omp_continue_control_def (stmt);
4241
4242 if (POINTER_TYPE_P (type))
4243 t = fold_build2 (POINTER_PLUS_EXPR, type, vmain,
4244 fold_convert (sizetype, fd->loop.step));
4245 else
4246 t = fold_build2 (PLUS_EXPR, type, vmain, fd->loop.step);
4247 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
4248 true, GSI_SAME_STMT);
4249 stmt = gimple_build_assign (vback, t);
4250 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
4251
4252 t = build2 (fd->loop.cond_code, boolean_type_node, vback, e);
4253 gsi_insert_before (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT);
4254
4255 /* Remove the GIMPLE_OMP_CONTINUE statement. */
4256 gsi_remove (&gsi, true);
4257
4258 /* Replace the GIMPLE_OMP_RETURN with a barrier, or nothing. */
4259 gsi = gsi_last_bb (exit_bb);
4260 if (!gimple_omp_return_nowait_p (gsi_stmt (gsi)))
4261 force_gimple_operand_gsi (&gsi, build_omp_barrier (), false, NULL_TREE,
4262 false, GSI_SAME_STMT);
4263 gsi_remove (&gsi, true);
4264
4265 /* Connect all the blocks. */
4266 find_edge (entry_bb, seq_start_bb)->flags = EDGE_FALSE_VALUE;
4267 find_edge (entry_bb, fin_bb)->flags = EDGE_TRUE_VALUE;
4268
4269 find_edge (cont_bb, body_bb)->flags = EDGE_TRUE_VALUE;
4270 find_edge (cont_bb, fin_bb)->flags = EDGE_FALSE_VALUE;
4271
4272 set_immediate_dominator (CDI_DOMINATORS, seq_start_bb, entry_bb);
4273 set_immediate_dominator (CDI_DOMINATORS, body_bb,
4274 recompute_dominator (CDI_DOMINATORS, body_bb));
4275 set_immediate_dominator (CDI_DOMINATORS, fin_bb,
4276 recompute_dominator (CDI_DOMINATORS, fin_bb));
4277 }
4278
4279
4280 /* A subroutine of expand_omp_for. Generate code for a parallel
4281 loop with static schedule and a specified chunk size. Given
4282 parameters:
4283
4284 for (V = N1; V cond N2; V += STEP) BODY;
4285
4286 where COND is "<" or ">", we generate pseudocode
4287
4288 if (cond is <)
4289 adj = STEP - 1;
4290 else
4291 adj = STEP + 1;
4292 if ((__typeof (V)) -1 > 0 && cond is >)
4293 n = -(adj + N2 - N1) / -STEP;
4294 else
4295 n = (adj + N2 - N1) / STEP;
4296 trip = 0;
4297 V = threadid * CHUNK * STEP + N1; -- this extra definition of V is
4298 here so that V is defined
4299 if the loop is not entered
4300 L0:
4301 s0 = (trip * nthreads + threadid) * CHUNK;
4302 e0 = min(s0 + CHUNK, n);
4303 if (s0 < n) goto L1; else goto L4;
4304 L1:
4305 V = s0 * STEP + N1;
4306 e = e0 * STEP + N1;
4307 L2:
4308 BODY;
4309 V += STEP;
4310 if (V cond e) goto L2; else goto L3;
4311 L3:
4312 trip += 1;
4313 goto L0;
4314 L4:
4315 */
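/* For example, with n = 10, nthreads = 2 and CHUNK = 3 the chunks are
   handed out round-robin: thread 0 takes s0 = (0*2+0)*3 = 0, i.e.
   [0,3), and on its next trip s0 = (1*2+0)*3 = 6, i.e. [6,9); thread 1
   takes [3,6) and then [9,10) (e0 clipped by min(s0 + CHUNK, n)).  On
   the following trip each thread computes s0 >= n and exits at L4.  */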
4316
4317 static void
4318 expand_omp_for_static_chunk (struct omp_region *region, struct omp_for_data *fd)
4319 {
4320 tree n, s0, e0, e, t;
4321 tree trip_var, trip_init, trip_main, trip_back, nthreads, threadid;
4322 tree type, itype, v_main, v_back, v_extra;
4323 basic_block entry_bb, exit_bb, body_bb, seq_start_bb, iter_part_bb;
4324 basic_block trip_update_bb, cont_bb, fin_bb;
4325 gimple_stmt_iterator si;
4326 gimple stmt;
4327 edge se;
4328
4329 itype = type = TREE_TYPE (fd->loop.v);
4330 if (POINTER_TYPE_P (type))
4331 itype = lang_hooks.types.type_for_size (TYPE_PRECISION (type), 0);
4332
4333 entry_bb = region->entry;
4334 se = split_block (entry_bb, last_stmt (entry_bb));
4335 entry_bb = se->src;
4336 iter_part_bb = se->dest;
4337 cont_bb = region->cont;
4338 gcc_assert (EDGE_COUNT (iter_part_bb->succs) == 2);
4339 gcc_assert (BRANCH_EDGE (iter_part_bb)->dest
4340 == FALLTHRU_EDGE (cont_bb)->dest);
4341 seq_start_bb = split_edge (FALLTHRU_EDGE (iter_part_bb));
4342 body_bb = single_succ (seq_start_bb);
4343 gcc_assert (BRANCH_EDGE (cont_bb)->dest == body_bb);
4344 gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
4345 fin_bb = FALLTHRU_EDGE (cont_bb)->dest;
4346 trip_update_bb = split_edge (FALLTHRU_EDGE (cont_bb));
4347 exit_bb = region->exit;
4348
4349 /* Trip and adjustment setup goes in ENTRY_BB. */
4350 si = gsi_last_bb (entry_bb);
4351 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_FOR);
4352
4353 t = build_call_expr (built_in_decls[BUILT_IN_OMP_GET_NUM_THREADS], 0);
4354 t = fold_convert (itype, t);
4355 nthreads = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4356 true, GSI_SAME_STMT);
4357
4358 t = build_call_expr (built_in_decls[BUILT_IN_OMP_GET_THREAD_NUM], 0);
4359 t = fold_convert (itype, t);
4360 threadid = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4361 true, GSI_SAME_STMT);
4362
4363 fd->loop.n1
4364 = force_gimple_operand_gsi (&si, fold_convert (type, fd->loop.n1),
4365 true, NULL_TREE, true, GSI_SAME_STMT);
4366 fd->loop.n2
4367 = force_gimple_operand_gsi (&si, fold_convert (itype, fd->loop.n2),
4368 true, NULL_TREE, true, GSI_SAME_STMT);
4369 fd->loop.step
4370 = force_gimple_operand_gsi (&si, fold_convert (itype, fd->loop.step),
4371 true, NULL_TREE, true, GSI_SAME_STMT);
4372 fd->chunk_size
4373 = force_gimple_operand_gsi (&si, fold_convert (itype, fd->chunk_size),
4374 true, NULL_TREE, true, GSI_SAME_STMT);
4375
4376 t = build_int_cst (itype, (fd->loop.cond_code == LT_EXPR ? -1 : 1));
4377 t = fold_build2 (PLUS_EXPR, itype, fd->loop.step, t);
4378 t = fold_build2 (PLUS_EXPR, itype, t, fd->loop.n2);
4379 t = fold_build2 (MINUS_EXPR, itype, t, fold_convert (itype, fd->loop.n1));
4380 if (TYPE_UNSIGNED (itype) && fd->loop.cond_code == GT_EXPR)
4381 t = fold_build2 (TRUNC_DIV_EXPR, itype,
4382 fold_build1 (NEGATE_EXPR, itype, t),
4383 fold_build1 (NEGATE_EXPR, itype, fd->loop.step));
4384 else
4385 t = fold_build2 (TRUNC_DIV_EXPR, itype, t, fd->loop.step);
4386 t = fold_convert (itype, t);
4387 n = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4388 true, GSI_SAME_STMT);
4389
4390 trip_var = create_tmp_var (itype, ".trip");
4391 if (gimple_in_ssa_p (cfun))
4392 {
4393 add_referenced_var (trip_var);
4394 trip_init = make_ssa_name (trip_var, NULL);
4395 trip_main = make_ssa_name (trip_var, NULL);
4396 trip_back = make_ssa_name (trip_var, NULL);
4397 }
4398 else
4399 {
4400 trip_init = trip_var;
4401 trip_main = trip_var;
4402 trip_back = trip_var;
4403 }
4404
4405 stmt = gimple_build_assign (trip_init, build_int_cst (itype, 0));
4406 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
4407
4408 t = fold_build2 (MULT_EXPR, itype, threadid, fd->chunk_size);
4409 t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
4410 if (POINTER_TYPE_P (type))
4411 t = fold_build2 (POINTER_PLUS_EXPR, type, fd->loop.n1,
4412 fold_convert (sizetype, t));
4413 else
4414 t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
4415 v_extra = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4416 true, GSI_SAME_STMT);
4417
4418 /* Remove the GIMPLE_OMP_FOR. */
4419 gsi_remove (&si, true);
4420
4421 /* Iteration space partitioning goes in ITER_PART_BB. */
4422 si = gsi_last_bb (iter_part_bb);
4423
4424 t = fold_build2 (MULT_EXPR, itype, trip_main, nthreads);
4425 t = fold_build2 (PLUS_EXPR, itype, t, threadid);
4426 t = fold_build2 (MULT_EXPR, itype, t, fd->chunk_size);
4427 s0 = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4428 false, GSI_CONTINUE_LINKING);
4429
4430 t = fold_build2 (PLUS_EXPR, itype, s0, fd->chunk_size);
4431 t = fold_build2 (MIN_EXPR, itype, t, n);
4432 e0 = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4433 false, GSI_CONTINUE_LINKING);
4434
4435 t = build2 (LT_EXPR, boolean_type_node, s0, n);
4436 gsi_insert_after (&si, gimple_build_cond_empty (t), GSI_CONTINUE_LINKING);
4437
4438 /* Setup code for sequential iteration goes in SEQ_START_BB. */
4439 si = gsi_start_bb (seq_start_bb);
4440
4441 t = fold_convert (itype, s0);
4442 t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
4443 if (POINTER_TYPE_P (type))
4444 t = fold_build2 (POINTER_PLUS_EXPR, type, fd->loop.n1,
4445 fold_convert (sizetype, t));
4446 else
4447 t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
4448 t = force_gimple_operand_gsi (&si, t, false, NULL_TREE,
4449 false, GSI_CONTINUE_LINKING);
4450 stmt = gimple_build_assign (fd->loop.v, t);
4451 gsi_insert_after (&si, stmt, GSI_CONTINUE_LINKING);
4452
4453 t = fold_convert (itype, e0);
4454 t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
4455 if (POINTER_TYPE_P (type))
4456 t = fold_build2 (POINTER_PLUS_EXPR, type, fd->loop.n1,
4457 fold_convert (sizetype, t));
4458 else
4459 t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
4460 e = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4461 false, GSI_CONTINUE_LINKING);
4462
4463 /* The code controlling the sequential loop goes in CONT_BB,
4464 replacing the GIMPLE_OMP_CONTINUE. */
4465 si = gsi_last_bb (cont_bb);
4466 stmt = gsi_stmt (si);
4467 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_CONTINUE);
4468 v_main = gimple_omp_continue_control_use (stmt);
4469 v_back = gimple_omp_continue_control_def (stmt);
4470
4471 if (POINTER_TYPE_P (type))
4472 t = fold_build2 (POINTER_PLUS_EXPR, type, v_main,
4473 fold_convert (sizetype, fd->loop.step));
4474 else
4475 t = fold_build2 (PLUS_EXPR, type, v_main, fd->loop.step);
4476 stmt = gimple_build_assign (v_back, t);
4477 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
4478
4479 t = build2 (fd->loop.cond_code, boolean_type_node, v_back, e);
4480 gsi_insert_before (&si, gimple_build_cond_empty (t), GSI_SAME_STMT);
4481
4482 /* Remove GIMPLE_OMP_CONTINUE. */
4483 gsi_remove (&si, true);
4484
4485 /* Trip update code goes into TRIP_UPDATE_BB. */
4486 si = gsi_start_bb (trip_update_bb);
4487
4488 t = build_int_cst (itype, 1);
4489 t = build2 (PLUS_EXPR, itype, trip_main, t);
4490 stmt = gimple_build_assign (trip_back, t);
4491 gsi_insert_after (&si, stmt, GSI_CONTINUE_LINKING);
4492
4493 /* Replace the GIMPLE_OMP_RETURN with a barrier, or nothing. */
4494 si = gsi_last_bb (exit_bb);
4495 if (!gimple_omp_return_nowait_p (gsi_stmt (si)))
4496 force_gimple_operand_gsi (&si, build_omp_barrier (), false, NULL_TREE,
4497 false, GSI_SAME_STMT);
4498 gsi_remove (&si, true);
4499
4500 /* Connect the new blocks. */
4501 find_edge (iter_part_bb, seq_start_bb)->flags = EDGE_TRUE_VALUE;
4502 find_edge (iter_part_bb, fin_bb)->flags = EDGE_FALSE_VALUE;
4503
4504 find_edge (cont_bb, body_bb)->flags = EDGE_TRUE_VALUE;
4505 find_edge (cont_bb, trip_update_bb)->flags = EDGE_FALSE_VALUE;
4506
4507 redirect_edge_and_branch (single_succ_edge (trip_update_bb), iter_part_bb);
4508
4509 if (gimple_in_ssa_p (cfun))
4510 {
4511 gimple_stmt_iterator psi;
4512 gimple phi;
4513 edge re, ene;
4514 edge_var_map_vector head;
4515 edge_var_map *vm;
4516 size_t i;
4517
4518 /* When we redirect the edge from trip_update_bb to iter_part_bb, we
4519 remove arguments of the phi nodes in fin_bb. We need to create
4520 appropriate phi nodes in iter_part_bb instead. */
4521 se = single_pred_edge (fin_bb);
4522 re = single_succ_edge (trip_update_bb);
4523 head = redirect_edge_var_map_vector (re);
4524 ene = single_succ_edge (entry_bb);
4525
4526 psi = gsi_start_phis (fin_bb);
4527 for (i = 0; !gsi_end_p (psi) && VEC_iterate (edge_var_map, head, i, vm);
4528 gsi_next (&psi), ++i)
4529 {
4530 gimple nphi;
4531 source_location locus;
4532
4533 phi = gsi_stmt (psi);
4534 t = gimple_phi_result (phi);
4535 gcc_assert (t == redirect_edge_var_map_result (vm));
4536 nphi = create_phi_node (t, iter_part_bb);
4537 SSA_NAME_DEF_STMT (t) = nphi;
4538
4539 t = PHI_ARG_DEF_FROM_EDGE (phi, se);
4540 locus = gimple_phi_arg_location_from_edge (phi, se);
4541
4542 /* A special case -- fd->loop.v is not yet computed in
4543 iter_part_bb, we need to use v_extra instead. */
4544 if (t == fd->loop.v)
4545 t = v_extra;
4546 add_phi_arg (nphi, t, ene, locus);
4547 locus = redirect_edge_var_map_location (vm);
4548 add_phi_arg (nphi, redirect_edge_var_map_def (vm), re, locus);
4549 }
4550 gcc_assert (!gsi_end_p (psi) && i == VEC_length (edge_var_map, head));
4551 redirect_edge_var_map_clear (re);
4552 while (1)
4553 {
4554 psi = gsi_start_phis (fin_bb);
4555 if (gsi_end_p (psi))
4556 break;
4557 remove_phi_node (&psi, false);
4558 }
4559
4560 /* Make phi node for trip. */
4561 phi = create_phi_node (trip_main, iter_part_bb);
4562 SSA_NAME_DEF_STMT (trip_main) = phi;
4563 add_phi_arg (phi, trip_back, single_succ_edge (trip_update_bb),
4564 UNKNOWN_LOCATION);
4565 add_phi_arg (phi, trip_init, single_succ_edge (entry_bb),
4566 UNKNOWN_LOCATION);
4567 }
4568
4569 set_immediate_dominator (CDI_DOMINATORS, trip_update_bb, cont_bb);
4570 set_immediate_dominator (CDI_DOMINATORS, iter_part_bb,
4571 recompute_dominator (CDI_DOMINATORS, iter_part_bb));
4572 set_immediate_dominator (CDI_DOMINATORS, fin_bb,
4573 recompute_dominator (CDI_DOMINATORS, fin_bb));
4574 set_immediate_dominator (CDI_DOMINATORS, seq_start_bb,
4575 recompute_dominator (CDI_DOMINATORS, seq_start_bb));
4576 set_immediate_dominator (CDI_DOMINATORS, body_bb,
4577 recompute_dominator (CDI_DOMINATORS, body_bb));
4578 }
4579
4580
4581 /* Expand the OpenMP loop defined by REGION. */
4582
4583 static void
4584 expand_omp_for (struct omp_region *region)
4585 {
4586 struct omp_for_data fd;
4587 struct omp_for_data_loop *loops;
4588
4589 loops
4590 = (struct omp_for_data_loop *)
4591 alloca (gimple_omp_for_collapse (last_stmt (region->entry))
4592 * sizeof (struct omp_for_data_loop));
4593 extract_omp_for_data (last_stmt (region->entry), &fd, loops);
4594 region->sched_kind = fd.sched_kind;
4595
4596 gcc_assert (EDGE_COUNT (region->entry->succs) == 2);
4597 BRANCH_EDGE (region->entry)->flags &= ~EDGE_ABNORMAL;
4598 FALLTHRU_EDGE (region->entry)->flags &= ~EDGE_ABNORMAL;
4599 if (region->cont)
4600 {
4601 gcc_assert (EDGE_COUNT (region->cont->succs) == 2);
4602 BRANCH_EDGE (region->cont)->flags &= ~EDGE_ABNORMAL;
4603 FALLTHRU_EDGE (region->cont)->flags &= ~EDGE_ABNORMAL;
4604 }
4605
4606 if (fd.sched_kind == OMP_CLAUSE_SCHEDULE_STATIC
4607 && !fd.have_ordered
4608 && fd.collapse == 1
4609 && region->cont != NULL)
4610 {
4611 if (fd.chunk_size == NULL)
4612 expand_omp_for_static_nochunk (region, &fd);
4613 else
4614 expand_omp_for_static_chunk (region, &fd);
4615 }
4616 else
4617 {
4618 int fn_index, start_ix, next_ix;
4619
4620 gcc_assert (fd.sched_kind != OMP_CLAUSE_SCHEDULE_AUTO);
4621 fn_index = (fd.sched_kind == OMP_CLAUSE_SCHEDULE_RUNTIME)
4622 ? 3 : fd.sched_kind;
4623 fn_index += fd.have_ordered * 4;
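      /* This arithmetic assumes OMP_CLAUSE_SCHEDULE_STATIC, _DYNAMIC
         and _GUIDED have the values 0, 1 and 2, and that the
         BUILT_IN_GOMP_LOOP_*_START/_NEXT decls are laid out
         consecutively in the order static, dynamic, guided, runtime,
         followed by the four ordered variants in the same order.  */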
4624 start_ix = BUILT_IN_GOMP_LOOP_STATIC_START + fn_index;
4625 next_ix = BUILT_IN_GOMP_LOOP_STATIC_NEXT + fn_index;
4626 if (fd.iter_type == long_long_unsigned_type_node)
4627 {
4628 start_ix += BUILT_IN_GOMP_LOOP_ULL_STATIC_START
4629 - BUILT_IN_GOMP_LOOP_STATIC_START;
4630 next_ix += BUILT_IN_GOMP_LOOP_ULL_STATIC_NEXT
4631 - BUILT_IN_GOMP_LOOP_STATIC_NEXT;
4632 }
4633 expand_omp_for_generic (region, &fd, (enum built_in_function) start_ix,
4634 (enum built_in_function) next_ix);
4635 }
4636
4637 update_ssa (TODO_update_ssa_only_virtuals);
4638 }
4639
4640
4641 /* Expand code for an OpenMP sections directive. In pseudo code, we generate
4642
4643 v = GOMP_sections_start (n);
4644 L0:
4645 switch (v)
4646 {
4647 case 0:
4648 goto L2;
4649 case 1:
4650 section 1;
4651 goto L1;
4652 case 2:
4653 ...
4654 case n:
4655 ...
4656 default:
4657 abort ();
4658 }
4659 L1:
4660 v = GOMP_sections_next ();
4661 goto L0;
4662 L2:
4663 reduction;
4664
4665 If this is a combined parallel sections, replace the call to
4666 GOMP_sections_start with call to GOMP_sections_next. */
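/* Equivalently, at the source level (a sketch; in libgomp,
   GOMP_sections_start returns the number of the first section to
   execute and GOMP_sections_next that of each following one, both
   returning 0 once no sections remain):

       for (v = GOMP_sections_start (n); v; v = GOMP_sections_next ())
         switch (v) { case 1: section 1; break; ... }
       GOMP_sections_end ();  */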
4667
4668 static void
4669 expand_omp_sections (struct omp_region *region)
4670 {
4671 tree t, u, vin = NULL, vmain, vnext, l2;
4672 VEC (tree,heap) *label_vec;
4673 unsigned len;
4674 basic_block entry_bb, l0_bb, l1_bb, l2_bb, default_bb;
4675 gimple_stmt_iterator si, switch_si;
4676 gimple sections_stmt, stmt, cont;
4677 edge_iterator ei;
4678 edge e;
4679 struct omp_region *inner;
4680 unsigned i, casei;
4681 bool exit_reachable = region->cont != NULL;
4682
4683 gcc_assert (exit_reachable == (region->exit != NULL));
4684 entry_bb = region->entry;
4685 l0_bb = single_succ (entry_bb);
4686 l1_bb = region->cont;
4687 l2_bb = region->exit;
4688 if (exit_reachable)
4689 {
4690 if (single_pred_p (l2_bb) && single_pred (l2_bb) == l0_bb)
4691 l2 = gimple_block_label (l2_bb);
4692 else
4693 {
4694 /* This can happen if there are reductions. */
4695 len = EDGE_COUNT (l0_bb->succs);
4696 gcc_assert (len > 0);
4697 e = EDGE_SUCC (l0_bb, len - 1);
4698 si = gsi_last_bb (e->dest);
4699 l2 = NULL_TREE;
4700 if (gsi_end_p (si)
4701 || gimple_code (gsi_stmt (si)) != GIMPLE_OMP_SECTION)
4702 l2 = gimple_block_label (e->dest);
4703 else
4704 FOR_EACH_EDGE (e, ei, l0_bb->succs)
4705 {
4706 si = gsi_last_bb (e->dest);
4707 if (gsi_end_p (si)
4708 || gimple_code (gsi_stmt (si)) != GIMPLE_OMP_SECTION)
4709 {
4710 l2 = gimple_block_label (e->dest);
4711 break;
4712 }
4713 }
4714 }
4715 default_bb = create_empty_bb (l1_bb->prev_bb);
4716 }
4717 else
4718 {
4719 default_bb = create_empty_bb (l0_bb);
4720 l2 = gimple_block_label (default_bb);
4721 }
4722
4723 /* We will build a switch() with enough cases for all the
4724 GIMPLE_OMP_SECTION regions, a '0' case to handle the end of more work
4725 and a default case to abort if something goes wrong. */
4726 len = EDGE_COUNT (l0_bb->succs);
4727
4728 /* Use VEC_quick_push on label_vec throughout, since we know the size
4729 in advance. */
4730 label_vec = VEC_alloc (tree, heap, len);
4731
4732 /* The call to GOMP_sections_start goes in ENTRY_BB, replacing the
4733 GIMPLE_OMP_SECTIONS statement. */
4734 si = gsi_last_bb (entry_bb);
4735 sections_stmt = gsi_stmt (si);
4736 gcc_assert (gimple_code (sections_stmt) == GIMPLE_OMP_SECTIONS);
4737 vin = gimple_omp_sections_control (sections_stmt);
4738 if (!is_combined_parallel (region))
4739 {
4740 /* If we are not inside a combined parallel+sections region,
4741 call GOMP_sections_start. */
4742 t = build_int_cst (unsigned_type_node,
4743 exit_reachable ? len - 1 : len);
4744 u = built_in_decls[BUILT_IN_GOMP_SECTIONS_START];
4745 stmt = gimple_build_call (u, 1, t);
4746 }
4747 else
4748 {
4749 /* Otherwise, call GOMP_sections_next. */
4750 u = built_in_decls[BUILT_IN_GOMP_SECTIONS_NEXT];
4751 stmt = gimple_build_call (u, 0);
4752 }
4753 gimple_call_set_lhs (stmt, vin);
4754 gsi_insert_after (&si, stmt, GSI_SAME_STMT);
4755 gsi_remove (&si, true);
4756
4757 /* The switch() statement replacing GIMPLE_OMP_SECTIONS_SWITCH goes in
4758 L0_BB. */
4759 switch_si = gsi_last_bb (l0_bb);
4760 gcc_assert (gimple_code (gsi_stmt (switch_si)) == GIMPLE_OMP_SECTIONS_SWITCH);
4761 if (exit_reachable)
4762 {
4763 cont = last_stmt (l1_bb);
4764 gcc_assert (gimple_code (cont) == GIMPLE_OMP_CONTINUE);
4765 vmain = gimple_omp_continue_control_use (cont);
4766 vnext = gimple_omp_continue_control_def (cont);
4767 }
4768 else
4769 {
4770 vmain = vin;
4771 vnext = NULL_TREE;
4772 }
4773
4774 i = 0;
4775 if (exit_reachable)
4776 {
4777 t = build_case_label (build_int_cst (unsigned_type_node, 0), NULL, l2);
4778 VEC_quick_push (tree, label_vec, t);
4779 i++;
4780 }
4781
4782 /* Convert each GIMPLE_OMP_SECTION into a CASE_LABEL_EXPR. */
4783 for (inner = region->inner, casei = 1;
4784 inner;
4785 inner = inner->next, i++, casei++)
4786 {
4787 basic_block s_entry_bb, s_exit_bb;
4788
4789 /* Skip optional reduction region. */
4790 if (inner->type == GIMPLE_OMP_ATOMIC_LOAD)
4791 {
4792 --i;
4793 --casei;
4794 continue;
4795 }
4796
4797 s_entry_bb = inner->entry;
4798 s_exit_bb = inner->exit;
4799
4800 t = gimple_block_label (s_entry_bb);
4801 u = build_int_cst (unsigned_type_node, casei);
4802 u = build_case_label (u, NULL, t);
4803 VEC_quick_push (tree, label_vec, u);
4804
4805 si = gsi_last_bb (s_entry_bb);
4806 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SECTION);
4807 gcc_assert (i < len || gimple_omp_section_last_p (gsi_stmt (si)));
4808 gsi_remove (&si, true);
4809 single_succ_edge (s_entry_bb)->flags = EDGE_FALLTHRU;
4810
4811 if (s_exit_bb == NULL)
4812 continue;
4813
4814 si = gsi_last_bb (s_exit_bb);
4815 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_RETURN);
4816 gsi_remove (&si, true);
4817
4818 single_succ_edge (s_exit_bb)->flags = EDGE_FALLTHRU;
4819 }
4820
4821 /* Error handling code goes in DEFAULT_BB. */
4822 t = gimple_block_label (default_bb);
4823 u = build_case_label (NULL, NULL, t);
4824 make_edge (l0_bb, default_bb, 0);
4825
4826 stmt = gimple_build_switch_vec (vmain, u, label_vec);
4827 gsi_insert_after (&switch_si, stmt, GSI_SAME_STMT);
4828 gsi_remove (&switch_si, true);
4829 VEC_free (tree, heap, label_vec);
4830
4831 si = gsi_start_bb (default_bb);
4832 stmt = gimple_build_call (built_in_decls[BUILT_IN_TRAP], 0);
4833 gsi_insert_after (&si, stmt, GSI_CONTINUE_LINKING);
4834
4835 if (exit_reachable)
4836 {
4837 /* Code to get the next section goes in L1_BB. */
4838 si = gsi_last_bb (l1_bb);
4839 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_CONTINUE);
4840
4841 stmt = gimple_build_call (built_in_decls[BUILT_IN_GOMP_SECTIONS_NEXT], 0);
4842 gimple_call_set_lhs (stmt, vnext);
4843 gsi_insert_after (&si, stmt, GSI_SAME_STMT);
4844 gsi_remove (&si, true);
4845
4846 single_succ_edge (l1_bb)->flags = EDGE_FALLTHRU;
4847
4848 /* Cleanup function replaces GIMPLE_OMP_RETURN in EXIT_BB. */
4849 si = gsi_last_bb (l2_bb);
4850 if (gimple_omp_return_nowait_p (gsi_stmt (si)))
4851 t = built_in_decls[BUILT_IN_GOMP_SECTIONS_END_NOWAIT];
4852 else
4853 t = built_in_decls[BUILT_IN_GOMP_SECTIONS_END];
4854 stmt = gimple_build_call (t, 0);
4855 gsi_insert_after (&si, stmt, GSI_SAME_STMT);
4856 gsi_remove (&si, true);
4857 }
4858
4859 set_immediate_dominator (CDI_DOMINATORS, default_bb, l0_bb);
4860 }
4861
4862
4863 /* Expand code for an OpenMP single directive. We've already expanded
4864 much of the code, here we simply place the GOMP_barrier call. */
4865
4866 static void
4867 expand_omp_single (struct omp_region *region)
4868 {
4869 basic_block entry_bb, exit_bb;
4870 gimple_stmt_iterator si;
4871 bool need_barrier = false;
4872
4873 entry_bb = region->entry;
4874 exit_bb = region->exit;
4875
4876 si = gsi_last_bb (entry_bb);
4877 /* The terminal barrier at the end of a GOMP_single_copy sequence cannot
4878 be removed. We need to ensure that the thread that entered the single
4879 does not exit before the data is copied out by the other threads. */
4880 if (find_omp_clause (gimple_omp_single_clauses (gsi_stmt (si)),
4881 OMP_CLAUSE_COPYPRIVATE))
4882 need_barrier = true;
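  /* (The copy-out itself was emitted during lowering, via
     GOMP_single_copy_start/GOMP_single_copy_end; all that is decided
     here is whether the trailing barrier may be elided.)  */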
4883 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SINGLE);
4884 gsi_remove (&si, true);
4885 single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
4886
4887 si = gsi_last_bb (exit_bb);
4888 if (!gimple_omp_return_nowait_p (gsi_stmt (si)) || need_barrier)
4889 force_gimple_operand_gsi (&si, build_omp_barrier (), false, NULL_TREE,
4890 false, GSI_SAME_STMT);
4891 gsi_remove (&si, true);
4892 single_succ_edge (exit_bb)->flags = EDGE_FALLTHRU;
4893 }
4894
4895
4896 /* Generic expansion for OpenMP synchronization directives: master,
4897 ordered and critical. All we need to do here is remove the entry
4898 and exit markers for REGION. */
4899
4900 static void
4901 expand_omp_synch (struct omp_region *region)
4902 {
4903 basic_block entry_bb, exit_bb;
4904 gimple_stmt_iterator si;
4905
4906 entry_bb = region->entry;
4907 exit_bb = region->exit;
4908
4909 si = gsi_last_bb (entry_bb);
4910 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SINGLE
4911 || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_MASTER
4912 || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ORDERED
4913 || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_CRITICAL);
4914 gsi_remove (&si, true);
4915 single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
4916
4917 if (exit_bb)
4918 {
4919 si = gsi_last_bb (exit_bb);
4920 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_RETURN);
4921 gsi_remove (&si, true);
4922 single_succ_edge (exit_bb)->flags = EDGE_FALLTHRU;
4923 }
4924 }
4925
4926 /* A subroutine of expand_omp_atomic. Attempt to implement the atomic
4927 operation as a __sync_fetch_and_op builtin. INDEX is log2 of the
4928 size of the data type, and thus usable to find the index of the builtin
4929 decl. Returns false if the expression is not of the proper form. */
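/* For example, a 4-byte int has INDEX == 2, and for PLUS_EXPR the
   lookup below, built_in_decls[base + index + 1], selects
   BUILT_IN_FETCH_AND_ADD_4 (__sync_fetch_and_add_4); the "+ 1" skips
   the type-generic _N entry that precedes the sized variants.  */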
4930
4931 static bool
4932 expand_omp_atomic_fetch_op (basic_block load_bb,
4933 tree addr, tree loaded_val,
4934 tree stored_val, int index)
4935 {
4936 enum built_in_function base;
4937 tree decl, itype, call;
4938 direct_optab optab;
4939 tree rhs;
4940 basic_block store_bb = single_succ (load_bb);
4941 gimple_stmt_iterator gsi;
4942 gimple stmt;
4943 location_t loc;
4944
4945 /* We expect to find the following sequences:
4946
4947 load_bb:
4948 GIMPLE_OMP_ATOMIC_LOAD (tmp, mem)
4949
4950 store_bb:
4951 val = tmp OP something; (or: something OP tmp)
4952 GIMPLE_OMP_ATOMIC_STORE (val)
4953
4954 ???FIXME: Allow a more flexible sequence.
4955 Perhaps use data flow to pick the statements.
4956
4957 */
4958
4959 gsi = gsi_after_labels (store_bb);
4960 stmt = gsi_stmt (gsi);
4961 loc = gimple_location (stmt);
4962 if (!is_gimple_assign (stmt))
4963 return false;
4964 gsi_next (&gsi);
4965 if (gimple_code (gsi_stmt (gsi)) != GIMPLE_OMP_ATOMIC_STORE)
4966 return false;
4967
4968 if (!operand_equal_p (gimple_assign_lhs (stmt), stored_val, 0))
4969 return false;
4970
4971 /* Check for one of the supported fetch-op operations. */
4972 switch (gimple_assign_rhs_code (stmt))
4973 {
4974 case PLUS_EXPR:
4975 case POINTER_PLUS_EXPR:
4976 base = BUILT_IN_FETCH_AND_ADD_N;
4977 optab = sync_add_optab;
4978 break;
4979 case MINUS_EXPR:
4980 base = BUILT_IN_FETCH_AND_SUB_N;
4981 optab = sync_add_optab;
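      /* Checking sync_add_optab rather than a sub optab here appears
         intentional: the expander can implement fetch-and-sub as
         fetch-and-add of the negated operand.  */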
4982 break;
4983 case BIT_AND_EXPR:
4984 base = BUILT_IN_FETCH_AND_AND_N;
4985 optab = sync_and_optab;
4986 break;
4987 case BIT_IOR_EXPR:
4988 base = BUILT_IN_FETCH_AND_OR_N;
4989 optab = sync_ior_optab;
4990 break;
4991 case BIT_XOR_EXPR:
4992 base = BUILT_IN_FETCH_AND_XOR_N;
4993 optab = sync_xor_optab;
4994 break;
4995 default:
4996 return false;
4997 }
4998 /* Make sure the expression is of the proper form. */
4999 if (operand_equal_p (gimple_assign_rhs1 (stmt), loaded_val, 0))
5000 rhs = gimple_assign_rhs2 (stmt);
5001 else if (commutative_tree_code (gimple_assign_rhs_code (stmt))
5002 && operand_equal_p (gimple_assign_rhs2 (stmt), loaded_val, 0))
5003 rhs = gimple_assign_rhs1 (stmt);
5004 else
5005 return false;
5006
5007 decl = built_in_decls[base + index + 1];
5008 if (decl == NULL_TREE)
5009 return false;
5010 itype = TREE_TYPE (TREE_TYPE (decl));
5011
5012 if (direct_optab_handler (optab, TYPE_MODE (itype)) == CODE_FOR_nothing)
5013 return false;
5014
5015 gsi = gsi_last_bb (load_bb);
5016 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_ATOMIC_LOAD);
5017 call = build_call_expr_loc (loc,
5018 decl, 2, addr,
5019 fold_convert_loc (loc, itype, rhs));
5020 call = fold_convert_loc (loc, void_type_node, call);
5021 force_gimple_operand_gsi (&gsi, call, true, NULL_TREE, true, GSI_SAME_STMT);
5022 gsi_remove (&gsi, true);
5023
5024 gsi = gsi_last_bb (store_bb);
5025 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_ATOMIC_STORE);
5026 gsi_remove (&gsi, true);
5027 gsi = gsi_last_bb (store_bb);
5028 gsi_remove (&gsi, true);
5029
5030 if (gimple_in_ssa_p (cfun))
5031 update_ssa (TODO_update_ssa_no_phi);
5032
5033 return true;
5034 }
5035
5036 /* A subroutine of expand_omp_atomic. Implement the atomic operation as:
5037
5038 oldval = *addr;
5039 repeat:
5040 newval = rhs; // with oldval replacing *addr in rhs
5041 oldval = __sync_val_compare_and_swap (addr, oldval, newval);
5042 if (oldval != newval)
5043 goto repeat;
5044
5045 INDEX is log2 of the size of the data type, and thus usable to find the
5046 index of the builtin decl. */
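/* For an integral "x += y" the loop is equivalent to this user-level
   sketch (the code below additionally handles SSA form and
   view-converts floating-point values through an integer type):

       int cur = *addr, old;
       do
         {
           old = cur;
           cur = __sync_val_compare_and_swap (addr, old, old + y);
         }
       while (cur != old);  */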
5047
5048 static bool
5049 expand_omp_atomic_pipeline (basic_block load_bb, basic_block store_bb,
5050 tree addr, tree loaded_val, tree stored_val,
5051 int index)
5052 {
5053 tree loadedi, storedi, initial, new_storedi, old_vali;
5054 tree type, itype, cmpxchg, iaddr;
5055 gimple_stmt_iterator si;
5056 basic_block loop_header = single_succ (load_bb);
5057 gimple phi, stmt;
5058 edge e;
5059
5060 cmpxchg = built_in_decls[BUILT_IN_VAL_COMPARE_AND_SWAP_N + index + 1];
5061 if (cmpxchg == NULL_TREE)
5062 return false;
5063 type = TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (addr)));
5064 itype = TREE_TYPE (TREE_TYPE (cmpxchg));
5065
5066 if (direct_optab_handler (sync_compare_and_swap_optab, TYPE_MODE (itype))
5067 == CODE_FOR_nothing)
5068 return false;
5069
5070 /* Load the initial value, replacing the GIMPLE_OMP_ATOMIC_LOAD. */
5071 si = gsi_last_bb (load_bb);
5072 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_LOAD);
5073
5074 /* For floating-point values, we'll need to view-convert them to integers
5075 so that we can perform the atomic compare and swap. Simplify the
5076 following code by always setting up the "i"ntegral variables. */
5077 if (!INTEGRAL_TYPE_P (type) && !POINTER_TYPE_P (type))
5078 {
5079 tree iaddr_val;
5080
5081 iaddr = create_tmp_var (build_pointer_type_for_mode (itype, ptr_mode,
5082 true), NULL);
5083 iaddr_val
5084 = force_gimple_operand_gsi (&si,
5085 fold_convert (TREE_TYPE (iaddr), addr),
5086 false, NULL_TREE, true, GSI_SAME_STMT);
5087 stmt = gimple_build_assign (iaddr, iaddr_val);
5088 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
5089 loadedi = create_tmp_var (itype, NULL);
5090 if (gimple_in_ssa_p (cfun))
5091 {
5092 add_referenced_var (iaddr);
5093 add_referenced_var (loadedi);
5094 loadedi = make_ssa_name (loadedi, NULL);
5095 }
5096 }
5097 else
5098 {
5099 iaddr = addr;
5100 loadedi = loaded_val;
5101 }
5102
5103 initial
5104 = force_gimple_operand_gsi (&si,
5105 build2 (MEM_REF, TREE_TYPE (TREE_TYPE (iaddr)),
5106 iaddr,
5107 build_int_cst (TREE_TYPE (iaddr), 0)),
5108 true, NULL_TREE, true, GSI_SAME_STMT);
5109
5110 /* Move the value to the LOADEDI temporary. */
5111 if (gimple_in_ssa_p (cfun))
5112 {
5113 gcc_assert (gimple_seq_empty_p (phi_nodes (loop_header)));
5114 phi = create_phi_node (loadedi, loop_header);
5115 SSA_NAME_DEF_STMT (loadedi) = phi;
5116 SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, single_succ_edge (load_bb)),
5117 initial);
5118 }
5119 else
5120 gsi_insert_before (&si,
5121 gimple_build_assign (loadedi, initial),
5122 GSI_SAME_STMT);
5123 if (loadedi != loaded_val)
5124 {
5125 gimple_stmt_iterator gsi2;
5126 tree x;
5127
5128 x = build1 (VIEW_CONVERT_EXPR, type, loadedi);
5129 gsi2 = gsi_start_bb (loop_header);
5130 if (gimple_in_ssa_p (cfun))
5131 {
5132 gimple stmt;
5133 x = force_gimple_operand_gsi (&gsi2, x, true, NULL_TREE,
5134 true, GSI_SAME_STMT);
5135 stmt = gimple_build_assign (loaded_val, x);
5136 gsi_insert_before (&gsi2, stmt, GSI_SAME_STMT);
5137 }
5138 else
5139 {
5140 x = build2 (MODIFY_EXPR, TREE_TYPE (loaded_val), loaded_val, x);
5141 force_gimple_operand_gsi (&gsi2, x, true, NULL_TREE,
5142 true, GSI_SAME_STMT);
5143 }
5144 }
5145 gsi_remove (&si, true);
5146
5147 si = gsi_last_bb (store_bb);
5148 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_STORE);
5149
5150 if (iaddr == addr)
5151 storedi = stored_val;
5152 else
5153 storedi =
5154 force_gimple_operand_gsi (&si,
5155 build1 (VIEW_CONVERT_EXPR, itype,
5156 stored_val), true, NULL_TREE, true,
5157 GSI_SAME_STMT);
5158
5159 /* Build the compare&swap statement. */
5160 new_storedi = build_call_expr (cmpxchg, 3, iaddr, loadedi, storedi);
5161 new_storedi = force_gimple_operand_gsi (&si,
5162 fold_convert (TREE_TYPE (loadedi),
5163 new_storedi),
5164 true, NULL_TREE,
5165 true, GSI_SAME_STMT);
5166
5167 if (gimple_in_ssa_p (cfun))
5168 old_vali = loadedi;
5169 else
5170 {
5171 old_vali = create_tmp_var (TREE_TYPE (loadedi), NULL);
5172 if (gimple_in_ssa_p (cfun))
5173 add_referenced_var (old_vali);
5174 stmt = gimple_build_assign (old_vali, loadedi);
5175 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
5176
5177 stmt = gimple_build_assign (loadedi, new_storedi);
5178 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
5179 }
5180
5181 /* Note that we always perform the comparison as an integer, even for
5182 floating point. This allows the atomic operation to properly
5183 succeed even with NaNs and -0.0. */
5184 stmt = gimple_build_cond_empty
5185 (build2 (NE_EXPR, boolean_type_node,
5186 new_storedi, old_vali));
5187 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
5188
5189 /* Update cfg. */
5190 e = single_succ_edge (store_bb);
5191 e->flags &= ~EDGE_FALLTHRU;
5192 e->flags |= EDGE_FALSE_VALUE;
5193
5194 e = make_edge (store_bb, loop_header, EDGE_TRUE_VALUE);
5195
5196 /* Copy the new value to loadedi (we already did that before the condition
5197 if we are not in SSA). */
5198 if (gimple_in_ssa_p (cfun))
5199 {
5200 phi = gimple_seq_first_stmt (phi_nodes (loop_header));
5201 SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, e), new_storedi);
5202 }
5203
5204 /* Remove GIMPLE_OMP_ATOMIC_STORE. */
5205 gsi_remove (&si, true);
5206
5207 if (gimple_in_ssa_p (cfun))
5208 update_ssa (TODO_update_ssa_no_phi);
5209
5210 return true;
5211 }
5212
5213 /* A subroutine of expand_omp_atomic. Implement the atomic operation as:
5214
5215 GOMP_atomic_start ();
5216 *addr = rhs;
5217 GOMP_atomic_end ();
5218
5219 The result is not globally atomic, but works so long as all parallel
5220 references are within #pragma omp atomic directives. According to
5221 responses received from omp@openmp.org, this appears to be within
5222 spec; it is also how several other compilers handle this
5223 situation.
5224 LOADED_VAL and ADDR are the operands of GIMPLE_OMP_ATOMIC_LOAD we're
5225 expanding. STORED_VAL is the operand of the matching
5226 GIMPLE_OMP_ATOMIC_STORE.
5227
5228 We replace
5229 GIMPLE_OMP_ATOMIC_LOAD (loaded_val, addr) with
5230 loaded_val = *addr;
5231
5232 and replace
5233 GIMPLE_OMP_ATOMIC_STORE (stored_val) with
5234 *addr = stored_val;
5235 */
5236
5237 static bool
5238 expand_omp_atomic_mutex (basic_block load_bb, basic_block store_bb,
5239 tree addr, tree loaded_val, tree stored_val)
5240 {
5241 gimple_stmt_iterator si;
5242 gimple stmt;
5243 tree t;
5244
5245 si = gsi_last_bb (load_bb);
5246 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_LOAD);
5247
5248 t = built_in_decls[BUILT_IN_GOMP_ATOMIC_START];
5249 t = build_call_expr (t, 0);
5250 force_gimple_operand_gsi (&si, t, true, NULL_TREE, true, GSI_SAME_STMT);
5251
5252 stmt = gimple_build_assign (loaded_val, build_simple_mem_ref (addr));
5253 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
5254 gsi_remove (&si, true);
5255
5256 si = gsi_last_bb (store_bb);
5257 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_STORE);
5258
5259 stmt = gimple_build_assign (build_simple_mem_ref (unshare_expr (addr)),
5260 stored_val);
5261 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
5262
5263 t = built_in_decls[BUILT_IN_GOMP_ATOMIC_END];
5264 t = build_call_expr (t, 0);
5265 force_gimple_operand_gsi (&si, t, true, NULL_TREE, true, GSI_SAME_STMT);
5266 gsi_remove (&si, true);
5267
5268 if (gimple_in_ssa_p (cfun))
5269 update_ssa (TODO_update_ssa_no_phi);
5270 return true;
5271 }
5272
5273 /* Expand an GIMPLE_OMP_ATOMIC statement. We try to expand
5274 using expand_omp_atomic_fetch_op. If it failed, we try to
5275 call expand_omp_atomic_pipeline, and if it fails too, the
5276 ultimate fallback is wrapping the operation in a mutex
5277 (expand_omp_atomic_mutex). REGION is the atomic region built
5278 by build_omp_regions_1(). */
5279
5280 static void
5281 expand_omp_atomic (struct omp_region *region)
5282 {
5283 basic_block load_bb = region->entry, store_bb = region->exit;
5284 gimple load = last_stmt (load_bb), store = last_stmt (store_bb);
5285 tree loaded_val = gimple_omp_atomic_load_lhs (load);
5286 tree addr = gimple_omp_atomic_load_rhs (load);
5287 tree stored_val = gimple_omp_atomic_store_val (store);
5288 tree type = TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (addr)));
5289 HOST_WIDE_INT index;
5290
5291 /* Make sure the type is one of the supported sizes (1, 2, 4, 8 or 16 bytes). */
5292 index = tree_low_cst (TYPE_SIZE_UNIT (type), 1);
5293 index = exact_log2 (index);
5294 if (index >= 0 && index <= 4)
5295 {
5296 unsigned int align = TYPE_ALIGN_UNIT (type);
5297
5298 /* __sync builtins require strict data alignment. */
5299 if (exact_log2 (align) >= index)
5300 {
5301 /* When possible, use specialized atomic update functions. */
5302 if ((INTEGRAL_TYPE_P (type) || POINTER_TYPE_P (type))
5303 && store_bb == single_succ (load_bb))
5304 {
5305 if (expand_omp_atomic_fetch_op (load_bb, addr,
5306 loaded_val, stored_val, index))
5307 return;
5308 }
5309
5310 /* If we don't have specialized __sync builtins, try and implement
5311 as a compare and swap loop. */
5312 if (expand_omp_atomic_pipeline (load_bb, store_bb, addr,
5313 loaded_val, stored_val, index))
5314 return;
5315 }
5316 }
5317
5318 /* The ultimate fallback is wrapping the operation in a mutex. */
5319 expand_omp_atomic_mutex (load_bb, store_bb, addr, loaded_val, stored_val);
5320 }
5321
5322
5323 /* Expand the parallel region tree rooted at REGION. Expansion
5324 proceeds in depth-first order. Innermost regions are expanded
5325 first. This way, parallel regions that require a new function to
5326 be created (e.g., GIMPLE_OMP_PARALLEL) can be expanded without having any
5327 internal dependencies in their body. */
5328
5329 static void
5330 expand_omp (struct omp_region *region)
5331 {
5332 while (region)
5333 {
5334 location_t saved_location;
5335
5336 /* First, determine whether this is a combined parallel+workshare
5337 region. */
5338 if (region->type == GIMPLE_OMP_PARALLEL)
5339 determine_parallel_type (region);
5340
5341 if (region->inner)
5342 expand_omp (region->inner);
5343
5344 saved_location = input_location;
5345 if (gimple_has_location (last_stmt (region->entry)))
5346 input_location = gimple_location (last_stmt (region->entry));
5347
5348 switch (region->type)
5349 {
5350 case GIMPLE_OMP_PARALLEL:
5351 case GIMPLE_OMP_TASK:
5352 expand_omp_taskreg (region);
5353 break;
5354
5355 case GIMPLE_OMP_FOR:
5356 expand_omp_for (region);
5357 break;
5358
5359 case GIMPLE_OMP_SECTIONS:
5360 expand_omp_sections (region);
5361 break;
5362
5363 case GIMPLE_OMP_SECTION:
5364 /* Individual omp sections are handled together with their
5365 parent GIMPLE_OMP_SECTIONS region. */
5366 break;
5367
5368 case GIMPLE_OMP_SINGLE:
5369 expand_omp_single (region);
5370 break;
5371
5372 case GIMPLE_OMP_MASTER:
5373 case GIMPLE_OMP_ORDERED:
5374 case GIMPLE_OMP_CRITICAL:
5375 expand_omp_synch (region);
5376 break;
5377
5378 case GIMPLE_OMP_ATOMIC_LOAD:
5379 expand_omp_atomic (region);
5380 break;
5381
5382 default:
5383 gcc_unreachable ();
5384 }
5385
5386 input_location = saved_location;
5387 region = region->next;
5388 }
5389 }
5390
5391
5392 /* Helper for build_omp_regions. Scan the dominator tree starting at
5393 block BB. PARENT is the region that contains BB. If SINGLE_TREE is
5394 true, the function ends once a single tree is built (otherwise, whole
5395 forest of OMP constructs may be built). */
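/* For example, for

       #pragma omp parallel
         #pragma omp for
           ...

   the GIMPLE_OMP_PARALLEL statement opens a region whose ->inner is
   the region of the GIMPLE_OMP_FOR; each region's exit (and, for the
   loop, its cont) is filled in when the matching GIMPLE_OMP_RETURN
   and GIMPLE_OMP_CONTINUE statements are reached below.  */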
5396
5397 static void
5398 build_omp_regions_1 (basic_block bb, struct omp_region *parent,
5399 bool single_tree)
5400 {
5401 gimple_stmt_iterator gsi;
5402 gimple stmt;
5403 basic_block son;
5404
5405 gsi = gsi_last_bb (bb);
5406 if (!gsi_end_p (gsi) && is_gimple_omp (gsi_stmt (gsi)))
5407 {
5408 struct omp_region *region;
5409 enum gimple_code code;
5410
5411 stmt = gsi_stmt (gsi);
5412 code = gimple_code (stmt);
5413 if (code == GIMPLE_OMP_RETURN)
5414 {
5415 /* STMT is the return point out of region PARENT. Mark it
5416 as the exit point and make PARENT the immediately
5417 enclosing region. */
5418 gcc_assert (parent);
5419 region = parent;
5420 region->exit = bb;
5421 parent = parent->outer;
5422 }
5423 else if (code == GIMPLE_OMP_ATOMIC_STORE)
5424 {
5425 /* GIMPLE_OMP_ATOMIC_STORE is analogous to
5426 GIMPLE_OMP_RETURN, but matches with
5427 GIMPLE_OMP_ATOMIC_LOAD. */
5428 gcc_assert (parent);
5429 gcc_assert (parent->type == GIMPLE_OMP_ATOMIC_LOAD);
5430 region = parent;
5431 region->exit = bb;
5432 parent = parent->outer;
5433 }
5434
5435 else if (code == GIMPLE_OMP_CONTINUE)
5436 {
5437 gcc_assert (parent);
5438 parent->cont = bb;
5439 }
5440 else if (code == GIMPLE_OMP_SECTIONS_SWITCH)
5441 {
5442 /* GIMPLE_OMP_SECTIONS_SWITCH is part of
5443 GIMPLE_OMP_SECTIONS, and we do nothing for it. */
5444 ;
5445 }
5446 else
5447 {
5448 /* Otherwise, this directive becomes the parent for a new
5449 region. */
5450 region = new_omp_region (bb, code, parent);
5451 parent = region;
5452 }
5453 }
5454
5455 if (single_tree && !parent)
5456 return;
5457
5458 for (son = first_dom_son (CDI_DOMINATORS, bb);
5459 son;
5460 son = next_dom_son (CDI_DOMINATORS, son))
5461 build_omp_regions_1 (son, parent, single_tree);
5462 }
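/* For instance, for

       #pragma omp single
         body;

   the block containing the GIMPLE_OMP_SINGLE becomes region->entry and
   the block containing the matching GIMPLE_OMP_RETURN becomes
   region->exit; a GIMPLE_OMP_FOR inside the body would open an inner
   region parented to the single's region.  */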
5463
5464 /* Builds the tree of OMP regions rooted at ROOT, storing it to
5465 root_omp_region. */
5466
5467 static void
5468 build_omp_regions_root (basic_block root)
5469 {
5470 gcc_assert (root_omp_region == NULL);
5471 build_omp_regions_1 (root, NULL, true);
5472 gcc_assert (root_omp_region != NULL);
5473 }
5474
5475 /* Expands the OMP construct (and its subconstructs) starting in HEAD. */
5476
5477 void
5478 omp_expand_local (basic_block head)
5479 {
5480 build_omp_regions_root (head);
5481 if (dump_file && (dump_flags & TDF_DETAILS))
5482 {
5483 fprintf (dump_file, "\nOMP region tree\n\n");
5484 dump_omp_region (dump_file, root_omp_region, 0);
5485 fprintf (dump_file, "\n");
5486 }
5487
5488 remove_exit_barriers (root_omp_region);
5489 expand_omp (root_omp_region);
5490
5491 free_omp_regions ();
5492 }
5493
5494 /* Scan the CFG and build a tree of OMP regions, storing its root in
5495 root_omp_region. */
5496
5497 static void
5498 build_omp_regions (void)
5499 {
5500 gcc_assert (root_omp_region == NULL);
5501 calculate_dominance_info (CDI_DOMINATORS);
5502 build_omp_regions_1 (ENTRY_BLOCK_PTR, NULL, false);
5503 }
5504
5505 /* Main entry point for expanding OMP-GIMPLE into runtime calls. */
5506
5507 static unsigned int
5508 execute_expand_omp (void)
5509 {
5510 build_omp_regions ();
5511
5512 if (!root_omp_region)
5513 return 0;
5514
5515 if (dump_file)
5516 {
5517 fprintf (dump_file, "\nOMP region tree\n\n");
5518 dump_omp_region (dump_file, root_omp_region, 0);
5519 fprintf (dump_file, "\n");
5520 }
5521
5522 remove_exit_barriers (root_omp_region);
5523
5524 expand_omp (root_omp_region);
5525
5526 cleanup_tree_cfg ();
5527
5528 free_omp_regions ();
5529
5530 return 0;
5531 }
5532
5533 /* OMP expansion -- the default pass, run before creation of SSA form. */
5534
5535 static bool
5536 gate_expand_omp (void)
5537 {
5538 return (flag_openmp != 0 && !seen_error ());
5539 }
5540
5541 struct gimple_opt_pass pass_expand_omp =
5542 {
5543 {
5544 GIMPLE_PASS,
5545 "ompexp", /* name */
5546 gate_expand_omp, /* gate */
5547 execute_expand_omp, /* execute */
5548 NULL, /* sub */
5549 NULL, /* next */
5550 0, /* static_pass_number */
5551 TV_NONE, /* tv_id */
5552 PROP_gimple_any, /* properties_required */
5553 0, /* properties_provided */
5554 0, /* properties_destroyed */
5555 0, /* todo_flags_start */
5556 0 /* todo_flags_finish */
5557 }
5558 };
5559 \f
5560 /* Routines to lower OpenMP directives into OMP-GIMPLE. */
5561
5562 /* Lower the OpenMP sections directive in the current statement in GSI_P.
5563 CTX is the enclosing OMP context for the current statement. */
5564
5565 static void
5566 lower_omp_sections (gimple_stmt_iterator *gsi_p, omp_context *ctx)
5567 {
5568 tree block, control;
5569 gimple_stmt_iterator tgsi;
5570 unsigned i, len;
5571 gimple stmt, new_stmt, bind, t;
5572 gimple_seq ilist, dlist, olist, new_body, body;
5573 struct gimplify_ctx gctx;
5574
5575 stmt = gsi_stmt (*gsi_p);
5576
5577 push_gimplify_context (&gctx);
5578
5579 dlist = NULL;
5580 ilist = NULL;
5581 lower_rec_input_clauses (gimple_omp_sections_clauses (stmt),
5582 &ilist, &dlist, ctx);
5583
5584 tgsi = gsi_start (gimple_omp_body (stmt));
5585 for (len = 0; !gsi_end_p (tgsi); len++, gsi_next (&tgsi))
5586 continue;
5587
5588 tgsi = gsi_start (gimple_omp_body (stmt));
5589 body = NULL;
5590 for (i = 0; i < len; i++, gsi_next (&tgsi))
5591 {
5592 omp_context *sctx;
5593 gimple sec_start;
5594
5595 sec_start = gsi_stmt (tgsi);
5596 sctx = maybe_lookup_ctx (sec_start);
5597 gcc_assert (sctx);
5598
5599 gimple_seq_add_stmt (&body, sec_start);
5600
5601 lower_omp (gimple_omp_body (sec_start), sctx);
5602 gimple_seq_add_seq (&body, gimple_omp_body (sec_start));
5603 gimple_omp_set_body (sec_start, NULL);
5604
5605 if (i == len - 1)
5606 {
5607 gimple_seq l = NULL;
5608 lower_lastprivate_clauses (gimple_omp_sections_clauses (stmt), NULL,
5609 &l, ctx);
5610 gimple_seq_add_seq (&body, l);
5611 gimple_omp_section_set_last (sec_start);
5612 }
5613
5614 gimple_seq_add_stmt (&body, gimple_build_omp_return (false));
5615 }
5616
5617 block = make_node (BLOCK);
5618 bind = gimple_build_bind (NULL, body, block);
5619
5620 olist = NULL;
5621 lower_reduction_clauses (gimple_omp_sections_clauses (stmt), &olist, ctx);
5622
5623 block = make_node (BLOCK);
5624 new_stmt = gimple_build_bind (NULL, NULL, block);
5625
5626 pop_gimplify_context (new_stmt);
5627 gimple_bind_append_vars (new_stmt, ctx->block_vars);
5628 BLOCK_VARS (block) = gimple_bind_vars (bind);
5629 if (BLOCK_VARS (block))
5630 TREE_USED (block) = 1;
5631
5632 new_body = NULL;
5633 gimple_seq_add_seq (&new_body, ilist);
5634 gimple_seq_add_stmt (&new_body, stmt);
5635 gimple_seq_add_stmt (&new_body, gimple_build_omp_sections_switch ());
5636 gimple_seq_add_stmt (&new_body, bind);
5637
5638 control = create_tmp_var (unsigned_type_node, ".section");
5639 t = gimple_build_omp_continue (control, control);
5640 gimple_omp_sections_set_control (stmt, control);
5641 gimple_seq_add_stmt (&new_body, t);
5642
5643 gimple_seq_add_seq (&new_body, olist);
5644 gimple_seq_add_seq (&new_body, dlist);
5645
5646 new_body = maybe_catch_exception (new_body);
5647
5648 t = gimple_build_omp_return
5649 (!!find_omp_clause (gimple_omp_sections_clauses (stmt),
5650 OMP_CLAUSE_NOWAIT));
5651 gimple_seq_add_stmt (&new_body, t);
5652
5653 gimple_bind_set_body (new_stmt, new_body);
5654 gimple_omp_set_body (stmt, NULL);
5655
5656 gsi_replace (gsi_p, new_stmt, true);
5657 }
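/* A sketch of the lowered sequence built above (clause lists elided
   where empty):

       <ilist: private/firstprivate setup>
       GIMPLE_OMP_SECTIONS <clauses, control var .section>
       GIMPLE_OMP_SECTIONS_SWITCH
       {
         GIMPLE_OMP_SECTION  body-1  GIMPLE_OMP_RETURN
         ...
         GIMPLE_OMP_SECTION  body-N  <lastprivate copy-out>
                                     GIMPLE_OMP_RETURN
       }
       GIMPLE_OMP_CONTINUE (.section, .section)
       <olist: reductions>  <dlist: lastprivate/destructors>
       GIMPLE_OMP_RETURN (nowait iff the nowait clause is present)

   pass_expand_omp later turns this skeleton into calls to
   GOMP_sections_start and friends.  */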
5658
5659
5660 /* A subroutine of lower_omp_single. Expand the simple form of
5661 a GIMPLE_OMP_SINGLE, without a copyprivate clause:
5662
5663 if (GOMP_single_start ())
5664 BODY;
5665 [ GOMP_barrier (); ] -> unless 'nowait' is present.
5666
5667 FIXME. It may be better to delay expanding the logic of this until
5668 pass_expand_omp. The expanded logic may make the job more difficult
5669 for a synchronization analysis pass. */
5670
5671 static void
5672 lower_omp_single_simple (gimple single_stmt, gimple_seq *pre_p)
5673 {
5674 location_t loc = gimple_location (single_stmt);
5675 tree tlabel = create_artificial_label (loc);
5676 tree flabel = create_artificial_label (loc);
5677 gimple call, cond;
5678 tree lhs, decl;
5679
5680 decl = built_in_decls[BUILT_IN_GOMP_SINGLE_START];
5681 lhs = create_tmp_var (TREE_TYPE (TREE_TYPE (decl)), NULL);
5682 call = gimple_build_call (decl, 0);
5683 gimple_call_set_lhs (call, lhs);
5684 gimple_seq_add_stmt (pre_p, call);
5685
5686 cond = gimple_build_cond (EQ_EXPR, lhs,
5687 fold_convert_loc (loc, TREE_TYPE (lhs),
5688 boolean_true_node),
5689 tlabel, flabel);
5690 gimple_seq_add_stmt (pre_p, cond);
5691 gimple_seq_add_stmt (pre_p, gimple_build_label (tlabel));
5692 gimple_seq_add_seq (pre_p, gimple_omp_body (single_stmt));
5693 gimple_seq_add_stmt (pre_p, gimple_build_label (flabel));
5694 }
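/* A sketch of the statement sequence emitted above (temporary and
   label names such as .single.1 are illustrative):

       .single.1 = GOMP_single_start ();
       if (.single.1 == 1) goto tlabel; else goto flabel;
     tlabel:
       BODY;
     flabel:

   Exactly one thread sees GOMP_single_start return true and executes
   BODY; the trailing barrier, when required, is added by
   lower_omp_single via GIMPLE_OMP_RETURN.  */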
5695
5696
5697 /* A subroutine of lower_omp_single. Expand the copyprivate form of
5698 a GIMPLE_OMP_SINGLE, with a copyprivate clause:
5699
5700 #pragma omp single copyprivate (a, b, c)
5701
5702 Create a new structure to hold copies of 'a', 'b' and 'c' and emit:
5703
5704 {
5705 if ((copyout_p = GOMP_single_copy_start ()) == NULL)
5706 {
5707 BODY;
5708 copyout.a = a;
5709 copyout.b = b;
5710 copyout.c = c;
5711 GOMP_single_copy_end (&copyout);
5712 }
5713 else
5714 {
5715 a = copyout_p->a;
5716 b = copyout_p->b;
5717 c = copyout_p->c;
5718 }
5719 GOMP_barrier ();
5720 }
5721
5722 FIXME. It may be better to delay expanding the logic of this until
5723 pass_expand_omp. The expanded logic may make the job more difficult
5724 for a synchronization analysis pass. */
5725
5726 static void
5727 lower_omp_single_copy (gimple single_stmt, gimple_seq *pre_p, omp_context *ctx)
5728 {
5729 tree ptr_type, t, l0, l1, l2;
5730 gimple_seq copyin_seq;
5731 location_t loc = gimple_location (single_stmt);
5732
5733 ctx->sender_decl = create_tmp_var (ctx->record_type, ".omp_copy_o");
5734
5735 ptr_type = build_pointer_type (ctx->record_type);
5736 ctx->receiver_decl = create_tmp_var (ptr_type, ".omp_copy_i");
5737
5738 l0 = create_artificial_label (loc);
5739 l1 = create_artificial_label (loc);
5740 l2 = create_artificial_label (loc);
5741
5742 t = build_call_expr_loc (loc, built_in_decls[BUILT_IN_GOMP_SINGLE_COPY_START], 0);
5743 t = fold_convert_loc (loc, ptr_type, t);
5744 gimplify_assign (ctx->receiver_decl, t, pre_p);
5745
5746 t = build2 (EQ_EXPR, boolean_type_node, ctx->receiver_decl,
5747 build_int_cst (ptr_type, 0));
5748 t = build3 (COND_EXPR, void_type_node, t,
5749 build_and_jump (&l0), build_and_jump (&l1));
5750 gimplify_and_add (t, pre_p);
5751
5752 gimple_seq_add_stmt (pre_p, gimple_build_label (l0));
5753
5754 gimple_seq_add_seq (pre_p, gimple_omp_body (single_stmt));
5755
5756 copyin_seq = NULL;
5757 lower_copyprivate_clauses (gimple_omp_single_clauses (single_stmt), pre_p,
5758 &copyin_seq, ctx);
5759
5760 t = build_fold_addr_expr_loc (loc, ctx->sender_decl);
5761 t = build_call_expr_loc (loc, built_in_decls[BUILT_IN_GOMP_SINGLE_COPY_END],
5762 1, t);
5763 gimplify_and_add (t, pre_p);
5764
5765 t = build_and_jump (&l2);
5766 gimplify_and_add (t, pre_p);
5767
5768 gimple_seq_add_stmt (pre_p, gimple_build_label (l1));
5769
5770 gimple_seq_add_seq (pre_p, copyin_seq);
5771
5772 gimple_seq_add_stmt (pre_p, gimple_build_label (l2));
5773 }
5774
5775
5776 /* Lower code for an OpenMP single directive. */
5777
5778 static void
5779 lower_omp_single (gimple_stmt_iterator *gsi_p, omp_context *ctx)
5780 {
5781 tree block;
5782 gimple t, bind, single_stmt = gsi_stmt (*gsi_p);
5783 gimple_seq bind_body, dlist;
5784 struct gimplify_ctx gctx;
5785
5786 push_gimplify_context (&gctx);
5787
5788 bind_body = NULL;
5789 lower_rec_input_clauses (gimple_omp_single_clauses (single_stmt),
5790 &bind_body, &dlist, ctx);
5791 lower_omp (gimple_omp_body (single_stmt), ctx);
5792
5793 gimple_seq_add_stmt (&bind_body, single_stmt);
5794
5795 if (ctx->record_type)
5796 lower_omp_single_copy (single_stmt, &bind_body, ctx);
5797 else
5798 lower_omp_single_simple (single_stmt, &bind_body);
5799
5800 gimple_omp_set_body (single_stmt, NULL);
5801
5802 gimple_seq_add_seq (&bind_body, dlist);
5803
5804 bind_body = maybe_catch_exception (bind_body);
5805
5806 t = gimple_build_omp_return
5807 (!!find_omp_clause (gimple_omp_single_clauses (single_stmt),
5808 OMP_CLAUSE_NOWAIT));
5809 gimple_seq_add_stmt (&bind_body, t);
5810
5811 block = make_node (BLOCK);
5812 bind = gimple_build_bind (NULL, bind_body, block);
5813
5814 pop_gimplify_context (bind);
5815
5816 gimple_bind_append_vars (bind, ctx->block_vars);
5817 BLOCK_VARS (block) = ctx->block_vars;
5818 gsi_replace (gsi_p, bind, true);
5819 if (BLOCK_VARS (block))
5820 TREE_USED (block) = 1;
5821 }
5822
5823
5824 /* Lower code for an OpenMP master directive. */
5825
5826 static void
5827 lower_omp_master (gimple_stmt_iterator *gsi_p, omp_context *ctx)
5828 {
5829 tree block, lab = NULL, x;
5830 gimple stmt = gsi_stmt (*gsi_p), bind;
5831 location_t loc = gimple_location (stmt);
5832 gimple_seq tseq;
5833 struct gimplify_ctx gctx;
5834
5835 push_gimplify_context (&gctx);
5836
5837 block = make_node (BLOCK);
5838 bind = gimple_build_bind (NULL, gimple_seq_alloc_with_stmt (stmt),
5839 block);
5840
5841 x = build_call_expr_loc (loc, built_in_decls[BUILT_IN_OMP_GET_THREAD_NUM], 0);
5842 x = build2 (EQ_EXPR, boolean_type_node, x, integer_zero_node);
5843 x = build3 (COND_EXPR, void_type_node, x, NULL, build_and_jump (&lab));
5844 tseq = NULL;
5845 gimplify_and_add (x, &tseq);
5846 gimple_bind_add_seq (bind, tseq);
5847
5848 lower_omp (gimple_omp_body (stmt), ctx);
5849 gimple_omp_set_body (stmt, maybe_catch_exception (gimple_omp_body (stmt)));
5850 gimple_bind_add_seq (bind, gimple_omp_body (stmt));
5851 gimple_omp_set_body (stmt, NULL);
5852
5853 gimple_bind_add_stmt (bind, gimple_build_label (lab));
5854
5855 gimple_bind_add_stmt (bind, gimple_build_omp_return (true));
5856
5857 pop_gimplify_context (bind);
5858
5859 gimple_bind_append_vars (bind, ctx->block_vars);
5860 BLOCK_VARS (block) = ctx->block_vars;
5861 gsi_replace (gsi_p, bind, true);
5862 }
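/* A sketch of the lowering done above (the temporary name .tid.1 is
   illustrative):

       .tid.1 = omp_get_thread_num ();
       if (.tid.1 == 0) ; else goto lab;
       BODY;
     lab:
       GIMPLE_OMP_RETURN (nowait)

   Only the master thread (thread number 0) runs BODY, and since master
   has no implied barrier the region exit is marked nowait.  */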
5863
5864
5865 /* Lower code for an OpenMP ordered directive. */
5866
5867 static void
5868 lower_omp_ordered (gimple_stmt_iterator *gsi_p, omp_context *ctx)
5869 {
5870 tree block;
5871 gimple stmt = gsi_stmt (*gsi_p), bind, x;
5872 struct gimplify_ctx gctx;
5873
5874 push_gimplify_context (&gctx);
5875
5876 block = make_node (BLOCK);
5877 bind = gimple_build_bind (NULL, gimple_seq_alloc_with_stmt (stmt),
5878 block);
5879
5880 x = gimple_build_call (built_in_decls[BUILT_IN_GOMP_ORDERED_START], 0);
5881 gimple_bind_add_stmt (bind, x);
5882
5883 lower_omp (gimple_omp_body (stmt), ctx);
5884 gimple_omp_set_body (stmt, maybe_catch_exception (gimple_omp_body (stmt)));
5885 gimple_bind_add_seq (bind, gimple_omp_body (stmt));
5886 gimple_omp_set_body (stmt, NULL);
5887
5888 x = gimple_build_call (built_in_decls[BUILT_IN_GOMP_ORDERED_END], 0);
5889 gimple_bind_add_stmt (bind, x);
5890
5891 gimple_bind_add_stmt (bind, gimple_build_omp_return (true));
5892
5893 pop_gimplify_context (bind);
5894
5895 gimple_bind_append_vars (bind, ctx->block_vars);
5896 BLOCK_VARS (block) = gimple_bind_vars (bind);
5897 gsi_replace (gsi_p, bind, true);
5898 }
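/* A sketch of the result (the body is simply bracketed by runtime
   calls):

       GOMP_ordered_start ();
       BODY;
       GOMP_ordered_end ();
       GIMPLE_OMP_RETURN (nowait)

   GOMP_ordered_start blocks until it is the calling iteration's turn,
   which yields the sequential ordering the directive requires.  */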
5899
5900
5901 /* Lower a GIMPLE_OMP_CRITICAL statement. This is a relatively simple
5902 substitution of a couple of function calls. But in the NAMED case, it
5903 requires that the languages coordinate a symbol name. It is therefore
5904 best put here in common code. */
5905
5906 static GTY((param1_is (tree), param2_is (tree)))
5907 splay_tree critical_name_mutexes;
5908
5909 static void
5910 lower_omp_critical (gimple_stmt_iterator *gsi_p, omp_context *ctx)
5911 {
5912 tree block;
5913 tree name, lock, unlock;
5914 gimple stmt = gsi_stmt (*gsi_p), bind;
5915 location_t loc = gimple_location (stmt);
5916 gimple_seq tbody;
5917 struct gimplify_ctx gctx;
5918
5919 name = gimple_omp_critical_name (stmt);
5920 if (name)
5921 {
5922 tree decl;
5923 splay_tree_node n;
5924
5925 if (!critical_name_mutexes)
5926 critical_name_mutexes
5927 = splay_tree_new_ggc (splay_tree_compare_pointers,
5928 ggc_alloc_splay_tree_tree_node_tree_node_splay_tree_s,
5929 ggc_alloc_splay_tree_tree_node_tree_node_splay_tree_node_s);
5930
5931 n = splay_tree_lookup (critical_name_mutexes, (splay_tree_key) name);
5932 if (n == NULL)
5933 {
5934 char *new_str;
5935
5936 decl = create_tmp_var_raw (ptr_type_node, NULL);
5937
5938 new_str = ACONCAT ((".gomp_critical_user_",
5939 IDENTIFIER_POINTER (name), NULL));
5940 DECL_NAME (decl) = get_identifier (new_str);
5941 TREE_PUBLIC (decl) = 1;
5942 TREE_STATIC (decl) = 1;
5943 DECL_COMMON (decl) = 1;
5944 DECL_ARTIFICIAL (decl) = 1;
5945 DECL_IGNORED_P (decl) = 1;
5946 varpool_finalize_decl (decl);
5947
5948 splay_tree_insert (critical_name_mutexes, (splay_tree_key) name,
5949 (splay_tree_value) decl);
5950 }
5951 else
5952 decl = (tree) n->value;
5953
5954 lock = built_in_decls[BUILT_IN_GOMP_CRITICAL_NAME_START];
5955 lock = build_call_expr_loc (loc, lock, 1, build_fold_addr_expr_loc (loc, decl));
5956
5957 unlock = built_in_decls[BUILT_IN_GOMP_CRITICAL_NAME_END];
5958 unlock = build_call_expr_loc (loc, unlock, 1,
5959 build_fold_addr_expr_loc (loc, decl));
5960 }
5961 else
5962 {
5963 lock = built_in_decls[BUILT_IN_GOMP_CRITICAL_START];
5964 lock = build_call_expr_loc (loc, lock, 0);
5965
5966 unlock = built_in_decls[BUILT_IN_GOMP_CRITICAL_END];
5967 unlock = build_call_expr_loc (loc, unlock, 0);
5968 }
5969
5970 push_gimplify_context (&gctx);
5971
5972 block = make_node (BLOCK);
5973 bind = gimple_build_bind (NULL, gimple_seq_alloc_with_stmt (stmt), block);
5974
5975 tbody = gimple_bind_body (bind);
5976 gimplify_and_add (lock, &tbody);
5977 gimple_bind_set_body (bind, tbody);
5978
5979 lower_omp (gimple_omp_body (stmt), ctx);
5980 gimple_omp_set_body (stmt, maybe_catch_exception (gimple_omp_body (stmt)));
5981 gimple_bind_add_seq (bind, gimple_omp_body (stmt));
5982 gimple_omp_set_body (stmt, NULL);
5983
5984 tbody = gimple_bind_body (bind);
5985 gimplify_and_add (unlock, &tbody);
5986 gimple_bind_set_body (bind, tbody);
5987
5988 gimple_bind_add_stmt (bind, gimple_build_omp_return (true));
5989
5990 pop_gimplify_context (bind);
5991 gimple_bind_append_vars (bind, ctx->block_vars);
5992 BLOCK_VARS (block) = gimple_bind_vars (bind);
5993 gsi_replace (gsi_p, bind, true);
5994 }
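/* A sketch of both lowered forms.  Unnamed:

       GOMP_critical_start ();
       BODY;
       GOMP_critical_end ();

   Named, for "#pragma omp critical (foo)":

       GOMP_critical_name_start (&.gomp_critical_user_foo);
       BODY;
       GOMP_critical_name_end (&.gomp_critical_user_foo);

   The mutex variable is TREE_PUBLIC and DECL_COMMON, so every
   translation unit (and every front end) using the same critical name
   shares a single lock.  */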
5995
5996
5997 /* A subroutine of lower_omp_for. Generate code to emit the predicate
5998 for a lastprivate clause. Given a loop control predicate of (V
5999 cond N2), we gate the clause on (!(V cond N2)). The lowered form
6000 is appended to *DLIST, and the iterator initialization is appended to
6001 *BODY_P. */
6002
6003 static void
6004 lower_omp_for_lastprivate (struct omp_for_data *fd, gimple_seq *body_p,
6005 gimple_seq *dlist, struct omp_context *ctx)
6006 {
6007 tree clauses, cond, vinit;
6008 enum tree_code cond_code;
6009 gimple_seq stmts;
6010
6011 cond_code = fd->loop.cond_code;
6012 cond_code = cond_code == LT_EXPR ? GE_EXPR : LE_EXPR;
6013
6014 /* When possible, use a strict equality expression. This can let
6015 VRP-style optimizations deduce the value and remove a copy. */
6016 if (host_integerp (fd->loop.step, 0))
6017 {
6018 HOST_WIDE_INT step = TREE_INT_CST_LOW (fd->loop.step);
6019 if (step == 1 || step == -1)
6020 cond_code = EQ_EXPR;
6021 }
6022
6023 cond = build2 (cond_code, boolean_type_node, fd->loop.v, fd->loop.n2);
6024
6025 clauses = gimple_omp_for_clauses (fd->for_stmt);
6026 stmts = NULL;
6027 lower_lastprivate_clauses (clauses, cond, &stmts, ctx);
6028 if (!gimple_seq_empty_p (stmts))
6029 {
6030 gimple_seq_add_seq (&stmts, *dlist);
6031 *dlist = stmts;
6032
6033 /* Optimize: v = 0; is usually cheaper than v = some_other_constant. */
6034 vinit = fd->loop.n1;
6035 if (cond_code == EQ_EXPR
6036 && host_integerp (fd->loop.n2, 0)
6037 && ! integer_zerop (fd->loop.n2))
6038 vinit = build_int_cst (TREE_TYPE (fd->loop.v), 0);
6039
6040 /* Initialize the iterator variable, so that threads that don't execute
6041 any iterations don't execute the lastprivate clauses by accident. */
6042 gimplify_assign (fd->loop.v, vinit, body_p);
6043 }
6044 }
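/* For example, for "#pragma omp for lastprivate (x)" over
   for (i = 0; i < n; i++), step 1 allows the strict-equality gate:

       i = 0;                  <- appended to *BODY_P, so threads that
                                  run no iterations skip the copy-out
       <the worksharing loop>
       if (i == n)             <- gate appended to *DLIST
         x = x.priv;           <- x.priv names the hypothetical
                                  private copy

   so only the thread that executed the sequentially last iteration
   writes the original variable back.  */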
6045
6046
6047 /* Lower code for an OpenMP loop directive. */
6048
6049 static void
6050 lower_omp_for (gimple_stmt_iterator *gsi_p, omp_context *ctx)
6051 {
6052 tree *rhs_p, block;
6053 struct omp_for_data fd;
6054 gimple stmt = gsi_stmt (*gsi_p), new_stmt;
6055 gimple_seq omp_for_body, body, dlist;
6056 size_t i;
6057 struct gimplify_ctx gctx;
6058
6059 push_gimplify_context (&gctx);
6060
6061 lower_omp (gimple_omp_for_pre_body (stmt), ctx);
6062 lower_omp (gimple_omp_body (stmt), ctx);
6063
6064 block = make_node (BLOCK);
6065 new_stmt = gimple_build_bind (NULL, NULL, block);
6066
6067 /* Move the declarations of temporaries in the loop body into the
6068 enclosing bind before we make the body go away. */
6069 omp_for_body = gimple_omp_body (stmt);
6070 if (!gimple_seq_empty_p (omp_for_body)
6071 && gimple_code (gimple_seq_first_stmt (omp_for_body)) == GIMPLE_BIND)
6072 {
6073 tree vars = gimple_bind_vars (gimple_seq_first_stmt (omp_for_body));
6074 gimple_bind_append_vars (new_stmt, vars);
6075 }
6076
6077 /* The pre-body and input clauses go before the lowered GIMPLE_OMP_FOR. */
6078 dlist = NULL;
6079 body = NULL;
6080 lower_rec_input_clauses (gimple_omp_for_clauses (stmt), &body, &dlist, ctx);
6081 gimple_seq_add_seq (&body, gimple_omp_for_pre_body (stmt));
6082
6083 /* Lower the header expressions. At this point, we can assume that
6084 the header is of the form:
6085
6086 #pragma omp for (V = VAL1; V {<|>|<=|>=} VAL2; V = V [+-] VAL3)
6087
6088 We just need to make sure that VAL1, VAL2 and VAL3 are lowered
6089 using the .omp_data_s mapping, if needed. */
6090 for (i = 0; i < gimple_omp_for_collapse (stmt); i++)
6091 {
6092 rhs_p = gimple_omp_for_initial_ptr (stmt, i);
6093 if (!is_gimple_min_invariant (*rhs_p))
6094 *rhs_p = get_formal_tmp_var (*rhs_p, &body);
6095
6096 rhs_p = gimple_omp_for_final_ptr (stmt, i);
6097 if (!is_gimple_min_invariant (*rhs_p))
6098 *rhs_p = get_formal_tmp_var (*rhs_p, &body);
6099
6100 rhs_p = &TREE_OPERAND (gimple_omp_for_incr (stmt, i), 1);
6101 if (!is_gimple_min_invariant (*rhs_p))
6102 *rhs_p = get_formal_tmp_var (*rhs_p, &body);
6103 }
6104
6105 /* Once lowered, extract the bounds and clauses. */
6106 extract_omp_for_data (stmt, &fd, NULL);
6107
6108 lower_omp_for_lastprivate (&fd, &body, &dlist, ctx);
6109
6110 gimple_seq_add_stmt (&body, stmt);
6111 gimple_seq_add_seq (&body, gimple_omp_body (stmt));
6112
6113 gimple_seq_add_stmt (&body, gimple_build_omp_continue (fd.loop.v,
6114 fd.loop.v));
6115
6116 /* After the loop, add exit clauses. */
6117 lower_reduction_clauses (gimple_omp_for_clauses (stmt), &body, ctx);
6118 gimple_seq_add_seq (&body, dlist);
6119
6120 body = maybe_catch_exception (body);
6121
6122 /* Region exit marker goes at the end of the loop body. */
6123 gimple_seq_add_stmt (&body, gimple_build_omp_return (fd.have_nowait));
6124
6125 pop_gimplify_context (new_stmt);
6126
6127 gimple_bind_append_vars (new_stmt, ctx->block_vars);
6128 BLOCK_VARS (block) = gimple_bind_vars (new_stmt);
6129 if (BLOCK_VARS (block))
6130 TREE_USED (block) = 1;
6131
6132 gimple_bind_set_body (new_stmt, body);
6133 gimple_omp_set_body (stmt, NULL);
6134 gimple_omp_for_set_pre_body (stmt, NULL);
6135 gsi_replace (gsi_p, new_stmt, true);
6136 }
6137
6138 /* Callback for walk_stmts. Check if the current statement only contains
6139 GIMPLE_OMP_FOR or GIMPLE_OMP_SECTIONS. */
6140
6141 static tree
6142 check_combined_parallel (gimple_stmt_iterator *gsi_p,
6143 bool *handled_ops_p,
6144 struct walk_stmt_info *wi)
6145 {
6146 int *info = (int *) wi->info;
6147 gimple stmt = gsi_stmt (*gsi_p);
6148
6149 *handled_ops_p = true;
6150 switch (gimple_code (stmt))
6151 {
6152 WALK_SUBSTMTS;
6153
6154 case GIMPLE_OMP_FOR:
6155 case GIMPLE_OMP_SECTIONS:
6156 *info = *info == 0 ? 1 : -1;
6157 break;
6158 default:
6159 *info = -1;
6160 break;
6161 }
6162 return NULL;
6163 }
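/* For example,

       #pragma omp parallel
       #pragma omp for
       for (...) ...

   leaves ws_num == 1, so lower_omp_taskreg marks the parallel as
   combined and pass_expand_omp can use a combined runtime entry point
   (e.g. GOMP_parallel_loop_static_start) instead of a plain
   GOMP_parallel_start followed by separate worksharing setup.  */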
6164
6165 struct omp_taskcopy_context
6166 {
6167 /* This field must be at the beginning, as we do "inheritance": Some
6168 callback functions for tree-inline.c (e.g., omp_copy_decl)
6169 receive a copy_body_data pointer that is up-casted to an
6170 omp_context pointer. */
6171 copy_body_data cb;
6172 omp_context *ctx;
6173 };
6174
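/* Callback for the copy_body machinery: remap VAR to a fresh temporary
   if it is one of the task's firstprivate variables (i.e., it has an
   entry in sfield_map); otherwise return VAR unchanged.  */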
6175 static tree
6176 task_copyfn_copy_decl (tree var, copy_body_data *cb)
6177 {
6178 struct omp_taskcopy_context *tcctx = (struct omp_taskcopy_context *) cb;
6179
6180 if (splay_tree_lookup (tcctx->ctx->sfield_map, (splay_tree_key) var))
6181 return create_tmp_var (TREE_TYPE (var), NULL);
6182
6183 return var;
6184 }
6185
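/* Build a copy of the record type ORIG_TYPE with every field type,
   size and offset remapped through TCCTX, so that variably modified
   (e.g. VLA-sized) members refer to the child function's copies.  */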
6186 static tree
6187 task_copyfn_remap_type (struct omp_taskcopy_context *tcctx, tree orig_type)
6188 {
6189 tree name, new_fields = NULL, type, f;
6190
6191 type = lang_hooks.types.make_type (RECORD_TYPE);
6192 name = DECL_NAME (TYPE_NAME (orig_type));
6193 name = build_decl (gimple_location (tcctx->ctx->stmt),
6194 TYPE_DECL, name, type);
6195 TYPE_NAME (type) = name;
6196
6197 for (f = TYPE_FIELDS (orig_type); f ; f = TREE_CHAIN (f))
6198 {
6199 tree new_f = copy_node (f);
6200 DECL_CONTEXT (new_f) = type;
6201 TREE_TYPE (new_f) = remap_type (TREE_TYPE (f), &tcctx->cb);
6202 TREE_CHAIN (new_f) = new_fields;
6203 walk_tree (&DECL_SIZE (new_f), copy_tree_body_r, &tcctx->cb, NULL);
6204 walk_tree (&DECL_SIZE_UNIT (new_f), copy_tree_body_r, &tcctx->cb, NULL);
6205 walk_tree (&DECL_FIELD_OFFSET (new_f), copy_tree_body_r,
6206 &tcctx->cb, NULL);
6207 new_fields = new_f;
6208 *pointer_map_insert (tcctx->cb.decl_map, f) = new_f;
6209 }
6210 TYPE_FIELDS (type) = nreverse (new_fields);
6211 layout_type (type);
6212 return type;
6213 }
6214
6215 /* Create the task firstprivate copy function (copyfn) for TASK_STMT. */
6216
6217 static void
6218 create_task_copyfn (gimple task_stmt, omp_context *ctx)
6219 {
6220 struct function *child_cfun;
6221 tree child_fn, t, c, src, dst, f, sf, arg, sarg, decl;
6222 tree record_type, srecord_type, bind, list;
6223 bool record_needs_remap = false, srecord_needs_remap = false;
6224 splay_tree_node n;
6225 struct omp_taskcopy_context tcctx;
6226 struct gimplify_ctx gctx;
6227 location_t loc = gimple_location (task_stmt);
6228
6229 child_fn = gimple_omp_task_copy_fn (task_stmt);
6230 child_cfun = DECL_STRUCT_FUNCTION (child_fn);
6231 gcc_assert (child_cfun->cfg == NULL);
6232 DECL_SAVED_TREE (child_fn) = alloc_stmt_list ();
6233
6234 /* Reset DECL_CONTEXT on function arguments. */
6235 for (t = DECL_ARGUMENTS (child_fn); t; t = DECL_CHAIN (t))
6236 DECL_CONTEXT (t) = child_fn;
6237
6238 /* Populate the function. */
6239 push_gimplify_context (&gctx);
6240 current_function_decl = child_fn;
6241
6242 bind = build3 (BIND_EXPR, void_type_node, NULL, NULL, NULL);
6243 TREE_SIDE_EFFECTS (bind) = 1;
6244 list = NULL;
6245 DECL_SAVED_TREE (child_fn) = bind;
6246 DECL_SOURCE_LOCATION (child_fn) = gimple_location (task_stmt);
6247
6248 /* Remap src and dst argument types if needed. */
6249 record_type = ctx->record_type;
6250 srecord_type = ctx->srecord_type;
6251 for (f = TYPE_FIELDS (record_type); f ; f = DECL_CHAIN (f))
6252 if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
6253 {
6254 record_needs_remap = true;
6255 break;
6256 }
6257 for (f = TYPE_FIELDS (srecord_type); f ; f = DECL_CHAIN (f))
6258 if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
6259 {
6260 srecord_needs_remap = true;
6261 break;
6262 }
6263
6264 if (record_needs_remap || srecord_needs_remap)
6265 {
6266 memset (&tcctx, '\0', sizeof (tcctx));
6267 tcctx.cb.src_fn = ctx->cb.src_fn;
6268 tcctx.cb.dst_fn = child_fn;
6269 tcctx.cb.src_node = cgraph_get_node (tcctx.cb.src_fn);
6270 gcc_checking_assert (tcctx.cb.src_node);
6271 tcctx.cb.dst_node = tcctx.cb.src_node;
6272 tcctx.cb.src_cfun = ctx->cb.src_cfun;
6273 tcctx.cb.copy_decl = task_copyfn_copy_decl;
6274 tcctx.cb.eh_lp_nr = 0;
6275 tcctx.cb.transform_call_graph_edges = CB_CGE_MOVE;
6276 tcctx.cb.decl_map = pointer_map_create ();
6277 tcctx.ctx = ctx;
6278
6279 if (record_needs_remap)
6280 record_type = task_copyfn_remap_type (&tcctx, record_type);
6281 if (srecord_needs_remap)
6282 srecord_type = task_copyfn_remap_type (&tcctx, srecord_type);
6283 }
6284 else
6285 tcctx.cb.decl_map = NULL;
6286
6287 push_cfun (child_cfun);
6288
6289 arg = DECL_ARGUMENTS (child_fn);
6290 TREE_TYPE (arg) = build_pointer_type (record_type);
6291 sarg = DECL_CHAIN (arg);
6292 TREE_TYPE (sarg) = build_pointer_type (srecord_type);
6293
6294 /* First pass: initialize the temporaries used in the sizes and field
6295 offsets of record_type and srecord_type. */
6296 if (tcctx.cb.decl_map)
6297 for (c = gimple_omp_task_clauses (task_stmt); c; c = OMP_CLAUSE_CHAIN (c))
6298 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_FIRSTPRIVATE)
6299 {
6300 tree *p;
6301
6302 decl = OMP_CLAUSE_DECL (c);
6303 p = (tree *) pointer_map_contains (tcctx.cb.decl_map, decl);
6304 if (p == NULL)
6305 continue;
6306 n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
6307 sf = (tree) n->value;
6308 sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
6309 src = build_simple_mem_ref_loc (loc, sarg);
6310 src = build3 (COMPONENT_REF, TREE_TYPE (sf), src, sf, NULL);
6311 t = build2 (MODIFY_EXPR, TREE_TYPE (*p), *p, src);
6312 append_to_statement_list (t, &list);
6313 }
6314
6315 /* Second pass: copy shared var pointers and copy construct non-VLA
6316 firstprivate vars. */
6317 for (c = gimple_omp_task_clauses (task_stmt); c; c = OMP_CLAUSE_CHAIN (c))
6318 switch (OMP_CLAUSE_CODE (c))
6319 {
6320 case OMP_CLAUSE_SHARED:
6321 decl = OMP_CLAUSE_DECL (c);
6322 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
6323 if (n == NULL)
6324 break;
6325 f = (tree) n->value;
6326 if (tcctx.cb.decl_map)
6327 f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
6328 n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
6329 sf = (tree) n->value;
6330 if (tcctx.cb.decl_map)
6331 sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
6332 src = build_simple_mem_ref_loc (loc, sarg);
6333 src = build3 (COMPONENT_REF, TREE_TYPE (sf), src, sf, NULL);
6334 dst = build_simple_mem_ref_loc (loc, arg);
6335 dst = build3 (COMPONENT_REF, TREE_TYPE (f), dst, f, NULL);
6336 t = build2 (MODIFY_EXPR, TREE_TYPE (dst), dst, src);
6337 append_to_statement_list (t, &list);
6338 break;
6339 case OMP_CLAUSE_FIRSTPRIVATE:
6340 decl = OMP_CLAUSE_DECL (c);
6341 if (is_variable_sized (decl))
6342 break;
6343 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
6344 if (n == NULL)
6345 break;
6346 f = (tree) n->value;
6347 if (tcctx.cb.decl_map)
6348 f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
6349 n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
6350 if (n != NULL)
6351 {
6352 sf = (tree) n->value;
6353 if (tcctx.cb.decl_map)
6354 sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
6355 src = build_simple_mem_ref_loc (loc, sarg);
6356 src = build3 (COMPONENT_REF, TREE_TYPE (sf), src, sf, NULL);
6357 if (use_pointer_for_field (decl, NULL) || is_reference (decl))
6358 src = build_simple_mem_ref_loc (loc, src);
6359 }
6360 else
6361 src = decl;
6362 dst = build_simple_mem_ref_loc (loc, arg);
6363 dst = build3 (COMPONENT_REF, TREE_TYPE (f), dst, f, NULL);
6364 t = lang_hooks.decls.omp_clause_copy_ctor (c, dst, src);
6365 append_to_statement_list (t, &list);
6366 break;
6367 case OMP_CLAUSE_PRIVATE:
6368 if (! OMP_CLAUSE_PRIVATE_OUTER_REF (c))
6369 break;
6370 decl = OMP_CLAUSE_DECL (c);
6371 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
6372 f = (tree) n->value;
6373 if (tcctx.cb.decl_map)
6374 f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
6375 n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
6376 if (n != NULL)
6377 {
6378 sf = (tree) n->value;
6379 if (tcctx.cb.decl_map)
6380 sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
6381 src = build_simple_mem_ref_loc (loc, sarg);
6382 src = build3 (COMPONENT_REF, TREE_TYPE (sf), src, sf, NULL);
6383 if (use_pointer_for_field (decl, NULL))
6384 src = build_simple_mem_ref_loc (loc, src);
6385 }
6386 else
6387 src = decl;
6388 dst = build_simple_mem_ref_loc (loc, arg);
6389 dst = build3 (COMPONENT_REF, TREE_TYPE (f), dst, f, NULL);
6390 t = build2 (MODIFY_EXPR, TREE_TYPE (dst), dst, src);
6391 append_to_statement_list (t, &list);
6392 break;
6393 default:
6394 break;
6395 }
6396
6397 /* Last pass: handle VLA firstprivates. */
6398 if (tcctx.cb.decl_map)
6399 for (c = gimple_omp_task_clauses (task_stmt); c; c = OMP_CLAUSE_CHAIN (c))
6400 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_FIRSTPRIVATE)
6401 {
6402 tree ind, ptr, df;
6403
6404 decl = OMP_CLAUSE_DECL (c);
6405 if (!is_variable_sized (decl))
6406 continue;
6407 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
6408 if (n == NULL)
6409 continue;
6410 f = (tree) n->value;
6411 f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
6412 gcc_assert (DECL_HAS_VALUE_EXPR_P (decl));
6413 ind = DECL_VALUE_EXPR (decl);
6414 gcc_assert (TREE_CODE (ind) == INDIRECT_REF);
6415 gcc_assert (DECL_P (TREE_OPERAND (ind, 0)));
6416 n = splay_tree_lookup (ctx->sfield_map,
6417 (splay_tree_key) TREE_OPERAND (ind, 0));
6418 sf = (tree) n->value;
6419 sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
6420 src = build_simple_mem_ref_loc (loc, sarg);
6421 src = build3 (COMPONENT_REF, TREE_TYPE (sf), src, sf, NULL);
6422 src = build_simple_mem_ref_loc (loc, src);
6423 dst = build_simple_mem_ref_loc (loc, arg);
6424 dst = build3 (COMPONENT_REF, TREE_TYPE (f), dst, f, NULL);
6425 t = lang_hooks.decls.omp_clause_copy_ctor (c, dst, src);
6426 append_to_statement_list (t, &list);
6427 n = splay_tree_lookup (ctx->field_map,
6428 (splay_tree_key) TREE_OPERAND (ind, 0));
6429 df = (tree) n->value;
6430 df = *(tree *) pointer_map_contains (tcctx.cb.decl_map, df);
6431 ptr = build_simple_mem_ref_loc (loc, arg);
6432 ptr = build3 (COMPONENT_REF, TREE_TYPE (df), ptr, df, NULL);
6433 t = build2 (MODIFY_EXPR, TREE_TYPE (ptr), ptr,
6434 build_fold_addr_expr_loc (loc, dst));
6435 append_to_statement_list (t, &list);
6436 }
6437
6438 t = build1 (RETURN_EXPR, void_type_node, NULL);
6439 append_to_statement_list (t, &list);
6440
6441 if (tcctx.cb.decl_map)
6442 pointer_map_destroy (tcctx.cb.decl_map);
6443 pop_gimplify_context (NULL);
6444 BIND_EXPR_BODY (bind) = list;
6445 pop_cfun ();
6446 current_function_decl = ctx->cb.src_fn;
6447 }
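/* A sketch of a generated copyfn (struct, field and function names are
   illustrative):

       void .omp_task_copyfn (struct .omp_data_s *dst,
                              struct .omp_data_s1 *src)
       {
         dst->shared_p = src->shared_p;   <- shared: copy the pointer
         dst->fp = copy-ctor (src->fp);   <- firstprivate:
                                             copy-construct
         dst->vla_p = &dst->vla_data;     <- VLA firstprivate: fix up
                                             the pointer last
       }

   GOMP_task calls this function to initialize the child's data block
   from the block built by the encountering thread.  */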
6448
6449 /* Lower the OpenMP parallel or task directive in the current statement
6450 in GSI_P. CTX holds context information for the directive. */
6451
6452 static void
6453 lower_omp_taskreg (gimple_stmt_iterator *gsi_p, omp_context *ctx)
6454 {
6455 tree clauses;
6456 tree child_fn, t;
6457 gimple stmt = gsi_stmt (*gsi_p);
6458 gimple par_bind, bind;
6459 gimple_seq par_body, olist, ilist, par_olist, par_ilist, new_body;
6460 struct gimplify_ctx gctx;
6461 location_t loc = gimple_location (stmt);
6462
6463 clauses = gimple_omp_taskreg_clauses (stmt);
6464 par_bind = gimple_seq_first_stmt (gimple_omp_body (stmt));
6465 par_body = gimple_bind_body (par_bind);
6466 child_fn = ctx->cb.dst_fn;
6467 if (gimple_code (stmt) == GIMPLE_OMP_PARALLEL
6468 && !gimple_omp_parallel_combined_p (stmt))
6469 {
6470 struct walk_stmt_info wi;
6471 int ws_num = 0;
6472
6473 memset (&wi, 0, sizeof (wi));
6474 wi.info = &ws_num;
6475 wi.val_only = true;
6476 walk_gimple_seq (par_body, check_combined_parallel, NULL, &wi);
6477 if (ws_num == 1)
6478 gimple_omp_parallel_set_combined_p (stmt, true);
6479 }
6480 if (ctx->srecord_type)
6481 create_task_copyfn (stmt, ctx);
6482
6483 push_gimplify_context (&gctx);
6484
6485 par_olist = NULL;
6486 par_ilist = NULL;
6487 lower_rec_input_clauses (clauses, &par_ilist, &par_olist, ctx);
6488 lower_omp (par_body, ctx);
6489 if (gimple_code (stmt) == GIMPLE_OMP_PARALLEL)
6490 lower_reduction_clauses (clauses, &par_olist, ctx);
6491
6492 /* Declare all the variables created by mapping and the variables
6493 declared in the scope of the parallel body. */
6494 record_vars_into (ctx->block_vars, child_fn);
6495 record_vars_into (gimple_bind_vars (par_bind), child_fn);
6496
6497 if (ctx->record_type)
6498 {
6499 ctx->sender_decl
6500 = create_tmp_var (ctx->srecord_type ? ctx->srecord_type
6501 : ctx->record_type, ".omp_data_o");
6502 DECL_NAMELESS (ctx->sender_decl) = 1;
6503 TREE_ADDRESSABLE (ctx->sender_decl) = 1;
6504 gimple_omp_taskreg_set_data_arg (stmt, ctx->sender_decl);
6505 }
6506
6507 olist = NULL;
6508 ilist = NULL;
6509 lower_send_clauses (clauses, &ilist, &olist, ctx);
6510 lower_send_shared_vars (&ilist, &olist, ctx);
6511
6512 /* Once all the expansions are done, sequence all the different
6513 fragments inside gimple_omp_body. */
6514
6515 new_body = NULL;
6516
6517 if (ctx->record_type)
6518 {
6519 t = build_fold_addr_expr_loc (loc, ctx->sender_decl);
6520 /* fixup_child_record_type might have changed receiver_decl's type. */
6521 t = fold_convert_loc (loc, TREE_TYPE (ctx->receiver_decl), t);
6522 gimple_seq_add_stmt (&new_body,
6523 gimple_build_assign (ctx->receiver_decl, t));
6524 }
6525
6526 gimple_seq_add_seq (&new_body, par_ilist);
6527 gimple_seq_add_seq (&new_body, par_body);
6528 gimple_seq_add_seq (&new_body, par_olist);
6529 new_body = maybe_catch_exception (new_body);
6530 gimple_seq_add_stmt (&new_body, gimple_build_omp_return (false));
6531 gimple_omp_set_body (stmt, new_body);
6532
6533 bind = gimple_build_bind (NULL, NULL, gimple_bind_block (par_bind));
6534 gimple_bind_add_stmt (bind, stmt);
6535 if (ilist || olist)
6536 {
6537 gimple_seq_add_stmt (&ilist, bind);
6538 gimple_seq_add_seq (&ilist, olist);
6539 bind = gimple_build_bind (NULL, ilist, NULL);
6540 }
6541
6542 gsi_replace (gsi_p, bind, true);
6543
6544 pop_gimplify_context (NULL);
6545 }
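/* A sketch of the result for a parallel with one mapped variable I
   (names are illustrative; the send copies the address instead when
   the variable is passed by reference):

       .omp_data_o.i = i;                  <- ilist: marshal inputs
       GIMPLE_OMP_PARALLEL [child fn, data arg .omp_data_o]
         .omp_data_i = &.omp_data_o;       <- receiver setup
         <par_ilist>  BODY  <par_olist>
         GIMPLE_OMP_RETURN
       <olist: copy-back, if any>

   pass_expand_omp later outlines the body into the child function and
   rewrites the receiver assignment to use the pointer argument passed
   by GOMP_parallel_start.  */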
6546
6547 /* Callback for lower_omp_1. Return non-NULL if *tp needs to be
6548 regimplified. If DATA is non-NULL, lower_omp_1 is outside
6549 of an OpenMP context, but with task_shared_vars set. */
6550
6551 static tree
6552 lower_omp_regimplify_p (tree *tp, int *walk_subtrees,
6553 void *data)
6554 {
6555 tree t = *tp;
6556
6557 /* Any variable with DECL_VALUE_EXPR needs to be regimplified. */
6558 if (TREE_CODE (t) == VAR_DECL && data == NULL && DECL_HAS_VALUE_EXPR_P (t))
6559 return t;
6560
6561 if (task_shared_vars
6562 && DECL_P (t)
6563 && bitmap_bit_p (task_shared_vars, DECL_UID (t)))
6564 return t;
6565
6566 /* If a global variable has been privatized, TREE_CONSTANT on
6567 ADDR_EXPR might be wrong. */
6568 if (data == NULL && TREE_CODE (t) == ADDR_EXPR)
6569 recompute_tree_invariant_for_addr_expr (t);
6570
6571 *walk_subtrees = !TYPE_P (t) && !DECL_P (t);
6572 return NULL_TREE;
6573 }
6574
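/* Lower the statement pointed to by GSI_P within context CTX (NULL if
   outside any OpenMP construct), dispatching on the gimple code.  */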
6575 static void
6576 lower_omp_1 (gimple_stmt_iterator *gsi_p, omp_context *ctx)
6577 {
6578 gimple stmt = gsi_stmt (*gsi_p);
6579 struct walk_stmt_info wi;
6580
6581 if (gimple_has_location (stmt))
6582 input_location = gimple_location (stmt);
6583
6584 if (task_shared_vars)
6585 memset (&wi, '\0', sizeof (wi));
6586
6587 /* If we have issued syntax errors, avoid doing any heavy lifting.
6588 Just replace the OpenMP directives with a NOP to avoid
6589 confusing RTL expansion. */
6590 if (seen_error () && is_gimple_omp (stmt))
6591 {
6592 gsi_replace (gsi_p, gimple_build_nop (), true);
6593 return;
6594 }
6595
6596 switch (gimple_code (stmt))
6597 {
6598 case GIMPLE_COND:
6599 if ((ctx || task_shared_vars)
6600 && (walk_tree (gimple_cond_lhs_ptr (stmt), lower_omp_regimplify_p,
6601 ctx ? NULL : &wi, NULL)
6602 || walk_tree (gimple_cond_rhs_ptr (stmt), lower_omp_regimplify_p,
6603 ctx ? NULL : &wi, NULL)))
6604 gimple_regimplify_operands (stmt, gsi_p);
6605 break;
6606 case GIMPLE_CATCH:
6607 lower_omp (gimple_catch_handler (stmt), ctx);
6608 break;
6609 case GIMPLE_EH_FILTER:
6610 lower_omp (gimple_eh_filter_failure (stmt), ctx);
6611 break;
6612 case GIMPLE_TRY:
6613 lower_omp (gimple_try_eval (stmt), ctx);
6614 lower_omp (gimple_try_cleanup (stmt), ctx);
6615 break;
6616 case GIMPLE_BIND:
6617 lower_omp (gimple_bind_body (stmt), ctx);
6618 break;
6619 case GIMPLE_OMP_PARALLEL:
6620 case GIMPLE_OMP_TASK:
6621 ctx = maybe_lookup_ctx (stmt);
6622 lower_omp_taskreg (gsi_p, ctx);
6623 break;
6624 case GIMPLE_OMP_FOR:
6625 ctx = maybe_lookup_ctx (stmt);
6626 gcc_assert (ctx);
6627 lower_omp_for (gsi_p, ctx);
6628 break;
6629 case GIMPLE_OMP_SECTIONS:
6630 ctx = maybe_lookup_ctx (stmt);
6631 gcc_assert (ctx);
6632 lower_omp_sections (gsi_p, ctx);
6633 break;
6634 case GIMPLE_OMP_SINGLE:
6635 ctx = maybe_lookup_ctx (stmt);
6636 gcc_assert (ctx);
6637 lower_omp_single (gsi_p, ctx);
6638 break;
6639 case GIMPLE_OMP_MASTER:
6640 ctx = maybe_lookup_ctx (stmt);
6641 gcc_assert (ctx);
6642 lower_omp_master (gsi_p, ctx);
6643 break;
6644 case GIMPLE_OMP_ORDERED:
6645 ctx = maybe_lookup_ctx (stmt);
6646 gcc_assert (ctx);
6647 lower_omp_ordered (gsi_p, ctx);
6648 break;
6649 case GIMPLE_OMP_CRITICAL:
6650 ctx = maybe_lookup_ctx (stmt);
6651 gcc_assert (ctx);
6652 lower_omp_critical (gsi_p, ctx);
6653 break;
6654 case GIMPLE_OMP_ATOMIC_LOAD:
6655 if ((ctx || task_shared_vars)
6656 && walk_tree (gimple_omp_atomic_load_rhs_ptr (stmt),
6657 lower_omp_regimplify_p, ctx ? NULL : &wi, NULL))
6658 gimple_regimplify_operands (stmt, gsi_p);
6659 break;
6660 default:
6661 if ((ctx || task_shared_vars)
6662 && walk_gimple_op (stmt, lower_omp_regimplify_p,
6663 ctx ? NULL : &wi))
6664 gimple_regimplify_operands (stmt, gsi_p);
6665 break;
6666 }
6667 }
6668
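/* Lower each statement in the sequence BODY inside context CTX,
   restoring input_location afterwards.  */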
6669 static void
6670 lower_omp (gimple_seq body, omp_context *ctx)
6671 {
6672 location_t saved_location = input_location;
6673 gimple_stmt_iterator gsi;
6674 for (gsi = gsi_start (body); !gsi_end_p (gsi); gsi_next (&gsi))
6675 lower_omp_1 (&gsi, ctx);
6676 input_location = saved_location;
6677 }
6678 \f
6679 /* Main entry point. */
6680
6681 static unsigned int
6682 execute_lower_omp (void)
6683 {
6684 gimple_seq body;
6685
6686 /* This pass always runs, to provide PROP_gimple_lomp.
6687 But there is nothing to do unless -fopenmp is given. */
6688 if (flag_openmp == 0)
6689 return 0;
6690
6691 all_contexts = splay_tree_new (splay_tree_compare_pointers, 0,
6692 delete_omp_context);
6693
6694 body = gimple_body (current_function_decl);
6695 scan_omp (body, NULL);
6696 gcc_assert (taskreg_nesting_level == 0);
6697
6698 if (all_contexts->root)
6699 {
6700 struct gimplify_ctx gctx;
6701
6702 if (task_shared_vars)
6703 push_gimplify_context (&gctx);
6704 lower_omp (body, NULL);
6705 if (task_shared_vars)
6706 pop_gimplify_context (NULL);
6707 }
6708
6709 if (all_contexts)
6710 {
6711 splay_tree_delete (all_contexts);
6712 all_contexts = NULL;
6713 }
6714 BITMAP_FREE (task_shared_vars);
6715 return 0;
6716 }
6717
6718 struct gimple_opt_pass pass_lower_omp =
6719 {
6720 {
6721 GIMPLE_PASS,
6722 "omplower", /* name */
6723 NULL, /* gate */
6724 execute_lower_omp, /* execute */
6725 NULL, /* sub */
6726 NULL, /* next */
6727 0, /* static_pass_number */
6728 TV_NONE, /* tv_id */
6729 PROP_gimple_any, /* properties_required */
6730 PROP_gimple_lomp, /* properties_provided */
6731 0, /* properties_destroyed */
6732 0, /* todo_flags_start */
6733 0 /* todo_flags_finish */
6734 }
6735 };
6736 \f
6737 /* The following is a utility to diagnose OpenMP structured block violations.
6738 It is not part of the "omplower" pass, as that's invoked too late. It
6739 should be invoked by the respective front ends after gimplification. */
6740
6741 static splay_tree all_labels;
6742
6743 /* Check for mismatched contexts and generate an error if needed. Return
6744 true if an error is detected. */
6745
6746 static bool
6747 diagnose_sb_0 (gimple_stmt_iterator *gsi_p,
6748 gimple branch_ctx, gimple label_ctx)
6749 {
6750 if (label_ctx == branch_ctx)
6751 return false;
6752
6754 /*
6755 Previously we kept track of the label's entire context in diagnose_sb_[12]
6756 so we could traverse it and issue a correct "exit" or "enter" error
6757 message upon a structured block violation.
6758
6759 We built the context by building a list with tree_cons'ing, but there is
6760 no easy counterpart in gimple tuples. It seems like far too much work
6761 for issuing exit/enter error messages. If someone really misses the
6762 distinct error message... patches welcome.
6763 */
6764
6765 #if 0
6766 /* Try to avoid confusing the user by producing an error message
6767 with correct "exit" or "enter" verbiage. We prefer "exit"
6768 unless we can show that LABEL_CTX is nested within BRANCH_CTX. */
6769 if (branch_ctx == NULL)
6770 exit_p = false;
6771 else
6772 {
6773 while (label_ctx)
6774 {
6775 if (TREE_VALUE (label_ctx) == branch_ctx)
6776 {
6777 exit_p = false;
6778 break;
6779 }
6780 label_ctx = TREE_CHAIN (label_ctx);
6781 }
6782 }
6783
6784 if (exit_p)
6785 error ("invalid exit from OpenMP structured block");
6786 else
6787 error ("invalid entry to OpenMP structured block");
6788 #endif
6789
6790 /* If it's obvious we have an invalid entry, be specific about the error. */
6791 if (branch_ctx == NULL)
6792 error ("invalid entry to OpenMP structured block");
6793 else
6794 /* Otherwise, be vague and lazy, but efficient. */
6795 error ("invalid branch to/from an OpenMP structured block");
6796
6797 gsi_replace (gsi_p, gimple_build_nop (), false);
6798 return true;
6799 }
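/* For example, given

       goto l;
       #pragma omp parallel
         { l:; }

   the branch context is NULL while the label's recorded context is the
   GIMPLE_OMP_PARALLEL, so the specific "invalid entry" message is
   emitted; a goto in the opposite direction gets the generic
   to/from message.  */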
6800
6801 /* Pass 1: Create a minimal tree of OpenMP structured blocks, and record
6802 where each label is found. */
6803
6804 static tree
6805 diagnose_sb_1 (gimple_stmt_iterator *gsi_p, bool *handled_ops_p,
6806 struct walk_stmt_info *wi)
6807 {
6808 gimple context = (gimple) wi->info;
6809 gimple inner_context;
6810 gimple stmt = gsi_stmt (*gsi_p);
6811
6812 *handled_ops_p = true;
6813
6814 switch (gimple_code (stmt))
6815 {
6816 WALK_SUBSTMTS;
6817
6818 case GIMPLE_OMP_PARALLEL:
6819 case GIMPLE_OMP_TASK:
6820 case GIMPLE_OMP_SECTIONS:
6821 case GIMPLE_OMP_SINGLE:
6822 case GIMPLE_OMP_SECTION:
6823 case GIMPLE_OMP_MASTER:
6824 case GIMPLE_OMP_ORDERED:
6825 case GIMPLE_OMP_CRITICAL:
6826 /* The minimal context here is just the current OMP construct. */
6827 inner_context = stmt;
6828 wi->info = inner_context;
6829 walk_gimple_seq (gimple_omp_body (stmt), diagnose_sb_1, NULL, wi);
6830 wi->info = context;
6831 break;
6832
6833 case GIMPLE_OMP_FOR:
6834 inner_context = stmt;
6835 wi->info = inner_context;
6836 /* gimple_omp_for_{index,initial,final} are all DECLs; no need to
6837 walk them. */
6838 walk_gimple_seq (gimple_omp_for_pre_body (stmt),
6839 diagnose_sb_1, NULL, wi);
6840 walk_gimple_seq (gimple_omp_body (stmt), diagnose_sb_1, NULL, wi);
6841 wi->info = context;
6842 break;
6843
6844 case GIMPLE_LABEL:
6845 splay_tree_insert (all_labels, (splay_tree_key) gimple_label_label (stmt),
6846 (splay_tree_value) context);
6847 break;
6848
6849 default:
6850 break;
6851 }
6852
6853 return NULL_TREE;
6854 }
6855
6856 /* Pass 2: Check each branch and see if its context differs from that of
6857 the destination label's context. */
6858
6859 static tree
6860 diagnose_sb_2 (gimple_stmt_iterator *gsi_p, bool *handled_ops_p,
6861 struct walk_stmt_info *wi)
6862 {
6863 gimple context = (gimple) wi->info;
6864 splay_tree_node n;
6865 gimple stmt = gsi_stmt (*gsi_p);
6866
6867 *handled_ops_p = true;
6868
6869 switch (gimple_code (stmt))
6870 {
6871 WALK_SUBSTMTS;
6872
6873 case GIMPLE_OMP_PARALLEL:
6874 case GIMPLE_OMP_TASK:
6875 case GIMPLE_OMP_SECTIONS:
6876 case GIMPLE_OMP_SINGLE:
6877 case GIMPLE_OMP_SECTION:
6878 case GIMPLE_OMP_MASTER:
6879 case GIMPLE_OMP_ORDERED:
6880 case GIMPLE_OMP_CRITICAL:
6881 wi->info = stmt;
6882 walk_gimple_seq (gimple_omp_body (stmt), diagnose_sb_2, NULL, wi);
6883 wi->info = context;
6884 break;
6885
6886 case GIMPLE_OMP_FOR:
6887 wi->info = stmt;
6888 /* gimple_omp_for_{index,initial,final} are all DECLs; no need to
6889 walk them. */
6890 walk_gimple_seq (gimple_omp_for_pre_body (stmt),
6891 diagnose_sb_2, NULL, wi);
6892 walk_gimple_seq (gimple_omp_body (stmt), diagnose_sb_2, NULL, wi);
6893 wi->info = context;
6894 break;
6895
6896 case GIMPLE_COND:
6897 {
6898 tree lab = gimple_cond_true_label (stmt);
6899 if (lab)
6900 {
6901 n = splay_tree_lookup (all_labels,
6902 (splay_tree_key) lab);
6903 diagnose_sb_0 (gsi_p, context,
6904 n ? (gimple) n->value : NULL);
6905 }
6906 lab = gimple_cond_false_label (stmt);
6907 if (lab)
6908 {
6909 n = splay_tree_lookup (all_labels,
6910 (splay_tree_key) lab);
6911 diagnose_sb_0 (gsi_p, context,
6912 n ? (gimple) n->value : NULL);
6913 }
6914 }
6915 break;
6916
6917 case GIMPLE_GOTO:
6918 {
6919 tree lab = gimple_goto_dest (stmt);
6920 if (TREE_CODE (lab) != LABEL_DECL)
6921 break;
6922
6923 n = splay_tree_lookup (all_labels, (splay_tree_key) lab);
6924 diagnose_sb_0 (gsi_p, context, n ? (gimple) n->value : NULL);
6925 }
6926 break;
6927
6928 case GIMPLE_SWITCH:
6929 {
6930 unsigned int i;
6931 for (i = 0; i < gimple_switch_num_labels (stmt); ++i)
6932 {
6933 tree lab = CASE_LABEL (gimple_switch_label (stmt, i));
6934 n = splay_tree_lookup (all_labels, (splay_tree_key) lab);
6935 if (n && diagnose_sb_0 (gsi_p, context, (gimple) n->value))
6936 break;
6937 }
6938 }
6939 break;
6940
6941 case GIMPLE_RETURN:
6942 diagnose_sb_0 (gsi_p, context, NULL);
6943 break;
6944
6945 default:
6946 break;
6947 }
6948
6949 return NULL_TREE;
6950 }
6951
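/* Called from the front ends after gimplification: walk the function
   body twice, first recording each label's context, then checking
   every branch against the context of its destination label.  */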
6952 static unsigned int
6953 diagnose_omp_structured_block_errors (void)
6954 {
6955 struct walk_stmt_info wi;
6956 gimple_seq body = gimple_body (current_function_decl);
6957
6958 all_labels = splay_tree_new (splay_tree_compare_pointers, 0, 0);
6959
6960 memset (&wi, 0, sizeof (wi));
6961 walk_gimple_seq (body, diagnose_sb_1, NULL, &wi);
6962
6963 memset (&wi, 0, sizeof (wi));
6964 wi.want_locations = true;
6965 walk_gimple_seq (body, diagnose_sb_2, NULL, &wi);
6966
6967 splay_tree_delete (all_labels);
6968 all_labels = NULL;
6969
6970 return 0;
6971 }
6972
6973 static bool
6974 gate_diagnose_omp_blocks (void)
6975 {
6976 return flag_openmp != 0;
6977 }
6978
6979 struct gimple_opt_pass pass_diagnose_omp_blocks =
6980 {
6981 {
6982 GIMPLE_PASS,
6983 "*diagnose_omp_blocks", /* name */
6984 gate_diagnose_omp_blocks, /* gate */
6985 diagnose_omp_structured_block_errors, /* execute */
6986 NULL, /* sub */
6987 NULL, /* next */
6988 0, /* static_pass_number */
6989 TV_NONE, /* tv_id */
6990 PROP_gimple_any, /* properties_required */
6991 0, /* properties_provided */
6992 0, /* properties_destroyed */
6993 0, /* todo_flags_start */
6994 0, /* todo_flags_finish */
6995 }
6996 };
6997
6998 #include "gt-omp-low.h"