/* Lowering pass for OpenMP directives.  Converts OpenMP directives
   into explicit calls to the runtime library (libgomp) and data
   marshalling to implement data sharing and copying clauses.
   Contributed by Diego Novillo <dnovillo@redhat.com>

   Copyright (C) 2005, 2006, 2007, 2008, 2009, 2010
   Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
#include "rtl.h"
#include "gimple.h"
#include "tree-iterator.h"
#include "tree-inline.h"
#include "langhooks.h"
#include "diagnostic-core.h"
#include "tree-flow.h"
#include "timevar.h"
#include "flags.h"
#include "function.h"
#include "expr.h"
#include "tree-pass.h"
#include "ggc.h"
#include "except.h"
#include "splay-tree.h"
#include "optabs.h"
#include "cfgloop.h"


/* Lowering of OpenMP parallel and workshare constructs proceeds in two
   phases.  The first phase scans the function looking for OMP statements
   and then for variables that must be replaced to satisfy data sharing
   clauses.  The second phase expands code for the constructs, as well as
   re-gimplifying things when variables have been replaced with complex
   expressions.

   Final code generation is done by pass_expand_omp.  The flowgraph is
   scanned for parallel regions which are then moved to a new
   function, to be invoked by the thread library.  */
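
/* As a minimal illustrative sketch (the names and the exact shape of
   the dump below are made up for exposition, not taken from any
   particular compilation), a function foo containing

	int i = 0;
	#pragma omp parallel shared (i)
	  i++;

   is eventually expanded into roughly

	struct .omp_data_s { int i; } .omp_data_o;
	.omp_data_o.i = i;
	__builtin_GOMP_parallel_start (foo._omp_fn.0, &.omp_data_o, 0);
	foo._omp_fn.0 (&.omp_data_o);
	__builtin_GOMP_parallel_end ();
	i = .omp_data_o.i;

   with the parallel body moved into the new child function
   foo._omp_fn.0.  */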

/* Context structure.  Used to store information about each parallel
   directive in the code.  */

typedef struct omp_context
{
  /* This field must be at the beginning, as we do "inheritance": Some
     callback functions for tree-inline.c (e.g., omp_copy_decl)
     receive a copy_body_data pointer that is up-casted to an
     omp_context pointer.  */
  copy_body_data cb;

  /* The tree of contexts corresponding to the encountered constructs.  */
  struct omp_context *outer;
  gimple stmt;

  /* Map variables to fields in a structure that allows communication
     between sending and receiving threads.  */
  splay_tree field_map;
  tree record_type;
  tree sender_decl;
  tree receiver_decl;

  /* These are used just by task contexts, if the task firstprivate fn is
     needed.  srecord_type is used to communicate from the thread
     that encountered the task construct to the task firstprivate fn;
     record_type is allocated by GOMP_task, initialized by the task
     firstprivate fn and passed to the task body fn.  */
  splay_tree sfield_map;
  tree srecord_type;

  /* A chain of variables to add to the top-level block surrounding the
     construct.  In the case of a parallel, this is in the child function.  */
  tree block_vars;

  /* What to do with variables with implicitly determined sharing
     attributes.  */
  enum omp_clause_default_kind default_kind;

  /* Nesting depth of this context.  Used to beautify error messages re
     invalid gotos.  The outermost ctx is depth 1, with depth 0 being
     reserved for the main body of the function.  */
  int depth;

  /* True if this parallel directive is nested within another.  */
  bool is_nested;
} omp_context;


struct omp_for_data_loop
{
  tree v, n1, n2, step;
  enum tree_code cond_code;
};

/* A structure describing the main elements of a parallel loop.  */

struct omp_for_data
{
  struct omp_for_data_loop loop;
  tree chunk_size;
  gimple for_stmt;
  tree pre, iter_type;
  int collapse;
  bool have_nowait, have_ordered;
  enum omp_clause_schedule_kind sched_kind;
  struct omp_for_data_loop *loops;
};
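
/* For instance (an illustrative sketch of how extract_omp_for_data
   below fills this in), for

	#pragma omp for schedule (dynamic, 4)
	for (i = 0; i < n; i += 2)

   the result is roughly: loop.v = i, loop.n1 = 0, loop.n2 = n,
   loop.step = 2, loop.cond_code = LT_EXPR, collapse = 1,
   sched_kind = OMP_CLAUSE_SCHEDULE_DYNAMIC and chunk_size = 4.  */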


static splay_tree all_contexts;
static int taskreg_nesting_level;
struct omp_region *root_omp_region;
static bitmap task_shared_vars;

static void scan_omp (gimple_seq, omp_context *);
static tree scan_omp_1_op (tree *, int *, void *);

#define WALK_SUBSTMTS  \
    case GIMPLE_BIND: \
    case GIMPLE_TRY: \
    case GIMPLE_CATCH: \
    case GIMPLE_EH_FILTER: \
      /* The sub-statements for these should be walked.  */ \
      *handled_ops_p = false; \
      break;
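
/* Usage sketch: a walk_gimple_stmt callback expands this macro inside
   its switch so that container statements are recursed into rather
   than treated as handled, e.g.

	switch (gimple_code (stmt))
	  {
	  WALK_SUBSTMTS;

	  case GIMPLE_OMP_FOR:
	    ...
	  default:
	    break;
	  }
*/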

/* Convenience function for calling scan_omp_1_op on tree operands.  */

static inline tree
scan_omp_op (tree *tp, omp_context *ctx)
{
  struct walk_stmt_info wi;

  memset (&wi, 0, sizeof (wi));
  wi.info = ctx;
  wi.want_locations = true;

  return walk_tree (tp, scan_omp_1_op, &wi, NULL);
}

static void lower_omp (gimple_seq, omp_context *);
static tree lookup_decl_in_outer_ctx (tree, omp_context *);
static tree maybe_lookup_decl_in_outer_ctx (tree, omp_context *);

/* Find an OpenMP clause of type KIND within CLAUSES.  */

tree
find_omp_clause (tree clauses, enum omp_clause_code kind)
{
  for (; clauses ; clauses = OMP_CLAUSE_CHAIN (clauses))
    if (OMP_CLAUSE_CODE (clauses) == kind)
      return clauses;

  return NULL_TREE;
}
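
/* E.g., to test whether a loop directive carries an ordered clause
   (as done in check_omp_nesting_restrictions below):

	if (find_omp_clause (gimple_omp_for_clauses (stmt),
			     OMP_CLAUSE_ORDERED))
	  ...
*/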

/* Return true if CTX is for an omp parallel.  */

static inline bool
is_parallel_ctx (omp_context *ctx)
{
  return gimple_code (ctx->stmt) == GIMPLE_OMP_PARALLEL;
}


/* Return true if CTX is for an omp task.  */

static inline bool
is_task_ctx (omp_context *ctx)
{
  return gimple_code (ctx->stmt) == GIMPLE_OMP_TASK;
}


/* Return true if CTX is for an omp parallel or omp task.  */

static inline bool
is_taskreg_ctx (omp_context *ctx)
{
  return gimple_code (ctx->stmt) == GIMPLE_OMP_PARALLEL
	 || gimple_code (ctx->stmt) == GIMPLE_OMP_TASK;
}


/* Return true if REGION is a combined parallel+workshare region.  */

static inline bool
is_combined_parallel (struct omp_region *region)
{
  return region->is_combined_parallel;
}


/* Extract the header elements of parallel loop FOR_STMT and store
   them into *FD.  */

static void
extract_omp_for_data (gimple for_stmt, struct omp_for_data *fd,
		      struct omp_for_data_loop *loops)
{
  tree t, var, *collapse_iter, *collapse_count;
  tree count = NULL_TREE, iter_type = long_integer_type_node;
  struct omp_for_data_loop *loop;
  int i;
  struct omp_for_data_loop dummy_loop;
  location_t loc = gimple_location (for_stmt);

  fd->for_stmt = for_stmt;
  fd->pre = NULL;
  fd->collapse = gimple_omp_for_collapse (for_stmt);
  if (fd->collapse > 1)
    fd->loops = loops;
  else
    fd->loops = &fd->loop;

  fd->have_nowait = fd->have_ordered = false;
  fd->sched_kind = OMP_CLAUSE_SCHEDULE_STATIC;
  fd->chunk_size = NULL_TREE;
  collapse_iter = NULL;
  collapse_count = NULL;

  for (t = gimple_omp_for_clauses (for_stmt); t ; t = OMP_CLAUSE_CHAIN (t))
    switch (OMP_CLAUSE_CODE (t))
      {
      case OMP_CLAUSE_NOWAIT:
	fd->have_nowait = true;
	break;
      case OMP_CLAUSE_ORDERED:
	fd->have_ordered = true;
	break;
      case OMP_CLAUSE_SCHEDULE:
	fd->sched_kind = OMP_CLAUSE_SCHEDULE_KIND (t);
	fd->chunk_size = OMP_CLAUSE_SCHEDULE_CHUNK_EXPR (t);
	break;
      case OMP_CLAUSE_COLLAPSE:
	if (fd->collapse > 1)
	  {
	    collapse_iter = &OMP_CLAUSE_COLLAPSE_ITERVAR (t);
	    collapse_count = &OMP_CLAUSE_COLLAPSE_COUNT (t);
	  }
      default:
	break;
      }

  /* FIXME: for now map schedule(auto) to schedule(static).
     There should be analysis to determine whether all iterations
     are approximately the same amount of work (then schedule(static)
     is best) or if it varies (then schedule(dynamic,N) is better).  */
  if (fd->sched_kind == OMP_CLAUSE_SCHEDULE_AUTO)
    {
      fd->sched_kind = OMP_CLAUSE_SCHEDULE_STATIC;
      gcc_assert (fd->chunk_size == NULL);
    }
  gcc_assert (fd->collapse == 1 || collapse_iter != NULL);
  if (fd->sched_kind == OMP_CLAUSE_SCHEDULE_RUNTIME)
    gcc_assert (fd->chunk_size == NULL);
  else if (fd->chunk_size == NULL)
    {
      /* We only need to compute a default chunk size for ordered
	 static loops and dynamic loops.  */
      if (fd->sched_kind != OMP_CLAUSE_SCHEDULE_STATIC
	  || fd->have_ordered
	  || fd->collapse > 1)
	fd->chunk_size = (fd->sched_kind == OMP_CLAUSE_SCHEDULE_STATIC)
			 ? integer_zero_node : integer_one_node;
    }

  for (i = 0; i < fd->collapse; i++)
    {
      if (fd->collapse == 1)
	loop = &fd->loop;
      else if (loops != NULL)
	loop = loops + i;
      else
	loop = &dummy_loop;


      loop->v = gimple_omp_for_index (for_stmt, i);
      gcc_assert (SSA_VAR_P (loop->v));
      gcc_assert (TREE_CODE (TREE_TYPE (loop->v)) == INTEGER_TYPE
		  || TREE_CODE (TREE_TYPE (loop->v)) == POINTER_TYPE);
      var = TREE_CODE (loop->v) == SSA_NAME ? SSA_NAME_VAR (loop->v) : loop->v;
      loop->n1 = gimple_omp_for_initial (for_stmt, i);

      loop->cond_code = gimple_omp_for_cond (for_stmt, i);
      loop->n2 = gimple_omp_for_final (for_stmt, i);
      switch (loop->cond_code)
	{
	case LT_EXPR:
	case GT_EXPR:
	  break;
	case LE_EXPR:
	  if (POINTER_TYPE_P (TREE_TYPE (loop->n2)))
	    loop->n2 = fold_build2_loc (loc,
					POINTER_PLUS_EXPR, TREE_TYPE (loop->n2),
					loop->n2, size_one_node);
	  else
	    loop->n2 = fold_build2_loc (loc,
					PLUS_EXPR, TREE_TYPE (loop->n2), loop->n2,
					build_int_cst (TREE_TYPE (loop->n2), 1));
	  loop->cond_code = LT_EXPR;
	  break;
	case GE_EXPR:
	  if (POINTER_TYPE_P (TREE_TYPE (loop->n2)))
	    loop->n2 = fold_build2_loc (loc,
					POINTER_PLUS_EXPR, TREE_TYPE (loop->n2),
					loop->n2, size_int (-1));
	  else
	    loop->n2 = fold_build2_loc (loc,
					MINUS_EXPR, TREE_TYPE (loop->n2), loop->n2,
					build_int_cst (TREE_TYPE (loop->n2), 1));
	  loop->cond_code = GT_EXPR;
	  break;
	default:
	  gcc_unreachable ();
	}
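
      /* After the normalization above only LT_EXPR and GT_EXPR remain:
	 e.g. i <= n2 has become i < n2 + 1 and i >= n2 has become
	 i > n2 - 1.  */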

      t = gimple_omp_for_incr (for_stmt, i);
      gcc_assert (TREE_OPERAND (t, 0) == var);
      switch (TREE_CODE (t))
	{
	case PLUS_EXPR:
	case POINTER_PLUS_EXPR:
	  loop->step = TREE_OPERAND (t, 1);
	  break;
	case MINUS_EXPR:
	  loop->step = TREE_OPERAND (t, 1);
	  loop->step = fold_build1_loc (loc,
					NEGATE_EXPR, TREE_TYPE (loop->step),
					loop->step);
	  break;
	default:
	  gcc_unreachable ();
	}

      if (iter_type != long_long_unsigned_type_node)
	{
	  if (POINTER_TYPE_P (TREE_TYPE (loop->v)))
	    iter_type = long_long_unsigned_type_node;
	  else if (TYPE_UNSIGNED (TREE_TYPE (loop->v))
		   && TYPE_PRECISION (TREE_TYPE (loop->v))
		      >= TYPE_PRECISION (iter_type))
	    {
	      tree n;

	      if (loop->cond_code == LT_EXPR)
		n = fold_build2_loc (loc,
				     PLUS_EXPR, TREE_TYPE (loop->v),
				     loop->n2, loop->step);
	      else
		n = loop->n1;
	      if (TREE_CODE (n) != INTEGER_CST
		  || tree_int_cst_lt (TYPE_MAX_VALUE (iter_type), n))
		iter_type = long_long_unsigned_type_node;
	    }
	  else if (TYPE_PRECISION (TREE_TYPE (loop->v))
		   > TYPE_PRECISION (iter_type))
	    {
	      tree n1, n2;

	      if (loop->cond_code == LT_EXPR)
		{
		  n1 = loop->n1;
		  n2 = fold_build2_loc (loc,
					PLUS_EXPR, TREE_TYPE (loop->v),
					loop->n2, loop->step);
		}
	      else
		{
		  n1 = fold_build2_loc (loc,
					MINUS_EXPR, TREE_TYPE (loop->v),
					loop->n2, loop->step);
		  n2 = loop->n1;
		}
	      if (TREE_CODE (n1) != INTEGER_CST
		  || TREE_CODE (n2) != INTEGER_CST
		  || !tree_int_cst_lt (TYPE_MIN_VALUE (iter_type), n1)
		  || !tree_int_cst_lt (n2, TYPE_MAX_VALUE (iter_type)))
		iter_type = long_long_unsigned_type_node;
	    }
	}

      if (collapse_count && *collapse_count == NULL)
	{
	  if ((i == 0 || count != NULL_TREE)
	      && TREE_CODE (TREE_TYPE (loop->v)) == INTEGER_TYPE
	      && TREE_CONSTANT (loop->n1)
	      && TREE_CONSTANT (loop->n2)
	      && TREE_CODE (loop->step) == INTEGER_CST)
	    {
	      tree itype = TREE_TYPE (loop->v);

	      if (POINTER_TYPE_P (itype))
		itype
		  = lang_hooks.types.type_for_size (TYPE_PRECISION (itype), 0);
	      t = build_int_cst (itype, (loop->cond_code == LT_EXPR ? -1 : 1));
	      t = fold_build2_loc (loc,
				   PLUS_EXPR, itype,
				   fold_convert_loc (loc, itype, loop->step), t);
	      t = fold_build2_loc (loc, PLUS_EXPR, itype, t,
				   fold_convert_loc (loc, itype, loop->n2));
	      t = fold_build2_loc (loc, MINUS_EXPR, itype, t,
				   fold_convert_loc (loc, itype, loop->n1));
	      if (TYPE_UNSIGNED (itype) && loop->cond_code == GT_EXPR)
		t = fold_build2_loc (loc, TRUNC_DIV_EXPR, itype,
				     fold_build1_loc (loc, NEGATE_EXPR, itype, t),
				     fold_build1_loc (loc, NEGATE_EXPR, itype,
						      fold_convert_loc (loc, itype,
									loop->step)));
	      else
		t = fold_build2_loc (loc, TRUNC_DIV_EXPR, itype, t,
				     fold_convert_loc (loc, itype, loop->step));
	      t = fold_convert_loc (loc, long_long_unsigned_type_node, t);
	      if (count != NULL_TREE)
		count = fold_build2_loc (loc,
					 MULT_EXPR, long_long_unsigned_type_node,
					 count, t);
	      else
		count = t;
	      if (TREE_CODE (count) != INTEGER_CST)
		count = NULL_TREE;
	    }
	  else
	    count = NULL_TREE;
	}
    }

  if (count)
    {
      if (!tree_int_cst_lt (count, TYPE_MAX_VALUE (long_integer_type_node)))
	iter_type = long_long_unsigned_type_node;
      else
	iter_type = long_integer_type_node;
    }
  else if (collapse_iter && *collapse_iter != NULL)
    iter_type = TREE_TYPE (*collapse_iter);
  fd->iter_type = iter_type;
  if (collapse_iter && *collapse_iter == NULL)
    *collapse_iter = create_tmp_var (iter_type, ".iter");
  if (collapse_count && *collapse_count == NULL)
    {
      if (count)
	*collapse_count = fold_convert_loc (loc, iter_type, count);
      else
	*collapse_count = create_tmp_var (iter_type, ".count");
    }

  if (fd->collapse > 1)
    {
      fd->loop.v = *collapse_iter;
      fd->loop.n1 = build_int_cst (TREE_TYPE (fd->loop.v), 0);
      fd->loop.n2 = *collapse_count;
      fd->loop.step = build_int_cst (TREE_TYPE (fd->loop.v), 1);
      fd->loop.cond_code = LT_EXPR;
    }
}
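
/* For illustration: given

	#pragma omp for collapse (2)
	for (i = 0; i < 8; i++)
	  for (j = 0; j < 4; j++)

   the computation above flattens the iteration space; *collapse_count
   becomes 32 (8 * 4), and FD describes a single synthesized loop
   running the .iter variable from 0 up to (but not including) 32 with
   step 1 and cond_code LT_EXPR.  */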


/* Given two blocks PAR_ENTRY_BB and WS_ENTRY_BB such that PAR_ENTRY_BB
   is the immediate dominator of WS_ENTRY_BB, return true if there
   are no data dependencies that would prevent expanding the parallel
   directive at PAR_ENTRY_BB as a combined parallel+workshare region.

   When expanding a combined parallel+workshare region, the call to
   the child function may need additional arguments in the case of
   GIMPLE_OMP_FOR regions.  In some cases, these arguments are
   computed out of variables passed in from the parent to the child
   via 'struct .omp_data_s'.  For instance:

	#pragma omp parallel for schedule (guided, i * 4)
	for (j ...)

   is lowered into:

	# BLOCK 2 (PAR_ENTRY_BB)
	.omp_data_o.i = i;
	#pragma omp parallel [child fn: bar.omp_fn.0 ( ..., D.1598)]

	# BLOCK 3 (WS_ENTRY_BB)
	.omp_data_i = &.omp_data_o;
	D.1667 = .omp_data_i->i;
	D.1598 = D.1667 * 4;
	#pragma omp for schedule (guided, D.1598)

   When we outline the parallel region, the call to the child function
   'bar.omp_fn.0' will need the value D.1598 in its argument list, but
   that value is computed *after* the call site.  So, in principle we
   cannot do the transformation.

   To see whether the code in WS_ENTRY_BB blocks the combined
   parallel+workshare call, we collect all the variables used in the
   GIMPLE_OMP_FOR header and check whether they appear on the LHS of
   any statement in WS_ENTRY_BB.  If so, then we cannot emit the
   combined call.

   FIXME.  If we had the SSA form built at this point, we could merely
   hoist the code in block 3 into block 2 and be done with it.  But at
   this point we don't have dataflow information and though we could
   hack something up here, it is really not worth the aggravation.  */

static bool
workshare_safe_to_combine_p (basic_block ws_entry_bb)
{
  struct omp_for_data fd;
  gimple ws_stmt = last_stmt (ws_entry_bb);

  if (gimple_code (ws_stmt) == GIMPLE_OMP_SECTIONS)
    return true;

  gcc_assert (gimple_code (ws_stmt) == GIMPLE_OMP_FOR);

  extract_omp_for_data (ws_stmt, &fd, NULL);

  if (fd.collapse > 1 && TREE_CODE (fd.loop.n2) != INTEGER_CST)
    return false;
  if (fd.iter_type != long_integer_type_node)
    return false;

  /* FIXME.  We give up too easily here.  If any of these arguments
     are not constants, they will likely involve variables that have
     been mapped into fields of .omp_data_s for sharing with the child
     function.  With appropriate data flow, it would be possible to
     see through this.  */
  if (!is_gimple_min_invariant (fd.loop.n1)
      || !is_gimple_min_invariant (fd.loop.n2)
      || !is_gimple_min_invariant (fd.loop.step)
      || (fd.chunk_size && !is_gimple_min_invariant (fd.chunk_size)))
    return false;

  return true;
}


/* Collect additional arguments needed to emit a combined
   parallel+workshare call.  WS_STMT is the workshare directive being
   expanded.  */

static VEC(tree,gc) *
get_ws_args_for (gimple ws_stmt)
{
  tree t;
  location_t loc = gimple_location (ws_stmt);
  VEC(tree,gc) *ws_args;

  if (gimple_code (ws_stmt) == GIMPLE_OMP_FOR)
    {
      struct omp_for_data fd;

      extract_omp_for_data (ws_stmt, &fd, NULL);

      ws_args = VEC_alloc (tree, gc, 3 + (fd.chunk_size != 0));

      t = fold_convert_loc (loc, long_integer_type_node, fd.loop.n1);
      VEC_quick_push (tree, ws_args, t);

      t = fold_convert_loc (loc, long_integer_type_node, fd.loop.n2);
      VEC_quick_push (tree, ws_args, t);

      t = fold_convert_loc (loc, long_integer_type_node, fd.loop.step);
      VEC_quick_push (tree, ws_args, t);

      if (fd.chunk_size)
	{
	  t = fold_convert_loc (loc, long_integer_type_node, fd.chunk_size);
	  VEC_quick_push (tree, ws_args, t);
	}

      return ws_args;
    }
  else if (gimple_code (ws_stmt) == GIMPLE_OMP_SECTIONS)
    {
      /* Number of sections is equal to the number of edges from the
	 GIMPLE_OMP_SECTIONS_SWITCH statement, except for the one to
	 the exit of the sections region.  */
      basic_block bb = single_succ (gimple_bb (ws_stmt));
      t = build_int_cst (unsigned_type_node, EDGE_COUNT (bb->succs) - 1);
      ws_args = VEC_alloc (tree, gc, 1);
      VEC_quick_push (tree, ws_args, t);
      return ws_args;
    }

  gcc_unreachable ();
}
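
/* E.g. for schedule (dynamic, 4) on a loop from 0 to n with step 1,
   this collects {0, n, 1, 4}; when the region is expanded, these
   become the extra trailing arguments of the combined
   GOMP_parallel_loop_* libgomp entry points.  */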


/* Discover whether REGION is a combined parallel+workshare region.  */

static void
determine_parallel_type (struct omp_region *region)
{
  basic_block par_entry_bb, par_exit_bb;
  basic_block ws_entry_bb, ws_exit_bb;

  if (region == NULL || region->inner == NULL
      || region->exit == NULL || region->inner->exit == NULL
      || region->inner->cont == NULL)
    return;

  /* We only support parallel+for and parallel+sections.  */
  if (region->type != GIMPLE_OMP_PARALLEL
      || (region->inner->type != GIMPLE_OMP_FOR
	  && region->inner->type != GIMPLE_OMP_SECTIONS))
    return;

  /* Check for perfect nesting PAR_ENTRY_BB -> WS_ENTRY_BB and
     WS_EXIT_BB -> PAR_EXIT_BB.  */
  par_entry_bb = region->entry;
  par_exit_bb = region->exit;
  ws_entry_bb = region->inner->entry;
  ws_exit_bb = region->inner->exit;

  if (single_succ (par_entry_bb) == ws_entry_bb
      && single_succ (ws_exit_bb) == par_exit_bb
      && workshare_safe_to_combine_p (ws_entry_bb)
      && (gimple_omp_parallel_combined_p (last_stmt (par_entry_bb))
	  || (last_and_only_stmt (ws_entry_bb)
	      && last_and_only_stmt (par_exit_bb))))
    {
      gimple ws_stmt = last_stmt (ws_entry_bb);

      if (region->inner->type == GIMPLE_OMP_FOR)
	{
	  /* If this is a combined parallel loop, we need to determine
	     whether or not to use the combined library calls.  There
	     are two cases where we do not apply the transformation:
	     static loops and any kind of ordered loop.  In the first
	     case, we already open code the loop so there is no need
	     to do anything else.  In the latter case, the combined
	     parallel loop call would still need extra synchronization
	     to implement ordered semantics, so there would not be any
	     gain in using the combined call.  */
	  tree clauses = gimple_omp_for_clauses (ws_stmt);
	  tree c = find_omp_clause (clauses, OMP_CLAUSE_SCHEDULE);
	  if (c == NULL
	      || OMP_CLAUSE_SCHEDULE_KIND (c) == OMP_CLAUSE_SCHEDULE_STATIC
	      || find_omp_clause (clauses, OMP_CLAUSE_ORDERED))
	    {
	      region->is_combined_parallel = false;
	      region->inner->is_combined_parallel = false;
	      return;
	    }
	}

      region->is_combined_parallel = true;
      region->inner->is_combined_parallel = true;
      region->ws_args = get_ws_args_for (ws_stmt);
    }
}


/* Return true if EXPR is variable sized.  */

static inline bool
is_variable_sized (const_tree expr)
{
  return !TREE_CONSTANT (TYPE_SIZE_UNIT (TREE_TYPE (expr)));
}

/* Return true if DECL is a reference type.  */

static inline bool
is_reference (tree decl)
{
  return lang_hooks.decls.omp_privatize_by_reference (decl);
}

/* Lookup variables in the decl or field splay trees.  The "maybe" form
   allows the variable to not have been entered; otherwise we assert
   that the variable must have been entered.  */

static inline tree
lookup_decl (tree var, omp_context *ctx)
{
  tree *n;
  n = (tree *) pointer_map_contains (ctx->cb.decl_map, var);
  return *n;
}

static inline tree
maybe_lookup_decl (const_tree var, omp_context *ctx)
{
  tree *n;
  n = (tree *) pointer_map_contains (ctx->cb.decl_map, var);
  return n ? *n : NULL_TREE;
}

static inline tree
lookup_field (tree var, omp_context *ctx)
{
  splay_tree_node n;
  n = splay_tree_lookup (ctx->field_map, (splay_tree_key) var);
  return (tree) n->value;
}

static inline tree
lookup_sfield (tree var, omp_context *ctx)
{
  splay_tree_node n;
  n = splay_tree_lookup (ctx->sfield_map
			 ? ctx->sfield_map : ctx->field_map,
			 (splay_tree_key) var);
  return (tree) n->value;
}

static inline tree
maybe_lookup_field (tree var, omp_context *ctx)
{
  splay_tree_node n;
  n = splay_tree_lookup (ctx->field_map, (splay_tree_key) var);
  return n ? (tree) n->value : NULL_TREE;
}

/* Return true if DECL should be copied by pointer.  SHARED_CTX is
   the parallel context if DECL is to be shared.  */

static bool
use_pointer_for_field (tree decl, omp_context *shared_ctx)
{
  if (AGGREGATE_TYPE_P (TREE_TYPE (decl)))
    return true;

  /* We can only use copy-in/copy-out semantics for shared variables
     when we know the value is not accessible from an outer scope.  */
  if (shared_ctx)
    {
      /* ??? Trivially accessible from anywhere.  But why would we even
	 be passing an address in this case?  Should we simply assert
	 this to be false, or should we have a cleanup pass that removes
	 these from the list of mappings?  */
      if (TREE_STATIC (decl) || DECL_EXTERNAL (decl))
	return true;

      /* For variables with DECL_HAS_VALUE_EXPR_P set, we cannot tell
	 without analyzing the expression whether or not its location
	 is accessible to anyone else.  In the case of nested parallel
	 regions it certainly may be.  */
      if (TREE_CODE (decl) != RESULT_DECL && DECL_HAS_VALUE_EXPR_P (decl))
	return true;

      /* Do not use copy-in/copy-out for variables that have their
	 address taken.  */
      if (TREE_ADDRESSABLE (decl))
	return true;

      /* Disallow copy-in/out in nested parallel if
	 decl is shared in outer parallel, otherwise
	 each thread could store the shared variable
	 in its own copy-in location, making the
	 variable no longer really shared.  */
      if (!TREE_READONLY (decl) && shared_ctx->is_nested)
	{
	  omp_context *up;

	  for (up = shared_ctx->outer; up; up = up->outer)
	    if (is_taskreg_ctx (up) && maybe_lookup_decl (decl, up))
	      break;

	  if (up)
	    {
	      tree c;

	      for (c = gimple_omp_taskreg_clauses (up->stmt);
		   c; c = OMP_CLAUSE_CHAIN (c))
		if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_SHARED
		    && OMP_CLAUSE_DECL (c) == decl)
		  break;

	      if (c)
		return true;
	    }
	}

      /* For tasks avoid using copy-in/out, unless they are readonly
	 (in which case just copy-in is used).  As tasks can be
	 deferred or executed in a different thread, when GOMP_task
	 returns, the task hasn't necessarily terminated.  */
      if (!TREE_READONLY (decl) && is_task_ctx (shared_ctx))
	{
	  tree outer = maybe_lookup_decl_in_outer_ctx (decl, shared_ctx);
	  if (is_gimple_reg (outer))
	    {
	      /* Taking address of OUTER in lower_send_shared_vars
		 might need regimplification of everything that uses the
		 variable.  */
	      if (!task_shared_vars)
		task_shared_vars = BITMAP_ALLOC (NULL);
	      bitmap_set_bit (task_shared_vars, DECL_UID (outer));
	      TREE_ADDRESSABLE (outer) = 1;
	    }
	  return true;
	}
    }

  return false;
}
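
/* For instance, a function-local int that is shared in a parallel
   region and never has its address taken gets copy-in/copy-out
   semantics (a plain int field in .omp_data_s); once it is
   TREE_ADDRESSABLE, the field must instead be an int * pointing at
   the original variable.  */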

/* Create a new VAR_DECL and copy information from VAR to it.  */

tree
copy_var_decl (tree var, tree name, tree type)
{
  tree copy = build_decl (DECL_SOURCE_LOCATION (var), VAR_DECL, name, type);

  TREE_ADDRESSABLE (copy) = TREE_ADDRESSABLE (var);
  TREE_THIS_VOLATILE (copy) = TREE_THIS_VOLATILE (var);
  DECL_GIMPLE_REG_P (copy) = DECL_GIMPLE_REG_P (var);
  DECL_ARTIFICIAL (copy) = DECL_ARTIFICIAL (var);
  DECL_IGNORED_P (copy) = DECL_IGNORED_P (var);
  DECL_CONTEXT (copy) = DECL_CONTEXT (var);
  TREE_USED (copy) = 1;
  DECL_SEEN_IN_BIND_EXPR_P (copy) = 1;

  return copy;
}

/* Construct a new automatic decl similar to VAR.  */

static tree
omp_copy_decl_2 (tree var, tree name, tree type, omp_context *ctx)
{
  tree copy = copy_var_decl (var, name, type);

  DECL_CONTEXT (copy) = current_function_decl;
  DECL_CHAIN (copy) = ctx->block_vars;
  ctx->block_vars = copy;

  return copy;
}

static tree
omp_copy_decl_1 (tree var, omp_context *ctx)
{
  return omp_copy_decl_2 (var, DECL_NAME (var), TREE_TYPE (var), ctx);
}

/* Build tree nodes to access the field for VAR on the receiver side.  */

static tree
build_receiver_ref (tree var, bool by_ref, omp_context *ctx)
{
  tree x, field = lookup_field (var, ctx);

  /* If the receiver record type was remapped in the child function,
     remap the field into the new record type.  */
  x = maybe_lookup_field (field, ctx);
  if (x != NULL)
    field = x;

  x = build_simple_mem_ref (ctx->receiver_decl);
  x = build3 (COMPONENT_REF, TREE_TYPE (field), x, field, NULL);
  if (by_ref)
    x = build_simple_mem_ref (x);

  return x;
}

/* Build tree nodes to access VAR in the scope outer to CTX.  In the case
   of a parallel, this is a component reference; for workshare constructs
   this is some variable.  */

static tree
build_outer_var_ref (tree var, omp_context *ctx)
{
  tree x;

  if (is_global_var (maybe_lookup_decl_in_outer_ctx (var, ctx)))
    x = var;
  else if (is_variable_sized (var))
    {
      x = TREE_OPERAND (DECL_VALUE_EXPR (var), 0);
      x = build_outer_var_ref (x, ctx);
      x = build_simple_mem_ref (x);
    }
  else if (is_taskreg_ctx (ctx))
    {
      bool by_ref = use_pointer_for_field (var, NULL);
      x = build_receiver_ref (var, by_ref, ctx);
    }
  else if (ctx->outer)
    x = lookup_decl (var, ctx->outer);
  else if (is_reference (var))
    /* This can happen with orphaned constructs.  If var is a reference,
       it is possible it is shared and as such valid.  */
    x = var;
  else
    gcc_unreachable ();

  if (is_reference (var))
    x = build_simple_mem_ref (x);

  return x;
}

/* Build tree nodes to access the field for VAR on the sender side.  */

static tree
build_sender_ref (tree var, omp_context *ctx)
{
  tree field = lookup_sfield (var, ctx);
  return build3 (COMPONENT_REF, TREE_TYPE (field),
		 ctx->sender_decl, field, NULL);
}
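
/* Thus, for a variable I communicated by reference, the sender side
   uses this ref to store .omp_data_o.i = &i, while the receiver side
   (build_receiver_ref above) loads *.omp_data_i->i.  */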

/* Add a new field for VAR inside the structure CTX->SENDER_DECL.  */
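/* MASK is interpreted by the code below as a pair of bits: bit 0
   requests a field in CTX->RECORD_TYPE (recorded in FIELD_MAP), bit 1
   one in CTX->SRECORD_TYPE (recorded in SFIELD_MAP); a MASK of 3
   installs the field in both records.  */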

static void
install_var_field (tree var, bool by_ref, int mask, omp_context *ctx)
{
  tree field, type, sfield = NULL_TREE;

  gcc_assert ((mask & 1) == 0
	      || !splay_tree_lookup (ctx->field_map, (splay_tree_key) var));
  gcc_assert ((mask & 2) == 0 || !ctx->sfield_map
	      || !splay_tree_lookup (ctx->sfield_map, (splay_tree_key) var));

  type = TREE_TYPE (var);
  if (by_ref)
    type = build_pointer_type (type);
  else if ((mask & 3) == 1 && is_reference (var))
    type = TREE_TYPE (type);

  field = build_decl (DECL_SOURCE_LOCATION (var),
		      FIELD_DECL, DECL_NAME (var), type);

  /* Remember what variable this field was created for.  This does have a
     side effect of making dwarf2out ignore this member, so for helpful
     debugging we clear it later in delete_omp_context.  */
  DECL_ABSTRACT_ORIGIN (field) = var;
  if (type == TREE_TYPE (var))
    {
      DECL_ALIGN (field) = DECL_ALIGN (var);
      DECL_USER_ALIGN (field) = DECL_USER_ALIGN (var);
      TREE_THIS_VOLATILE (field) = TREE_THIS_VOLATILE (var);
    }
  else
    DECL_ALIGN (field) = TYPE_ALIGN (type);

  if ((mask & 3) == 3)
    {
      insert_field_into_struct (ctx->record_type, field);
      if (ctx->srecord_type)
	{
	  sfield = build_decl (DECL_SOURCE_LOCATION (var),
			       FIELD_DECL, DECL_NAME (var), type);
	  DECL_ABSTRACT_ORIGIN (sfield) = var;
	  DECL_ALIGN (sfield) = DECL_ALIGN (field);
	  DECL_USER_ALIGN (sfield) = DECL_USER_ALIGN (field);
	  TREE_THIS_VOLATILE (sfield) = TREE_THIS_VOLATILE (field);
	  insert_field_into_struct (ctx->srecord_type, sfield);
	}
    }
  else
    {
      if (ctx->srecord_type == NULL_TREE)
	{
	  tree t;

	  ctx->srecord_type = lang_hooks.types.make_type (RECORD_TYPE);
	  ctx->sfield_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
	  for (t = TYPE_FIELDS (ctx->record_type); t ; t = TREE_CHAIN (t))
	    {
	      sfield = build_decl (DECL_SOURCE_LOCATION (var),
				   FIELD_DECL, DECL_NAME (t), TREE_TYPE (t));
	      DECL_ABSTRACT_ORIGIN (sfield) = DECL_ABSTRACT_ORIGIN (t);
	      insert_field_into_struct (ctx->srecord_type, sfield);
	      splay_tree_insert (ctx->sfield_map,
				 (splay_tree_key) DECL_ABSTRACT_ORIGIN (t),
				 (splay_tree_value) sfield);
	    }
	}
      sfield = field;
      insert_field_into_struct ((mask & 1) ? ctx->record_type
				: ctx->srecord_type, field);
    }

  if (mask & 1)
    splay_tree_insert (ctx->field_map, (splay_tree_key) var,
		       (splay_tree_value) field);
  if ((mask & 2) && ctx->sfield_map)
    splay_tree_insert (ctx->sfield_map, (splay_tree_key) var,
		       (splay_tree_value) sfield);
}

static tree
install_var_local (tree var, omp_context *ctx)
{
  tree new_var = omp_copy_decl_1 (var, ctx);
  insert_decl_map (&ctx->cb, var, new_var);
  return new_var;
}

/* Adjust the replacement for DECL in CTX for the new context.  This means
   copying the DECL_VALUE_EXPR, and fixing up the type.  */

static void
fixup_remapped_decl (tree decl, omp_context *ctx, bool private_debug)
{
  tree new_decl, size;

  new_decl = lookup_decl (decl, ctx);

  TREE_TYPE (new_decl) = remap_type (TREE_TYPE (decl), &ctx->cb);

  if ((!TREE_CONSTANT (DECL_SIZE (new_decl)) || private_debug)
      && DECL_HAS_VALUE_EXPR_P (decl))
    {
      tree ve = DECL_VALUE_EXPR (decl);
      walk_tree (&ve, copy_tree_body_r, &ctx->cb, NULL);
      SET_DECL_VALUE_EXPR (new_decl, ve);
      DECL_HAS_VALUE_EXPR_P (new_decl) = 1;
    }

  if (!TREE_CONSTANT (DECL_SIZE (new_decl)))
    {
      size = remap_decl (DECL_SIZE (decl), &ctx->cb);
      if (size == error_mark_node)
	size = TYPE_SIZE (TREE_TYPE (new_decl));
      DECL_SIZE (new_decl) = size;

      size = remap_decl (DECL_SIZE_UNIT (decl), &ctx->cb);
      if (size == error_mark_node)
	size = TYPE_SIZE_UNIT (TREE_TYPE (new_decl));
      DECL_SIZE_UNIT (new_decl) = size;
    }
}

/* The callback for remap_decl.  Search all containing contexts for a
   mapping of the variable; this avoids having to duplicate the splay
   tree ahead of time.  We know a mapping doesn't already exist in the
   given context.  Create new mappings to implement default semantics.  */

static tree
omp_copy_decl (tree var, copy_body_data *cb)
{
  omp_context *ctx = (omp_context *) cb;
  tree new_var;

  if (TREE_CODE (var) == LABEL_DECL)
    {
      new_var = create_artificial_label (DECL_SOURCE_LOCATION (var));
      DECL_CONTEXT (new_var) = current_function_decl;
      insert_decl_map (&ctx->cb, var, new_var);
      return new_var;
    }

  while (!is_taskreg_ctx (ctx))
    {
      ctx = ctx->outer;
      if (ctx == NULL)
	return var;
      new_var = maybe_lookup_decl (var, ctx);
      if (new_var)
	return new_var;
    }

  if (is_global_var (var) || decl_function_context (var) != ctx->cb.src_fn)
    return var;

  return error_mark_node;
}


/* Debugging dumps for parallel regions.  */
void dump_omp_region (FILE *, struct omp_region *, int);
void debug_omp_region (struct omp_region *);
void debug_all_omp_regions (void);

/* Dump the parallel region tree rooted at REGION.  */

void
dump_omp_region (FILE *file, struct omp_region *region, int indent)
{
  fprintf (file, "%*sbb %d: %s\n", indent, "", region->entry->index,
	   gimple_code_name[region->type]);

  if (region->inner)
    dump_omp_region (file, region->inner, indent + 4);

  if (region->cont)
    {
      fprintf (file, "%*sbb %d: GIMPLE_OMP_CONTINUE\n", indent, "",
	       region->cont->index);
    }

  if (region->exit)
    fprintf (file, "%*sbb %d: GIMPLE_OMP_RETURN\n", indent, "",
	     region->exit->index);
  else
    fprintf (file, "%*s[no exit marker]\n", indent, "");

  if (region->next)
    dump_omp_region (file, region->next, indent);
}

DEBUG_FUNCTION void
debug_omp_region (struct omp_region *region)
{
  dump_omp_region (stderr, region, 0);
}

DEBUG_FUNCTION void
debug_all_omp_regions (void)
{
  dump_omp_region (stderr, root_omp_region, 0);
}


/* Create a new parallel region starting at STMT inside region PARENT.  */

struct omp_region *
new_omp_region (basic_block bb, enum gimple_code type,
		struct omp_region *parent)
{
  struct omp_region *region = XCNEW (struct omp_region);

  region->outer = parent;
  region->entry = bb;
  region->type = type;

  if (parent)
    {
      /* This is a nested region.  Add it to the list of inner
	 regions in PARENT.  */
      region->next = parent->inner;
      parent->inner = region;
    }
  else
    {
      /* This is a toplevel region.  Add it to the list of toplevel
	 regions in ROOT_OMP_REGION.  */
      region->next = root_omp_region;
      root_omp_region = region;
    }

  return region;
}

/* Release the memory associated with the region tree rooted at REGION.  */

static void
free_omp_region_1 (struct omp_region *region)
{
  struct omp_region *i, *n;

  for (i = region->inner; i ; i = n)
    {
      n = i->next;
      free_omp_region_1 (i);
    }

  free (region);
}

/* Release the memory for the entire omp region tree.  */

void
free_omp_regions (void)
{
  struct omp_region *r, *n;
  for (r = root_omp_region; r ; r = n)
    {
      n = r->next;
      free_omp_region_1 (r);
    }
  root_omp_region = NULL;
}


/* Create a new context, with OUTER_CTX being the surrounding context.  */

static omp_context *
new_omp_context (gimple stmt, omp_context *outer_ctx)
{
  omp_context *ctx = XCNEW (omp_context);

  splay_tree_insert (all_contexts, (splay_tree_key) stmt,
		     (splay_tree_value) ctx);
  ctx->stmt = stmt;

  if (outer_ctx)
    {
      ctx->outer = outer_ctx;
      ctx->cb = outer_ctx->cb;
      ctx->cb.block = NULL;
      ctx->depth = outer_ctx->depth + 1;
    }
  else
    {
      ctx->cb.src_fn = current_function_decl;
      ctx->cb.dst_fn = current_function_decl;
      ctx->cb.src_node = cgraph_get_node (current_function_decl);
      gcc_checking_assert (ctx->cb.src_node);
      ctx->cb.dst_node = ctx->cb.src_node;
      ctx->cb.src_cfun = cfun;
      ctx->cb.copy_decl = omp_copy_decl;
      ctx->cb.eh_lp_nr = 0;
      ctx->cb.transform_call_graph_edges = CB_CGE_MOVE;
      ctx->depth = 1;
    }

  ctx->cb.decl_map = pointer_map_create ();

  return ctx;
}

static gimple_seq maybe_catch_exception (gimple_seq);

/* Finalize task copyfn.  */

static void
finalize_task_copyfn (gimple task_stmt)
{
  struct function *child_cfun;
  tree child_fn, old_fn;
  gimple_seq seq, new_seq;
  gimple bind;

  child_fn = gimple_omp_task_copy_fn (task_stmt);
  if (child_fn == NULL_TREE)
    return;

  child_cfun = DECL_STRUCT_FUNCTION (child_fn);

  /* Inform the callgraph about the new function.  */
  DECL_STRUCT_FUNCTION (child_fn)->curr_properties
    = cfun->curr_properties;

  old_fn = current_function_decl;
  push_cfun (child_cfun);
  current_function_decl = child_fn;
  bind = gimplify_body (&DECL_SAVED_TREE (child_fn), child_fn, false);
  seq = gimple_seq_alloc ();
  gimple_seq_add_stmt (&seq, bind);
  new_seq = maybe_catch_exception (seq);
  if (new_seq != seq)
    {
      bind = gimple_build_bind (NULL, new_seq, NULL);
      seq = gimple_seq_alloc ();
      gimple_seq_add_stmt (&seq, bind);
    }
  gimple_set_body (child_fn, seq);
  pop_cfun ();
  current_function_decl = old_fn;

  cgraph_add_new_function (child_fn, false);
}

/* Destroy an omp_context data structure.  Called through the splay tree
   value delete callback.  */

static void
delete_omp_context (splay_tree_value value)
{
  omp_context *ctx = (omp_context *) value;

  pointer_map_destroy (ctx->cb.decl_map);

  if (ctx->field_map)
    splay_tree_delete (ctx->field_map);
  if (ctx->sfield_map)
    splay_tree_delete (ctx->sfield_map);

  /* We hijacked DECL_ABSTRACT_ORIGIN earlier.  We need to clear it before
     it produces corrupt debug information.  */
  if (ctx->record_type)
    {
      tree t;
      for (t = TYPE_FIELDS (ctx->record_type); t ; t = DECL_CHAIN (t))
	DECL_ABSTRACT_ORIGIN (t) = NULL;
    }
  if (ctx->srecord_type)
    {
      tree t;
      for (t = TYPE_FIELDS (ctx->srecord_type); t ; t = DECL_CHAIN (t))
	DECL_ABSTRACT_ORIGIN (t) = NULL;
    }

  if (is_task_ctx (ctx))
    finalize_task_copyfn (ctx->stmt);

  XDELETE (ctx);
}

/* Fix up RECEIVER_DECL with a type that has been remapped to the child
   context.  */

static void
fixup_child_record_type (omp_context *ctx)
{
  tree f, type = ctx->record_type;

  /* ??? It isn't sufficient to just call remap_type here, because
     variably_modified_type_p doesn't work the way we expect for
     record types.  Testing each field for whether it needs remapping
     and creating a new record by hand works, however.  */
  for (f = TYPE_FIELDS (type); f ; f = DECL_CHAIN (f))
    if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
      break;
  if (f)
    {
      tree name, new_fields = NULL;

      type = lang_hooks.types.make_type (RECORD_TYPE);
      name = DECL_NAME (TYPE_NAME (ctx->record_type));
      name = build_decl (DECL_SOURCE_LOCATION (ctx->receiver_decl),
			 TYPE_DECL, name, type);
      TYPE_NAME (type) = name;

      for (f = TYPE_FIELDS (ctx->record_type); f ; f = DECL_CHAIN (f))
	{
	  tree new_f = copy_node (f);
	  DECL_CONTEXT (new_f) = type;
	  TREE_TYPE (new_f) = remap_type (TREE_TYPE (f), &ctx->cb);
	  DECL_CHAIN (new_f) = new_fields;
	  walk_tree (&DECL_SIZE (new_f), copy_tree_body_r, &ctx->cb, NULL);
	  walk_tree (&DECL_SIZE_UNIT (new_f), copy_tree_body_r,
		     &ctx->cb, NULL);
	  walk_tree (&DECL_FIELD_OFFSET (new_f), copy_tree_body_r,
		     &ctx->cb, NULL);
	  new_fields = new_f;

	  /* Arrange to be able to look up the receiver field
	     given the sender field.  */
	  splay_tree_insert (ctx->field_map, (splay_tree_key) f,
			     (splay_tree_value) new_f);
	}
      TYPE_FIELDS (type) = nreverse (new_fields);
      layout_type (type);
    }

  TREE_TYPE (ctx->receiver_decl) = build_pointer_type (type);
}

/* Instantiate decls as necessary in CTX to satisfy the data sharing
   specified by CLAUSES.  */

static void
scan_sharing_clauses (tree clauses, omp_context *ctx)
{
  tree c, decl;
  bool scan_array_reductions = false;

  for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
    {
      bool by_ref;

      switch (OMP_CLAUSE_CODE (c))
	{
	case OMP_CLAUSE_PRIVATE:
	  decl = OMP_CLAUSE_DECL (c);
	  if (OMP_CLAUSE_PRIVATE_OUTER_REF (c))
	    goto do_private;
	  else if (!is_variable_sized (decl))
	    install_var_local (decl, ctx);
	  break;

	case OMP_CLAUSE_SHARED:
	  gcc_assert (is_taskreg_ctx (ctx));
	  decl = OMP_CLAUSE_DECL (c);
	  gcc_assert (!COMPLETE_TYPE_P (TREE_TYPE (decl))
		      || !is_variable_sized (decl));
	  /* Global variables don't need to be copied,
	     the receiver side will use them directly.  */
	  if (is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx)))
	    break;
	  by_ref = use_pointer_for_field (decl, ctx);
	  if (! TREE_READONLY (decl)
	      || TREE_ADDRESSABLE (decl)
	      || by_ref
	      || is_reference (decl))
	    {
	      install_var_field (decl, by_ref, 3, ctx);
	      install_var_local (decl, ctx);
	      break;
	    }
	  /* We don't need to copy const scalar vars back.  */
	  OMP_CLAUSE_SET_CODE (c, OMP_CLAUSE_FIRSTPRIVATE);
	  goto do_private;

	case OMP_CLAUSE_LASTPRIVATE:
	  /* Let the corresponding firstprivate clause create
	     the variable.  */
	  if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
	    break;
	  /* FALLTHRU */

	case OMP_CLAUSE_FIRSTPRIVATE:
	case OMP_CLAUSE_REDUCTION:
	  decl = OMP_CLAUSE_DECL (c);
	do_private:
	  if (is_variable_sized (decl))
	    {
	      if (is_task_ctx (ctx))
		install_var_field (decl, false, 1, ctx);
	      break;
	    }
	  else if (is_taskreg_ctx (ctx))
	    {
	      bool global
		= is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx));
	      by_ref = use_pointer_for_field (decl, NULL);

	      if (is_task_ctx (ctx)
		  && (global || by_ref || is_reference (decl)))
		{
		  install_var_field (decl, false, 1, ctx);
		  if (!global)
		    install_var_field (decl, by_ref, 2, ctx);
		}
	      else if (!global)
		install_var_field (decl, by_ref, 3, ctx);
	    }
	  install_var_local (decl, ctx);
	  break;

	case OMP_CLAUSE_COPYPRIVATE:
	case OMP_CLAUSE_COPYIN:
	  decl = OMP_CLAUSE_DECL (c);
	  by_ref = use_pointer_for_field (decl, NULL);
	  install_var_field (decl, by_ref, 3, ctx);
	  break;

	case OMP_CLAUSE_DEFAULT:
	  ctx->default_kind = OMP_CLAUSE_DEFAULT_KIND (c);
	  break;

	case OMP_CLAUSE_IF:
	case OMP_CLAUSE_NUM_THREADS:
	case OMP_CLAUSE_SCHEDULE:
	  if (ctx->outer)
	    scan_omp_op (&OMP_CLAUSE_OPERAND (c, 0), ctx->outer);
	  break;

	case OMP_CLAUSE_NOWAIT:
	case OMP_CLAUSE_ORDERED:
	case OMP_CLAUSE_COLLAPSE:
	case OMP_CLAUSE_UNTIED:
	  break;

	default:
	  gcc_unreachable ();
	}
    }

  for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
    {
      switch (OMP_CLAUSE_CODE (c))
	{
	case OMP_CLAUSE_LASTPRIVATE:
	  /* Let the corresponding firstprivate clause create
	     the variable.  */
	  if (OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c))
	    scan_array_reductions = true;
	  if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
	    break;
	  /* FALLTHRU */

	case OMP_CLAUSE_PRIVATE:
	case OMP_CLAUSE_FIRSTPRIVATE:
	case OMP_CLAUSE_REDUCTION:
	  decl = OMP_CLAUSE_DECL (c);
	  if (is_variable_sized (decl))
	    install_var_local (decl, ctx);
	  fixup_remapped_decl (decl, ctx,
			       OMP_CLAUSE_CODE (c) == OMP_CLAUSE_PRIVATE
			       && OMP_CLAUSE_PRIVATE_DEBUG (c));
	  if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
	      && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
	    scan_array_reductions = true;
	  break;

	case OMP_CLAUSE_SHARED:
	  decl = OMP_CLAUSE_DECL (c);
	  if (! is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx)))
	    fixup_remapped_decl (decl, ctx, false);
	  break;

	case OMP_CLAUSE_COPYPRIVATE:
	case OMP_CLAUSE_COPYIN:
	case OMP_CLAUSE_DEFAULT:
	case OMP_CLAUSE_IF:
	case OMP_CLAUSE_NUM_THREADS:
	case OMP_CLAUSE_SCHEDULE:
	case OMP_CLAUSE_NOWAIT:
	case OMP_CLAUSE_ORDERED:
	case OMP_CLAUSE_COLLAPSE:
	case OMP_CLAUSE_UNTIED:
	  break;

	default:
	  gcc_unreachable ();
	}
    }

  if (scan_array_reductions)
    for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
      if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
	  && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
	{
	  scan_omp (OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c), ctx);
	  scan_omp (OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c), ctx);
	}
      else if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE
	       && OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c))
	scan_omp (OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c), ctx);
}
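
/* For example (an illustrative sketch), scanning the clauses of

	#pragma omp parallel shared (a) firstprivate (b) private (c)

   installs fields for both a and b in the .omp_data_s record (mask 3)
   plus local replacements in the child function, while c only gets
   the local copy, since nothing needs to be communicated for a
   private variable.  */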

/* Create a new name for omp child function.  Returns an identifier.  */

static GTY(()) unsigned int tmp_ompfn_id_num;

static tree
create_omp_child_function_name (bool task_copy)
{
  return (clone_function_name (current_function_decl,
			       task_copy ? "_omp_cpyfn" : "_omp_fn"));
}

/* Build a decl for the omp child function.  It will not contain a body
   yet, just the bare decl.  */

static void
create_omp_child_function (omp_context *ctx, bool task_copy)
{
  tree decl, type, name, t;

  name = create_omp_child_function_name (task_copy);
  if (task_copy)
    type = build_function_type_list (void_type_node, ptr_type_node,
				     ptr_type_node, NULL_TREE);
  else
    type = build_function_type_list (void_type_node, ptr_type_node, NULL_TREE);

  decl = build_decl (gimple_location (ctx->stmt),
		     FUNCTION_DECL, name, type);

  if (!task_copy)
    ctx->cb.dst_fn = decl;
  else
    gimple_omp_task_set_copy_fn (ctx->stmt, decl);

  TREE_STATIC (decl) = 1;
  TREE_USED (decl) = 1;
  DECL_ARTIFICIAL (decl) = 1;
  DECL_NAMELESS (decl) = 1;
  DECL_IGNORED_P (decl) = 0;
  TREE_PUBLIC (decl) = 0;
  DECL_UNINLINABLE (decl) = 1;
  DECL_EXTERNAL (decl) = 0;
  DECL_CONTEXT (decl) = NULL_TREE;
  DECL_INITIAL (decl) = make_node (BLOCK);

  t = build_decl (DECL_SOURCE_LOCATION (decl),
		  RESULT_DECL, NULL_TREE, void_type_node);
  DECL_ARTIFICIAL (t) = 1;
  DECL_IGNORED_P (t) = 1;
  DECL_CONTEXT (t) = decl;
  DECL_RESULT (decl) = t;

  t = build_decl (DECL_SOURCE_LOCATION (decl),
		  PARM_DECL, get_identifier (".omp_data_i"), ptr_type_node);
  DECL_ARTIFICIAL (t) = 1;
  DECL_NAMELESS (t) = 1;
  DECL_ARG_TYPE (t) = ptr_type_node;
  DECL_CONTEXT (t) = current_function_decl;
  TREE_USED (t) = 1;
  DECL_ARGUMENTS (decl) = t;
  if (!task_copy)
    ctx->receiver_decl = t;
  else
    {
      t = build_decl (DECL_SOURCE_LOCATION (decl),
		      PARM_DECL, get_identifier (".omp_data_o"),
		      ptr_type_node);
      DECL_ARTIFICIAL (t) = 1;
      DECL_NAMELESS (t) = 1;
      DECL_ARG_TYPE (t) = ptr_type_node;
      DECL_CONTEXT (t) = current_function_decl;
      TREE_USED (t) = 1;
      TREE_ADDRESSABLE (t) = 1;
      DECL_CHAIN (t) = DECL_ARGUMENTS (decl);
      DECL_ARGUMENTS (decl) = t;
    }

  /* Allocate memory for the function structure.  The call to
     allocate_struct_function clobbers CFUN, so we need to restore
     it afterward.  */
  push_struct_function (decl);
  cfun->function_end_locus = gimple_location (ctx->stmt);
  pop_cfun ();
}
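
/* The decl built above is effectively

	static void foo._omp_fn.N (void *.omp_data_i);

   or, for a task copy function,

	static void foo._omp_cpyfn.N (void *.omp_data_o, void *.omp_data_i);

   (names per create_omp_child_function_name; foo stands for the
   function containing the directive).  */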


/* Scan an OpenMP parallel directive.  */

static void
scan_omp_parallel (gimple_stmt_iterator *gsi, omp_context *outer_ctx)
{
  omp_context *ctx;
  tree name;
  gimple stmt = gsi_stmt (*gsi);

  /* Ignore parallel directives with empty bodies, unless there
     are copyin clauses.  */
  if (optimize > 0
      && empty_body_p (gimple_omp_body (stmt))
      && find_omp_clause (gimple_omp_parallel_clauses (stmt),
			  OMP_CLAUSE_COPYIN) == NULL)
    {
      gsi_replace (gsi, gimple_build_nop (), false);
      return;
    }

  ctx = new_omp_context (stmt, outer_ctx);
  if (taskreg_nesting_level > 1)
    ctx->is_nested = true;
  ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
  ctx->default_kind = OMP_CLAUSE_DEFAULT_SHARED;
  ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
  name = create_tmp_var_name (".omp_data_s");
  name = build_decl (gimple_location (stmt),
		     TYPE_DECL, name, ctx->record_type);
  DECL_ARTIFICIAL (name) = 1;
  DECL_NAMELESS (name) = 1;
  TYPE_NAME (ctx->record_type) = name;
  create_omp_child_function (ctx, false);
  gimple_omp_parallel_set_child_fn (stmt, ctx->cb.dst_fn);

  scan_sharing_clauses (gimple_omp_parallel_clauses (stmt), ctx);
  scan_omp (gimple_omp_body (stmt), ctx);

  if (TYPE_FIELDS (ctx->record_type) == NULL)
    ctx->record_type = ctx->receiver_decl = NULL;
  else
    {
      layout_type (ctx->record_type);
      fixup_child_record_type (ctx);
    }
}

/* Scan an OpenMP task directive.  */

static void
scan_omp_task (gimple_stmt_iterator *gsi, omp_context *outer_ctx)
{
  omp_context *ctx;
  tree name, t;
  gimple stmt = gsi_stmt (*gsi);
  location_t loc = gimple_location (stmt);

  /* Ignore task directives with empty bodies.  */
  if (optimize > 0
      && empty_body_p (gimple_omp_body (stmt)))
    {
      gsi_replace (gsi, gimple_build_nop (), false);
      return;
    }

  ctx = new_omp_context (stmt, outer_ctx);
  if (taskreg_nesting_level > 1)
    ctx->is_nested = true;
  ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
  ctx->default_kind = OMP_CLAUSE_DEFAULT_SHARED;
  ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
  name = create_tmp_var_name (".omp_data_s");
  name = build_decl (gimple_location (stmt),
		     TYPE_DECL, name, ctx->record_type);
  DECL_ARTIFICIAL (name) = 1;
  DECL_NAMELESS (name) = 1;
  TYPE_NAME (ctx->record_type) = name;
  create_omp_child_function (ctx, false);
  gimple_omp_task_set_child_fn (stmt, ctx->cb.dst_fn);

  scan_sharing_clauses (gimple_omp_task_clauses (stmt), ctx);

  if (ctx->srecord_type)
    {
      name = create_tmp_var_name (".omp_data_a");
      name = build_decl (gimple_location (stmt),
			 TYPE_DECL, name, ctx->srecord_type);
      DECL_ARTIFICIAL (name) = 1;
      DECL_NAMELESS (name) = 1;
      TYPE_NAME (ctx->srecord_type) = name;
      create_omp_child_function (ctx, true);
    }

  scan_omp (gimple_omp_body (stmt), ctx);

  if (TYPE_FIELDS (ctx->record_type) == NULL)
    {
      ctx->record_type = ctx->receiver_decl = NULL;
      t = build_int_cst (long_integer_type_node, 0);
      gimple_omp_task_set_arg_size (stmt, t);
      t = build_int_cst (long_integer_type_node, 1);
      gimple_omp_task_set_arg_align (stmt, t);
    }
  else
    {
      tree *p, vla_fields = NULL_TREE, *q = &vla_fields;
      /* Move VLA fields to the end.  */
      p = &TYPE_FIELDS (ctx->record_type);
      while (*p)
	if (!TYPE_SIZE_UNIT (TREE_TYPE (*p))
	    || ! TREE_CONSTANT (TYPE_SIZE_UNIT (TREE_TYPE (*p))))
	  {
	    *q = *p;
	    *p = TREE_CHAIN (*p);
	    TREE_CHAIN (*q) = NULL_TREE;
	    q = &TREE_CHAIN (*q);
	  }
	else
	  p = &DECL_CHAIN (*p);
      *p = vla_fields;
      layout_type (ctx->record_type);
      fixup_child_record_type (ctx);
      if (ctx->srecord_type)
	layout_type (ctx->srecord_type);
      t = fold_convert_loc (loc, long_integer_type_node,
			    TYPE_SIZE_UNIT (ctx->record_type));
      gimple_omp_task_set_arg_size (stmt, t);
      t = build_int_cst (long_integer_type_node,
			 TYPE_ALIGN_UNIT (ctx->record_type));
      gimple_omp_task_set_arg_align (stmt, t);
    }
}


/* Scan an OpenMP loop directive.  */

static void
scan_omp_for (gimple stmt, omp_context *outer_ctx)
{
  omp_context *ctx;
  size_t i;

  ctx = new_omp_context (stmt, outer_ctx);

  scan_sharing_clauses (gimple_omp_for_clauses (stmt), ctx);

  scan_omp (gimple_omp_for_pre_body (stmt), ctx);
  for (i = 0; i < gimple_omp_for_collapse (stmt); i++)
    {
      scan_omp_op (gimple_omp_for_index_ptr (stmt, i), ctx);
      scan_omp_op (gimple_omp_for_initial_ptr (stmt, i), ctx);
      scan_omp_op (gimple_omp_for_final_ptr (stmt, i), ctx);
      scan_omp_op (gimple_omp_for_incr_ptr (stmt, i), ctx);
    }
  scan_omp (gimple_omp_body (stmt), ctx);
}

/* Scan an OpenMP sections directive.  */

static void
scan_omp_sections (gimple stmt, omp_context *outer_ctx)
{
  omp_context *ctx;

  ctx = new_omp_context (stmt, outer_ctx);
  scan_sharing_clauses (gimple_omp_sections_clauses (stmt), ctx);
  scan_omp (gimple_omp_body (stmt), ctx);
}

/* Scan an OpenMP single directive.  */

static void
scan_omp_single (gimple stmt, omp_context *outer_ctx)
{
  omp_context *ctx;
  tree name;

  ctx = new_omp_context (stmt, outer_ctx);
  ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
  ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
  name = create_tmp_var_name (".omp_copy_s");
  name = build_decl (gimple_location (stmt),
		     TYPE_DECL, name, ctx->record_type);
  TYPE_NAME (ctx->record_type) = name;

  scan_sharing_clauses (gimple_omp_single_clauses (stmt), ctx);
  scan_omp (gimple_omp_body (stmt), ctx);

  if (TYPE_FIELDS (ctx->record_type) == NULL)
    ctx->record_type = NULL;
  else
    layout_type (ctx->record_type);
}


/* Check OpenMP nesting restrictions.  */
static void
check_omp_nesting_restrictions (gimple stmt, omp_context *ctx)
{
  switch (gimple_code (stmt))
    {
    case GIMPLE_OMP_FOR:
    case GIMPLE_OMP_SECTIONS:
    case GIMPLE_OMP_SINGLE:
    case GIMPLE_CALL:
      for (; ctx != NULL; ctx = ctx->outer)
	switch (gimple_code (ctx->stmt))
	  {
	  case GIMPLE_OMP_FOR:
	  case GIMPLE_OMP_SECTIONS:
	  case GIMPLE_OMP_SINGLE:
	  case GIMPLE_OMP_ORDERED:
	  case GIMPLE_OMP_MASTER:
	  case GIMPLE_OMP_TASK:
	    if (is_gimple_call (stmt))
	      {
		warning (0, "barrier region may not be closely nested inside "
			    "of work-sharing, critical, ordered, master or "
			    "explicit task region");
		return;
	      }
	    warning (0, "work-sharing region may not be closely nested inside "
			"of work-sharing, critical, ordered, master or explicit "
			"task region");
	    return;
	  case GIMPLE_OMP_PARALLEL:
	    return;
	  default:
	    break;
	  }
      break;
    case GIMPLE_OMP_MASTER:
      for (; ctx != NULL; ctx = ctx->outer)
	switch (gimple_code (ctx->stmt))
	  {
	  case GIMPLE_OMP_FOR:
	  case GIMPLE_OMP_SECTIONS:
	  case GIMPLE_OMP_SINGLE:
	  case GIMPLE_OMP_TASK:
	    warning (0, "master region may not be closely nested inside "
			"of work-sharing or explicit task region");
	    return;
	  case GIMPLE_OMP_PARALLEL:
	    return;
	  default:
	    break;
	  }
      break;
    case GIMPLE_OMP_ORDERED:
      for (; ctx != NULL; ctx = ctx->outer)
	switch (gimple_code (ctx->stmt))
	  {
	  case GIMPLE_OMP_CRITICAL:
	  case GIMPLE_OMP_TASK:
	    warning (0, "ordered region may not be closely nested inside "
			"of critical or explicit task region");
	    return;
	  case GIMPLE_OMP_FOR:
	    if (find_omp_clause (gimple_omp_for_clauses (ctx->stmt),
				 OMP_CLAUSE_ORDERED) == NULL)
	      warning (0, "ordered region must be closely nested inside "
			  "a loop region with an ordered clause");
	    return;
	  case GIMPLE_OMP_PARALLEL:
	    return;
	  default:
	    break;
	  }
      break;
    case GIMPLE_OMP_CRITICAL:
      for (; ctx != NULL; ctx = ctx->outer)
	if (gimple_code (ctx->stmt) == GIMPLE_OMP_CRITICAL
	    && (gimple_omp_critical_name (stmt)
		== gimple_omp_critical_name (ctx->stmt)))
	  {
	    warning (0, "critical region may not be nested inside a critical "
			"region with the same name");
	    return;
	  }
      break;
    default:
      break;
    }
}
1899
1900
1901 /* Helper function for scan_omp.
1902 
1903 Callback for walk_tree or operands in walk_gimple_stmt used to
1904 scan for OpenMP directives in TP.  */
1905
1906 static tree
1907 scan_omp_1_op (tree *tp, int *walk_subtrees, void *data)
1908 {
1909 struct walk_stmt_info *wi = (struct walk_stmt_info *) data;
1910 omp_context *ctx = (omp_context *) wi->info;
1911 tree t = *tp;
1912
1913 switch (TREE_CODE (t))
1914 {
1915 case VAR_DECL:
1916 case PARM_DECL:
1917 case LABEL_DECL:
1918 case RESULT_DECL:
1919 if (ctx)
1920 *tp = remap_decl (t, &ctx->cb);
1921 break;
1922
1923 default:
1924 if (ctx && TYPE_P (t))
1925 *tp = remap_type (t, &ctx->cb);
1926 else if (!DECL_P (t))
1927 {
1928 *walk_subtrees = 1;
1929 if (ctx)
1930 {
1931 tree tem = remap_type (TREE_TYPE (t), &ctx->cb);
1932 if (tem != TREE_TYPE (t))
1933 {
1934 if (TREE_CODE (t) == INTEGER_CST)
1935 *tp = build_int_cst_wide (tem,
1936 TREE_INT_CST_LOW (t),
1937 TREE_INT_CST_HIGH (t));
1938 else
1939 TREE_TYPE (t) = tem;
1940 }
1941 }
1942 }
1943 break;
1944 }
1945
1946 return NULL_TREE;
1947 }
1948
1949
1950 /* Helper function for scan_omp.
1951
1952 Callback for walk_gimple_stmt used to scan for OpenMP directives in
1953 the current statement in GSI. */
1954
1955 static tree
1956 scan_omp_1_stmt (gimple_stmt_iterator *gsi, bool *handled_ops_p,
1957 struct walk_stmt_info *wi)
1958 {
1959 gimple stmt = gsi_stmt (*gsi);
1960 omp_context *ctx = (omp_context *) wi->info;
1961
1962 if (gimple_has_location (stmt))
1963 input_location = gimple_location (stmt);
1964
1965 /* Check the OpenMP nesting restrictions. */
1966 if (ctx != NULL)
1967 {
1968 if (is_gimple_omp (stmt))
1969 check_omp_nesting_restrictions (stmt, ctx);
1970 else if (is_gimple_call (stmt))
1971 {
1972 tree fndecl = gimple_call_fndecl (stmt);
1973 if (fndecl && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL
1974 && DECL_FUNCTION_CODE (fndecl) == BUILT_IN_GOMP_BARRIER)
1975 check_omp_nesting_restrictions (stmt, ctx);
1976 }
1977 }
1978
1979 *handled_ops_p = true;
1980
1981 switch (gimple_code (stmt))
1982 {
1983 case GIMPLE_OMP_PARALLEL:
1984 taskreg_nesting_level++;
1985 scan_omp_parallel (gsi, ctx);
1986 taskreg_nesting_level--;
1987 break;
1988
1989 case GIMPLE_OMP_TASK:
1990 taskreg_nesting_level++;
1991 scan_omp_task (gsi, ctx);
1992 taskreg_nesting_level--;
1993 break;
1994
1995 case GIMPLE_OMP_FOR:
1996 scan_omp_for (stmt, ctx);
1997 break;
1998
1999 case GIMPLE_OMP_SECTIONS:
2000 scan_omp_sections (stmt, ctx);
2001 break;
2002
2003 case GIMPLE_OMP_SINGLE:
2004 scan_omp_single (stmt, ctx);
2005 break;
2006
2007 case GIMPLE_OMP_SECTION:
2008 case GIMPLE_OMP_MASTER:
2009 case GIMPLE_OMP_ORDERED:
2010 case GIMPLE_OMP_CRITICAL:
2011 ctx = new_omp_context (stmt, ctx);
2012 scan_omp (gimple_omp_body (stmt), ctx);
2013 break;
2014
2015 case GIMPLE_BIND:
2016 {
2017 tree var;
2018
2019 *handled_ops_p = false;
2020 if (ctx)
2021 for (var = gimple_bind_vars (stmt); var ; var = DECL_CHAIN (var))
2022 insert_decl_map (&ctx->cb, var, var);
2023 }
2024 break;
2025 default:
2026 *handled_ops_p = false;
2027 break;
2028 }
2029
2030 return NULL_TREE;
2031 }
2032
2033
2034 /* Scan all the statements in the sequence BODY.  CTX contains
2035 context information about the OpenMP directives and
2036 clauses found during the scan.  */
2037
2038 static void
2039 scan_omp (gimple_seq body, omp_context *ctx)
2040 {
2041 location_t saved_location;
2042 struct walk_stmt_info wi;
2043
2044 memset (&wi, 0, sizeof (wi));
2045 wi.info = ctx;
2046 wi.want_locations = true;
2047
2048 saved_location = input_location;
2049 walk_gimple_seq (body, scan_omp_1_stmt, scan_omp_1_op, &wi);
2050 input_location = saved_location;
2051 }
2052 \f
2053 /* Re-gimplification and code generation routines. */
2054
2055 /* Build a call to GOMP_barrier. */
2056
2057 static tree
2058 build_omp_barrier (void)
2059 {
2060 return build_call_expr (built_in_decls[BUILT_IN_GOMP_BARRIER], 0);
2061 }
2062
2063 /* If a context was created for STMT when it was scanned, return it. */
2064
2065 static omp_context *
2066 maybe_lookup_ctx (gimple stmt)
2067 {
2068 splay_tree_node n;
2069 n = splay_tree_lookup (all_contexts, (splay_tree_key) stmt);
2070 return n ? (omp_context *) n->value : NULL;
2071 }
2072
2073
2074 /* Find the mapping for DECL in CTX or the immediately enclosing
2075 context that has a mapping for DECL.
2076
2077 If CTX is a nested parallel directive, we may have to use the decl
2078 mappings created in CTX's parent context. Suppose that we have the
2079 following parallel nesting (variable UIDs shown for clarity):
2080
2081 iD.1562 = 0;
2082 #omp parallel shared(iD.1562) -> outer parallel
2083 iD.1562 = iD.1562 + 1;
2084
2085 #omp parallel shared (iD.1562) -> inner parallel
2086 iD.1562 = iD.1562 - 1;
2087
2088 Each parallel structure will create a distinct .omp_data_s structure
2089 for copying iD.1562 in/out of the directive:
2090
2091 outer parallel .omp_data_s.1.i -> iD.1562
2092 inner parallel .omp_data_s.2.i -> iD.1562
2093
2094 A shared variable mapping will produce a copy-out operation before
2095 the parallel directive and a copy-in operation after it. So, in
2096 this case we would have:
2097
2098 iD.1562 = 0;
2099 .omp_data_o.1.i = iD.1562;
2100 #omp parallel shared(iD.1562) -> outer parallel
2101 .omp_data_i.1 = &.omp_data_o.1
2102 .omp_data_i.1->i = .omp_data_i.1->i + 1;
2103
2104 .omp_data_o.2.i = iD.1562; -> **
2105 #omp parallel shared(iD.1562) -> inner parallel
2106 .omp_data_i.2 = &.omp_data_o.2
2107 .omp_data_i.2->i = .omp_data_i.2->i - 1;
2108
2109
2110 ** This is a problem. The symbol iD.1562 cannot be referenced
2111 inside the body of the outer parallel region. But since we are
2112 emitting this copy operation while expanding the inner parallel
2113 directive, we need to access the CTX structure of the outer
2114 parallel directive to get the correct mapping:
2115
2116 .omp_data_o.2.i = .omp_data_i.1->i
2117
2118 Since there may be other workshare or parallel directives enclosing
2119 the parallel directive, it may be necessary to walk up the context
2120 parent chain. This is not a problem in general because nested
2121 parallelism happens only rarely. */
2122
2123 static tree
2124 lookup_decl_in_outer_ctx (tree decl, omp_context *ctx)
2125 {
2126 tree t;
2127 omp_context *up;
2128
2129 for (up = ctx->outer, t = NULL; up && t == NULL; up = up->outer)
2130 t = maybe_lookup_decl (decl, up);
2131
2132 gcc_assert (!ctx->is_nested || t || is_global_var (decl));
2133
2134 return t ? t : decl;
2135 }
2136
2137
2138 /* Similar to lookup_decl_in_outer_ctx, but return DECL if not found
2139 in outer contexts. */
2140
2141 static tree
2142 maybe_lookup_decl_in_outer_ctx (tree decl, omp_context *ctx)
2143 {
2144 tree t;
2145 omp_context *up;
2146
2147 for (up = ctx->outer, t = NULL; up && t == NULL; up = up->outer)
2148 t = maybe_lookup_decl (decl, up);
2149
2150 return t ? t : decl;
2151 }
2152
2153
2154 /* Construct the initialization value for reduction CLAUSE. */
2155
2156 tree
2157 omp_reduction_init (tree clause, tree type)
2158 {
2159 location_t loc = OMP_CLAUSE_LOCATION (clause);
2160 switch (OMP_CLAUSE_REDUCTION_CODE (clause))
2161 {
2162 case PLUS_EXPR:
2163 case MINUS_EXPR:
2164 case BIT_IOR_EXPR:
2165 case BIT_XOR_EXPR:
2166 case TRUTH_OR_EXPR:
2167 case TRUTH_ORIF_EXPR:
2168 case TRUTH_XOR_EXPR:
2169 case NE_EXPR:
2170 return build_zero_cst (type);
2171
2172 case MULT_EXPR:
2173 case TRUTH_AND_EXPR:
2174 case TRUTH_ANDIF_EXPR:
2175 case EQ_EXPR:
2176 return fold_convert_loc (loc, type, integer_one_node);
2177
2178 case BIT_AND_EXPR:
2179 return fold_convert_loc (loc, type, integer_minus_one_node);
2180
2181 case MAX_EXPR:
2182 if (SCALAR_FLOAT_TYPE_P (type))
2183 {
2184 REAL_VALUE_TYPE max, min;
2185 if (HONOR_INFINITIES (TYPE_MODE (type)))
2186 {
2187 real_inf (&max);
2188 real_arithmetic (&min, NEGATE_EXPR, &max, NULL);
2189 }
2190 else
2191 real_maxval (&min, 1, TYPE_MODE (type));
2192 return build_real (type, min);
2193 }
2194 else
2195 {
2196 gcc_assert (INTEGRAL_TYPE_P (type));
2197 return TYPE_MIN_VALUE (type);
2198 }
2199
2200 case MIN_EXPR:
2201 if (SCALAR_FLOAT_TYPE_P (type))
2202 {
2203 REAL_VALUE_TYPE max;
2204 if (HONOR_INFINITIES (TYPE_MODE (type)))
2205 real_inf (&max);
2206 else
2207 real_maxval (&max, 0, TYPE_MODE (type));
2208 return build_real (type, max);
2209 }
2210 else
2211 {
2212 gcc_assert (INTEGRAL_TYPE_P (type));
2213 return TYPE_MAX_VALUE (type);
2214 }
2215
2216 default:
2217 gcc_unreachable ();
2218 }
2219 }
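
/* For example (illustrative summary of the cases above): with int s,
   reduction(+:s) starts each thread's private copy at 0,
   reduction(*:s) at 1, reduction(&:s) at ~0 and reduction(max:s) at
   INT_MIN; for float m, reduction(min:m) starts at +Inf when
   infinities are honored and at the maximal finite value otherwise.  */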
2220
2221 /* Generate code to implement the input clauses, FIRSTPRIVATE and COPYIN,
2222 from the receiver (aka child) side and initializers for REFERENCE_TYPE
2223 private variables. Initialization statements go in ILIST, while calls
2224 to destructors go in DLIST. */
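/* A sketch of the effect (variable names are hypothetical): for

       #pragma omp parallel firstprivate(x)

   the receiver side emits roughly

       x' = .omp_data_i->x;     <-- copy-in, appended to ILIST

   and, for a C++ type with a nontrivial destructor, a matching
   destructor call is appended to DLIST, keeping initializers and
   destructors in separate sequences.  */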
2225
2226 static void
2227 lower_rec_input_clauses (tree clauses, gimple_seq *ilist, gimple_seq *dlist,
2228 omp_context *ctx)
2229 {
2230 gimple_stmt_iterator diter;
2231 tree c, dtor, copyin_seq, x, ptr;
2232 bool copyin_by_ref = false;
2233 bool lastprivate_firstprivate = false;
2234 int pass;
2235
2236 *dlist = gimple_seq_alloc ();
2237 diter = gsi_start (*dlist);
2238 copyin_seq = NULL;
2239
2240 /* Do all the fixed sized types in the first pass, and the variable sized
2241 types in the second pass. This makes sure that the scalar arguments to
2242 the variable sized types are processed before we use them in the
2243 variable sized operations. */
2244 for (pass = 0; pass < 2; ++pass)
2245 {
2246 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
2247 {
2248 enum omp_clause_code c_kind = OMP_CLAUSE_CODE (c);
2249 tree var, new_var;
2250 bool by_ref;
2251 location_t clause_loc = OMP_CLAUSE_LOCATION (c);
2252
2253 switch (c_kind)
2254 {
2255 case OMP_CLAUSE_PRIVATE:
2256 if (OMP_CLAUSE_PRIVATE_DEBUG (c))
2257 continue;
2258 break;
2259 case OMP_CLAUSE_SHARED:
2260 if (maybe_lookup_decl (OMP_CLAUSE_DECL (c), ctx) == NULL)
2261 {
2262 gcc_assert (is_global_var (OMP_CLAUSE_DECL (c)));
2263 continue;
2264 }
2265 case OMP_CLAUSE_FIRSTPRIVATE:
2266 case OMP_CLAUSE_COPYIN:
2267 case OMP_CLAUSE_REDUCTION:
2268 break;
2269 case OMP_CLAUSE_LASTPRIVATE:
2270 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
2271 {
2272 lastprivate_firstprivate = true;
2273 if (pass != 0)
2274 continue;
2275 }
2276 break;
2277 default:
2278 continue;
2279 }
2280
2281 new_var = var = OMP_CLAUSE_DECL (c);
2282 if (c_kind != OMP_CLAUSE_COPYIN)
2283 new_var = lookup_decl (var, ctx);
2284
2285 if (c_kind == OMP_CLAUSE_SHARED || c_kind == OMP_CLAUSE_COPYIN)
2286 {
2287 if (pass != 0)
2288 continue;
2289 }
2290 else if (is_variable_sized (var))
2291 {
2292 /* For variable sized types, we need to allocate the
2293 actual storage here. Call alloca and store the
2294 result in the pointer decl that we created elsewhere. */
2295 if (pass == 0)
2296 continue;
2297
2298 if (c_kind != OMP_CLAUSE_FIRSTPRIVATE || !is_task_ctx (ctx))
2299 {
2300 gimple stmt;
2301 tree tmp;
2302
2303 ptr = DECL_VALUE_EXPR (new_var);
2304 gcc_assert (TREE_CODE (ptr) == INDIRECT_REF);
2305 ptr = TREE_OPERAND (ptr, 0);
2306 gcc_assert (DECL_P (ptr));
2307 x = TYPE_SIZE_UNIT (TREE_TYPE (new_var));
2308
2309 /* void *tmp = __builtin_alloca */
2310 stmt
2311 = gimple_build_call (built_in_decls[BUILT_IN_ALLOCA], 1, x);
2312 tmp = create_tmp_var_raw (ptr_type_node, NULL);
2313 gimple_add_tmp_var (tmp);
2314 gimple_call_set_lhs (stmt, tmp);
2315
2316 gimple_seq_add_stmt (ilist, stmt);
2317
2318 x = fold_convert_loc (clause_loc, TREE_TYPE (ptr), tmp);
2319 gimplify_assign (ptr, x, ilist);
2320 }
2321 }
2322 else if (is_reference (var))
2323 {
2324 /* For references that are being privatized for Fortran,
2325 allocate new backing storage for the new pointer
2326 variable. This allows us to avoid changing all the
2327 code that expects a pointer to something that expects
2328 a direct variable. Note that this doesn't apply to
2329 C++, since reference types are disallowed in data
2330 sharing clauses there, except for NRV optimized
2331 return values. */
2332 if (pass == 0)
2333 continue;
2334
2335 x = TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (new_var)));
2336 if (c_kind == OMP_CLAUSE_FIRSTPRIVATE && is_task_ctx (ctx))
2337 {
2338 x = build_receiver_ref (var, false, ctx);
2339 x = build_fold_addr_expr_loc (clause_loc, x);
2340 }
2341 else if (TREE_CONSTANT (x))
2342 {
2343 const char *name = NULL;
2344 if (DECL_NAME (var))
2345 name = IDENTIFIER_POINTER (DECL_NAME (new_var));
2346
2347 x = create_tmp_var_raw (TREE_TYPE (TREE_TYPE (new_var)),
2348 name);
2349 gimple_add_tmp_var (x);
2350 TREE_ADDRESSABLE (x) = 1;
2351 x = build_fold_addr_expr_loc (clause_loc, x);
2352 }
2353 else
2354 {
2355 x = build_call_expr_loc (clause_loc,
2356 built_in_decls[BUILT_IN_ALLOCA], 1, x);
2357 }
2358
2359 x = fold_convert_loc (clause_loc, TREE_TYPE (new_var), x);
2360 gimplify_assign (new_var, x, ilist);
2361
2362 new_var = build_simple_mem_ref_loc (clause_loc, new_var);
2363 }
2364 else if (c_kind == OMP_CLAUSE_REDUCTION
2365 && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
2366 {
2367 if (pass == 0)
2368 continue;
2369 }
2370 else if (pass != 0)
2371 continue;
2372
2373 switch (OMP_CLAUSE_CODE (c))
2374 {
2375 case OMP_CLAUSE_SHARED:
2376 /* Shared global vars are just accessed directly. */
2377 if (is_global_var (new_var))
2378 break;
2379 /* Set up the DECL_VALUE_EXPR for shared variables now. This
2380 needs to be delayed until after fixup_child_record_type so
2381 that we get the correct type during the dereference. */
2382 by_ref = use_pointer_for_field (var, ctx);
2383 x = build_receiver_ref (var, by_ref, ctx);
2384 SET_DECL_VALUE_EXPR (new_var, x);
2385 DECL_HAS_VALUE_EXPR_P (new_var) = 1;
2386
2387 /* ??? If VAR is not passed by reference, and the variable
2388 hasn't been initialized yet, then we'll get a warning for
2389 the store into the omp_data_s structure. Ideally, we'd be
2390 able to notice this and not store anything at all, but
2391 we're generating code too early. Suppress the warning. */
2392 if (!by_ref)
2393 TREE_NO_WARNING (var) = 1;
2394 break;
2395
2396 case OMP_CLAUSE_LASTPRIVATE:
2397 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
2398 break;
2399 /* FALLTHRU */
2400
2401 case OMP_CLAUSE_PRIVATE:
2402 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_PRIVATE)
2403 x = build_outer_var_ref (var, ctx);
2404 else if (OMP_CLAUSE_PRIVATE_OUTER_REF (c))
2405 {
2406 if (is_task_ctx (ctx))
2407 x = build_receiver_ref (var, false, ctx);
2408 else
2409 x = build_outer_var_ref (var, ctx);
2410 }
2411 else
2412 x = NULL;
2413 x = lang_hooks.decls.omp_clause_default_ctor (c, new_var, x);
2414 if (x)
2415 gimplify_and_add (x, ilist);
2416 /* FALLTHRU */
2417
2418 do_dtor:
2419 x = lang_hooks.decls.omp_clause_dtor (c, new_var);
2420 if (x)
2421 {
2422 gimple_seq tseq = NULL;
2423
2424 dtor = x;
2425 gimplify_stmt (&dtor, &tseq);
2426 gsi_insert_seq_before (&diter, tseq, GSI_SAME_STMT);
2427 }
2428 break;
2429
2430 case OMP_CLAUSE_FIRSTPRIVATE:
2431 if (is_task_ctx (ctx))
2432 {
2433 if (is_reference (var) || is_variable_sized (var))
2434 goto do_dtor;
2435 else if (is_global_var (maybe_lookup_decl_in_outer_ctx (var,
2436 ctx))
2437 || use_pointer_for_field (var, NULL))
2438 {
2439 x = build_receiver_ref (var, false, ctx);
2440 SET_DECL_VALUE_EXPR (new_var, x);
2441 DECL_HAS_VALUE_EXPR_P (new_var) = 1;
2442 goto do_dtor;
2443 }
2444 }
2445 x = build_outer_var_ref (var, ctx);
2446 x = lang_hooks.decls.omp_clause_copy_ctor (c, new_var, x);
2447 gimplify_and_add (x, ilist);
2448 goto do_dtor;
2449 break;
2450
2451 case OMP_CLAUSE_COPYIN:
2452 by_ref = use_pointer_for_field (var, NULL);
2453 x = build_receiver_ref (var, by_ref, ctx);
2454 x = lang_hooks.decls.omp_clause_assign_op (c, new_var, x);
2455 append_to_statement_list (x, &copyin_seq);
2456 copyin_by_ref |= by_ref;
2457 break;
2458
2459 case OMP_CLAUSE_REDUCTION:
2460 if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
2461 {
2462 tree placeholder = OMP_CLAUSE_REDUCTION_PLACEHOLDER (c);
2463 x = build_outer_var_ref (var, ctx);
2464
2465 if (is_reference (var))
2466 x = build_fold_addr_expr_loc (clause_loc, x);
2467 SET_DECL_VALUE_EXPR (placeholder, x);
2468 DECL_HAS_VALUE_EXPR_P (placeholder) = 1;
2469 lower_omp (OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c), ctx);
2470 gimple_seq_add_seq (ilist,
2471 OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c));
2472 OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c) = NULL;
2473 DECL_HAS_VALUE_EXPR_P (placeholder) = 0;
2474 }
2475 else
2476 {
2477 x = omp_reduction_init (c, TREE_TYPE (new_var));
2478 gcc_assert (TREE_CODE (TREE_TYPE (new_var)) != ARRAY_TYPE);
2479 gimplify_assign (new_var, x, ilist);
2480 }
2481 break;
2482
2483 default:
2484 gcc_unreachable ();
2485 }
2486 }
2487 }
2488
2489 /* The copyin sequence is not to be executed by the main thread, since
2490 that would result in self-copies.  For scalars the self-copy may be
2491 harmless, but for C++ operator= it certainly is not.  */
2492 if (copyin_seq)
2493 {
2494 x = build_call_expr (built_in_decls[BUILT_IN_OMP_GET_THREAD_NUM], 0);
2495 x = build2 (NE_EXPR, boolean_type_node, x,
2496 build_int_cst (TREE_TYPE (x), 0));
2497 x = build3 (COND_EXPR, void_type_node, x, copyin_seq, NULL);
2498 gimplify_and_add (x, ilist);
2499 }
2500
2501 /* If any copyin variable is passed by reference, we must ensure the
2502 master thread doesn't modify it before it is copied over in all
2503 threads. Similarly for variables in both firstprivate and
2504 lastprivate clauses we need to ensure the lastprivate copying
2505 happens after firstprivate copying in all threads. */
2506 if (copyin_by_ref || lastprivate_firstprivate)
2507 gimplify_and_add (build_omp_barrier (), ilist);
2508 }
2509
2510
2511 /* Generate code to implement the LASTPRIVATE clauses. This is used for
2512 both parallel and workshare constructs. PREDICATE may be NULL if it's
2513 always true. */
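/* A sketch (simplified, hypothetical names): for

       #pragma omp for lastprivate(x)

   the epilogue emitted here is guarded by PREDICATE so that only the
   thread that executed the sequentially last iteration stores its
   private copy back:

       if (PREDICATE)
         x = x';  */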
2514
2515 static void
2516 lower_lastprivate_clauses (tree clauses, tree predicate, gimple_seq *stmt_list,
2517 omp_context *ctx)
2518 {
2519 tree x, c, label = NULL;
2520 bool par_clauses = false;
2521
2522 /* Early exit if there are no lastprivate clauses. */
2523 clauses = find_omp_clause (clauses, OMP_CLAUSE_LASTPRIVATE);
2524 if (clauses == NULL)
2525 {
2526 /* If this was a workshare directive, see if it had been combined
2527 with its parallel. In that case, look for the clauses on the
2528 parallel statement itself. */
2529 if (is_parallel_ctx (ctx))
2530 return;
2531
2532 ctx = ctx->outer;
2533 if (ctx == NULL || !is_parallel_ctx (ctx))
2534 return;
2535
2536 clauses = find_omp_clause (gimple_omp_parallel_clauses (ctx->stmt),
2537 OMP_CLAUSE_LASTPRIVATE);
2538 if (clauses == NULL)
2539 return;
2540 par_clauses = true;
2541 }
2542
2543 if (predicate)
2544 {
2545 gimple stmt;
2546 tree label_true, arm1, arm2;
2547
2548 label = create_artificial_label (UNKNOWN_LOCATION);
2549 label_true = create_artificial_label (UNKNOWN_LOCATION);
2550 arm1 = TREE_OPERAND (predicate, 0);
2551 arm2 = TREE_OPERAND (predicate, 1);
2552 gimplify_expr (&arm1, stmt_list, NULL, is_gimple_val, fb_rvalue);
2553 gimplify_expr (&arm2, stmt_list, NULL, is_gimple_val, fb_rvalue);
2554 stmt = gimple_build_cond (TREE_CODE (predicate), arm1, arm2,
2555 label_true, label);
2556 gimple_seq_add_stmt (stmt_list, stmt);
2557 gimple_seq_add_stmt (stmt_list, gimple_build_label (label_true));
2558 }
2559
2560 for (c = clauses; c ;)
2561 {
2562 tree var, new_var;
2563 location_t clause_loc = OMP_CLAUSE_LOCATION (c);
2564
2565 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE)
2566 {
2567 var = OMP_CLAUSE_DECL (c);
2568 new_var = lookup_decl (var, ctx);
2569
2570 if (OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c))
2571 {
2572 lower_omp (OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c), ctx);
2573 gimple_seq_add_seq (stmt_list,
2574 OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c));
2575 }
2576 OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c) = NULL;
2577
2578 x = build_outer_var_ref (var, ctx);
2579 if (is_reference (var))
2580 new_var = build_simple_mem_ref_loc (clause_loc, new_var);
2581 x = lang_hooks.decls.omp_clause_assign_op (c, x, new_var);
2582 gimplify_and_add (x, stmt_list);
2583 }
2584 c = OMP_CLAUSE_CHAIN (c);
2585 if (c == NULL && !par_clauses)
2586 {
2587 /* If this was a workshare directive, see if it had been combined
2588 with its parallel. In that case, continue looking for the
2589 clauses also on the parallel statement itself. */
2590 if (is_parallel_ctx (ctx))
2591 break;
2592
2593 ctx = ctx->outer;
2594 if (ctx == NULL || !is_parallel_ctx (ctx))
2595 break;
2596
2597 c = find_omp_clause (gimple_omp_parallel_clauses (ctx->stmt),
2598 OMP_CLAUSE_LASTPRIVATE);
2599 par_clauses = true;
2600 }
2601 }
2602
2603 if (label)
2604 gimple_seq_add_stmt (stmt_list, gimple_build_label (label));
2605 }
2606
2607
2608 /* Generate code to implement the REDUCTION clauses. */
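/* A sketch of the two strategies used below (names hypothetical): a
   single reduction(+:s) clause merges with one atomic update,

       #pragma omp atomic
       s += s';

   while several clauses, or an array reduction, wrap the merges in a
   lock:

       GOMP_atomic_start ();  s += s';  t *= t';  GOMP_atomic_end ();  */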
2609
2610 static void
2611 lower_reduction_clauses (tree clauses, gimple_seq *stmt_seqp, omp_context *ctx)
2612 {
2613 gimple_seq sub_seq = NULL;
2614 gimple stmt;
2615 tree x, c;
2616 int count = 0;
2617
2618 /* First see if there is exactly one reduction clause. Use OMP_ATOMIC
2619 update in that case, otherwise use a lock. */
2620 for (c = clauses; c && count < 2; c = OMP_CLAUSE_CHAIN (c))
2621 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION)
2622 {
2623 if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
2624 {
2625 /* Never use OMP_ATOMIC for array reductions. */
2626 count = -1;
2627 break;
2628 }
2629 count++;
2630 }
2631
2632 if (count == 0)
2633 return;
2634
2635 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
2636 {
2637 tree var, ref, new_var;
2638 enum tree_code code;
2639 location_t clause_loc = OMP_CLAUSE_LOCATION (c);
2640
2641 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_REDUCTION)
2642 continue;
2643
2644 var = OMP_CLAUSE_DECL (c);
2645 new_var = lookup_decl (var, ctx);
2646 if (is_reference (var))
2647 new_var = build_simple_mem_ref_loc (clause_loc, new_var);
2648 ref = build_outer_var_ref (var, ctx);
2649 code = OMP_CLAUSE_REDUCTION_CODE (c);
2650
2651 /* reduction(-:var) sums up the partial results, so it acts
2652 identically to reduction(+:var). */
2653 if (code == MINUS_EXPR)
2654 code = PLUS_EXPR;
2655
2656 if (count == 1)
2657 {
2658 tree addr = build_fold_addr_expr_loc (clause_loc, ref);
2659
2660 addr = save_expr (addr);
2661 ref = build1 (INDIRECT_REF, TREE_TYPE (TREE_TYPE (addr)), addr);
2662 x = fold_build2_loc (clause_loc, code, TREE_TYPE (ref), ref, new_var);
2663 x = build2 (OMP_ATOMIC, void_type_node, addr, x);
2664 gimplify_and_add (x, stmt_seqp);
2665 return;
2666 }
2667
2668 if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
2669 {
2670 tree placeholder = OMP_CLAUSE_REDUCTION_PLACEHOLDER (c);
2671
2672 if (is_reference (var))
2673 ref = build_fold_addr_expr_loc (clause_loc, ref);
2674 SET_DECL_VALUE_EXPR (placeholder, ref);
2675 DECL_HAS_VALUE_EXPR_P (placeholder) = 1;
2676 lower_omp (OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c), ctx);
2677 gimple_seq_add_seq (&sub_seq, OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c));
2678 OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c) = NULL;
2679 OMP_CLAUSE_REDUCTION_PLACEHOLDER (c) = NULL;
2680 }
2681 else
2682 {
2683 x = build2 (code, TREE_TYPE (ref), ref, new_var);
2684 ref = build_outer_var_ref (var, ctx);
2685 gimplify_assign (ref, x, &sub_seq);
2686 }
2687 }
2688
2689 stmt = gimple_build_call (built_in_decls[BUILT_IN_GOMP_ATOMIC_START], 0);
2690 gimple_seq_add_stmt (stmt_seqp, stmt);
2691
2692 gimple_seq_add_seq (stmt_seqp, sub_seq);
2693
2694 stmt = gimple_build_call (built_in_decls[BUILT_IN_GOMP_ATOMIC_END], 0);
2695 gimple_seq_add_stmt (stmt_seqp, stmt);
2696 }
2697
2698
2699 /* Generate code to implement the COPYPRIVATE clauses. */
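/* A sketch (simplified): for

       #pragma omp single copyprivate(x)

   the thread that executed the single region stores x, or its address
   when passed by reference, into the broadcast record (statements in
   SLIST), and every other thread then copies the value back out of it
   (statements in RLIST):

       sender:     .omp_copy_s.x = x;
       receivers:  x = .omp_copy_s.x;  */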
2700
2701 static void
2702 lower_copyprivate_clauses (tree clauses, gimple_seq *slist, gimple_seq *rlist,
2703 omp_context *ctx)
2704 {
2705 tree c;
2706
2707 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
2708 {
2709 tree var, new_var, ref, x;
2710 bool by_ref;
2711 location_t clause_loc = OMP_CLAUSE_LOCATION (c);
2712
2713 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_COPYPRIVATE)
2714 continue;
2715
2716 var = OMP_CLAUSE_DECL (c);
2717 by_ref = use_pointer_for_field (var, NULL);
2718
2719 ref = build_sender_ref (var, ctx);
2720 x = new_var = lookup_decl_in_outer_ctx (var, ctx);
2721 if (by_ref)
2722 {
2723 x = build_fold_addr_expr_loc (clause_loc, new_var);
2724 x = fold_convert_loc (clause_loc, TREE_TYPE (ref), x);
2725 }
2726 gimplify_assign (ref, x, slist);
2727
2728 ref = build_receiver_ref (var, false, ctx);
2729 if (by_ref)
2730 {
2731 ref = fold_convert_loc (clause_loc,
2732 build_pointer_type (TREE_TYPE (new_var)),
2733 ref);
2734 ref = build_fold_indirect_ref_loc (clause_loc, ref);
2735 }
2736 if (is_reference (var))
2737 {
2738 ref = fold_convert_loc (clause_loc, TREE_TYPE (new_var), ref);
2739 ref = build_simple_mem_ref_loc (clause_loc, ref);
2740 new_var = build_simple_mem_ref_loc (clause_loc, new_var);
2741 }
2742 x = lang_hooks.decls.omp_clause_assign_op (c, new_var, ref);
2743 gimplify_and_add (x, rlist);
2744 }
2745 }
2746
2747
2748 /* Generate code to implement the FIRSTPRIVATE, COPYIN, LASTPRIVATE
2749 and REDUCTION clauses from the sender (aka parent) side.  */
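/* A sketch (simplified, hypothetical names): for

       #pragma omp parallel firstprivate(a) lastprivate(b)

   the parent emits, around the parallel region,

       .omp_data_o.a = a;    <-- inbound copy, appended to ILIST
       b = .omp_data_o.b;    <-- outbound copy, appended to OLIST

   so inbound values are marshalled before the region starts and
   outbound results are copied back after it completes.  */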
2750
2751 static void
2752 lower_send_clauses (tree clauses, gimple_seq *ilist, gimple_seq *olist,
2753 omp_context *ctx)
2754 {
2755 tree c;
2756
2757 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
2758 {
2759 tree val, ref, x, var;
2760 bool by_ref, do_in = false, do_out = false;
2761 location_t clause_loc = OMP_CLAUSE_LOCATION (c);
2762
2763 switch (OMP_CLAUSE_CODE (c))
2764 {
2765 case OMP_CLAUSE_PRIVATE:
2766 if (OMP_CLAUSE_PRIVATE_OUTER_REF (c))
2767 break;
2768 continue;
2769 case OMP_CLAUSE_FIRSTPRIVATE:
2770 case OMP_CLAUSE_COPYIN:
2771 case OMP_CLAUSE_LASTPRIVATE:
2772 case OMP_CLAUSE_REDUCTION:
2773 break;
2774 default:
2775 continue;
2776 }
2777
2778 val = OMP_CLAUSE_DECL (c);
2779 var = lookup_decl_in_outer_ctx (val, ctx);
2780
2781 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_COPYIN
2782 && is_global_var (var))
2783 continue;
2784 if (is_variable_sized (val))
2785 continue;
2786 by_ref = use_pointer_for_field (val, NULL);
2787
2788 switch (OMP_CLAUSE_CODE (c))
2789 {
2790 case OMP_CLAUSE_PRIVATE:
2791 case OMP_CLAUSE_FIRSTPRIVATE:
2792 case OMP_CLAUSE_COPYIN:
2793 do_in = true;
2794 break;
2795
2796 case OMP_CLAUSE_LASTPRIVATE:
2797 if (by_ref || is_reference (val))
2798 {
2799 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
2800 continue;
2801 do_in = true;
2802 }
2803 else
2804 {
2805 do_out = true;
2806 if (lang_hooks.decls.omp_private_outer_ref (val))
2807 do_in = true;
2808 }
2809 break;
2810
2811 case OMP_CLAUSE_REDUCTION:
2812 do_in = true;
2813 do_out = !(by_ref || is_reference (val));
2814 break;
2815
2816 default:
2817 gcc_unreachable ();
2818 }
2819
2820 if (do_in)
2821 {
2822 ref = build_sender_ref (val, ctx);
2823 x = by_ref ? build_fold_addr_expr_loc (clause_loc, var) : var;
2824 gimplify_assign (ref, x, ilist);
2825 if (is_task_ctx (ctx))
2826 DECL_ABSTRACT_ORIGIN (TREE_OPERAND (ref, 1)) = NULL;
2827 }
2828
2829 if (do_out)
2830 {
2831 ref = build_sender_ref (val, ctx);
2832 gimplify_assign (var, ref, olist);
2833 }
2834 }
2835 }
2836
2837 /* Generate code to implement SHARED from the sender (aka parent)
2838 side. This is trickier, since GIMPLE_OMP_PARALLEL_CLAUSES doesn't
2839 list things that got automatically shared. */
2840
2841 static void
2842 lower_send_shared_vars (gimple_seq *ilist, gimple_seq *olist, omp_context *ctx)
2843 {
2844 tree var, ovar, nvar, f, x, record_type;
2845
2846 if (ctx->record_type == NULL)
2847 return;
2848
2849 record_type = ctx->srecord_type ? ctx->srecord_type : ctx->record_type;
2850 for (f = TYPE_FIELDS (record_type); f ; f = DECL_CHAIN (f))
2851 {
2852 ovar = DECL_ABSTRACT_ORIGIN (f);
2853 nvar = maybe_lookup_decl (ovar, ctx);
2854 if (!nvar || !DECL_HAS_VALUE_EXPR_P (nvar))
2855 continue;
2856
2857 /* If CTX is a nested parallel directive, find the immediately
2858 enclosing parallel or workshare construct that contains a
2859 mapping for OVAR. */
2860 var = lookup_decl_in_outer_ctx (ovar, ctx);
2861
2862 if (use_pointer_for_field (ovar, ctx))
2863 {
2864 x = build_sender_ref (ovar, ctx);
2865 var = build_fold_addr_expr (var);
2866 gimplify_assign (x, var, ilist);
2867 }
2868 else
2869 {
2870 x = build_sender_ref (ovar, ctx);
2871 gimplify_assign (x, var, ilist);
2872
2873 if (!TREE_READONLY (var)
2874 /* We don't need to receive a new reference to a result
2875 or parm decl. In fact we may not store to it as we will
2876 invalidate any pending RSO and generate wrong gimple
2877 during inlining. */
2878 && !((TREE_CODE (var) == RESULT_DECL
2879 || TREE_CODE (var) == PARM_DECL)
2880 && DECL_BY_REFERENCE (var)))
2881 {
2882 x = build_sender_ref (ovar, ctx);
2883 gimplify_assign (var, x, olist);
2884 }
2885 }
2886 }
2887 }
2888
2889
2890 /* A convenience function to build a GIMPLE_COND carrying just the
2891 condition, with no branch labels.  */
2892
2893 static gimple
2894 gimple_build_cond_empty (tree cond)
2895 {
2896 enum tree_code pred_code;
2897 tree lhs, rhs;
2898
2899 gimple_cond_get_ops_from_tree (cond, &pred_code, &lhs, &rhs);
2900 return gimple_build_cond (pred_code, lhs, rhs, NULL_TREE, NULL_TREE);
2901 }
2902
2903
2904 /* Build the function calls to GOMP_parallel_start etc to actually
2905 generate the parallel operation. REGION is the parallel region
2906 being expanded.  BB is the block into which to insert the code.  WS_ARGS
2907 will be set if this is a call to a combined parallel+workshare
2908 construct; it contains the list of additional arguments needed by
2909 the workshare construct.  */
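/* A sketch of the emitted sequence (simplified): for

       #pragma omp parallel num_threads (4)

   the code built below amounts to

       GOMP_parallel_start (child_fn, &.omp_data_o, 4);
       child_fn (&.omp_data_o);    <-- encountering thread joins the team
       GOMP_parallel_end ();

   with 0 passed as the thread count when it is selected at run time.  */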
2910
2911 static void
2912 expand_parallel_call (struct omp_region *region, basic_block bb,
2913 gimple entry_stmt, VEC(tree,gc) *ws_args)
2914 {
2915 tree t, t1, t2, val, cond, c, clauses;
2916 gimple_stmt_iterator gsi;
2917 gimple stmt;
2918 int start_ix;
2919 location_t clause_loc;
2920 VEC(tree,gc) *args;
2921
2922 clauses = gimple_omp_parallel_clauses (entry_stmt);
2923
2924 /* Determine what flavor of GOMP_parallel_start we will be
2925 emitting. */
2926 start_ix = BUILT_IN_GOMP_PARALLEL_START;
2927 if (is_combined_parallel (region))
2928 {
2929 switch (region->inner->type)
2930 {
2931 case GIMPLE_OMP_FOR:
2932 gcc_assert (region->inner->sched_kind != OMP_CLAUSE_SCHEDULE_AUTO);
2933 start_ix = BUILT_IN_GOMP_PARALLEL_LOOP_STATIC_START
2934 + (region->inner->sched_kind
2935 == OMP_CLAUSE_SCHEDULE_RUNTIME
2936 ? 3 : region->inner->sched_kind);
2937 break;
2938 case GIMPLE_OMP_SECTIONS:
2939 start_ix = BUILT_IN_GOMP_PARALLEL_SECTIONS_START;
2940 break;
2941 default:
2942 gcc_unreachable ();
2943 }
2944 }
2945
2946 /* By default, the value of NUM_THREADS is zero (selected at run time)
2947 and there is no conditional. */
2948 cond = NULL_TREE;
2949 val = build_int_cst (unsigned_type_node, 0);
2950
2951 c = find_omp_clause (clauses, OMP_CLAUSE_IF);
2952 if (c)
2953 cond = OMP_CLAUSE_IF_EXPR (c);
2954
2955 c = find_omp_clause (clauses, OMP_CLAUSE_NUM_THREADS);
2956 if (c)
2957 {
2958 val = OMP_CLAUSE_NUM_THREADS_EXPR (c);
2959 clause_loc = OMP_CLAUSE_LOCATION (c);
2960 }
2961 else
2962 clause_loc = gimple_location (entry_stmt);
2963
2964 /* Ensure 'val' is of the correct type. */
2965 val = fold_convert_loc (clause_loc, unsigned_type_node, val);
2966
2967 /* If we found the clause 'if (cond)', build either
2968 (cond != 0) or (cond ? val : 1u). */
2969 if (cond)
2970 {
2971 gimple_stmt_iterator gsi;
2972
2973 cond = gimple_boolify (cond);
2974
2975 if (integer_zerop (val))
2976 val = fold_build2_loc (clause_loc,
2977 EQ_EXPR, unsigned_type_node, cond,
2978 build_int_cst (TREE_TYPE (cond), 0));
2979 else
2980 {
2981 basic_block cond_bb, then_bb, else_bb;
2982 edge e, e_then, e_else;
2983 tree tmp_then, tmp_else, tmp_join, tmp_var;
2984
2985 tmp_var = create_tmp_var (TREE_TYPE (val), NULL);
2986 if (gimple_in_ssa_p (cfun))
2987 {
2988 tmp_then = make_ssa_name (tmp_var, NULL);
2989 tmp_else = make_ssa_name (tmp_var, NULL);
2990 tmp_join = make_ssa_name (tmp_var, NULL);
2991 }
2992 else
2993 {
2994 tmp_then = tmp_var;
2995 tmp_else = tmp_var;
2996 tmp_join = tmp_var;
2997 }
2998
2999 e = split_block (bb, NULL);
3000 cond_bb = e->src;
3001 bb = e->dest;
3002 remove_edge (e);
3003
3004 then_bb = create_empty_bb (cond_bb);
3005 else_bb = create_empty_bb (then_bb);
3006 set_immediate_dominator (CDI_DOMINATORS, then_bb, cond_bb);
3007 set_immediate_dominator (CDI_DOMINATORS, else_bb, cond_bb);
3008
3009 stmt = gimple_build_cond_empty (cond);
3010 gsi = gsi_start_bb (cond_bb);
3011 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3012
3013 gsi = gsi_start_bb (then_bb);
3014 stmt = gimple_build_assign (tmp_then, val);
3015 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3016
3017 gsi = gsi_start_bb (else_bb);
3018 stmt = gimple_build_assign
3019 (tmp_else, build_int_cst (unsigned_type_node, 1));
3020 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3021
3022 make_edge (cond_bb, then_bb, EDGE_TRUE_VALUE);
3023 make_edge (cond_bb, else_bb, EDGE_FALSE_VALUE);
3024 e_then = make_edge (then_bb, bb, EDGE_FALLTHRU);
3025 e_else = make_edge (else_bb, bb, EDGE_FALLTHRU);
3026
3027 if (gimple_in_ssa_p (cfun))
3028 {
3029 gimple phi = create_phi_node (tmp_join, bb);
3030 SSA_NAME_DEF_STMT (tmp_join) = phi;
3031 add_phi_arg (phi, tmp_then, e_then, UNKNOWN_LOCATION);
3032 add_phi_arg (phi, tmp_else, e_else, UNKNOWN_LOCATION);
3033 }
3034
3035 val = tmp_join;
3036 }
3037
3038 gsi = gsi_start_bb (bb);
3039 val = force_gimple_operand_gsi (&gsi, val, true, NULL_TREE,
3040 false, GSI_CONTINUE_LINKING);
3041 }
3042
3043 gsi = gsi_last_bb (bb);
3044 t = gimple_omp_parallel_data_arg (entry_stmt);
3045 if (t == NULL)
3046 t1 = null_pointer_node;
3047 else
3048 t1 = build_fold_addr_expr (t);
3049 t2 = build_fold_addr_expr (gimple_omp_parallel_child_fn (entry_stmt));
3050
3051 args = VEC_alloc (tree, gc, 3 + VEC_length (tree, ws_args));
3052 VEC_quick_push (tree, args, t2);
3053 VEC_quick_push (tree, args, t1);
3054 VEC_quick_push (tree, args, val);
3055 VEC_splice (tree, args, ws_args);
3056
3057 t = build_call_expr_loc_vec (UNKNOWN_LOCATION,
3058 built_in_decls[start_ix], args);
3059
3060 force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3061 false, GSI_CONTINUE_LINKING);
3062
3063 t = gimple_omp_parallel_data_arg (entry_stmt);
3064 if (t == NULL)
3065 t = null_pointer_node;
3066 else
3067 t = build_fold_addr_expr (t);
3068 t = build_call_expr_loc (gimple_location (entry_stmt),
3069 gimple_omp_parallel_child_fn (entry_stmt), 1, t);
3070 force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3071 false, GSI_CONTINUE_LINKING);
3072
3073 t = build_call_expr_loc (gimple_location (entry_stmt),
3074 built_in_decls[BUILT_IN_GOMP_PARALLEL_END], 0);
3075 force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3076 false, GSI_CONTINUE_LINKING);
3077 }
3078
3079
3080 /* Build the function call to GOMP_task to actually
3081 generate the task operation.  BB is the block into which to insert the code.  */
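/* A sketch (simplified): for

       #pragma omp task if (c) untied

   the call built below is roughly

       GOMP_task (child_fn, &.omp_data_o, cpyfn, arg_size, arg_align,
                  c, 1);

   where the last argument is the flags word (1 only for untied tasks)
   and cpyfn is NULL when no task copy function is needed.  */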
3082
3083 static void
3084 expand_task_call (basic_block bb, gimple entry_stmt)
3085 {
3086 tree t, t1, t2, t3, flags, cond, c, clauses;
3087 gimple_stmt_iterator gsi;
3088 location_t loc = gimple_location (entry_stmt);
3089
3090 clauses = gimple_omp_task_clauses (entry_stmt);
3091
3092 c = find_omp_clause (clauses, OMP_CLAUSE_IF);
3093 if (c)
3094 cond = gimple_boolify (OMP_CLAUSE_IF_EXPR (c));
3095 else
3096 cond = boolean_true_node;
3097
3098 c = find_omp_clause (clauses, OMP_CLAUSE_UNTIED);
3099 flags = build_int_cst (unsigned_type_node, (c ? 1 : 0));
3100
3101 gsi = gsi_last_bb (bb);
3102 t = gimple_omp_task_data_arg (entry_stmt);
3103 if (t == NULL)
3104 t2 = null_pointer_node;
3105 else
3106 t2 = build_fold_addr_expr_loc (loc, t);
3107 t1 = build_fold_addr_expr_loc (loc, gimple_omp_task_child_fn (entry_stmt));
3108 t = gimple_omp_task_copy_fn (entry_stmt);
3109 if (t == NULL)
3110 t3 = null_pointer_node;
3111 else
3112 t3 = build_fold_addr_expr_loc (loc, t);
3113
3114 t = build_call_expr (built_in_decls[BUILT_IN_GOMP_TASK], 7, t1, t2, t3,
3115 gimple_omp_task_arg_size (entry_stmt),
3116 gimple_omp_task_arg_align (entry_stmt), cond, flags);
3117
3118 force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3119 false, GSI_CONTINUE_LINKING);
3120 }
3121
3122
3123 /* If exceptions are enabled, wrap the statements in BODY in a MUST_NOT_THROW
3124 catch handler and return it. This prevents programs from violating the
3125 structured block semantics with throws. */
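/* Conceptually (an illustration, not literal output), BODY becomes

       try { BODY } catch (...) { must-not-throw handler }

   where the handler is the language's EH cleanup action when the
   frontend provides one, and __builtin_trap otherwise.  */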
3126
3127 static gimple_seq
3128 maybe_catch_exception (gimple_seq body)
3129 {
3130 gimple g;
3131 tree decl;
3132
3133 if (!flag_exceptions)
3134 return body;
3135
3136 if (lang_hooks.eh_protect_cleanup_actions != NULL)
3137 decl = lang_hooks.eh_protect_cleanup_actions ();
3138 else
3139 decl = built_in_decls[BUILT_IN_TRAP];
3140
3141 g = gimple_build_eh_must_not_throw (decl);
3142 g = gimple_build_try (body, gimple_seq_alloc_with_stmt (g),
3143 GIMPLE_TRY_CATCH);
3144
3145 return gimple_seq_alloc_with_stmt (g);
3146 }
3147
3148 /* Chain all the DECLs in the vector V by their DECL_CHAIN fields.  */
3149
3150 static tree
3151 vec2chain (VEC(tree,gc) *v)
3152 {
3153 tree chain = NULL_TREE, t;
3154 unsigned ix;
3155
3156 FOR_EACH_VEC_ELT_REVERSE (tree, v, ix, t)
3157 {
3158 DECL_CHAIN (t) = chain;
3159 chain = t;
3160 }
3161
3162 return chain;
3163 }
3164
3165
3166 /* Remove barriers in REGION->EXIT's block. Note that this is only
3167 valid for GIMPLE_OMP_PARALLEL regions. Since the end of a parallel region
3168 is an implicit barrier, any barrier that a workshare inside the
3169 GIMPLE_OMP_PARALLEL left at the end of the region can now be
3170 removed.  */
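/* For instance (illustrative):

       #pragma omp parallel
       {
         #pragma omp for      <-- implicit barrier at the end of the loop
         for (...) ...;
       }                      <-- implicit barrier at the end of the parallel

   the loop's trailing barrier is redundant, so its GIMPLE_OMP_RETURN
   can be marked nowait, unless queued tasks might still need it (see
   below).  */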
3171
3172 static void
3173 remove_exit_barrier (struct omp_region *region)
3174 {
3175 gimple_stmt_iterator gsi;
3176 basic_block exit_bb;
3177 edge_iterator ei;
3178 edge e;
3179 gimple stmt;
3180 int any_addressable_vars = -1;
3181
3182 exit_bb = region->exit;
3183
3184 /* If the parallel region doesn't return, we don't have REGION->EXIT
3185 block at all. */
3186 if (! exit_bb)
3187 return;
3188
3189 /* The last insn in the block will be the parallel's GIMPLE_OMP_RETURN. The
3190 workshare's GIMPLE_OMP_RETURN will be in a preceding block. The kinds of
3191 statements that can appear in between are extremely limited -- no
3192 memory operations at all. Here, we allow nothing at all, so the
3193 only thing we allow to precede this GIMPLE_OMP_RETURN is a label. */
3194 gsi = gsi_last_bb (exit_bb);
3195 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_RETURN);
3196 gsi_prev (&gsi);
3197 if (!gsi_end_p (gsi) && gimple_code (gsi_stmt (gsi)) != GIMPLE_LABEL)
3198 return;
3199
3200 FOR_EACH_EDGE (e, ei, exit_bb->preds)
3201 {
3202 gsi = gsi_last_bb (e->src);
3203 if (gsi_end_p (gsi))
3204 continue;
3205 stmt = gsi_stmt (gsi);
3206 if (gimple_code (stmt) == GIMPLE_OMP_RETURN
3207 && !gimple_omp_return_nowait_p (stmt))
3208 {
3209 /* OpenMP 3.0 tasks unfortunately prevent this optimization
3210 in many cases. If there could be tasks queued, the barrier
3211 might be needed to let the tasks run before some local
3212 variable of the parallel that the task uses as shared
3213 runs out of scope. The task can be spawned either
3214 from within the current function (this would be easy to check)
3215 or from some function it calls that is passed the address
3216 of such a variable. */
3217 if (any_addressable_vars < 0)
3218 {
3219 gimple parallel_stmt = last_stmt (region->entry);
3220 tree child_fun = gimple_omp_parallel_child_fn (parallel_stmt);
3221 tree local_decls, block, decl;
3222 unsigned ix;
3223
3224 any_addressable_vars = 0;
3225 FOR_EACH_LOCAL_DECL (DECL_STRUCT_FUNCTION (child_fun), ix, decl)
3226 if (TREE_ADDRESSABLE (decl))
3227 {
3228 any_addressable_vars = 1;
3229 break;
3230 }
3231 for (block = gimple_block (stmt);
3232 !any_addressable_vars
3233 && block
3234 && TREE_CODE (block) == BLOCK;
3235 block = BLOCK_SUPERCONTEXT (block))
3236 {
3237 for (local_decls = BLOCK_VARS (block);
3238 local_decls;
3239 local_decls = DECL_CHAIN (local_decls))
3240 if (TREE_ADDRESSABLE (local_decls))
3241 {
3242 any_addressable_vars = 1;
3243 break;
3244 }
3245 if (block == gimple_block (parallel_stmt))
3246 break;
3247 }
3248 }
3249 if (!any_addressable_vars)
3250 gimple_omp_return_set_nowait (stmt);
3251 }
3252 }
3253 }
3254
3255 static void
3256 remove_exit_barriers (struct omp_region *region)
3257 {
3258 if (region->type == GIMPLE_OMP_PARALLEL)
3259 remove_exit_barrier (region);
3260
3261 if (region->inner)
3262 {
3263 region = region->inner;
3264 remove_exit_barriers (region);
3265 while (region->next)
3266 {
3267 region = region->next;
3268 remove_exit_barriers (region);
3269 }
3270 }
3271 }
3272
3273 /* Optimize omp_get_thread_num () and omp_get_num_threads ()
3274 calls. These can't be declared as const functions, but
3275 within one parallel body they are constant, so they can be
3276 transformed there into __builtin_omp_get_{thread_num,num_threads} ()
3277 which are declared const.  Similarly for a task body, except
3278 that in an untied task omp_get_thread_num () can change at any task
3279 scheduling point. */
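/* Illustrative effect: inside a parallel body (or a tied task body),

       n = omp_get_thread_num ();

   is redirected to __builtin_omp_get_thread_num (), and since that
   builtin is const, later passes can CSE repeated calls within the
   body.  */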
3280
3281 static void
3282 optimize_omp_library_calls (gimple entry_stmt)
3283 {
3284 basic_block bb;
3285 gimple_stmt_iterator gsi;
3286 tree thr_num_id
3287 = DECL_ASSEMBLER_NAME (built_in_decls [BUILT_IN_OMP_GET_THREAD_NUM]);
3288 tree num_thr_id
3289 = DECL_ASSEMBLER_NAME (built_in_decls [BUILT_IN_OMP_GET_NUM_THREADS]);
3290 bool untied_task = (gimple_code (entry_stmt) == GIMPLE_OMP_TASK
3291 && find_omp_clause (gimple_omp_task_clauses (entry_stmt),
3292 OMP_CLAUSE_UNTIED) != NULL);
3293
3294 FOR_EACH_BB (bb)
3295 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
3296 {
3297 gimple call = gsi_stmt (gsi);
3298 tree decl;
3299
3300 if (is_gimple_call (call)
3301 && (decl = gimple_call_fndecl (call))
3302 && DECL_EXTERNAL (decl)
3303 && TREE_PUBLIC (decl)
3304 && DECL_INITIAL (decl) == NULL)
3305 {
3306 tree built_in;
3307
3308 if (DECL_NAME (decl) == thr_num_id)
3309 {
3310 /* In #pragma omp task untied omp_get_thread_num () can change
3311 during the execution of the task region. */
3312 if (untied_task)
3313 continue;
3314 built_in = built_in_decls [BUILT_IN_OMP_GET_THREAD_NUM];
3315 }
3316 else if (DECL_NAME (decl) == num_thr_id)
3317 built_in = built_in_decls [BUILT_IN_OMP_GET_NUM_THREADS];
3318 else
3319 continue;
3320
3321 if (DECL_ASSEMBLER_NAME (decl) != DECL_ASSEMBLER_NAME (built_in)
3322 || gimple_call_num_args (call) != 0)
3323 continue;
3324
3325 if (flag_exceptions && !TREE_NOTHROW (decl))
3326 continue;
3327
3328 if (TREE_CODE (TREE_TYPE (decl)) != FUNCTION_TYPE
3329 || !types_compatible_p (TREE_TYPE (TREE_TYPE (decl)),
3330 TREE_TYPE (TREE_TYPE (built_in))))
3331 continue;
3332
3333 gimple_call_set_fndecl (call, built_in);
3334 }
3335 }
3336 }
3337
3338 /* Expand the OpenMP parallel or task directive starting at REGION. */
3339
3340 static void
3341 expand_omp_taskreg (struct omp_region *region)
3342 {
3343 basic_block entry_bb, exit_bb, new_bb;
3344 struct function *child_cfun;
3345 tree child_fn, block, t;
3346 tree save_current;
3347 gimple_stmt_iterator gsi;
3348 gimple entry_stmt, stmt;
3349 edge e;
3350 VEC(tree,gc) *ws_args;
3351
3352 entry_stmt = last_stmt (region->entry);
3353 child_fn = gimple_omp_taskreg_child_fn (entry_stmt);
3354 child_cfun = DECL_STRUCT_FUNCTION (child_fn);
3355 /* If this function has already been instrumented, make sure
3356 the child function isn't instrumented again. */
3357 child_cfun->after_tree_profile = cfun->after_tree_profile;
3358
3359 entry_bb = region->entry;
3360 exit_bb = region->exit;
3361
3362 if (is_combined_parallel (region))
3363 ws_args = region->ws_args;
3364 else
3365 ws_args = NULL;
3366
3367 if (child_cfun->cfg)
3368 {
3369 /* Due to inlining, it may happen that we have already outlined
3370 the region, in which case all we need to do is make the
3371 sub-graph unreachable and emit the parallel call. */
3372 edge entry_succ_e, exit_succ_e;
3373 gimple_stmt_iterator gsi;
3374
3375 entry_succ_e = single_succ_edge (entry_bb);
3376
3377 gsi = gsi_last_bb (entry_bb);
3378 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_PARALLEL
3379 || gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_TASK);
3380 gsi_remove (&gsi, true);
3381
3382 new_bb = entry_bb;
3383 if (exit_bb)
3384 {
3385 exit_succ_e = single_succ_edge (exit_bb);
3386 make_edge (new_bb, exit_succ_e->dest, EDGE_FALLTHRU);
3387 }
3388 remove_edge_and_dominated_blocks (entry_succ_e);
3389 }
3390 else
3391 {
3392 unsigned srcidx, dstidx, num;
3393
3394 /* If the parallel region needs data sent from the parent
3395 function, then the very first statement (except possible
3396 tree profile counter updates) of the parallel body
3397 is a copy assignment .OMP_DATA_I = &.OMP_DATA_O. Since
3398 &.OMP_DATA_O is passed as an argument to the child function,
3399 we need to replace it with the argument as seen by the child
3400 function.
3401
3402 In most cases, this will end up being the identity assignment
3403 .OMP_DATA_I = .OMP_DATA_I. However, if the parallel body had
3404 a function call that has been inlined, the original PARM_DECL
3405 .OMP_DATA_I may have been converted into a different local
3406 variable, in which case we need to keep the assignment.  */
3407 if (gimple_omp_taskreg_data_arg (entry_stmt))
3408 {
3409 basic_block entry_succ_bb = single_succ (entry_bb);
3410 gimple_stmt_iterator gsi;
3411 tree arg, narg;
3412 gimple parcopy_stmt = NULL;
3413
3414 for (gsi = gsi_start_bb (entry_succ_bb); ; gsi_next (&gsi))
3415 {
3416 gimple stmt;
3417
3418 gcc_assert (!gsi_end_p (gsi));
3419 stmt = gsi_stmt (gsi);
3420 if (gimple_code (stmt) != GIMPLE_ASSIGN)
3421 continue;
3422
3423 if (gimple_num_ops (stmt) == 2)
3424 {
3425 tree arg = gimple_assign_rhs1 (stmt);
3426
3427 /* We're ignoring the subcode because we're
3428 effectively doing a STRIP_NOPS. */
3429
3430 if (TREE_CODE (arg) == ADDR_EXPR
3431 && TREE_OPERAND (arg, 0)
3432 == gimple_omp_taskreg_data_arg (entry_stmt))
3433 {
3434 parcopy_stmt = stmt;
3435 break;
3436 }
3437 }
3438 }
3439
3440 gcc_assert (parcopy_stmt != NULL);
3441 arg = DECL_ARGUMENTS (child_fn);
3442
3443 if (!gimple_in_ssa_p (cfun))
3444 {
3445 if (gimple_assign_lhs (parcopy_stmt) == arg)
3446 gsi_remove (&gsi, true);
3447 else
3448 {
3449 /* ?? Is setting the subcode really necessary ?? */
3450 gimple_omp_set_subcode (parcopy_stmt, TREE_CODE (arg));
3451 gimple_assign_set_rhs1 (parcopy_stmt, arg);
3452 }
3453 }
3454 else
3455 {
3456 /* If we are in ssa form, we must load the value from the default
3457 definition of the argument.  That default definition should not
3458 exist yet, since the argument is never used uninitialized.  */
3459 gcc_assert (gimple_default_def (cfun, arg) == NULL);
3460 narg = make_ssa_name (arg, gimple_build_nop ());
3461 set_default_def (arg, narg);
3462 /* ?? Is setting the subcode really necessary ?? */
3463 gimple_omp_set_subcode (parcopy_stmt, TREE_CODE (narg));
3464 gimple_assign_set_rhs1 (parcopy_stmt, narg);
3465 update_stmt (parcopy_stmt);
3466 }
3467 }
3468
3469 /* Declare local variables needed in CHILD_CFUN. */
3470 block = DECL_INITIAL (child_fn);
3471 BLOCK_VARS (block) = vec2chain (child_cfun->local_decls);
3472 /* The gimplifier could record temporaries in parallel/task block
3473 rather than in containing function's local_decls chain,
3474 which would mean cgraph missed finalizing them. Do it now. */
3475 for (t = BLOCK_VARS (block); t; t = DECL_CHAIN (t))
3476 if (TREE_CODE (t) == VAR_DECL
3477 && TREE_STATIC (t)
3478 && !DECL_EXTERNAL (t))
3479 varpool_finalize_decl (t);
3480 DECL_SAVED_TREE (child_fn) = NULL;
3481 gimple_set_body (child_fn, bb_seq (single_succ (entry_bb)));
3482 TREE_USED (block) = 1;
3483
3484 /* Reset DECL_CONTEXT on function arguments. */
3485 for (t = DECL_ARGUMENTS (child_fn); t; t = DECL_CHAIN (t))
3486 DECL_CONTEXT (t) = child_fn;
3487
3488 /* Split ENTRY_BB at GIMPLE_OMP_PARALLEL or GIMPLE_OMP_TASK,
3489 so that it can be moved to the child function. */
3490 gsi = gsi_last_bb (entry_bb);
3491 stmt = gsi_stmt (gsi);
3492 gcc_assert (stmt && (gimple_code (stmt) == GIMPLE_OMP_PARALLEL
3493 || gimple_code (stmt) == GIMPLE_OMP_TASK));
3494 gsi_remove (&gsi, true);
3495 e = split_block (entry_bb, stmt);
3496 entry_bb = e->dest;
3497 single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
3498
3499 /* Convert GIMPLE_OMP_RETURN into a RETURN_EXPR. */
3500 if (exit_bb)
3501 {
3502 gsi = gsi_last_bb (exit_bb);
3503 gcc_assert (!gsi_end_p (gsi)
3504 && gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_RETURN);
3505 stmt = gimple_build_return (NULL);
3506 gsi_insert_after (&gsi, stmt, GSI_SAME_STMT);
3507 gsi_remove (&gsi, true);
3508 }
3509
3510 /* Move the parallel region into CHILD_CFUN. */
3511
3512 if (gimple_in_ssa_p (cfun))
3513 {
3514 push_cfun (child_cfun);
3515 init_tree_ssa (child_cfun);
3516 init_ssa_operands ();
3517 cfun->gimple_df->in_ssa_p = true;
3518 pop_cfun ();
3519 block = NULL_TREE;
3520 }
3521 else
3522 block = gimple_block (entry_stmt);
3523
3524 new_bb = move_sese_region_to_fn (child_cfun, entry_bb, exit_bb, block);
3525 if (exit_bb)
3526 single_succ_edge (new_bb)->flags = EDGE_FALLTHRU;
3527
3528 /* Remove non-local VAR_DECLs from child_cfun->local_decls list. */
3529 num = VEC_length (tree, child_cfun->local_decls);
3530 for (srcidx = 0, dstidx = 0; srcidx < num; srcidx++)
3531 {
3532 t = VEC_index (tree, child_cfun->local_decls, srcidx);
3533 if (DECL_CONTEXT (t) == cfun->decl)
3534 continue;
3535 if (srcidx != dstidx)
3536 VEC_replace (tree, child_cfun->local_decls, dstidx, t);
3537 dstidx++;
3538 }
3539 if (dstidx != num)
3540 VEC_truncate (tree, child_cfun->local_decls, dstidx);
3541
3542 /* Inform the callgraph about the new function. */
3543 DECL_STRUCT_FUNCTION (child_fn)->curr_properties
3544 = cfun->curr_properties;
3545 cgraph_add_new_function (child_fn, true);
3546
3547 /* Fix the callgraph edges for child_cfun. Those for cfun will be
3548 fixed in a following pass. */
3549 push_cfun (child_cfun);
3550 save_current = current_function_decl;
3551 current_function_decl = child_fn;
3552 if (optimize)
3553 optimize_omp_library_calls (entry_stmt);
3554 rebuild_cgraph_edges ();
3555
3556 /* Some EH regions might become dead, see PR34608. If
3557 pass_cleanup_cfg isn't the first pass to happen with the
3558 new child, these dead EH edges might cause problems.
3559 Clean them up now. */
3560 if (flag_exceptions)
3561 {
3562 basic_block bb;
3563 bool changed = false;
3564
3565 FOR_EACH_BB (bb)
3566 changed |= gimple_purge_dead_eh_edges (bb);
3567 if (changed)
3568 cleanup_tree_cfg ();
3569 }
3570 if (gimple_in_ssa_p (cfun))
3571 update_ssa (TODO_update_ssa);
3572 current_function_decl = save_current;
3573 pop_cfun ();
3574 }
3575
3576 /* Emit a library call to launch the children threads. */
3577 if (gimple_code (entry_stmt) == GIMPLE_OMP_PARALLEL)
3578 expand_parallel_call (region, new_bb, entry_stmt, ws_args);
3579 else
3580 expand_task_call (new_bb, entry_stmt);
3581 update_ssa (TODO_update_ssa_only_virtuals);
3582 }
3583
3584
3585 /* A subroutine of expand_omp_for. Generate code for a parallel
3586 loop with any schedule. Given parameters:
3587
3588 for (V = N1; V cond N2; V += STEP) BODY;
3589
3590 where COND is "<" or ">", we generate pseudocode
3591
3592 more = GOMP_loop_foo_start (N1, N2, STEP, CHUNK, &istart0, &iend0);
3593 if (more) goto L0; else goto L3;
3594 L0:
3595 V = istart0;
3596 iend = iend0;
3597 L1:
3598 BODY;
3599 V += STEP;
3600 if (V cond iend) goto L1; else goto L2;
3601 L2:
3602 if (GOMP_loop_foo_next (&istart0, &iend0)) goto L0; else goto L3;
3603 L3:
3604
3605 If this is a combined omp parallel loop, instead of the call to
3606 GOMP_loop_foo_start, we call GOMP_loop_foo_next.
3607
3608 For collapsed loops, given parameters:
3609 collapse(3)
3610 for (V1 = N11; V1 cond1 N12; V1 += STEP1)
3611 for (V2 = N21; V2 cond2 N22; V2 += STEP2)
3612 for (V3 = N31; V3 cond3 N32; V3 += STEP3)
3613 BODY;
3614
3615 we generate pseudocode
3616
3617 if (cond3 is <)
3618 adj = STEP3 - 1;
3619 else
3620 adj = STEP3 + 1;
3621 count3 = (adj + N32 - N31) / STEP3;
3622 if (cond2 is <)
3623 adj = STEP2 - 1;
3624 else
3625 adj = STEP2 + 1;
3626 count2 = (adj + N22 - N21) / STEP2;
3627 if (cond1 is <)
3628 adj = STEP1 - 1;
3629 else
3630 adj = STEP1 + 1;
3631 count1 = (adj + N12 - N11) / STEP1;
3632 count = count1 * count2 * count3;
3633 more = GOMP_loop_foo_start (0, count, 1, CHUNK, &istart0, &iend0);
3634 if (more) goto L0; else goto L3;
3635 L0:
3636 V = istart0;
3637 T = V;
3638 V3 = N31 + (T % count3) * STEP3;
3639 T = T / count3;
3640 V2 = N21 + (T % count2) * STEP2;
3641 T = T / count2;
3642 V1 = N11 + T * STEP1;
3643 iend = iend0;
3644 L1:
3645 BODY;
3646 V += 1;
3647 if (V < iend) goto L10; else goto L2;
3648 L10:
3649 V3 += STEP3;
3650 if (V3 cond3 N32) goto L1; else goto L11;
3651 L11:
3652 V3 = N31;
3653 V2 += STEP2;
3654 if (V2 cond2 N22) goto L1; else goto L12;
3655 L12:
3656 V2 = N21;
3657 V1 += STEP1;
3658 goto L1;
3659 L2:
3660 if (GOMP_loop_foo_next (&istart0, &iend0)) goto L0; else goto L3;
3661 L3:
3662
3663 */
3664
3665 static void
3666 expand_omp_for_generic (struct omp_region *region,
3667 struct omp_for_data *fd,
3668 enum built_in_function start_fn,
3669 enum built_in_function next_fn)
3670 {
3671 tree type, istart0, iend0, iend;
3672 tree t, vmain, vback, bias = NULL_TREE;
3673 basic_block entry_bb, cont_bb, exit_bb, l0_bb, l1_bb, collapse_bb;
3674 basic_block l2_bb = NULL, l3_bb = NULL;
3675 gimple_stmt_iterator gsi;
3676 gimple stmt;
3677 bool in_combined_parallel = is_combined_parallel (region);
3678 bool broken_loop = region->cont == NULL;
3679 edge e, ne;
3680 tree *counts = NULL;
3681 int i;
3682
3683 gcc_assert (!broken_loop || !in_combined_parallel);
3684 gcc_assert (fd->iter_type == long_integer_type_node
3685 || !in_combined_parallel);
3686
3687 type = TREE_TYPE (fd->loop.v);
3688 istart0 = create_tmp_var (fd->iter_type, ".istart0");
3689 iend0 = create_tmp_var (fd->iter_type, ".iend0");
3690 TREE_ADDRESSABLE (istart0) = 1;
3691 TREE_ADDRESSABLE (iend0) = 1;
3692 if (gimple_in_ssa_p (cfun))
3693 {
3694 add_referenced_var (istart0);
3695 add_referenced_var (iend0);
3696 }
3697
3698 /* See if we need to bias by LLONG_MIN. */
3699 if (fd->iter_type == long_long_unsigned_type_node
3700 && TREE_CODE (type) == INTEGER_TYPE
3701 && !TYPE_UNSIGNED (type))
3702 {
3703 tree n1, n2;
3704
3705 if (fd->loop.cond_code == LT_EXPR)
3706 {
3707 n1 = fd->loop.n1;
3708 n2 = fold_build2 (PLUS_EXPR, type, fd->loop.n2, fd->loop.step);
3709 }
3710 else
3711 {
3712 n1 = fold_build2 (MINUS_EXPR, type, fd->loop.n2, fd->loop.step);
3713 n2 = fd->loop.n1;
3714 }
3715 if (TREE_CODE (n1) != INTEGER_CST
3716 || TREE_CODE (n2) != INTEGER_CST
3717 || ((tree_int_cst_sgn (n1) < 0) ^ (tree_int_cst_sgn (n2) < 0)))
3718 bias = fold_convert (fd->iter_type, TYPE_MIN_VALUE (type));
3719 }
3720
3721 entry_bb = region->entry;
3722 cont_bb = region->cont;
3723 collapse_bb = NULL;
3724 gcc_assert (EDGE_COUNT (entry_bb->succs) == 2);
3725 gcc_assert (broken_loop
3726 || BRANCH_EDGE (entry_bb)->dest == FALLTHRU_EDGE (cont_bb)->dest);
3727 l0_bb = split_edge (FALLTHRU_EDGE (entry_bb));
3728 l1_bb = single_succ (l0_bb);
3729 if (!broken_loop)
3730 {
3731 l2_bb = create_empty_bb (cont_bb);
3732 gcc_assert (BRANCH_EDGE (cont_bb)->dest == l1_bb);
3733 gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
3734 }
3735 else
3736 l2_bb = NULL;
3737 l3_bb = BRANCH_EDGE (entry_bb)->dest;
3738 exit_bb = region->exit;
3739
3740 gsi = gsi_last_bb (entry_bb);
3741
3742 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
3743 if (fd->collapse > 1)
3744 {
3745 /* collapsed loops need work for expansion in SSA form. */
3746 gcc_assert (!gimple_in_ssa_p (cfun));
3747 counts = (tree *) alloca (fd->collapse * sizeof (tree));
3748 for (i = 0; i < fd->collapse; i++)
3749 {
3750 tree itype = TREE_TYPE (fd->loops[i].v);
3751
3752 if (POINTER_TYPE_P (itype))
3753 itype = lang_hooks.types.type_for_size (TYPE_PRECISION (itype), 0);
3754 t = build_int_cst (itype, (fd->loops[i].cond_code == LT_EXPR
3755 ? -1 : 1));
3756 t = fold_build2 (PLUS_EXPR, itype,
3757 fold_convert (itype, fd->loops[i].step), t);
3758 t = fold_build2 (PLUS_EXPR, itype, t,
3759 fold_convert (itype, fd->loops[i].n2));
3760 t = fold_build2 (MINUS_EXPR, itype, t,
3761 fold_convert (itype, fd->loops[i].n1));
3762 if (TYPE_UNSIGNED (itype) && fd->loops[i].cond_code == GT_EXPR)
3763 t = fold_build2 (TRUNC_DIV_EXPR, itype,
3764 fold_build1 (NEGATE_EXPR, itype, t),
3765 fold_build1 (NEGATE_EXPR, itype,
3766 fold_convert (itype,
3767 fd->loops[i].step)));
3768 else
3769 t = fold_build2 (TRUNC_DIV_EXPR, itype, t,
3770 fold_convert (itype, fd->loops[i].step));
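          /* The expression built above is the usual trip count
             (step + sign_adjust + n2 - n1) / step; e.g. for
             "for (i = 0; i < 10; i += 3)" it is (3 - 1 + 10 - 0) / 3
             = 4, matching the iterations 0, 3, 6 and 9.  */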
3771 t = fold_convert (type, t);
3772 if (TREE_CODE (t) == INTEGER_CST)
3773 counts[i] = t;
3774 else
3775 {
3776 counts[i] = create_tmp_var (type, ".count");
3777 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3778 true, GSI_SAME_STMT);
3779 stmt = gimple_build_assign (counts[i], t);
3780 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
3781 }
3782 if (SSA_VAR_P (fd->loop.n2))
3783 {
3784 if (i == 0)
3785 t = counts[0];
3786 else
3787 {
3788 t = fold_build2 (MULT_EXPR, type, fd->loop.n2, counts[i]);
3789 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3790 true, GSI_SAME_STMT);
3791 }
3792 stmt = gimple_build_assign (fd->loop.n2, t);
3793 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
3794 }
3795 }
3796 }
3797 if (in_combined_parallel)
3798 {
3799 /* In a combined parallel loop, emit a call to
3800 GOMP_loop_foo_next. */
3801 t = build_call_expr (built_in_decls[next_fn], 2,
3802 build_fold_addr_expr (istart0),
3803 build_fold_addr_expr (iend0));
3804 }
3805 else
3806 {
3807 tree t0, t1, t2, t3, t4;
3808 /* If this is not a combined parallel loop, emit a call to
3809 GOMP_loop_foo_start in ENTRY_BB. */
3810 t4 = build_fold_addr_expr (iend0);
3811 t3 = build_fold_addr_expr (istart0);
3812 t2 = fold_convert (fd->iter_type, fd->loop.step);
3813 if (POINTER_TYPE_P (type)
3814 && TYPE_PRECISION (type) != TYPE_PRECISION (fd->iter_type))
3815 {
3816 /* Avoid casting pointers to an integer of a different size. */
3817 tree itype
3818 = lang_hooks.types.type_for_size (TYPE_PRECISION (type), 0);
3819 t1 = fold_convert (fd->iter_type, fold_convert (itype, fd->loop.n2));
3820 t0 = fold_convert (fd->iter_type, fold_convert (itype, fd->loop.n1));
3821 }
3822 else
3823 {
3824 t1 = fold_convert (fd->iter_type, fd->loop.n2);
3825 t0 = fold_convert (fd->iter_type, fd->loop.n1);
3826 }
3827 if (bias)
3828 {
3829 t1 = fold_build2 (PLUS_EXPR, fd->iter_type, t1, bias);
3830 t0 = fold_build2 (PLUS_EXPR, fd->iter_type, t0, bias);
3831 }
3832 if (fd->iter_type == long_integer_type_node)
3833 {
3834 if (fd->chunk_size)
3835 {
3836 t = fold_convert (fd->iter_type, fd->chunk_size);
3837 t = build_call_expr (built_in_decls[start_fn], 6,
3838 t0, t1, t2, t, t3, t4);
3839 }
3840 else
3841 t = build_call_expr (built_in_decls[start_fn], 5,
3842 t0, t1, t2, t3, t4);
3843 }
3844 else
3845 {
3846 tree t5;
3847 tree c_bool_type;
3848
3849 /* The GOMP_loop_ull_*start functions have an additional boolean
3850 argument, true for < loops and false for > loops.
3851 In Fortran, the C bool type can be different from
3852 boolean_type_node. */
3853 c_bool_type = TREE_TYPE (TREE_TYPE (built_in_decls[start_fn]));
3854 t5 = build_int_cst (c_bool_type,
3855 fd->loop.cond_code == LT_EXPR ? 1 : 0);
3856 if (fd->chunk_size)
3857 {
3858 t = fold_convert (fd->iter_type, fd->chunk_size);
3859 t = build_call_expr (built_in_decls[start_fn], 7,
3860 t5, t0, t1, t2, t, t3, t4);
3861 }
3862 else
3863 t = build_call_expr (built_in_decls[start_fn], 6,
3864 t5, t0, t1, t2, t3, t4);
3865 }
3866 }
3867 if (TREE_TYPE (t) != boolean_type_node)
3868 t = fold_build2 (NE_EXPR, boolean_type_node,
3869 t, build_int_cst (TREE_TYPE (t), 0));
3870 t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3871 true, GSI_SAME_STMT);
3872 gsi_insert_after (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT);
3873
3874 /* Remove the GIMPLE_OMP_FOR statement. */
3875 gsi_remove (&gsi, true);
3876
3877 /* Iteration setup for sequential loop goes in L0_BB. */
3878 gsi = gsi_start_bb (l0_bb);
3879 t = istart0;
3880 if (bias)
3881 t = fold_build2 (MINUS_EXPR, fd->iter_type, t, bias);
3882 if (POINTER_TYPE_P (type))
3883 t = fold_convert (lang_hooks.types.type_for_size (TYPE_PRECISION (type),
3884 0), t);
3885 t = fold_convert (type, t);
3886 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3887 false, GSI_CONTINUE_LINKING);
3888 stmt = gimple_build_assign (fd->loop.v, t);
3889 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3890
3891 t = iend0;
3892 if (bias)
3893 t = fold_build2 (MINUS_EXPR, fd->iter_type, t, bias);
3894 if (POINTER_TYPE_P (type))
3895 t = fold_convert (lang_hooks.types.type_for_size (TYPE_PRECISION (type),
3896 0), t);
3897 t = fold_convert (type, t);
3898 iend = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3899 false, GSI_CONTINUE_LINKING);
3900 if (fd->collapse > 1)
3901 {
3902 tree tem = create_tmp_var (type, ".tem");
3903
3904 stmt = gimple_build_assign (tem, fd->loop.v);
3905 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3906 for (i = fd->collapse - 1; i >= 0; i--)
3907 {
3908 tree vtype = TREE_TYPE (fd->loops[i].v), itype;
3909 itype = vtype;
3910 if (POINTER_TYPE_P (vtype))
3911 itype = lang_hooks.types.type_for_size (TYPE_PRECISION (vtype), 0);
3912 t = fold_build2 (TRUNC_MOD_EXPR, type, tem, counts[i]);
3913 t = fold_convert (itype, t);
3914 t = fold_build2 (MULT_EXPR, itype, t,
3915 fold_convert (itype, fd->loops[i].step));
3916 if (POINTER_TYPE_P (vtype))
3917 t = fold_build2 (POINTER_PLUS_EXPR, vtype,
3918 fd->loops[i].n1, fold_convert (sizetype, t));
3919 else
3920 t = fold_build2 (PLUS_EXPR, itype, fd->loops[i].n1, t);
3921 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3922 false, GSI_CONTINUE_LINKING);
3923 stmt = gimple_build_assign (fd->loops[i].v, t);
3924 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3925 if (i != 0)
3926 {
3927 t = fold_build2 (TRUNC_DIV_EXPR, type, tem, counts[i]);
3928 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3929 false, GSI_CONTINUE_LINKING);
3930 stmt = gimple_build_assign (tem, t);
3931 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3932 }
3933 }
3934 }
3935
3936 if (!broken_loop)
3937 {
3938 /* Code to control the increment and predicate for the sequential
3939 loop goes in the CONT_BB. */
3940 gsi = gsi_last_bb (cont_bb);
3941 stmt = gsi_stmt (gsi);
3942 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_CONTINUE);
3943 vmain = gimple_omp_continue_control_use (stmt);
3944 vback = gimple_omp_continue_control_def (stmt);
3945
3946 if (POINTER_TYPE_P (type))
3947 t = fold_build2 (POINTER_PLUS_EXPR, type, vmain,
3948 fold_convert (sizetype, fd->loop.step));
3949 else
3950 t = fold_build2 (PLUS_EXPR, type, vmain, fd->loop.step);
3951 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3952 true, GSI_SAME_STMT);
3953 stmt = gimple_build_assign (vback, t);
3954 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
3955
3956 t = build2 (fd->loop.cond_code, boolean_type_node, vback, iend);
3957 stmt = gimple_build_cond_empty (t);
3958 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
3959
3960 /* Remove GIMPLE_OMP_CONTINUE. */
3961 gsi_remove (&gsi, true);
3962
3963 if (fd->collapse > 1)
3964 {
3965 basic_block last_bb, bb;
3966
3967 last_bb = cont_bb;
3968 for (i = fd->collapse - 1; i >= 0; i--)
3969 {
3970 tree vtype = TREE_TYPE (fd->loops[i].v);
3971
3972 bb = create_empty_bb (last_bb);
3973 gsi = gsi_start_bb (bb);
3974
3975 if (i < fd->collapse - 1)
3976 {
3977 e = make_edge (last_bb, bb, EDGE_FALSE_VALUE);
3978 e->probability = REG_BR_PROB_BASE / 8;
3979
3980 t = fd->loops[i + 1].n1;
3981 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3982 false, GSI_CONTINUE_LINKING);
3983 stmt = gimple_build_assign (fd->loops[i + 1].v, t);
3984 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3985 }
3986 else
3987 collapse_bb = bb;
3988
3989 set_immediate_dominator (CDI_DOMINATORS, bb, last_bb);
3990
3991 if (POINTER_TYPE_P (vtype))
3992 t = fold_build2 (POINTER_PLUS_EXPR, vtype,
3993 fd->loops[i].v,
3994 fold_convert (sizetype, fd->loops[i].step));
3995 else
3996 t = fold_build2 (PLUS_EXPR, vtype, fd->loops[i].v,
3997 fd->loops[i].step);
3998 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3999 false, GSI_CONTINUE_LINKING);
4000 stmt = gimple_build_assign (fd->loops[i].v, t);
4001 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
4002
4003 if (i > 0)
4004 {
4005 t = fd->loops[i].n2;
4006 t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
4007 false, GSI_CONTINUE_LINKING);
4008 t = fold_build2 (fd->loops[i].cond_code, boolean_type_node,
4009 fd->loops[i].v, t);
4010 stmt = gimple_build_cond_empty (t);
4011 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
4012 e = make_edge (bb, l1_bb, EDGE_TRUE_VALUE);
4013 e->probability = REG_BR_PROB_BASE * 7 / 8;
4014 }
4015 else
4016 make_edge (bb, l1_bb, EDGE_FALLTHRU);
4017 last_bb = bb;
4018 }
4019 }
4020
4021 /* Emit code to get the next parallel iteration in L2_BB. */
4022 gsi = gsi_start_bb (l2_bb);
4023
4024 t = build_call_expr (built_in_decls[next_fn], 2,
4025 build_fold_addr_expr (istart0),
4026 build_fold_addr_expr (iend0));
4027 t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
4028 false, GSI_CONTINUE_LINKING);
4029 if (TREE_TYPE (t) != boolean_type_node)
4030 t = fold_build2 (NE_EXPR, boolean_type_node,
4031 t, build_int_cst (TREE_TYPE (t), 0));
4032 stmt = gimple_build_cond_empty (t);
4033 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
4034 }
4035
4036 /* Add the loop cleanup function. */
4037 gsi = gsi_last_bb (exit_bb);
4038 if (gimple_omp_return_nowait_p (gsi_stmt (gsi)))
4039 t = built_in_decls[BUILT_IN_GOMP_LOOP_END_NOWAIT];
4040 else
4041 t = built_in_decls[BUILT_IN_GOMP_LOOP_END];
4042 stmt = gimple_build_call (t, 0);
4043 gsi_insert_after (&gsi, stmt, GSI_SAME_STMT);
4044 gsi_remove (&gsi, true);
4045
4046 /* Connect the new blocks. */
4047 find_edge (entry_bb, l0_bb)->flags = EDGE_TRUE_VALUE;
4048 find_edge (entry_bb, l3_bb)->flags = EDGE_FALSE_VALUE;
4049
4050 if (!broken_loop)
4051 {
4052 gimple_seq phis;
4053
4054 e = find_edge (cont_bb, l3_bb);
4055 ne = make_edge (l2_bb, l3_bb, EDGE_FALSE_VALUE);
4056
4057 phis = phi_nodes (l3_bb);
4058 for (gsi = gsi_start (phis); !gsi_end_p (gsi); gsi_next (&gsi))
4059 {
4060 gimple phi = gsi_stmt (gsi);
4061 SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, ne),
4062 PHI_ARG_DEF_FROM_EDGE (phi, e));
4063 }
4064 remove_edge (e);
4065
4066 make_edge (cont_bb, l2_bb, EDGE_FALSE_VALUE);
4067 if (fd->collapse > 1)
4068 {
4069 e = find_edge (cont_bb, l1_bb);
4070 remove_edge (e);
4071 e = make_edge (cont_bb, collapse_bb, EDGE_TRUE_VALUE);
4072 }
4073 else
4074 {
4075 e = find_edge (cont_bb, l1_bb);
4076 e->flags = EDGE_TRUE_VALUE;
4077 }
4078 e->probability = REG_BR_PROB_BASE * 7 / 8;
4079 find_edge (cont_bb, l2_bb)->probability = REG_BR_PROB_BASE / 8;
4080 make_edge (l2_bb, l0_bb, EDGE_TRUE_VALUE);
4081
4082 set_immediate_dominator (CDI_DOMINATORS, l2_bb,
4083 recompute_dominator (CDI_DOMINATORS, l2_bb));
4084 set_immediate_dominator (CDI_DOMINATORS, l3_bb,
4085 recompute_dominator (CDI_DOMINATORS, l3_bb));
4086 set_immediate_dominator (CDI_DOMINATORS, l0_bb,
4087 recompute_dominator (CDI_DOMINATORS, l0_bb));
4088 set_immediate_dominator (CDI_DOMINATORS, l1_bb,
4089 recompute_dominator (CDI_DOMINATORS, l1_bb));
4090 }
4091 }
4092
4093
4094 /* A subroutine of expand_omp_for. Generate code for a parallel
4095 loop with static schedule and no specified chunk size. Given
4096 parameters:
4097
4098 for (V = N1; V cond N2; V += STEP) BODY;
4099
4100 where COND is "<" or ">", we generate pseudocode
4101
4102 if (cond is <)
4103 adj = STEP - 1;
4104 else
4105 adj = STEP + 1;
4106 if ((__typeof (V)) -1 > 0 && cond is >)
4107 n = -(adj + N2 - N1) / -STEP;
4108 else
4109 n = (adj + N2 - N1) / STEP;
4110 q = n / nthreads;
4111 q += (q * nthreads != n);
4112 s0 = q * threadid;
4113 e0 = min(s0 + q, n);
4114 V = s0 * STEP + N1;
4115 if (s0 >= e0) goto L2; else goto L0;
4116 L0:
4117 e = e0 * STEP + N1;
4118 L1:
4119 BODY;
4120 V += STEP;
4121 if (V cond e) goto L1;
4122 L2:
4123 */
4124
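/* Worked example (explanatory sketch): with n = 10 logical iterations
   and nthreads = 4, q = 10 / 4 = 2 and, since 2 * 4 != 10, q becomes 3.
   Threads 0..3 then receive [0,3), [3,6), [6,9) and [9,10); any thread
   for which s0 >= e0 skips the body entirely via the branch to L2.  */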
4125 static void
4126 expand_omp_for_static_nochunk (struct omp_region *region,
4127 struct omp_for_data *fd)
4128 {
4129 tree n, q, s0, e0, e, t, nthreads, threadid;
4130 tree type, itype, vmain, vback;
4131 basic_block entry_bb, exit_bb, seq_start_bb, body_bb, cont_bb;
4132 basic_block fin_bb;
4133 gimple_stmt_iterator gsi;
4134 gimple stmt;
4135
4136 itype = type = TREE_TYPE (fd->loop.v);
4137 if (POINTER_TYPE_P (type))
4138 itype = lang_hooks.types.type_for_size (TYPE_PRECISION (type), 0);
4139
4140 entry_bb = region->entry;
4141 cont_bb = region->cont;
4142 gcc_assert (EDGE_COUNT (entry_bb->succs) == 2);
4143 gcc_assert (BRANCH_EDGE (entry_bb)->dest == FALLTHRU_EDGE (cont_bb)->dest);
4144 seq_start_bb = split_edge (FALLTHRU_EDGE (entry_bb));
4145 body_bb = single_succ (seq_start_bb);
4146 gcc_assert (BRANCH_EDGE (cont_bb)->dest == body_bb);
4147 gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
4148 fin_bb = FALLTHRU_EDGE (cont_bb)->dest;
4149 exit_bb = region->exit;
4150
4151 /* Iteration space partitioning goes in ENTRY_BB. */
4152 gsi = gsi_last_bb (entry_bb);
4153 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
4154
4155 t = build_call_expr (built_in_decls[BUILT_IN_OMP_GET_NUM_THREADS], 0);
4156 t = fold_convert (itype, t);
4157 nthreads = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
4158 true, GSI_SAME_STMT);
4159
4160 t = build_call_expr (built_in_decls[BUILT_IN_OMP_GET_THREAD_NUM], 0);
4161 t = fold_convert (itype, t);
4162 threadid = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
4163 true, GSI_SAME_STMT);
4164
4165 fd->loop.n1
4166 = force_gimple_operand_gsi (&gsi, fold_convert (type, fd->loop.n1),
4167 true, NULL_TREE, true, GSI_SAME_STMT);
4168 fd->loop.n2
4169 = force_gimple_operand_gsi (&gsi, fold_convert (itype, fd->loop.n2),
4170 true, NULL_TREE, true, GSI_SAME_STMT);
4171 fd->loop.step
4172 = force_gimple_operand_gsi (&gsi, fold_convert (itype, fd->loop.step),
4173 true, NULL_TREE, true, GSI_SAME_STMT);
4174
4175 t = build_int_cst (itype, (fd->loop.cond_code == LT_EXPR ? -1 : 1));
4176 t = fold_build2 (PLUS_EXPR, itype, fd->loop.step, t);
4177 t = fold_build2 (PLUS_EXPR, itype, t, fd->loop.n2);
4178 t = fold_build2 (MINUS_EXPR, itype, t, fold_convert (itype, fd->loop.n1));
4179 if (TYPE_UNSIGNED (itype) && fd->loop.cond_code == GT_EXPR)
4180 t = fold_build2 (TRUNC_DIV_EXPR, itype,
4181 fold_build1 (NEGATE_EXPR, itype, t),
4182 fold_build1 (NEGATE_EXPR, itype, fd->loop.step));
4183 else
4184 t = fold_build2 (TRUNC_DIV_EXPR, itype, t, fd->loop.step);
4185 t = fold_convert (itype, t);
4186 n = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);
4187
4188 t = fold_build2 (TRUNC_DIV_EXPR, itype, n, nthreads);
4189 q = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);
4190
4191 t = fold_build2 (MULT_EXPR, itype, q, nthreads);
4192 t = fold_build2 (NE_EXPR, itype, t, n);
4193 t = fold_build2 (PLUS_EXPR, itype, q, t);
4194 q = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);
4195
4196 t = build2 (MULT_EXPR, itype, q, threadid);
4197 s0 = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);
4198
4199 t = fold_build2 (PLUS_EXPR, itype, s0, q);
4200 t = fold_build2 (MIN_EXPR, itype, t, n);
4201 e0 = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);
4202
4203 t = build2 (GE_EXPR, boolean_type_node, s0, e0);
4204 gsi_insert_before (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT);
4205
4206 /* Remove the GIMPLE_OMP_FOR statement. */
4207 gsi_remove (&gsi, true);
4208
4209 /* Setup code for sequential iteration goes in SEQ_START_BB. */
4210 gsi = gsi_start_bb (seq_start_bb);
4211
4212 t = fold_convert (itype, s0);
4213 t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
4214 if (POINTER_TYPE_P (type))
4215 t = fold_build2 (POINTER_PLUS_EXPR, type, fd->loop.n1,
4216 fold_convert (sizetype, t));
4217 else
4218 t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
4219 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
4220 false, GSI_CONTINUE_LINKING);
4221 stmt = gimple_build_assign (fd->loop.v, t);
4222 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
4223
4224 t = fold_convert (itype, e0);
4225 t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
4226 if (POINTER_TYPE_P (type))
4227 t = fold_build2 (POINTER_PLUS_EXPR, type, fd->loop.n1,
4228 fold_convert (sizetype, t));
4229 else
4230 t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
4231 e = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
4232 false, GSI_CONTINUE_LINKING);
4233
4234 /* The code controlling the sequential loop replaces the
4235 GIMPLE_OMP_CONTINUE. */
4236 gsi = gsi_last_bb (cont_bb);
4237 stmt = gsi_stmt (gsi);
4238 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_CONTINUE);
4239 vmain = gimple_omp_continue_control_use (stmt);
4240 vback = gimple_omp_continue_control_def (stmt);
4241
4242 if (POINTER_TYPE_P (type))
4243 t = fold_build2 (POINTER_PLUS_EXPR, type, vmain,
4244 fold_convert (sizetype, fd->loop.step));
4245 else
4246 t = fold_build2 (PLUS_EXPR, type, vmain, fd->loop.step);
4247 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
4248 true, GSI_SAME_STMT);
4249 stmt = gimple_build_assign (vback, t);
4250 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
4251
4252 t = build2 (fd->loop.cond_code, boolean_type_node, vback, e);
4253 gsi_insert_before (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT);
4254
4255 /* Remove the GIMPLE_OMP_CONTINUE statement. */
4256 gsi_remove (&gsi, true);
4257
4258 /* Replace the GIMPLE_OMP_RETURN with a barrier, or nothing. */
4259 gsi = gsi_last_bb (exit_bb);
4260 if (!gimple_omp_return_nowait_p (gsi_stmt (gsi)))
4261 force_gimple_operand_gsi (&gsi, build_omp_barrier (), false, NULL_TREE,
4262 false, GSI_SAME_STMT);
4263 gsi_remove (&gsi, true);
4264
4265 /* Connect all the blocks. */
4266 find_edge (entry_bb, seq_start_bb)->flags = EDGE_FALSE_VALUE;
4267 find_edge (entry_bb, fin_bb)->flags = EDGE_TRUE_VALUE;
4268
4269 find_edge (cont_bb, body_bb)->flags = EDGE_TRUE_VALUE;
4270 find_edge (cont_bb, fin_bb)->flags = EDGE_FALSE_VALUE;
4271
4272 set_immediate_dominator (CDI_DOMINATORS, seq_start_bb, entry_bb);
4273 set_immediate_dominator (CDI_DOMINATORS, body_bb,
4274 recompute_dominator (CDI_DOMINATORS, body_bb));
4275 set_immediate_dominator (CDI_DOMINATORS, fin_bb,
4276 recompute_dominator (CDI_DOMINATORS, fin_bb));
4277 }
4278
4279
4280 /* A subroutine of expand_omp_for. Generate code for a parallel
4281 loop with static schedule and a specified chunk size. Given
4282 parameters:
4283
4284 for (V = N1; V cond N2; V += STEP) BODY;
4285
4286 where COND is "<" or ">", we generate pseudocode
4287
4288 if (cond is <)
4289 adj = STEP - 1;
4290 else
4291 adj = STEP + 1;
4292 if ((__typeof (V)) -1 > 0 && cond is >)
4293 n = -(adj + N2 - N1) / -STEP;
4294 else
4295 n = (adj + N2 - N1) / STEP;
4296 trip = 0;
4297 V = threadid * CHUNK * STEP + N1; -- this extra definition of V is
4298 here so that V is defined
4299 if the loop is not entered
4300 L0:
4301 s0 = (trip * nthreads + threadid) * CHUNK;
4302 e0 = min(s0 + CHUNK, n);
4303 if (s0 < n) goto L1; else goto L4;
4304 L1:
4305 V = s0 * STEP + N1;
4306 e = e0 * STEP + N1;
4307 L2:
4308 BODY;
4309 V += STEP;
4310 if (V cond e) goto L2; else goto L3;
4311 L3:
4312 trip += 1;
4313 goto L0;
4314 L4:
4315 */
4316
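/* Worked example (explanatory sketch): with n = 7, nthreads = 2 and
   CHUNK = 2, trip 0 hands out [0,2) and [2,4), trip 1 hands out [4,6)
   and [6,7), and on trip 2 both threads compute s0 >= n and leave
   through L4.  */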
4317 static void
4318 expand_omp_for_static_chunk (struct omp_region *region, struct omp_for_data *fd)
4319 {
4320 tree n, s0, e0, e, t;
4321 tree trip_var, trip_init, trip_main, trip_back, nthreads, threadid;
4322 tree type, itype, v_main, v_back, v_extra;
4323 basic_block entry_bb, exit_bb, body_bb, seq_start_bb, iter_part_bb;
4324 basic_block trip_update_bb, cont_bb, fin_bb;
4325 gimple_stmt_iterator si;
4326 gimple stmt;
4327 edge se;
4328
4329 itype = type = TREE_TYPE (fd->loop.v);
4330 if (POINTER_TYPE_P (type))
4331 itype = lang_hooks.types.type_for_size (TYPE_PRECISION (type), 0);
4332
4333 entry_bb = region->entry;
4334 se = split_block (entry_bb, last_stmt (entry_bb));
4335 entry_bb = se->src;
4336 iter_part_bb = se->dest;
4337 cont_bb = region->cont;
4338 gcc_assert (EDGE_COUNT (iter_part_bb->succs) == 2);
4339 gcc_assert (BRANCH_EDGE (iter_part_bb)->dest
4340 == FALLTHRU_EDGE (cont_bb)->dest);
4341 seq_start_bb = split_edge (FALLTHRU_EDGE (iter_part_bb));
4342 body_bb = single_succ (seq_start_bb);
4343 gcc_assert (BRANCH_EDGE (cont_bb)->dest == body_bb);
4344 gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
4345 fin_bb = FALLTHRU_EDGE (cont_bb)->dest;
4346 trip_update_bb = split_edge (FALLTHRU_EDGE (cont_bb));
4347 exit_bb = region->exit;
4348
4349 /* Trip and adjustment setup goes in ENTRY_BB. */
4350 si = gsi_last_bb (entry_bb);
4351 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_FOR);
4352
4353 t = build_call_expr (built_in_decls[BUILT_IN_OMP_GET_NUM_THREADS], 0);
4354 t = fold_convert (itype, t);
4355 nthreads = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4356 true, GSI_SAME_STMT);
4357
4358 t = build_call_expr (built_in_decls[BUILT_IN_OMP_GET_THREAD_NUM], 0);
4359 t = fold_convert (itype, t);
4360 threadid = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4361 true, GSI_SAME_STMT);
4362
4363 fd->loop.n1
4364 = force_gimple_operand_gsi (&si, fold_convert (type, fd->loop.n1),
4365 true, NULL_TREE, true, GSI_SAME_STMT);
4366 fd->loop.n2
4367 = force_gimple_operand_gsi (&si, fold_convert (itype, fd->loop.n2),
4368 true, NULL_TREE, true, GSI_SAME_STMT);
4369 fd->loop.step
4370 = force_gimple_operand_gsi (&si, fold_convert (itype, fd->loop.step),
4371 true, NULL_TREE, true, GSI_SAME_STMT);
4372 fd->chunk_size
4373 = force_gimple_operand_gsi (&si, fold_convert (itype, fd->chunk_size),
4374 true, NULL_TREE, true, GSI_SAME_STMT);
4375
4376 t = build_int_cst (itype, (fd->loop.cond_code == LT_EXPR ? -1 : 1));
4377 t = fold_build2 (PLUS_EXPR, itype, fd->loop.step, t);
4378 t = fold_build2 (PLUS_EXPR, itype, t, fd->loop.n2);
4379 t = fold_build2 (MINUS_EXPR, itype, t, fold_convert (itype, fd->loop.n1));
4380 if (TYPE_UNSIGNED (itype) && fd->loop.cond_code == GT_EXPR)
4381 t = fold_build2 (TRUNC_DIV_EXPR, itype,
4382 fold_build1 (NEGATE_EXPR, itype, t),
4383 fold_build1 (NEGATE_EXPR, itype, fd->loop.step));
4384 else
4385 t = fold_build2 (TRUNC_DIV_EXPR, itype, t, fd->loop.step);
4386 t = fold_convert (itype, t);
4387 n = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4388 true, GSI_SAME_STMT);
4389
4390 trip_var = create_tmp_var (itype, ".trip");
4391 if (gimple_in_ssa_p (cfun))
4392 {
4393 add_referenced_var (trip_var);
4394 trip_init = make_ssa_name (trip_var, NULL);
4395 trip_main = make_ssa_name (trip_var, NULL);
4396 trip_back = make_ssa_name (trip_var, NULL);
4397 }
4398 else
4399 {
4400 trip_init = trip_var;
4401 trip_main = trip_var;
4402 trip_back = trip_var;
4403 }
4404
4405 stmt = gimple_build_assign (trip_init, build_int_cst (itype, 0));
4406 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
4407
4408 t = fold_build2 (MULT_EXPR, itype, threadid, fd->chunk_size);
4409 t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
4410 if (POINTER_TYPE_P (type))
4411 t = fold_build2 (POINTER_PLUS_EXPR, type, fd->loop.n1,
4412 fold_convert (sizetype, t));
4413 else
4414 t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
4415 v_extra = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4416 true, GSI_SAME_STMT);
4417
4418 /* Remove the GIMPLE_OMP_FOR. */
4419 gsi_remove (&si, true);
4420
4421 /* Iteration space partitioning goes in ITER_PART_BB. */
4422 si = gsi_last_bb (iter_part_bb);
4423
4424 t = fold_build2 (MULT_EXPR, itype, trip_main, nthreads);
4425 t = fold_build2 (PLUS_EXPR, itype, t, threadid);
4426 t = fold_build2 (MULT_EXPR, itype, t, fd->chunk_size);
4427 s0 = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4428 false, GSI_CONTINUE_LINKING);
4429
4430 t = fold_build2 (PLUS_EXPR, itype, s0, fd->chunk_size);
4431 t = fold_build2 (MIN_EXPR, itype, t, n);
4432 e0 = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4433 false, GSI_CONTINUE_LINKING);
4434
4435 t = build2 (LT_EXPR, boolean_type_node, s0, n);
4436 gsi_insert_after (&si, gimple_build_cond_empty (t), GSI_CONTINUE_LINKING);
4437
4438 /* Setup code for sequential iteration goes in SEQ_START_BB. */
4439 si = gsi_start_bb (seq_start_bb);
4440
4441 t = fold_convert (itype, s0);
4442 t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
4443 if (POINTER_TYPE_P (type))
4444 t = fold_build2 (POINTER_PLUS_EXPR, type, fd->loop.n1,
4445 fold_convert (sizetype, t));
4446 else
4447 t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
4448 t = force_gimple_operand_gsi (&si, t, false, NULL_TREE,
4449 false, GSI_CONTINUE_LINKING);
4450 stmt = gimple_build_assign (fd->loop.v, t);
4451 gsi_insert_after (&si, stmt, GSI_CONTINUE_LINKING);
4452
4453 t = fold_convert (itype, e0);
4454 t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
4455 if (POINTER_TYPE_P (type))
4456 t = fold_build2 (POINTER_PLUS_EXPR, type, fd->loop.n1,
4457 fold_convert (sizetype, t));
4458 else
4459 t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
4460 e = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4461 false, GSI_CONTINUE_LINKING);
4462
4463 /* The code controlling the sequential loop goes in CONT_BB,
4464 replacing the GIMPLE_OMP_CONTINUE. */
4465 si = gsi_last_bb (cont_bb);
4466 stmt = gsi_stmt (si);
4467 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_CONTINUE);
4468 v_main = gimple_omp_continue_control_use (stmt);
4469 v_back = gimple_omp_continue_control_def (stmt);
4470
4471 if (POINTER_TYPE_P (type))
4472 t = fold_build2 (POINTER_PLUS_EXPR, type, v_main,
4473 fold_convert (sizetype, fd->loop.step));
4474 else
4475 t = fold_build2 (PLUS_EXPR, type, v_main, fd->loop.step);
4476 stmt = gimple_build_assign (v_back, t);
4477 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
4478
4479 t = build2 (fd->loop.cond_code, boolean_type_node, v_back, e);
4480 gsi_insert_before (&si, gimple_build_cond_empty (t), GSI_SAME_STMT);
4481
4482 /* Remove GIMPLE_OMP_CONTINUE. */
4483 gsi_remove (&si, true);
4484
4485 /* Trip update code goes into TRIP_UPDATE_BB. */
4486 si = gsi_start_bb (trip_update_bb);
4487
4488 t = build_int_cst (itype, 1);
4489 t = build2 (PLUS_EXPR, itype, trip_main, t);
4490 stmt = gimple_build_assign (trip_back, t);
4491 gsi_insert_after (&si, stmt, GSI_CONTINUE_LINKING);
4492
4493 /* Replace the GIMPLE_OMP_RETURN with a barrier, or nothing. */
4494 si = gsi_last_bb (exit_bb);
4495 if (!gimple_omp_return_nowait_p (gsi_stmt (si)))
4496 force_gimple_operand_gsi (&si, build_omp_barrier (), false, NULL_TREE,
4497 false, GSI_SAME_STMT);
4498 gsi_remove (&si, true);
4499
4500 /* Connect the new blocks. */
4501 find_edge (iter_part_bb, seq_start_bb)->flags = EDGE_TRUE_VALUE;
4502 find_edge (iter_part_bb, fin_bb)->flags = EDGE_FALSE_VALUE;
4503
4504 find_edge (cont_bb, body_bb)->flags = EDGE_TRUE_VALUE;
4505 find_edge (cont_bb, trip_update_bb)->flags = EDGE_FALSE_VALUE;
4506
4507 redirect_edge_and_branch (single_succ_edge (trip_update_bb), iter_part_bb);
4508
4509 if (gimple_in_ssa_p (cfun))
4510 {
4511 gimple_stmt_iterator psi;
4512 gimple phi;
4513 edge re, ene;
4514 edge_var_map_vector head;
4515 edge_var_map *vm;
4516 size_t i;
4517
4518 /* When we redirect the edge from trip_update_bb to iter_part_bb, we
4519 remove arguments of the phi nodes in fin_bb. We need to create
4520 appropriate phi nodes in iter_part_bb instead. */
4521 se = single_pred_edge (fin_bb);
4522 re = single_succ_edge (trip_update_bb);
4523 head = redirect_edge_var_map_vector (re);
4524 ene = single_succ_edge (entry_bb);
4525
4526 psi = gsi_start_phis (fin_bb);
4527 for (i = 0; !gsi_end_p (psi) && VEC_iterate (edge_var_map, head, i, vm);
4528 gsi_next (&psi), ++i)
4529 {
4530 gimple nphi;
4531 source_location locus;
4532
4533 phi = gsi_stmt (psi);
4534 t = gimple_phi_result (phi);
4535 gcc_assert (t == redirect_edge_var_map_result (vm));
4536 nphi = create_phi_node (t, iter_part_bb);
4537 SSA_NAME_DEF_STMT (t) = nphi;
4538
4539 t = PHI_ARG_DEF_FROM_EDGE (phi, se);
4540 locus = gimple_phi_arg_location_from_edge (phi, se);
4541
4542 /* A special case -- fd->loop.v is not yet computed in
4543 iter_part_bb, so we need to use v_extra instead. */
4544 if (t == fd->loop.v)
4545 t = v_extra;
4546 add_phi_arg (nphi, t, ene, locus);
4547 locus = redirect_edge_var_map_location (vm);
4548 add_phi_arg (nphi, redirect_edge_var_map_def (vm), re, locus);
4549 }
4550 gcc_assert (!gsi_end_p (psi) && i == VEC_length (edge_var_map, head));
4551 redirect_edge_var_map_clear (re);
4552 while (1)
4553 {
4554 psi = gsi_start_phis (fin_bb);
4555 if (gsi_end_p (psi))
4556 break;
4557 remove_phi_node (&psi, false);
4558 }
4559
4560 /* Make phi node for trip. */
4561 phi = create_phi_node (trip_main, iter_part_bb);
4562 SSA_NAME_DEF_STMT (trip_main) = phi;
4563 add_phi_arg (phi, trip_back, single_succ_edge (trip_update_bb),
4564 UNKNOWN_LOCATION);
4565 add_phi_arg (phi, trip_init, single_succ_edge (entry_bb),
4566 UNKNOWN_LOCATION);
4567 }
4568
4569 set_immediate_dominator (CDI_DOMINATORS, trip_update_bb, cont_bb);
4570 set_immediate_dominator (CDI_DOMINATORS, iter_part_bb,
4571 recompute_dominator (CDI_DOMINATORS, iter_part_bb));
4572 set_immediate_dominator (CDI_DOMINATORS, fin_bb,
4573 recompute_dominator (CDI_DOMINATORS, fin_bb));
4574 set_immediate_dominator (CDI_DOMINATORS, seq_start_bb,
4575 recompute_dominator (CDI_DOMINATORS, seq_start_bb));
4576 set_immediate_dominator (CDI_DOMINATORS, body_bb,
4577 recompute_dominator (CDI_DOMINATORS, body_bb));
4578 }
4579
4580
4581 /* Expand the OpenMP loop defined by REGION. */
4582
4583 static void
4584 expand_omp_for (struct omp_region *region)
4585 {
4586 struct omp_for_data fd;
4587 struct omp_for_data_loop *loops;
4588
4589 loops
4590 = (struct omp_for_data_loop *)
4591 alloca (gimple_omp_for_collapse (last_stmt (region->entry))
4592 * sizeof (struct omp_for_data_loop));
4593 extract_omp_for_data (last_stmt (region->entry), &fd, loops);
4594 region->sched_kind = fd.sched_kind;
4595
4596 gcc_assert (EDGE_COUNT (region->entry->succs) == 2);
4597 BRANCH_EDGE (region->entry)->flags &= ~EDGE_ABNORMAL;
4598 FALLTHRU_EDGE (region->entry)->flags &= ~EDGE_ABNORMAL;
4599 if (region->cont)
4600 {
4601 gcc_assert (EDGE_COUNT (region->cont->succs) == 2);
4602 BRANCH_EDGE (region->cont)->flags &= ~EDGE_ABNORMAL;
4603 FALLTHRU_EDGE (region->cont)->flags &= ~EDGE_ABNORMAL;
4604 }
4605
4606 if (fd.sched_kind == OMP_CLAUSE_SCHEDULE_STATIC
4607 && !fd.have_ordered
4608 && fd.collapse == 1
4609 && region->cont != NULL)
4610 {
4611 if (fd.chunk_size == NULL)
4612 expand_omp_for_static_nochunk (region, &fd);
4613 else
4614 expand_omp_for_static_chunk (region, &fd);
4615 }
4616 else
4617 {
4618 int fn_index, start_ix, next_ix;
4619
4620 gcc_assert (fd.sched_kind != OMP_CLAUSE_SCHEDULE_AUTO);
4621 fn_index = (fd.sched_kind == OMP_CLAUSE_SCHEDULE_RUNTIME)
4622 ? 3 : fd.sched_kind;
4623 fn_index += fd.have_ordered * 4;
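          /* For example, relying on the builtin declaration order this
             code already assumes: schedule(dynamic) yields fn_index 1
             and selects GOMP_loop_dynamic_start/next, schedule(runtime)
             yields 3, and an ordered dynamic loop yields 1 + 4,
             selecting GOMP_loop_ordered_dynamic_start/next.  */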
4624 start_ix = BUILT_IN_GOMP_LOOP_STATIC_START + fn_index;
4625 next_ix = BUILT_IN_GOMP_LOOP_STATIC_NEXT + fn_index;
4626 if (fd.iter_type == long_long_unsigned_type_node)
4627 {
4628 start_ix += BUILT_IN_GOMP_LOOP_ULL_STATIC_START
4629 - BUILT_IN_GOMP_LOOP_STATIC_START;
4630 next_ix += BUILT_IN_GOMP_LOOP_ULL_STATIC_NEXT
4631 - BUILT_IN_GOMP_LOOP_STATIC_NEXT;
4632 }
4633 expand_omp_for_generic (region, &fd, (enum built_in_function) start_ix,
4634 (enum built_in_function) next_ix);
4635 }
4636
4637 update_ssa (TODO_update_ssa_only_virtuals);
4638 }
4639
4640
4641 /* Expand code for an OpenMP sections directive. In pseudo code, we generate
4642
4643 v = GOMP_sections_start (n);
4644 L0:
4645 switch (v)
4646 {
4647 case 0:
4648 goto L2;
4649 case 1:
4650 section 1;
4651 goto L1;
4652 case 2:
4653 ...
4654 case n:
4655 ...
4656 default:
4657 abort ();
4658 }
4659 L1:
4660 v = GOMP_sections_next ();
4661 goto L0;
4662 L2:
4663 reduction;
4664
4665 If this is a combined parallel sections region, replace the call to
4666 GOMP_sections_start with a call to GOMP_sections_next. */
4667
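/* For instance (an illustration, not generated output), a directive
   with two sections

     #pragma omp sections
     {
     #pragma omp section
       foo ();
     #pragma omp section
       bar ();
     }

   yields GOMP_sections_start (2) and a switch whose case 1 runs
   foo (), case 2 runs bar (), and case 0 branches to L2.  */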
4668 static void
4669 expand_omp_sections (struct omp_region *region)
4670 {
4671 tree t, u, vin = NULL, vmain, vnext, l2;
4672 VEC (tree,heap) *label_vec;
4673 unsigned len;
4674 basic_block entry_bb, l0_bb, l1_bb, l2_bb, default_bb;
4675 gimple_stmt_iterator si, switch_si;
4676 gimple sections_stmt, stmt, cont;
4677 edge_iterator ei;
4678 edge e;
4679 struct omp_region *inner;
4680 unsigned i, casei;
4681 bool exit_reachable = region->cont != NULL;
4682
4683 gcc_assert (exit_reachable == (region->exit != NULL));
4684 entry_bb = region->entry;
4685 l0_bb = single_succ (entry_bb);
4686 l1_bb = region->cont;
4687 l2_bb = region->exit;
4688 if (exit_reachable)
4689 {
4690 if (single_pred_p (l2_bb) && single_pred (l2_bb) == l0_bb)
4691 l2 = gimple_block_label (l2_bb);
4692 else
4693 {
4694 /* This can happen if there are reductions. */
4695 len = EDGE_COUNT (l0_bb->succs);
4696 gcc_assert (len > 0);
4697 e = EDGE_SUCC (l0_bb, len - 1);
4698 si = gsi_last_bb (e->dest);
4699 l2 = NULL_TREE;
4700 if (gsi_end_p (si)
4701 || gimple_code (gsi_stmt (si)) != GIMPLE_OMP_SECTION)
4702 l2 = gimple_block_label (e->dest);
4703 else
4704 FOR_EACH_EDGE (e, ei, l0_bb->succs)
4705 {
4706 si = gsi_last_bb (e->dest);
4707 if (gsi_end_p (si)
4708 || gimple_code (gsi_stmt (si)) != GIMPLE_OMP_SECTION)
4709 {
4710 l2 = gimple_block_label (e->dest);
4711 break;
4712 }
4713 }
4714 }
4715 default_bb = create_empty_bb (l1_bb->prev_bb);
4716 }
4717 else
4718 {
4719 default_bb = create_empty_bb (l0_bb);
4720 l2 = gimple_block_label (default_bb);
4721 }
4722
4723 /* We will build a switch() with enough cases for all the
4724 GIMPLE_OMP_SECTION regions, a '0' case taken when no more work
4725 remains, and a default case to abort if something goes wrong. */
4726 len = EDGE_COUNT (l0_bb->succs);
4727
4728 /* Use VEC_quick_push on label_vec throughout, since we know the size
4729 in advance. */
4730 label_vec = VEC_alloc (tree, heap, len);
4731
4732 /* The call to GOMP_sections_start goes in ENTRY_BB, replacing the
4733 GIMPLE_OMP_SECTIONS statement. */
4734 si = gsi_last_bb (entry_bb);
4735 sections_stmt = gsi_stmt (si);
4736 gcc_assert (gimple_code (sections_stmt) == GIMPLE_OMP_SECTIONS);
4737 vin = gimple_omp_sections_control (sections_stmt);
4738 if (!is_combined_parallel (region))
4739 {
4740 /* If we are not inside a combined parallel+sections region,
4741 call GOMP_sections_start. */
4742 t = build_int_cst (unsigned_type_node,
4743 exit_reachable ? len - 1 : len);
4744 u = built_in_decls[BUILT_IN_GOMP_SECTIONS_START];
4745 stmt = gimple_build_call (u, 1, t);
4746 }
4747 else
4748 {
4749 /* Otherwise, call GOMP_sections_next. */
4750 u = built_in_decls[BUILT_IN_GOMP_SECTIONS_NEXT];
4751 stmt = gimple_build_call (u, 0);
4752 }
4753 gimple_call_set_lhs (stmt, vin);
4754 gsi_insert_after (&si, stmt, GSI_SAME_STMT);
4755 gsi_remove (&si, true);
4756
4757 /* The switch() statement replacing GIMPLE_OMP_SECTIONS_SWITCH goes in
4758 L0_BB. */
4759 switch_si = gsi_last_bb (l0_bb);
4760 gcc_assert (gimple_code (gsi_stmt (switch_si)) == GIMPLE_OMP_SECTIONS_SWITCH);
4761 if (exit_reachable)
4762 {
4763 cont = last_stmt (l1_bb);
4764 gcc_assert (gimple_code (cont) == GIMPLE_OMP_CONTINUE);
4765 vmain = gimple_omp_continue_control_use (cont);
4766 vnext = gimple_omp_continue_control_def (cont);
4767 }
4768 else
4769 {
4770 vmain = vin;
4771 vnext = NULL_TREE;
4772 }
4773
4774 i = 0;
4775 if (exit_reachable)
4776 {
4777 t = build3 (CASE_LABEL_EXPR, void_type_node,
4778 build_int_cst (unsigned_type_node, 0), NULL, l2);
4779 VEC_quick_push (tree, label_vec, t);
4780 i++;
4781 }
4782
4783 /* Convert each GIMPLE_OMP_SECTION into a CASE_LABEL_EXPR. */
4784 for (inner = region->inner, casei = 1;
4785 inner;
4786 inner = inner->next, i++, casei++)
4787 {
4788 basic_block s_entry_bb, s_exit_bb;
4789
4790 /* Skip optional reduction region. */
4791 if (inner->type == GIMPLE_OMP_ATOMIC_LOAD)
4792 {
4793 --i;
4794 --casei;
4795 continue;
4796 }
4797
4798 s_entry_bb = inner->entry;
4799 s_exit_bb = inner->exit;
4800
4801 t = gimple_block_label (s_entry_bb);
4802 u = build_int_cst (unsigned_type_node, casei);
4803 u = build3 (CASE_LABEL_EXPR, void_type_node, u, NULL, t);
4804 VEC_quick_push (tree, label_vec, u);
4805
4806 si = gsi_last_bb (s_entry_bb);
4807 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SECTION);
4808 gcc_assert (i < len || gimple_omp_section_last_p (gsi_stmt (si)));
4809 gsi_remove (&si, true);
4810 single_succ_edge (s_entry_bb)->flags = EDGE_FALLTHRU;
4811
4812 if (s_exit_bb == NULL)
4813 continue;
4814
4815 si = gsi_last_bb (s_exit_bb);
4816 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_RETURN);
4817 gsi_remove (&si, true);
4818
4819 single_succ_edge (s_exit_bb)->flags = EDGE_FALLTHRU;
4820 }
4821
4822 /* Error handling code goes in DEFAULT_BB. */
4823 t = gimple_block_label (default_bb);
4824 u = build3 (CASE_LABEL_EXPR, void_type_node, NULL, NULL, t);
4825 make_edge (l0_bb, default_bb, 0);
4826
4827 stmt = gimple_build_switch_vec (vmain, u, label_vec);
4828 gsi_insert_after (&switch_si, stmt, GSI_SAME_STMT);
4829 gsi_remove (&switch_si, true);
4830 VEC_free (tree, heap, label_vec);
4831
4832 si = gsi_start_bb (default_bb);
4833 stmt = gimple_build_call (built_in_decls[BUILT_IN_TRAP], 0);
4834 gsi_insert_after (&si, stmt, GSI_CONTINUE_LINKING);
4835
4836 if (exit_reachable)
4837 {
4838 /* Code to get the next section goes in L1_BB. */
4839 si = gsi_last_bb (l1_bb);
4840 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_CONTINUE);
4841
4842 stmt = gimple_build_call (built_in_decls[BUILT_IN_GOMP_SECTIONS_NEXT], 0);
4843 gimple_call_set_lhs (stmt, vnext);
4844 gsi_insert_after (&si, stmt, GSI_SAME_STMT);
4845 gsi_remove (&si, true);
4846
4847 single_succ_edge (l1_bb)->flags = EDGE_FALLTHRU;
4848
4849 /* Cleanup function replaces GIMPLE_OMP_RETURN in EXIT_BB. */
4850 si = gsi_last_bb (l2_bb);
4851 if (gimple_omp_return_nowait_p (gsi_stmt (si)))
4852 t = built_in_decls[BUILT_IN_GOMP_SECTIONS_END_NOWAIT];
4853 else
4854 t = built_in_decls[BUILT_IN_GOMP_SECTIONS_END];
4855 stmt = gimple_build_call (t, 0);
4856 gsi_insert_after (&si, stmt, GSI_SAME_STMT);
4857 gsi_remove (&si, true);
4858 }
4859
4860 set_immediate_dominator (CDI_DOMINATORS, default_bb, l0_bb);
4861 }
4862
4863
4864 /* Expand code for an OpenMP single directive. We've already expanded
4865 much of the code; here we simply place the GOMP_barrier call, when one is required. */
4866
4867 static void
4868 expand_omp_single (struct omp_region *region)
4869 {
4870 basic_block entry_bb, exit_bb;
4871 gimple_stmt_iterator si;
4872 bool need_barrier = false;
4873
4874 entry_bb = region->entry;
4875 exit_bb = region->exit;
4876
4877 si = gsi_last_bb (entry_bb);
4878 /* The terminal barrier at the end of a GOMP_single_copy sequence cannot
4879 be removed. We need to ensure that the thread that entered the single
4880 does not exit before the data is copied out by the other threads. */
4881 if (find_omp_clause (gimple_omp_single_clauses (gsi_stmt (si)),
4882 OMP_CLAUSE_COPYPRIVATE))
4883 need_barrier = true;
4884 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SINGLE);
4885 gsi_remove (&si, true);
4886 single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
4887
4888 si = gsi_last_bb (exit_bb);
4889 if (!gimple_omp_return_nowait_p (gsi_stmt (si)) || need_barrier)
4890 force_gimple_operand_gsi (&si, build_omp_barrier (), false, NULL_TREE,
4891 false, GSI_SAME_STMT);
4892 gsi_remove (&si, true);
4893 single_succ_edge (exit_bb)->flags = EDGE_FALLTHRU;
4894 }
4895
4896
4897 /* Generic expansion for OpenMP synchronization directives: master,
4898 ordered and critical. All we need to do here is remove the entry
4899 and exit markers for REGION. */
4900
4901 static void
4902 expand_omp_synch (struct omp_region *region)
4903 {
4904 basic_block entry_bb, exit_bb;
4905 gimple_stmt_iterator si;
4906
4907 entry_bb = region->entry;
4908 exit_bb = region->exit;
4909
4910 si = gsi_last_bb (entry_bb);
4911 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SINGLE
4912 || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_MASTER
4913 || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ORDERED
4914 || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_CRITICAL);
4915 gsi_remove (&si, true);
4916 single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
4917
4918 if (exit_bb)
4919 {
4920 si = gsi_last_bb (exit_bb);
4921 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_RETURN);
4922 gsi_remove (&si, true);
4923 single_succ_edge (exit_bb)->flags = EDGE_FALLTHRU;
4924 }
4925 }
4926
4927 /* A subroutine of expand_omp_atomic. Attempt to implement the atomic
4928 operation as a __sync_fetch_and_op builtin. INDEX is log2 of the
4929 size of the data type, and thus usable to find the index of the builtin
4930 decl. Returns false if the expression is not of the proper form. */
4931
4932 static bool
4933 expand_omp_atomic_fetch_op (basic_block load_bb,
4934 tree addr, tree loaded_val,
4935 tree stored_val, int index)
4936 {
4937 enum built_in_function base;
4938 tree decl, itype, call;
4939 direct_optab optab;
4940 tree rhs;
4941 basic_block store_bb = single_succ (load_bb);
4942 gimple_stmt_iterator gsi;
4943 gimple stmt;
4944 location_t loc;
4945
4946 /* We expect to find the following sequences:
4947
4948 load_bb:
4949 GIMPLE_OMP_ATOMIC_LOAD (tmp, mem)
4950
4951 store_bb:
4952 val = tmp OP something; (or: something OP tmp)
4953 GIMPLE_OMP_ATOMIC_STORE (val)
4954
4955 ??? FIXME: Allow a more flexible sequence.
4956 Perhaps use data flow to pick the statements.
4957
4958 */
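  /* E.g. (a sketch, assuming a 4-byte int x):

       #pragma omp atomic
       x += 3;

     matches this pattern and is expanded to (roughly)
     __sync_fetch_and_add (&x, 3) via BUILT_IN_FETCH_AND_ADD_4.  */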
4959
4960 gsi = gsi_after_labels (store_bb);
4961 stmt = gsi_stmt (gsi);
4962 loc = gimple_location (stmt);
4963 if (!is_gimple_assign (stmt))
4964 return false;
4965 gsi_next (&gsi);
4966 if (gimple_code (gsi_stmt (gsi)) != GIMPLE_OMP_ATOMIC_STORE)
4967 return false;
4968
4969 if (!operand_equal_p (gimple_assign_lhs (stmt), stored_val, 0))
4970 return false;
4971
4972 /* Check for one of the supported fetch-op operations. */
4973 switch (gimple_assign_rhs_code (stmt))
4974 {
4975 case PLUS_EXPR:
4976 case POINTER_PLUS_EXPR:
4977 base = BUILT_IN_FETCH_AND_ADD_N;
4978 optab = sync_add_optab;
4979 break;
4980 case MINUS_EXPR:
4981 base = BUILT_IN_FETCH_AND_SUB_N;
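      /* Probing sync_add_optab rather than sync_sub_optab below appears
         deliberate: a target that can expand fetch-and-add can emulate
         fetch-and-sub by negating the operand.  This reading is an
         assumption; only the add optab is checked here.  */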
4982 optab = sync_add_optab;
4983 break;
4984 case BIT_AND_EXPR:
4985 base = BUILT_IN_FETCH_AND_AND_N;
4986 optab = sync_and_optab;
4987 break;
4988 case BIT_IOR_EXPR:
4989 base = BUILT_IN_FETCH_AND_OR_N;
4990 optab = sync_ior_optab;
4991 break;
4992 case BIT_XOR_EXPR:
4993 base = BUILT_IN_FETCH_AND_XOR_N;
4994 optab = sync_xor_optab;
4995 break;
4996 default:
4997 return false;
4998 }
4999 /* Make sure the expression is of the proper form. */
5000 if (operand_equal_p (gimple_assign_rhs1 (stmt), loaded_val, 0))
5001 rhs = gimple_assign_rhs2 (stmt);
5002 else if (commutative_tree_code (gimple_assign_rhs_code (stmt))
5003 && operand_equal_p (gimple_assign_rhs2 (stmt), loaded_val, 0))
5004 rhs = gimple_assign_rhs1 (stmt);
5005 else
5006 return false;
5007
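  /* INDEX is log2 of the access size and the sized variants are assumed
     to follow their _N placeholder in declaration order, so the "+ 1"
     skips the placeholder; e.g. a 4-byte operand gives index 2 and
     selects BUILT_IN_FETCH_AND_ADD_4.  */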
5008 decl = built_in_decls[base + index + 1];
5009 if (decl == NULL_TREE)
5010 return false;
5011 itype = TREE_TYPE (TREE_TYPE (decl));
5012
5013 if (direct_optab_handler (optab, TYPE_MODE (itype)) == CODE_FOR_nothing)
5014 return false;
5015
5016 gsi = gsi_last_bb (load_bb);
5017 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_ATOMIC_LOAD);
5018 call = build_call_expr_loc (loc,
5019 decl, 2, addr,
5020 fold_convert_loc (loc, itype, rhs));
5021 call = fold_convert_loc (loc, void_type_node, call);
5022 force_gimple_operand_gsi (&gsi, call, true, NULL_TREE, true, GSI_SAME_STMT);
5023 gsi_remove (&gsi, true);
5024
5025 gsi = gsi_last_bb (store_bb);
5026 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_ATOMIC_STORE);
5027 gsi_remove (&gsi, true);
5028 gsi = gsi_last_bb (store_bb);
5029 gsi_remove (&gsi, true);
5030
5031 if (gimple_in_ssa_p (cfun))
5032 update_ssa (TODO_update_ssa_no_phi);
5033
5034 return true;
5035 }
5036
5037 /* A subroutine of expand_omp_atomic. Implement the atomic operation as:
5038
5039 oldval = *addr;
5040 repeat:
5041 newval = rhs; // with oldval replacing *addr in rhs
5042 oldval = __sync_val_compare_and_swap (addr, oldval, newval);
5043 if (oldval != newval)
5044 goto repeat;
5045
5046 INDEX is log2 of the size of the data type, and thus usable to find the
5047 index of the builtin decl. */
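/* For example (a sketch, not generated output): "#pragma omp atomic
   f *= 2.0f;" on a 4-byte float F has no fetch-and-op builtin, so this
   routine view-converts the float image to a 4-byte integer, performs
   the multiply in floating point, and retries
   __sync_val_compare_and_swap_4 until the memory word is unchanged
   between the load and the swap.  */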
5048
5049 static bool
5050 expand_omp_atomic_pipeline (basic_block load_bb, basic_block store_bb,
5051 tree addr, tree loaded_val, tree stored_val,
5052 int index)
5053 {
5054 tree loadedi, storedi, initial, new_storedi, old_vali;
5055 tree type, itype, cmpxchg, iaddr;
5056 gimple_stmt_iterator si;
5057 basic_block loop_header = single_succ (load_bb);
5058 gimple phi, stmt;
5059 edge e;
5060
5061 cmpxchg = built_in_decls[BUILT_IN_VAL_COMPARE_AND_SWAP_N + index + 1];
5062 if (cmpxchg == NULL_TREE)
5063 return false;
5064 type = TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (addr)));
5065 itype = TREE_TYPE (TREE_TYPE (cmpxchg));
5066
5067 if (direct_optab_handler (sync_compare_and_swap_optab, TYPE_MODE (itype))
5068 == CODE_FOR_nothing)
5069 return false;
5070
5071 /* Load the initial value, replacing the GIMPLE_OMP_ATOMIC_LOAD. */
5072 si = gsi_last_bb (load_bb);
5073 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_LOAD);
5074
5075 /* For floating-point values, we'll need to view-convert them to integers
5076 so that we can perform the atomic compare and swap. Simplify the
5077 following code by always setting up the "i"ntegral variables. */
5078 if (!INTEGRAL_TYPE_P (type) && !POINTER_TYPE_P (type))
5079 {
5080 tree iaddr_val;
5081
5082 iaddr = create_tmp_var (build_pointer_type_for_mode (itype, ptr_mode,
5083 true), NULL);
5084 iaddr_val
5085 = force_gimple_operand_gsi (&si,
5086 fold_convert (TREE_TYPE (iaddr), addr),
5087 false, NULL_TREE, true, GSI_SAME_STMT);
5088 stmt = gimple_build_assign (iaddr, iaddr_val);
5089 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
5090 loadedi = create_tmp_var (itype, NULL);
5091 if (gimple_in_ssa_p (cfun))
5092 {
5093 add_referenced_var (iaddr);
5094 add_referenced_var (loadedi);
5095 loadedi = make_ssa_name (loadedi, NULL);
5096 }
5097 }
5098 else
5099 {
5100 iaddr = addr;
5101 loadedi = loaded_val;
5102 }
5103
5104 initial
5105 = force_gimple_operand_gsi (&si,
5106 build2 (MEM_REF, TREE_TYPE (TREE_TYPE (iaddr)),
5107 iaddr,
5108 build_int_cst (TREE_TYPE (iaddr), 0)),
5109 true, NULL_TREE, true, GSI_SAME_STMT);
5110
5111 /* Move the value to the LOADEDI temporary. */
5112 if (gimple_in_ssa_p (cfun))
5113 {
5114 gcc_assert (gimple_seq_empty_p (phi_nodes (loop_header)));
5115 phi = create_phi_node (loadedi, loop_header);
5116 SSA_NAME_DEF_STMT (loadedi) = phi;
5117 SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, single_succ_edge (load_bb)),
5118 initial);
5119 }
5120 else
5121 gsi_insert_before (&si,
5122 gimple_build_assign (loadedi, initial),
5123 GSI_SAME_STMT);
5124 if (loadedi != loaded_val)
5125 {
5126 gimple_stmt_iterator gsi2;
5127 tree x;
5128
5129 x = build1 (VIEW_CONVERT_EXPR, type, loadedi);
5130 gsi2 = gsi_start_bb (loop_header);
5131 if (gimple_in_ssa_p (cfun))
5132 {
5133 gimple stmt;
5134 x = force_gimple_operand_gsi (&gsi2, x, true, NULL_TREE,
5135 true, GSI_SAME_STMT);
5136 stmt = gimple_build_assign (loaded_val, x);
5137 gsi_insert_before (&gsi2, stmt, GSI_SAME_STMT);
5138 }
5139 else
5140 {
5141 x = build2 (MODIFY_EXPR, TREE_TYPE (loaded_val), loaded_val, x);
5142 force_gimple_operand_gsi (&gsi2, x, true, NULL_TREE,
5143 true, GSI_SAME_STMT);
5144 }
5145 }
5146 gsi_remove (&si, true);
5147
5148 si = gsi_last_bb (store_bb);
5149 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_STORE);
5150
5151 if (iaddr == addr)
5152 storedi = stored_val;
5153 else
5154 storedi =
5155 force_gimple_operand_gsi (&si,
5156 build1 (VIEW_CONVERT_EXPR, itype,
5157 stored_val), true, NULL_TREE, true,
5158 GSI_SAME_STMT);
5159
5160 /* Build the compare&swap statement. */
5161 new_storedi = build_call_expr (cmpxchg, 3, iaddr, loadedi, storedi);
5162 new_storedi = force_gimple_operand_gsi (&si,
5163 fold_convert (TREE_TYPE (loadedi),
5164 new_storedi),
5165 true, NULL_TREE,
5166 true, GSI_SAME_STMT);
5167
5168 if (gimple_in_ssa_p (cfun))
5169 old_vali = loadedi;
5170 else
5171 {
5172 old_vali = create_tmp_var (TREE_TYPE (loadedi), NULL);
5175 stmt = gimple_build_assign (old_vali, loadedi);
5176 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
5177
5178 stmt = gimple_build_assign (loadedi, new_storedi);
5179 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
5180 }
5181
5182 /* Note that we always perform the comparison as an integer, even for
5183 floating point. This allows the atomic operation to properly
5184 succeed even with NaNs and -0.0. */
5185 stmt = gimple_build_cond_empty
5186 (build2 (NE_EXPR, boolean_type_node,
5187 new_storedi, old_vali));
5188 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
5189
5190 /* Update cfg. */
5191 e = single_succ_edge (store_bb);
5192 e->flags &= ~EDGE_FALLTHRU;
5193 e->flags |= EDGE_FALSE_VALUE;
5194
5195 e = make_edge (store_bb, loop_header, EDGE_TRUE_VALUE);
5196
5197 /* Copy the new value to loadedi (we already did that before the condition
5198 if we are not in SSA). */
5199 if (gimple_in_ssa_p (cfun))
5200 {
5201 phi = gimple_seq_first_stmt (phi_nodes (loop_header));
5202 SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, e), new_storedi);
5203 }
5204
5205 /* Remove GIMPLE_OMP_ATOMIC_STORE. */
5206 gsi_remove (&si, true);
5207
5208 if (gimple_in_ssa_p (cfun))
5209 update_ssa (TODO_update_ssa_no_phi);
5210
5211 return true;
5212 }
5213
5214 /* A subroutine of expand_omp_atomic. Implement the atomic operation as:
5215
5216 GOMP_atomic_start ();
5217 *addr = rhs;
5218 GOMP_atomic_end ();
5219
5220 The result is not globally atomic, but works so long as all parallel
5221 references are within #pragma omp atomic directives. According to
5222 responses received from omp@openmp.org, this appears to be within
5223 spec, which makes sense given that several other compilers handle
5224 the situation the same way.
5225 LOADED_VAL and ADDR are the operands of GIMPLE_OMP_ATOMIC_LOAD we're
5226 expanding. STORED_VAL is the operand of the matching
5227 GIMPLE_OMP_ATOMIC_STORE.
5228
5229 We replace
5230 GIMPLE_OMP_ATOMIC_LOAD (loaded_val, addr) with
5231 loaded_val = *addr;
5232
5233 and replace
5234 GIMPLE_OMP_ATOMIC_STORE (stored_val) with
5235 *addr = stored_val;
5236 */
5237
5238 static bool
5239 expand_omp_atomic_mutex (basic_block load_bb, basic_block store_bb,
5240 tree addr, tree loaded_val, tree stored_val)
5241 {
5242 gimple_stmt_iterator si;
5243 gimple stmt;
5244 tree t;
5245
5246 si = gsi_last_bb (load_bb);
5247 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_LOAD);
5248
5249 t = built_in_decls[BUILT_IN_GOMP_ATOMIC_START];
5250 t = build_call_expr (t, 0);
5251 force_gimple_operand_gsi (&si, t, true, NULL_TREE, true, GSI_SAME_STMT);
5252
5253 stmt = gimple_build_assign (loaded_val, build_simple_mem_ref (addr));
5254 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
5255 gsi_remove (&si, true);
5256
5257 si = gsi_last_bb (store_bb);
5258 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_STORE);
5259
5260 stmt = gimple_build_assign (build_simple_mem_ref (unshare_expr (addr)),
5261 stored_val);
5262 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
5263
5264 t = built_in_decls[BUILT_IN_GOMP_ATOMIC_END];
5265 t = build_call_expr (t, 0);
5266 force_gimple_operand_gsi (&si, t, true, NULL_TREE, true, GSI_SAME_STMT);
5267 gsi_remove (&si, true);
5268
5269 if (gimple_in_ssa_p (cfun))
5270 update_ssa (TODO_update_ssa_no_phi);
5271 return true;
5272 }
5273
5274 /* Expand a GIMPLE_OMP_ATOMIC statement. We first try to expand it
5275 using expand_omp_atomic_fetch_op. If that fails, we try
5276 expand_omp_atomic_pipeline, and if that fails too, the
5277 ultimate fallback is wrapping the operation in a mutex
5278 (expand_omp_atomic_mutex). REGION is the atomic region built
5279 by build_omp_regions_1(). */
5280
5281 static void
5282 expand_omp_atomic (struct omp_region *region)
5283 {
5284 basic_block load_bb = region->entry, store_bb = region->exit;
5285 gimple load = last_stmt (load_bb), store = last_stmt (store_bb);
5286 tree loaded_val = gimple_omp_atomic_load_lhs (load);
5287 tree addr = gimple_omp_atomic_load_rhs (load);
5288 tree stored_val = gimple_omp_atomic_store_val (store);
5289 tree type = TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (addr)));
5290 HOST_WIDE_INT index;
5291
5292 /* Make sure the type is one of the supported sizes. */
5293 index = tree_low_cst (TYPE_SIZE_UNIT (type), 1);
5294 index = exact_log2 (index);
5295 if (index >= 0 && index <= 4)
5296 {
5297 unsigned int align = TYPE_ALIGN_UNIT (type);
5298
5299 /* __sync builtins require strict data alignment. */
5300 if (exact_log2 (align) >= index)
5301 {
5302 /* When possible, use specialized atomic update functions. */
5303 if ((INTEGRAL_TYPE_P (type) || POINTER_TYPE_P (type))
5304 && store_bb == single_succ (load_bb))
5305 {
5306 if (expand_omp_atomic_fetch_op (load_bb, addr,
5307 loaded_val, stored_val, index))
5308 return;
5309 }
5310
5311 /* If we don't have specialized __sync builtins, try to implement
5312 the operation as a compare-and-swap loop. */
5313 if (expand_omp_atomic_pipeline (load_bb, store_bb, addr,
5314 loaded_val, stored_val, index))
5315 return;
5316 }
5317 }
5318
5319 /* The ultimate fallback is wrapping the operation in a mutex. */
5320 expand_omp_atomic_mutex (load_bb, store_bb, addr, loaded_val, stored_val);
5321 }
5322
5323
5324 /* Expand the parallel region tree rooted at REGION. Expansion
5325 proceeds in depth-first order. Innermost regions are expanded
5326 first. This way, parallel regions that require a new function to
5327 be created (e.g., GIMPLE_OMP_PARALLEL) can be expanded without having any
5328 internal dependencies in their body. */
5329
5330 static void
5331 expand_omp (struct omp_region *region)
5332 {
5333 while (region)
5334 {
5335 location_t saved_location;
5336
5337 /* First, determine whether this is a combined parallel+workshare
5338 region. */
5339 if (region->type == GIMPLE_OMP_PARALLEL)
5340 determine_parallel_type (region);
5341
5342 if (region->inner)
5343 expand_omp (region->inner);
5344
5345 saved_location = input_location;
5346 if (gimple_has_location (last_stmt (region->entry)))
5347 input_location = gimple_location (last_stmt (region->entry));
5348
5349 switch (region->type)
5350 {
5351 case GIMPLE_OMP_PARALLEL:
5352 case GIMPLE_OMP_TASK:
5353 expand_omp_taskreg (region);
5354 break;
5355
5356 case GIMPLE_OMP_FOR:
5357 expand_omp_for (region);
5358 break;
5359
5360 case GIMPLE_OMP_SECTIONS:
5361 expand_omp_sections (region);
5362 break;
5363
5364 case GIMPLE_OMP_SECTION:
5365 /* Individual omp sections are handled together with their
5366 parent GIMPLE_OMP_SECTIONS region. */
5367 break;
5368
5369 case GIMPLE_OMP_SINGLE:
5370 expand_omp_single (region);
5371 break;
5372
5373 case GIMPLE_OMP_MASTER:
5374 case GIMPLE_OMP_ORDERED:
5375 case GIMPLE_OMP_CRITICAL:
5376 expand_omp_synch (region);
5377 break;
5378
5379 case GIMPLE_OMP_ATOMIC_LOAD:
5380 expand_omp_atomic (region);
5381 break;
5382
5383 default:
5384 gcc_unreachable ();
5385 }
5386
5387 input_location = saved_location;
5388 region = region->next;
5389 }
5390 }
5391
5392
5393 /* Helper for build_omp_regions. Scan the dominator tree starting at
5394 block BB. PARENT is the region that contains BB. If SINGLE_TREE is
5395 true, the function ends once a single tree is built (otherwise, a
5396 whole forest of OMP constructs may be built). */
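/* For example (an illustration): for

     #pragma omp parallel
     {
       #pragma omp for
       for (...) body;
     }

   this builds a GIMPLE_OMP_PARALLEL region whose single inner region is
   the GIMPLE_OMP_FOR, each delimited by its matching GIMPLE_OMP_RETURN
   as the exit point.  */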
5397
5398 static void
5399 build_omp_regions_1 (basic_block bb, struct omp_region *parent,
5400 bool single_tree)
5401 {
5402 gimple_stmt_iterator gsi;
5403 gimple stmt;
5404 basic_block son;
5405
5406 gsi = gsi_last_bb (bb);
5407 if (!gsi_end_p (gsi) && is_gimple_omp (gsi_stmt (gsi)))
5408 {
5409 struct omp_region *region;
5410 enum gimple_code code;
5411
5412 stmt = gsi_stmt (gsi);
5413 code = gimple_code (stmt);
5414 if (code == GIMPLE_OMP_RETURN)
5415 {
5416 /* STMT is the return point out of region PARENT. Mark it
5417 as the exit point and make PARENT the immediately
5418 enclosing region. */
5419 gcc_assert (parent);
5420 region = parent;
5421 region->exit = bb;
5422 parent = parent->outer;
5423 }
5424 else if (code == GIMPLE_OMP_ATOMIC_STORE)
5425 {
5426 /* GIMPLE_OMP_ATOMIC_STORE is analogous to
5427 GIMPLE_OMP_RETURN, but matches with
5428 GIMPLE_OMP_ATOMIC_LOAD. */
5429 gcc_assert (parent);
5430 gcc_assert (parent->type == GIMPLE_OMP_ATOMIC_LOAD);
5431 region = parent;
5432 region->exit = bb;
5433 parent = parent->outer;
5434 }
5435
5436 else if (code == GIMPLE_OMP_CONTINUE)
5437 {
5438 gcc_assert (parent);
5439 parent->cont = bb;
5440 }
5441 else if (code == GIMPLE_OMP_SECTIONS_SWITCH)
5442 {
5443 /* GIMPLE_OMP_SECTIONS_SWITCH is part of
5444 GIMPLE_OMP_SECTIONS, and we do nothing for it. */
5445 ;
5446 }
5447 else
5448 {
5449 /* Otherwise, this directive becomes the parent for a new
5450 region. */
5451 region = new_omp_region (bb, code, parent);
5452 parent = region;
5453 }
5454 }
5455
5456 if (single_tree && !parent)
5457 return;
5458
5459 for (son = first_dom_son (CDI_DOMINATORS, bb);
5460 son;
5461 son = next_dom_son (CDI_DOMINATORS, son))
5462 build_omp_regions_1 (son, parent, single_tree);
5463 }
5464
5465 /* Builds the tree of OMP regions rooted at ROOT, storing it in
5466 root_omp_region. */
5467
5468 static void
5469 build_omp_regions_root (basic_block root)
5470 {
5471 gcc_assert (root_omp_region == NULL);
5472 build_omp_regions_1 (root, NULL, true);
5473 gcc_assert (root_omp_region != NULL);
5474 }
5475
5476 /* Expands the OMP construct (and its subconstructs) starting at basic block HEAD. */
5477
5478 void
5479 omp_expand_local (basic_block head)
5480 {
5481 build_omp_regions_root (head);
5482 if (dump_file && (dump_flags & TDF_DETAILS))
5483 {
5484 fprintf (dump_file, "\nOMP region tree\n\n");
5485 dump_omp_region (dump_file, root_omp_region, 0);
5486 fprintf (dump_file, "\n");
5487 }
5488
5489 remove_exit_barriers (root_omp_region);
5490 expand_omp (root_omp_region);
5491
5492 free_omp_regions ();
5493 }
5494
5495 /* Scan the CFG and build a tree of OMP regions, storing it in
5496 root_omp_region. */
5497
5498 static void
5499 build_omp_regions (void)
5500 {
5501 gcc_assert (root_omp_region == NULL);
5502 calculate_dominance_info (CDI_DOMINATORS);
5503 build_omp_regions_1 (ENTRY_BLOCK_PTR, NULL, false);
5504 }
5505
5506 /* Main entry point for expanding OMP-GIMPLE into runtime calls. */
5507
5508 static unsigned int
5509 execute_expand_omp (void)
5510 {
5511 build_omp_regions ();
5512
5513 if (!root_omp_region)
5514 return 0;
5515
5516 if (dump_file)
5517 {
5518 fprintf (dump_file, "\nOMP region tree\n\n");
5519 dump_omp_region (dump_file, root_omp_region, 0);
5520 fprintf (dump_file, "\n");
5521 }
5522
5523 remove_exit_barriers (root_omp_region);
5524
5525 expand_omp (root_omp_region);
5526
5527 cleanup_tree_cfg ();
5528
5529 free_omp_regions ();
5530
5531 return 0;
5532 }
5533
5534 /* OMP expansion -- the default pass, run before creation of SSA form. */
5535
5536 static bool
5537 gate_expand_omp (void)
5538 {
5539 return (flag_openmp != 0 && !seen_error ());
5540 }
5541
5542 struct gimple_opt_pass pass_expand_omp =
5543 {
5544 {
5545 GIMPLE_PASS,
5546 "ompexp", /* name */
5547 gate_expand_omp, /* gate */
5548 execute_expand_omp, /* execute */
5549 NULL, /* sub */
5550 NULL, /* next */
5551 0, /* static_pass_number */
5552 TV_NONE, /* tv_id */
5553 PROP_gimple_any, /* properties_required */
5554 0, /* properties_provided */
5555 0, /* properties_destroyed */
5556 0, /* todo_flags_start */
5557 TODO_dump_func /* todo_flags_finish */
5558 }
5559 };
5560 \f
5561 /* Routines to lower OpenMP directives into OMP-GIMPLE. */
5562
5563 /* Lower the OpenMP sections directive in the statement at GSI_P.
5564 CTX is the enclosing OMP context for the current statement. */
5565
5566 static void
5567 lower_omp_sections (gimple_stmt_iterator *gsi_p, omp_context *ctx)
5568 {
5569 tree block, control;
5570 gimple_stmt_iterator tgsi;
5571 unsigned i, len;
5572 gimple stmt, new_stmt, bind, t;
5573 gimple_seq ilist, dlist, olist, new_body, body;
5574 struct gimplify_ctx gctx;
5575
5576 stmt = gsi_stmt (*gsi_p);
5577
5578 push_gimplify_context (&gctx);
5579
5580 dlist = NULL;
5581 ilist = NULL;
5582 lower_rec_input_clauses (gimple_omp_sections_clauses (stmt),
5583 &ilist, &dlist, ctx);
5584
5585 tgsi = gsi_start (gimple_omp_body (stmt));
5586 for (len = 0; !gsi_end_p (tgsi); len++, gsi_next (&tgsi))
5587 continue;
5588
5589 tgsi = gsi_start (gimple_omp_body (stmt));
5590 body = NULL;
5591 for (i = 0; i < len; i++, gsi_next (&tgsi))
5592 {
5593 omp_context *sctx;
5594 gimple sec_start;
5595
5596 sec_start = gsi_stmt (tgsi);
5597 sctx = maybe_lookup_ctx (sec_start);
5598 gcc_assert (sctx);
5599
5600 gimple_seq_add_stmt (&body, sec_start);
5601
5602 lower_omp (gimple_omp_body (sec_start), sctx);
5603 gimple_seq_add_seq (&body, gimple_omp_body (sec_start));
5604 gimple_omp_set_body (sec_start, NULL);
5605
5606 if (i == len - 1)
5607 {
5608 gimple_seq l = NULL;
5609 lower_lastprivate_clauses (gimple_omp_sections_clauses (stmt), NULL,
5610 &l, ctx);
5611 gimple_seq_add_seq (&body, l);
5612 gimple_omp_section_set_last (sec_start);
5613 }
5614
5615 gimple_seq_add_stmt (&body, gimple_build_omp_return (false));
5616 }
5617
5618 block = make_node (BLOCK);
5619 bind = gimple_build_bind (NULL, body, block);
5620
5621 olist = NULL;
5622 lower_reduction_clauses (gimple_omp_sections_clauses (stmt), &olist, ctx);
5623
5624 block = make_node (BLOCK);
5625 new_stmt = gimple_build_bind (NULL, NULL, block);
5626
5627 pop_gimplify_context (new_stmt);
5628 gimple_bind_append_vars (new_stmt, ctx->block_vars);
5629 BLOCK_VARS (block) = gimple_bind_vars (bind);
5630 if (BLOCK_VARS (block))
5631 TREE_USED (block) = 1;
5632
5633 new_body = NULL;
5634 gimple_seq_add_seq (&new_body, ilist);
5635 gimple_seq_add_stmt (&new_body, stmt);
5636 gimple_seq_add_stmt (&new_body, gimple_build_omp_sections_switch ());
5637 gimple_seq_add_stmt (&new_body, bind);
5638
5639 control = create_tmp_var (unsigned_type_node, ".section");
5640 t = gimple_build_omp_continue (control, control);
5641 gimple_omp_sections_set_control (stmt, control);
5642 gimple_seq_add_stmt (&new_body, t);
5643
5644 gimple_seq_add_seq (&new_body, olist);
5645 gimple_seq_add_seq (&new_body, dlist);
5646
5647 new_body = maybe_catch_exception (new_body);
5648
5649 t = gimple_build_omp_return
5650 (!!find_omp_clause (gimple_omp_sections_clauses (stmt),
5651 OMP_CLAUSE_NOWAIT));
5652 gimple_seq_add_stmt (&new_body, t);
5653
5654 gimple_bind_set_body (new_stmt, new_body);
5655 gimple_omp_set_body (stmt, NULL);
5656
5657 gsi_replace (gsi_p, new_stmt, true);
5658 }
5659
5660
5661 /* A subroutine of lower_omp_single. Expand the simple form of
5662 a GIMPLE_OMP_SINGLE, without a copyprivate clause:
5663
5664 if (GOMP_single_start ())
5665 BODY;
5666 [ GOMP_barrier (); ] (omitted if 'nowait' is present).
5667
5668 FIXME. It may be better to delay expanding the logic of this until
5669 pass_expand_omp. The expanded logic may make the job more difficult
5670 for a synchronization analysis pass. */
5671
5672 static void
5673 lower_omp_single_simple (gimple single_stmt, gimple_seq *pre_p)
5674 {
5675 location_t loc = gimple_location (single_stmt);
5676 tree tlabel = create_artificial_label (loc);
5677 tree flabel = create_artificial_label (loc);
5678 gimple call, cond;
5679 tree lhs, decl;
5680
5681 decl = built_in_decls[BUILT_IN_GOMP_SINGLE_START];
5682 lhs = create_tmp_var (TREE_TYPE (TREE_TYPE (decl)), NULL);
5683 call = gimple_build_call (decl, 0);
5684 gimple_call_set_lhs (call, lhs);
5685 gimple_seq_add_stmt (pre_p, call);
5686
5687 cond = gimple_build_cond (EQ_EXPR, lhs,
5688 fold_convert_loc (loc, TREE_TYPE (lhs),
5689 boolean_true_node),
5690 tlabel, flabel);
5691 gimple_seq_add_stmt (pre_p, cond);
5692 gimple_seq_add_stmt (pre_p, gimple_build_label (tlabel));
5693 gimple_seq_add_seq (pre_p, gimple_omp_body (single_stmt));
5694 gimple_seq_add_stmt (pre_p, gimple_build_label (flabel));
5695 }
5696
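/* For illustration, a sketch of the sequence built above (the label
   and temporary names are artificial):

       D.1 = GOMP_single_start ();
       if (D.1 == 1) goto tlabel; else goto flabel;
     tlabel:
       BODY;
     flabel:

   i.e. the structured 'if' from the comment is emitted as an explicit
   conditional jump over the body.  */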
5697
5698 /* A subroutine of lower_omp_single. Expand the copyprivate form of
5699 a GIMPLE_OMP_SINGLE, i.e. one with a copyprivate clause:
5700
5701 #pragma omp single copyprivate (a, b, c)
5702
5703 Create a new structure to hold copies of 'a', 'b' and 'c' and emit:
5704
5705 {
5706 if ((copyout_p = GOMP_single_copy_start ()) == NULL)
5707 {
5708 BODY;
5709 copyout.a = a;
5710 copyout.b = b;
5711 copyout.c = c;
5712 GOMP_single_copy_end (&copyout);
5713 }
5714 else
5715 {
5716 a = copyout_p->a;
5717 b = copyout_p->b;
5718 c = copyout_p->c;
5719 }
5720 GOMP_barrier ();
5721 }
5722
5723 FIXME. It may be better to delay expanding the logic of this until
5724 pass_expand_omp. The expanded logic may make the job more difficult
5725 for a synchronization analysis pass. */
5726
5727 static void
5728 lower_omp_single_copy (gimple single_stmt, gimple_seq *pre_p, omp_context *ctx)
5729 {
5730 tree ptr_type, t, l0, l1, l2;
5731 gimple_seq copyin_seq;
5732 location_t loc = gimple_location (single_stmt);
5733
5734 ctx->sender_decl = create_tmp_var (ctx->record_type, ".omp_copy_o");
5735
5736 ptr_type = build_pointer_type (ctx->record_type);
5737 ctx->receiver_decl = create_tmp_var (ptr_type, ".omp_copy_i");
5738
5739 l0 = create_artificial_label (loc);
5740 l1 = create_artificial_label (loc);
5741 l2 = create_artificial_label (loc);
5742
5743 t = build_call_expr_loc (loc, built_in_decls[BUILT_IN_GOMP_SINGLE_COPY_START], 0);
5744 t = fold_convert_loc (loc, ptr_type, t);
5745 gimplify_assign (ctx->receiver_decl, t, pre_p);
5746
5747 t = build2 (EQ_EXPR, boolean_type_node, ctx->receiver_decl,
5748 build_int_cst (ptr_type, 0));
5749 t = build3 (COND_EXPR, void_type_node, t,
5750 build_and_jump (&l0), build_and_jump (&l1));
5751 gimplify_and_add (t, pre_p);
5752
5753 gimple_seq_add_stmt (pre_p, gimple_build_label (l0));
5754
5755 gimple_seq_add_seq (pre_p, gimple_omp_body (single_stmt));
5756
5757 copyin_seq = NULL;
5758 lower_copyprivate_clauses (gimple_omp_single_clauses (single_stmt), pre_p,
5759 &copyin_seq, ctx);
5760
5761 t = build_fold_addr_expr_loc (loc, ctx->sender_decl);
5762 t = build_call_expr_loc (loc, built_in_decls[BUILT_IN_GOMP_SINGLE_COPY_END],
5763 1, t);
5764 gimplify_and_add (t, pre_p);
5765
5766 t = build_and_jump (&l2);
5767 gimplify_and_add (t, pre_p);
5768
5769 gimple_seq_add_stmt (pre_p, gimple_build_label (l1));
5770
5771 gimple_seq_add_seq (pre_p, copyin_seq);
5772
5773 gimple_seq_add_stmt (pre_p, gimple_build_label (l2));
5774 }
5775
5776
5777 /* Lower code for an OpenMP single directive. */
5778
5779 static void
5780 lower_omp_single (gimple_stmt_iterator *gsi_p, omp_context *ctx)
5781 {
5782 tree block;
5783 gimple t, bind, single_stmt = gsi_stmt (*gsi_p);
5784 gimple_seq bind_body, dlist;
5785 struct gimplify_ctx gctx;
5786
5787 push_gimplify_context (&gctx);
5788
5789 bind_body = dlist = NULL;
5790 lower_rec_input_clauses (gimple_omp_single_clauses (single_stmt),
5791 &bind_body, &dlist, ctx);
5792 lower_omp (gimple_omp_body (single_stmt), ctx);
5793
5794 gimple_seq_add_stmt (&bind_body, single_stmt);
5795
5796 if (ctx->record_type)
5797 lower_omp_single_copy (single_stmt, &bind_body, ctx);
5798 else
5799 lower_omp_single_simple (single_stmt, &bind_body);
5800
5801 gimple_omp_set_body (single_stmt, NULL);
5802
5803 gimple_seq_add_seq (&bind_body, dlist);
5804
5805 bind_body = maybe_catch_exception (bind_body);
5806
5807 t = gimple_build_omp_return
5808 (!!find_omp_clause (gimple_omp_single_clauses (single_stmt),
5809 OMP_CLAUSE_NOWAIT));
5810 gimple_seq_add_stmt (&bind_body, t);
5811
5812 block = make_node (BLOCK);
5813 bind = gimple_build_bind (NULL, bind_body, block);
5814
5815 pop_gimplify_context (bind);
5816
5817 gimple_bind_append_vars (bind, ctx->block_vars);
5818 BLOCK_VARS (block) = ctx->block_vars;
5819 gsi_replace (gsi_p, bind, true);
5820 if (BLOCK_VARS (block))
5821 TREE_USED (block) = 1;
5822 }
5823
5824
5825 /* Lower code for an OpenMP master directive. */
5826
5827 static void
5828 lower_omp_master (gimple_stmt_iterator *gsi_p, omp_context *ctx)
5829 {
5830 tree block, lab = NULL, x;
5831 gimple stmt = gsi_stmt (*gsi_p), bind;
5832 location_t loc = gimple_location (stmt);
5833 gimple_seq tseq;
5834 struct gimplify_ctx gctx;
5835
5836 push_gimplify_context (&gctx);
5837
5838 block = make_node (BLOCK);
5839 bind = gimple_build_bind (NULL, gimple_seq_alloc_with_stmt (stmt),
5840 block);
5841
5842 x = build_call_expr_loc (loc, built_in_decls[BUILT_IN_OMP_GET_THREAD_NUM], 0);
5843 x = build2 (EQ_EXPR, boolean_type_node, x, integer_zero_node);
5844 x = build3 (COND_EXPR, void_type_node, x, NULL, build_and_jump (&lab));
5845 tseq = NULL;
5846 gimplify_and_add (x, &tseq);
5847 gimple_bind_add_seq (bind, tseq);
5848
5849 lower_omp (gimple_omp_body (stmt), ctx);
5850 gimple_omp_set_body (stmt, maybe_catch_exception (gimple_omp_body (stmt)));
5851 gimple_bind_add_seq (bind, gimple_omp_body (stmt));
5852 gimple_omp_set_body (stmt, NULL);
5853
5854 gimple_bind_add_stmt (bind, gimple_build_label (lab));
5855
5856 gimple_bind_add_stmt (bind, gimple_build_omp_return (true));
5857
5858 pop_gimplify_context (bind);
5859
5860 gimple_bind_append_vars (bind, ctx->block_vars);
5861 BLOCK_VARS (block) = ctx->block_vars;
5862 gsi_replace (gsi_p, bind, true);
5863 }
5864
5865
5866 /* Lower code for an OpenMP ordered directive. */
5867
5868 static void
5869 lower_omp_ordered (gimple_stmt_iterator *gsi_p, omp_context *ctx)
5870 {
5871 tree block;
5872 gimple stmt = gsi_stmt (*gsi_p), bind, x;
5873 struct gimplify_ctx gctx;
5874
5875 push_gimplify_context (&gctx);
5876
5877 block = make_node (BLOCK);
5878 bind = gimple_build_bind (NULL, gimple_seq_alloc_with_stmt (stmt),
5879 block);
5880
5881 x = gimple_build_call (built_in_decls[BUILT_IN_GOMP_ORDERED_START], 0);
5882 gimple_bind_add_stmt (bind, x);
5883
5884 lower_omp (gimple_omp_body (stmt), ctx);
5885 gimple_omp_set_body (stmt, maybe_catch_exception (gimple_omp_body (stmt)));
5886 gimple_bind_add_seq (bind, gimple_omp_body (stmt));
5887 gimple_omp_set_body (stmt, NULL);
5888
5889 x = gimple_build_call (built_in_decls[BUILT_IN_GOMP_ORDERED_END], 0);
5890 gimple_bind_add_stmt (bind, x);
5891
5892 gimple_bind_add_stmt (bind, gimple_build_omp_return (true));
5893
5894 pop_gimplify_context (bind);
5895
5896 gimple_bind_append_vars (bind, ctx->block_vars);
5897 BLOCK_VARS (block) = gimple_bind_vars (bind);
5898 gsi_replace (gsi_p, bind, true);
5899 }
5900
5901
5902 /* Gimplify a GIMPLE_OMP_CRITICAL statement. This is a relatively simple
5903 substitution of a couple of function calls. But in the NAMED case,
5904 it requires that the languages coordinate a symbol name. It is
5905 therefore best put here in common code. */
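
/* For example (a sketch): every '#pragma omp critical (foo)' in the
   program maps to

     GOMP_critical_name_start (&.gomp_critical_user_foo);
     ... body ...
     GOMP_critical_name_end (&.gomp_critical_user_foo);

   where .gomp_critical_user_foo is the common, TREE_PUBLIC pointer
   variable created below, so all translation units that use the name
   'foo' share a single lock.  */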
5906
5907 static GTY((param1_is (tree), param2_is (tree)))
5908 splay_tree critical_name_mutexes;
5909
5910 static void
5911 lower_omp_critical (gimple_stmt_iterator *gsi_p, omp_context *ctx)
5912 {
5913 tree block;
5914 tree name, lock, unlock;
5915 gimple stmt = gsi_stmt (*gsi_p), bind;
5916 location_t loc = gimple_location (stmt);
5917 gimple_seq tbody;
5918 struct gimplify_ctx gctx;
5919
5920 name = gimple_omp_critical_name (stmt);
5921 if (name)
5922 {
5923 tree decl;
5924 splay_tree_node n;
5925
5926 if (!critical_name_mutexes)
5927 critical_name_mutexes
5928 = splay_tree_new_ggc (splay_tree_compare_pointers,
5929 ggc_alloc_splay_tree_tree_node_tree_node_splay_tree_s,
5930 ggc_alloc_splay_tree_tree_node_tree_node_splay_tree_node_s);
5931
5932 n = splay_tree_lookup (critical_name_mutexes, (splay_tree_key) name);
5933 if (n == NULL)
5934 {
5935 char *new_str;
5936
5937 decl = create_tmp_var_raw (ptr_type_node, NULL);
5938
5939 new_str = ACONCAT ((".gomp_critical_user_",
5940 IDENTIFIER_POINTER (name), NULL));
5941 DECL_NAME (decl) = get_identifier (new_str);
5942 TREE_PUBLIC (decl) = 1;
5943 TREE_STATIC (decl) = 1;
5944 DECL_COMMON (decl) = 1;
5945 DECL_ARTIFICIAL (decl) = 1;
5946 DECL_IGNORED_P (decl) = 1;
5947 varpool_finalize_decl (decl);
5948
5949 splay_tree_insert (critical_name_mutexes, (splay_tree_key) name,
5950 (splay_tree_value) decl);
5951 }
5952 else
5953 decl = (tree) n->value;
5954
5955 lock = built_in_decls[BUILT_IN_GOMP_CRITICAL_NAME_START];
5956 lock = build_call_expr_loc (loc, lock, 1, build_fold_addr_expr_loc (loc, decl));
5957
5958 unlock = built_in_decls[BUILT_IN_GOMP_CRITICAL_NAME_END];
5959 unlock = build_call_expr_loc (loc, unlock, 1,
5960 build_fold_addr_expr_loc (loc, decl));
5961 }
5962 else
5963 {
5964 lock = built_in_decls[BUILT_IN_GOMP_CRITICAL_START];
5965 lock = build_call_expr_loc (loc, lock, 0);
5966
5967 unlock = built_in_decls[BUILT_IN_GOMP_CRITICAL_END];
5968 unlock = build_call_expr_loc (loc, unlock, 0);
5969 }
5970
5971 push_gimplify_context (&gctx);
5972
5973 block = make_node (BLOCK);
5974 bind = gimple_build_bind (NULL, gimple_seq_alloc_with_stmt (stmt), block);
5975
5976 tbody = gimple_bind_body (bind);
5977 gimplify_and_add (lock, &tbody);
5978 gimple_bind_set_body (bind, tbody);
5979
5980 lower_omp (gimple_omp_body (stmt), ctx);
5981 gimple_omp_set_body (stmt, maybe_catch_exception (gimple_omp_body (stmt)));
5982 gimple_bind_add_seq (bind, gimple_omp_body (stmt));
5983 gimple_omp_set_body (stmt, NULL);
5984
5985 tbody = gimple_bind_body (bind);
5986 gimplify_and_add (unlock, &tbody);
5987 gimple_bind_set_body (bind, tbody);
5988
5989 gimple_bind_add_stmt (bind, gimple_build_omp_return (true));
5990
5991 pop_gimplify_context (bind);
5992 gimple_bind_append_vars (bind, ctx->block_vars);
5993 BLOCK_VARS (block) = gimple_bind_vars (bind);
5994 gsi_replace (gsi_p, bind, true);
5995 }
5996
5997
5998 /* A subroutine of lower_omp_for. Generate code to emit the predicate
5999 for a lastprivate clause. Given a loop control predicate of (V
6000 cond N2), we gate the clause on (!(V cond N2)). The lowered form
6001 is appended to *DLIST; the iterator initialization is appended to
6002 *BODY_P. */
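
/* For example (a sketch): given

     #pragma omp for lastprivate (x)
     for (i = 0; i < n; i++)
       x = f (i);

   the loop condition is i < n with step 1, so the copy-out sequence
   in *DLIST is gated on i == n (strict equality, see below); only the
   thread whose private iterator reached N2 stores its value of 'x'
   back to the shared copy.  */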
6003
6004 static void
6005 lower_omp_for_lastprivate (struct omp_for_data *fd, gimple_seq *body_p,
6006 gimple_seq *dlist, struct omp_context *ctx)
6007 {
6008 tree clauses, cond, vinit;
6009 enum tree_code cond_code;
6010 gimple_seq stmts;
6011
6012 cond_code = fd->loop.cond_code;
6013 cond_code = cond_code == LT_EXPR ? GE_EXPR : LE_EXPR;
6014
6015 /* When possible, use a strict equality expression. This can let
6016 optimizations such as VRP deduce the value and remove a copy. */
6017 if (host_integerp (fd->loop.step, 0))
6018 {
6019 HOST_WIDE_INT step = TREE_INT_CST_LOW (fd->loop.step);
6020 if (step == 1 || step == -1)
6021 cond_code = EQ_EXPR;
6022 }
6023
6024 cond = build2 (cond_code, boolean_type_node, fd->loop.v, fd->loop.n2);
6025
6026 clauses = gimple_omp_for_clauses (fd->for_stmt);
6027 stmts = NULL;
6028 lower_lastprivate_clauses (clauses, cond, &stmts, ctx);
6029 if (!gimple_seq_empty_p (stmts))
6030 {
6031 gimple_seq_add_seq (&stmts, *dlist);
6032 *dlist = stmts;
6033
6034 /* Optimize: v = 0; is usually cheaper than v = some_other_constant. */
6035 vinit = fd->loop.n1;
6036 if (cond_code == EQ_EXPR
6037 && host_integerp (fd->loop.n2, 0)
6038 && ! integer_zerop (fd->loop.n2))
6039 vinit = build_int_cst (TREE_TYPE (fd->loop.v), 0);
6040
6041 /* Initialize the iterator variable, so that threads that don't execute
6042 any iterations don't execute the lastprivate clauses by accident. */
6043 gimplify_assign (fd->loop.v, vinit, body_p);
6044 }
6045 }
6046
6047
6048 /* Lower code for an OpenMP loop directive. */
6049
6050 static void
6051 lower_omp_for (gimple_stmt_iterator *gsi_p, omp_context *ctx)
6052 {
6053 tree *rhs_p, block;
6054 struct omp_for_data fd;
6055 gimple stmt = gsi_stmt (*gsi_p), new_stmt;
6056 gimple_seq omp_for_body, body, dlist;
6057 size_t i;
6058 struct gimplify_ctx gctx;
6059
6060 push_gimplify_context (&gctx);
6061
6062 lower_omp (gimple_omp_for_pre_body (stmt), ctx);
6063 lower_omp (gimple_omp_body (stmt), ctx);
6064
6065 block = make_node (BLOCK);
6066 new_stmt = gimple_build_bind (NULL, NULL, block);
6067
6068 /* Move the declarations of temporaries in the loop body to the new
6069 bind before we make the body go away. */
6070 omp_for_body = gimple_omp_body (stmt);
6071 if (!gimple_seq_empty_p (omp_for_body)
6072 && gimple_code (gimple_seq_first_stmt (omp_for_body)) == GIMPLE_BIND)
6073 {
6074 tree vars = gimple_bind_vars (gimple_seq_first_stmt (omp_for_body));
6075 gimple_bind_append_vars (new_stmt, vars);
6076 }
6077
6078 /* The pre-body and input clauses go before the lowered GIMPLE_OMP_FOR. */
6079 dlist = NULL;
6080 body = NULL;
6081 lower_rec_input_clauses (gimple_omp_for_clauses (stmt), &body, &dlist, ctx);
6082 gimple_seq_add_seq (&body, gimple_omp_for_pre_body (stmt));
6083
6084 /* Lower the header expressions. At this point, we can assume that
6085 the header is of the form:
6086
6087 #pragma omp for (V = VAL1; V {<|>|<=|>=} VAL2; V = V [+-] VAL3)
6088
6089 We just need to make sure that VAL1, VAL2 and VAL3 are lowered
6090 using the .omp_data_s mapping, if needed. */
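
/* E.g. (a sketch): if VAL2 is a mapped reference such as
   .omp_data_i->n, the loop below replaces it with a formal temporary,
   emitting

     D.2 = .omp_data_i->n;

   into BODY and rewriting the header to compare against D.2.  */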
6091 for (i = 0; i < gimple_omp_for_collapse (stmt); i++)
6092 {
6093 rhs_p = gimple_omp_for_initial_ptr (stmt, i);
6094 if (!is_gimple_min_invariant (*rhs_p))
6095 *rhs_p = get_formal_tmp_var (*rhs_p, &body);
6096
6097 rhs_p = gimple_omp_for_final_ptr (stmt, i);
6098 if (!is_gimple_min_invariant (*rhs_p))
6099 *rhs_p = get_formal_tmp_var (*rhs_p, &body);
6100
6101 rhs_p = &TREE_OPERAND (gimple_omp_for_incr (stmt, i), 1);
6102 if (!is_gimple_min_invariant (*rhs_p))
6103 *rhs_p = get_formal_tmp_var (*rhs_p, &body);
6104 }
6105
6106 /* Once lowered, extract the bounds and clauses. */
6107 extract_omp_for_data (stmt, &fd, NULL);
6108
6109 lower_omp_for_lastprivate (&fd, &body, &dlist, ctx);
6110
6111 gimple_seq_add_stmt (&body, stmt);
6112 gimple_seq_add_seq (&body, gimple_omp_body (stmt));
6113
6114 gimple_seq_add_stmt (&body, gimple_build_omp_continue (fd.loop.v,
6115 fd.loop.v));
6116
6117 /* After the loop, add exit clauses. */
6118 lower_reduction_clauses (gimple_omp_for_clauses (stmt), &body, ctx);
6119 gimple_seq_add_seq (&body, dlist);
6120
6121 body = maybe_catch_exception (body);
6122
6123 /* Region exit marker goes at the end of the loop body. */
6124 gimple_seq_add_stmt (&body, gimple_build_omp_return (fd.have_nowait));
6125
6126 pop_gimplify_context (new_stmt);
6127
6128 gimple_bind_append_vars (new_stmt, ctx->block_vars);
6129 BLOCK_VARS (block) = gimple_bind_vars (new_stmt);
6130 if (BLOCK_VARS (block))
6131 TREE_USED (block) = 1;
6132
6133 gimple_bind_set_body (new_stmt, body);
6134 gimple_omp_set_body (stmt, NULL);
6135 gimple_omp_for_set_pre_body (stmt, NULL);
6136 gsi_replace (gsi_p, new_stmt, true);
6137 }
6138
6139 /* Callback for walk_stmts. Check if the current statement only contains
6140 GIMPLE_OMP_FOR or GIMPLE_OMP_SECTIONS. */
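
/* For example (a sketch):

     #pragma omp parallel
     #pragma omp for
     for (...) ...

   leaves *INFO at 1 (exactly one workshare statement and nothing
   else), so lower_omp_taskreg marks the parallel as combined; any
   other statement in the body forces *INFO to -1.  */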
6141
6142 static tree
6143 check_combined_parallel (gimple_stmt_iterator *gsi_p,
6144 bool *handled_ops_p,
6145 struct walk_stmt_info *wi)
6146 {
6147 int *info = (int *) wi->info;
6148 gimple stmt = gsi_stmt (*gsi_p);
6149
6150 *handled_ops_p = true;
6151 switch (gimple_code (stmt))
6152 {
6153 WALK_SUBSTMTS;
6154
6155 case GIMPLE_OMP_FOR:
6156 case GIMPLE_OMP_SECTIONS:
6157 *info = *info == 0 ? 1 : -1;
6158 break;
6159 default:
6160 *info = -1;
6161 break;
6162 }
6163 return NULL;
6164 }
6165
6166 struct omp_taskcopy_context
6167 {
6168 /* This field must be at the beginning, as we do "inheritance": Some
6169 callback functions for tree-inline.c (e.g., omp_copy_decl)
6170 receive a copy_body_data pointer that is up-casted to an
6171 omp_context pointer. */
6172 copy_body_data cb;
6173 omp_context *ctx;
6174 };
6175
6176 static tree
6177 task_copyfn_copy_decl (tree var, copy_body_data *cb)
6178 {
6179 struct omp_taskcopy_context *tcctx = (struct omp_taskcopy_context *) cb;
6180
6181 if (splay_tree_lookup (tcctx->ctx->sfield_map, (splay_tree_key) var))
6182 return create_tmp_var (TREE_TYPE (var), NULL);
6183
6184 return var;
6185 }
6186
6187 static tree
6188 task_copyfn_remap_type (struct omp_taskcopy_context *tcctx, tree orig_type)
6189 {
6190 tree name, new_fields = NULL, type, f;
6191
6192 type = lang_hooks.types.make_type (RECORD_TYPE);
6193 name = DECL_NAME (TYPE_NAME (orig_type));
6194 name = build_decl (gimple_location (tcctx->ctx->stmt),
6195 TYPE_DECL, name, type);
6196 TYPE_NAME (type) = name;
6197
6198 for (f = TYPE_FIELDS (orig_type); f ; f = TREE_CHAIN (f))
6199 {
6200 tree new_f = copy_node (f);
6201 DECL_CONTEXT (new_f) = type;
6202 TREE_TYPE (new_f) = remap_type (TREE_TYPE (f), &tcctx->cb);
6203 TREE_CHAIN (new_f) = new_fields;
6204 walk_tree (&DECL_SIZE (new_f), copy_tree_body_r, &tcctx->cb, NULL);
6205 walk_tree (&DECL_SIZE_UNIT (new_f), copy_tree_body_r, &tcctx->cb, NULL);
6206 walk_tree (&DECL_FIELD_OFFSET (new_f), copy_tree_body_r,
6207 &tcctx->cb, NULL);
6208 new_fields = new_f;
6209 *pointer_map_insert (tcctx->cb.decl_map, f) = new_f;
6210 }
6211 TYPE_FIELDS (type) = nreverse (new_fields);
6212 layout_type (type);
6213 return type;
6214 }
6215
6216 /* Create task copyfn. */
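
/* For illustration (a sketch; the identifiers are made up): for
   '#pragma omp task firstprivate (x)' the function built below looks
   roughly like

     void task_copyfn (struct dst_t *dst, struct src_t *src)
     {
       dst->x = src->x;    (or the language's copy constructor)
     }

   where src_t is the (possibly remapped) srecord_type filled in by
   the spawning thread and dst_t is the record_type of the task
   itself.  */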
6217
6218 static void
6219 create_task_copyfn (gimple task_stmt, omp_context *ctx)
6220 {
6221 struct function *child_cfun;
6222 tree child_fn, t, c, src, dst, f, sf, arg, sarg, decl;
6223 tree record_type, srecord_type, bind, list;
6224 bool record_needs_remap = false, srecord_needs_remap = false;
6225 splay_tree_node n;
6226 struct omp_taskcopy_context tcctx;
6227 struct gimplify_ctx gctx;
6228 location_t loc = gimple_location (task_stmt);
6229
6230 child_fn = gimple_omp_task_copy_fn (task_stmt);
6231 child_cfun = DECL_STRUCT_FUNCTION (child_fn);
6232 gcc_assert (child_cfun->cfg == NULL);
6233 DECL_SAVED_TREE (child_fn) = alloc_stmt_list ();
6234
6235 /* Reset DECL_CONTEXT on function arguments. */
6236 for (t = DECL_ARGUMENTS (child_fn); t; t = DECL_CHAIN (t))
6237 DECL_CONTEXT (t) = child_fn;
6238
6239 /* Populate the function. */
6240 push_gimplify_context (&gctx);
6241 current_function_decl = child_fn;
6242
6243 bind = build3 (BIND_EXPR, void_type_node, NULL, NULL, NULL);
6244 TREE_SIDE_EFFECTS (bind) = 1;
6245 list = NULL;
6246 DECL_SAVED_TREE (child_fn) = bind;
6247 DECL_SOURCE_LOCATION (child_fn) = gimple_location (task_stmt);
6248
6249 /* Remap src and dst argument types if needed. */
6250 record_type = ctx->record_type;
6251 srecord_type = ctx->srecord_type;
6252 for (f = TYPE_FIELDS (record_type); f ; f = DECL_CHAIN (f))
6253 if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
6254 {
6255 record_needs_remap = true;
6256 break;
6257 }
6258 for (f = TYPE_FIELDS (srecord_type); f ; f = DECL_CHAIN (f))
6259 if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
6260 {
6261 srecord_needs_remap = true;
6262 break;
6263 }
6264
6265 if (record_needs_remap || srecord_needs_remap)
6266 {
6267 memset (&tcctx, '\0', sizeof (tcctx));
6268 tcctx.cb.src_fn = ctx->cb.src_fn;
6269 tcctx.cb.dst_fn = child_fn;
6270 tcctx.cb.src_node = cgraph_get_node (tcctx.cb.src_fn);
6271 gcc_checking_assert (tcctx.cb.src_node);
6272 tcctx.cb.dst_node = tcctx.cb.src_node;
6273 tcctx.cb.src_cfun = ctx->cb.src_cfun;
6274 tcctx.cb.copy_decl = task_copyfn_copy_decl;
6275 tcctx.cb.eh_lp_nr = 0;
6276 tcctx.cb.transform_call_graph_edges = CB_CGE_MOVE;
6277 tcctx.cb.decl_map = pointer_map_create ();
6278 tcctx.ctx = ctx;
6279
6280 if (record_needs_remap)
6281 record_type = task_copyfn_remap_type (&tcctx, record_type);
6282 if (srecord_needs_remap)
6283 srecord_type = task_copyfn_remap_type (&tcctx, srecord_type);
6284 }
6285 else
6286 tcctx.cb.decl_map = NULL;
6287
6288 push_cfun (child_cfun);
6289
6290 arg = DECL_ARGUMENTS (child_fn);
6291 TREE_TYPE (arg) = build_pointer_type (record_type);
6292 sarg = DECL_CHAIN (arg);
6293 TREE_TYPE (sarg) = build_pointer_type (srecord_type);
6294
6295 /* First pass: initialize the temporaries used in the sizes and
6296 field offsets of record_type and srecord_type. */
6297 if (tcctx.cb.decl_map)
6298 for (c = gimple_omp_task_clauses (task_stmt); c; c = OMP_CLAUSE_CHAIN (c))
6299 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_FIRSTPRIVATE)
6300 {
6301 tree *p;
6302
6303 decl = OMP_CLAUSE_DECL (c);
6304 p = (tree *) pointer_map_contains (tcctx.cb.decl_map, decl);
6305 if (p == NULL)
6306 continue;
6307 n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
6308 sf = (tree) n->value;
6309 sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
6310 src = build_simple_mem_ref_loc (loc, sarg);
6311 src = build3 (COMPONENT_REF, TREE_TYPE (sf), src, sf, NULL);
6312 t = build2 (MODIFY_EXPR, TREE_TYPE (*p), *p, src);
6313 append_to_statement_list (t, &list);
6314 }
6315
6316 /* Second pass: copy shared var pointers and copy-construct non-VLA
6317 firstprivate vars. */
6318 for (c = gimple_omp_task_clauses (task_stmt); c; c = OMP_CLAUSE_CHAIN (c))
6319 switch (OMP_CLAUSE_CODE (c))
6320 {
6321 case OMP_CLAUSE_SHARED:
6322 decl = OMP_CLAUSE_DECL (c);
6323 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
6324 if (n == NULL)
6325 break;
6326 f = (tree) n->value;
6327 if (tcctx.cb.decl_map)
6328 f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
6329 n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
6330 sf = (tree) n->value;
6331 if (tcctx.cb.decl_map)
6332 sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
6333 src = build_simple_mem_ref_loc (loc, sarg);
6334 src = build3 (COMPONENT_REF, TREE_TYPE (sf), src, sf, NULL);
6335 dst = build_simple_mem_ref_loc (loc, arg);
6336 dst = build3 (COMPONENT_REF, TREE_TYPE (f), dst, f, NULL);
6337 t = build2 (MODIFY_EXPR, TREE_TYPE (dst), dst, src);
6338 append_to_statement_list (t, &list);
6339 break;
6340 case OMP_CLAUSE_FIRSTPRIVATE:
6341 decl = OMP_CLAUSE_DECL (c);
6342 if (is_variable_sized (decl))
6343 break;
6344 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
6345 if (n == NULL)
6346 break;
6347 f = (tree) n->value;
6348 if (tcctx.cb.decl_map)
6349 f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
6350 n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
6351 if (n != NULL)
6352 {
6353 sf = (tree) n->value;
6354 if (tcctx.cb.decl_map)
6355 sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
6356 src = build_simple_mem_ref_loc (loc, sarg);
6357 src = build3 (COMPONENT_REF, TREE_TYPE (sf), src, sf, NULL);
6358 if (use_pointer_for_field (decl, NULL) || is_reference (decl))
6359 src = build_simple_mem_ref_loc (loc, src);
6360 }
6361 else
6362 src = decl;
6363 dst = build_simple_mem_ref_loc (loc, arg);
6364 dst = build3 (COMPONENT_REF, TREE_TYPE (f), dst, f, NULL);
6365 t = lang_hooks.decls.omp_clause_copy_ctor (c, dst, src);
6366 append_to_statement_list (t, &list);
6367 break;
6368 case OMP_CLAUSE_PRIVATE:
6369 if (! OMP_CLAUSE_PRIVATE_OUTER_REF (c))
6370 break;
6371 decl = OMP_CLAUSE_DECL (c);
6372 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
6373 f = (tree) n->value;
6374 if (tcctx.cb.decl_map)
6375 f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
6376 n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
6377 if (n != NULL)
6378 {
6379 sf = (tree) n->value;
6380 if (tcctx.cb.decl_map)
6381 sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
6382 src = build_simple_mem_ref_loc (loc, sarg);
6383 src = build3 (COMPONENT_REF, TREE_TYPE (sf), src, sf, NULL);
6384 if (use_pointer_for_field (decl, NULL))
6385 src = build_simple_mem_ref_loc (loc, src);
6386 }
6387 else
6388 src = decl;
6389 dst = build_simple_mem_ref_loc (loc, arg);
6390 dst = build3 (COMPONENT_REF, TREE_TYPE (f), dst, f, NULL);
6391 t = build2 (MODIFY_EXPR, TREE_TYPE (dst), dst, src);
6392 append_to_statement_list (t, &list);
6393 break;
6394 default:
6395 break;
6396 }
6397
6398 /* Last pass: handle VLA firstprivates. */
6399 if (tcctx.cb.decl_map)
6400 for (c = gimple_omp_task_clauses (task_stmt); c; c = OMP_CLAUSE_CHAIN (c))
6401 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_FIRSTPRIVATE)
6402 {
6403 tree ind, ptr, df;
6404
6405 decl = OMP_CLAUSE_DECL (c);
6406 if (!is_variable_sized (decl))
6407 continue;
6408 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
6409 if (n == NULL)
6410 continue;
6411 f = (tree) n->value;
6412 f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
6413 gcc_assert (DECL_HAS_VALUE_EXPR_P (decl));
6414 ind = DECL_VALUE_EXPR (decl);
6415 gcc_assert (TREE_CODE (ind) == INDIRECT_REF);
6416 gcc_assert (DECL_P (TREE_OPERAND (ind, 0)));
6417 n = splay_tree_lookup (ctx->sfield_map,
6418 (splay_tree_key) TREE_OPERAND (ind, 0));
6419 sf = (tree) n->value;
6420 sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
6421 src = build_simple_mem_ref_loc (loc, sarg);
6422 src = build3 (COMPONENT_REF, TREE_TYPE (sf), src, sf, NULL);
6423 src = build_simple_mem_ref_loc (loc, src);
6424 dst = build_simple_mem_ref_loc (loc, arg);
6425 dst = build3 (COMPONENT_REF, TREE_TYPE (f), dst, f, NULL);
6426 t = lang_hooks.decls.omp_clause_copy_ctor (c, dst, src);
6427 append_to_statement_list (t, &list);
6428 n = splay_tree_lookup (ctx->field_map,
6429 (splay_tree_key) TREE_OPERAND (ind, 0));
6430 df = (tree) n->value;
6431 df = *(tree *) pointer_map_contains (tcctx.cb.decl_map, df);
6432 ptr = build_simple_mem_ref_loc (loc, arg);
6433 ptr = build3 (COMPONENT_REF, TREE_TYPE (df), ptr, df, NULL);
6434 t = build2 (MODIFY_EXPR, TREE_TYPE (ptr), ptr,
6435 build_fold_addr_expr_loc (loc, dst));
6436 append_to_statement_list (t, &list);
6437 }
6438
6439 t = build1 (RETURN_EXPR, void_type_node, NULL);
6440 append_to_statement_list (t, &list);
6441
6442 if (tcctx.cb.decl_map)
6443 pointer_map_destroy (tcctx.cb.decl_map);
6444 pop_gimplify_context (NULL);
6445 BIND_EXPR_BODY (bind) = list;
6446 pop_cfun ();
6447 current_function_decl = ctx->cb.src_fn;
6448 }
6449
6450 /* Lower the OpenMP parallel or task directive in the statement at
6451 GSI_P. CTX holds context information for the directive. */
6452
6453 static void
6454 lower_omp_taskreg (gimple_stmt_iterator *gsi_p, omp_context *ctx)
6455 {
6456 tree clauses;
6457 tree child_fn, t;
6458 gimple stmt = gsi_stmt (*gsi_p);
6459 gimple par_bind, bind;
6460 gimple_seq par_body, olist, ilist, par_olist, par_ilist, new_body;
6461 struct gimplify_ctx gctx;
6462 location_t loc = gimple_location (stmt);
6463
6464 clauses = gimple_omp_taskreg_clauses (stmt);
6465 par_bind = gimple_seq_first_stmt (gimple_omp_body (stmt));
6466 par_body = gimple_bind_body (par_bind);
6467 child_fn = ctx->cb.dst_fn;
6468 if (gimple_code (stmt) == GIMPLE_OMP_PARALLEL
6469 && !gimple_omp_parallel_combined_p (stmt))
6470 {
6471 struct walk_stmt_info wi;
6472 int ws_num = 0;
6473
6474 memset (&wi, 0, sizeof (wi));
6475 wi.info = &ws_num;
6476 wi.val_only = true;
6477 walk_gimple_seq (par_body, check_combined_parallel, NULL, &wi);
6478 if (ws_num == 1)
6479 gimple_omp_parallel_set_combined_p (stmt, true);
6480 }
6481 if (ctx->srecord_type)
6482 create_task_copyfn (stmt, ctx);
6483
6484 push_gimplify_context (&gctx);
6485
6486 par_olist = NULL;
6487 par_ilist = NULL;
6488 lower_rec_input_clauses (clauses, &par_ilist, &par_olist, ctx);
6489 lower_omp (par_body, ctx);
6490 if (gimple_code (stmt) == GIMPLE_OMP_PARALLEL)
6491 lower_reduction_clauses (clauses, &par_olist, ctx);
6492
6493 /* Declare all the variables created by mapping and the variables
6494 declared in the scope of the parallel body. */
6495 record_vars_into (ctx->block_vars, child_fn);
6496 record_vars_into (gimple_bind_vars (par_bind), child_fn);
6497
6498 if (ctx->record_type)
6499 {
6500 ctx->sender_decl
6501 = create_tmp_var (ctx->srecord_type ? ctx->srecord_type
6502 : ctx->record_type, ".omp_data_o");
6503 DECL_NAMELESS (ctx->sender_decl) = 1;
6504 TREE_ADDRESSABLE (ctx->sender_decl) = 1;
6505 gimple_omp_taskreg_set_data_arg (stmt, ctx->sender_decl);
6506 }
6507
6508 olist = NULL;
6509 ilist = NULL;
6510 lower_send_clauses (clauses, &ilist, &olist, ctx);
6511 lower_send_shared_vars (&ilist, &olist, ctx);
6512
6513 /* Once all the expansions are done, sequence all the different
6514 fragments inside gimple_omp_body. */
6515
6516 new_body = NULL;
6517
6518 if (ctx->record_type)
6519 {
6520 t = build_fold_addr_expr_loc (loc, ctx->sender_decl);
6521 /* fixup_child_record_type might have changed receiver_decl's type. */
6522 t = fold_convert_loc (loc, TREE_TYPE (ctx->receiver_decl), t);
6523 gimple_seq_add_stmt (&new_body,
6524 gimple_build_assign (ctx->receiver_decl, t));
6525 }
6526
6527 gimple_seq_add_seq (&new_body, par_ilist);
6528 gimple_seq_add_seq (&new_body, par_body);
6529 gimple_seq_add_seq (&new_body, par_olist);
6530 new_body = maybe_catch_exception (new_body);
6531 gimple_seq_add_stmt (&new_body, gimple_build_omp_return (false));
6532 gimple_omp_set_body (stmt, new_body);
6533
6534 bind = gimple_build_bind (NULL, NULL, gimple_bind_block (par_bind));
6535 gimple_bind_add_stmt (bind, stmt);
6536 if (ilist || olist)
6537 {
6538 gimple_seq_add_stmt (&ilist, bind);
6539 gimple_seq_add_seq (&ilist, olist);
6540 bind = gimple_build_bind (NULL, ilist, NULL);
6541 }
6542
6543 gsi_replace (gsi_p, bind, true);
6544
6545 pop_gimplify_context (NULL);
6546 }
6547
6548 /* Callback for lower_omp_1. Return non-NULL if *tp needs to be
6549 regimplified. If DATA is non-NULL, lower_omp_1 is being invoked
6550 outside of an OpenMP context, but with task_shared_vars set. */
6551
6552 static tree
6553 lower_omp_regimplify_p (tree *tp, int *walk_subtrees,
6554 void *data)
6555 {
6556 tree t = *tp;
6557
6558 /* Any variable with DECL_VALUE_EXPR needs to be regimplified. */
6559 if (TREE_CODE (t) == VAR_DECL && data == NULL && DECL_HAS_VALUE_EXPR_P (t))
6560 return t;
6561
6562 if (task_shared_vars
6563 && DECL_P (t)
6564 && bitmap_bit_p (task_shared_vars, DECL_UID (t)))
6565 return t;
6566
6567 /* If a global variable has been privatized, TREE_CONSTANT on
6568 ADDR_EXPR might be wrong. */
6569 if (data == NULL && TREE_CODE (t) == ADDR_EXPR)
6570 recompute_tree_invariant_for_addr_expr (t);
6571
6572 *walk_subtrees = !TYPE_P (t) && !DECL_P (t);
6573 return NULL_TREE;
6574 }
6575
6576 static void
6577 lower_omp_1 (gimple_stmt_iterator *gsi_p, omp_context *ctx)
6578 {
6579 gimple stmt = gsi_stmt (*gsi_p);
6580 struct walk_stmt_info wi;
6581
6582 if (gimple_has_location (stmt))
6583 input_location = gimple_location (stmt);
6584
6585 if (task_shared_vars)
6586 memset (&wi, '\0', sizeof (wi));
6587
6588 /* If we have issued syntax errors, avoid doing any heavy lifting.
6589 Just replace the OpenMP directives with a NOP to avoid
6590 confusing RTL expansion. */
6591 if (seen_error () && is_gimple_omp (stmt))
6592 {
6593 gsi_replace (gsi_p, gimple_build_nop (), true);
6594 return;
6595 }
6596
6597 switch (gimple_code (stmt))
6598 {
6599 case GIMPLE_COND:
6600 if ((ctx || task_shared_vars)
6601 && (walk_tree (gimple_cond_lhs_ptr (stmt), lower_omp_regimplify_p,
6602 ctx ? NULL : &wi, NULL)
6603 || walk_tree (gimple_cond_rhs_ptr (stmt), lower_omp_regimplify_p,
6604 ctx ? NULL : &wi, NULL)))
6605 gimple_regimplify_operands (stmt, gsi_p);
6606 break;
6607 case GIMPLE_CATCH:
6608 lower_omp (gimple_catch_handler (stmt), ctx);
6609 break;
6610 case GIMPLE_EH_FILTER:
6611 lower_omp (gimple_eh_filter_failure (stmt), ctx);
6612 break;
6613 case GIMPLE_TRY:
6614 lower_omp (gimple_try_eval (stmt), ctx);
6615 lower_omp (gimple_try_cleanup (stmt), ctx);
6616 break;
6617 case GIMPLE_BIND:
6618 lower_omp (gimple_bind_body (stmt), ctx);
6619 break;
6620 case GIMPLE_OMP_PARALLEL:
6621 case GIMPLE_OMP_TASK:
6622 ctx = maybe_lookup_ctx (stmt);
6623 lower_omp_taskreg (gsi_p, ctx);
6624 break;
6625 case GIMPLE_OMP_FOR:
6626 ctx = maybe_lookup_ctx (stmt);
6627 gcc_assert (ctx);
6628 lower_omp_for (gsi_p, ctx);
6629 break;
6630 case GIMPLE_OMP_SECTIONS:
6631 ctx = maybe_lookup_ctx (stmt);
6632 gcc_assert (ctx);
6633 lower_omp_sections (gsi_p, ctx);
6634 break;
6635 case GIMPLE_OMP_SINGLE:
6636 ctx = maybe_lookup_ctx (stmt);
6637 gcc_assert (ctx);
6638 lower_omp_single (gsi_p, ctx);
6639 break;
6640 case GIMPLE_OMP_MASTER:
6641 ctx = maybe_lookup_ctx (stmt);
6642 gcc_assert (ctx);
6643 lower_omp_master (gsi_p, ctx);
6644 break;
6645 case GIMPLE_OMP_ORDERED:
6646 ctx = maybe_lookup_ctx (stmt);
6647 gcc_assert (ctx);
6648 lower_omp_ordered (gsi_p, ctx);
6649 break;
6650 case GIMPLE_OMP_CRITICAL:
6651 ctx = maybe_lookup_ctx (stmt);
6652 gcc_assert (ctx);
6653 lower_omp_critical (gsi_p, ctx);
6654 break;
6655 case GIMPLE_OMP_ATOMIC_LOAD:
6656 if ((ctx || task_shared_vars)
6657 && walk_tree (gimple_omp_atomic_load_rhs_ptr (stmt),
6658 lower_omp_regimplify_p, ctx ? NULL : &wi, NULL))
6659 gimple_regimplify_operands (stmt, gsi_p);
6660 break;
6661 default:
6662 if ((ctx || task_shared_vars)
6663 && walk_gimple_op (stmt, lower_omp_regimplify_p,
6664 ctx ? NULL : &wi))
6665 gimple_regimplify_operands (stmt, gsi_p);
6666 break;
6667 }
6668 }
6669
6670 static void
6671 lower_omp (gimple_seq body, omp_context *ctx)
6672 {
6673 location_t saved_location = input_location;
6674 gimple_stmt_iterator gsi;
6675 for (gsi = gsi_start (body); !gsi_end_p (gsi); gsi_next (&gsi))
6676 lower_omp_1 (&gsi, ctx);
6677 input_location = saved_location;
6678 }
6679 \f
6680 /* Main entry point for the OpenMP lowering pass. */
6681
6682 static unsigned int
6683 execute_lower_omp (void)
6684 {
6685 gimple_seq body;
6686
6687 /* This pass always runs, to provide PROP_gimple_lomp.
6688 But there is nothing to do unless -fopenmp is given. */
6689 if (flag_openmp == 0)
6690 return 0;
6691
6692 all_contexts = splay_tree_new (splay_tree_compare_pointers, 0,
6693 delete_omp_context);
6694
6695 body = gimple_body (current_function_decl);
6696 scan_omp (body, NULL);
6697 gcc_assert (taskreg_nesting_level == 0);
6698
6699 if (all_contexts->root)
6700 {
6701 struct gimplify_ctx gctx;
6702
6703 if (task_shared_vars)
6704 push_gimplify_context (&gctx);
6705 lower_omp (body, NULL);
6706 if (task_shared_vars)
6707 pop_gimplify_context (NULL);
6708 }
6709
6710 if (all_contexts)
6711 {
6712 splay_tree_delete (all_contexts);
6713 all_contexts = NULL;
6714 }
6715 BITMAP_FREE (task_shared_vars);
6716 return 0;
6717 }
6718
6719 struct gimple_opt_pass pass_lower_omp =
6720 {
6721 {
6722 GIMPLE_PASS,
6723 "omplower", /* name */
6724 NULL, /* gate */
6725 execute_lower_omp, /* execute */
6726 NULL, /* sub */
6727 NULL, /* next */
6728 0, /* static_pass_number */
6729 TV_NONE, /* tv_id */
6730 PROP_gimple_any, /* properties_required */
6731 PROP_gimple_lomp, /* properties_provided */
6732 0, /* properties_destroyed */
6733 0, /* todo_flags_start */
6734 TODO_dump_func /* todo_flags_finish */
6735 }
6736 };
6737 \f
6738 /* The following is a utility to diagnose OpenMP structured block violations.
6739 It is not part of the "omplower" pass, as that's invoked too late. It
6740 should be invoked by the respective front ends after gimplification. */
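
/* For example (a sketch), both of the following are rejected by the
   passes below:

     goto l1;            <- "invalid entry to OpenMP structured block"
     #pragma omp parallel
     { l1:; }

     #pragma omp parallel
     { goto l2; }        <- invalid branch out of the structured block
     l2:;  */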
6741
6742 static splay_tree all_labels;
6743
6744 /* Check for mismatched contexts and generate an error if needed. Return
6745 true if an error is detected. */
6746
6747 static bool
6748 diagnose_sb_0 (gimple_stmt_iterator *gsi_p,
6749 gimple branch_ctx, gimple label_ctx)
6750 {
6751 if (label_ctx == branch_ctx)
6752 return false;
6753
6754 /* Previously we kept track of the label's entire context in
6755 diagnose_sb_[12] so we could traverse it and issue a correct "exit"
6756 or "enter" error message upon a structured block violation.
6757
6758 We built the context by building a list with tree_cons'ing, but there
6759 is no easy counterpart in gimple tuples. It seems like far too much
6760 work for issuing exit/enter error messages. If someone really misses
6761 the distinct error message... patches welcome. */
6765
6766 #if 0
6767 /* Try to avoid confusing the user by producing an error message
6768 with correct "exit" or "enter" verbiage. We prefer "exit"
6769 unless we can show that LABEL_CTX is nested within BRANCH_CTX. */
6770 if (branch_ctx == NULL)
6771 exit_p = false;
6772 else
6773 {
6774 while (label_ctx)
6775 {
6776 if (TREE_VALUE (label_ctx) == branch_ctx)
6777 {
6778 exit_p = false;
6779 break;
6780 }
6781 label_ctx = TREE_CHAIN (label_ctx);
6782 }
6783 }
6784
6785 if (exit_p)
6786 error ("invalid exit from OpenMP structured block");
6787 else
6788 error ("invalid entry to OpenMP structured block");
6789 #endif
6790
6791 /* If it's obvious we have an invalid entry, be specific about the error. */
6792 if (branch_ctx == NULL)
6793 error ("invalid entry to OpenMP structured block");
6794 else
6795 /* Otherwise, be vague and lazy, but efficient. */
6796 error ("invalid branch to/from an OpenMP structured block");
6797
6798 gsi_replace (gsi_p, gimple_build_nop (), false);
6799 return true;
6800 }
6801
6802 /* Pass 1: Create a minimal tree of OpenMP structured blocks, and record
6803 where each label is found. */
6804
6805 static tree
6806 diagnose_sb_1 (gimple_stmt_iterator *gsi_p, bool *handled_ops_p,
6807 struct walk_stmt_info *wi)
6808 {
6809 gimple context = (gimple) wi->info;
6810 gimple inner_context;
6811 gimple stmt = gsi_stmt (*gsi_p);
6812
6813 *handled_ops_p = true;
6814
6815 switch (gimple_code (stmt))
6816 {
6817 WALK_SUBSTMTS;
6818
6819 case GIMPLE_OMP_PARALLEL:
6820 case GIMPLE_OMP_TASK:
6821 case GIMPLE_OMP_SECTIONS:
6822 case GIMPLE_OMP_SINGLE:
6823 case GIMPLE_OMP_SECTION:
6824 case GIMPLE_OMP_MASTER:
6825 case GIMPLE_OMP_ORDERED:
6826 case GIMPLE_OMP_CRITICAL:
6827 /* The minimal context here is just the current OMP construct. */
6828 inner_context = stmt;
6829 wi->info = inner_context;
6830 walk_gimple_seq (gimple_omp_body (stmt), diagnose_sb_1, NULL, wi);
6831 wi->info = context;
6832 break;
6833
6834 case GIMPLE_OMP_FOR:
6835 inner_context = stmt;
6836 wi->info = inner_context;
6837 /* gimple_omp_for_{index,initial,final} are all DECLs; no need to
6838 walk them. */
6839 walk_gimple_seq (gimple_omp_for_pre_body (stmt),
6840 diagnose_sb_1, NULL, wi);
6841 walk_gimple_seq (gimple_omp_body (stmt), diagnose_sb_1, NULL, wi);
6842 wi->info = context;
6843 break;
6844
6845 case GIMPLE_LABEL:
6846 splay_tree_insert (all_labels, (splay_tree_key) gimple_label_label (stmt),
6847 (splay_tree_value) context);
6848 break;
6849
6850 default:
6851 break;
6852 }
6853
6854 return NULL_TREE;
6855 }
6856
6857 /* Pass 2: Check each branch and see if its context differs from the
6858 context of its destination label. */
6859
6860 static tree
6861 diagnose_sb_2 (gimple_stmt_iterator *gsi_p, bool *handled_ops_p,
6862 struct walk_stmt_info *wi)
6863 {
6864 gimple context = (gimple) wi->info;
6865 splay_tree_node n;
6866 gimple stmt = gsi_stmt (*gsi_p);
6867
6868 *handled_ops_p = true;
6869
6870 switch (gimple_code (stmt))
6871 {
6872 WALK_SUBSTMTS;
6873
6874 case GIMPLE_OMP_PARALLEL:
6875 case GIMPLE_OMP_TASK:
6876 case GIMPLE_OMP_SECTIONS:
6877 case GIMPLE_OMP_SINGLE:
6878 case GIMPLE_OMP_SECTION:
6879 case GIMPLE_OMP_MASTER:
6880 case GIMPLE_OMP_ORDERED:
6881 case GIMPLE_OMP_CRITICAL:
6882 wi->info = stmt;
6883 walk_gimple_seq (gimple_omp_body (stmt), diagnose_sb_2, NULL, wi);
6884 wi->info = context;
6885 break;
6886
6887 case GIMPLE_OMP_FOR:
6888 wi->info = stmt;
6889 /* gimple_omp_for_{index,initial,final} are all DECLs; no need to
6890 walk them. */
6891 walk_gimple_seq (gimple_omp_for_pre_body (stmt),
6892 diagnose_sb_2, NULL, wi);
6893 walk_gimple_seq (gimple_omp_body (stmt), diagnose_sb_2, NULL, wi);
6894 wi->info = context;
6895 break;
6896
6897 case GIMPLE_COND:
6898 {
6899 tree lab = gimple_cond_true_label (stmt);
6900 if (lab)
6901 {
6902 n = splay_tree_lookup (all_labels,
6903 (splay_tree_key) lab);
6904 diagnose_sb_0 (gsi_p, context,
6905 n ? (gimple) n->value : NULL);
6906 }
6907 lab = gimple_cond_false_label (stmt);
6908 if (lab)
6909 {
6910 n = splay_tree_lookup (all_labels,
6911 (splay_tree_key) lab);
6912 diagnose_sb_0 (gsi_p, context,
6913 n ? (gimple) n->value : NULL);
6914 }
6915 }
6916 break;
6917
6918 case GIMPLE_GOTO:
6919 {
6920 tree lab = gimple_goto_dest (stmt);
6921 if (TREE_CODE (lab) != LABEL_DECL)
6922 break;
6923
6924 n = splay_tree_lookup (all_labels, (splay_tree_key) lab);
6925 diagnose_sb_0 (gsi_p, context, n ? (gimple) n->value : NULL);
6926 }
6927 break;
6928
6929 case GIMPLE_SWITCH:
6930 {
6931 unsigned int i;
6932 for (i = 0; i < gimple_switch_num_labels (stmt); ++i)
6933 {
6934 tree lab = CASE_LABEL (gimple_switch_label (stmt, i));
6935 n = splay_tree_lookup (all_labels, (splay_tree_key) lab);
6936 if (n && diagnose_sb_0 (gsi_p, context, (gimple) n->value))
6937 break;
6938 }
6939 }
6940 break;
6941
6942 case GIMPLE_RETURN:
6943 diagnose_sb_0 (gsi_p, context, NULL);
6944 break;
6945
6946 default:
6947 break;
6948 }
6949
6950 return NULL_TREE;
6951 }
6952
6953 static unsigned int
6954 diagnose_omp_structured_block_errors (void)
6955 {
6956 struct walk_stmt_info wi;
6957 gimple_seq body = gimple_body (current_function_decl);
6958
6959 all_labels = splay_tree_new (splay_tree_compare_pointers, 0, 0);
6960
6961 memset (&wi, 0, sizeof (wi));
6962 walk_gimple_seq (body, diagnose_sb_1, NULL, &wi);
6963
6964 memset (&wi, 0, sizeof (wi));
6965 wi.want_locations = true;
6966 walk_gimple_seq (body, diagnose_sb_2, NULL, &wi);
6967
6968 splay_tree_delete (all_labels);
6969 all_labels = NULL;
6970
6971 return 0;
6972 }
6973
6974 static bool
6975 gate_diagnose_omp_blocks (void)
6976 {
6977 return flag_openmp != 0;
6978 }
6979
6980 struct gimple_opt_pass pass_diagnose_omp_blocks =
6981 {
6982 {
6983 GIMPLE_PASS,
6984 "*diagnose_omp_blocks", /* name */
6985 gate_diagnose_omp_blocks, /* gate */
6986 diagnose_omp_structured_block_errors, /* execute */
6987 NULL, /* sub */
6988 NULL, /* next */
6989 0, /* static_pass_number */
6990 TV_NONE, /* tv_id */
6991 PROP_gimple_any, /* properties_required */
6992 0, /* properties_provided */
6993 0, /* properties_destroyed */
6994 0, /* todo_flags_start */
6995 0, /* todo_flags_finish */
6996 }
6997 };
6998
6999 #include "gt-omp-low.h"