gcc/omp-low.c
1 /* Lowering pass for OpenMP directives. Converts OpenMP directives
2 into explicit calls to the runtime library (libgomp) and data
3 marshalling to implement data sharing and copying clauses.
4 Contributed by Diego Novillo <dnovillo@redhat.com>
5
6 Copyright (C) 2005, 2006, 2007, 2008, 2009 Free Software Foundation, Inc.
7
8 This file is part of GCC.
9
10 GCC is free software; you can redistribute it and/or modify it under
11 the terms of the GNU General Public License as published by the Free
12 Software Foundation; either version 3, or (at your option) any later
13 version.
14
15 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
16 WARRANTY; without even the implied warranty of MERCHANTABILITY or
17 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
18 for more details.
19
20 You should have received a copy of the GNU General Public License
21 along with GCC; see the file COPYING3. If not see
22 <http://www.gnu.org/licenses/>. */
23
24 #include "config.h"
25 #include "system.h"
26 #include "coretypes.h"
27 #include "tm.h"
28 #include "tree.h"
29 #include "rtl.h"
30 #include "gimple.h"
31 #include "tree-iterator.h"
32 #include "tree-inline.h"
33 #include "langhooks.h"
34 #include "diagnostic.h"
35 #include "tree-flow.h"
36 #include "timevar.h"
37 #include "flags.h"
38 #include "function.h"
39 #include "expr.h"
40 #include "toplev.h"
41 #include "tree-pass.h"
42 #include "ggc.h"
43 #include "except.h"
44 #include "splay-tree.h"
45 #include "optabs.h"
46 #include "cfgloop.h"
47
48
49 /* Lowering of OpenMP parallel and workshare constructs proceeds in two
50 phases. The first phase scans the function looking for OMP statements
51 and then for variables that must be replaced to satisfy data sharing
52 clauses. The second phase expands code for the constructs, as well as
53 re-gimplifying things when variables have been replaced with complex
54 expressions.
55
56 Final code generation is done by pass_expand_omp. The flowgraph is
57 scanned for parallel regions which are then moved to a new
58 function, to be invoked by the thread library. */
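/* Illustrative note (not part of the pass itself): for a minimal

     #pragma omp parallel
       body;

   the body is outlined into a child function and the region is
   expanded into roughly

     .omp_data_o.i = i;                              <- send shared vars
     __builtin_GOMP_parallel_start (foo.omp_fn.0, &.omp_data_o, 0);
     foo.omp_fn.0 (&.omp_data_o);                    <- encountering thread runs the body too
     __builtin_GOMP_parallel_end ();

   The exact runtime entry points, argument marshalling and temporary
   names are produced by the expansion code below; this sketch only
   shows the general shape.  */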
59
60 /* Context structure. Used to store information about each parallel
61 directive in the code. */
62
63 typedef struct omp_context
64 {
65 /* This field must be at the beginning, as we do "inheritance": Some
66 callback functions for tree-inline.c (e.g., omp_copy_decl)
67 receive a copy_body_data pointer that is up-casted to an
68 omp_context pointer. */
69 copy_body_data cb;
70
71 /* The tree of contexts corresponding to the encountered constructs. */
72 struct omp_context *outer;
73 gimple stmt;
74
75 /* Map variables to fields in a structure that allows communication
76 between sending and receiving threads. */
77 splay_tree field_map;
78 tree record_type;
79 tree sender_decl;
80 tree receiver_decl;
81
82 /* These are used just by task contexts, if task firstprivate fn is
83 needed. srecord_type is used to communicate from the thread
   84        that encountered the task construct to task firstprivate fn;
85 record_type is allocated by GOMP_task, initialized by task firstprivate
86 fn and passed to the task body fn. */
87 splay_tree sfield_map;
88 tree srecord_type;
89
90 /* A chain of variables to add to the top-level block surrounding the
91 construct. In the case of a parallel, this is in the child function. */
92 tree block_vars;
93
94 /* What to do with variables with implicitly determined sharing
95 attributes. */
96 enum omp_clause_default_kind default_kind;
97
   98   /* Nesting depth of this context.  Used to beautify error messages regarding
99 invalid gotos. The outermost ctx is depth 1, with depth 0 being
100 reserved for the main body of the function. */
101 int depth;
102
103 /* True if this parallel directive is nested within another. */
104 bool is_nested;
105 } omp_context;
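/* Illustrative note: for a nest such as

     #pragma omp parallel        <- omp_context A (depth 1)
     #pragma omp for             <- omp_context B (depth 2, outer == A)
       body

   scan_omp creates one omp_context per construct, chained through the
   OUTER field, with DEPTH counting the nesting from 1.  */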
106
107
108 struct omp_for_data_loop
109 {
110 tree v, n1, n2, step;
111 enum tree_code cond_code;
112 };
113
114 /* A structure describing the main elements of a parallel loop. */
115
116 struct omp_for_data
117 {
118 struct omp_for_data_loop loop;
119 tree chunk_size;
120 gimple for_stmt;
121 tree pre, iter_type;
122 int collapse;
123 bool have_nowait, have_ordered;
124 enum omp_clause_schedule_kind sched_kind;
125 struct omp_for_data_loop *loops;
126 };
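/* Illustrative note: for a loop like

     #pragma omp for schedule(dynamic, 8)
     for (i = 0; i < n; i += 2)

   extract_omp_for_data below fills in roughly loop.v = i, loop.n1 = 0,
   loop.n2 = n, loop.step = 2, loop.cond_code = LT_EXPR,
   sched_kind = OMP_CLAUSE_SCHEDULE_DYNAMIC, chunk_size = 8,
   collapse = 1 and have_nowait = have_ordered = false.  */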
127
128
129 static splay_tree all_contexts;
130 static int taskreg_nesting_level;
131 struct omp_region *root_omp_region;
132 static bitmap task_shared_vars;
133
134 static void scan_omp (gimple_seq, omp_context *);
135 static tree scan_omp_1_op (tree *, int *, void *);
136
137 #define WALK_SUBSTMTS \
138 case GIMPLE_BIND: \
139 case GIMPLE_TRY: \
140 case GIMPLE_CATCH: \
141 case GIMPLE_EH_FILTER: \
142 /* The sub-statements for these should be walked. */ \
143 *handled_ops_p = false; \
144 break;
145
146 /* Convenience function for calling scan_omp_1_op on tree operands. */
147
148 static inline tree
149 scan_omp_op (tree *tp, omp_context *ctx)
150 {
151 struct walk_stmt_info wi;
152
153 memset (&wi, 0, sizeof (wi));
154 wi.info = ctx;
155 wi.want_locations = true;
156
157 return walk_tree (tp, scan_omp_1_op, &wi, NULL);
158 }
159
160 static void lower_omp (gimple_seq, omp_context *);
161 static tree lookup_decl_in_outer_ctx (tree, omp_context *);
162 static tree maybe_lookup_decl_in_outer_ctx (tree, omp_context *);
163
164 /* Find an OpenMP clause of type KIND within CLAUSES. */
165
166 tree
167 find_omp_clause (tree clauses, enum omp_clause_code kind)
168 {
169 for (; clauses ; clauses = OMP_CLAUSE_CHAIN (clauses))
170 if (OMP_CLAUSE_CODE (clauses) == kind)
171 return clauses;
172
173 return NULL_TREE;
174 }
175
176 /* Return true if CTX is for an omp parallel. */
177
178 static inline bool
179 is_parallel_ctx (omp_context *ctx)
180 {
181 return gimple_code (ctx->stmt) == GIMPLE_OMP_PARALLEL;
182 }
183
184
185 /* Return true if CTX is for an omp task. */
186
187 static inline bool
188 is_task_ctx (omp_context *ctx)
189 {
190 return gimple_code (ctx->stmt) == GIMPLE_OMP_TASK;
191 }
192
193
194 /* Return true if CTX is for an omp parallel or omp task. */
195
196 static inline bool
197 is_taskreg_ctx (omp_context *ctx)
198 {
199 return gimple_code (ctx->stmt) == GIMPLE_OMP_PARALLEL
200 || gimple_code (ctx->stmt) == GIMPLE_OMP_TASK;
201 }
202
203
204 /* Return true if REGION is a combined parallel+workshare region. */
205
206 static inline bool
207 is_combined_parallel (struct omp_region *region)
208 {
209 return region->is_combined_parallel;
210 }
211
212
213 /* Extract the header elements of parallel loop FOR_STMT and store
214 them into *FD. */
215
216 static void
217 extract_omp_for_data (gimple for_stmt, struct omp_for_data *fd,
218 struct omp_for_data_loop *loops)
219 {
220 tree t, var, *collapse_iter, *collapse_count;
221 tree count = NULL_TREE, iter_type = long_integer_type_node;
222 struct omp_for_data_loop *loop;
223 int i;
224 struct omp_for_data_loop dummy_loop;
225
226 fd->for_stmt = for_stmt;
227 fd->pre = NULL;
228 fd->collapse = gimple_omp_for_collapse (for_stmt);
229 if (fd->collapse > 1)
230 fd->loops = loops;
231 else
232 fd->loops = &fd->loop;
233
234 fd->have_nowait = fd->have_ordered = false;
235 fd->sched_kind = OMP_CLAUSE_SCHEDULE_STATIC;
236 fd->chunk_size = NULL_TREE;
237 collapse_iter = NULL;
238 collapse_count = NULL;
239
240 for (t = gimple_omp_for_clauses (for_stmt); t ; t = OMP_CLAUSE_CHAIN (t))
241 switch (OMP_CLAUSE_CODE (t))
242 {
243 case OMP_CLAUSE_NOWAIT:
244 fd->have_nowait = true;
245 break;
246 case OMP_CLAUSE_ORDERED:
247 fd->have_ordered = true;
248 break;
249 case OMP_CLAUSE_SCHEDULE:
250 fd->sched_kind = OMP_CLAUSE_SCHEDULE_KIND (t);
251 fd->chunk_size = OMP_CLAUSE_SCHEDULE_CHUNK_EXPR (t);
252 break;
253 case OMP_CLAUSE_COLLAPSE:
254 if (fd->collapse > 1)
255 {
256 collapse_iter = &OMP_CLAUSE_COLLAPSE_ITERVAR (t);
257 collapse_count = &OMP_CLAUSE_COLLAPSE_COUNT (t);
258 }
259 default:
260 break;
261 }
262
263 /* FIXME: for now map schedule(auto) to schedule(static).
264 There should be analysis to determine whether all iterations
265 are approximately the same amount of work (then schedule(static)
266 is best) or if it varies (then schedule(dynamic,N) is better). */
267 if (fd->sched_kind == OMP_CLAUSE_SCHEDULE_AUTO)
268 {
269 fd->sched_kind = OMP_CLAUSE_SCHEDULE_STATIC;
270 gcc_assert (fd->chunk_size == NULL);
271 }
272 gcc_assert (fd->collapse == 1 || collapse_iter != NULL);
273 if (fd->sched_kind == OMP_CLAUSE_SCHEDULE_RUNTIME)
274 gcc_assert (fd->chunk_size == NULL);
275 else if (fd->chunk_size == NULL)
276 {
277 /* We only need to compute a default chunk size for ordered
278 static loops and dynamic loops. */
279 if (fd->sched_kind != OMP_CLAUSE_SCHEDULE_STATIC
280 || fd->have_ordered
281 || fd->collapse > 1)
282 fd->chunk_size = (fd->sched_kind == OMP_CLAUSE_SCHEDULE_STATIC)
283 ? integer_zero_node : integer_one_node;
284 }
285
286 for (i = 0; i < fd->collapse; i++)
287 {
288 if (fd->collapse == 1)
289 loop = &fd->loop;
290 else if (loops != NULL)
291 loop = loops + i;
292 else
293 loop = &dummy_loop;
294
295
296 loop->v = gimple_omp_for_index (for_stmt, i);
297 gcc_assert (SSA_VAR_P (loop->v));
298 gcc_assert (TREE_CODE (TREE_TYPE (loop->v)) == INTEGER_TYPE
299 || TREE_CODE (TREE_TYPE (loop->v)) == POINTER_TYPE);
300 var = TREE_CODE (loop->v) == SSA_NAME ? SSA_NAME_VAR (loop->v) : loop->v;
301 loop->n1 = gimple_omp_for_initial (for_stmt, i);
302
303 loop->cond_code = gimple_omp_for_cond (for_stmt, i);
304 loop->n2 = gimple_omp_for_final (for_stmt, i);
305 switch (loop->cond_code)
306 {
307 case LT_EXPR:
308 case GT_EXPR:
309 break;
310 case LE_EXPR:
311 if (POINTER_TYPE_P (TREE_TYPE (loop->n2)))
312 loop->n2 = fold_build2 (POINTER_PLUS_EXPR, TREE_TYPE (loop->n2),
313 loop->n2, size_one_node);
314 else
315 loop->n2 = fold_build2 (PLUS_EXPR, TREE_TYPE (loop->n2), loop->n2,
316 build_int_cst (TREE_TYPE (loop->n2), 1));
317 loop->cond_code = LT_EXPR;
318 break;
319 case GE_EXPR:
320 if (POINTER_TYPE_P (TREE_TYPE (loop->n2)))
321 loop->n2 = fold_build2 (POINTER_PLUS_EXPR, TREE_TYPE (loop->n2),
322 loop->n2, size_int (-1));
323 else
324 loop->n2 = fold_build2 (MINUS_EXPR, TREE_TYPE (loop->n2), loop->n2,
325 build_int_cst (TREE_TYPE (loop->n2), 1));
326 loop->cond_code = GT_EXPR;
327 break;
328 default:
329 gcc_unreachable ();
330 }
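      /* At this point "i <= n" has been normalized to "i < n + 1" and
	 "i >= n" to "i > n - 1", so only LT_EXPR and GT_EXPR remain.  */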
331
332 t = gimple_omp_for_incr (for_stmt, i);
333 gcc_assert (TREE_OPERAND (t, 0) == var);
334 switch (TREE_CODE (t))
335 {
336 case PLUS_EXPR:
337 case POINTER_PLUS_EXPR:
338 loop->step = TREE_OPERAND (t, 1);
339 break;
340 case MINUS_EXPR:
341 loop->step = TREE_OPERAND (t, 1);
342 loop->step = fold_build1 (NEGATE_EXPR, TREE_TYPE (loop->step),
343 loop->step);
344 break;
345 default:
346 gcc_unreachable ();
347 }
348
349 if (iter_type != long_long_unsigned_type_node)
350 {
351 if (POINTER_TYPE_P (TREE_TYPE (loop->v)))
352 iter_type = long_long_unsigned_type_node;
353 else if (TYPE_UNSIGNED (TREE_TYPE (loop->v))
354 && TYPE_PRECISION (TREE_TYPE (loop->v))
355 >= TYPE_PRECISION (iter_type))
356 {
357 tree n;
358
359 if (loop->cond_code == LT_EXPR)
360 n = fold_build2 (PLUS_EXPR, TREE_TYPE (loop->v),
361 loop->n2, loop->step);
362 else
363 n = loop->n1;
364 if (TREE_CODE (n) != INTEGER_CST
365 || tree_int_cst_lt (TYPE_MAX_VALUE (iter_type), n))
366 iter_type = long_long_unsigned_type_node;
367 }
368 else if (TYPE_PRECISION (TREE_TYPE (loop->v))
369 > TYPE_PRECISION (iter_type))
370 {
371 tree n1, n2;
372
373 if (loop->cond_code == LT_EXPR)
374 {
375 n1 = loop->n1;
376 n2 = fold_build2 (PLUS_EXPR, TREE_TYPE (loop->v),
377 loop->n2, loop->step);
378 }
379 else
380 {
381 n1 = fold_build2 (MINUS_EXPR, TREE_TYPE (loop->v),
382 loop->n2, loop->step);
383 n2 = loop->n1;
384 }
385 if (TREE_CODE (n1) != INTEGER_CST
386 || TREE_CODE (n2) != INTEGER_CST
387 || !tree_int_cst_lt (TYPE_MIN_VALUE (iter_type), n1)
388 || !tree_int_cst_lt (n2, TYPE_MAX_VALUE (iter_type)))
389 iter_type = long_long_unsigned_type_node;
390 }
391 }
392
393 if (collapse_count && *collapse_count == NULL)
394 {
395 if ((i == 0 || count != NULL_TREE)
396 && TREE_CODE (TREE_TYPE (loop->v)) == INTEGER_TYPE
397 && TREE_CONSTANT (loop->n1)
398 && TREE_CONSTANT (loop->n2)
399 && TREE_CODE (loop->step) == INTEGER_CST)
400 {
401 tree itype = TREE_TYPE (loop->v);
402
403 if (POINTER_TYPE_P (itype))
404 itype
405 = lang_hooks.types.type_for_size (TYPE_PRECISION (itype), 0);
406 t = build_int_cst (itype, (loop->cond_code == LT_EXPR ? -1 : 1));
407 t = fold_build2 (PLUS_EXPR, itype,
408 fold_convert (itype, loop->step), t);
409 t = fold_build2 (PLUS_EXPR, itype, t,
410 fold_convert (itype, loop->n2));
411 t = fold_build2 (MINUS_EXPR, itype, t,
412 fold_convert (itype, loop->n1));
413 if (TYPE_UNSIGNED (itype) && loop->cond_code == GT_EXPR)
414 t = fold_build2 (TRUNC_DIV_EXPR, itype,
415 fold_build1 (NEGATE_EXPR, itype, t),
416 fold_build1 (NEGATE_EXPR, itype,
417 fold_convert (itype,
418 loop->step)));
419 else
420 t = fold_build2 (TRUNC_DIV_EXPR, itype, t,
421 fold_convert (itype, loop->step));
422 t = fold_convert (long_long_unsigned_type_node, t);
423 if (count != NULL_TREE)
424 count = fold_build2 (MULT_EXPR, long_long_unsigned_type_node,
425 count, t);
426 else
427 count = t;
428 if (TREE_CODE (count) != INTEGER_CST)
429 count = NULL_TREE;
430 }
431 else
432 count = NULL_TREE;
433 }
434 }
435
436 if (count)
437 {
438 if (!tree_int_cst_lt (count, TYPE_MAX_VALUE (long_integer_type_node)))
439 iter_type = long_long_unsigned_type_node;
440 else
441 iter_type = long_integer_type_node;
442 }
443 else if (collapse_iter && *collapse_iter != NULL)
444 iter_type = TREE_TYPE (*collapse_iter);
445 fd->iter_type = iter_type;
446 if (collapse_iter && *collapse_iter == NULL)
447 *collapse_iter = create_tmp_var (iter_type, ".iter");
448 if (collapse_count && *collapse_count == NULL)
449 {
450 if (count)
451 *collapse_count = fold_convert (iter_type, count);
452 else
453 *collapse_count = create_tmp_var (iter_type, ".count");
454 }
455
456 if (fd->collapse > 1)
457 {
458 fd->loop.v = *collapse_iter;
459 fd->loop.n1 = build_int_cst (TREE_TYPE (fd->loop.v), 0);
460 fd->loop.n2 = *collapse_count;
461 fd->loop.step = build_int_cst (TREE_TYPE (fd->loop.v), 1);
462 fd->loop.cond_code = LT_EXPR;
463 }
464 }
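/* Illustrative note: for a collapsed nest such as

     #pragma omp for collapse(2)
     for (i = 0; i < 4; i++)
       for (j = 0; j < 8; j++)

   the code above folds the nest into a single logical iteration space:
   fd->loop runs a synthetic .iter variable from 0 up to .count (the
   constant 32 here) with step 1 and LT_EXPR, while fd->loops[] still
   records the original i and j loops.  */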
465
466
467 /* Given two blocks PAR_ENTRY_BB and WS_ENTRY_BB such that WS_ENTRY_BB
468 is the immediate dominator of PAR_ENTRY_BB, return true if there
469 are no data dependencies that would prevent expanding the parallel
470 directive at PAR_ENTRY_BB as a combined parallel+workshare region.
471
472 When expanding a combined parallel+workshare region, the call to
473 the child function may need additional arguments in the case of
474 GIMPLE_OMP_FOR regions. In some cases, these arguments are
475 computed out of variables passed in from the parent to the child
476 via 'struct .omp_data_s'. For instance:
477
478 #pragma omp parallel for schedule (guided, i * 4)
479 for (j ...)
480
481 Is lowered into:
482
483 # BLOCK 2 (PAR_ENTRY_BB)
484 .omp_data_o.i = i;
  485     #pragma omp parallel [child fn: bar.omp_fn.0 ( ..., D.1598)]
486
487 # BLOCK 3 (WS_ENTRY_BB)
488 .omp_data_i = &.omp_data_o;
489 D.1667 = .omp_data_i->i;
490 D.1598 = D.1667 * 4;
491 #pragma omp for schedule (guided, D.1598)
492
493 When we outline the parallel region, the call to the child function
494 'bar.omp_fn.0' will need the value D.1598 in its argument list, but
495 that value is computed *after* the call site. So, in principle we
496 cannot do the transformation.
497
498 To see whether the code in WS_ENTRY_BB blocks the combined
499 parallel+workshare call, we collect all the variables used in the
  500     GIMPLE_OMP_FOR header and check whether they appear on the LHS of any
501 statement in WS_ENTRY_BB. If so, then we cannot emit the combined
502 call.
503
504 FIXME. If we had the SSA form built at this point, we could merely
505 hoist the code in block 3 into block 2 and be done with it. But at
506 this point we don't have dataflow information and though we could
507 hack something up here, it is really not worth the aggravation. */
508
509 static bool
510 workshare_safe_to_combine_p (basic_block par_entry_bb, basic_block ws_entry_bb)
511 {
512 struct omp_for_data fd;
513 gimple par_stmt, ws_stmt;
514
515 par_stmt = last_stmt (par_entry_bb);
516 ws_stmt = last_stmt (ws_entry_bb);
517
518 if (gimple_code (ws_stmt) == GIMPLE_OMP_SECTIONS)
519 return true;
520
521 gcc_assert (gimple_code (ws_stmt) == GIMPLE_OMP_FOR);
522
523 extract_omp_for_data (ws_stmt, &fd, NULL);
524
525 if (fd.collapse > 1 && TREE_CODE (fd.loop.n2) != INTEGER_CST)
526 return false;
527 if (fd.iter_type != long_integer_type_node)
528 return false;
529
530 /* FIXME. We give up too easily here. If any of these arguments
531 are not constants, they will likely involve variables that have
532 been mapped into fields of .omp_data_s for sharing with the child
533 function. With appropriate data flow, it would be possible to
534 see through this. */
535 if (!is_gimple_min_invariant (fd.loop.n1)
536 || !is_gimple_min_invariant (fd.loop.n2)
537 || !is_gimple_min_invariant (fd.loop.step)
538 || (fd.chunk_size && !is_gimple_min_invariant (fd.chunk_size)))
539 return false;
540
541 return true;
542 }
543
544
545 /* Collect additional arguments needed to emit a combined
546 parallel+workshare call. WS_STMT is the workshare directive being
547 expanded. */
548
549 static tree
550 get_ws_args_for (gimple ws_stmt)
551 {
552 tree t;
553
554 if (gimple_code (ws_stmt) == GIMPLE_OMP_FOR)
555 {
556 struct omp_for_data fd;
557 tree ws_args;
558
559 extract_omp_for_data (ws_stmt, &fd, NULL);
560
561 ws_args = NULL_TREE;
562 if (fd.chunk_size)
563 {
564 t = fold_convert (long_integer_type_node, fd.chunk_size);
565 ws_args = tree_cons (NULL, t, ws_args);
566 }
567
568 t = fold_convert (long_integer_type_node, fd.loop.step);
569 ws_args = tree_cons (NULL, t, ws_args);
570
571 t = fold_convert (long_integer_type_node, fd.loop.n2);
572 ws_args = tree_cons (NULL, t, ws_args);
573
574 t = fold_convert (long_integer_type_node, fd.loop.n1);
575 ws_args = tree_cons (NULL, t, ws_args);
576
577 return ws_args;
578 }
579 else if (gimple_code (ws_stmt) == GIMPLE_OMP_SECTIONS)
580 {
581 /* Number of sections is equal to the number of edges from the
582 GIMPLE_OMP_SECTIONS_SWITCH statement, except for the one to
583 the exit of the sections region. */
584 basic_block bb = single_succ (gimple_bb (ws_stmt));
585 t = build_int_cst (unsigned_type_node, EDGE_COUNT (bb->succs) - 1);
586 t = tree_cons (NULL, t, NULL);
587 return t;
588 }
589
590 gcc_unreachable ();
591 }
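/* Illustrative note: because each tree_cons above prepends, the WS_ARGS
   list built for a GIMPLE_OMP_FOR ends up in the order
   (n1, n2, step[, chunk_size]), which is the order in which these extra
   arguments are later passed, after the child function, data pointer and
   thread count, to the combined GOMP_parallel_loop_*_start runtime call.  */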
592
593
594 /* Discover whether REGION is a combined parallel+workshare region. */
595
596 static void
597 determine_parallel_type (struct omp_region *region)
598 {
599 basic_block par_entry_bb, par_exit_bb;
600 basic_block ws_entry_bb, ws_exit_bb;
601
602 if (region == NULL || region->inner == NULL
603 || region->exit == NULL || region->inner->exit == NULL
604 || region->inner->cont == NULL)
605 return;
606
607 /* We only support parallel+for and parallel+sections. */
608 if (region->type != GIMPLE_OMP_PARALLEL
609 || (region->inner->type != GIMPLE_OMP_FOR
610 && region->inner->type != GIMPLE_OMP_SECTIONS))
611 return;
612
613 /* Check for perfect nesting PAR_ENTRY_BB -> WS_ENTRY_BB and
614 WS_EXIT_BB -> PAR_EXIT_BB. */
615 par_entry_bb = region->entry;
616 par_exit_bb = region->exit;
617 ws_entry_bb = region->inner->entry;
618 ws_exit_bb = region->inner->exit;
619
620 if (single_succ (par_entry_bb) == ws_entry_bb
621 && single_succ (ws_exit_bb) == par_exit_bb
622 && workshare_safe_to_combine_p (par_entry_bb, ws_entry_bb)
623 && (gimple_omp_parallel_combined_p (last_stmt (par_entry_bb))
624 || (last_and_only_stmt (ws_entry_bb)
625 && last_and_only_stmt (par_exit_bb))))
626 {
627 gimple ws_stmt = last_stmt (ws_entry_bb);
628
629 if (region->inner->type == GIMPLE_OMP_FOR)
630 {
631 /* If this is a combined parallel loop, we need to determine
632 whether or not to use the combined library calls. There
633 are two cases where we do not apply the transformation:
634 static loops and any kind of ordered loop. In the first
635 case, we already open code the loop so there is no need
636 to do anything else. In the latter case, the combined
637 parallel loop call would still need extra synchronization
638 to implement ordered semantics, so there would not be any
639 gain in using the combined call. */
640 tree clauses = gimple_omp_for_clauses (ws_stmt);
641 tree c = find_omp_clause (clauses, OMP_CLAUSE_SCHEDULE);
642 if (c == NULL
643 || OMP_CLAUSE_SCHEDULE_KIND (c) == OMP_CLAUSE_SCHEDULE_STATIC
644 || find_omp_clause (clauses, OMP_CLAUSE_ORDERED))
645 {
646 region->is_combined_parallel = false;
647 region->inner->is_combined_parallel = false;
648 return;
649 }
650 }
651
652 region->is_combined_parallel = true;
653 region->inner->is_combined_parallel = true;
654 region->ws_args = get_ws_args_for (ws_stmt);
655 }
656 }
657
658
659 /* Return true if EXPR is variable sized. */
660
661 static inline bool
662 is_variable_sized (const_tree expr)
663 {
664 return !TREE_CONSTANT (TYPE_SIZE_UNIT (TREE_TYPE (expr)));
665 }
666
667 /* Return true if DECL is a reference type. */
668
669 static inline bool
670 is_reference (tree decl)
671 {
672 return lang_hooks.decls.omp_privatize_by_reference (decl);
673 }
674
675 /* Lookup variables in the decl or field splay trees. The "maybe" form
  676    allows the variable not to have been entered; otherwise we
  677    assert that the variable must have been entered.  */
678
679 static inline tree
680 lookup_decl (tree var, omp_context *ctx)
681 {
682 tree *n;
683 n = (tree *) pointer_map_contains (ctx->cb.decl_map, var);
684 return *n;
685 }
686
687 static inline tree
688 maybe_lookup_decl (const_tree var, omp_context *ctx)
689 {
690 tree *n;
691 n = (tree *) pointer_map_contains (ctx->cb.decl_map, var);
692 return n ? *n : NULL_TREE;
693 }
694
695 static inline tree
696 lookup_field (tree var, omp_context *ctx)
697 {
698 splay_tree_node n;
699 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) var);
700 return (tree) n->value;
701 }
702
703 static inline tree
704 lookup_sfield (tree var, omp_context *ctx)
705 {
706 splay_tree_node n;
707 n = splay_tree_lookup (ctx->sfield_map
708 ? ctx->sfield_map : ctx->field_map,
709 (splay_tree_key) var);
710 return (tree) n->value;
711 }
712
713 static inline tree
714 maybe_lookup_field (tree var, omp_context *ctx)
715 {
716 splay_tree_node n;
717 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) var);
718 return n ? (tree) n->value : NULL_TREE;
719 }
720
721 /* Return true if DECL should be copied by pointer. SHARED_CTX is
722 the parallel context if DECL is to be shared. */
723
724 static bool
725 use_pointer_for_field (tree decl, omp_context *shared_ctx)
726 {
727 if (AGGREGATE_TYPE_P (TREE_TYPE (decl)))
728 return true;
729
730 /* We can only use copy-in/copy-out semantics for shared variables
731 when we know the value is not accessible from an outer scope. */
732 if (shared_ctx)
733 {
734 /* ??? Trivially accessible from anywhere. But why would we even
735 be passing an address in this case? Should we simply assert
736 this to be false, or should we have a cleanup pass that removes
737 these from the list of mappings? */
738 if (TREE_STATIC (decl) || DECL_EXTERNAL (decl))
739 return true;
740
741 /* For variables with DECL_HAS_VALUE_EXPR_P set, we cannot tell
742 without analyzing the expression whether or not its location
743 is accessible to anyone else. In the case of nested parallel
744 regions it certainly may be. */
745 if (TREE_CODE (decl) != RESULT_DECL && DECL_HAS_VALUE_EXPR_P (decl))
746 return true;
747
748 /* Do not use copy-in/copy-out for variables that have their
749 address taken. */
750 if (TREE_ADDRESSABLE (decl))
751 return true;
752
753 /* Disallow copy-in/out in nested parallel if
754 decl is shared in outer parallel, otherwise
755 each thread could store the shared variable
756 in its own copy-in location, making the
757 variable no longer really shared. */
758 if (!TREE_READONLY (decl) && shared_ctx->is_nested)
759 {
760 omp_context *up;
761
762 for (up = shared_ctx->outer; up; up = up->outer)
763 if (is_taskreg_ctx (up) && maybe_lookup_decl (decl, up))
764 break;
765
766 if (up)
767 {
768 tree c;
769
770 for (c = gimple_omp_taskreg_clauses (up->stmt);
771 c; c = OMP_CLAUSE_CHAIN (c))
772 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_SHARED
773 && OMP_CLAUSE_DECL (c) == decl)
774 break;
775
776 if (c)
777 return true;
778 }
779 }
780
781 /* For tasks avoid using copy-in/out, unless they are readonly
782 (in which case just copy-in is used). As tasks can be
783 deferred or executed in different thread, when GOMP_task
784 returns, the task hasn't necessarily terminated. */
785 if (!TREE_READONLY (decl) && is_task_ctx (shared_ctx))
786 {
787 tree outer = maybe_lookup_decl_in_outer_ctx (decl, shared_ctx);
788 if (is_gimple_reg (outer))
789 {
790 /* Taking address of OUTER in lower_send_shared_vars
791 might need regimplification of everything that uses the
792 variable. */
793 if (!task_shared_vars)
794 task_shared_vars = BITMAP_ALLOC (NULL);
795 bitmap_set_bit (task_shared_vars, DECL_UID (outer));
796 TREE_ADDRESSABLE (outer) = 1;
797 }
798 return true;
799 }
800 }
801
802 return false;
803 }
804
805 /* Create a new VAR_DECL and copy information from VAR to it. */
806
807 tree
808 copy_var_decl (tree var, tree name, tree type)
809 {
810 tree copy = build_decl (DECL_SOURCE_LOCATION (var), VAR_DECL, name, type);
811
812 TREE_ADDRESSABLE (copy) = TREE_ADDRESSABLE (var);
813 TREE_THIS_VOLATILE (copy) = TREE_THIS_VOLATILE (var);
814 DECL_GIMPLE_REG_P (copy) = DECL_GIMPLE_REG_P (var);
815 DECL_ARTIFICIAL (copy) = DECL_ARTIFICIAL (var);
816 DECL_IGNORED_P (copy) = DECL_IGNORED_P (var);
817 DECL_CONTEXT (copy) = DECL_CONTEXT (var);
818 TREE_USED (copy) = 1;
819 DECL_SEEN_IN_BIND_EXPR_P (copy) = 1;
820
821 return copy;
822 }
823
824 /* Construct a new automatic decl similar to VAR. */
825
826 static tree
827 omp_copy_decl_2 (tree var, tree name, tree type, omp_context *ctx)
828 {
829 tree copy = copy_var_decl (var, name, type);
830
831 DECL_CONTEXT (copy) = current_function_decl;
832 TREE_CHAIN (copy) = ctx->block_vars;
833 ctx->block_vars = copy;
834
835 return copy;
836 }
837
838 static tree
839 omp_copy_decl_1 (tree var, omp_context *ctx)
840 {
841 return omp_copy_decl_2 (var, DECL_NAME (var), TREE_TYPE (var), ctx);
842 }
843
844 /* Build tree nodes to access the field for VAR on the receiver side. */
845
846 static tree
847 build_receiver_ref (tree var, bool by_ref, omp_context *ctx)
848 {
849 tree x, field = lookup_field (var, ctx);
850
851 /* If the receiver record type was remapped in the child function,
852 remap the field into the new record type. */
853 x = maybe_lookup_field (field, ctx);
854 if (x != NULL)
855 field = x;
856
857 x = build_fold_indirect_ref (ctx->receiver_decl);
858 x = build3 (COMPONENT_REF, TREE_TYPE (field), x, field, NULL);
859 if (by_ref)
860 x = build_fold_indirect_ref (x);
861
862 return x;
863 }
864
865 /* Build tree nodes to access VAR in the scope outer to CTX. In the case
866 of a parallel, this is a component reference; for workshare constructs
867 this is some variable. */
868
869 static tree
870 build_outer_var_ref (tree var, omp_context *ctx)
871 {
872 tree x;
873
874 if (is_global_var (maybe_lookup_decl_in_outer_ctx (var, ctx)))
875 x = var;
876 else if (is_variable_sized (var))
877 {
878 x = TREE_OPERAND (DECL_VALUE_EXPR (var), 0);
879 x = build_outer_var_ref (x, ctx);
880 x = build_fold_indirect_ref (x);
881 }
882 else if (is_taskreg_ctx (ctx))
883 {
884 bool by_ref = use_pointer_for_field (var, NULL);
885 x = build_receiver_ref (var, by_ref, ctx);
886 }
887 else if (ctx->outer)
888 x = lookup_decl (var, ctx->outer);
889 else if (is_reference (var))
890 /* This can happen with orphaned constructs. If var is reference, it is
891 possible it is shared and as such valid. */
892 x = var;
893 else
894 gcc_unreachable ();
895
896 if (is_reference (var))
897 x = build_fold_indirect_ref (x);
898
899 return x;
900 }
901
902 /* Build tree nodes to access the field for VAR on the sender side. */
903
904 static tree
905 build_sender_ref (tree var, omp_context *ctx)
906 {
907 tree field = lookup_sfield (var, ctx);
908 return build3 (COMPONENT_REF, TREE_TYPE (field),
909 ctx->sender_decl, field, NULL);
910 }
911
912 /* Add a new field for VAR inside the structure CTX->SENDER_DECL. */
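/* Note on MASK (inferred from the body below): bit 0 requests a field in
   CTX->RECORD_TYPE, tracked in CTX->FIELD_MAP; bit 1 requests a field in
   CTX->SRECORD_TYPE, tracked in CTX->SFIELD_MAP; most callers pass 3 to
   get both.  */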
913
914 static void
915 install_var_field (tree var, bool by_ref, int mask, omp_context *ctx)
916 {
917 tree field, type, sfield = NULL_TREE;
918
919 gcc_assert ((mask & 1) == 0
920 || !splay_tree_lookup (ctx->field_map, (splay_tree_key) var));
921 gcc_assert ((mask & 2) == 0 || !ctx->sfield_map
922 || !splay_tree_lookup (ctx->sfield_map, (splay_tree_key) var));
923
924 type = TREE_TYPE (var);
925 if (by_ref)
926 type = build_pointer_type (type);
927 else if ((mask & 3) == 1 && is_reference (var))
928 type = TREE_TYPE (type);
929
930 field = build_decl (DECL_SOURCE_LOCATION (var),
931 FIELD_DECL, DECL_NAME (var), type);
932
933 /* Remember what variable this field was created for. This does have a
934 side effect of making dwarf2out ignore this member, so for helpful
935 debugging we clear it later in delete_omp_context. */
936 DECL_ABSTRACT_ORIGIN (field) = var;
937 if (type == TREE_TYPE (var))
938 {
939 DECL_ALIGN (field) = DECL_ALIGN (var);
940 DECL_USER_ALIGN (field) = DECL_USER_ALIGN (var);
941 TREE_THIS_VOLATILE (field) = TREE_THIS_VOLATILE (var);
942 }
943 else
944 DECL_ALIGN (field) = TYPE_ALIGN (type);
945
946 if ((mask & 3) == 3)
947 {
948 insert_field_into_struct (ctx->record_type, field);
949 if (ctx->srecord_type)
950 {
951 sfield = build_decl (DECL_SOURCE_LOCATION (var),
952 FIELD_DECL, DECL_NAME (var), type);
953 DECL_ABSTRACT_ORIGIN (sfield) = var;
954 DECL_ALIGN (sfield) = DECL_ALIGN (field);
955 DECL_USER_ALIGN (sfield) = DECL_USER_ALIGN (field);
956 TREE_THIS_VOLATILE (sfield) = TREE_THIS_VOLATILE (field);
957 insert_field_into_struct (ctx->srecord_type, sfield);
958 }
959 }
960 else
961 {
962 if (ctx->srecord_type == NULL_TREE)
963 {
964 tree t;
965
966 ctx->srecord_type = lang_hooks.types.make_type (RECORD_TYPE);
967 ctx->sfield_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
968 for (t = TYPE_FIELDS (ctx->record_type); t ; t = TREE_CHAIN (t))
969 {
970 sfield = build_decl (DECL_SOURCE_LOCATION (var),
971 FIELD_DECL, DECL_NAME (t), TREE_TYPE (t));
972 DECL_ABSTRACT_ORIGIN (sfield) = DECL_ABSTRACT_ORIGIN (t);
973 insert_field_into_struct (ctx->srecord_type, sfield);
974 splay_tree_insert (ctx->sfield_map,
975 (splay_tree_key) DECL_ABSTRACT_ORIGIN (t),
976 (splay_tree_value) sfield);
977 }
978 }
979 sfield = field;
980 insert_field_into_struct ((mask & 1) ? ctx->record_type
981 : ctx->srecord_type, field);
982 }
983
984 if (mask & 1)
985 splay_tree_insert (ctx->field_map, (splay_tree_key) var,
986 (splay_tree_value) field);
987 if ((mask & 2) && ctx->sfield_map)
988 splay_tree_insert (ctx->sfield_map, (splay_tree_key) var,
989 (splay_tree_value) sfield);
990 }
991
992 static tree
993 install_var_local (tree var, omp_context *ctx)
994 {
995 tree new_var = omp_copy_decl_1 (var, ctx);
996 insert_decl_map (&ctx->cb, var, new_var);
997 return new_var;
998 }
999
1000 /* Adjust the replacement for DECL in CTX for the new context. This means
1001 copying the DECL_VALUE_EXPR, and fixing up the type. */
1002
1003 static void
1004 fixup_remapped_decl (tree decl, omp_context *ctx, bool private_debug)
1005 {
1006 tree new_decl, size;
1007
1008 new_decl = lookup_decl (decl, ctx);
1009
1010 TREE_TYPE (new_decl) = remap_type (TREE_TYPE (decl), &ctx->cb);
1011
1012 if ((!TREE_CONSTANT (DECL_SIZE (new_decl)) || private_debug)
1013 && DECL_HAS_VALUE_EXPR_P (decl))
1014 {
1015 tree ve = DECL_VALUE_EXPR (decl);
1016 walk_tree (&ve, copy_tree_body_r, &ctx->cb, NULL);
1017 SET_DECL_VALUE_EXPR (new_decl, ve);
1018 DECL_HAS_VALUE_EXPR_P (new_decl) = 1;
1019 }
1020
1021 if (!TREE_CONSTANT (DECL_SIZE (new_decl)))
1022 {
1023 size = remap_decl (DECL_SIZE (decl), &ctx->cb);
1024 if (size == error_mark_node)
1025 size = TYPE_SIZE (TREE_TYPE (new_decl));
1026 DECL_SIZE (new_decl) = size;
1027
1028 size = remap_decl (DECL_SIZE_UNIT (decl), &ctx->cb);
1029 if (size == error_mark_node)
1030 size = TYPE_SIZE_UNIT (TREE_TYPE (new_decl));
1031 DECL_SIZE_UNIT (new_decl) = size;
1032 }
1033 }
1034
1035 /* The callback for remap_decl. Search all containing contexts for a
1036 mapping of the variable; this avoids having to duplicate the splay
1037 tree ahead of time. We know a mapping doesn't already exist in the
1038 given context. Create new mappings to implement default semantics. */
1039
1040 static tree
1041 omp_copy_decl (tree var, copy_body_data *cb)
1042 {
1043 omp_context *ctx = (omp_context *) cb;
1044 tree new_var;
1045
1046 if (TREE_CODE (var) == LABEL_DECL)
1047 {
1048 new_var = create_artificial_label (DECL_SOURCE_LOCATION (var));
1049 DECL_CONTEXT (new_var) = current_function_decl;
1050 insert_decl_map (&ctx->cb, var, new_var);
1051 return new_var;
1052 }
1053
1054 while (!is_taskreg_ctx (ctx))
1055 {
1056 ctx = ctx->outer;
1057 if (ctx == NULL)
1058 return var;
1059 new_var = maybe_lookup_decl (var, ctx);
1060 if (new_var)
1061 return new_var;
1062 }
1063
1064 if (is_global_var (var) || decl_function_context (var) != ctx->cb.src_fn)
1065 return var;
1066
1067 return error_mark_node;
1068 }
1069
1070
1071 /* Return the parallel region associated with STMT. */
1072
1073 /* Debugging dumps for parallel regions. */
1074 void dump_omp_region (FILE *, struct omp_region *, int);
1075 void debug_omp_region (struct omp_region *);
1076 void debug_all_omp_regions (void);
1077
1078 /* Dump the parallel region tree rooted at REGION. */
1079
1080 void
1081 dump_omp_region (FILE *file, struct omp_region *region, int indent)
1082 {
1083 fprintf (file, "%*sbb %d: %s\n", indent, "", region->entry->index,
1084 gimple_code_name[region->type]);
1085
1086 if (region->inner)
1087 dump_omp_region (file, region->inner, indent + 4);
1088
1089 if (region->cont)
1090 {
1091 fprintf (file, "%*sbb %d: GIMPLE_OMP_CONTINUE\n", indent, "",
1092 region->cont->index);
1093 }
1094
1095 if (region->exit)
1096 fprintf (file, "%*sbb %d: GIMPLE_OMP_RETURN\n", indent, "",
1097 region->exit->index);
1098 else
1099 fprintf (file, "%*s[no exit marker]\n", indent, "");
1100
1101 if (region->next)
1102 dump_omp_region (file, region->next, indent);
1103 }
1104
1105 void
1106 debug_omp_region (struct omp_region *region)
1107 {
1108 dump_omp_region (stderr, region, 0);
1109 }
1110
1111 void
1112 debug_all_omp_regions (void)
1113 {
1114 dump_omp_region (stderr, root_omp_region, 0);
1115 }
1116
1117
 1118 /* Create a new parallel region starting at BB inside region PARENT.  */
1119
1120 struct omp_region *
1121 new_omp_region (basic_block bb, enum gimple_code type,
1122 struct omp_region *parent)
1123 {
1124 struct omp_region *region = XCNEW (struct omp_region);
1125
1126 region->outer = parent;
1127 region->entry = bb;
1128 region->type = type;
1129
1130 if (parent)
1131 {
1132 /* This is a nested region. Add it to the list of inner
1133 regions in PARENT. */
1134 region->next = parent->inner;
1135 parent->inner = region;
1136 }
1137 else
1138 {
1139 /* This is a toplevel region. Add it to the list of toplevel
1140 regions in ROOT_OMP_REGION. */
1141 region->next = root_omp_region;
1142 root_omp_region = region;
1143 }
1144
1145 return region;
1146 }
1147
1148 /* Release the memory associated with the region tree rooted at REGION. */
1149
1150 static void
1151 free_omp_region_1 (struct omp_region *region)
1152 {
1153 struct omp_region *i, *n;
1154
1155 for (i = region->inner; i ; i = n)
1156 {
1157 n = i->next;
1158 free_omp_region_1 (i);
1159 }
1160
1161 free (region);
1162 }
1163
1164 /* Release the memory for the entire omp region tree. */
1165
1166 void
1167 free_omp_regions (void)
1168 {
1169 struct omp_region *r, *n;
1170 for (r = root_omp_region; r ; r = n)
1171 {
1172 n = r->next;
1173 free_omp_region_1 (r);
1174 }
1175 root_omp_region = NULL;
1176 }
1177
1178
1179 /* Create a new context, with OUTER_CTX being the surrounding context. */
1180
1181 static omp_context *
1182 new_omp_context (gimple stmt, omp_context *outer_ctx)
1183 {
1184 omp_context *ctx = XCNEW (omp_context);
1185
1186 splay_tree_insert (all_contexts, (splay_tree_key) stmt,
1187 (splay_tree_value) ctx);
1188 ctx->stmt = stmt;
1189
1190 if (outer_ctx)
1191 {
1192 ctx->outer = outer_ctx;
1193 ctx->cb = outer_ctx->cb;
1194 ctx->cb.block = NULL;
1195 ctx->depth = outer_ctx->depth + 1;
1196 }
1197 else
1198 {
1199 ctx->cb.src_fn = current_function_decl;
1200 ctx->cb.dst_fn = current_function_decl;
1201 ctx->cb.src_node = cgraph_node (current_function_decl);
1202 ctx->cb.dst_node = ctx->cb.src_node;
1203 ctx->cb.src_cfun = cfun;
1204 ctx->cb.copy_decl = omp_copy_decl;
1205 ctx->cb.eh_region = -1;
1206 ctx->cb.transform_call_graph_edges = CB_CGE_MOVE;
1207 ctx->depth = 1;
1208 }
1209
1210 ctx->cb.decl_map = pointer_map_create ();
1211
1212 return ctx;
1213 }
1214
1215 static gimple_seq maybe_catch_exception (gimple_seq);
1216
1217 /* Finalize task copyfn. */
1218
1219 static void
1220 finalize_task_copyfn (gimple task_stmt)
1221 {
1222 struct function *child_cfun;
1223 tree child_fn, old_fn;
1224 gimple_seq seq, new_seq;
1225 gimple bind;
1226
1227 child_fn = gimple_omp_task_copy_fn (task_stmt);
1228 if (child_fn == NULL_TREE)
1229 return;
1230
1231 child_cfun = DECL_STRUCT_FUNCTION (child_fn);
1232
1233 /* Inform the callgraph about the new function. */
1234 DECL_STRUCT_FUNCTION (child_fn)->curr_properties
1235 = cfun->curr_properties;
1236
1237 old_fn = current_function_decl;
1238 push_cfun (child_cfun);
1239 current_function_decl = child_fn;
1240 bind = gimplify_body (&DECL_SAVED_TREE (child_fn), child_fn, false);
1241 seq = gimple_seq_alloc ();
1242 gimple_seq_add_stmt (&seq, bind);
1243 new_seq = maybe_catch_exception (seq);
1244 if (new_seq != seq)
1245 {
1246 bind = gimple_build_bind (NULL, new_seq, NULL);
1247 seq = gimple_seq_alloc ();
1248 gimple_seq_add_stmt (&seq, bind);
1249 }
1250 gimple_set_body (child_fn, seq);
1251 pop_cfun ();
1252 current_function_decl = old_fn;
1253
1254 cgraph_add_new_function (child_fn, false);
1255 }
1256
 1257 /* Destroy an omp_context data structure.  Called through the splay tree
1258 value delete callback. */
1259
1260 static void
1261 delete_omp_context (splay_tree_value value)
1262 {
1263 omp_context *ctx = (omp_context *) value;
1264
1265 pointer_map_destroy (ctx->cb.decl_map);
1266
1267 if (ctx->field_map)
1268 splay_tree_delete (ctx->field_map);
1269 if (ctx->sfield_map)
1270 splay_tree_delete (ctx->sfield_map);
1271
1272 /* We hijacked DECL_ABSTRACT_ORIGIN earlier. We need to clear it before
1273 it produces corrupt debug information. */
1274 if (ctx->record_type)
1275 {
1276 tree t;
1277 for (t = TYPE_FIELDS (ctx->record_type); t ; t = TREE_CHAIN (t))
1278 DECL_ABSTRACT_ORIGIN (t) = NULL;
1279 }
1280 if (ctx->srecord_type)
1281 {
1282 tree t;
1283 for (t = TYPE_FIELDS (ctx->srecord_type); t ; t = TREE_CHAIN (t))
1284 DECL_ABSTRACT_ORIGIN (t) = NULL;
1285 }
1286
1287 if (is_task_ctx (ctx))
1288 finalize_task_copyfn (ctx->stmt);
1289
1290 XDELETE (ctx);
1291 }
1292
1293 /* Fix up RECEIVER_DECL with a type that has been remapped to the child
1294 context. */
1295
1296 static void
1297 fixup_child_record_type (omp_context *ctx)
1298 {
1299 tree f, type = ctx->record_type;
1300
1301 /* ??? It isn't sufficient to just call remap_type here, because
1302 variably_modified_type_p doesn't work the way we expect for
1303 record types. Testing each field for whether it needs remapping
1304 and creating a new record by hand works, however. */
1305 for (f = TYPE_FIELDS (type); f ; f = TREE_CHAIN (f))
1306 if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
1307 break;
1308 if (f)
1309 {
1310 tree name, new_fields = NULL;
1311
1312 type = lang_hooks.types.make_type (RECORD_TYPE);
1313 name = DECL_NAME (TYPE_NAME (ctx->record_type));
1314 name = build_decl (DECL_SOURCE_LOCATION (ctx->receiver_decl),
1315 TYPE_DECL, name, type);
1316 TYPE_NAME (type) = name;
1317
1318 for (f = TYPE_FIELDS (ctx->record_type); f ; f = TREE_CHAIN (f))
1319 {
1320 tree new_f = copy_node (f);
1321 DECL_CONTEXT (new_f) = type;
1322 TREE_TYPE (new_f) = remap_type (TREE_TYPE (f), &ctx->cb);
1323 TREE_CHAIN (new_f) = new_fields;
1324 walk_tree (&DECL_SIZE (new_f), copy_tree_body_r, &ctx->cb, NULL);
1325 walk_tree (&DECL_SIZE_UNIT (new_f), copy_tree_body_r,
1326 &ctx->cb, NULL);
1327 walk_tree (&DECL_FIELD_OFFSET (new_f), copy_tree_body_r,
1328 &ctx->cb, NULL);
1329 new_fields = new_f;
1330
1331 /* Arrange to be able to look up the receiver field
1332 given the sender field. */
1333 splay_tree_insert (ctx->field_map, (splay_tree_key) f,
1334 (splay_tree_value) new_f);
1335 }
1336 TYPE_FIELDS (type) = nreverse (new_fields);
1337 layout_type (type);
1338 }
1339
1340 TREE_TYPE (ctx->receiver_decl) = build_pointer_type (type);
1341 }
1342
1343 /* Instantiate decls as necessary in CTX to satisfy the data sharing
1344 specified by CLAUSES. */
1345
1346 static void
1347 scan_sharing_clauses (tree clauses, omp_context *ctx)
1348 {
1349 tree c, decl;
1350 bool scan_array_reductions = false;
1351
1352 for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
1353 {
1354 bool by_ref;
1355
1356 switch (OMP_CLAUSE_CODE (c))
1357 {
1358 case OMP_CLAUSE_PRIVATE:
1359 decl = OMP_CLAUSE_DECL (c);
1360 if (OMP_CLAUSE_PRIVATE_OUTER_REF (c))
1361 goto do_private;
1362 else if (!is_variable_sized (decl))
1363 install_var_local (decl, ctx);
1364 break;
1365
1366 case OMP_CLAUSE_SHARED:
1367 gcc_assert (is_taskreg_ctx (ctx));
1368 decl = OMP_CLAUSE_DECL (c);
1369 gcc_assert (!COMPLETE_TYPE_P (TREE_TYPE (decl))
1370 || !is_variable_sized (decl));
1371 /* Global variables don't need to be copied,
1372 the receiver side will use them directly. */
1373 if (is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx)))
1374 break;
1375 by_ref = use_pointer_for_field (decl, ctx);
1376 if (! TREE_READONLY (decl)
1377 || TREE_ADDRESSABLE (decl)
1378 || by_ref
1379 || is_reference (decl))
1380 {
1381 install_var_field (decl, by_ref, 3, ctx);
1382 install_var_local (decl, ctx);
1383 break;
1384 }
1385 /* We don't need to copy const scalar vars back. */
1386 OMP_CLAUSE_SET_CODE (c, OMP_CLAUSE_FIRSTPRIVATE);
1387 goto do_private;
1388
1389 case OMP_CLAUSE_LASTPRIVATE:
1390 /* Let the corresponding firstprivate clause create
1391 the variable. */
1392 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
1393 break;
1394 /* FALLTHRU */
1395
1396 case OMP_CLAUSE_FIRSTPRIVATE:
1397 case OMP_CLAUSE_REDUCTION:
1398 decl = OMP_CLAUSE_DECL (c);
1399 do_private:
1400 if (is_variable_sized (decl))
1401 {
1402 if (is_task_ctx (ctx))
1403 install_var_field (decl, false, 1, ctx);
1404 break;
1405 }
1406 else if (is_taskreg_ctx (ctx))
1407 {
1408 bool global
1409 = is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx));
1410 by_ref = use_pointer_for_field (decl, NULL);
1411
1412 if (is_task_ctx (ctx)
1413 && (global || by_ref || is_reference (decl)))
1414 {
1415 install_var_field (decl, false, 1, ctx);
1416 if (!global)
1417 install_var_field (decl, by_ref, 2, ctx);
1418 }
1419 else if (!global)
1420 install_var_field (decl, by_ref, 3, ctx);
1421 }
1422 install_var_local (decl, ctx);
1423 break;
1424
1425 case OMP_CLAUSE_COPYPRIVATE:
1426 if (ctx->outer)
1427 scan_omp_op (&OMP_CLAUSE_DECL (c), ctx->outer);
1428 /* FALLTHRU */
1429
1430 case OMP_CLAUSE_COPYIN:
1431 decl = OMP_CLAUSE_DECL (c);
1432 by_ref = use_pointer_for_field (decl, NULL);
1433 install_var_field (decl, by_ref, 3, ctx);
1434 break;
1435
1436 case OMP_CLAUSE_DEFAULT:
1437 ctx->default_kind = OMP_CLAUSE_DEFAULT_KIND (c);
1438 break;
1439
1440 case OMP_CLAUSE_IF:
1441 case OMP_CLAUSE_NUM_THREADS:
1442 case OMP_CLAUSE_SCHEDULE:
1443 if (ctx->outer)
1444 scan_omp_op (&OMP_CLAUSE_OPERAND (c, 0), ctx->outer);
1445 break;
1446
1447 case OMP_CLAUSE_NOWAIT:
1448 case OMP_CLAUSE_ORDERED:
1449 case OMP_CLAUSE_COLLAPSE:
1450 case OMP_CLAUSE_UNTIED:
1451 break;
1452
1453 default:
1454 gcc_unreachable ();
1455 }
1456 }
1457
1458 for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
1459 {
1460 switch (OMP_CLAUSE_CODE (c))
1461 {
1462 case OMP_CLAUSE_LASTPRIVATE:
1463 /* Let the corresponding firstprivate clause create
1464 the variable. */
1465 if (OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c))
1466 scan_array_reductions = true;
1467 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
1468 break;
1469 /* FALLTHRU */
1470
1471 case OMP_CLAUSE_PRIVATE:
1472 case OMP_CLAUSE_FIRSTPRIVATE:
1473 case OMP_CLAUSE_REDUCTION:
1474 decl = OMP_CLAUSE_DECL (c);
1475 if (is_variable_sized (decl))
1476 install_var_local (decl, ctx);
1477 fixup_remapped_decl (decl, ctx,
1478 OMP_CLAUSE_CODE (c) == OMP_CLAUSE_PRIVATE
1479 && OMP_CLAUSE_PRIVATE_DEBUG (c));
1480 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
1481 && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
1482 scan_array_reductions = true;
1483 break;
1484
1485 case OMP_CLAUSE_SHARED:
1486 decl = OMP_CLAUSE_DECL (c);
1487 if (! is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx)))
1488 fixup_remapped_decl (decl, ctx, false);
1489 break;
1490
1491 case OMP_CLAUSE_COPYPRIVATE:
1492 case OMP_CLAUSE_COPYIN:
1493 case OMP_CLAUSE_DEFAULT:
1494 case OMP_CLAUSE_IF:
1495 case OMP_CLAUSE_NUM_THREADS:
1496 case OMP_CLAUSE_SCHEDULE:
1497 case OMP_CLAUSE_NOWAIT:
1498 case OMP_CLAUSE_ORDERED:
1499 case OMP_CLAUSE_COLLAPSE:
1500 case OMP_CLAUSE_UNTIED:
1501 break;
1502
1503 default:
1504 gcc_unreachable ();
1505 }
1506 }
1507
1508 if (scan_array_reductions)
1509 for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
1510 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
1511 && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
1512 {
1513 scan_omp (OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c), ctx);
1514 scan_omp (OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c), ctx);
1515 }
1516 else if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE
1517 && OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c))
1518 scan_omp (OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c), ctx);
1519 }
1520
1521 /* Create a new name for omp child function. Returns an identifier. */
1522
1523 static GTY(()) unsigned int tmp_ompfn_id_num;
1524
1525 static tree
1526 create_omp_child_function_name (bool task_copy)
1527 {
1528 tree name = DECL_ASSEMBLER_NAME (current_function_decl);
1529 size_t len = IDENTIFIER_LENGTH (name);
1530 char *tmp_name, *prefix;
1531 const char *suffix;
1532
1533 suffix = task_copy ? "_omp_cpyfn" : "_omp_fn";
1534 prefix = XALLOCAVEC (char, len + strlen (suffix) + 1);
1535 memcpy (prefix, IDENTIFIER_POINTER (name), len);
1536 strcpy (prefix + len, suffix);
1537 #ifndef NO_DOT_IN_LABEL
1538 prefix[len] = '.';
1539 #elif !defined NO_DOLLAR_IN_LABEL
1540 prefix[len] = '$';
1541 #endif
1542 ASM_FORMAT_PRIVATE_NAME (tmp_name, prefix, tmp_ompfn_id_num++);
1543 return get_identifier (tmp_name);
1544 }
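/* Illustrative note: for a containing function foo this produces names
   along the lines of "foo.omp_fn.0" (or "foo.omp_cpyfn.1" for a task
   copy function), with '$' or plain '_' used as the separator on targets
   that do not allow '.' in labels.  */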
1545
1546 /* Build a decl for the omp child function. It'll not contain a body
1547 yet, just the bare decl. */
1548
1549 static void
1550 create_omp_child_function (omp_context *ctx, bool task_copy)
1551 {
1552 tree decl, type, name, t;
1553
1554 name = create_omp_child_function_name (task_copy);
1555 if (task_copy)
1556 type = build_function_type_list (void_type_node, ptr_type_node,
1557 ptr_type_node, NULL_TREE);
1558 else
1559 type = build_function_type_list (void_type_node, ptr_type_node, NULL_TREE);
1560
1561 decl = build_decl (gimple_location (ctx->stmt),
1562 FUNCTION_DECL, name, type);
1563 decl = lang_hooks.decls.pushdecl (decl);
1564
1565 if (!task_copy)
1566 ctx->cb.dst_fn = decl;
1567 else
1568 gimple_omp_task_set_copy_fn (ctx->stmt, decl);
1569
1570 TREE_STATIC (decl) = 1;
1571 TREE_USED (decl) = 1;
1572 DECL_ARTIFICIAL (decl) = 1;
1573 DECL_IGNORED_P (decl) = 0;
1574 TREE_PUBLIC (decl) = 0;
1575 DECL_UNINLINABLE (decl) = 1;
1576 DECL_EXTERNAL (decl) = 0;
1577 DECL_CONTEXT (decl) = NULL_TREE;
1578 DECL_INITIAL (decl) = make_node (BLOCK);
1579
1580 t = build_decl (DECL_SOURCE_LOCATION (decl),
1581 RESULT_DECL, NULL_TREE, void_type_node);
1582 DECL_ARTIFICIAL (t) = 1;
1583 DECL_IGNORED_P (t) = 1;
1584 DECL_CONTEXT (t) = decl;
1585 DECL_RESULT (decl) = t;
1586
1587 t = build_decl (DECL_SOURCE_LOCATION (decl),
1588 PARM_DECL, get_identifier (".omp_data_i"), ptr_type_node);
1589 DECL_ARTIFICIAL (t) = 1;
1590 DECL_ARG_TYPE (t) = ptr_type_node;
1591 DECL_CONTEXT (t) = current_function_decl;
1592 TREE_USED (t) = 1;
1593 DECL_ARGUMENTS (decl) = t;
1594 if (!task_copy)
1595 ctx->receiver_decl = t;
1596 else
1597 {
1598 t = build_decl (DECL_SOURCE_LOCATION (decl),
1599 PARM_DECL, get_identifier (".omp_data_o"),
1600 ptr_type_node);
1601 DECL_ARTIFICIAL (t) = 1;
1602 DECL_ARG_TYPE (t) = ptr_type_node;
1603 DECL_CONTEXT (t) = current_function_decl;
1604 TREE_USED (t) = 1;
1605 TREE_ADDRESSABLE (t) = 1;
1606 TREE_CHAIN (t) = DECL_ARGUMENTS (decl);
1607 DECL_ARGUMENTS (decl) = t;
1608 }
1609
1610 /* Allocate memory for the function structure. The call to
1611 allocate_struct_function clobbers CFUN, so we need to restore
1612 it afterward. */
1613 push_struct_function (decl);
1614 cfun->function_end_locus = gimple_location (ctx->stmt);
1615 pop_cfun ();
1616 }
1617
1618
1619 /* Scan an OpenMP parallel directive. */
1620
1621 static void
1622 scan_omp_parallel (gimple_stmt_iterator *gsi, omp_context *outer_ctx)
1623 {
1624 omp_context *ctx;
1625 tree name;
1626 gimple stmt = gsi_stmt (*gsi);
1627
1628 /* Ignore parallel directives with empty bodies, unless there
1629 are copyin clauses. */
1630 if (optimize > 0
1631 && empty_body_p (gimple_omp_body (stmt))
1632 && find_omp_clause (gimple_omp_parallel_clauses (stmt),
1633 OMP_CLAUSE_COPYIN) == NULL)
1634 {
1635 gsi_replace (gsi, gimple_build_nop (), false);
1636 return;
1637 }
1638
1639 ctx = new_omp_context (stmt, outer_ctx);
1640 if (taskreg_nesting_level > 1)
1641 ctx->is_nested = true;
1642 ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
1643 ctx->default_kind = OMP_CLAUSE_DEFAULT_SHARED;
1644 ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
1645 name = create_tmp_var_name (".omp_data_s");
1646 name = build_decl (gimple_location (stmt),
1647 TYPE_DECL, name, ctx->record_type);
1648 TYPE_NAME (ctx->record_type) = name;
1649 create_omp_child_function (ctx, false);
1650 gimple_omp_parallel_set_child_fn (stmt, ctx->cb.dst_fn);
1651
1652 scan_sharing_clauses (gimple_omp_parallel_clauses (stmt), ctx);
1653 scan_omp (gimple_omp_body (stmt), ctx);
1654
1655 if (TYPE_FIELDS (ctx->record_type) == NULL)
1656 ctx->record_type = ctx->receiver_decl = NULL;
1657 else
1658 {
1659 layout_type (ctx->record_type);
1660 fixup_child_record_type (ctx);
1661 }
1662 }
1663
1664 /* Scan an OpenMP task directive. */
1665
1666 static void
1667 scan_omp_task (gimple_stmt_iterator *gsi, omp_context *outer_ctx)
1668 {
1669 omp_context *ctx;
1670 tree name, t;
1671 gimple stmt = gsi_stmt (*gsi);
1672
1673 /* Ignore task directives with empty bodies. */
1674 if (optimize > 0
1675 && empty_body_p (gimple_omp_body (stmt)))
1676 {
1677 gsi_replace (gsi, gimple_build_nop (), false);
1678 return;
1679 }
1680
1681 ctx = new_omp_context (stmt, outer_ctx);
1682 if (taskreg_nesting_level > 1)
1683 ctx->is_nested = true;
1684 ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
1685 ctx->default_kind = OMP_CLAUSE_DEFAULT_SHARED;
1686 ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
1687 name = create_tmp_var_name (".omp_data_s");
1688 name = build_decl (gimple_location (stmt),
1689 TYPE_DECL, name, ctx->record_type);
1690 TYPE_NAME (ctx->record_type) = name;
1691 create_omp_child_function (ctx, false);
1692 gimple_omp_task_set_child_fn (stmt, ctx->cb.dst_fn);
1693
1694 scan_sharing_clauses (gimple_omp_task_clauses (stmt), ctx);
1695
1696 if (ctx->srecord_type)
1697 {
1698 name = create_tmp_var_name (".omp_data_a");
1699 name = build_decl (gimple_location (stmt),
1700 TYPE_DECL, name, ctx->srecord_type);
1701 TYPE_NAME (ctx->srecord_type) = name;
1702 create_omp_child_function (ctx, true);
1703 }
1704
1705 scan_omp (gimple_omp_body (stmt), ctx);
1706
1707 if (TYPE_FIELDS (ctx->record_type) == NULL)
1708 {
1709 ctx->record_type = ctx->receiver_decl = NULL;
1710 t = build_int_cst (long_integer_type_node, 0);
1711 gimple_omp_task_set_arg_size (stmt, t);
1712 t = build_int_cst (long_integer_type_node, 1);
1713 gimple_omp_task_set_arg_align (stmt, t);
1714 }
1715 else
1716 {
1717 tree *p, vla_fields = NULL_TREE, *q = &vla_fields;
1718 /* Move VLA fields to the end. */
1719 p = &TYPE_FIELDS (ctx->record_type);
1720 while (*p)
1721 if (!TYPE_SIZE_UNIT (TREE_TYPE (*p))
1722 || ! TREE_CONSTANT (TYPE_SIZE_UNIT (TREE_TYPE (*p))))
1723 {
1724 *q = *p;
1725 *p = TREE_CHAIN (*p);
1726 TREE_CHAIN (*q) = NULL_TREE;
1727 q = &TREE_CHAIN (*q);
1728 }
1729 else
1730 p = &TREE_CHAIN (*p);
1731 *p = vla_fields;
1732 layout_type (ctx->record_type);
1733 fixup_child_record_type (ctx);
1734 if (ctx->srecord_type)
1735 layout_type (ctx->srecord_type);
1736 t = fold_convert (long_integer_type_node,
1737 TYPE_SIZE_UNIT (ctx->record_type));
1738 gimple_omp_task_set_arg_size (stmt, t);
1739 t = build_int_cst (long_integer_type_node,
1740 TYPE_ALIGN_UNIT (ctx->record_type));
1741 gimple_omp_task_set_arg_align (stmt, t);
1742 }
1743 }
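/* Illustrative note: the arg_size and arg_align values recorded above end
   up as the ARG_SIZE/ARG_ALIGN arguments of the eventual GOMP_task
   runtime call, which uses them to allocate and align the argument block
   itself when the task is deferred.  */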
1744
1745
1746 /* Scan an OpenMP loop directive. */
1747
1748 static void
1749 scan_omp_for (gimple stmt, omp_context *outer_ctx)
1750 {
1751 omp_context *ctx;
1752 size_t i;
1753
1754 ctx = new_omp_context (stmt, outer_ctx);
1755
1756 scan_sharing_clauses (gimple_omp_for_clauses (stmt), ctx);
1757
1758 scan_omp (gimple_omp_for_pre_body (stmt), ctx);
1759 for (i = 0; i < gimple_omp_for_collapse (stmt); i++)
1760 {
1761 scan_omp_op (gimple_omp_for_index_ptr (stmt, i), ctx);
1762 scan_omp_op (gimple_omp_for_initial_ptr (stmt, i), ctx);
1763 scan_omp_op (gimple_omp_for_final_ptr (stmt, i), ctx);
1764 scan_omp_op (gimple_omp_for_incr_ptr (stmt, i), ctx);
1765 }
1766 scan_omp (gimple_omp_body (stmt), ctx);
1767 }
1768
1769 /* Scan an OpenMP sections directive. */
1770
1771 static void
1772 scan_omp_sections (gimple stmt, omp_context *outer_ctx)
1773 {
1774 omp_context *ctx;
1775
1776 ctx = new_omp_context (stmt, outer_ctx);
1777 scan_sharing_clauses (gimple_omp_sections_clauses (stmt), ctx);
1778 scan_omp (gimple_omp_body (stmt), ctx);
1779 }
1780
1781 /* Scan an OpenMP single directive. */
1782
1783 static void
1784 scan_omp_single (gimple stmt, omp_context *outer_ctx)
1785 {
1786 omp_context *ctx;
1787 tree name;
1788
1789 ctx = new_omp_context (stmt, outer_ctx);
1790 ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
1791 ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
1792 name = create_tmp_var_name (".omp_copy_s");
1793 name = build_decl (gimple_location (stmt),
1794 TYPE_DECL, name, ctx->record_type);
1795 TYPE_NAME (ctx->record_type) = name;
1796
1797 scan_sharing_clauses (gimple_omp_single_clauses (stmt), ctx);
1798 scan_omp (gimple_omp_body (stmt), ctx);
1799
1800 if (TYPE_FIELDS (ctx->record_type) == NULL)
1801 ctx->record_type = NULL;
1802 else
1803 layout_type (ctx->record_type);
1804 }
1805
1806
1807 /* Check OpenMP nesting restrictions. */
1808 static void
1809 check_omp_nesting_restrictions (gimple stmt, omp_context *ctx)
1810 {
1811 switch (gimple_code (stmt))
1812 {
1813 case GIMPLE_OMP_FOR:
1814 case GIMPLE_OMP_SECTIONS:
1815 case GIMPLE_OMP_SINGLE:
1816 case GIMPLE_CALL:
1817 for (; ctx != NULL; ctx = ctx->outer)
1818 switch (gimple_code (ctx->stmt))
1819 {
1820 case GIMPLE_OMP_FOR:
1821 case GIMPLE_OMP_SECTIONS:
1822 case GIMPLE_OMP_SINGLE:
1823 case GIMPLE_OMP_ORDERED:
1824 case GIMPLE_OMP_MASTER:
1825 case GIMPLE_OMP_TASK:
1826 if (is_gimple_call (stmt))
1827 {
1828 warning (0, "barrier region may not be closely nested inside "
1829 "of work-sharing, critical, ordered, master or "
1830 "explicit task region");
1831 return;
1832 }
1833 warning (0, "work-sharing region may not be closely nested inside "
1834 "of work-sharing, critical, ordered, master or explicit "
1835 "task region");
1836 return;
1837 case GIMPLE_OMP_PARALLEL:
1838 return;
1839 default:
1840 break;
1841 }
1842 break;
1843 case GIMPLE_OMP_MASTER:
1844 for (; ctx != NULL; ctx = ctx->outer)
1845 switch (gimple_code (ctx->stmt))
1846 {
1847 case GIMPLE_OMP_FOR:
1848 case GIMPLE_OMP_SECTIONS:
1849 case GIMPLE_OMP_SINGLE:
1850 case GIMPLE_OMP_TASK:
1851 warning (0, "master region may not be closely nested inside "
1852 "of work-sharing or explicit task region");
1853 return;
1854 case GIMPLE_OMP_PARALLEL:
1855 return;
1856 default:
1857 break;
1858 }
1859 break;
1860 case GIMPLE_OMP_ORDERED:
1861 for (; ctx != NULL; ctx = ctx->outer)
1862 switch (gimple_code (ctx->stmt))
1863 {
1864 case GIMPLE_OMP_CRITICAL:
1865 case GIMPLE_OMP_TASK:
1866 warning (0, "ordered region may not be closely nested inside "
1867 "of critical or explicit task region");
1868 return;
1869 case GIMPLE_OMP_FOR:
1870 if (find_omp_clause (gimple_omp_for_clauses (ctx->stmt),
1871 OMP_CLAUSE_ORDERED) == NULL)
1872 warning (0, "ordered region must be closely nested inside "
1873 "a loop region with an ordered clause");
1874 return;
1875 case GIMPLE_OMP_PARALLEL:
1876 return;
1877 default:
1878 break;
1879 }
1880 break;
1881 case GIMPLE_OMP_CRITICAL:
1882 for (; ctx != NULL; ctx = ctx->outer)
1883 if (gimple_code (ctx->stmt) == GIMPLE_OMP_CRITICAL
1884 && (gimple_omp_critical_name (stmt)
1885 == gimple_omp_critical_name (ctx->stmt)))
1886 {
1887 warning (0, "critical region may not be nested inside a critical "
1888 "region with the same name");
1889 return;
1890 }
1891 break;
1892 default:
1893 break;
1894 }
1895 }
1896
1897
1898 /* Helper function for scan_omp.
1899
1900 Callback for walk_tree or operators in walk_gimple_stmt used to
1901 scan for OpenMP directives in TP. */
1902
1903 static tree
1904 scan_omp_1_op (tree *tp, int *walk_subtrees, void *data)
1905 {
1906 struct walk_stmt_info *wi = (struct walk_stmt_info *) data;
1907 omp_context *ctx = (omp_context *) wi->info;
1908 tree t = *tp;
1909
1910 switch (TREE_CODE (t))
1911 {
1912 case VAR_DECL:
1913 case PARM_DECL:
1914 case LABEL_DECL:
1915 case RESULT_DECL:
1916 if (ctx)
1917 *tp = remap_decl (t, &ctx->cb);
1918 break;
1919
1920 default:
1921 if (ctx && TYPE_P (t))
1922 *tp = remap_type (t, &ctx->cb);
1923 else if (!DECL_P (t))
1924 {
1925 *walk_subtrees = 1;
1926 if (ctx)
1927 TREE_TYPE (t) = remap_type (TREE_TYPE (t), &ctx->cb);
1928 }
1929 break;
1930 }
1931
1932 return NULL_TREE;
1933 }
1934
1935
1936 /* Helper function for scan_omp.
1937
1938 Callback for walk_gimple_stmt used to scan for OpenMP directives in
1939 the current statement in GSI. */
1940
1941 static tree
1942 scan_omp_1_stmt (gimple_stmt_iterator *gsi, bool *handled_ops_p,
1943 struct walk_stmt_info *wi)
1944 {
1945 gimple stmt = gsi_stmt (*gsi);
1946 omp_context *ctx = (omp_context *) wi->info;
1947
1948 if (gimple_has_location (stmt))
1949 input_location = gimple_location (stmt);
1950
1951 /* Check the OpenMP nesting restrictions. */
1952 if (ctx != NULL)
1953 {
1954 if (is_gimple_omp (stmt))
1955 check_omp_nesting_restrictions (stmt, ctx);
1956 else if (is_gimple_call (stmt))
1957 {
1958 tree fndecl = gimple_call_fndecl (stmt);
1959 if (fndecl && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL
1960 && DECL_FUNCTION_CODE (fndecl) == BUILT_IN_GOMP_BARRIER)
1961 check_omp_nesting_restrictions (stmt, ctx);
1962 }
1963 }
1964
1965 *handled_ops_p = true;
1966
1967 switch (gimple_code (stmt))
1968 {
1969 case GIMPLE_OMP_PARALLEL:
1970 taskreg_nesting_level++;
1971 scan_omp_parallel (gsi, ctx);
1972 taskreg_nesting_level--;
1973 break;
1974
1975 case GIMPLE_OMP_TASK:
1976 taskreg_nesting_level++;
1977 scan_omp_task (gsi, ctx);
1978 taskreg_nesting_level--;
1979 break;
1980
1981 case GIMPLE_OMP_FOR:
1982 scan_omp_for (stmt, ctx);
1983 break;
1984
1985 case GIMPLE_OMP_SECTIONS:
1986 scan_omp_sections (stmt, ctx);
1987 break;
1988
1989 case GIMPLE_OMP_SINGLE:
1990 scan_omp_single (stmt, ctx);
1991 break;
1992
1993 case GIMPLE_OMP_SECTION:
1994 case GIMPLE_OMP_MASTER:
1995 case GIMPLE_OMP_ORDERED:
1996 case GIMPLE_OMP_CRITICAL:
1997 ctx = new_omp_context (stmt, ctx);
1998 scan_omp (gimple_omp_body (stmt), ctx);
1999 break;
2000
2001 case GIMPLE_BIND:
2002 {
2003 tree var;
2004
2005 *handled_ops_p = false;
2006 if (ctx)
2007 for (var = gimple_bind_vars (stmt); var ; var = TREE_CHAIN (var))
2008 insert_decl_map (&ctx->cb, var, var);
2009 }
2010 break;
2011 default:
2012 *handled_ops_p = false;
2013 break;
2014 }
2015
2016 return NULL_TREE;
2017 }
2018
2019
2020 /* Scan all the statements starting at the current statement. CTX
2021 contains context information about the OpenMP directives and
2022 clauses found during the scan. */
2023
2024 static void
2025 scan_omp (gimple_seq body, omp_context *ctx)
2026 {
2027 location_t saved_location;
2028 struct walk_stmt_info wi;
2029
2030 memset (&wi, 0, sizeof (wi));
2031 wi.info = ctx;
2032 wi.want_locations = true;
2033
2034 saved_location = input_location;
2035 walk_gimple_seq (body, scan_omp_1_stmt, scan_omp_1_op, &wi);
2036 input_location = saved_location;
2037 }
2038 \f
2039 /* Re-gimplification and code generation routines. */
2040
2041 /* Build a call to GOMP_barrier. */
2042
2043 static tree
2044 build_omp_barrier (void)
2045 {
2046 return build_call_expr (built_in_decls[BUILT_IN_GOMP_BARRIER], 0);
2047 }
2048
2049 /* If a context was created for STMT when it was scanned, return it. */
2050
2051 static omp_context *
2052 maybe_lookup_ctx (gimple stmt)
2053 {
2054 splay_tree_node n;
2055 n = splay_tree_lookup (all_contexts, (splay_tree_key) stmt);
2056 return n ? (omp_context *) n->value : NULL;
2057 }
2058
2059
2060 /* Find the mapping for DECL in CTX or the immediately enclosing
2061 context that has a mapping for DECL.
2062
2063 If CTX is a nested parallel directive, we may have to use the decl
2064 mappings created in CTX's parent context. Suppose that we have the
2065 following parallel nesting (variable UIDs showed for clarity):
2066
2067 iD.1562 = 0;
2068 #omp parallel shared(iD.1562) -> outer parallel
2069 iD.1562 = iD.1562 + 1;
2070
2071 #omp parallel shared (iD.1562) -> inner parallel
2072 iD.1562 = iD.1562 - 1;
2073
2074 Each parallel structure will create a distinct .omp_data_s structure
2075 for copying iD.1562 in/out of the directive:
2076
2077 outer parallel .omp_data_s.1.i -> iD.1562
2078 inner parallel .omp_data_s.2.i -> iD.1562
2079
2080 A shared variable mapping will produce a copy-out operation before
2081 the parallel directive and a copy-in operation after it. So, in
2082 this case we would have:
2083
2084 iD.1562 = 0;
2085 .omp_data_o.1.i = iD.1562;
2086 #omp parallel shared(iD.1562) -> outer parallel
2087 .omp_data_i.1 = &.omp_data_o.1
2088 .omp_data_i.1->i = .omp_data_i.1->i + 1;
2089
2090 .omp_data_o.2.i = iD.1562; -> **
2091 #omp parallel shared(iD.1562) -> inner parallel
2092 .omp_data_i.2 = &.omp_data_o.2
2093 .omp_data_i.2->i = .omp_data_i.2->i - 1;
2094
2095
2096 ** This is a problem. The symbol iD.1562 cannot be referenced
2097 inside the body of the outer parallel region. But since we are
2098 emitting this copy operation while expanding the inner parallel
2099 directive, we need to access the CTX structure of the outer
2100 parallel directive to get the correct mapping:
2101
2102 .omp_data_o.2.i = .omp_data_i.1->i
2103
2104 Since there may be other workshare or parallel directives enclosing
2105 the parallel directive, it may be necessary to walk up the context
2106 parent chain. This is not a problem in general because nested
2107 parallelism happens only rarely. */
2108
2109 static tree
2110 lookup_decl_in_outer_ctx (tree decl, omp_context *ctx)
2111 {
2112 tree t;
2113 omp_context *up;
2114
2115 for (up = ctx->outer, t = NULL; up && t == NULL; up = up->outer)
2116 t = maybe_lookup_decl (decl, up);
2117
2118 gcc_assert (!ctx->is_nested || t || is_global_var (decl));
2119
2120 return t ? t : decl;
2121 }
2122
2123
2124 /* Similar to lookup_decl_in_outer_ctx, but return DECL if not found
2125 in outer contexts. */
2126
2127 static tree
2128 maybe_lookup_decl_in_outer_ctx (tree decl, omp_context *ctx)
2129 {
2130 tree t = NULL;
2131 omp_context *up;
2132
2133 for (up = ctx->outer, t = NULL; up && t == NULL; up = up->outer)
2134 t = maybe_lookup_decl (decl, up);
2135
2136 return t ? t : decl;
2137 }
2138
2139
2140 /* Construct the initialization value for reduction CLAUSE. */
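/* For example, the private copy for reduction (+:sum) starts at 0 and
   the one for reduction (*:prod) at 1, while reduction (max:m) starts
   at the smallest value of the type (-INF for floats that honor
   infinities), as computed case by case below.  */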
2141
2142 tree
2143 omp_reduction_init (tree clause, tree type)
2144 {
2145 switch (OMP_CLAUSE_REDUCTION_CODE (clause))
2146 {
2147 case PLUS_EXPR:
2148 case MINUS_EXPR:
2149 case BIT_IOR_EXPR:
2150 case BIT_XOR_EXPR:
2151 case TRUTH_OR_EXPR:
2152 case TRUTH_ORIF_EXPR:
2153 case TRUTH_XOR_EXPR:
2154 case NE_EXPR:
2155 return fold_convert (type, integer_zero_node);
2156
2157 case MULT_EXPR:
2158 case TRUTH_AND_EXPR:
2159 case TRUTH_ANDIF_EXPR:
2160 case EQ_EXPR:
2161 return fold_convert (type, integer_one_node);
2162
2163 case BIT_AND_EXPR:
2164 return fold_convert (type, integer_minus_one_node);
2165
2166 case MAX_EXPR:
2167 if (SCALAR_FLOAT_TYPE_P (type))
2168 {
2169 REAL_VALUE_TYPE max, min;
2170 if (HONOR_INFINITIES (TYPE_MODE (type)))
2171 {
2172 real_inf (&max);
2173 real_arithmetic (&min, NEGATE_EXPR, &max, NULL);
2174 }
2175 else
2176 real_maxval (&min, 1, TYPE_MODE (type));
2177 return build_real (type, min);
2178 }
2179 else
2180 {
2181 gcc_assert (INTEGRAL_TYPE_P (type));
2182 return TYPE_MIN_VALUE (type);
2183 }
2184
2185 case MIN_EXPR:
2186 if (SCALAR_FLOAT_TYPE_P (type))
2187 {
2188 REAL_VALUE_TYPE max;
2189 if (HONOR_INFINITIES (TYPE_MODE (type)))
2190 real_inf (&max);
2191 else
2192 real_maxval (&max, 0, TYPE_MODE (type));
2193 return build_real (type, max);
2194 }
2195 else
2196 {
2197 gcc_assert (INTEGRAL_TYPE_P (type));
2198 return TYPE_MAX_VALUE (type);
2199 }
2200
2201 default:
2202 gcc_unreachable ();
2203 }
2204 }
2205
2206 /* Generate code to implement the input clauses, FIRSTPRIVATE and COPYIN,
2207 from the receiver (aka child) side and initializers for REFERENCE_TYPE
2208 private variables. Initialization statements go in ILIST, while calls
2209 to destructors go in DLIST. */
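/* As a rough sketch of the receiver-side effect, for

     #pragma omp parallel firstprivate (x)

   the statements added to ILIST initialize the private copy from the
   value received through the .omp_data_i record, conceptually

     x' = .omp_data_i->x;                (via omp_clause_copy_ctor)

   whereas PRIVATE only default-constructs the copy and COPYIN copies
   the master thread's threadprivate value into the other threads.  */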
2210
2211 static void
2212 lower_rec_input_clauses (tree clauses, gimple_seq *ilist, gimple_seq *dlist,
2213 omp_context *ctx)
2214 {
2215 gimple_stmt_iterator diter;
2216 tree c, dtor, copyin_seq, x, ptr;
2217 bool copyin_by_ref = false;
2218 bool lastprivate_firstprivate = false;
2219 int pass;
2220
2221 *dlist = gimple_seq_alloc ();
2222 diter = gsi_start (*dlist);
2223 copyin_seq = NULL;
2224
2225 /* Do all the fixed sized types in the first pass, and the variable sized
2226 types in the second pass. This makes sure that the scalar arguments to
2227 the variable sized types are processed before we use them in the
2228 variable sized operations. */
2229 for (pass = 0; pass < 2; ++pass)
2230 {
2231 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
2232 {
2233 enum omp_clause_code c_kind = OMP_CLAUSE_CODE (c);
2234 tree var, new_var;
2235 bool by_ref;
2236
2237 switch (c_kind)
2238 {
2239 case OMP_CLAUSE_PRIVATE:
2240 if (OMP_CLAUSE_PRIVATE_DEBUG (c))
2241 continue;
2242 break;
2243 case OMP_CLAUSE_SHARED:
2244 if (maybe_lookup_decl (OMP_CLAUSE_DECL (c), ctx) == NULL)
2245 {
2246 gcc_assert (is_global_var (OMP_CLAUSE_DECL (c)));
2247 continue;
2248 }
2249 case OMP_CLAUSE_FIRSTPRIVATE:
2250 case OMP_CLAUSE_COPYIN:
2251 case OMP_CLAUSE_REDUCTION:
2252 break;
2253 case OMP_CLAUSE_LASTPRIVATE:
2254 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
2255 {
2256 lastprivate_firstprivate = true;
2257 if (pass != 0)
2258 continue;
2259 }
2260 break;
2261 default:
2262 continue;
2263 }
2264
2265 new_var = var = OMP_CLAUSE_DECL (c);
2266 if (c_kind != OMP_CLAUSE_COPYIN)
2267 new_var = lookup_decl (var, ctx);
2268
2269 if (c_kind == OMP_CLAUSE_SHARED || c_kind == OMP_CLAUSE_COPYIN)
2270 {
2271 if (pass != 0)
2272 continue;
2273 }
2274 else if (is_variable_sized (var))
2275 {
2276 /* For variable sized types, we need to allocate the
2277 actual storage here. Call alloca and store the
2278 result in the pointer decl that we created elsewhere. */
2279 if (pass == 0)
2280 continue;
2281
2282 if (c_kind != OMP_CLAUSE_FIRSTPRIVATE || !is_task_ctx (ctx))
2283 {
2284 gimple stmt;
2285 tree tmp;
2286
2287 ptr = DECL_VALUE_EXPR (new_var);
2288 gcc_assert (TREE_CODE (ptr) == INDIRECT_REF);
2289 ptr = TREE_OPERAND (ptr, 0);
2290 gcc_assert (DECL_P (ptr));
2291 x = TYPE_SIZE_UNIT (TREE_TYPE (new_var));
2292
2293 		  /* void *tmp = __builtin_alloca (size);  */
2294 stmt
2295 = gimple_build_call (built_in_decls[BUILT_IN_ALLOCA], 1, x);
2296 tmp = create_tmp_var_raw (ptr_type_node, NULL);
2297 gimple_add_tmp_var (tmp);
2298 gimple_call_set_lhs (stmt, tmp);
2299
2300 gimple_seq_add_stmt (ilist, stmt);
2301
2302 x = fold_convert (TREE_TYPE (ptr), tmp);
2303 gimplify_assign (ptr, x, ilist);
2304 }
2305 }
2306 else if (is_reference (var))
2307 {
2308 /* For references that are being privatized for Fortran,
2309 allocate new backing storage for the new pointer
2310 variable. This allows us to avoid changing all the
2311 code that expects a pointer to something that expects
2312 a direct variable. Note that this doesn't apply to
2313 C++, since reference types are disallowed in data
2314 sharing clauses there, except for NRV optimized
2315 return values. */
2316 if (pass == 0)
2317 continue;
2318
2319 x = TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (new_var)));
2320 if (c_kind == OMP_CLAUSE_FIRSTPRIVATE && is_task_ctx (ctx))
2321 {
2322 x = build_receiver_ref (var, false, ctx);
2323 x = build_fold_addr_expr (x);
2324 }
2325 else if (TREE_CONSTANT (x))
2326 {
2327 const char *name = NULL;
2328 if (DECL_NAME (var))
2329 name = IDENTIFIER_POINTER (DECL_NAME (new_var));
2330
2331 x = create_tmp_var_raw (TREE_TYPE (TREE_TYPE (new_var)),
2332 name);
2333 gimple_add_tmp_var (x);
2334 TREE_ADDRESSABLE (x) = 1;
2335 x = build_fold_addr_expr (x);
2336 }
2337 else
2338 {
2339 x = build_call_expr (built_in_decls[BUILT_IN_ALLOCA], 1, x);
2340 }
2341
2342 x = fold_convert (TREE_TYPE (new_var), x);
2343 gimplify_assign (new_var, x, ilist);
2344
2345 new_var = build_fold_indirect_ref (new_var);
2346 }
2347 else if (c_kind == OMP_CLAUSE_REDUCTION
2348 && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
2349 {
2350 if (pass == 0)
2351 continue;
2352 }
2353 else if (pass != 0)
2354 continue;
2355
2356 switch (OMP_CLAUSE_CODE (c))
2357 {
2358 case OMP_CLAUSE_SHARED:
2359 /* Shared global vars are just accessed directly. */
2360 if (is_global_var (new_var))
2361 break;
2362 /* Set up the DECL_VALUE_EXPR for shared variables now. This
2363 needs to be delayed until after fixup_child_record_type so
2364 that we get the correct type during the dereference. */
2365 by_ref = use_pointer_for_field (var, ctx);
2366 x = build_receiver_ref (var, by_ref, ctx);
2367 SET_DECL_VALUE_EXPR (new_var, x);
2368 DECL_HAS_VALUE_EXPR_P (new_var) = 1;
2369
2370 /* ??? If VAR is not passed by reference, and the variable
2371 hasn't been initialized yet, then we'll get a warning for
2372 the store into the omp_data_s structure. Ideally, we'd be
2373 able to notice this and not store anything at all, but
2374 we're generating code too early. Suppress the warning. */
2375 if (!by_ref)
2376 TREE_NO_WARNING (var) = 1;
2377 break;
2378
2379 case OMP_CLAUSE_LASTPRIVATE:
2380 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
2381 break;
2382 /* FALLTHRU */
2383
2384 case OMP_CLAUSE_PRIVATE:
2385 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_PRIVATE)
2386 x = build_outer_var_ref (var, ctx);
2387 else if (OMP_CLAUSE_PRIVATE_OUTER_REF (c))
2388 {
2389 if (is_task_ctx (ctx))
2390 x = build_receiver_ref (var, false, ctx);
2391 else
2392 x = build_outer_var_ref (var, ctx);
2393 }
2394 else
2395 x = NULL;
2396 x = lang_hooks.decls.omp_clause_default_ctor (c, new_var, x);
2397 if (x)
2398 gimplify_and_add (x, ilist);
2399 /* FALLTHRU */
2400
2401 do_dtor:
2402 x = lang_hooks.decls.omp_clause_dtor (c, new_var);
2403 if (x)
2404 {
2405 gimple_seq tseq = NULL;
2406
2407 dtor = x;
2408 gimplify_stmt (&dtor, &tseq);
2409 gsi_insert_seq_before (&diter, tseq, GSI_SAME_STMT);
2410 }
2411 break;
2412
2413 case OMP_CLAUSE_FIRSTPRIVATE:
2414 if (is_task_ctx (ctx))
2415 {
2416 if (is_reference (var) || is_variable_sized (var))
2417 goto do_dtor;
2418 else if (is_global_var (maybe_lookup_decl_in_outer_ctx (var,
2419 ctx))
2420 || use_pointer_for_field (var, NULL))
2421 {
2422 x = build_receiver_ref (var, false, ctx);
2423 SET_DECL_VALUE_EXPR (new_var, x);
2424 DECL_HAS_VALUE_EXPR_P (new_var) = 1;
2425 goto do_dtor;
2426 }
2427 }
2428 x = build_outer_var_ref (var, ctx);
2429 x = lang_hooks.decls.omp_clause_copy_ctor (c, new_var, x);
2430 gimplify_and_add (x, ilist);
2431 goto do_dtor;
2432 break;
2433
2434 case OMP_CLAUSE_COPYIN:
2435 by_ref = use_pointer_for_field (var, NULL);
2436 x = build_receiver_ref (var, by_ref, ctx);
2437 x = lang_hooks.decls.omp_clause_assign_op (c, new_var, x);
2438 append_to_statement_list (x, &copyin_seq);
2439 copyin_by_ref |= by_ref;
2440 break;
2441
2442 case OMP_CLAUSE_REDUCTION:
2443 if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
2444 {
2445 tree placeholder = OMP_CLAUSE_REDUCTION_PLACEHOLDER (c);
2446 x = build_outer_var_ref (var, ctx);
2447
2448 if (is_reference (var))
2449 x = build_fold_addr_expr (x);
2450 SET_DECL_VALUE_EXPR (placeholder, x);
2451 DECL_HAS_VALUE_EXPR_P (placeholder) = 1;
2452 lower_omp (OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c), ctx);
2453 gimple_seq_add_seq (ilist,
2454 OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c));
2455 OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c) = NULL;
2456 DECL_HAS_VALUE_EXPR_P (placeholder) = 0;
2457 }
2458 else
2459 {
2460 x = omp_reduction_init (c, TREE_TYPE (new_var));
2461 gcc_assert (TREE_CODE (TREE_TYPE (new_var)) != ARRAY_TYPE);
2462 gimplify_assign (new_var, x, ilist);
2463 }
2464 break;
2465
2466 default:
2467 gcc_unreachable ();
2468 }
2469 }
2470 }
2471
2472 /* The copyin sequence is not to be executed by the main thread, since
2473      that would result in self-copies.  A self-copy may be unobservable
2474      for scalars, but it certainly is observable to a C++ operator=.  */
2475 if (copyin_seq)
2476 {
2477 x = build_call_expr (built_in_decls[BUILT_IN_OMP_GET_THREAD_NUM], 0);
2478 x = build2 (NE_EXPR, boolean_type_node, x,
2479 build_int_cst (TREE_TYPE (x), 0));
2480 x = build3 (COND_EXPR, void_type_node, x, copyin_seq, NULL);
2481 gimplify_and_add (x, ilist);
2482 }
2483
2484 /* If any copyin variable is passed by reference, we must ensure the
2485 master thread doesn't modify it before it is copied over in all
2486 threads. Similarly for variables in both firstprivate and
2487 lastprivate clauses we need to ensure the lastprivate copying
2488 happens after firstprivate copying in all threads. */
2489 if (copyin_by_ref || lastprivate_firstprivate)
2490 gimplify_and_add (build_omp_barrier (), ilist);
2491 }
2492
2493
2494 /* Generate code to implement the LASTPRIVATE clauses. This is used for
2495 both parallel and workshare constructs. PREDICATE may be NULL if it's
2496 always true. */
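/* Roughly, for

     #pragma omp for lastprivate (x)

   the thread executing the sequentially last iteration assigns its
   private X back to the original variable; when PREDICATE is non-NULL,
   that copy-back is guarded by a conditional branch built from the
   predicate's two operands below.  */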
2497
2498 static void
2499 lower_lastprivate_clauses (tree clauses, tree predicate, gimple_seq *stmt_list,
2500 omp_context *ctx)
2501 {
2502 tree x, c, label = NULL;
2503 bool par_clauses = false;
2504
2505 /* Early exit if there are no lastprivate clauses. */
2506 clauses = find_omp_clause (clauses, OMP_CLAUSE_LASTPRIVATE);
2507 if (clauses == NULL)
2508 {
2509 /* If this was a workshare clause, see if it had been combined
2510 with its parallel. In that case, look for the clauses on the
2511 parallel statement itself. */
2512 if (is_parallel_ctx (ctx))
2513 return;
2514
2515 ctx = ctx->outer;
2516 if (ctx == NULL || !is_parallel_ctx (ctx))
2517 return;
2518
2519 clauses = find_omp_clause (gimple_omp_parallel_clauses (ctx->stmt),
2520 OMP_CLAUSE_LASTPRIVATE);
2521 if (clauses == NULL)
2522 return;
2523 par_clauses = true;
2524 }
2525
2526 if (predicate)
2527 {
2528 gimple stmt;
2529 tree label_true, arm1, arm2;
2530
2531 label = create_artificial_label (UNKNOWN_LOCATION);
2532 label_true = create_artificial_label (UNKNOWN_LOCATION);
2533 arm1 = TREE_OPERAND (predicate, 0);
2534 arm2 = TREE_OPERAND (predicate, 1);
2535 gimplify_expr (&arm1, stmt_list, NULL, is_gimple_val, fb_rvalue);
2536 gimplify_expr (&arm2, stmt_list, NULL, is_gimple_val, fb_rvalue);
2537 stmt = gimple_build_cond (TREE_CODE (predicate), arm1, arm2,
2538 label_true, label);
2539 gimple_seq_add_stmt (stmt_list, stmt);
2540 gimple_seq_add_stmt (stmt_list, gimple_build_label (label_true));
2541 }
2542
2543 for (c = clauses; c ;)
2544 {
2545 tree var, new_var;
2546
2547 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE)
2548 {
2549 var = OMP_CLAUSE_DECL (c);
2550 new_var = lookup_decl (var, ctx);
2551
2552 if (OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c))
2553 {
2554 lower_omp (OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c), ctx);
2555 gimple_seq_add_seq (stmt_list,
2556 OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c));
2557 }
2558 OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c) = NULL;
2559
2560 x = build_outer_var_ref (var, ctx);
2561 if (is_reference (var))
2562 new_var = build_fold_indirect_ref (new_var);
2563 x = lang_hooks.decls.omp_clause_assign_op (c, x, new_var);
2564 gimplify_and_add (x, stmt_list);
2565 }
2566 c = OMP_CLAUSE_CHAIN (c);
2567 if (c == NULL && !par_clauses)
2568 {
2569 /* If this was a workshare clause, see if it had been combined
2570 with its parallel. In that case, continue looking for the
2571 clauses also on the parallel statement itself. */
2572 if (is_parallel_ctx (ctx))
2573 break;
2574
2575 ctx = ctx->outer;
2576 if (ctx == NULL || !is_parallel_ctx (ctx))
2577 break;
2578
2579 c = find_omp_clause (gimple_omp_parallel_clauses (ctx->stmt),
2580 OMP_CLAUSE_LASTPRIVATE);
2581 par_clauses = true;
2582 }
2583 }
2584
2585 if (label)
2586 gimple_seq_add_stmt (stmt_list, gimple_build_label (label));
2587 }
2588
2589
2590 /* Generate code to implement the REDUCTION clauses. */
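/* Sketch of the combining code emitted here: a single scalar clause such
   as reduction (+:sum) becomes an atomic update of the original variable
   with the thread's partial result, while multiple clauses (or
   placeholder/array reductions) are merged between calls to
   GOMP_atomic_start () and GOMP_atomic_end ().  */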
2591
2592 static void
2593 lower_reduction_clauses (tree clauses, gimple_seq *stmt_seqp, omp_context *ctx)
2594 {
2595 gimple_seq sub_seq = NULL;
2596 gimple stmt;
2597 tree x, c;
2598 int count = 0;
2599
2600 /* First see if there is exactly one reduction clause. Use OMP_ATOMIC
2601 update in that case, otherwise use a lock. */
2602 for (c = clauses; c && count < 2; c = OMP_CLAUSE_CHAIN (c))
2603 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION)
2604 {
2605 if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
2606 {
2607 /* Never use OMP_ATOMIC for array reductions. */
2608 count = -1;
2609 break;
2610 }
2611 count++;
2612 }
2613
2614 if (count == 0)
2615 return;
2616
2617 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
2618 {
2619 tree var, ref, new_var;
2620 enum tree_code code;
2621
2622 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_REDUCTION)
2623 continue;
2624
2625 var = OMP_CLAUSE_DECL (c);
2626 new_var = lookup_decl (var, ctx);
2627 if (is_reference (var))
2628 new_var = build_fold_indirect_ref (new_var);
2629 ref = build_outer_var_ref (var, ctx);
2630 code = OMP_CLAUSE_REDUCTION_CODE (c);
2631
2632 /* reduction(-:var) sums up the partial results, so it acts
2633 identically to reduction(+:var). */
2634 if (code == MINUS_EXPR)
2635 code = PLUS_EXPR;
2636
2637 if (count == 1)
2638 {
2639 tree addr = build_fold_addr_expr (ref);
2640
2641 addr = save_expr (addr);
2642 ref = build1 (INDIRECT_REF, TREE_TYPE (TREE_TYPE (addr)), addr);
2643 x = fold_build2 (code, TREE_TYPE (ref), ref, new_var);
2644 x = build2 (OMP_ATOMIC, void_type_node, addr, x);
2645 gimplify_and_add (x, stmt_seqp);
2646 return;
2647 }
2648
2649 if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
2650 {
2651 tree placeholder = OMP_CLAUSE_REDUCTION_PLACEHOLDER (c);
2652
2653 if (is_reference (var))
2654 ref = build_fold_addr_expr (ref);
2655 SET_DECL_VALUE_EXPR (placeholder, ref);
2656 DECL_HAS_VALUE_EXPR_P (placeholder) = 1;
2657 lower_omp (OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c), ctx);
2658 gimple_seq_add_seq (&sub_seq, OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c));
2659 OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c) = NULL;
2660 OMP_CLAUSE_REDUCTION_PLACEHOLDER (c) = NULL;
2661 }
2662 else
2663 {
2664 x = build2 (code, TREE_TYPE (ref), ref, new_var);
2665 ref = build_outer_var_ref (var, ctx);
2666 gimplify_assign (ref, x, &sub_seq);
2667 }
2668 }
2669
2670 stmt = gimple_build_call (built_in_decls[BUILT_IN_GOMP_ATOMIC_START], 0);
2671 gimple_seq_add_stmt (stmt_seqp, stmt);
2672
2673 gimple_seq_add_seq (stmt_seqp, sub_seq);
2674
2675 stmt = gimple_build_call (built_in_decls[BUILT_IN_GOMP_ATOMIC_END], 0);
2676 gimple_seq_add_stmt (stmt_seqp, stmt);
2677 }
2678
2679
2680 /* Generate code to implement the COPYPRIVATE clauses. */
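/* In outline: the thread that executed the single region stores each
   COPYPRIVATE value (or its address, for by-reference fields) into the
   .omp_copy_s record via SLIST, and the statements in RLIST then assign
   from the received record back into every thread's own variable.  */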
2681
2682 static void
2683 lower_copyprivate_clauses (tree clauses, gimple_seq *slist, gimple_seq *rlist,
2684 omp_context *ctx)
2685 {
2686 tree c;
2687
2688 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
2689 {
2690 tree var, ref, x;
2691 bool by_ref;
2692
2693 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_COPYPRIVATE)
2694 continue;
2695
2696 var = OMP_CLAUSE_DECL (c);
2697 by_ref = use_pointer_for_field (var, NULL);
2698
2699 ref = build_sender_ref (var, ctx);
2700 x = lookup_decl_in_outer_ctx (var, ctx);
2701 x = by_ref ? build_fold_addr_expr (x) : x;
2702 gimplify_assign (ref, x, slist);
2703
2704 ref = build_receiver_ref (var, by_ref, ctx);
2705 if (is_reference (var))
2706 {
2707 ref = build_fold_indirect_ref (ref);
2708 var = build_fold_indirect_ref (var);
2709 }
2710 x = lang_hooks.decls.omp_clause_assign_op (c, var, ref);
2711 gimplify_and_add (x, rlist);
2712 }
2713 }
2714
2715
2716 /* Generate code to implement the FIRSTPRIVATE, COPYIN, LASTPRIVATE and
2717    REDUCTION clauses from the sender (aka parent) side.  */
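/* As a rough picture, for each clause this decides whether the parent
   must store the value into the outgoing .omp_data_o record before the
   region (DO_IN), read it back afterwards (DO_OUT), or both; e.g.
   FIRSTPRIVATE is in-only, while a scalar LASTPRIVATE is typically
   out-only.  */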
2718
2719 static void
2720 lower_send_clauses (tree clauses, gimple_seq *ilist, gimple_seq *olist,
2721 omp_context *ctx)
2722 {
2723 tree c;
2724
2725 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
2726 {
2727 tree val, ref, x, var;
2728 bool by_ref, do_in = false, do_out = false;
2729
2730 switch (OMP_CLAUSE_CODE (c))
2731 {
2732 case OMP_CLAUSE_PRIVATE:
2733 if (OMP_CLAUSE_PRIVATE_OUTER_REF (c))
2734 break;
2735 continue;
2736 case OMP_CLAUSE_FIRSTPRIVATE:
2737 case OMP_CLAUSE_COPYIN:
2738 case OMP_CLAUSE_LASTPRIVATE:
2739 case OMP_CLAUSE_REDUCTION:
2740 break;
2741 default:
2742 continue;
2743 }
2744
2745 val = OMP_CLAUSE_DECL (c);
2746 var = lookup_decl_in_outer_ctx (val, ctx);
2747
2748 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_COPYIN
2749 && is_global_var (var))
2750 continue;
2751 if (is_variable_sized (val))
2752 continue;
2753 by_ref = use_pointer_for_field (val, NULL);
2754
2755 switch (OMP_CLAUSE_CODE (c))
2756 {
2757 case OMP_CLAUSE_PRIVATE:
2758 case OMP_CLAUSE_FIRSTPRIVATE:
2759 case OMP_CLAUSE_COPYIN:
2760 do_in = true;
2761 break;
2762
2763 case OMP_CLAUSE_LASTPRIVATE:
2764 if (by_ref || is_reference (val))
2765 {
2766 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
2767 continue;
2768 do_in = true;
2769 }
2770 else
2771 {
2772 do_out = true;
2773 if (lang_hooks.decls.omp_private_outer_ref (val))
2774 do_in = true;
2775 }
2776 break;
2777
2778 case OMP_CLAUSE_REDUCTION:
2779 do_in = true;
2780 do_out = !(by_ref || is_reference (val));
2781 break;
2782
2783 default:
2784 gcc_unreachable ();
2785 }
2786
2787 if (do_in)
2788 {
2789 ref = build_sender_ref (val, ctx);
2790 x = by_ref ? build_fold_addr_expr (var) : var;
2791 gimplify_assign (ref, x, ilist);
2792 if (is_task_ctx (ctx))
2793 DECL_ABSTRACT_ORIGIN (TREE_OPERAND (ref, 1)) = NULL;
2794 }
2795
2796 if (do_out)
2797 {
2798 ref = build_sender_ref (val, ctx);
2799 gimplify_assign (var, ref, olist);
2800 }
2801 }
2802 }
2803
2804 /* Generate code to implement SHARED from the sender (aka parent)
2805 side. This is trickier, since GIMPLE_OMP_PARALLEL_CLAUSES doesn't
2806 list things that got automatically shared. */
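/* Informally: for every field of the communication record the parent
   stores the shared variable's value, or its address when
   use_pointer_for_field says so, into .omp_data_o before the region
   (ILIST), and for by-value fields copies the possibly updated value
   back out afterwards (OLIST).  */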
2807
2808 static void
2809 lower_send_shared_vars (gimple_seq *ilist, gimple_seq *olist, omp_context *ctx)
2810 {
2811 tree var, ovar, nvar, f, x, record_type;
2812
2813 if (ctx->record_type == NULL)
2814 return;
2815
2816 record_type = ctx->srecord_type ? ctx->srecord_type : ctx->record_type;
2817 for (f = TYPE_FIELDS (record_type); f ; f = TREE_CHAIN (f))
2818 {
2819 ovar = DECL_ABSTRACT_ORIGIN (f);
2820 nvar = maybe_lookup_decl (ovar, ctx);
2821 if (!nvar || !DECL_HAS_VALUE_EXPR_P (nvar))
2822 continue;
2823
2824       /* If CTX is a nested parallel directive, find the immediately
2825 enclosing parallel or workshare construct that contains a
2826 mapping for OVAR. */
2827 var = lookup_decl_in_outer_ctx (ovar, ctx);
2828
2829 if (use_pointer_for_field (ovar, ctx))
2830 {
2831 x = build_sender_ref (ovar, ctx);
2832 var = build_fold_addr_expr (var);
2833 gimplify_assign (x, var, ilist);
2834 }
2835 else
2836 {
2837 x = build_sender_ref (ovar, ctx);
2838 gimplify_assign (x, var, ilist);
2839
2840 if (!TREE_READONLY (var)
2841 /* We don't need to receive a new reference to a result
2842 or parm decl. In fact we may not store to it as we will
2843 invalidate any pending RSO and generate wrong gimple
2844 during inlining. */
2845 && !((TREE_CODE (var) == RESULT_DECL
2846 || TREE_CODE (var) == PARM_DECL)
2847 && DECL_BY_REFERENCE (var)))
2848 {
2849 x = build_sender_ref (ovar, ctx);
2850 gimplify_assign (var, x, olist);
2851 }
2852 }
2853 }
2854 }
2855
2856
2857 /* A convenience function to build an empty GIMPLE_COND with just the
2858 condition. */
2859
2860 static gimple
2861 gimple_build_cond_empty (tree cond)
2862 {
2863 enum tree_code pred_code;
2864 tree lhs, rhs;
2865
2866 gimple_cond_get_ops_from_tree (cond, &pred_code, &lhs, &rhs);
2867 return gimple_build_cond (pred_code, lhs, rhs, NULL_TREE, NULL_TREE);
2868 }
2869
2870
2871 /* Build the function calls to GOMP_parallel_start etc to actually
2872 generate the parallel operation. REGION is the parallel region
2873    being expanded.  BB is the block where the code is to be inserted.  WS_ARGS
2874 will be set if this is a call to a combined parallel+workshare
2875 construct, it contains the list of additional arguments needed by
2876 the workshare construct. */
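/* In the plain (non-combined) case the emitted sequence amounts to

     GOMP_parallel_start (child_fn, &.omp_data_o, num_threads);
     child_fn (&.omp_data_o);
     GOMP_parallel_end ();

   where NUM_THREADS is 0 unless an if/num_threads clause determines it;
   combined parallel+workshare regions instead call one of the
   GOMP_parallel_*_start entry points with the extra WS_ARGS.  */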
2877
2878 static void
2879 expand_parallel_call (struct omp_region *region, basic_block bb,
2880 gimple entry_stmt, tree ws_args)
2881 {
2882 tree t, t1, t2, val, cond, c, clauses;
2883 gimple_stmt_iterator gsi;
2884 gimple stmt;
2885 int start_ix;
2886
2887 clauses = gimple_omp_parallel_clauses (entry_stmt);
2888
2889 /* Determine what flavor of GOMP_parallel_start we will be
2890 emitting. */
2891 start_ix = BUILT_IN_GOMP_PARALLEL_START;
2892 if (is_combined_parallel (region))
2893 {
2894 switch (region->inner->type)
2895 {
2896 case GIMPLE_OMP_FOR:
2897 gcc_assert (region->inner->sched_kind != OMP_CLAUSE_SCHEDULE_AUTO);
2898 start_ix = BUILT_IN_GOMP_PARALLEL_LOOP_STATIC_START
2899 + (region->inner->sched_kind
2900 == OMP_CLAUSE_SCHEDULE_RUNTIME
2901 ? 3 : region->inner->sched_kind);
2902 break;
2903 case GIMPLE_OMP_SECTIONS:
2904 start_ix = BUILT_IN_GOMP_PARALLEL_SECTIONS_START;
2905 break;
2906 default:
2907 gcc_unreachable ();
2908 }
2909 }
2910
2911 /* By default, the value of NUM_THREADS is zero (selected at run time)
2912 and there is no conditional. */
2913 cond = NULL_TREE;
2914 val = build_int_cst (unsigned_type_node, 0);
2915
2916 c = find_omp_clause (clauses, OMP_CLAUSE_IF);
2917 if (c)
2918 cond = OMP_CLAUSE_IF_EXPR (c);
2919
2920 c = find_omp_clause (clauses, OMP_CLAUSE_NUM_THREADS);
2921 if (c)
2922 val = OMP_CLAUSE_NUM_THREADS_EXPR (c);
2923
2924 /* Ensure 'val' is of the correct type. */
2925 val = fold_convert (unsigned_type_node, val);
2926
2927 /* If we found the clause 'if (cond)', build either
2928 (cond != 0) or (cond ? val : 1u). */
2929 if (cond)
2930 {
2931 gimple_stmt_iterator gsi;
2932
2933 cond = gimple_boolify (cond);
2934
2935 if (integer_zerop (val))
2936 val = fold_build2 (EQ_EXPR, unsigned_type_node, cond,
2937 build_int_cst (TREE_TYPE (cond), 0));
2938 else
2939 {
2940 basic_block cond_bb, then_bb, else_bb;
2941 edge e, e_then, e_else;
2942 tree tmp_then, tmp_else, tmp_join, tmp_var;
2943
2944 tmp_var = create_tmp_var (TREE_TYPE (val), NULL);
2945 if (gimple_in_ssa_p (cfun))
2946 {
2947 tmp_then = make_ssa_name (tmp_var, NULL);
2948 tmp_else = make_ssa_name (tmp_var, NULL);
2949 tmp_join = make_ssa_name (tmp_var, NULL);
2950 }
2951 else
2952 {
2953 tmp_then = tmp_var;
2954 tmp_else = tmp_var;
2955 tmp_join = tmp_var;
2956 }
2957
2958 e = split_block (bb, NULL);
2959 cond_bb = e->src;
2960 bb = e->dest;
2961 remove_edge (e);
2962
2963 then_bb = create_empty_bb (cond_bb);
2964 else_bb = create_empty_bb (then_bb);
2965 set_immediate_dominator (CDI_DOMINATORS, then_bb, cond_bb);
2966 set_immediate_dominator (CDI_DOMINATORS, else_bb, cond_bb);
2967
2968 stmt = gimple_build_cond_empty (cond);
2969 gsi = gsi_start_bb (cond_bb);
2970 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
2971
2972 gsi = gsi_start_bb (then_bb);
2973 stmt = gimple_build_assign (tmp_then, val);
2974 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
2975
2976 gsi = gsi_start_bb (else_bb);
2977 stmt = gimple_build_assign
2978 (tmp_else, build_int_cst (unsigned_type_node, 1));
2979 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
2980
2981 make_edge (cond_bb, then_bb, EDGE_TRUE_VALUE);
2982 make_edge (cond_bb, else_bb, EDGE_FALSE_VALUE);
2983 e_then = make_edge (then_bb, bb, EDGE_FALLTHRU);
2984 e_else = make_edge (else_bb, bb, EDGE_FALLTHRU);
2985
2986 if (gimple_in_ssa_p (cfun))
2987 {
2988 gimple phi = create_phi_node (tmp_join, bb);
2989 SSA_NAME_DEF_STMT (tmp_join) = phi;
2990 add_phi_arg (phi, tmp_then, e_then);
2991 add_phi_arg (phi, tmp_else, e_else);
2992 }
2993
2994 val = tmp_join;
2995 }
2996
2997 gsi = gsi_start_bb (bb);
2998 val = force_gimple_operand_gsi (&gsi, val, true, NULL_TREE,
2999 false, GSI_CONTINUE_LINKING);
3000 }
3001
3002 gsi = gsi_last_bb (bb);
3003 t = gimple_omp_parallel_data_arg (entry_stmt);
3004 if (t == NULL)
3005 t1 = null_pointer_node;
3006 else
3007 t1 = build_fold_addr_expr (t);
3008 t2 = build_fold_addr_expr (gimple_omp_parallel_child_fn (entry_stmt));
3009
3010 if (ws_args)
3011 {
3012 tree args = tree_cons (NULL, t2,
3013 tree_cons (NULL, t1,
3014 tree_cons (NULL, val, ws_args)));
3015 t = build_function_call_expr (built_in_decls[start_ix], args);
3016 }
3017 else
3018 t = build_call_expr (built_in_decls[start_ix], 3, t2, t1, val);
3019
3020 force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3021 false, GSI_CONTINUE_LINKING);
3022
3023 t = gimple_omp_parallel_data_arg (entry_stmt);
3024 if (t == NULL)
3025 t = null_pointer_node;
3026 else
3027 t = build_fold_addr_expr (t);
3028 t = build_call_expr (gimple_omp_parallel_child_fn (entry_stmt), 1, t);
3029 force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3030 false, GSI_CONTINUE_LINKING);
3031
3032 t = build_call_expr (built_in_decls[BUILT_IN_GOMP_PARALLEL_END], 0);
3033 force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3034 false, GSI_CONTINUE_LINKING);
3035 }
3036
3037
3038 /* Build the function call to GOMP_task to actually
3039    generate the task operation.  BB is the block where the code is to be inserted.  */
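/* The call built below has the shape

     GOMP_task (child_fn, &.omp_data_o, copy_fn, arg_size, arg_align,
                if_cond, flags);

   with FLAGS currently carrying only the untied bit.  */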
3040
3041 static void
3042 expand_task_call (basic_block bb, gimple entry_stmt)
3043 {
3044 tree t, t1, t2, t3, flags, cond, c, clauses;
3045 gimple_stmt_iterator gsi;
3046
3047 clauses = gimple_omp_task_clauses (entry_stmt);
3048
3049 c = find_omp_clause (clauses, OMP_CLAUSE_IF);
3050 if (c)
3051 cond = gimple_boolify (OMP_CLAUSE_IF_EXPR (c));
3052 else
3053 cond = boolean_true_node;
3054
3055 c = find_omp_clause (clauses, OMP_CLAUSE_UNTIED);
3056 flags = build_int_cst (unsigned_type_node, (c ? 1 : 0));
3057
3058 gsi = gsi_last_bb (bb);
3059 t = gimple_omp_task_data_arg (entry_stmt);
3060 if (t == NULL)
3061 t2 = null_pointer_node;
3062 else
3063 t2 = build_fold_addr_expr (t);
3064 t1 = build_fold_addr_expr (gimple_omp_task_child_fn (entry_stmt));
3065 t = gimple_omp_task_copy_fn (entry_stmt);
3066 if (t == NULL)
3067 t3 = null_pointer_node;
3068 else
3069 t3 = build_fold_addr_expr (t);
3070
3071 t = build_call_expr (built_in_decls[BUILT_IN_GOMP_TASK], 7, t1, t2, t3,
3072 gimple_omp_task_arg_size (entry_stmt),
3073 gimple_omp_task_arg_align (entry_stmt), cond, flags);
3074
3075 force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3076 false, GSI_CONTINUE_LINKING);
3077 }
3078
3079
3080 /* If exceptions are enabled, wrap the statements in BODY in a MUST_NOT_THROW
3081 catch handler and return it. This prevents programs from violating the
3082 structured block semantics with throws. */
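/* In effect the body becomes

     try { BODY } catch <MUST_NOT_THROW filter>

   where the filter runs lang_protect_cleanup_actions if the front end
   provides it, or __builtin_trap otherwise.  */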
3083
3084 static gimple_seq
3085 maybe_catch_exception (gimple_seq body)
3086 {
3087 gimple f, t;
3088
3089 if (!flag_exceptions)
3090 return body;
3091
3092 if (lang_protect_cleanup_actions)
3093 t = lang_protect_cleanup_actions ();
3094 else
3095 t = gimple_build_call (built_in_decls[BUILT_IN_TRAP], 0);
3096
3097 f = gimple_build_eh_filter (NULL, gimple_seq_alloc_with_stmt (t));
3098 gimple_eh_filter_set_must_not_throw (f, true);
3099
3100 t = gimple_build_try (body, gimple_seq_alloc_with_stmt (f),
3101 GIMPLE_TRY_CATCH);
3102
3103 return gimple_seq_alloc_with_stmt (t);
3104 }
3105
3106 /* Chain all the DECLs in LIST by their TREE_CHAIN fields. */
3107
3108 static tree
3109 list2chain (tree list)
3110 {
3111 tree t;
3112
3113 for (t = list; t; t = TREE_CHAIN (t))
3114 {
3115 tree var = TREE_VALUE (t);
3116 if (TREE_CHAIN (t))
3117 TREE_CHAIN (var) = TREE_VALUE (TREE_CHAIN (t));
3118 else
3119 TREE_CHAIN (var) = NULL_TREE;
3120 }
3121
3122 return list ? TREE_VALUE (list) : NULL_TREE;
3123 }
3124
3125
3126 /* Remove barriers in REGION->EXIT's block. Note that this is only
3127 valid for GIMPLE_OMP_PARALLEL regions. Since the end of a parallel region
3128    is an implicit barrier, a barrier that a workshare inside the
3129    GIMPLE_OMP_PARALLEL left at the end of the GIMPLE_OMP_PARALLEL region
3130    can now be removed.  */
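/* Concretely, in

     #pragma omp parallel
     {
       #pragma omp for
       ...                        <-- barrier at the end of the workshare
     }                            <-- implicit barrier of the parallel

   the workshare's trailing barrier is redundant, so its GIMPLE_OMP_RETURN
   is marked nowait below, unless queued tasks might still need access to
   addressable locals of the parallel, as explained in the body.  */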
3131
3132 static void
3133 remove_exit_barrier (struct omp_region *region)
3134 {
3135 gimple_stmt_iterator gsi;
3136 basic_block exit_bb;
3137 edge_iterator ei;
3138 edge e;
3139 gimple stmt;
3140 int any_addressable_vars = -1;
3141
3142 exit_bb = region->exit;
3143
3144 /* If the parallel region doesn't return, we don't have REGION->EXIT
3145 block at all. */
3146 if (! exit_bb)
3147 return;
3148
3149 /* The last insn in the block will be the parallel's GIMPLE_OMP_RETURN. The
3150 workshare's GIMPLE_OMP_RETURN will be in a preceding block. The kinds of
3151 statements that can appear in between are extremely limited -- no
3152 memory operations at all. Here, we allow nothing at all, so the
3153 only thing we allow to precede this GIMPLE_OMP_RETURN is a label. */
3154 gsi = gsi_last_bb (exit_bb);
3155 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_RETURN);
3156 gsi_prev (&gsi);
3157 if (!gsi_end_p (gsi) && gimple_code (gsi_stmt (gsi)) != GIMPLE_LABEL)
3158 return;
3159
3160 FOR_EACH_EDGE (e, ei, exit_bb->preds)
3161 {
3162 gsi = gsi_last_bb (e->src);
3163 if (gsi_end_p (gsi))
3164 continue;
3165 stmt = gsi_stmt (gsi);
3166 if (gimple_code (stmt) == GIMPLE_OMP_RETURN
3167 && !gimple_omp_return_nowait_p (stmt))
3168 {
3169 /* OpenMP 3.0 tasks unfortunately prevent this optimization
3170 in many cases. If there could be tasks queued, the barrier
3171 might be needed to let the tasks run before some local
3172 variable of the parallel that the task uses as shared
3173 runs out of scope. The task can be spawned either
3174 from within current function (this would be easy to check)
3175 or from some function it calls and gets passed an address
3176 of such a variable. */
3177 if (any_addressable_vars < 0)
3178 {
3179 gimple parallel_stmt = last_stmt (region->entry);
3180 tree child_fun = gimple_omp_parallel_child_fn (parallel_stmt);
3181 tree local_decls = DECL_STRUCT_FUNCTION (child_fun)->local_decls;
3182 tree block;
3183
3184 any_addressable_vars = 0;
3185 for (; local_decls; local_decls = TREE_CHAIN (local_decls))
3186 if (TREE_ADDRESSABLE (TREE_VALUE (local_decls)))
3187 {
3188 any_addressable_vars = 1;
3189 break;
3190 }
3191 for (block = gimple_block (stmt);
3192 !any_addressable_vars
3193 && block
3194 && TREE_CODE (block) == BLOCK;
3195 block = BLOCK_SUPERCONTEXT (block))
3196 {
3197 for (local_decls = BLOCK_VARS (block);
3198 local_decls;
3199 local_decls = TREE_CHAIN (local_decls))
3200 if (TREE_ADDRESSABLE (local_decls))
3201 {
3202 any_addressable_vars = 1;
3203 break;
3204 }
3205 if (block == gimple_block (parallel_stmt))
3206 break;
3207 }
3208 }
3209 if (!any_addressable_vars)
3210 gimple_omp_return_set_nowait (stmt);
3211 }
3212 }
3213 }
3214
3215 static void
3216 remove_exit_barriers (struct omp_region *region)
3217 {
3218 if (region->type == GIMPLE_OMP_PARALLEL)
3219 remove_exit_barrier (region);
3220
3221 if (region->inner)
3222 {
3223 region = region->inner;
3224 remove_exit_barriers (region);
3225 while (region->next)
3226 {
3227 region = region->next;
3228 remove_exit_barriers (region);
3229 }
3230 }
3231 }
3232
3233 /* Optimize omp_get_thread_num () and omp_get_num_threads ()
3234 calls. These can't be declared as const functions, but
3235 within one parallel body they are constant, so they can be
3236 transformed there into __builtin_omp_get_{thread_num,num_threads} ()
3237 which are declared const. Similarly for task body, except
3238 that in untied task omp_get_thread_num () can change at any task
3239 scheduling point. */
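/* I.e. a call such as

     n = omp_get_num_threads ();

   inside the outlined body is redirected to the equivalent
   __builtin_omp_get_num_threads (), which is declared const and can
   therefore be optimized within the region.  */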
3240
3241 static void
3242 optimize_omp_library_calls (gimple entry_stmt)
3243 {
3244 basic_block bb;
3245 gimple_stmt_iterator gsi;
3246 tree thr_num_id
3247 = DECL_ASSEMBLER_NAME (built_in_decls [BUILT_IN_OMP_GET_THREAD_NUM]);
3248 tree num_thr_id
3249 = DECL_ASSEMBLER_NAME (built_in_decls [BUILT_IN_OMP_GET_NUM_THREADS]);
3250 bool untied_task = (gimple_code (entry_stmt) == GIMPLE_OMP_TASK
3251 && find_omp_clause (gimple_omp_task_clauses (entry_stmt),
3252 OMP_CLAUSE_UNTIED) != NULL);
3253
3254 FOR_EACH_BB (bb)
3255 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
3256 {
3257 gimple call = gsi_stmt (gsi);
3258 tree decl;
3259
3260 if (is_gimple_call (call)
3261 && (decl = gimple_call_fndecl (call))
3262 && DECL_EXTERNAL (decl)
3263 && TREE_PUBLIC (decl)
3264 && DECL_INITIAL (decl) == NULL)
3265 {
3266 tree built_in;
3267
3268 if (DECL_NAME (decl) == thr_num_id)
3269 {
3270 /* In #pragma omp task untied omp_get_thread_num () can change
3271 during the execution of the task region. */
3272 if (untied_task)
3273 continue;
3274 built_in = built_in_decls [BUILT_IN_OMP_GET_THREAD_NUM];
3275 }
3276 else if (DECL_NAME (decl) == num_thr_id)
3277 built_in = built_in_decls [BUILT_IN_OMP_GET_NUM_THREADS];
3278 else
3279 continue;
3280
3281 if (DECL_ASSEMBLER_NAME (decl) != DECL_ASSEMBLER_NAME (built_in)
3282 || gimple_call_num_args (call) != 0)
3283 continue;
3284
3285 if (flag_exceptions && !TREE_NOTHROW (decl))
3286 continue;
3287
3288 if (TREE_CODE (TREE_TYPE (decl)) != FUNCTION_TYPE
3289 || TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (decl)))
3290 != TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (built_in))))
3291 continue;
3292
3293 gimple_call_set_fndecl (call, built_in);
3294 }
3295 }
3296 }
3297
3298 /* Expand the OpenMP parallel or task directive starting at REGION. */
3299
3300 static void
3301 expand_omp_taskreg (struct omp_region *region)
3302 {
3303 basic_block entry_bb, exit_bb, new_bb;
3304 struct function *child_cfun;
3305 tree child_fn, block, t, ws_args, *tp;
3306 tree save_current;
3307 gimple_stmt_iterator gsi;
3308 gimple entry_stmt, stmt;
3309 edge e;
3310
3311 entry_stmt = last_stmt (region->entry);
3312 child_fn = gimple_omp_taskreg_child_fn (entry_stmt);
3313 child_cfun = DECL_STRUCT_FUNCTION (child_fn);
3314   /* If this function has already been instrumented, make sure
3315 the child function isn't instrumented again. */
3316 child_cfun->after_tree_profile = cfun->after_tree_profile;
3317
3318 entry_bb = region->entry;
3319 exit_bb = region->exit;
3320
3321 if (is_combined_parallel (region))
3322 ws_args = region->ws_args;
3323 else
3324 ws_args = NULL_TREE;
3325
3326 if (child_cfun->cfg)
3327 {
3328 /* Due to inlining, it may happen that we have already outlined
3329 the region, in which case all we need to do is make the
3330 sub-graph unreachable and emit the parallel call. */
3331 edge entry_succ_e, exit_succ_e;
3332 gimple_stmt_iterator gsi;
3333
3334 entry_succ_e = single_succ_edge (entry_bb);
3335
3336 gsi = gsi_last_bb (entry_bb);
3337 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_PARALLEL
3338 || gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_TASK);
3339 gsi_remove (&gsi, true);
3340
3341 new_bb = entry_bb;
3342 if (exit_bb)
3343 {
3344 exit_succ_e = single_succ_edge (exit_bb);
3345 make_edge (new_bb, exit_succ_e->dest, EDGE_FALLTHRU);
3346 }
3347 remove_edge_and_dominated_blocks (entry_succ_e);
3348 }
3349 else
3350 {
3351 /* If the parallel region needs data sent from the parent
3352 function, then the very first statement (except possible
3353 tree profile counter updates) of the parallel body
3354 is a copy assignment .OMP_DATA_I = &.OMP_DATA_O. Since
3355 &.OMP_DATA_O is passed as an argument to the child function,
3356 we need to replace it with the argument as seen by the child
3357 function.
3358
3359 In most cases, this will end up being the identity assignment
3360 .OMP_DATA_I = .OMP_DATA_I. However, if the parallel body had
3361 a function call that has been inlined, the original PARM_DECL
3362 .OMP_DATA_I may have been converted into a different local
3363 	 variable, in which case we need to keep the assignment.  */
3364 if (gimple_omp_taskreg_data_arg (entry_stmt))
3365 {
3366 basic_block entry_succ_bb = single_succ (entry_bb);
3367 gimple_stmt_iterator gsi;
3368 tree arg, narg;
3369 gimple parcopy_stmt = NULL;
3370
3371 for (gsi = gsi_start_bb (entry_succ_bb); ; gsi_next (&gsi))
3372 {
3373 gimple stmt;
3374
3375 gcc_assert (!gsi_end_p (gsi));
3376 stmt = gsi_stmt (gsi);
3377 if (gimple_code (stmt) != GIMPLE_ASSIGN)
3378 continue;
3379
3380 if (gimple_num_ops (stmt) == 2)
3381 {
3382 tree arg = gimple_assign_rhs1 (stmt);
3383
3384 		  /* We ignore the subcode because we're
3385 effectively doing a STRIP_NOPS. */
3386
3387 if (TREE_CODE (arg) == ADDR_EXPR
3388 && TREE_OPERAND (arg, 0)
3389 == gimple_omp_taskreg_data_arg (entry_stmt))
3390 {
3391 parcopy_stmt = stmt;
3392 break;
3393 }
3394 }
3395 }
3396
3397 gcc_assert (parcopy_stmt != NULL);
3398 arg = DECL_ARGUMENTS (child_fn);
3399
3400 if (!gimple_in_ssa_p (cfun))
3401 {
3402 if (gimple_assign_lhs (parcopy_stmt) == arg)
3403 gsi_remove (&gsi, true);
3404 else
3405 {
3406 /* ?? Is setting the subcode really necessary ?? */
3407 gimple_omp_set_subcode (parcopy_stmt, TREE_CODE (arg));
3408 gimple_assign_set_rhs1 (parcopy_stmt, arg);
3409 }
3410 }
3411 else
3412 {
3413 /* If we are in ssa form, we must load the value from the default
3414 definition of the argument. That should not be defined now,
3415 since the argument is not used uninitialized. */
3416 gcc_assert (gimple_default_def (cfun, arg) == NULL);
3417 narg = make_ssa_name (arg, gimple_build_nop ());
3418 set_default_def (arg, narg);
3419 /* ?? Is setting the subcode really necessary ?? */
3420 gimple_omp_set_subcode (parcopy_stmt, TREE_CODE (narg));
3421 gimple_assign_set_rhs1 (parcopy_stmt, narg);
3422 update_stmt (parcopy_stmt);
3423 }
3424 }
3425
3426 /* Declare local variables needed in CHILD_CFUN. */
3427 block = DECL_INITIAL (child_fn);
3428 BLOCK_VARS (block) = list2chain (child_cfun->local_decls);
3429 /* The gimplifier could record temporaries in parallel/task block
3430 rather than in containing function's local_decls chain,
3431 which would mean cgraph missed finalizing them. Do it now. */
3432 for (t = BLOCK_VARS (block); t; t = TREE_CHAIN (t))
3433 if (TREE_CODE (t) == VAR_DECL
3434 && TREE_STATIC (t)
3435 && !DECL_EXTERNAL (t))
3436 varpool_finalize_decl (t);
3437 DECL_SAVED_TREE (child_fn) = NULL;
3438 gimple_set_body (child_fn, bb_seq (single_succ (entry_bb)));
3439 TREE_USED (block) = 1;
3440
3441 /* Reset DECL_CONTEXT on function arguments. */
3442 for (t = DECL_ARGUMENTS (child_fn); t; t = TREE_CHAIN (t))
3443 DECL_CONTEXT (t) = child_fn;
3444
3445 /* Split ENTRY_BB at GIMPLE_OMP_PARALLEL or GIMPLE_OMP_TASK,
3446 so that it can be moved to the child function. */
3447 gsi = gsi_last_bb (entry_bb);
3448 stmt = gsi_stmt (gsi);
3449 gcc_assert (stmt && (gimple_code (stmt) == GIMPLE_OMP_PARALLEL
3450 || gimple_code (stmt) == GIMPLE_OMP_TASK));
3451 gsi_remove (&gsi, true);
3452 e = split_block (entry_bb, stmt);
3453 entry_bb = e->dest;
3454 single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
3455
3456 /* Convert GIMPLE_OMP_RETURN into a RETURN_EXPR. */
3457 if (exit_bb)
3458 {
3459 gsi = gsi_last_bb (exit_bb);
3460 gcc_assert (!gsi_end_p (gsi)
3461 && gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_RETURN);
3462 stmt = gimple_build_return (NULL);
3463 gsi_insert_after (&gsi, stmt, GSI_SAME_STMT);
3464 gsi_remove (&gsi, true);
3465 }
3466
3467 /* Move the parallel region into CHILD_CFUN. */
3468
3469 if (gimple_in_ssa_p (cfun))
3470 {
3471 push_cfun (child_cfun);
3472 init_tree_ssa (child_cfun);
3473 init_ssa_operands ();
3474 cfun->gimple_df->in_ssa_p = true;
3475 pop_cfun ();
3476 block = NULL_TREE;
3477 }
3478 else
3479 block = gimple_block (entry_stmt);
3480
3481 new_bb = move_sese_region_to_fn (child_cfun, entry_bb, exit_bb, block);
3482 if (exit_bb)
3483 single_succ_edge (new_bb)->flags = EDGE_FALLTHRU;
3484
3485 /* Remove non-local VAR_DECLs from child_cfun->local_decls list. */
3486 for (tp = &child_cfun->local_decls; *tp; )
3487 if (DECL_CONTEXT (TREE_VALUE (*tp)) != cfun->decl)
3488 tp = &TREE_CHAIN (*tp);
3489 else
3490 *tp = TREE_CHAIN (*tp);
3491
3492 /* Inform the callgraph about the new function. */
3493 DECL_STRUCT_FUNCTION (child_fn)->curr_properties
3494 = cfun->curr_properties;
3495 cgraph_add_new_function (child_fn, true);
3496
3497 /* Fix the callgraph edges for child_cfun. Those for cfun will be
3498 fixed in a following pass. */
3499 push_cfun (child_cfun);
3500 save_current = current_function_decl;
3501 current_function_decl = child_fn;
3502 if (optimize)
3503 optimize_omp_library_calls (entry_stmt);
3504 rebuild_cgraph_edges ();
3505
3506 /* Some EH regions might become dead, see PR34608. If
3507 pass_cleanup_cfg isn't the first pass to happen with the
3508 new child, these dead EH edges might cause problems.
3509 Clean them up now. */
3510 if (flag_exceptions)
3511 {
3512 basic_block bb;
3513 bool changed = false;
3514
3515 FOR_EACH_BB (bb)
3516 changed |= gimple_purge_dead_eh_edges (bb);
3517 if (changed)
3518 cleanup_tree_cfg ();
3519 }
3520 if (gimple_in_ssa_p (cfun))
3521 update_ssa (TODO_update_ssa);
3522 current_function_decl = save_current;
3523 pop_cfun ();
3524 }
3525
3526 /* Emit a library call to launch the children threads. */
3527 if (gimple_code (entry_stmt) == GIMPLE_OMP_PARALLEL)
3528 expand_parallel_call (region, new_bb, entry_stmt, ws_args);
3529 else
3530 expand_task_call (new_bb, entry_stmt);
3531 update_ssa (TODO_update_ssa_only_virtuals);
3532 }
3533
3534
3535 /* A subroutine of expand_omp_for. Generate code for a parallel
3536 loop with any schedule. Given parameters:
3537
3538 for (V = N1; V cond N2; V += STEP) BODY;
3539
3540 where COND is "<" or ">", we generate pseudocode
3541
3542 more = GOMP_loop_foo_start (N1, N2, STEP, CHUNK, &istart0, &iend0);
3543 if (more) goto L0; else goto L3;
3544 L0:
3545 V = istart0;
3546 iend = iend0;
3547 L1:
3548 BODY;
3549 V += STEP;
3550 if (V cond iend) goto L1; else goto L2;
3551 L2:
3552 if (GOMP_loop_foo_next (&istart0, &iend0)) goto L0; else goto L3;
3553 L3:
3554
3555 If this is a combined omp parallel loop, instead of the call to
3556 GOMP_loop_foo_start, we call GOMP_loop_foo_next.
3557
3558 For collapsed loops, given parameters:
3559 collapse(3)
3560 for (V1 = N11; V1 cond1 N12; V1 += STEP1)
3561 for (V2 = N21; V2 cond2 N22; V2 += STEP2)
3562 for (V3 = N31; V3 cond3 N32; V3 += STEP3)
3563 BODY;
3564
3565 we generate pseudocode
3566
3567 if (cond3 is <)
3568 adj = STEP3 - 1;
3569 else
3570 adj = STEP3 + 1;
3571 count3 = (adj + N32 - N31) / STEP3;
3572 if (cond2 is <)
3573 adj = STEP2 - 1;
3574 else
3575 adj = STEP2 + 1;
3576 count2 = (adj + N22 - N21) / STEP2;
3577 if (cond1 is <)
3578 adj = STEP1 - 1;
3579 else
3580 adj = STEP1 + 1;
3581 count1 = (adj + N12 - N11) / STEP1;
3582 count = count1 * count2 * count3;
3583 more = GOMP_loop_foo_start (0, count, 1, CHUNK, &istart0, &iend0);
3584 if (more) goto L0; else goto L3;
3585 L0:
3586 V = istart0;
3587 T = V;
3588 V3 = N31 + (T % count3) * STEP3;
3589 T = T / count3;
3590 V2 = N21 + (T % count2) * STEP2;
3591 T = T / count2;
3592 V1 = N11 + T * STEP1;
3593 iend = iend0;
3594 L1:
3595 BODY;
3596 V += 1;
3597 if (V < iend) goto L10; else goto L2;
3598 L10:
3599 V3 += STEP3;
3600 if (V3 cond3 N32) goto L1; else goto L11;
3601 L11:
3602 V3 = N31;
3603 V2 += STEP2;
3604 if (V2 cond2 N22) goto L1; else goto L12;
3605 L12:
3606 V2 = N21;
3607 V1 += STEP1;
3608 goto L1;
3609 L2:
3610 if (GOMP_loop_foo_next (&istart0, &iend0)) goto L0; else goto L3;
3611 L3:
3612
3613 */
3614
3615 static void
3616 expand_omp_for_generic (struct omp_region *region,
3617 struct omp_for_data *fd,
3618 enum built_in_function start_fn,
3619 enum built_in_function next_fn)
3620 {
3621 tree type, istart0, iend0, iend;
3622 tree t, vmain, vback, bias = NULL_TREE;
3623 basic_block entry_bb, cont_bb, exit_bb, l0_bb, l1_bb, collapse_bb;
3624 basic_block l2_bb = NULL, l3_bb = NULL;
3625 gimple_stmt_iterator gsi;
3626 gimple stmt;
3627 bool in_combined_parallel = is_combined_parallel (region);
3628 bool broken_loop = region->cont == NULL;
3629 edge e, ne;
3630 tree *counts = NULL;
3631 int i;
3632
3633 gcc_assert (!broken_loop || !in_combined_parallel);
3634 gcc_assert (fd->iter_type == long_integer_type_node
3635 || !in_combined_parallel);
3636
3637 type = TREE_TYPE (fd->loop.v);
3638 istart0 = create_tmp_var (fd->iter_type, ".istart0");
3639 iend0 = create_tmp_var (fd->iter_type, ".iend0");
3640 TREE_ADDRESSABLE (istart0) = 1;
3641 TREE_ADDRESSABLE (iend0) = 1;
3642 if (gimple_in_ssa_p (cfun))
3643 {
3644 add_referenced_var (istart0);
3645 add_referenced_var (iend0);
3646 }
3647
3648 /* See if we need to bias by LLONG_MIN. */
3649 if (fd->iter_type == long_long_unsigned_type_node
3650 && TREE_CODE (type) == INTEGER_TYPE
3651 && !TYPE_UNSIGNED (type))
3652 {
3653 tree n1, n2;
3654
3655 if (fd->loop.cond_code == LT_EXPR)
3656 {
3657 n1 = fd->loop.n1;
3658 n2 = fold_build2 (PLUS_EXPR, type, fd->loop.n2, fd->loop.step);
3659 }
3660 else
3661 {
3662 n1 = fold_build2 (MINUS_EXPR, type, fd->loop.n2, fd->loop.step);
3663 n2 = fd->loop.n1;
3664 }
3665 if (TREE_CODE (n1) != INTEGER_CST
3666 || TREE_CODE (n2) != INTEGER_CST
3667 || ((tree_int_cst_sgn (n1) < 0) ^ (tree_int_cst_sgn (n2) < 0)))
3668 bias = fold_convert (fd->iter_type, TYPE_MIN_VALUE (type));
3669 }
3670
3671 entry_bb = region->entry;
3672 cont_bb = region->cont;
3673 collapse_bb = NULL;
3674 gcc_assert (EDGE_COUNT (entry_bb->succs) == 2);
3675 gcc_assert (broken_loop
3676 || BRANCH_EDGE (entry_bb)->dest == FALLTHRU_EDGE (cont_bb)->dest);
3677 l0_bb = split_edge (FALLTHRU_EDGE (entry_bb));
3678 l1_bb = single_succ (l0_bb);
3679 if (!broken_loop)
3680 {
3681 l2_bb = create_empty_bb (cont_bb);
3682 gcc_assert (BRANCH_EDGE (cont_bb)->dest == l1_bb);
3683 gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
3684 }
3685 else
3686 l2_bb = NULL;
3687 l3_bb = BRANCH_EDGE (entry_bb)->dest;
3688 exit_bb = region->exit;
3689
3690 gsi = gsi_last_bb (entry_bb);
3691
3692 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
3693 if (fd->collapse > 1)
3694 {
3695       /* Collapsed loops need work for expansion in SSA form.  */
3696 gcc_assert (!gimple_in_ssa_p (cfun));
3697 counts = (tree *) alloca (fd->collapse * sizeof (tree));
3698 for (i = 0; i < fd->collapse; i++)
3699 {
3700 tree itype = TREE_TYPE (fd->loops[i].v);
3701
3702 if (POINTER_TYPE_P (itype))
3703 itype = lang_hooks.types.type_for_size (TYPE_PRECISION (itype), 0);
3704 t = build_int_cst (itype, (fd->loops[i].cond_code == LT_EXPR
3705 ? -1 : 1));
3706 t = fold_build2 (PLUS_EXPR, itype,
3707 fold_convert (itype, fd->loops[i].step), t);
3708 t = fold_build2 (PLUS_EXPR, itype, t,
3709 fold_convert (itype, fd->loops[i].n2));
3710 t = fold_build2 (MINUS_EXPR, itype, t,
3711 fold_convert (itype, fd->loops[i].n1));
3712 if (TYPE_UNSIGNED (itype) && fd->loops[i].cond_code == GT_EXPR)
3713 t = fold_build2 (TRUNC_DIV_EXPR, itype,
3714 fold_build1 (NEGATE_EXPR, itype, t),
3715 fold_build1 (NEGATE_EXPR, itype,
3716 fold_convert (itype,
3717 fd->loops[i].step)));
3718 else
3719 t = fold_build2 (TRUNC_DIV_EXPR, itype, t,
3720 fold_convert (itype, fd->loops[i].step));
3721 t = fold_convert (type, t);
3722 if (TREE_CODE (t) == INTEGER_CST)
3723 counts[i] = t;
3724 else
3725 {
3726 counts[i] = create_tmp_var (type, ".count");
3727 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3728 true, GSI_SAME_STMT);
3729 stmt = gimple_build_assign (counts[i], t);
3730 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
3731 }
3732 if (SSA_VAR_P (fd->loop.n2))
3733 {
3734 if (i == 0)
3735 t = counts[0];
3736 else
3737 {
3738 t = fold_build2 (MULT_EXPR, type, fd->loop.n2, counts[i]);
3739 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3740 true, GSI_SAME_STMT);
3741 }
3742 stmt = gimple_build_assign (fd->loop.n2, t);
3743 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
3744 }
3745 }
3746 }
3747 if (in_combined_parallel)
3748 {
3749 /* In a combined parallel loop, emit a call to
3750 GOMP_loop_foo_next. */
3751 t = build_call_expr (built_in_decls[next_fn], 2,
3752 build_fold_addr_expr (istart0),
3753 build_fold_addr_expr (iend0));
3754 }
3755 else
3756 {
3757 tree t0, t1, t2, t3, t4;
3758 /* If this is not a combined parallel loop, emit a call to
3759 GOMP_loop_foo_start in ENTRY_BB. */
3760 t4 = build_fold_addr_expr (iend0);
3761 t3 = build_fold_addr_expr (istart0);
3762 t2 = fold_convert (fd->iter_type, fd->loop.step);
3763 if (POINTER_TYPE_P (type)
3764 && TYPE_PRECISION (type) != TYPE_PRECISION (fd->iter_type))
3765 {
3766 /* Avoid casting pointers to an integer of a different size. */
3767 tree itype
3768 = lang_hooks.types.type_for_size (TYPE_PRECISION (type), 0);
3769 t1 = fold_convert (fd->iter_type, fold_convert (itype, fd->loop.n2));
3770 t0 = fold_convert (fd->iter_type, fold_convert (itype, fd->loop.n1));
3771 }
3772 else
3773 {
3774 t1 = fold_convert (fd->iter_type, fd->loop.n2);
3775 t0 = fold_convert (fd->iter_type, fd->loop.n1);
3776 }
3777 if (bias)
3778 {
3779 t1 = fold_build2 (PLUS_EXPR, fd->iter_type, t1, bias);
3780 t0 = fold_build2 (PLUS_EXPR, fd->iter_type, t0, bias);
3781 }
3782 if (fd->iter_type == long_integer_type_node)
3783 {
3784 if (fd->chunk_size)
3785 {
3786 t = fold_convert (fd->iter_type, fd->chunk_size);
3787 t = build_call_expr (built_in_decls[start_fn], 6,
3788 t0, t1, t2, t, t3, t4);
3789 }
3790 else
3791 t = build_call_expr (built_in_decls[start_fn], 5,
3792 t0, t1, t2, t3, t4);
3793 }
3794 else
3795 {
3796 tree t5;
3797 tree c_bool_type;
3798
3799 /* The GOMP_loop_ull_*start functions have an additional boolean
3800 argument, true for < loops and false for > loops.
3801 In Fortran, the C bool type can be different from
3802 boolean_type_node. */
3803 c_bool_type = TREE_TYPE (TREE_TYPE (built_in_decls[start_fn]));
3804 t5 = build_int_cst (c_bool_type,
3805 fd->loop.cond_code == LT_EXPR ? 1 : 0);
3806 if (fd->chunk_size)
3807 {
3808 t = fold_convert (fd->iter_type, fd->chunk_size);
3809 t = build_call_expr (built_in_decls[start_fn], 7,
3810 t5, t0, t1, t2, t, t3, t4);
3811 }
3812 else
3813 t = build_call_expr (built_in_decls[start_fn], 6,
3814 t5, t0, t1, t2, t3, t4);
3815 }
3816 }
3817 if (TREE_TYPE (t) != boolean_type_node)
3818 t = fold_build2 (NE_EXPR, boolean_type_node,
3819 t, build_int_cst (TREE_TYPE (t), 0));
3820 t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3821 true, GSI_SAME_STMT);
3822 gsi_insert_after (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT);
3823
3824 /* Remove the GIMPLE_OMP_FOR statement. */
3825 gsi_remove (&gsi, true);
3826
3827 /* Iteration setup for sequential loop goes in L0_BB. */
3828 gsi = gsi_start_bb (l0_bb);
3829 t = istart0;
3830 if (bias)
3831 t = fold_build2 (MINUS_EXPR, fd->iter_type, t, bias);
3832 if (POINTER_TYPE_P (type))
3833 t = fold_convert (lang_hooks.types.type_for_size (TYPE_PRECISION (type),
3834 0), t);
3835 t = fold_convert (type, t);
3836 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3837 false, GSI_CONTINUE_LINKING);
3838 stmt = gimple_build_assign (fd->loop.v, t);
3839 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3840
3841 t = iend0;
3842 if (bias)
3843 t = fold_build2 (MINUS_EXPR, fd->iter_type, t, bias);
3844 if (POINTER_TYPE_P (type))
3845 t = fold_convert (lang_hooks.types.type_for_size (TYPE_PRECISION (type),
3846 0), t);
3847 t = fold_convert (type, t);
3848 iend = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3849 false, GSI_CONTINUE_LINKING);
3850 if (fd->collapse > 1)
3851 {
3852 tree tem = create_tmp_var (type, ".tem");
3853
3854 stmt = gimple_build_assign (tem, fd->loop.v);
3855 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3856 for (i = fd->collapse - 1; i >= 0; i--)
3857 {
3858 tree vtype = TREE_TYPE (fd->loops[i].v), itype;
3859 itype = vtype;
3860 if (POINTER_TYPE_P (vtype))
3861 itype = lang_hooks.types.type_for_size (TYPE_PRECISION (vtype), 0);
3862 t = fold_build2 (TRUNC_MOD_EXPR, type, tem, counts[i]);
3863 t = fold_convert (itype, t);
3864 t = fold_build2 (MULT_EXPR, itype, t,
3865 fold_convert (itype, fd->loops[i].step));
3866 if (POINTER_TYPE_P (vtype))
3867 t = fold_build2 (POINTER_PLUS_EXPR, vtype,
3868 fd->loops[i].n1, fold_convert (sizetype, t));
3869 else
3870 t = fold_build2 (PLUS_EXPR, itype, fd->loops[i].n1, t);
3871 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3872 false, GSI_CONTINUE_LINKING);
3873 stmt = gimple_build_assign (fd->loops[i].v, t);
3874 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3875 if (i != 0)
3876 {
3877 t = fold_build2 (TRUNC_DIV_EXPR, type, tem, counts[i]);
3878 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3879 false, GSI_CONTINUE_LINKING);
3880 stmt = gimple_build_assign (tem, t);
3881 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3882 }
3883 }
3884 }
3885
3886 if (!broken_loop)
3887 {
3888 /* Code to control the increment and predicate for the sequential
3889 loop goes in the CONT_BB. */
3890 gsi = gsi_last_bb (cont_bb);
3891 stmt = gsi_stmt (gsi);
3892 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_CONTINUE);
3893 vmain = gimple_omp_continue_control_use (stmt);
3894 vback = gimple_omp_continue_control_def (stmt);
3895
3896 if (POINTER_TYPE_P (type))
3897 t = fold_build2 (POINTER_PLUS_EXPR, type, vmain,
3898 fold_convert (sizetype, fd->loop.step));
3899 else
3900 t = fold_build2 (PLUS_EXPR, type, vmain, fd->loop.step);
3901 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3902 true, GSI_SAME_STMT);
3903 stmt = gimple_build_assign (vback, t);
3904 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
3905
3906 t = build2 (fd->loop.cond_code, boolean_type_node, vback, iend);
3907 stmt = gimple_build_cond_empty (t);
3908 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
3909
3910 /* Remove GIMPLE_OMP_CONTINUE. */
3911 gsi_remove (&gsi, true);
3912
3913 if (fd->collapse > 1)
3914 {
3915 basic_block last_bb, bb;
3916
3917 last_bb = cont_bb;
3918 for (i = fd->collapse - 1; i >= 0; i--)
3919 {
3920 tree vtype = TREE_TYPE (fd->loops[i].v);
3921
3922 bb = create_empty_bb (last_bb);
3923 gsi = gsi_start_bb (bb);
3924
3925 if (i < fd->collapse - 1)
3926 {
3927 e = make_edge (last_bb, bb, EDGE_FALSE_VALUE);
3928 e->probability = REG_BR_PROB_BASE / 8;
3929
3930 t = fd->loops[i + 1].n1;
3931 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3932 false, GSI_CONTINUE_LINKING);
3933 stmt = gimple_build_assign (fd->loops[i + 1].v, t);
3934 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3935 }
3936 else
3937 collapse_bb = bb;
3938
3939 set_immediate_dominator (CDI_DOMINATORS, bb, last_bb);
3940
3941 if (POINTER_TYPE_P (vtype))
3942 t = fold_build2 (POINTER_PLUS_EXPR, vtype,
3943 fd->loops[i].v,
3944 fold_convert (sizetype, fd->loops[i].step));
3945 else
3946 t = fold_build2 (PLUS_EXPR, vtype, fd->loops[i].v,
3947 fd->loops[i].step);
3948 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3949 false, GSI_CONTINUE_LINKING);
3950 stmt = gimple_build_assign (fd->loops[i].v, t);
3951 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3952
3953 if (i > 0)
3954 {
3955 t = fd->loops[i].n2;
3956 t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3957 false, GSI_CONTINUE_LINKING);
3958 t = fold_build2 (fd->loops[i].cond_code, boolean_type_node,
3959 fd->loops[i].v, t);
3960 stmt = gimple_build_cond_empty (t);
3961 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3962 e = make_edge (bb, l1_bb, EDGE_TRUE_VALUE);
3963 e->probability = REG_BR_PROB_BASE * 7 / 8;
3964 }
3965 else
3966 make_edge (bb, l1_bb, EDGE_FALLTHRU);
3967 last_bb = bb;
3968 }
3969 }
3970
3971 /* Emit code to get the next parallel iteration in L2_BB. */
3972 gsi = gsi_start_bb (l2_bb);
3973
3974 t = build_call_expr (built_in_decls[next_fn], 2,
3975 build_fold_addr_expr (istart0),
3976 build_fold_addr_expr (iend0));
3977 t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3978 false, GSI_CONTINUE_LINKING);
3979 if (TREE_TYPE (t) != boolean_type_node)
3980 t = fold_build2 (NE_EXPR, boolean_type_node,
3981 t, build_int_cst (TREE_TYPE (t), 0));
3982 stmt = gimple_build_cond_empty (t);
3983 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3984 }
3985
3986 /* Add the loop cleanup function. */
3987 gsi = gsi_last_bb (exit_bb);
3988 if (gimple_omp_return_nowait_p (gsi_stmt (gsi)))
3989 t = built_in_decls[BUILT_IN_GOMP_LOOP_END_NOWAIT];
3990 else
3991 t = built_in_decls[BUILT_IN_GOMP_LOOP_END];
3992 stmt = gimple_build_call (t, 0);
3993 gsi_insert_after (&gsi, stmt, GSI_SAME_STMT);
3994 gsi_remove (&gsi, true);
3995
3996 /* Connect the new blocks. */
3997 find_edge (entry_bb, l0_bb)->flags = EDGE_TRUE_VALUE;
3998 find_edge (entry_bb, l3_bb)->flags = EDGE_FALSE_VALUE;
3999
4000 if (!broken_loop)
4001 {
4002 gimple_seq phis;
4003
4004 e = find_edge (cont_bb, l3_bb);
4005 ne = make_edge (l2_bb, l3_bb, EDGE_FALSE_VALUE);
4006
4007 phis = phi_nodes (l3_bb);
4008 for (gsi = gsi_start (phis); !gsi_end_p (gsi); gsi_next (&gsi))
4009 {
4010 gimple phi = gsi_stmt (gsi);
4011 SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, ne),
4012 PHI_ARG_DEF_FROM_EDGE (phi, e));
4013 }
4014 remove_edge (e);
4015
4016 make_edge (cont_bb, l2_bb, EDGE_FALSE_VALUE);
4017 if (fd->collapse > 1)
4018 {
4019 e = find_edge (cont_bb, l1_bb);
4020 remove_edge (e);
4021 e = make_edge (cont_bb, collapse_bb, EDGE_TRUE_VALUE);
4022 }
4023 else
4024 {
4025 e = find_edge (cont_bb, l1_bb);
4026 e->flags = EDGE_TRUE_VALUE;
4027 }
4028 e->probability = REG_BR_PROB_BASE * 7 / 8;
4029 find_edge (cont_bb, l2_bb)->probability = REG_BR_PROB_BASE / 8;
4030 make_edge (l2_bb, l0_bb, EDGE_TRUE_VALUE);
4031
4032 set_immediate_dominator (CDI_DOMINATORS, l2_bb,
4033 recompute_dominator (CDI_DOMINATORS, l2_bb));
4034 set_immediate_dominator (CDI_DOMINATORS, l3_bb,
4035 recompute_dominator (CDI_DOMINATORS, l3_bb));
4036 set_immediate_dominator (CDI_DOMINATORS, l0_bb,
4037 recompute_dominator (CDI_DOMINATORS, l0_bb));
4038 set_immediate_dominator (CDI_DOMINATORS, l1_bb,
4039 recompute_dominator (CDI_DOMINATORS, l1_bb));
4040 }
4041 }
4042
4043
4044 /* A subroutine of expand_omp_for. Generate code for a parallel
4045 loop with static schedule and no specified chunk size. Given
4046 parameters:
4047
4048 for (V = N1; V cond N2; V += STEP) BODY;
4049
4050 where COND is "<" or ">", we generate pseudocode
4051
4052 if (cond is <)
4053 adj = STEP - 1;
4054 else
4055 adj = STEP + 1;
4056 if ((__typeof (V)) -1 > 0 && cond is >)
4057 n = -(adj + N2 - N1) / -STEP;
4058 else
4059 n = (adj + N2 - N1) / STEP;
4060 q = n / nthreads;
4061 q += (q * nthreads != n);
4062 s0 = q * threadid;
4063 e0 = min(s0 + q, n);
4064 V = s0 * STEP + N1;
4065 if (s0 >= e0) goto L2; else goto L0;
4066 L0:
4067 e = e0 * STEP + N1;
4068 L1:
4069 BODY;
4070 V += STEP;
4071 if (V cond e) goto L1;
4072 L2:
4073 */
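/* Annotation added to this listing (illustrative, not part of the original
   source): this expansion handles the bare static schedule, e.g.

       #pragma omp for schedule(static)
       for (i = 0; i < n; i++)
         body (i);

   Each thread computes its own contiguous block of roughly n / nthreads
   iterations inline, so no libgomp scheduling calls are needed -- only the
   implicit barrier at the end (unless nowait is given).  */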
4074
4075 static void
4076 expand_omp_for_static_nochunk (struct omp_region *region,
4077 struct omp_for_data *fd)
4078 {
4079 tree n, q, s0, e0, e, t, nthreads, threadid;
4080 tree type, itype, vmain, vback;
4081 basic_block entry_bb, exit_bb, seq_start_bb, body_bb, cont_bb;
4082 basic_block fin_bb;
4083 gimple_stmt_iterator gsi;
4084 gimple stmt;
4085
4086 itype = type = TREE_TYPE (fd->loop.v);
4087 if (POINTER_TYPE_P (type))
4088 itype = lang_hooks.types.type_for_size (TYPE_PRECISION (type), 0);
4089
4090 entry_bb = region->entry;
4091 cont_bb = region->cont;
4092 gcc_assert (EDGE_COUNT (entry_bb->succs) == 2);
4093 gcc_assert (BRANCH_EDGE (entry_bb)->dest == FALLTHRU_EDGE (cont_bb)->dest);
4094 seq_start_bb = split_edge (FALLTHRU_EDGE (entry_bb));
4095 body_bb = single_succ (seq_start_bb);
4096 gcc_assert (BRANCH_EDGE (cont_bb)->dest == body_bb);
4097 gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
4098 fin_bb = FALLTHRU_EDGE (cont_bb)->dest;
4099 exit_bb = region->exit;
4100
4101 /* Iteration space partitioning goes in ENTRY_BB. */
4102 gsi = gsi_last_bb (entry_bb);
4103 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
4104
4105 t = build_call_expr (built_in_decls[BUILT_IN_OMP_GET_NUM_THREADS], 0);
4106 t = fold_convert (itype, t);
4107 nthreads = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
4108 true, GSI_SAME_STMT);
4109
4110 t = build_call_expr (built_in_decls[BUILT_IN_OMP_GET_THREAD_NUM], 0);
4111 t = fold_convert (itype, t);
4112 threadid = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
4113 true, GSI_SAME_STMT);
4114
4115 fd->loop.n1
4116 = force_gimple_operand_gsi (&gsi, fold_convert (type, fd->loop.n1),
4117 true, NULL_TREE, true, GSI_SAME_STMT);
4118 fd->loop.n2
4119 = force_gimple_operand_gsi (&gsi, fold_convert (itype, fd->loop.n2),
4120 true, NULL_TREE, true, GSI_SAME_STMT);
4121 fd->loop.step
4122 = force_gimple_operand_gsi (&gsi, fold_convert (itype, fd->loop.step),
4123 true, NULL_TREE, true, GSI_SAME_STMT);
4124
4125 t = build_int_cst (itype, (fd->loop.cond_code == LT_EXPR ? -1 : 1));
4126 t = fold_build2 (PLUS_EXPR, itype, fd->loop.step, t);
4127 t = fold_build2 (PLUS_EXPR, itype, t, fd->loop.n2);
4128 t = fold_build2 (MINUS_EXPR, itype, t, fold_convert (itype, fd->loop.n1));
4129 if (TYPE_UNSIGNED (itype) && fd->loop.cond_code == GT_EXPR)
4130 t = fold_build2 (TRUNC_DIV_EXPR, itype,
4131 fold_build1 (NEGATE_EXPR, itype, t),
4132 fold_build1 (NEGATE_EXPR, itype, fd->loop.step));
4133 else
4134 t = fold_build2 (TRUNC_DIV_EXPR, itype, t, fd->loop.step);
4135 t = fold_convert (itype, t);
4136 n = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);
4137
4138 t = fold_build2 (TRUNC_DIV_EXPR, itype, n, nthreads);
4139 q = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);
4140
4141 t = fold_build2 (MULT_EXPR, itype, q, nthreads);
4142 t = fold_build2 (NE_EXPR, itype, t, n);
4143 t = fold_build2 (PLUS_EXPR, itype, q, t);
4144 q = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);
4145
4146 t = build2 (MULT_EXPR, itype, q, threadid);
4147 s0 = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);
4148
4149 t = fold_build2 (PLUS_EXPR, itype, s0, q);
4150 t = fold_build2 (MIN_EXPR, itype, t, n);
4151 e0 = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);
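/* Annotation added to this listing (worked example, not part of the
   original source): with n = 10 iterations and nthreads = 4, q = 10 / 4 = 2
   and, since 2 * 4 != 10, q is bumped to 3.  Thread 0 then runs iterations
   [0, 3), thread 1 [3, 6), thread 2 [6, 9) and thread 3 [9, 10); a thread
   whose s0 >= e0 skips the loop via the branch built just below.  */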
4152
4153 t = build2 (GE_EXPR, boolean_type_node, s0, e0);
4154 gsi_insert_before (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT);
4155
4156 /* Remove the GIMPLE_OMP_FOR statement. */
4157 gsi_remove (&gsi, true);
4158
4159 /* Setup code for sequential iteration goes in SEQ_START_BB. */
4160 gsi = gsi_start_bb (seq_start_bb);
4161
4162 t = fold_convert (itype, s0);
4163 t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
4164 if (POINTER_TYPE_P (type))
4165 t = fold_build2 (POINTER_PLUS_EXPR, type, fd->loop.n1,
4166 fold_convert (sizetype, t));
4167 else
4168 t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
4169 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
4170 false, GSI_CONTINUE_LINKING);
4171 stmt = gimple_build_assign (fd->loop.v, t);
4172 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
4173
4174 t = fold_convert (itype, e0);
4175 t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
4176 if (POINTER_TYPE_P (type))
4177 t = fold_build2 (POINTER_PLUS_EXPR, type, fd->loop.n1,
4178 fold_convert (sizetype, t));
4179 else
4180 t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
4181 e = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
4182 false, GSI_CONTINUE_LINKING);
4183
4184 /* The code controlling the sequential loop replaces the
4185 GIMPLE_OMP_CONTINUE. */
4186 gsi = gsi_last_bb (cont_bb);
4187 stmt = gsi_stmt (gsi);
4188 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_CONTINUE);
4189 vmain = gimple_omp_continue_control_use (stmt);
4190 vback = gimple_omp_continue_control_def (stmt);
4191
4192 if (POINTER_TYPE_P (type))
4193 t = fold_build2 (POINTER_PLUS_EXPR, type, vmain,
4194 fold_convert (sizetype, fd->loop.step));
4195 else
4196 t = fold_build2 (PLUS_EXPR, type, vmain, fd->loop.step);
4197 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
4198 true, GSI_SAME_STMT);
4199 stmt = gimple_build_assign (vback, t);
4200 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
4201
4202 t = build2 (fd->loop.cond_code, boolean_type_node, vback, e);
4203 gsi_insert_before (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT);
4204
4205 /* Remove the GIMPLE_OMP_CONTINUE statement. */
4206 gsi_remove (&gsi, true);
4207
4208 /* Replace the GIMPLE_OMP_RETURN with a barrier, or nothing. */
4209 gsi = gsi_last_bb (exit_bb);
4210 if (!gimple_omp_return_nowait_p (gsi_stmt (gsi)))
4211 force_gimple_operand_gsi (&gsi, build_omp_barrier (), false, NULL_TREE,
4212 false, GSI_SAME_STMT);
4213 gsi_remove (&gsi, true);
4214
4215 /* Connect all the blocks. */
4216 find_edge (entry_bb, seq_start_bb)->flags = EDGE_FALSE_VALUE;
4217 find_edge (entry_bb, fin_bb)->flags = EDGE_TRUE_VALUE;
4218
4219 find_edge (cont_bb, body_bb)->flags = EDGE_TRUE_VALUE;
4220 find_edge (cont_bb, fin_bb)->flags = EDGE_FALSE_VALUE;
4221
4222 set_immediate_dominator (CDI_DOMINATORS, seq_start_bb, entry_bb);
4223 set_immediate_dominator (CDI_DOMINATORS, body_bb,
4224 recompute_dominator (CDI_DOMINATORS, body_bb));
4225 set_immediate_dominator (CDI_DOMINATORS, fin_bb,
4226 recompute_dominator (CDI_DOMINATORS, fin_bb));
4227 }
4228
4229
4230 /* A subroutine of expand_omp_for. Generate code for a parallel
4231 loop with static schedule and a specified chunk size. Given
4232 parameters:
4233
4234 for (V = N1; V cond N2; V += STEP) BODY;
4235
4236 where COND is "<" or ">", we generate pseudocode
4237
4238 if (cond is <)
4239 adj = STEP - 1;
4240 else
4241 adj = STEP + 1;
4242 if ((__typeof (V)) -1 > 0 && cond is >)
4243 n = -(adj + N2 - N1) / -STEP;
4244 else
4245 n = (adj + N2 - N1) / STEP;
4246 trip = 0;
4247 V = threadid * CHUNK * STEP + N1; -- this extra definition of V is
4248 here so that V is defined
4249 if the loop is not entered
4250 L0:
4251 s0 = (trip * nthreads + threadid) * CHUNK;
4252 e0 = min(s0 + CHUNK, n);
4253 if (s0 < n) goto L1; else goto L4;
4254 L1:
4255 V = s0 * STEP + N1;
4256 e = e0 * STEP + N1;
4257 L2:
4258 BODY;
4259 V += STEP;
4260 if (V cond e) goto L2; else goto L3;
4261 L3:
4262 trip += 1;
4263 goto L0;
4264 L4:
4265 */
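/* Annotation added to this listing (illustrative, not part of the original
   source): this expansion handles a static schedule with an explicit chunk
   size, e.g.

       #pragma omp for schedule(static, 4)
       for (i = 0; i < n; i++)
         body (i);

   Chunks of CHUNK = 4 iterations are dealt out round-robin: on trip T,
   thread TID runs iterations [(T * nthreads + TID) * 4,
   (T * nthreads + TID) * 4 + 4), clamped to n, again with no libgomp
   scheduling calls.  */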
4266
4267 static void
4268 expand_omp_for_static_chunk (struct omp_region *region, struct omp_for_data *fd)
4269 {
4270 tree n, s0, e0, e, t;
4271 tree trip_var, trip_init, trip_main, trip_back, nthreads, threadid;
4272 tree type, itype, v_main, v_back, v_extra;
4273 basic_block entry_bb, exit_bb, body_bb, seq_start_bb, iter_part_bb;
4274 basic_block trip_update_bb, cont_bb, fin_bb;
4275 gimple_stmt_iterator si;
4276 gimple stmt;
4277 edge se;
4278
4279 itype = type = TREE_TYPE (fd->loop.v);
4280 if (POINTER_TYPE_P (type))
4281 itype = lang_hooks.types.type_for_size (TYPE_PRECISION (type), 0);
4282
4283 entry_bb = region->entry;
4284 se = split_block (entry_bb, last_stmt (entry_bb));
4285 entry_bb = se->src;
4286 iter_part_bb = se->dest;
4287 cont_bb = region->cont;
4288 gcc_assert (EDGE_COUNT (iter_part_bb->succs) == 2);
4289 gcc_assert (BRANCH_EDGE (iter_part_bb)->dest
4290 == FALLTHRU_EDGE (cont_bb)->dest);
4291 seq_start_bb = split_edge (FALLTHRU_EDGE (iter_part_bb));
4292 body_bb = single_succ (seq_start_bb);
4293 gcc_assert (BRANCH_EDGE (cont_bb)->dest == body_bb);
4294 gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
4295 fin_bb = FALLTHRU_EDGE (cont_bb)->dest;
4296 trip_update_bb = split_edge (FALLTHRU_EDGE (cont_bb));
4297 exit_bb = region->exit;
4298
4299 /* Trip and adjustment setup goes in ENTRY_BB. */
4300 si = gsi_last_bb (entry_bb);
4301 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_FOR);
4302
4303 t = build_call_expr (built_in_decls[BUILT_IN_OMP_GET_NUM_THREADS], 0);
4304 t = fold_convert (itype, t);
4305 nthreads = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4306 true, GSI_SAME_STMT);
4307
4308 t = build_call_expr (built_in_decls[BUILT_IN_OMP_GET_THREAD_NUM], 0);
4309 t = fold_convert (itype, t);
4310 threadid = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4311 true, GSI_SAME_STMT);
4312
4313 fd->loop.n1
4314 = force_gimple_operand_gsi (&si, fold_convert (type, fd->loop.n1),
4315 true, NULL_TREE, true, GSI_SAME_STMT);
4316 fd->loop.n2
4317 = force_gimple_operand_gsi (&si, fold_convert (itype, fd->loop.n2),
4318 true, NULL_TREE, true, GSI_SAME_STMT);
4319 fd->loop.step
4320 = force_gimple_operand_gsi (&si, fold_convert (itype, fd->loop.step),
4321 true, NULL_TREE, true, GSI_SAME_STMT);
4322 fd->chunk_size
4323 = force_gimple_operand_gsi (&si, fold_convert (itype, fd->chunk_size),
4324 true, NULL_TREE, true, GSI_SAME_STMT);
4325
4326 t = build_int_cst (itype, (fd->loop.cond_code == LT_EXPR ? -1 : 1));
4327 t = fold_build2 (PLUS_EXPR, itype, fd->loop.step, t);
4328 t = fold_build2 (PLUS_EXPR, itype, t, fd->loop.n2);
4329 t = fold_build2 (MINUS_EXPR, itype, t, fold_convert (itype, fd->loop.n1));
4330 if (TYPE_UNSIGNED (itype) && fd->loop.cond_code == GT_EXPR)
4331 t = fold_build2 (TRUNC_DIV_EXPR, itype,
4332 fold_build1 (NEGATE_EXPR, itype, t),
4333 fold_build1 (NEGATE_EXPR, itype, fd->loop.step));
4334 else
4335 t = fold_build2 (TRUNC_DIV_EXPR, itype, t, fd->loop.step);
4336 t = fold_convert (itype, t);
4337 n = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4338 true, GSI_SAME_STMT);
4339
4340 trip_var = create_tmp_var (itype, ".trip");
4341 if (gimple_in_ssa_p (cfun))
4342 {
4343 add_referenced_var (trip_var);
4344 trip_init = make_ssa_name (trip_var, NULL);
4345 trip_main = make_ssa_name (trip_var, NULL);
4346 trip_back = make_ssa_name (trip_var, NULL);
4347 }
4348 else
4349 {
4350 trip_init = trip_var;
4351 trip_main = trip_var;
4352 trip_back = trip_var;
4353 }
4354
4355 stmt = gimple_build_assign (trip_init, build_int_cst (itype, 0));
4356 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
4357
4358 t = fold_build2 (MULT_EXPR, itype, threadid, fd->chunk_size);
4359 t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
4360 if (POINTER_TYPE_P (type))
4361 t = fold_build2 (POINTER_PLUS_EXPR, type, fd->loop.n1,
4362 fold_convert (sizetype, t));
4363 else
4364 t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
4365 v_extra = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4366 true, GSI_SAME_STMT);
4367
4368 /* Remove the GIMPLE_OMP_FOR. */
4369 gsi_remove (&si, true);
4370
4371 /* Iteration space partitioning goes in ITER_PART_BB. */
4372 si = gsi_last_bb (iter_part_bb);
4373
4374 t = fold_build2 (MULT_EXPR, itype, trip_main, nthreads);
4375 t = fold_build2 (PLUS_EXPR, itype, t, threadid);
4376 t = fold_build2 (MULT_EXPR, itype, t, fd->chunk_size);
4377 s0 = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4378 false, GSI_CONTINUE_LINKING);
4379
4380 t = fold_build2 (PLUS_EXPR, itype, s0, fd->chunk_size);
4381 t = fold_build2 (MIN_EXPR, itype, t, n);
4382 e0 = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4383 false, GSI_CONTINUE_LINKING);
4384
4385 t = build2 (LT_EXPR, boolean_type_node, s0, n);
4386 gsi_insert_after (&si, gimple_build_cond_empty (t), GSI_CONTINUE_LINKING);
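/* Annotation added to this listing (worked example, not part of the
   original source): with nthreads = 2, CHUNK = 4 and threadid = 1, trip 0
   gives s0 = (0 * 2 + 1) * 4 = 4 and trip 1 gives s0 = (1 * 2 + 1) * 4 = 12,
   so thread 1 processes iterations [4, 8), [12, 16), ... with each e0
   clamped to n.  */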
4387
4388 /* Setup code for sequential iteration goes in SEQ_START_BB. */
4389 si = gsi_start_bb (seq_start_bb);
4390
4391 t = fold_convert (itype, s0);
4392 t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
4393 if (POINTER_TYPE_P (type))
4394 t = fold_build2 (POINTER_PLUS_EXPR, type, fd->loop.n1,
4395 fold_convert (sizetype, t));
4396 else
4397 t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
4398 t = force_gimple_operand_gsi (&si, t, false, NULL_TREE,
4399 false, GSI_CONTINUE_LINKING);
4400 stmt = gimple_build_assign (fd->loop.v, t);
4401 gsi_insert_after (&si, stmt, GSI_CONTINUE_LINKING);
4402
4403 t = fold_convert (itype, e0);
4404 t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
4405 if (POINTER_TYPE_P (type))
4406 t = fold_build2 (POINTER_PLUS_EXPR, type, fd->loop.n1,
4407 fold_convert (sizetype, t));
4408 else
4409 t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
4410 e = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4411 false, GSI_CONTINUE_LINKING);
4412
4413 /* The code controlling the sequential loop goes in CONT_BB,
4414 replacing the GIMPLE_OMP_CONTINUE. */
4415 si = gsi_last_bb (cont_bb);
4416 stmt = gsi_stmt (si);
4417 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_CONTINUE);
4418 v_main = gimple_omp_continue_control_use (stmt);
4419 v_back = gimple_omp_continue_control_def (stmt);
4420
4421 if (POINTER_TYPE_P (type))
4422 t = fold_build2 (POINTER_PLUS_EXPR, type, v_main,
4423 fold_convert (sizetype, fd->loop.step));
4424 else
4425 t = fold_build2 (PLUS_EXPR, type, v_main, fd->loop.step);
4426 stmt = gimple_build_assign (v_back, t);
4427 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
4428
4429 t = build2 (fd->loop.cond_code, boolean_type_node, v_back, e);
4430 gsi_insert_before (&si, gimple_build_cond_empty (t), GSI_SAME_STMT);
4431
4432 /* Remove GIMPLE_OMP_CONTINUE. */
4433 gsi_remove (&si, true);
4434
4435 /* Trip update code goes into TRIP_UPDATE_BB. */
4436 si = gsi_start_bb (trip_update_bb);
4437
4438 t = build_int_cst (itype, 1);
4439 t = build2 (PLUS_EXPR, itype, trip_main, t);
4440 stmt = gimple_build_assign (trip_back, t);
4441 gsi_insert_after (&si, stmt, GSI_CONTINUE_LINKING);
4442
4443 /* Replace the GIMPLE_OMP_RETURN with a barrier, or nothing. */
4444 si = gsi_last_bb (exit_bb);
4445 if (!gimple_omp_return_nowait_p (gsi_stmt (si)))
4446 force_gimple_operand_gsi (&si, build_omp_barrier (), false, NULL_TREE,
4447 false, GSI_SAME_STMT);
4448 gsi_remove (&si, true);
4449
4450 /* Connect the new blocks. */
4451 find_edge (iter_part_bb, seq_start_bb)->flags = EDGE_TRUE_VALUE;
4452 find_edge (iter_part_bb, fin_bb)->flags = EDGE_FALSE_VALUE;
4453
4454 find_edge (cont_bb, body_bb)->flags = EDGE_TRUE_VALUE;
4455 find_edge (cont_bb, trip_update_bb)->flags = EDGE_FALSE_VALUE;
4456
4457 redirect_edge_and_branch (single_succ_edge (trip_update_bb), iter_part_bb);
4458
4459 if (gimple_in_ssa_p (cfun))
4460 {
4461 gimple_stmt_iterator psi;
4462 gimple phi;
4463 edge re, ene;
4464 edge_var_map_vector head;
4465 edge_var_map *vm;
4466 size_t i;
4467
4468 /* When we redirect the edge from trip_update_bb to iter_part_bb, we
4469 remove arguments of the phi nodes in fin_bb. We need to create
4470 appropriate phi nodes in iter_part_bb instead. */
4471 se = single_pred_edge (fin_bb);
4472 re = single_succ_edge (trip_update_bb);
4473 head = redirect_edge_var_map_vector (re);
4474 ene = single_succ_edge (entry_bb);
4475
4476 psi = gsi_start_phis (fin_bb);
4477 for (i = 0; !gsi_end_p (psi) && VEC_iterate (edge_var_map, head, i, vm);
4478 gsi_next (&psi), ++i)
4479 {
4480 gimple nphi;
4481
4482 phi = gsi_stmt (psi);
4483 t = gimple_phi_result (phi);
4484 gcc_assert (t == redirect_edge_var_map_result (vm));
4485 nphi = create_phi_node (t, iter_part_bb);
4486 SSA_NAME_DEF_STMT (t) = nphi;
4487
4488 t = PHI_ARG_DEF_FROM_EDGE (phi, se);
4489 /* A special case -- fd->loop.v is not yet computed in
4490 iter_part_bb, so we need to use v_extra instead. */
4491 if (t == fd->loop.v)
4492 t = v_extra;
4493 add_phi_arg (nphi, t, ene);
4494 add_phi_arg (nphi, redirect_edge_var_map_def (vm), re);
4495 }
4496 gcc_assert (!gsi_end_p (psi) && i == VEC_length (edge_var_map, head));
4497 redirect_edge_var_map_clear (re);
4498 while (1)
4499 {
4500 psi = gsi_start_phis (fin_bb);
4501 if (gsi_end_p (psi))
4502 break;
4503 remove_phi_node (&psi, false);
4504 }
4505
4506 /* Make phi node for trip. */
4507 phi = create_phi_node (trip_main, iter_part_bb);
4508 SSA_NAME_DEF_STMT (trip_main) = phi;
4509 add_phi_arg (phi, trip_back, single_succ_edge (trip_update_bb));
4510 add_phi_arg (phi, trip_init, single_succ_edge (entry_bb));
4511 }
4512
4513 set_immediate_dominator (CDI_DOMINATORS, trip_update_bb, cont_bb);
4514 set_immediate_dominator (CDI_DOMINATORS, iter_part_bb,
4515 recompute_dominator (CDI_DOMINATORS, iter_part_bb));
4516 set_immediate_dominator (CDI_DOMINATORS, fin_bb,
4517 recompute_dominator (CDI_DOMINATORS, fin_bb));
4518 set_immediate_dominator (CDI_DOMINATORS, seq_start_bb,
4519 recompute_dominator (CDI_DOMINATORS, seq_start_bb));
4520 set_immediate_dominator (CDI_DOMINATORS, body_bb,
4521 recompute_dominator (CDI_DOMINATORS, body_bb));
4522 }
4523
4524
4525 /* Expand the OpenMP loop defined by REGION. */
4526
4527 static void
4528 expand_omp_for (struct omp_region *region)
4529 {
4530 struct omp_for_data fd;
4531 struct omp_for_data_loop *loops;
4532
4533 loops
4534 = (struct omp_for_data_loop *)
4535 alloca (gimple_omp_for_collapse (last_stmt (region->entry))
4536 * sizeof (struct omp_for_data_loop));
4537 extract_omp_for_data (last_stmt (region->entry), &fd, loops);
4538 region->sched_kind = fd.sched_kind;
4539
4540 gcc_assert (EDGE_COUNT (region->entry->succs) == 2);
4541 BRANCH_EDGE (region->entry)->flags &= ~EDGE_ABNORMAL;
4542 FALLTHRU_EDGE (region->entry)->flags &= ~EDGE_ABNORMAL;
4543 if (region->cont)
4544 {
4545 gcc_assert (EDGE_COUNT (region->cont->succs) == 2);
4546 BRANCH_EDGE (region->cont)->flags &= ~EDGE_ABNORMAL;
4547 FALLTHRU_EDGE (region->cont)->flags &= ~EDGE_ABNORMAL;
4548 }
4549
4550 if (fd.sched_kind == OMP_CLAUSE_SCHEDULE_STATIC
4551 && !fd.have_ordered
4552 && fd.collapse == 1
4553 && region->cont != NULL)
4554 {
4555 if (fd.chunk_size == NULL)
4556 expand_omp_for_static_nochunk (region, &fd);
4557 else
4558 expand_omp_for_static_chunk (region, &fd);
4559 }
4560 else
4561 {
4562 int fn_index, start_ix, next_ix;
4563
4564 gcc_assert (fd.sched_kind != OMP_CLAUSE_SCHEDULE_AUTO);
4565 fn_index = (fd.sched_kind == OMP_CLAUSE_SCHEDULE_RUNTIME)
4566 ? 3 : fd.sched_kind;
4567 fn_index += fd.have_ordered * 4;
4568 start_ix = BUILT_IN_GOMP_LOOP_STATIC_START + fn_index;
4569 next_ix = BUILT_IN_GOMP_LOOP_STATIC_NEXT + fn_index;
4570 if (fd.iter_type == long_long_unsigned_type_node)
4571 {
4572 start_ix += BUILT_IN_GOMP_LOOP_ULL_STATIC_START
4573 - BUILT_IN_GOMP_LOOP_STATIC_START;
4574 next_ix += BUILT_IN_GOMP_LOOP_ULL_STATIC_NEXT
4575 - BUILT_IN_GOMP_LOOP_STATIC_NEXT;
4576 }
4577 expand_omp_for_generic (region, &fd, (enum built_in_function) start_ix,
4578 (enum built_in_function) next_ix);
4579 }
4580
4581 update_ssa (TODO_update_ssa_only_virtuals);
4582 }
4583
4584
4585 /* Expand code for an OpenMP sections directive. In pseudo code, we generate
4586
4587 v = GOMP_sections_start (n);
4588 L0:
4589 switch (v)
4590 {
4591 case 0:
4592 goto L2;
4593 case 1:
4594 section 1;
4595 goto L1;
4596 case 2:
4597 ...
4598 case n:
4599 ...
4600 default:
4601 abort ();
4602 }
4603 L1:
4604 v = GOMP_sections_next ();
4605 goto L0;
4606 L2:
4607 reduction;
4608
4609 If this is a combined parallel sections construct, replace the call
4610 to GOMP_sections_start with a call to GOMP_sections_next. */
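/* Annotation added to this listing (illustrative, not part of the original
   source): for

       #pragma omp sections
       {
       #pragma omp section
         work1 ();
       #pragma omp section
         work2 ();
       }

   GOMP_sections_start (2) (or GOMP_sections_next in the combined case)
   returns the number of the section the calling thread should run next:
   1 runs work1, 2 runs work2, and 0 means no work is left and control
   branches to L2.  */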
4611
4612 static void
4613 expand_omp_sections (struct omp_region *region)
4614 {
4615 tree t, u, vin = NULL, vmain, vnext, l1, l2;
4616 VEC (tree,heap) *label_vec;
4617 unsigned len;
4618 basic_block entry_bb, l0_bb, l1_bb, l2_bb, default_bb;
4619 gimple_stmt_iterator si, switch_si;
4620 gimple sections_stmt, stmt, cont;
4621 edge_iterator ei;
4622 edge e;
4623 struct omp_region *inner;
4624 unsigned i, casei;
4625 bool exit_reachable = region->cont != NULL;
4626
4627 gcc_assert (exit_reachable == (region->exit != NULL));
4628 entry_bb = region->entry;
4629 l0_bb = single_succ (entry_bb);
4630 l1_bb = region->cont;
4631 l2_bb = region->exit;
4632 if (exit_reachable)
4633 {
4634 if (single_pred (l2_bb) == l0_bb)
4635 l2 = gimple_block_label (l2_bb);
4636 else
4637 {
4638 /* This can happen if there are reductions. */
4639 len = EDGE_COUNT (l0_bb->succs);
4640 gcc_assert (len > 0);
4641 e = EDGE_SUCC (l0_bb, len - 1);
4642 si = gsi_last_bb (e->dest);
4643 l2 = NULL_TREE;
4644 if (gsi_end_p (si)
4645 || gimple_code (gsi_stmt (si)) != GIMPLE_OMP_SECTION)
4646 l2 = gimple_block_label (e->dest);
4647 else
4648 FOR_EACH_EDGE (e, ei, l0_bb->succs)
4649 {
4650 si = gsi_last_bb (e->dest);
4651 if (gsi_end_p (si)
4652 || gimple_code (gsi_stmt (si)) != GIMPLE_OMP_SECTION)
4653 {
4654 l2 = gimple_block_label (e->dest);
4655 break;
4656 }
4657 }
4658 }
4659 default_bb = create_empty_bb (l1_bb->prev_bb);
4660 l1 = gimple_block_label (l1_bb);
4661 }
4662 else
4663 {
4664 default_bb = create_empty_bb (l0_bb);
4665 l1 = NULL_TREE;
4666 l2 = gimple_block_label (default_bb);
4667 }
4668
4669 /* We will build a switch() with enough cases for all the
4670 GIMPLE_OMP_SECTION regions, a '0' case to handle the end of more work
4671 and a default case to abort if something goes wrong. */
4672 len = EDGE_COUNT (l0_bb->succs);
4673
4674 /* Use VEC_quick_push on label_vec throughout, since we know the size
4675 in advance. */
4676 label_vec = VEC_alloc (tree, heap, len);
4677
4678 /* The call to GOMP_sections_start goes in ENTRY_BB, replacing the
4679 GIMPLE_OMP_SECTIONS statement. */
4680 si = gsi_last_bb (entry_bb);
4681 sections_stmt = gsi_stmt (si);
4682 gcc_assert (gimple_code (sections_stmt) == GIMPLE_OMP_SECTIONS);
4683 vin = gimple_omp_sections_control (sections_stmt);
4684 if (!is_combined_parallel (region))
4685 {
4686 /* If we are not inside a combined parallel+sections region,
4687 call GOMP_sections_start. */
4688 t = build_int_cst (unsigned_type_node,
4689 exit_reachable ? len - 1 : len);
4690 u = built_in_decls[BUILT_IN_GOMP_SECTIONS_START];
4691 stmt = gimple_build_call (u, 1, t);
4692 }
4693 else
4694 {
4695 /* Otherwise, call GOMP_sections_next. */
4696 u = built_in_decls[BUILT_IN_GOMP_SECTIONS_NEXT];
4697 stmt = gimple_build_call (u, 0);
4698 }
4699 gimple_call_set_lhs (stmt, vin);
4700 gsi_insert_after (&si, stmt, GSI_SAME_STMT);
4701 gsi_remove (&si, true);
4702
4703 /* The switch() statement replacing GIMPLE_OMP_SECTIONS_SWITCH goes in
4704 L0_BB. */
4705 switch_si = gsi_last_bb (l0_bb);
4706 gcc_assert (gimple_code (gsi_stmt (switch_si)) == GIMPLE_OMP_SECTIONS_SWITCH);
4707 if (exit_reachable)
4708 {
4709 cont = last_stmt (l1_bb);
4710 gcc_assert (gimple_code (cont) == GIMPLE_OMP_CONTINUE);
4711 vmain = gimple_omp_continue_control_use (cont);
4712 vnext = gimple_omp_continue_control_def (cont);
4713 }
4714 else
4715 {
4716 vmain = vin;
4717 vnext = NULL_TREE;
4718 }
4719
4720 i = 0;
4721 if (exit_reachable)
4722 {
4723 t = build3 (CASE_LABEL_EXPR, void_type_node,
4724 build_int_cst (unsigned_type_node, 0), NULL, l2);
4725 VEC_quick_push (tree, label_vec, t);
4726 i++;
4727 }
4728
4729 /* Convert each GIMPLE_OMP_SECTION into a CASE_LABEL_EXPR. */
4730 for (inner = region->inner, casei = 1;
4731 inner;
4732 inner = inner->next, i++, casei++)
4733 {
4734 basic_block s_entry_bb, s_exit_bb;
4735
4736 /* Skip optional reduction region. */
4737 if (inner->type == GIMPLE_OMP_ATOMIC_LOAD)
4738 {
4739 --i;
4740 --casei;
4741 continue;
4742 }
4743
4744 s_entry_bb = inner->entry;
4745 s_exit_bb = inner->exit;
4746
4747 t = gimple_block_label (s_entry_bb);
4748 u = build_int_cst (unsigned_type_node, casei);
4749 u = build3 (CASE_LABEL_EXPR, void_type_node, u, NULL, t);
4750 VEC_quick_push (tree, label_vec, u);
4751
4752 si = gsi_last_bb (s_entry_bb);
4753 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SECTION);
4754 gcc_assert (i < len || gimple_omp_section_last_p (gsi_stmt (si)));
4755 gsi_remove (&si, true);
4756 single_succ_edge (s_entry_bb)->flags = EDGE_FALLTHRU;
4757
4758 if (s_exit_bb == NULL)
4759 continue;
4760
4761 si = gsi_last_bb (s_exit_bb);
4762 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_RETURN);
4763 gsi_remove (&si, true);
4764
4765 single_succ_edge (s_exit_bb)->flags = EDGE_FALLTHRU;
4766 }
4767
4768 /* Error handling code goes in DEFAULT_BB. */
4769 t = gimple_block_label (default_bb);
4770 u = build3 (CASE_LABEL_EXPR, void_type_node, NULL, NULL, t);
4771 make_edge (l0_bb, default_bb, 0);
4772
4773 stmt = gimple_build_switch_vec (vmain, u, label_vec);
4774 gsi_insert_after (&switch_si, stmt, GSI_SAME_STMT);
4775 gsi_remove (&switch_si, true);
4776 VEC_free (tree, heap, label_vec);
4777
4778 si = gsi_start_bb (default_bb);
4779 stmt = gimple_build_call (built_in_decls[BUILT_IN_TRAP], 0);
4780 gsi_insert_after (&si, stmt, GSI_CONTINUE_LINKING);
4781
4782 if (exit_reachable)
4783 {
4784 /* Code to get the next section goes in L1_BB. */
4785 si = gsi_last_bb (l1_bb);
4786 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_CONTINUE);
4787
4788 stmt = gimple_build_call (built_in_decls[BUILT_IN_GOMP_SECTIONS_NEXT], 0);
4789 gimple_call_set_lhs (stmt, vnext);
4790 gsi_insert_after (&si, stmt, GSI_SAME_STMT);
4791 gsi_remove (&si, true);
4792
4793 single_succ_edge (l1_bb)->flags = EDGE_FALLTHRU;
4794
4795 /* Cleanup function replaces GIMPLE_OMP_RETURN in EXIT_BB. */
4796 si = gsi_last_bb (l2_bb);
4797 if (gimple_omp_return_nowait_p (gsi_stmt (si)))
4798 t = built_in_decls[BUILT_IN_GOMP_SECTIONS_END_NOWAIT];
4799 else
4800 t = built_in_decls[BUILT_IN_GOMP_SECTIONS_END];
4801 stmt = gimple_build_call (t, 0);
4802 gsi_insert_after (&si, stmt, GSI_SAME_STMT);
4803 gsi_remove (&si, true);
4804 }
4805
4806 set_immediate_dominator (CDI_DOMINATORS, default_bb, l0_bb);
4807 }
4808
4809
4810 /* Expand code for an OpenMP single directive. We've already expanded
4811 much of the code; here we simply place the GOMP_barrier call. */
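/* Annotation added to this listing (illustrative, not part of the original
   source): for

       #pragma omp single copyprivate (x)
         x = compute ();

   the terminal barrier must be kept even if the GIMPLE_OMP_RETURN was
   marked nowait, so that the broadcast of X to the other threads completes
   before the thread that executed the single region moves on; that is what
   the COPYPRIVATE check below enforces.  */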
4812
4813 static void
4814 expand_omp_single (struct omp_region *region)
4815 {
4816 basic_block entry_bb, exit_bb;
4817 gimple_stmt_iterator si;
4818 bool need_barrier = false;
4819
4820 entry_bb = region->entry;
4821 exit_bb = region->exit;
4822
4823 si = gsi_last_bb (entry_bb);
4824 /* The terminal barrier at the end of a GOMP_single_copy sequence cannot
4825 be removed. We need to ensure that the thread that entered the single
4826 does not exit before the data is copied out by the other threads. */
4827 if (find_omp_clause (gimple_omp_single_clauses (gsi_stmt (si)),
4828 OMP_CLAUSE_COPYPRIVATE))
4829 need_barrier = true;
4830 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SINGLE);
4831 gsi_remove (&si, true);
4832 single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
4833
4834 si = gsi_last_bb (exit_bb);
4835 if (!gimple_omp_return_nowait_p (gsi_stmt (si)) || need_barrier)
4836 force_gimple_operand_gsi (&si, build_omp_barrier (), false, NULL_TREE,
4837 false, GSI_SAME_STMT);
4838 gsi_remove (&si, true);
4839 single_succ_edge (exit_bb)->flags = EDGE_FALLTHRU;
4840 }
4841
4842
4843 /* Generic expansion for OpenMP synchronization directives: master,
4844 ordered and critical. All we need to do here is remove the entry
4845 and exit markers for REGION. */
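/* Annotation added to this listing (illustrative, not part of the original
   source): by this point a directive such as #pragma omp critical has
   already been lowered to explicit GOMP_critical_start / GOMP_critical_end
   calls around its body, so all that remains is to delete the GIMPLE_OMP_*
   entry marker and the GIMPLE_OMP_RETURN and turn the region boundaries
   into plain fallthrough edges.  */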
4846
4847 static void
4848 expand_omp_synch (struct omp_region *region)
4849 {
4850 basic_block entry_bb, exit_bb;
4851 gimple_stmt_iterator si;
4852
4853 entry_bb = region->entry;
4854 exit_bb = region->exit;
4855
4856 si = gsi_last_bb (entry_bb);
4857 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SINGLE
4858 || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_MASTER
4859 || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ORDERED
4860 || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_CRITICAL);
4861 gsi_remove (&si, true);
4862 single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
4863
4864 if (exit_bb)
4865 {
4866 si = gsi_last_bb (exit_bb);
4867 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_RETURN);
4868 gsi_remove (&si, true);
4869 single_succ_edge (exit_bb)->flags = EDGE_FALLTHRU;
4870 }
4871 }
4872
4873 /* A subroutine of expand_omp_atomic. Attempt to implement the atomic
4874 operation as a __sync_fetch_and_op builtin. INDEX is log2 of the
4875 size of the data type, and thus usable to find the index of the builtin
4876 decl. Returns false if the expression is not of the proper form. */
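/* Annotation added to this listing (illustrative, not part of the original
   source): for example,

       #pragma omp atomic
       x += 1;

   with a 4-byte int X gives INDEX = log2 (4) = 2, and the load/store pair
   recognized below is collapsed into a single

       __sync_fetch_and_add_4 (&x, 1);

   call, provided the target implements the corresponding sync optab.  */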
4877
4878 static bool
4879 expand_omp_atomic_fetch_op (basic_block load_bb,
4880 tree addr, tree loaded_val,
4881 tree stored_val, int index)
4882 {
4883 enum built_in_function base;
4884 tree decl, itype, call;
4885 enum insn_code *optab;
4886 tree rhs;
4887 basic_block store_bb = single_succ (load_bb);
4888 gimple_stmt_iterator gsi;
4889 gimple stmt;
4890
4891 /* We expect to find the following sequences:
4892
4893 load_bb:
4894 GIMPLE_OMP_ATOMIC_LOAD (tmp, mem)
4895
4896 store_bb:
4897 val = tmp OP something; (or: something OP tmp)
4898 GIMPLE_OMP_ATOMIC_STORE (val)
4899
4900 ???FIXME: Allow a more flexible sequence.
4901 Perhaps use data flow to pick the statements.
4902
4903 */
4904
4905 gsi = gsi_after_labels (store_bb);
4906 stmt = gsi_stmt (gsi);
4907 if (!is_gimple_assign (stmt))
4908 return false;
4909 gsi_next (&gsi);
4910 if (gimple_code (gsi_stmt (gsi)) != GIMPLE_OMP_ATOMIC_STORE)
4911 return false;
4912
4913 if (!operand_equal_p (gimple_assign_lhs (stmt), stored_val, 0))
4914 return false;
4915
4916 /* Check for one of the supported fetch-op operations. */
4917 switch (gimple_assign_rhs_code (stmt))
4918 {
4919 case PLUS_EXPR:
4920 case POINTER_PLUS_EXPR:
4921 base = BUILT_IN_FETCH_AND_ADD_N;
4922 optab = sync_add_optab;
4923 break;
4924 case MINUS_EXPR:
4925 base = BUILT_IN_FETCH_AND_SUB_N;
4926 optab = sync_add_optab;
4927 break;
4928 case BIT_AND_EXPR:
4929 base = BUILT_IN_FETCH_AND_AND_N;
4930 optab = sync_and_optab;
4931 break;
4932 case BIT_IOR_EXPR:
4933 base = BUILT_IN_FETCH_AND_OR_N;
4934 optab = sync_ior_optab;
4935 break;
4936 case BIT_XOR_EXPR:
4937 base = BUILT_IN_FETCH_AND_XOR_N;
4938 optab = sync_xor_optab;
4939 break;
4940 default:
4941 return false;
4942 }
4943 /* Make sure the expression is of the proper form. */
4944 if (operand_equal_p (gimple_assign_rhs1 (stmt), loaded_val, 0))
4945 rhs = gimple_assign_rhs2 (stmt);
4946 else if (commutative_tree_code (gimple_assign_rhs_code (stmt))
4947 && operand_equal_p (gimple_assign_rhs2 (stmt), loaded_val, 0))
4948 rhs = gimple_assign_rhs1 (stmt);
4949 else
4950 return false;
4951
4952 decl = built_in_decls[base + index + 1];
4953 itype = TREE_TYPE (TREE_TYPE (decl));
4954
4955 if (optab[TYPE_MODE (itype)] == CODE_FOR_nothing)
4956 return false;
4957
4958 gsi = gsi_last_bb (load_bb);
4959 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_ATOMIC_LOAD);
4960 call = build_call_expr (decl, 2, addr, fold_convert (itype, rhs));
4961 call = fold_convert (void_type_node, call);
4962 force_gimple_operand_gsi (&gsi, call, true, NULL_TREE, true, GSI_SAME_STMT);
4963 gsi_remove (&gsi, true);
4964
4965 gsi = gsi_last_bb (store_bb);
4966 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_ATOMIC_STORE);
4967 gsi_remove (&gsi, true);
4968 gsi = gsi_last_bb (store_bb);
4969 gsi_remove (&gsi, true);
4970
4971 if (gimple_in_ssa_p (cfun))
4972 update_ssa (TODO_update_ssa_no_phi);
4973
4974 return true;
4975 }
4976
4977 /* A subroutine of expand_omp_atomic. Implement the atomic operation as:
4978
4979 oldval = *addr;
4980 repeat:
4981 newval = rhs; // with oldval replacing *addr in rhs
4982 oldval = __sync_val_compare_and_swap (addr, oldval, newval);
4983 if (oldval != newval)
4984 goto repeat;
4985
4986 INDEX is log2 of the size of the data type, and thus usable to find the
4987 index of the builtin decl. */
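/* Annotation added to this listing (rough sketch, not part of the original
   source): for example,

       #pragma omp atomic
       f *= 2.0f;

   on a 4-byte float F has no fetch-and-op builtin, so F is view-converted
   to a 32-bit integer and the loop above becomes, roughly,

       oldval = *(int *) &f;
       do
         {
           prev = oldval;
           newval = view_convert <int> (view_convert <float> (prev) * 2.0f);
           oldval = __sync_val_compare_and_swap_4 ((int *) &f, prev, newval);
         }
       while (oldval != prev);
*/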
4988
4989 static bool
4990 expand_omp_atomic_pipeline (basic_block load_bb, basic_block store_bb,
4991 tree addr, tree loaded_val, tree stored_val,
4992 int index)
4993 {
4994 tree loadedi, storedi, initial, new_storedi, old_vali;
4995 tree type, itype, cmpxchg, iaddr;
4996 gimple_stmt_iterator si;
4997 basic_block loop_header = single_succ (load_bb);
4998 gimple phi, stmt;
4999 edge e;
5000
5001 cmpxchg = built_in_decls[BUILT_IN_VAL_COMPARE_AND_SWAP_N + index + 1];
5002 type = TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (addr)));
5003 itype = TREE_TYPE (TREE_TYPE (cmpxchg));
5004
5005 if (sync_compare_and_swap[TYPE_MODE (itype)] == CODE_FOR_nothing)
5006 return false;
5007
5008 /* Load the initial value, replacing the GIMPLE_OMP_ATOMIC_LOAD. */
5009 si = gsi_last_bb (load_bb);
5010 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_LOAD);
5011
5012 /* For floating-point values, we'll need to view-convert them to integers
5013 so that we can perform the atomic compare and swap. Simplify the
5014 following code by always setting up the "i"ntegral variables. */
5015 if (!INTEGRAL_TYPE_P (type) && !POINTER_TYPE_P (type))
5016 {
5017 tree iaddr_val;
5018
5019 iaddr = create_tmp_var (build_pointer_type (itype), NULL);
5020 iaddr_val
5021 = force_gimple_operand_gsi (&si,
5022 fold_convert (TREE_TYPE (iaddr), addr),
5023 false, NULL_TREE, true, GSI_SAME_STMT);
5024 stmt = gimple_build_assign (iaddr, iaddr_val);
5025 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
5026 DECL_POINTER_ALIAS_SET (iaddr) = 0;
5027 loadedi = create_tmp_var (itype, NULL);
5028 if (gimple_in_ssa_p (cfun))
5029 {
5030 add_referenced_var (iaddr);
5031 add_referenced_var (loadedi);
5032 loadedi = make_ssa_name (loadedi, NULL);
5033 }
5034 }
5035 else
5036 {
5037 iaddr = addr;
5038 loadedi = loaded_val;
5039 }
5040
5041 initial = force_gimple_operand_gsi (&si, build_fold_indirect_ref (iaddr),
5042 true, NULL_TREE, true, GSI_SAME_STMT);
5043
5044 /* Move the value to the LOADEDI temporary. */
5045 if (gimple_in_ssa_p (cfun))
5046 {
5047 gcc_assert (gimple_seq_empty_p (phi_nodes (loop_header)));
5048 phi = create_phi_node (loadedi, loop_header);
5049 SSA_NAME_DEF_STMT (loadedi) = phi;
5050 SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, single_succ_edge (load_bb)),
5051 initial);
5052 }
5053 else
5054 gsi_insert_before (&si,
5055 gimple_build_assign (loadedi, initial),
5056 GSI_SAME_STMT);
5057 if (loadedi != loaded_val)
5058 {
5059 gimple_stmt_iterator gsi2;
5060 tree x;
5061
5062 x = build1 (VIEW_CONVERT_EXPR, type, loadedi);
5063 gsi2 = gsi_start_bb (loop_header);
5064 if (gimple_in_ssa_p (cfun))
5065 {
5066 gimple stmt;
5067 x = force_gimple_operand_gsi (&gsi2, x, true, NULL_TREE,
5068 true, GSI_SAME_STMT);
5069 stmt = gimple_build_assign (loaded_val, x);
5070 gsi_insert_before (&gsi2, stmt, GSI_SAME_STMT);
5071 }
5072 else
5073 {
5074 x = build2 (MODIFY_EXPR, TREE_TYPE (loaded_val), loaded_val, x);
5075 force_gimple_operand_gsi (&gsi2, x, true, NULL_TREE,
5076 true, GSI_SAME_STMT);
5077 }
5078 }
5079 gsi_remove (&si, true);
5080
5081 si = gsi_last_bb (store_bb);
5082 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_STORE);
5083
5084 if (iaddr == addr)
5085 storedi = stored_val;
5086 else
5087 storedi =
5088 force_gimple_operand_gsi (&si,
5089 build1 (VIEW_CONVERT_EXPR, itype,
5090 stored_val), true, NULL_TREE, true,
5091 GSI_SAME_STMT);
5092
5093 /* Build the compare&swap statement. */
5094 new_storedi = build_call_expr (cmpxchg, 3, iaddr, loadedi, storedi);
5095 new_storedi = force_gimple_operand_gsi (&si,
5096 fold_convert (TREE_TYPE (loadedi),
5097 new_storedi),
5098 true, NULL_TREE,
5099 true, GSI_SAME_STMT);
5100
5101 if (gimple_in_ssa_p (cfun))
5102 old_vali = loadedi;
5103 else
5104 {
5105 old_vali = create_tmp_var (TREE_TYPE (loadedi), NULL);
5106 if (gimple_in_ssa_p (cfun))
5107 add_referenced_var (old_vali);
5108 stmt = gimple_build_assign (old_vali, loadedi);
5109 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
5110
5111 stmt = gimple_build_assign (loadedi, new_storedi);
5112 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
5113 }
5114
5115 /* Note that we always perform the comparison as an integer, even for
5116 floating point. This allows the atomic operation to properly
5117 succeed even with NaNs and -0.0. */
5118 stmt = gimple_build_cond_empty
5119 (build2 (NE_EXPR, boolean_type_node,
5120 new_storedi, old_vali));
5121 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
5122
5123 /* Update cfg. */
5124 e = single_succ_edge (store_bb);
5125 e->flags &= ~EDGE_FALLTHRU;
5126 e->flags |= EDGE_FALSE_VALUE;
5127
5128 e = make_edge (store_bb, loop_header, EDGE_TRUE_VALUE);
5129
5130 /* Copy the new value to loadedi (we already did that before the condition
5131 if we are not in SSA). */
5132 if (gimple_in_ssa_p (cfun))
5133 {
5134 phi = gimple_seq_first_stmt (phi_nodes (loop_header));
5135 SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, e), new_storedi);
5136 }
5137
5138 /* Remove GIMPLE_OMP_ATOMIC_STORE. */
5139 gsi_remove (&si, true);
5140
5141 if (gimple_in_ssa_p (cfun))
5142 update_ssa (TODO_update_ssa_no_phi);
5143
5144 return true;
5145 }
5146
5147 /* A subroutine of expand_omp_atomic. Implement the atomic operation as:
5148
5149 GOMP_atomic_start ();
5150 *addr = rhs;
5151 GOMP_atomic_end ();
5152
5153 The result is not globally atomic, but works so long as all parallel
5154 references are within #pragma omp atomic directives. According to
5155 responses received from omp@openmp.org, this appears to be within
5156 the spec, which makes sense, since several other compilers handle
5157 this situation the same way.
5158 LOADED_VAL and ADDR are the operands of GIMPLE_OMP_ATOMIC_LOAD we're
5159 expanding. STORED_VAL is the operand of the matching
5160 GIMPLE_OMP_ATOMIC_STORE.
5161
5162 We replace
5163 GIMPLE_OMP_ATOMIC_LOAD (loaded_val, addr) with
5164 loaded_val = *addr;
5165
5166 and replace
5167 GIMPLE_OMP_ATOMIC_STORE (stored_val) with
5168 *addr = stored_val;
5169 */
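/* Annotation added to this listing (illustrative, not part of the original
   source): for example, on a target with no compare-and-swap wide enough
   for

       #pragma omp atomic
       ld += 1.0L;          (LD being a long double, say)

   the statement falls back to

       GOMP_atomic_start ();
       ld = ld + 1.0L;
       GOMP_atomic_end ();

   where libgomp serializes all such regions with a single global lock,
   hence the "not globally atomic" caveat above.  */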
5170
5171 static bool
5172 expand_omp_atomic_mutex (basic_block load_bb, basic_block store_bb,
5173 tree addr, tree loaded_val, tree stored_val)
5174 {
5175 gimple_stmt_iterator si;
5176 gimple stmt;
5177 tree t;
5178
5179 si = gsi_last_bb (load_bb);
5180 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_LOAD);
5181
5182 t = built_in_decls[BUILT_IN_GOMP_ATOMIC_START];
5183 t = build_function_call_expr (t, 0);
5184 force_gimple_operand_gsi (&si, t, true, NULL_TREE, true, GSI_SAME_STMT);
5185
5186 stmt = gimple_build_assign (loaded_val, build_fold_indirect_ref (addr));
5187 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
5188 gsi_remove (&si, true);
5189
5190 si = gsi_last_bb (store_bb);
5191 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_STORE);
5192
5193 stmt = gimple_build_assign (build_fold_indirect_ref (unshare_expr (addr)),
5194 stored_val);
5195 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
5196
5197 t = built_in_decls[BUILT_IN_GOMP_ATOMIC_END];
5198 t = build_function_call_expr (t, 0);
5199 force_gimple_operand_gsi (&si, t, true, NULL_TREE, true, GSI_SAME_STMT);
5200 gsi_remove (&si, true);
5201
5202 if (gimple_in_ssa_p (cfun))
5203 update_ssa (TODO_update_ssa_no_phi);
5204 return true;
5205 }
5206
5207 /* Expand a GIMPLE_OMP_ATOMIC statement. We first try to expand it
5208 using expand_omp_atomic_fetch_op. If that fails, we try
5209 expand_omp_atomic_pipeline, and if that fails too, the
5210 ultimate fallback is wrapping the operation in a mutex
5211 (expand_omp_atomic_mutex). REGION is the atomic region built
5212 by build_omp_regions_1(). */
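/* Annotation added to this listing (not part of the original source): the
   size and alignment checks below mean, for instance, that a naturally
   aligned 4-byte int (index = 2) may use the fetch-op or compare-and-swap
   paths, while a type whose size is not a power of two (exact_log2 returns
   -1) or an under-aligned operand goes straight to the mutex fallback.  */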
5213
5214 static void
5215 expand_omp_atomic (struct omp_region *region)
5216 {
5217 basic_block load_bb = region->entry, store_bb = region->exit;
5218 gimple load = last_stmt (load_bb), store = last_stmt (store_bb);
5219 tree loaded_val = gimple_omp_atomic_load_lhs (load);
5220 tree addr = gimple_omp_atomic_load_rhs (load);
5221 tree stored_val = gimple_omp_atomic_store_val (store);
5222 tree type = TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (addr)));
5223 HOST_WIDE_INT index;
5224
5225 /* Make sure the type is one of the supported sizes. */
5226 index = tree_low_cst (TYPE_SIZE_UNIT (type), 1);
5227 index = exact_log2 (index);
5228 if (index >= 0 && index <= 4)
5229 {
5230 unsigned int align = TYPE_ALIGN_UNIT (type);
5231
5232 /* __sync builtins require strict data alignment. */
5233 if (exact_log2 (align) >= index)
5234 {
5235 /* When possible, use specialized atomic update functions. */
5236 if ((INTEGRAL_TYPE_P (type) || POINTER_TYPE_P (type))
5237 && store_bb == single_succ (load_bb))
5238 {
5239 if (expand_omp_atomic_fetch_op (load_bb, addr,
5240 loaded_val, stored_val, index))
5241 return;
5242 }
5243
5244 /* If we don't have specialized __sync builtins, try to implement it
5245 as a compare and swap loop. */
5246 if (expand_omp_atomic_pipeline (load_bb, store_bb, addr,
5247 loaded_val, stored_val, index))
5248 return;
5249 }
5250 }
5251
5252 /* The ultimate fallback is wrapping the operation in a mutex. */
5253 expand_omp_atomic_mutex (load_bb, store_bb, addr, loaded_val, stored_val);
5254 }
5255
5256
5257 /* Expand the parallel region tree rooted at REGION. Expansion
5258 proceeds in depth-first order. Innermost regions are expanded
5259 first. This way, parallel regions that require a new function to
5260 be created (e.g., GIMPLE_OMP_PARALLEL) can be expanded without having any
5261 internal dependencies in their body. */
5262
5263 static void
5264 expand_omp (struct omp_region *region)
5265 {
5266 while (region)
5267 {
5268 location_t saved_location;
5269
5270 /* First, determine whether this is a combined parallel+workshare
5271 region. */
5272 if (region->type == GIMPLE_OMP_PARALLEL)
5273 determine_parallel_type (region);
5274
5275 if (region->inner)
5276 expand_omp (region->inner);
5277
5278 saved_location = input_location;
5279 if (gimple_has_location (last_stmt (region->entry)))
5280 input_location = gimple_location (last_stmt (region->entry));
5281
5282 switch (region->type)
5283 {
5284 case GIMPLE_OMP_PARALLEL:
5285 case GIMPLE_OMP_TASK:
5286 expand_omp_taskreg (region);
5287 break;
5288
5289 case GIMPLE_OMP_FOR:
5290 expand_omp_for (region);
5291 break;
5292
5293 case GIMPLE_OMP_SECTIONS:
5294 expand_omp_sections (region);
5295 break;
5296
5297 case GIMPLE_OMP_SECTION:
5298 /* Individual omp sections are handled together with their
5299 parent GIMPLE_OMP_SECTIONS region. */
5300 break;
5301
5302 case GIMPLE_OMP_SINGLE:
5303 expand_omp_single (region);
5304 break;
5305
5306 case GIMPLE_OMP_MASTER:
5307 case GIMPLE_OMP_ORDERED:
5308 case GIMPLE_OMP_CRITICAL:
5309 expand_omp_synch (region);
5310 break;
5311
5312 case GIMPLE_OMP_ATOMIC_LOAD:
5313 expand_omp_atomic (region);
5314 break;
5315
5316 default:
5317 gcc_unreachable ();
5318 }
5319
5320 input_location = saved_location;
5321 region = region->next;
5322 }
5323 }
5324
5325
5326 /* Helper for build_omp_regions. Scan the dominator tree starting at
5327 block BB. PARENT is the region that contains BB. If SINGLE_TREE is
5328 true, the function ends once a single tree is built (otherwise, a
5329 whole forest of OMP constructs may be built). */
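/* Annotation added to this listing (illustrative, not part of the original
   source): for instance,

       #pragma omp parallel
       {
       #pragma omp for
         for (i = 0; i < n; i++)
           body (i);
       }

   yields a GIMPLE_OMP_PARALLEL region whose inner chain holds a
   GIMPLE_OMP_FOR region; each region records its entry block, its
   GIMPLE_OMP_CONTINUE block (if any) in ->cont, and the block of the
   GIMPLE_OMP_RETURN that closes it in ->exit.  */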
5330
5331 static void
5332 build_omp_regions_1 (basic_block bb, struct omp_region *parent,
5333 bool single_tree)
5334 {
5335 gimple_stmt_iterator gsi;
5336 gimple stmt;
5337 basic_block son;
5338
5339 gsi = gsi_last_bb (bb);
5340 if (!gsi_end_p (gsi) && is_gimple_omp (gsi_stmt (gsi)))
5341 {
5342 struct omp_region *region;
5343 enum gimple_code code;
5344
5345 stmt = gsi_stmt (gsi);
5346 code = gimple_code (stmt);
5347 if (code == GIMPLE_OMP_RETURN)
5348 {
5349 /* STMT is the return point out of region PARENT. Mark it
5350 as the exit point and make PARENT the immediately
5351 enclosing region. */
5352 gcc_assert (parent);
5353 region = parent;
5354 region->exit = bb;
5355 parent = parent->outer;
5356 }
5357 else if (code == GIMPLE_OMP_ATOMIC_STORE)
5358 {
5359 /* GIMPLE_OMP_ATOMIC_STORE is analogous to
5360 GIMPLE_OMP_RETURN, but matches with
5361 GIMPLE_OMP_ATOMIC_LOAD. */
5362 gcc_assert (parent);
5363 gcc_assert (parent->type == GIMPLE_OMP_ATOMIC_LOAD);
5364 region = parent;
5365 region->exit = bb;
5366 parent = parent->outer;
5367 }
5368
5369 else if (code == GIMPLE_OMP_CONTINUE)
5370 {
5371 gcc_assert (parent);
5372 parent->cont = bb;
5373 }
5374 else if (code == GIMPLE_OMP_SECTIONS_SWITCH)
5375 {
5376 /* GIMPLE_OMP_SECTIONS_SWITCH is part of
5377 GIMPLE_OMP_SECTIONS, and we do nothing for it. */
5378 ;
5379 }
5380 else
5381 {
5382 /* Otherwise, this directive becomes the parent for a new
5383 region. */
5384 region = new_omp_region (bb, code, parent);
5385 parent = region;
5386 }
5387 }
5388
5389 if (single_tree && !parent)
5390 return;
5391
5392 for (son = first_dom_son (CDI_DOMINATORS, bb);
5393 son;
5394 son = next_dom_son (CDI_DOMINATORS, son))
5395 build_omp_regions_1 (son, parent, single_tree);
5396 }
5397
5398 /* Build the tree of OMP regions rooted at ROOT, storing it in
5399 root_omp_region. */
5400
5401 static void
5402 build_omp_regions_root (basic_block root)
5403 {
5404 gcc_assert (root_omp_region == NULL);
5405 build_omp_regions_1 (root, NULL, true);
5406 gcc_assert (root_omp_region != NULL);
5407 }
5408
5409 /* Expands the OMP construct (and its subconstructs) starting in HEAD. */
5410
5411 void
5412 omp_expand_local (basic_block head)
5413 {
5414 build_omp_regions_root (head);
5415 if (dump_file && (dump_flags & TDF_DETAILS))
5416 {
5417 fprintf (dump_file, "\nOMP region tree\n\n");
5418 dump_omp_region (dump_file, root_omp_region, 0);
5419 fprintf (dump_file, "\n");
5420 }
5421
5422 remove_exit_barriers (root_omp_region);
5423 expand_omp (root_omp_region);
5424
5425 free_omp_regions ();
5426 }
5427
5428 /* Scan the CFG and build a tree of OMP regions, storing the root of
5429 the tree in root_omp_region. */
5430
5431 static void
5432 build_omp_regions (void)
5433 {
5434 gcc_assert (root_omp_region == NULL);
5435 calculate_dominance_info (CDI_DOMINATORS);
5436 build_omp_regions_1 (ENTRY_BLOCK_PTR, NULL, false);
5437 }
5438
5439 /* Main entry point for expanding OMP-GIMPLE into runtime calls. */
5440
5441 static unsigned int
5442 execute_expand_omp (void)
5443 {
5444 build_omp_regions ();
5445
5446 if (!root_omp_region)
5447 return 0;
5448
5449 if (dump_file)
5450 {
5451 fprintf (dump_file, "\nOMP region tree\n\n");
5452 dump_omp_region (dump_file, root_omp_region, 0);
5453 fprintf (dump_file, "\n");
5454 }
5455
5456 remove_exit_barriers (root_omp_region);
5457
5458 expand_omp (root_omp_region);
5459
5460 cleanup_tree_cfg ();
5461
5462 free_omp_regions ();
5463
5464 return 0;
5465 }
5466
5467 /* OMP expansion -- the default pass, run before creation of SSA form. */
5468
5469 static bool
5470 gate_expand_omp (void)
5471 {
5472 return (flag_openmp != 0 && errorcount == 0);
5473 }
5474
5475 struct gimple_opt_pass pass_expand_omp =
5476 {
5477 {
5478 GIMPLE_PASS,
5479 "ompexp", /* name */
5480 gate_expand_omp, /* gate */
5481 execute_expand_omp, /* execute */
5482 NULL, /* sub */
5483 NULL, /* next */
5484 0, /* static_pass_number */
5485 TV_NONE, /* tv_id */
5486 PROP_gimple_any, /* properties_required */
5487 0, /* properties_provided */
5488 0, /* properties_destroyed */
5489 0, /* todo_flags_start */
5490 TODO_dump_func /* todo_flags_finish */
5491 }
5492 };
5493 \f
5494 /* Routines to lower OpenMP directives into OMP-GIMPLE. */
5495
5496 /* Lower the OpenMP sections directive in the current statement in GSI_P.
5497 CTX is the enclosing OMP context for the current statement. */
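/* A hedged sketch of the lowered shape (illustrative, not copied from the
   sources): the replacement GIMPLE_BIND roughly contains

     <setup emitted for the input clauses>
     GIMPLE_OMP_SECTIONS <clauses, control variable '.section'>
     GIMPLE_OMP_SECTIONS_SWITCH
     <each GIMPLE_OMP_SECTION body, the last one followed by the
      lastprivate handling, each closed by a GIMPLE_OMP_RETURN>
     GIMPLE_OMP_CONTINUE (.section, .section)
     <reduction and destructor code>
     GIMPLE_OMP_RETURN (nowait iff a 'nowait' clause is present)  */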
5498
5499 static void
5500 lower_omp_sections (gimple_stmt_iterator *gsi_p, omp_context *ctx)
5501 {
5502 tree block, control;
5503 gimple_stmt_iterator tgsi;
5504 unsigned i, len;
5505 gimple stmt, new_stmt, bind, t;
5506 gimple_seq ilist, dlist, olist, new_body, body;
5507 struct gimplify_ctx gctx;
5508
5509 stmt = gsi_stmt (*gsi_p);
5510
5511 push_gimplify_context (&gctx);
5512
5513 dlist = NULL;
5514 ilist = NULL;
5515 lower_rec_input_clauses (gimple_omp_sections_clauses (stmt),
5516 &ilist, &dlist, ctx);
5517
5518 tgsi = gsi_start (gimple_omp_body (stmt));
5519 for (len = 0; !gsi_end_p (tgsi); len++, gsi_next (&tgsi))
5520 continue;
5521
5522 tgsi = gsi_start (gimple_omp_body (stmt));
5523 body = NULL;
5524 for (i = 0; i < len; i++, gsi_next (&tgsi))
5525 {
5526 omp_context *sctx;
5527 gimple sec_start;
5528
5529 sec_start = gsi_stmt (tgsi);
5530 sctx = maybe_lookup_ctx (sec_start);
5531 gcc_assert (sctx);
5532
5533 gimple_seq_add_stmt (&body, sec_start);
5534
5535 lower_omp (gimple_omp_body (sec_start), sctx);
5536 gimple_seq_add_seq (&body, gimple_omp_body (sec_start));
5537 gimple_omp_set_body (sec_start, NULL);
5538
5539 if (i == len - 1)
5540 {
5541 gimple_seq l = NULL;
5542 lower_lastprivate_clauses (gimple_omp_sections_clauses (stmt), NULL,
5543 &l, ctx);
5544 gimple_seq_add_seq (&body, l);
5545 gimple_omp_section_set_last (sec_start);
5546 }
5547
5548 gimple_seq_add_stmt (&body, gimple_build_omp_return (false));
5549 }
5550
5551 block = make_node (BLOCK);
5552 bind = gimple_build_bind (NULL, body, block);
5553
5554 olist = NULL;
5555 lower_reduction_clauses (gimple_omp_sections_clauses (stmt), &olist, ctx);
5556
5557 block = make_node (BLOCK);
5558 new_stmt = gimple_build_bind (NULL, NULL, block);
5559
5560 pop_gimplify_context (new_stmt);
5561 gimple_bind_append_vars (new_stmt, ctx->block_vars);
5562 BLOCK_VARS (block) = gimple_bind_vars (bind);
5563 if (BLOCK_VARS (block))
5564 TREE_USED (block) = 1;
5565
5566 new_body = NULL;
5567 gimple_seq_add_seq (&new_body, ilist);
5568 gimple_seq_add_stmt (&new_body, stmt);
5569 gimple_seq_add_stmt (&new_body, gimple_build_omp_sections_switch ());
5570 gimple_seq_add_stmt (&new_body, bind);
5571
5572 control = create_tmp_var (unsigned_type_node, ".section");
5573 t = gimple_build_omp_continue (control, control);
5574 gimple_omp_sections_set_control (stmt, control);
5575 gimple_seq_add_stmt (&new_body, t);
5576
5577 gimple_seq_add_seq (&new_body, olist);
5578 gimple_seq_add_seq (&new_body, dlist);
5579
5580 new_body = maybe_catch_exception (new_body);
5581
5582 t = gimple_build_omp_return
5583 (!!find_omp_clause (gimple_omp_sections_clauses (stmt),
5584 OMP_CLAUSE_NOWAIT));
5585 gimple_seq_add_stmt (&new_body, t);
5586
5587 gimple_bind_set_body (new_stmt, new_body);
5588 gimple_omp_set_body (stmt, NULL);
5589
5590 gsi_replace (gsi_p, new_stmt, true);
5591 }
5592
5593
5594 /* A subroutine of lower_omp_single. Expand the simple form of
5595 a GIMPLE_OMP_SINGLE, without a copyprivate clause:
5596
5597 if (GOMP_single_start ())
5598 BODY;
5599 [ GOMP_barrier (); ] -> unless 'nowait' is present.
5600
5601 FIXME. It may be better to delay expanding the logic of this until
5602 pass_expand_omp. The expanded logic may make the job more difficult
5603 for a synchronization analysis pass. */
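/* For illustration only ('x' and 'f' are made-up names): a fragment such as

     #pragma omp single
       x = f ();

   is lowered along the lines of

     if (GOMP_single_start ())
       x = f ();
     GOMP_barrier ();

   with the trailing barrier dropped when a 'nowait' clause is present.  */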
5604
5605 static void
5606 lower_omp_single_simple (gimple single_stmt, gimple_seq *pre_p)
5607 {
5608 location_t loc = gimple_location (single_stmt);
5609 tree tlabel = create_artificial_label (loc);
5610 tree flabel = create_artificial_label (loc);
5611 gimple call, cond;
5612 tree lhs, decl;
5613
5614 decl = built_in_decls[BUILT_IN_GOMP_SINGLE_START];
5615 lhs = create_tmp_var (TREE_TYPE (TREE_TYPE (decl)), NULL);
5616 call = gimple_build_call (decl, 0);
5617 gimple_call_set_lhs (call, lhs);
5618 gimple_seq_add_stmt (pre_p, call);
5619
5620 cond = gimple_build_cond (EQ_EXPR, lhs,
5621 fold_convert (TREE_TYPE (lhs), boolean_true_node),
5622 tlabel, flabel);
5623 gimple_seq_add_stmt (pre_p, cond);
5624 gimple_seq_add_stmt (pre_p, gimple_build_label (tlabel));
5625 gimple_seq_add_seq (pre_p, gimple_omp_body (single_stmt));
5626 gimple_seq_add_stmt (pre_p, gimple_build_label (flabel));
5627 }
5628
5629
5630 /* A subroutine of lower_omp_single. Expand a GIMPLE_OMP_SINGLE that
5631 has a copyprivate clause:
5632
5633 #pragma omp single copyprivate (a, b, c)
5634
5635 Create a new structure to hold copies of 'a', 'b' and 'c' and emit:
5636
5637 {
5638 if ((copyout_p = GOMP_single_copy_start ()) == NULL)
5639 {
5640 BODY;
5641 copyout.a = a;
5642 copyout.b = b;
5643 copyout.c = c;
5644 GOMP_single_copy_end (&copyout);
5645 }
5646 else
5647 {
5648 a = copyout_p->a;
5649 b = copyout_p->b;
5650 c = copyout_p->c;
5651 }
5652 GOMP_barrier ();
5653 }
5654
5655 FIXME. It may be better to delay expanding the logic of this until
5656 pass_expand_omp. The expanded logic may make the job more difficult
5657 for a synchronization analysis pass. */
5658
5659 static void
5660 lower_omp_single_copy (gimple single_stmt, gimple_seq *pre_p, omp_context *ctx)
5661 {
5662 tree ptr_type, t, l0, l1, l2;
5663 gimple_seq copyin_seq;
5664 location_t loc = gimple_location (single_stmt);
5665
5666 ctx->sender_decl = create_tmp_var (ctx->record_type, ".omp_copy_o");
5667
5668 ptr_type = build_pointer_type (ctx->record_type);
5669 ctx->receiver_decl = create_tmp_var (ptr_type, ".omp_copy_i");
5670
5671 l0 = create_artificial_label (loc);
5672 l1 = create_artificial_label (loc);
5673 l2 = create_artificial_label (loc);
5674
5675 t = build_call_expr (built_in_decls[BUILT_IN_GOMP_SINGLE_COPY_START], 0);
5676 t = fold_convert (ptr_type, t);
5677 gimplify_assign (ctx->receiver_decl, t, pre_p);
5678
5679 t = build2 (EQ_EXPR, boolean_type_node, ctx->receiver_decl,
5680 build_int_cst (ptr_type, 0));
5681 t = build3 (COND_EXPR, void_type_node, t,
5682 build_and_jump (&l0), build_and_jump (&l1));
5683 gimplify_and_add (t, pre_p);
5684
5685 gimple_seq_add_stmt (pre_p, gimple_build_label (l0));
5686
5687 gimple_seq_add_seq (pre_p, gimple_omp_body (single_stmt));
5688
5689 copyin_seq = NULL;
5690 lower_copyprivate_clauses (gimple_omp_single_clauses (single_stmt), pre_p,
5691 &copyin_seq, ctx);
5692
5693 t = build_fold_addr_expr (ctx->sender_decl);
5694 t = build_call_expr (built_in_decls[BUILT_IN_GOMP_SINGLE_COPY_END], 1, t);
5695 gimplify_and_add (t, pre_p);
5696
5697 t = build_and_jump (&l2);
5698 gimplify_and_add (t, pre_p);
5699
5700 gimple_seq_add_stmt (pre_p, gimple_build_label (l1));
5701
5702 gimple_seq_add_seq (pre_p, copyin_seq);
5703
5704 gimple_seq_add_stmt (pre_p, gimple_build_label (l2));
5705 }
5706
5707
5708 /* Lower code for an OpenMP single directive. */
5709
5710 static void
5711 lower_omp_single (gimple_stmt_iterator *gsi_p, omp_context *ctx)
5712 {
5713 tree block;
5714 gimple t, bind, single_stmt = gsi_stmt (*gsi_p);
5715 gimple_seq bind_body, dlist;
5716 struct gimplify_ctx gctx;
5717
5718 push_gimplify_context (&gctx);
5719
5720 bind_body = NULL;
5721 lower_rec_input_clauses (gimple_omp_single_clauses (single_stmt),
5722 &bind_body, &dlist, ctx);
5723 lower_omp (gimple_omp_body (single_stmt), ctx);
5724
5725 gimple_seq_add_stmt (&bind_body, single_stmt);
5726
5727 if (ctx->record_type)
5728 lower_omp_single_copy (single_stmt, &bind_body, ctx);
5729 else
5730 lower_omp_single_simple (single_stmt, &bind_body);
5731
5732 gimple_omp_set_body (single_stmt, NULL);
5733
5734 gimple_seq_add_seq (&bind_body, dlist);
5735
5736 bind_body = maybe_catch_exception (bind_body);
5737
5738 t = gimple_build_omp_return
5739 (!!find_omp_clause (gimple_omp_single_clauses (single_stmt),
5740 OMP_CLAUSE_NOWAIT));
5741 gimple_seq_add_stmt (&bind_body, t);
5742
5743 block = make_node (BLOCK);
5744 bind = gimple_build_bind (NULL, bind_body, block);
5745
5746 pop_gimplify_context (bind);
5747
5748 gimple_bind_append_vars (bind, ctx->block_vars);
5749 BLOCK_VARS (block) = ctx->block_vars;
5750 gsi_replace (gsi_p, bind, true);
5751 if (BLOCK_VARS (block))
5752 TREE_USED (block) = 1;
5753 }
5754
5755
5756 /* Lower code for an OpenMP master directive. */
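/* A rough sketch of the lowered form (illustrative only):

     if (omp_get_thread_num () == 0)
       BODY;

   i.e. the body is guarded by a zero check on the thread number; the
   construct is closed with a nowait GIMPLE_OMP_RETURN, since 'master'
   implies no barrier.  */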
5757
5758 static void
5759 lower_omp_master (gimple_stmt_iterator *gsi_p, omp_context *ctx)
5760 {
5761 tree block, lab = NULL, x;
5762 gimple stmt = gsi_stmt (*gsi_p), bind;
5763 gimple_seq tseq;
5764 struct gimplify_ctx gctx;
5765
5766 push_gimplify_context (&gctx);
5767
5768 block = make_node (BLOCK);
5769 bind = gimple_build_bind (NULL, gimple_seq_alloc_with_stmt (stmt),
5770 block);
5771
5772 x = build_call_expr (built_in_decls[BUILT_IN_OMP_GET_THREAD_NUM], 0);
5773 x = build2 (EQ_EXPR, boolean_type_node, x, integer_zero_node);
5774 x = build3 (COND_EXPR, void_type_node, x, NULL, build_and_jump (&lab));
5775 tseq = NULL;
5776 gimplify_and_add (x, &tseq);
5777 gimple_bind_add_seq (bind, tseq);
5778
5779 lower_omp (gimple_omp_body (stmt), ctx);
5780 gimple_omp_set_body (stmt, maybe_catch_exception (gimple_omp_body (stmt)));
5781 gimple_bind_add_seq (bind, gimple_omp_body (stmt));
5782 gimple_omp_set_body (stmt, NULL);
5783
5784 gimple_bind_add_stmt (bind, gimple_build_label (lab));
5785
5786 gimple_bind_add_stmt (bind, gimple_build_omp_return (true));
5787
5788 pop_gimplify_context (bind);
5789
5790 gimple_bind_append_vars (bind, ctx->block_vars);
5791 BLOCK_VARS (block) = ctx->block_vars;
5792 gsi_replace (gsi_p, bind, true);
5793 }
5794
5795
5796 /* Lower code for an OpenMP ordered directive. */
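/* Sketch of the lowered form (illustrative only):

     GOMP_ordered_start ();
     BODY;
     GOMP_ordered_end ();

   followed by a nowait GIMPLE_OMP_RETURN.  */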
5797
5798 static void
5799 lower_omp_ordered (gimple_stmt_iterator *gsi_p, omp_context *ctx)
5800 {
5801 tree block;
5802 gimple stmt = gsi_stmt (*gsi_p), bind, x;
5803 struct gimplify_ctx gctx;
5804
5805 push_gimplify_context (&gctx);
5806
5807 block = make_node (BLOCK);
5808 bind = gimple_build_bind (NULL, gimple_seq_alloc_with_stmt (stmt),
5809 block);
5810
5811 x = gimple_build_call (built_in_decls[BUILT_IN_GOMP_ORDERED_START], 0);
5812 gimple_bind_add_stmt (bind, x);
5813
5814 lower_omp (gimple_omp_body (stmt), ctx);
5815 gimple_omp_set_body (stmt, maybe_catch_exception (gimple_omp_body (stmt)));
5816 gimple_bind_add_seq (bind, gimple_omp_body (stmt));
5817 gimple_omp_set_body (stmt, NULL);
5818
5819 x = gimple_build_call (built_in_decls[BUILT_IN_GOMP_ORDERED_END], 0);
5820 gimple_bind_add_stmt (bind, x);
5821
5822 gimple_bind_add_stmt (bind, gimple_build_omp_return (true));
5823
5824 pop_gimplify_context (bind);
5825
5826 gimple_bind_append_vars (bind, ctx->block_vars);
5827 BLOCK_VARS (block) = gimple_bind_vars (bind);
5828 gsi_replace (gsi_p, bind, true);
5829 }
5830
5831
5832 /* Lower a GIMPLE_OMP_CRITICAL statement. This is a relatively simple
5833 substitution of a couple of function calls, but in the NAMED case it
5834 requires that the languages coordinate a symbol name. It is therefore
5835 best put here in common code. */
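/* Illustrative sketch (the name 'update' and variable 'counter' below are
   made up):

     #pragma omp critical (update)
       counter++;

   lowers to roughly

     GOMP_critical_name_start (&.gomp_critical_user_update);
     counter++;
     GOMP_critical_name_end (&.gomp_critical_user_update);

   while the unnamed form uses GOMP_critical_start/GOMP_critical_end with
   no arguments.  */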
5836
5837 static GTY((param1_is (tree), param2_is (tree)))
5838 splay_tree critical_name_mutexes;
5839
5840 static void
5841 lower_omp_critical (gimple_stmt_iterator *gsi_p, omp_context *ctx)
5842 {
5843 tree block;
5844 tree name, lock, unlock;
5845 gimple stmt = gsi_stmt (*gsi_p), bind;
5846 gimple_seq tbody;
5847 struct gimplify_ctx gctx;
5848
5849 name = gimple_omp_critical_name (stmt);
5850 if (name)
5851 {
5852 tree decl;
5853 splay_tree_node n;
5854
5855 if (!critical_name_mutexes)
5856 critical_name_mutexes
5857 = splay_tree_new_ggc (splay_tree_compare_pointers);
5858
5859 n = splay_tree_lookup (critical_name_mutexes, (splay_tree_key) name);
5860 if (n == NULL)
5861 {
5862 char *new_str;
5863
5864 decl = create_tmp_var_raw (ptr_type_node, NULL);
5865
5866 new_str = ACONCAT ((".gomp_critical_user_",
5867 IDENTIFIER_POINTER (name), NULL));
5868 DECL_NAME (decl) = get_identifier (new_str);
5869 TREE_PUBLIC (decl) = 1;
5870 TREE_STATIC (decl) = 1;
5871 DECL_COMMON (decl) = 1;
5872 DECL_ARTIFICIAL (decl) = 1;
5873 DECL_IGNORED_P (decl) = 1;
5874 varpool_finalize_decl (decl);
5875
5876 splay_tree_insert (critical_name_mutexes, (splay_tree_key) name,
5877 (splay_tree_value) decl);
5878 }
5879 else
5880 decl = (tree) n->value;
5881
5882 lock = built_in_decls[BUILT_IN_GOMP_CRITICAL_NAME_START];
5883 lock = build_call_expr (lock, 1, build_fold_addr_expr (decl));
5884
5885 unlock = built_in_decls[BUILT_IN_GOMP_CRITICAL_NAME_END];
5886 unlock = build_call_expr (unlock, 1, build_fold_addr_expr (decl));
5887 }
5888 else
5889 {
5890 lock = built_in_decls[BUILT_IN_GOMP_CRITICAL_START];
5891 lock = build_call_expr (lock, 0);
5892
5893 unlock = built_in_decls[BUILT_IN_GOMP_CRITICAL_END];
5894 unlock = build_call_expr (unlock, 0);
5895 }
5896
5897 push_gimplify_context (&gctx);
5898
5899 block = make_node (BLOCK);
5900 bind = gimple_build_bind (NULL, gimple_seq_alloc_with_stmt (stmt), block);
5901
5902 tbody = gimple_bind_body (bind);
5903 gimplify_and_add (lock, &tbody);
5904 gimple_bind_set_body (bind, tbody);
5905
5906 lower_omp (gimple_omp_body (stmt), ctx);
5907 gimple_omp_set_body (stmt, maybe_catch_exception (gimple_omp_body (stmt)));
5908 gimple_bind_add_seq (bind, gimple_omp_body (stmt));
5909 gimple_omp_set_body (stmt, NULL);
5910
5911 tbody = gimple_bind_body (bind);
5912 gimplify_and_add (unlock, &tbody);
5913 gimple_bind_set_body (bind, tbody);
5914
5915 gimple_bind_add_stmt (bind, gimple_build_omp_return (true));
5916
5917 pop_gimplify_context (bind);
5918 gimple_bind_append_vars (bind, ctx->block_vars);
5919 BLOCK_VARS (block) = gimple_bind_vars (bind);
5920 gsi_replace (gsi_p, bind, true);
5921 }
5922
5923
5924 /* A subroutine of lower_omp_for. Generate code to emit the predicate
5925 for a lastprivate clause. Given a loop control predicate of (V
5926 cond N2), we gate the clause on (!(V cond N2)). The lowered form
5927 is appended to *DLIST, iterator initialization is appended to
5928 *BODY_P. */
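/* Example (illustrative): for 'for (i = 0; i < n; i++)' with a unit step,
   the code below turns the gating condition into 'i == n', so that only
   the thread which executed the final iteration performs the lastprivate
   copy-out; all other threads see the condition as false and skip it.  */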
5929
5930 static void
5931 lower_omp_for_lastprivate (struct omp_for_data *fd, gimple_seq *body_p,
5932 gimple_seq *dlist, struct omp_context *ctx)
5933 {
5934 tree clauses, cond, vinit;
5935 enum tree_code cond_code;
5936 gimple_seq stmts;
5937
5938 cond_code = fd->loop.cond_code;
5939 cond_code = cond_code == LT_EXPR ? GE_EXPR : LE_EXPR;
5940
5941 /* When possible, use a strict equality expression. This can let VRP
5942 type optimizations deduce the value and remove a copy. */
5943 if (host_integerp (fd->loop.step, 0))
5944 {
5945 HOST_WIDE_INT step = TREE_INT_CST_LOW (fd->loop.step);
5946 if (step == 1 || step == -1)
5947 cond_code = EQ_EXPR;
5948 }
5949
5950 cond = build2 (cond_code, boolean_type_node, fd->loop.v, fd->loop.n2);
5951
5952 clauses = gimple_omp_for_clauses (fd->for_stmt);
5953 stmts = NULL;
5954 lower_lastprivate_clauses (clauses, cond, &stmts, ctx);
5955 if (!gimple_seq_empty_p (stmts))
5956 {
5957 gimple_seq_add_seq (&stmts, *dlist);
5958 *dlist = stmts;
5959
5960 /* Optimize: v = 0; is usually cheaper than v = some_other_constant. */
5961 vinit = fd->loop.n1;
5962 if (cond_code == EQ_EXPR
5963 && host_integerp (fd->loop.n2, 0)
5964 && ! integer_zerop (fd->loop.n2))
5965 vinit = build_int_cst (TREE_TYPE (fd->loop.v), 0);
5966
5967 /* Initialize the iterator variable, so that threads that don't execute
5968 any iterations don't execute the lastprivate clauses by accident. */
5969 gimplify_assign (fd->loop.v, vinit, body_p);
5970 }
5971 }
5972
5973
5974 /* Lower code for an OpenMP loop directive. */
5975
5976 static void
5977 lower_omp_for (gimple_stmt_iterator *gsi_p, omp_context *ctx)
5978 {
5979 tree *rhs_p, block;
5980 struct omp_for_data fd;
5981 gimple stmt = gsi_stmt (*gsi_p), new_stmt;
5982 gimple_seq omp_for_body, body, dlist, ilist;
5983 size_t i;
5984 struct gimplify_ctx gctx;
5985
5986 push_gimplify_context (&gctx);
5987
5988 lower_omp (gimple_omp_for_pre_body (stmt), ctx);
5989 lower_omp (gimple_omp_body (stmt), ctx);
5990
5991 block = make_node (BLOCK);
5992 new_stmt = gimple_build_bind (NULL, NULL, block);
5993
5994 /* Move the declarations of temporaries in the loop body before we make
5995 the body go away. */
5996 omp_for_body = gimple_omp_body (stmt);
5997 if (!gimple_seq_empty_p (omp_for_body)
5998 && gimple_code (gimple_seq_first_stmt (omp_for_body)) == GIMPLE_BIND)
5999 {
6000 tree vars = gimple_bind_vars (gimple_seq_first_stmt (omp_for_body));
6001 gimple_bind_append_vars (new_stmt, vars);
6002 }
6003
6004 /* The pre-body and input clauses go before the lowered GIMPLE_OMP_FOR. */
6005 ilist = NULL;
6006 dlist = NULL;
6007 body = NULL;
6008 lower_rec_input_clauses (gimple_omp_for_clauses (stmt), &body, &dlist, ctx);
6009 gimple_seq_add_seq (&body, gimple_omp_for_pre_body (stmt));
6010
6011 /* Lower the header expressions. At this point, we can assume that
6012 the header is of the form:
6013
6014 #pragma omp for (V = VAL1; V {<|>|<=|>=} VAL2; V = V [+-] VAL3)
6015
6016 We just need to make sure that VAL1, VAL2 and VAL3 are lowered
6017 using the .omp_data_s mapping, if needed. */
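/* E.g. (illustrative): if VAL2 is an expression involving a shared
   variable that was remapped through the .omp_data_s record, the calls
   below replace it with a formal temporary computed in BODY, so that the
   GIMPLE_OMP_FOR header ends up containing only invariants or simple
   temporaries.  */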
6018 for (i = 0; i < gimple_omp_for_collapse (stmt); i++)
6019 {
6020 rhs_p = gimple_omp_for_initial_ptr (stmt, i);
6021 if (!is_gimple_min_invariant (*rhs_p))
6022 *rhs_p = get_formal_tmp_var (*rhs_p, &body);
6023
6024 rhs_p = gimple_omp_for_final_ptr (stmt, i);
6025 if (!is_gimple_min_invariant (*rhs_p))
6026 *rhs_p = get_formal_tmp_var (*rhs_p, &body);
6027
6028 rhs_p = &TREE_OPERAND (gimple_omp_for_incr (stmt, i), 1);
6029 if (!is_gimple_min_invariant (*rhs_p))
6030 *rhs_p = get_formal_tmp_var (*rhs_p, &body);
6031 }
6032
6033 /* Once lowered, extract the bounds and clauses. */
6034 extract_omp_for_data (stmt, &fd, NULL);
6035
6036 lower_omp_for_lastprivate (&fd, &body, &dlist, ctx);
6037
6038 gimple_seq_add_stmt (&body, stmt);
6039 gimple_seq_add_seq (&body, gimple_omp_body (stmt));
6040
6041 gimple_seq_add_stmt (&body, gimple_build_omp_continue (fd.loop.v,
6042 fd.loop.v));
6043
6044 /* After the loop, add exit clauses. */
6045 lower_reduction_clauses (gimple_omp_for_clauses (stmt), &body, ctx);
6046 gimple_seq_add_seq (&body, dlist);
6047
6048 body = maybe_catch_exception (body);
6049
6050 /* Region exit marker goes at the end of the loop body. */
6051 gimple_seq_add_stmt (&body, gimple_build_omp_return (fd.have_nowait));
6052
6053 pop_gimplify_context (new_stmt);
6054
6055 gimple_bind_append_vars (new_stmt, ctx->block_vars);
6056 BLOCK_VARS (block) = gimple_bind_vars (new_stmt);
6057 if (BLOCK_VARS (block))
6058 TREE_USED (block) = 1;
6059
6060 gimple_bind_set_body (new_stmt, body);
6061 gimple_omp_set_body (stmt, NULL);
6062 gimple_omp_for_set_pre_body (stmt, NULL);
6063 gsi_replace (gsi_p, new_stmt, true);
6064 }
6065
6066 /* Callback for walk_stmts. Used to detect whether a parallel body
6067 contains nothing but a single GIMPLE_OMP_FOR or GIMPLE_OMP_SECTIONS. */
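/* Illustrative example: for

     #pragma omp parallel
       #pragma omp for
         ...

   the walk ends with *info == 1, and the caller then marks the parallel
   as combined so that it can later be expanded as a combined
   parallel-workshare construct.  */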
6068
6069 static tree
6070 check_combined_parallel (gimple_stmt_iterator *gsi_p,
6071 bool *handled_ops_p,
6072 struct walk_stmt_info *wi)
6073 {
6074 int *info = (int *) wi->info;
6075 gimple stmt = gsi_stmt (*gsi_p);
6076
6077 *handled_ops_p = true;
6078 switch (gimple_code (stmt))
6079 {
6080 WALK_SUBSTMTS;
6081
6082 case GIMPLE_OMP_FOR:
6083 case GIMPLE_OMP_SECTIONS:
6084 *info = *info == 0 ? 1 : -1;
6085 break;
6086 default:
6087 *info = -1;
6088 break;
6089 }
6090 return NULL;
6091 }
6092
6093 struct omp_taskcopy_context
6094 {
6095 /* This field must be at the beginning, as we do "inheritance": Some
6096 callback functions for tree-inline.c (e.g., omp_copy_decl)
6097 receive a copy_body_data pointer that is up-casted to an
6098 omp_context pointer. */
6099 copy_body_data cb;
6100 omp_context *ctx;
6101 };
6102
6103 static tree
6104 task_copyfn_copy_decl (tree var, copy_body_data *cb)
6105 {
6106 struct omp_taskcopy_context *tcctx = (struct omp_taskcopy_context *) cb;
6107
6108 if (splay_tree_lookup (tcctx->ctx->sfield_map, (splay_tree_key) var))
6109 return create_tmp_var (TREE_TYPE (var), NULL);
6110
6111 return var;
6112 }
6113
6114 static tree
6115 task_copyfn_remap_type (struct omp_taskcopy_context *tcctx, tree orig_type)
6116 {
6117 tree name, new_fields = NULL, type, f;
6118
6119 type = lang_hooks.types.make_type (RECORD_TYPE);
6120 name = DECL_NAME (TYPE_NAME (orig_type));
6121 name = build_decl (gimple_location (tcctx->ctx->stmt),
6122 TYPE_DECL, name, type);
6123 TYPE_NAME (type) = name;
6124
6125 for (f = TYPE_FIELDS (orig_type); f ; f = TREE_CHAIN (f))
6126 {
6127 tree new_f = copy_node (f);
6128 DECL_CONTEXT (new_f) = type;
6129 TREE_TYPE (new_f) = remap_type (TREE_TYPE (f), &tcctx->cb);
6130 TREE_CHAIN (new_f) = new_fields;
6131 walk_tree (&DECL_SIZE (new_f), copy_tree_body_r, &tcctx->cb, NULL);
6132 walk_tree (&DECL_SIZE_UNIT (new_f), copy_tree_body_r, &tcctx->cb, NULL);
6133 walk_tree (&DECL_FIELD_OFFSET (new_f), copy_tree_body_r,
6134 &tcctx->cb, NULL);
6135 new_fields = new_f;
6136 *pointer_map_insert (tcctx->cb.decl_map, f) = new_f;
6137 }
6138 TYPE_FIELDS (type) = nreverse (new_fields);
6139 layout_type (type);
6140 return type;
6141 }
6142
6143 /* Create the task firstprivate copy function (copyfn) for TASK_STMT. */
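/* Hedged sketch of what the generated copy function does (field names are
   made up): given DST, the destination record passed as the first
   argument, and SRC, the record filled in by the thread that encountered
   the task, it emits roughly

     dst->shared_ptr = src->shared_ptr;
     dst->firstpriv = src->firstpriv;     (or the language's copy
                                           constructor, via the
                                           omp_clause_copy_ctor hook)

   for each shared pointer and firstprivate field, remapping variably
   sized types first when necessary.  */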
6144
6145 static void
6146 create_task_copyfn (gimple task_stmt, omp_context *ctx)
6147 {
6148 struct function *child_cfun;
6149 tree child_fn, t, c, src, dst, f, sf, arg, sarg, decl;
6150 tree record_type, srecord_type, bind, list;
6151 bool record_needs_remap = false, srecord_needs_remap = false;
6152 splay_tree_node n;
6153 struct omp_taskcopy_context tcctx;
6154 struct gimplify_ctx gctx;
6155
6156 child_fn = gimple_omp_task_copy_fn (task_stmt);
6157 child_cfun = DECL_STRUCT_FUNCTION (child_fn);
6158 gcc_assert (child_cfun->cfg == NULL);
6159 child_cfun->dont_save_pending_sizes_p = 1;
6160 DECL_SAVED_TREE (child_fn) = alloc_stmt_list ();
6161
6162 /* Reset DECL_CONTEXT on function arguments. */
6163 for (t = DECL_ARGUMENTS (child_fn); t; t = TREE_CHAIN (t))
6164 DECL_CONTEXT (t) = child_fn;
6165
6166 /* Populate the function. */
6167 push_gimplify_context (&gctx);
6168 current_function_decl = child_fn;
6169
6170 bind = build3 (BIND_EXPR, void_type_node, NULL, NULL, NULL);
6171 TREE_SIDE_EFFECTS (bind) = 1;
6172 list = NULL;
6173 DECL_SAVED_TREE (child_fn) = bind;
6174 DECL_SOURCE_LOCATION (child_fn) = gimple_location (task_stmt);
6175
6176 /* Remap src and dst argument types if needed. */
6177 record_type = ctx->record_type;
6178 srecord_type = ctx->srecord_type;
6179 for (f = TYPE_FIELDS (record_type); f ; f = TREE_CHAIN (f))
6180 if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
6181 {
6182 record_needs_remap = true;
6183 break;
6184 }
6185 for (f = TYPE_FIELDS (srecord_type); f ; f = TREE_CHAIN (f))
6186 if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
6187 {
6188 srecord_needs_remap = true;
6189 break;
6190 }
6191
6192 if (record_needs_remap || srecord_needs_remap)
6193 {
6194 memset (&tcctx, '\0', sizeof (tcctx));
6195 tcctx.cb.src_fn = ctx->cb.src_fn;
6196 tcctx.cb.dst_fn = child_fn;
6197 tcctx.cb.src_node = cgraph_node (tcctx.cb.src_fn);
6198 tcctx.cb.dst_node = tcctx.cb.src_node;
6199 tcctx.cb.src_cfun = ctx->cb.src_cfun;
6200 tcctx.cb.copy_decl = task_copyfn_copy_decl;
6201 tcctx.cb.eh_region = -1;
6202 tcctx.cb.transform_call_graph_edges = CB_CGE_MOVE;
6203 tcctx.cb.decl_map = pointer_map_create ();
6204 tcctx.ctx = ctx;
6205
6206 if (record_needs_remap)
6207 record_type = task_copyfn_remap_type (&tcctx, record_type);
6208 if (srecord_needs_remap)
6209 srecord_type = task_copyfn_remap_type (&tcctx, srecord_type);
6210 }
6211 else
6212 tcctx.cb.decl_map = NULL;
6213
6214 push_cfun (child_cfun);
6215
6216 arg = DECL_ARGUMENTS (child_fn);
6217 TREE_TYPE (arg) = build_pointer_type (record_type);
6218 sarg = TREE_CHAIN (arg);
6219 TREE_TYPE (sarg) = build_pointer_type (srecord_type);
6220
6221 /* First pass: initialize temporaries used in record_type and srecord_type
6222 sizes and field offsets. */
6223 if (tcctx.cb.decl_map)
6224 for (c = gimple_omp_task_clauses (task_stmt); c; c = OMP_CLAUSE_CHAIN (c))
6225 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_FIRSTPRIVATE)
6226 {
6227 tree *p;
6228
6229 decl = OMP_CLAUSE_DECL (c);
6230 p = (tree *) pointer_map_contains (tcctx.cb.decl_map, decl);
6231 if (p == NULL)
6232 continue;
6233 n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
6234 sf = (tree) n->value;
6235 sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
6236 src = build_fold_indirect_ref (sarg);
6237 src = build3 (COMPONENT_REF, TREE_TYPE (sf), src, sf, NULL);
6238 t = build2 (MODIFY_EXPR, TREE_TYPE (*p), *p, src);
6239 append_to_statement_list (t, &list);
6240 }
6241
6242 /* Second pass: copy shared var pointers and copy-construct non-VLA
6243 firstprivate vars. */
6244 for (c = gimple_omp_task_clauses (task_stmt); c; c = OMP_CLAUSE_CHAIN (c))
6245 switch (OMP_CLAUSE_CODE (c))
6246 {
6247 case OMP_CLAUSE_SHARED:
6248 decl = OMP_CLAUSE_DECL (c);
6249 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
6250 if (n == NULL)
6251 break;
6252 f = (tree) n->value;
6253 if (tcctx.cb.decl_map)
6254 f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
6255 n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
6256 sf = (tree) n->value;
6257 if (tcctx.cb.decl_map)
6258 sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
6259 src = build_fold_indirect_ref (sarg);
6260 src = build3 (COMPONENT_REF, TREE_TYPE (sf), src, sf, NULL);
6261 dst = build_fold_indirect_ref (arg);
6262 dst = build3 (COMPONENT_REF, TREE_TYPE (f), dst, f, NULL);
6263 t = build2 (MODIFY_EXPR, TREE_TYPE (dst), dst, src);
6264 append_to_statement_list (t, &list);
6265 break;
6266 case OMP_CLAUSE_FIRSTPRIVATE:
6267 decl = OMP_CLAUSE_DECL (c);
6268 if (is_variable_sized (decl))
6269 break;
6270 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
6271 if (n == NULL)
6272 break;
6273 f = (tree) n->value;
6274 if (tcctx.cb.decl_map)
6275 f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
6276 n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
6277 if (n != NULL)
6278 {
6279 sf = (tree) n->value;
6280 if (tcctx.cb.decl_map)
6281 sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
6282 src = build_fold_indirect_ref (sarg);
6283 src = build3 (COMPONENT_REF, TREE_TYPE (sf), src, sf, NULL);
6284 if (use_pointer_for_field (decl, NULL) || is_reference (decl))
6285 src = build_fold_indirect_ref (src);
6286 }
6287 else
6288 src = decl;
6289 dst = build_fold_indirect_ref (arg);
6290 dst = build3 (COMPONENT_REF, TREE_TYPE (f), dst, f, NULL);
6291 t = lang_hooks.decls.omp_clause_copy_ctor (c, dst, src);
6292 append_to_statement_list (t, &list);
6293 break;
6294 case OMP_CLAUSE_PRIVATE:
6295 if (! OMP_CLAUSE_PRIVATE_OUTER_REF (c))
6296 break;
6297 decl = OMP_CLAUSE_DECL (c);
6298 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
6299 f = (tree) n->value;
6300 if (tcctx.cb.decl_map)
6301 f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
6302 n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
6303 if (n != NULL)
6304 {
6305 sf = (tree) n->value;
6306 if (tcctx.cb.decl_map)
6307 sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
6308 src = build_fold_indirect_ref (sarg);
6309 src = build3 (COMPONENT_REF, TREE_TYPE (sf), src, sf, NULL);
6310 if (use_pointer_for_field (decl, NULL))
6311 src = build_fold_indirect_ref (src);
6312 }
6313 else
6314 src = decl;
6315 dst = build_fold_indirect_ref (arg);
6316 dst = build3 (COMPONENT_REF, TREE_TYPE (f), dst, f, NULL);
6317 t = build2 (MODIFY_EXPR, TREE_TYPE (dst), dst, src);
6318 append_to_statement_list (t, &list);
6319 break;
6320 default:
6321 break;
6322 }
6323
6324 /* Last pass: handle VLA firstprivates. */
6325 if (tcctx.cb.decl_map)
6326 for (c = gimple_omp_task_clauses (task_stmt); c; c = OMP_CLAUSE_CHAIN (c))
6327 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_FIRSTPRIVATE)
6328 {
6329 tree ind, ptr, df;
6330
6331 decl = OMP_CLAUSE_DECL (c);
6332 if (!is_variable_sized (decl))
6333 continue;
6334 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
6335 if (n == NULL)
6336 continue;
6337 f = (tree) n->value;
6338 f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
6339 gcc_assert (DECL_HAS_VALUE_EXPR_P (decl));
6340 ind = DECL_VALUE_EXPR (decl);
6341 gcc_assert (TREE_CODE (ind) == INDIRECT_REF);
6342 gcc_assert (DECL_P (TREE_OPERAND (ind, 0)));
6343 n = splay_tree_lookup (ctx->sfield_map,
6344 (splay_tree_key) TREE_OPERAND (ind, 0));
6345 sf = (tree) n->value;
6346 sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
6347 src = build_fold_indirect_ref (sarg);
6348 src = build3 (COMPONENT_REF, TREE_TYPE (sf), src, sf, NULL);
6349 src = build_fold_indirect_ref (src);
6350 dst = build_fold_indirect_ref (arg);
6351 dst = build3 (COMPONENT_REF, TREE_TYPE (f), dst, f, NULL);
6352 t = lang_hooks.decls.omp_clause_copy_ctor (c, dst, src);
6353 append_to_statement_list (t, &list);
6354 n = splay_tree_lookup (ctx->field_map,
6355 (splay_tree_key) TREE_OPERAND (ind, 0));
6356 df = (tree) n->value;
6357 df = *(tree *) pointer_map_contains (tcctx.cb.decl_map, df);
6358 ptr = build_fold_indirect_ref (arg);
6359 ptr = build3 (COMPONENT_REF, TREE_TYPE (df), ptr, df, NULL);
6360 t = build2 (MODIFY_EXPR, TREE_TYPE (ptr), ptr,
6361 build_fold_addr_expr (dst));
6362 append_to_statement_list (t, &list);
6363 }
6364
6365 t = build1 (RETURN_EXPR, void_type_node, NULL);
6366 append_to_statement_list (t, &list);
6367
6368 if (tcctx.cb.decl_map)
6369 pointer_map_destroy (tcctx.cb.decl_map);
6370 pop_gimplify_context (NULL);
6371 BIND_EXPR_BODY (bind) = list;
6372 pop_cfun ();
6373 current_function_decl = ctx->cb.src_fn;
6374 }
6375
6376 /* Lower the OpenMP parallel or task directive in the current statement
6377 in GSI_P. CTX holds context information for the directive. */
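/* Illustrative sketch of the result (hedged): when a data-sharing record
   exists, the lowered body starts with an assignment of the form

     receiver_decl = (cast) &.omp_data_o;

   followed by the privatization code, the original body, the copy-back
   and reduction code, and a closing GIMPLE_OMP_RETURN; the sends that
   fill in .omp_data_o are emitted around the directive itself.  */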
6378
6379 static void
6380 lower_omp_taskreg (gimple_stmt_iterator *gsi_p, omp_context *ctx)
6381 {
6382 tree clauses;
6383 tree child_fn, t;
6384 gimple stmt = gsi_stmt (*gsi_p);
6385 gimple par_bind, bind;
6386 gimple_seq par_body, olist, ilist, par_olist, par_ilist, new_body;
6387 struct gimplify_ctx gctx;
6388
6389 clauses = gimple_omp_taskreg_clauses (stmt);
6390 par_bind = gimple_seq_first_stmt (gimple_omp_body (stmt));
6391 par_body = gimple_bind_body (par_bind);
6392 child_fn = ctx->cb.dst_fn;
6393 if (gimple_code (stmt) == GIMPLE_OMP_PARALLEL
6394 && !gimple_omp_parallel_combined_p (stmt))
6395 {
6396 struct walk_stmt_info wi;
6397 int ws_num = 0;
6398
6399 memset (&wi, 0, sizeof (wi));
6400 wi.info = &ws_num;
6401 wi.val_only = true;
6402 walk_gimple_seq (par_body, check_combined_parallel, NULL, &wi);
6403 if (ws_num == 1)
6404 gimple_omp_parallel_set_combined_p (stmt, true);
6405 }
6406 if (ctx->srecord_type)
6407 create_task_copyfn (stmt, ctx);
6408
6409 push_gimplify_context (&gctx);
6410
6411 par_olist = NULL;
6412 par_ilist = NULL;
6413 lower_rec_input_clauses (clauses, &par_ilist, &par_olist, ctx);
6414 lower_omp (par_body, ctx);
6415 if (gimple_code (stmt) == GIMPLE_OMP_PARALLEL)
6416 lower_reduction_clauses (clauses, &par_olist, ctx);
6417
6418 /* Declare all the variables created by mapping and the variables
6419 declared in the scope of the parallel body. */
6420 record_vars_into (ctx->block_vars, child_fn);
6421 record_vars_into (gimple_bind_vars (par_bind), child_fn);
6422
6423 if (ctx->record_type)
6424 {
6425 ctx->sender_decl
6426 = create_tmp_var (ctx->srecord_type ? ctx->srecord_type
6427 : ctx->record_type, ".omp_data_o");
6428 TREE_ADDRESSABLE (ctx->sender_decl) = 1;
6429 gimple_omp_taskreg_set_data_arg (stmt, ctx->sender_decl);
6430 }
6431
6432 olist = NULL;
6433 ilist = NULL;
6434 lower_send_clauses (clauses, &ilist, &olist, ctx);
6435 lower_send_shared_vars (&ilist, &olist, ctx);
6436
6437 /* Once all the expansions are done, sequence all the different
6438 fragments inside gimple_omp_body. */
6439
6440 new_body = NULL;
6441
6442 if (ctx->record_type)
6443 {
6444 t = build_fold_addr_expr (ctx->sender_decl);
6445 /* fixup_child_record_type might have changed receiver_decl's type. */
6446 t = fold_convert (TREE_TYPE (ctx->receiver_decl), t);
6447 gimple_seq_add_stmt (&new_body,
6448 gimple_build_assign (ctx->receiver_decl, t));
6449 }
6450
6451 gimple_seq_add_seq (&new_body, par_ilist);
6452 gimple_seq_add_seq (&new_body, par_body);
6453 gimple_seq_add_seq (&new_body, par_olist);
6454 new_body = maybe_catch_exception (new_body);
6455 gimple_seq_add_stmt (&new_body, gimple_build_omp_return (false));
6456 gimple_omp_set_body (stmt, new_body);
6457
6458 bind = gimple_build_bind (NULL, NULL, gimple_bind_block (par_bind));
6459 gimple_bind_add_stmt (bind, stmt);
6460 if (ilist || olist)
6461 {
6462 gimple_seq_add_stmt (&ilist, bind);
6463 gimple_seq_add_seq (&ilist, olist);
6464 bind = gimple_build_bind (NULL, ilist, NULL);
6465 }
6466
6467 gsi_replace (gsi_p, bind, true);
6468
6469 pop_gimplify_context (NULL);
6470 }
6471
6472 /* Callback for lower_omp_1. Return non-NULL if *tp needs to be
6473 regimplified. If DATA is non-NULL, lower_omp_1 is outside
6474 of any OpenMP context, but with task_shared_vars set. */
6475
6476 static tree
6477 lower_omp_regimplify_p (tree *tp, int *walk_subtrees,
6478 void *data)
6479 {
6480 tree t = *tp;
6481
6482 /* Any variable with DECL_VALUE_EXPR needs to be regimplified. */
6483 if (TREE_CODE (t) == VAR_DECL && data == NULL && DECL_HAS_VALUE_EXPR_P (t))
6484 return t;
6485
6486 if (task_shared_vars
6487 && DECL_P (t)
6488 && bitmap_bit_p (task_shared_vars, DECL_UID (t)))
6489 return t;
6490
6491 /* If a global variable has been privatized, TREE_CONSTANT on
6492 ADDR_EXPR might be wrong. */
6493 if (data == NULL && TREE_CODE (t) == ADDR_EXPR)
6494 recompute_tree_invariant_for_addr_expr (t);
6495
6496 *walk_subtrees = !TYPE_P (t) && !DECL_P (t);
6497 return NULL_TREE;
6498 }
6499
6500 static void
6501 lower_omp_1 (gimple_stmt_iterator *gsi_p, omp_context *ctx)
6502 {
6503 gimple stmt = gsi_stmt (*gsi_p);
6504 struct walk_stmt_info wi;
6505
6506 if (gimple_has_location (stmt))
6507 input_location = gimple_location (stmt);
6508
6509 if (task_shared_vars)
6510 memset (&wi, '\0', sizeof (wi));
6511
6512 /* If we have issued syntax errors, avoid doing any heavy lifting.
6513 Just replace the OpenMP directives with a NOP to avoid
6514 confusing RTL expansion. */
6515 if (errorcount && is_gimple_omp (stmt))
6516 {
6517 gsi_replace (gsi_p, gimple_build_nop (), true);
6518 return;
6519 }
6520
6521 switch (gimple_code (stmt))
6522 {
6523 case GIMPLE_COND:
6524 if ((ctx || task_shared_vars)
6525 && (walk_tree (gimple_cond_lhs_ptr (stmt), lower_omp_regimplify_p,
6526 ctx ? NULL : &wi, NULL)
6527 || walk_tree (gimple_cond_rhs_ptr (stmt), lower_omp_regimplify_p,
6528 ctx ? NULL : &wi, NULL)))
6529 gimple_regimplify_operands (stmt, gsi_p);
6530 break;
6531 case GIMPLE_CATCH:
6532 lower_omp (gimple_catch_handler (stmt), ctx);
6533 break;
6534 case GIMPLE_EH_FILTER:
6535 lower_omp (gimple_eh_filter_failure (stmt), ctx);
6536 break;
6537 case GIMPLE_TRY:
6538 lower_omp (gimple_try_eval (stmt), ctx);
6539 lower_omp (gimple_try_cleanup (stmt), ctx);
6540 break;
6541 case GIMPLE_BIND:
6542 lower_omp (gimple_bind_body (stmt), ctx);
6543 break;
6544 case GIMPLE_OMP_PARALLEL:
6545 case GIMPLE_OMP_TASK:
6546 ctx = maybe_lookup_ctx (stmt);
6547 lower_omp_taskreg (gsi_p, ctx);
6548 break;
6549 case GIMPLE_OMP_FOR:
6550 ctx = maybe_lookup_ctx (stmt);
6551 gcc_assert (ctx);
6552 lower_omp_for (gsi_p, ctx);
6553 break;
6554 case GIMPLE_OMP_SECTIONS:
6555 ctx = maybe_lookup_ctx (stmt);
6556 gcc_assert (ctx);
6557 lower_omp_sections (gsi_p, ctx);
6558 break;
6559 case GIMPLE_OMP_SINGLE:
6560 ctx = maybe_lookup_ctx (stmt);
6561 gcc_assert (ctx);
6562 lower_omp_single (gsi_p, ctx);
6563 break;
6564 case GIMPLE_OMP_MASTER:
6565 ctx = maybe_lookup_ctx (stmt);
6566 gcc_assert (ctx);
6567 lower_omp_master (gsi_p, ctx);
6568 break;
6569 case GIMPLE_OMP_ORDERED:
6570 ctx = maybe_lookup_ctx (stmt);
6571 gcc_assert (ctx);
6572 lower_omp_ordered (gsi_p, ctx);
6573 break;
6574 case GIMPLE_OMP_CRITICAL:
6575 ctx = maybe_lookup_ctx (stmt);
6576 gcc_assert (ctx);
6577 lower_omp_critical (gsi_p, ctx);
6578 break;
6579 case GIMPLE_OMP_ATOMIC_LOAD:
6580 if ((ctx || task_shared_vars)
6581 && walk_tree (gimple_omp_atomic_load_rhs_ptr (stmt),
6582 lower_omp_regimplify_p, ctx ? NULL : &wi, NULL))
6583 gimple_regimplify_operands (stmt, gsi_p);
6584 break;
6585 default:
6586 if ((ctx || task_shared_vars)
6587 && walk_gimple_op (stmt, lower_omp_regimplify_p,
6588 ctx ? NULL : &wi))
6589 gimple_regimplify_operands (stmt, gsi_p);
6590 break;
6591 }
6592 }
6593
6594 static void
6595 lower_omp (gimple_seq body, omp_context *ctx)
6596 {
6597 location_t saved_location = input_location;
6598 gimple_stmt_iterator gsi = gsi_start (body);
6599 for (gsi = gsi_start (body); !gsi_end_p (gsi); gsi_next (&gsi))
6600 lower_omp_1 (&gsi, ctx);
6601 input_location = saved_location;
6602 }
6603 \f
6604 /* Main entry point. */
6605
6606 static unsigned int
6607 execute_lower_omp (void)
6608 {
6609 gimple_seq body;
6610
6611 /* This pass always runs, to provide PROP_gimple_lomp.
6612 But there is nothing to do unless -fopenmp is given. */
6613 if (flag_openmp == 0)
6614 return 0;
6615
6616 all_contexts = splay_tree_new (splay_tree_compare_pointers, 0,
6617 delete_omp_context);
6618
6619 body = gimple_body (current_function_decl);
6620 scan_omp (body, NULL);
6621 gcc_assert (taskreg_nesting_level == 0);
6622
6623 if (all_contexts->root)
6624 {
6625 struct gimplify_ctx gctx;
6626
6627 if (task_shared_vars)
6628 push_gimplify_context (&gctx);
6629 lower_omp (body, NULL);
6630 if (task_shared_vars)
6631 pop_gimplify_context (NULL);
6632 }
6633
6634 if (all_contexts)
6635 {
6636 splay_tree_delete (all_contexts);
6637 all_contexts = NULL;
6638 }
6639 BITMAP_FREE (task_shared_vars);
6640 return 0;
6641 }
6642
6643 struct gimple_opt_pass pass_lower_omp =
6644 {
6645 {
6646 GIMPLE_PASS,
6647 "omplower", /* name */
6648 NULL, /* gate */
6649 execute_lower_omp, /* execute */
6650 NULL, /* sub */
6651 NULL, /* next */
6652 0, /* static_pass_number */
6653 TV_NONE, /* tv_id */
6654 PROP_gimple_any, /* properties_required */
6655 PROP_gimple_lomp, /* properties_provided */
6656 0, /* properties_destroyed */
6657 0, /* todo_flags_start */
6658 TODO_dump_func /* todo_flags_finish */
6659 }
6660 };
6661 \f
6662 /* The following is a utility to diagnose OpenMP structured block violations.
6663 It is not part of the "omplower" pass, as that's invoked too late. It
6664 should be invoked by the respective front ends after gimplification. */
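/* A typical case this utility diagnoses (illustrative only):

     #pragma omp parallel
     {
       goto out;        <- invalid exit from the structured block
     }
     out:;

   Such jumps across an OpenMP structured-block boundary are reported and
   the offending branch is replaced with a GIMPLE_NOP so that later passes
   are not confused.  */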
6665
6666 static splay_tree all_labels;
6667
6668 /* Check for mismatched contexts and generate an error if needed. Return
6669 true if an error is detected. */
6670
6671 static bool
6672 diagnose_sb_0 (gimple_stmt_iterator *gsi_p,
6673 gimple branch_ctx, gimple label_ctx)
6674 {
6675 if (label_ctx == branch_ctx)
6676 return false;
6677
6678
6679 /*
6680 Previously we kept track of the label's entire context in diagnose_sb_[12]
6681 so we could traverse it and issue a correct "exit" or "enter" error
6682 message upon a structured block violation.
6683
6684 We built the context by building a list with tree_cons'ing, but there is
6685 no easy counterpart in gimple tuples. It seems like far too much work
6686 for issuing exit/enter error messages. If someone really misses the
6687 distinct error message... patches welcome.
6688 */
6689
6690 #if 0
6691 /* Try to avoid confusing the user by producing an error message
6692 with correct "exit" or "enter" verbiage. We prefer "exit"
6693 unless we can show that LABEL_CTX is nested within BRANCH_CTX. */
6694 if (branch_ctx == NULL)
6695 exit_p = false;
6696 else
6697 {
6698 while (label_ctx)
6699 {
6700 if (TREE_VALUE (label_ctx) == branch_ctx)
6701 {
6702 exit_p = false;
6703 break;
6704 }
6705 label_ctx = TREE_CHAIN (label_ctx);
6706 }
6707 }
6708
6709 if (exit_p)
6710 error ("invalid exit from OpenMP structured block");
6711 else
6712 error ("invalid entry to OpenMP structured block");
6713 #endif
6714
6715 /* If it's obvious we have an invalid entry, be specific about the error. */
6716 if (branch_ctx == NULL)
6717 error ("invalid entry to OpenMP structured block");
6718 else
6719 /* Otherwise, be vague and lazy, but efficient. */
6720 error ("invalid branch to/from an OpenMP structured block");
6721
6722 gsi_replace (gsi_p, gimple_build_nop (), false);
6723 return true;
6724 }
6725
6726 /* Pass 1: Create a minimal tree of OpenMP structured blocks, and record
6727 where each label is found. */
6728
6729 static tree
6730 diagnose_sb_1 (gimple_stmt_iterator *gsi_p, bool *handled_ops_p,
6731 struct walk_stmt_info *wi)
6732 {
6733 gimple context = (gimple) wi->info;
6734 gimple inner_context;
6735 gimple stmt = gsi_stmt (*gsi_p);
6736
6737 *handled_ops_p = true;
6738
6739 switch (gimple_code (stmt))
6740 {
6741 WALK_SUBSTMTS;
6742
6743 case GIMPLE_OMP_PARALLEL:
6744 case GIMPLE_OMP_TASK:
6745 case GIMPLE_OMP_SECTIONS:
6746 case GIMPLE_OMP_SINGLE:
6747 case GIMPLE_OMP_SECTION:
6748 case GIMPLE_OMP_MASTER:
6749 case GIMPLE_OMP_ORDERED:
6750 case GIMPLE_OMP_CRITICAL:
6751 /* The minimal context here is just the current OMP construct. */
6752 inner_context = stmt;
6753 wi->info = inner_context;
6754 walk_gimple_seq (gimple_omp_body (stmt), diagnose_sb_1, NULL, wi);
6755 wi->info = context;
6756 break;
6757
6758 case GIMPLE_OMP_FOR:
6759 inner_context = stmt;
6760 wi->info = inner_context;
6761 /* gimple_omp_for_{index,initial,final} are all DECLs; no need to
6762 walk them. */
6763 walk_gimple_seq (gimple_omp_for_pre_body (stmt),
6764 diagnose_sb_1, NULL, wi);
6765 walk_gimple_seq (gimple_omp_body (stmt), diagnose_sb_1, NULL, wi);
6766 wi->info = context;
6767 break;
6768
6769 case GIMPLE_LABEL:
6770 splay_tree_insert (all_labels, (splay_tree_key) gimple_label_label (stmt),
6771 (splay_tree_value) context);
6772 break;
6773
6774 default:
6775 break;
6776 }
6777
6778 return NULL_TREE;
6779 }
6780
6781 /* Pass 2: Check each branch and see if its context differs from
6782 the destination label's context. */
6783
6784 static tree
6785 diagnose_sb_2 (gimple_stmt_iterator *gsi_p, bool *handled_ops_p,
6786 struct walk_stmt_info *wi)
6787 {
6788 gimple context = (gimple) wi->info;
6789 splay_tree_node n;
6790 gimple stmt = gsi_stmt (*gsi_p);
6791
6792 *handled_ops_p = true;
6793
6794 switch (gimple_code (stmt))
6795 {
6796 WALK_SUBSTMTS;
6797
6798 case GIMPLE_OMP_PARALLEL:
6799 case GIMPLE_OMP_TASK:
6800 case GIMPLE_OMP_SECTIONS:
6801 case GIMPLE_OMP_SINGLE:
6802 case GIMPLE_OMP_SECTION:
6803 case GIMPLE_OMP_MASTER:
6804 case GIMPLE_OMP_ORDERED:
6805 case GIMPLE_OMP_CRITICAL:
6806 wi->info = stmt;
6807 walk_gimple_seq (gimple_omp_body (stmt), diagnose_sb_2, NULL, wi);
6808 wi->info = context;
6809 break;
6810
6811 case GIMPLE_OMP_FOR:
6812 wi->info = stmt;
6813 /* gimple_omp_for_{index,initial,final} are all DECLs; no need to
6814 walk them. */
6815 walk_gimple_seq (gimple_omp_for_pre_body (stmt),
6816 diagnose_sb_2, NULL, wi);
6817 walk_gimple_seq (gimple_omp_body (stmt), diagnose_sb_2, NULL, wi);
6818 wi->info = context;
6819 break;
6820
6821 case GIMPLE_GOTO:
6822 {
6823 tree lab = gimple_goto_dest (stmt);
6824 if (TREE_CODE (lab) != LABEL_DECL)
6825 break;
6826
6827 n = splay_tree_lookup (all_labels, (splay_tree_key) lab);
6828 diagnose_sb_0 (gsi_p, context, n ? (gimple) n->value : NULL);
6829 }
6830 break;
6831
6832 case GIMPLE_SWITCH:
6833 {
6834 unsigned int i;
6835 for (i = 0; i < gimple_switch_num_labels (stmt); ++i)
6836 {
6837 tree lab = CASE_LABEL (gimple_switch_label (stmt, i));
6838 n = splay_tree_lookup (all_labels, (splay_tree_key) lab);
6839 if (n && diagnose_sb_0 (gsi_p, context, (gimple) n->value))
6840 break;
6841 }
6842 }
6843 break;
6844
6845 case GIMPLE_RETURN:
6846 diagnose_sb_0 (gsi_p, context, NULL);
6847 break;
6848
6849 default:
6850 break;
6851 }
6852
6853 return NULL_TREE;
6854 }
6855
6856 void
6857 diagnose_omp_structured_block_errors (tree fndecl)
6858 {
6859 tree save_current = current_function_decl;
6860 struct walk_stmt_info wi;
6861 struct function *old_cfun = cfun;
6862 gimple_seq body = gimple_body (fndecl);
6863
6864 current_function_decl = fndecl;
6865 set_cfun (DECL_STRUCT_FUNCTION (fndecl));
6866
6867 all_labels = splay_tree_new (splay_tree_compare_pointers, 0, 0);
6868
6869 memset (&wi, 0, sizeof (wi));
6870 walk_gimple_seq (body, diagnose_sb_1, NULL, &wi);
6871
6872 memset (&wi, 0, sizeof (wi));
6873 wi.want_locations = true;
6874 walk_gimple_seq (body, diagnose_sb_2, NULL, &wi);
6875
6876 splay_tree_delete (all_labels);
6877 all_labels = NULL;
6878
6879 set_cfun (old_cfun);
6880 current_function_decl = save_current;
6881 }
6882
6883 #include "gt-omp-low.h"