1 /* Expansion pass for OMP directives. Outlines regions of certain OMP
2 directives to separate functions, converts others into explicit calls to the
3 runtime library (libgomp) and so forth.
4
5 Copyright (C) 2005-2017 Free Software Foundation, Inc.
6
7 This file is part of GCC.
8
9 GCC is free software; you can redistribute it and/or modify it under
10 the terms of the GNU General Public License as published by the Free
11 Software Foundation; either version 3, or (at your option) any later
12 version.
13
14 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
15 WARRANTY; without even the implied warranty of MERCHANTABILITY or
16 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
17 for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with GCC; see the file COPYING3. If not see
21 <http://www.gnu.org/licenses/>. */
22
23 #include "config.h"
24 #include "system.h"
25 #include "coretypes.h"
26 #include "memmodel.h"
27 #include "backend.h"
28 #include "target.h"
29 #include "rtl.h"
30 #include "tree.h"
31 #include "gimple.h"
32 #include "cfghooks.h"
33 #include "tree-pass.h"
34 #include "ssa.h"
35 #include "optabs.h"
36 #include "cgraph.h"
37 #include "pretty-print.h"
38 #include "diagnostic-core.h"
39 #include "fold-const.h"
40 #include "stor-layout.h"
41 #include "cfganal.h"
42 #include "internal-fn.h"
43 #include "gimplify.h"
44 #include "gimple-iterator.h"
45 #include "gimplify-me.h"
46 #include "gimple-walk.h"
47 #include "tree-cfg.h"
48 #include "tree-into-ssa.h"
49 #include "tree-ssa.h"
50 #include "splay-tree.h"
51 #include "cfgloop.h"
52 #include "omp-general.h"
53 #include "omp-offload.h"
54 #include "tree-cfgcleanup.h"
55 #include "symbol-summary.h"
56 #include "cilk.h"
57 #include "gomp-constants.h"
58 #include "gimple-pretty-print.h"
59 #include "hsa-common.h"
60 #include "debug.h"
61
62
63 /* OMP region information. Every parallel and workshare
64 directive is enclosed between two markers, the OMP_* directive
65 and a corresponding GIMPLE_OMP_RETURN statement. */
66
67 struct omp_region
68 {
69 /* The enclosing region. */
70 struct omp_region *outer;
71
72 /* First child region. */
73 struct omp_region *inner;
74
75 /* Next peer region. */
76 struct omp_region *next;
77
78 /* Block containing the omp directive as its last stmt. */
79 basic_block entry;
80
81 /* Block containing the GIMPLE_OMP_RETURN as its last stmt. */
82 basic_block exit;
83
84 /* Block containing the GIMPLE_OMP_CONTINUE as its last stmt. */
85 basic_block cont;
86
87 /* If this is a combined parallel+workshare region, this is a list
88 of additional arguments needed by the combined parallel+workshare
89 library call. */
90 vec<tree, va_gc> *ws_args;
91
92 /* The code for the omp directive of this region. */
93 enum gimple_code type;
94
95 /* Schedule kind, only used for GIMPLE_OMP_FOR type regions. */
96 enum omp_clause_schedule_kind sched_kind;
97
98 /* Schedule modifiers. */
99 unsigned char sched_modifiers;
100
101 /* True if this is a combined parallel+workshare region. */
102 bool is_combined_parallel;
103
104 /* The ordered stmt if type is GIMPLE_OMP_ORDERED and it has
105 a depend clause. */
106 gomp_ordered *ord_stmt;
107 };
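
/* For illustration (not generated code), a nest such as

   #pragma omp parallel
     #pragma omp for
       ...

   yields a GIMPLE_OMP_PARALLEL region whose INNER field points to a
   GIMPLE_OMP_FOR child region; sibling directives at the same level are
   chained through NEXT, and each child points back via OUTER.  */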
108
109 static struct omp_region *root_omp_region;
110 static bool omp_any_child_fn_dumped;
111
112 static void expand_omp_build_assign (gimple_stmt_iterator *, tree, tree,
113 bool = false);
114 static gphi *find_phi_with_arg_on_edge (tree, edge);
115 static void expand_omp (struct omp_region *region);
116
117 /* Return true if REGION is a combined parallel+workshare region. */
118
119 static inline bool
120 is_combined_parallel (struct omp_region *region)
121 {
122 return region->is_combined_parallel;
123 }
124
125 /* Given two blocks PAR_ENTRY_BB and WS_ENTRY_BB such that WS_ENTRY_BB
126 is the immediate dominator of PAR_ENTRY_BB, return true if there
127 are no data dependencies that would prevent expanding the parallel
128 directive at PAR_ENTRY_BB as a combined parallel+workshare region.
129
130 When expanding a combined parallel+workshare region, the call to
131 the child function may need additional arguments in the case of
132 GIMPLE_OMP_FOR regions. In some cases, these arguments are
133 computed out of variables passed in from the parent to the child
134 via 'struct .omp_data_s'. For instance:
135
136 #pragma omp parallel for schedule (guided, i * 4)
137 for (j ...)
138
139 Is lowered into:
140
141 # BLOCK 2 (PAR_ENTRY_BB)
142 .omp_data_o.i = i;
143 #pragma omp parallel [child fn: bar.omp_fn.0 ( ..., D.1598)
144
145 # BLOCK 3 (WS_ENTRY_BB)
146 .omp_data_i = &.omp_data_o;
147 D.1667 = .omp_data_i->i;
148 D.1598 = D.1667 * 4;
149 #pragma omp for schedule (guided, D.1598)
150
151 When we outline the parallel region, the call to the child function
152 'bar.omp_fn.0' will need the value D.1598 in its argument list, but
153 that value is computed *after* the call site. So, in principle we
154 cannot do the transformation.
155
156 To see whether the code in WS_ENTRY_BB blocks the combined
157 parallel+workshare call, we collect all the variables used in the
158 GIMPLE_OMP_FOR header and check whether they appear on the LHS of any
159 statement in WS_ENTRY_BB. If so, then we cannot emit the combined
160 call.
161
162 FIXME. If we had the SSA form built at this point, we could merely
163 hoist the code in block 3 into block 2 and be done with it. But at
164 this point we don't have dataflow information and though we could
165 hack something up here, it is really not worth the aggravation. */
166
167 static bool
168 workshare_safe_to_combine_p (basic_block ws_entry_bb)
169 {
170 struct omp_for_data fd;
171 gimple *ws_stmt = last_stmt (ws_entry_bb);
172
173 if (gimple_code (ws_stmt) == GIMPLE_OMP_SECTIONS)
174 return true;
175
176 gcc_assert (gimple_code (ws_stmt) == GIMPLE_OMP_FOR);
177
178 omp_extract_for_data (as_a <gomp_for *> (ws_stmt), &fd, NULL);
179
180 if (fd.collapse > 1 && TREE_CODE (fd.loop.n2) != INTEGER_CST)
181 return false;
182 if (fd.iter_type != long_integer_type_node)
183 return false;
184
185 /* FIXME. We give up too easily here. If any of these arguments
186 are not constants, they will likely involve variables that have
187 been mapped into fields of .omp_data_s for sharing with the child
188 function. With appropriate data flow, it would be possible to
189 see through this. */
190 if (!is_gimple_min_invariant (fd.loop.n1)
191 || !is_gimple_min_invariant (fd.loop.n2)
192 || !is_gimple_min_invariant (fd.loop.step)
193 || (fd.chunk_size && !is_gimple_min_invariant (fd.chunk_size)))
194 return false;
195
196 return true;
197 }
198
199 /* Adjust CHUNK_SIZE from SCHEDULE clause, depending on simd modifier
200 presence (SIMD_SCHEDULE). */
201
202 static tree
203 omp_adjust_chunk_size (tree chunk_size, bool simd_schedule)
204 {
205 if (!simd_schedule)
206 return chunk_size;
207
208 int vf = omp_max_vf ();
209 if (vf == 1)
210 return chunk_size;
211
212 tree type = TREE_TYPE (chunk_size);
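/* Round CHUNK_SIZE up to a multiple of the vectorization factor: e.g.
   with vf == 8 a chunk size of 10 becomes (10 + 7) & -8 == 16
   (illustrative values only).  */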
213 chunk_size = fold_build2 (PLUS_EXPR, type, chunk_size,
214 build_int_cst (type, vf - 1));
215 return fold_build2 (BIT_AND_EXPR, type, chunk_size,
216 build_int_cst (type, -vf));
217 }
218
219 /* Collect additional arguments needed to emit a combined
220 parallel+workshare call. WS_STMT is the workshare directive being
221 expanded. */
222
223 static vec<tree, va_gc> *
224 get_ws_args_for (gimple *par_stmt, gimple *ws_stmt)
225 {
226 tree t;
227 location_t loc = gimple_location (ws_stmt);
228 vec<tree, va_gc> *ws_args;
229
230 if (gomp_for *for_stmt = dyn_cast <gomp_for *> (ws_stmt))
231 {
232 struct omp_for_data fd;
233 tree n1, n2;
234
235 omp_extract_for_data (for_stmt, &fd, NULL);
236 n1 = fd.loop.n1;
237 n2 = fd.loop.n2;
238
239 if (gimple_omp_for_combined_into_p (for_stmt))
240 {
241 tree innerc
242 = omp_find_clause (gimple_omp_parallel_clauses (par_stmt),
243 OMP_CLAUSE__LOOPTEMP_);
244 gcc_assert (innerc);
245 n1 = OMP_CLAUSE_DECL (innerc);
246 innerc = omp_find_clause (OMP_CLAUSE_CHAIN (innerc),
247 OMP_CLAUSE__LOOPTEMP_);
248 gcc_assert (innerc);
249 n2 = OMP_CLAUSE_DECL (innerc);
250 }
251
252 vec_alloc (ws_args, 3 + (fd.chunk_size != 0));
253
254 t = fold_convert_loc (loc, long_integer_type_node, n1);
255 ws_args->quick_push (t);
256
257 t = fold_convert_loc (loc, long_integer_type_node, n2);
258 ws_args->quick_push (t);
259
260 t = fold_convert_loc (loc, long_integer_type_node, fd.loop.step);
261 ws_args->quick_push (t);
262
263 if (fd.chunk_size)
264 {
265 t = fold_convert_loc (loc, long_integer_type_node, fd.chunk_size);
266 t = omp_adjust_chunk_size (t, fd.simd_schedule);
267 ws_args->quick_push (t);
268 }
269
270 return ws_args;
271 }
272 else if (gimple_code (ws_stmt) == GIMPLE_OMP_SECTIONS)
273 {
274 /* Number of sections is equal to the number of edges from the
275 GIMPLE_OMP_SECTIONS_SWITCH statement, except for the one to
276 the exit of the sections region. */
277 basic_block bb = single_succ (gimple_bb (ws_stmt));
278 t = build_int_cst (unsigned_type_node, EDGE_COUNT (bb->succs) - 1);
279 vec_alloc (ws_args, 1);
280 ws_args->quick_push (t);
281 return ws_args;
282 }
283
284 gcc_unreachable ();
285 }
286
287 /* Discover whether REGION is a combined parallel+workshare region. */
288
289 static void
290 determine_parallel_type (struct omp_region *region)
291 {
292 basic_block par_entry_bb, par_exit_bb;
293 basic_block ws_entry_bb, ws_exit_bb;
294
295 if (region == NULL || region->inner == NULL
296 || region->exit == NULL || region->inner->exit == NULL
297 || region->inner->cont == NULL)
298 return;
299
300 /* We only support parallel+for and parallel+sections. */
301 if (region->type != GIMPLE_OMP_PARALLEL
302 || (region->inner->type != GIMPLE_OMP_FOR
303 && region->inner->type != GIMPLE_OMP_SECTIONS))
304 return;
305
306 /* Check for perfect nesting PAR_ENTRY_BB -> WS_ENTRY_BB and
307 WS_EXIT_BB -> PAR_EXIT_BB. */
308 par_entry_bb = region->entry;
309 par_exit_bb = region->exit;
310 ws_entry_bb = region->inner->entry;
311 ws_exit_bb = region->inner->exit;
312
313 if (single_succ (par_entry_bb) == ws_entry_bb
314 && single_succ (ws_exit_bb) == par_exit_bb
315 && workshare_safe_to_combine_p (ws_entry_bb)
316 && (gimple_omp_parallel_combined_p (last_stmt (par_entry_bb))
317 || (last_and_only_stmt (ws_entry_bb)
318 && last_and_only_stmt (par_exit_bb))))
319 {
320 gimple *par_stmt = last_stmt (par_entry_bb);
321 gimple *ws_stmt = last_stmt (ws_entry_bb);
322
323 if (region->inner->type == GIMPLE_OMP_FOR)
324 {
325 /* If this is a combined parallel loop, we need to determine
326 whether or not to use the combined library calls. There
327 are two cases where we do not apply the transformation:
328 static loops and any kind of ordered loop. In the first
329 case, we already open code the loop so there is no need
330 to do anything else. In the latter case, the combined
331 parallel loop call would still need extra synchronization
332 to implement ordered semantics, so there would not be any
333 gain in using the combined call. */
334 tree clauses = gimple_omp_for_clauses (ws_stmt);
335 tree c = omp_find_clause (clauses, OMP_CLAUSE_SCHEDULE);
336 if (c == NULL
337 || ((OMP_CLAUSE_SCHEDULE_KIND (c) & OMP_CLAUSE_SCHEDULE_MASK)
338 == OMP_CLAUSE_SCHEDULE_STATIC)
339 || omp_find_clause (clauses, OMP_CLAUSE_ORDERED))
340 {
341 region->is_combined_parallel = false;
342 region->inner->is_combined_parallel = false;
343 return;
344 }
345 }
346
347 region->is_combined_parallel = true;
348 region->inner->is_combined_parallel = true;
349 region->ws_args = get_ws_args_for (par_stmt, ws_stmt);
350 }
351 }
352
353 /* Debugging dumps for parallel regions. */
354 void dump_omp_region (FILE *, struct omp_region *, int);
355 void debug_omp_region (struct omp_region *);
356 void debug_all_omp_regions (void);
357
358 /* Dump the parallel region tree rooted at REGION. */
359
360 void
361 dump_omp_region (FILE *file, struct omp_region *region, int indent)
362 {
363 fprintf (file, "%*sbb %d: %s\n", indent, "", region->entry->index,
364 gimple_code_name[region->type]);
365
366 if (region->inner)
367 dump_omp_region (file, region->inner, indent + 4);
368
369 if (region->cont)
370 {
371 fprintf (file, "%*sbb %d: GIMPLE_OMP_CONTINUE\n", indent, "",
372 region->cont->index);
373 }
374
375 if (region->exit)
376 fprintf (file, "%*sbb %d: GIMPLE_OMP_RETURN\n", indent, "",
377 region->exit->index);
378 else
379 fprintf (file, "%*s[no exit marker]\n", indent, "");
380
381 if (region->next)
382 dump_omp_region (file, region->next, indent);
383 }
384
385 DEBUG_FUNCTION void
386 debug_omp_region (struct omp_region *region)
387 {
388 dump_omp_region (stderr, region, 0);
389 }
390
391 DEBUG_FUNCTION void
392 debug_all_omp_regions (void)
393 {
394 dump_omp_region (stderr, root_omp_region, 0);
395 }
396
397 /* Create a new parallel region starting at STMT inside region PARENT. */
398
399 static struct omp_region *
400 new_omp_region (basic_block bb, enum gimple_code type,
401 struct omp_region *parent)
402 {
403 struct omp_region *region = XCNEW (struct omp_region);
404
405 region->outer = parent;
406 region->entry = bb;
407 region->type = type;
408
409 if (parent)
410 {
411 /* This is a nested region. Add it to the list of inner
412 regions in PARENT. */
413 region->next = parent->inner;
414 parent->inner = region;
415 }
416 else
417 {
418 /* This is a toplevel region. Add it to the list of toplevel
419 regions in ROOT_OMP_REGION. */
420 region->next = root_omp_region;
421 root_omp_region = region;
422 }
423
424 return region;
425 }
426
427 /* Release the memory associated with the region tree rooted at REGION. */
428
429 static void
430 free_omp_region_1 (struct omp_region *region)
431 {
432 struct omp_region *i, *n;
433
434 for (i = region->inner; i ; i = n)
435 {
436 n = i->next;
437 free_omp_region_1 (i);
438 }
439
440 free (region);
441 }
442
443 /* Release the memory for the entire omp region tree. */
444
445 void
446 omp_free_regions (void)
447 {
448 struct omp_region *r, *n;
449 for (r = root_omp_region; r ; r = n)
450 {
451 n = r->next;
452 free_omp_region_1 (r);
453 }
454 root_omp_region = NULL;
455 }
456
457 /* A convenience function to build an empty GIMPLE_COND with just the
458 condition. */
459
460 static gcond *
461 gimple_build_cond_empty (tree cond)
462 {
463 enum tree_code pred_code;
464 tree lhs, rhs;
465
466 gimple_cond_get_ops_from_tree (cond, &pred_code, &lhs, &rhs);
467 return gimple_build_cond (pred_code, lhs, rhs, NULL_TREE, NULL_TREE);
468 }
469
470 /* Return true if a parallel REGION is within a declare target function or
471 within a target region and is not a part of a gridified target. */
472
473 static bool
474 parallel_needs_hsa_kernel_p (struct omp_region *region)
475 {
476 bool indirect = false;
477 for (region = region->outer; region; region = region->outer)
478 {
479 if (region->type == GIMPLE_OMP_PARALLEL)
480 indirect = true;
481 else if (region->type == GIMPLE_OMP_TARGET)
482 {
483 gomp_target *tgt_stmt
484 = as_a <gomp_target *> (last_stmt (region->entry));
485
486 if (omp_find_clause (gimple_omp_target_clauses (tgt_stmt),
487 OMP_CLAUSE__GRIDDIM_))
488 return indirect;
489 else
490 return true;
491 }
492 }
493
494 if (lookup_attribute ("omp declare target",
495 DECL_ATTRIBUTES (current_function_decl)))
496 return true;
497
498 return false;
499 }
500
501 /* Build the function calls to GOMP_parallel_start etc to actually
502 generate the parallel operation. REGION is the parallel region
503 being expanded. BB is the block where to insert the code. WS_ARGS
504 will be set if this is a call to a combined parallel+workshare
505 construct, it contains the list of additional arguments needed by
506 the workshare construct. */
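
/* For illustration only (the exact arguments depend on the clauses), a
   plain

   #pragma omp parallel num_threads (4)

   expands into roughly

   GOMP_parallel (foo._omp_fn.0, &.omp_data_o, 4, 0);

   where the last argument carries flags such as the proc_bind kind.  */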
507
508 static void
509 expand_parallel_call (struct omp_region *region, basic_block bb,
510 gomp_parallel *entry_stmt,
511 vec<tree, va_gc> *ws_args)
512 {
513 tree t, t1, t2, val, cond, c, clauses, flags;
514 gimple_stmt_iterator gsi;
515 gimple *stmt;
516 enum built_in_function start_ix;
517 int start_ix2;
518 location_t clause_loc;
519 vec<tree, va_gc> *args;
520
521 clauses = gimple_omp_parallel_clauses (entry_stmt);
522
523 /* Determine what flavor of GOMP_parallel we will be
524 emitting. */
525 start_ix = BUILT_IN_GOMP_PARALLEL;
526 if (is_combined_parallel (region))
527 {
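/* For a combined region pick the specialized libgomp entry point:
   GOMP_parallel_sections for sections, or one of the
   GOMP_parallel_loop_* variants selected by the schedule kind below
   (nonmonotonic dynamic/guided schedules map to their own variants).  */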
528 switch (region->inner->type)
529 {
530 case GIMPLE_OMP_FOR:
531 gcc_assert (region->inner->sched_kind != OMP_CLAUSE_SCHEDULE_AUTO);
532 switch (region->inner->sched_kind)
533 {
534 case OMP_CLAUSE_SCHEDULE_RUNTIME:
535 start_ix2 = 3;
536 break;
537 case OMP_CLAUSE_SCHEDULE_DYNAMIC:
538 case OMP_CLAUSE_SCHEDULE_GUIDED:
539 if (region->inner->sched_modifiers
540 & OMP_CLAUSE_SCHEDULE_NONMONOTONIC)
541 {
542 start_ix2 = 3 + region->inner->sched_kind;
543 break;
544 }
545 /* FALLTHRU */
546 default:
547 start_ix2 = region->inner->sched_kind;
548 break;
549 }
550 start_ix2 += (int) BUILT_IN_GOMP_PARALLEL_LOOP_STATIC;
551 start_ix = (enum built_in_function) start_ix2;
552 break;
553 case GIMPLE_OMP_SECTIONS:
554 start_ix = BUILT_IN_GOMP_PARALLEL_SECTIONS;
555 break;
556 default:
557 gcc_unreachable ();
558 }
559 }
560
561 /* By default, the value of NUM_THREADS is zero (selected at run time)
562 and there is no conditional. */
563 cond = NULL_TREE;
564 val = build_int_cst (unsigned_type_node, 0);
565 flags = build_int_cst (unsigned_type_node, 0);
566
567 c = omp_find_clause (clauses, OMP_CLAUSE_IF);
568 if (c)
569 cond = OMP_CLAUSE_IF_EXPR (c);
570
571 c = omp_find_clause (clauses, OMP_CLAUSE_NUM_THREADS);
572 if (c)
573 {
574 val = OMP_CLAUSE_NUM_THREADS_EXPR (c);
575 clause_loc = OMP_CLAUSE_LOCATION (c);
576 }
577 else
578 clause_loc = gimple_location (entry_stmt);
579
580 c = omp_find_clause (clauses, OMP_CLAUSE_PROC_BIND);
581 if (c)
582 flags = build_int_cst (unsigned_type_node, OMP_CLAUSE_PROC_BIND_KIND (c));
583
584 /* Ensure 'val' is of the correct type. */
585 val = fold_convert_loc (clause_loc, unsigned_type_node, val);
586
587 /* If we found the clause 'if (cond)', build either
588 (cond != 0) or (cond ? val : 1u). */
589 if (cond)
590 {
591 cond = gimple_boolify (cond);
592
593 if (integer_zerop (val))
594 val = fold_build2_loc (clause_loc,
595 EQ_EXPR, unsigned_type_node, cond,
596 build_int_cst (TREE_TYPE (cond), 0));
597 else
598 {
599 basic_block cond_bb, then_bb, else_bb;
600 edge e, e_then, e_else;
601 tree tmp_then, tmp_else, tmp_join, tmp_var;
602
603 tmp_var = create_tmp_var (TREE_TYPE (val));
604 if (gimple_in_ssa_p (cfun))
605 {
606 tmp_then = make_ssa_name (tmp_var);
607 tmp_else = make_ssa_name (tmp_var);
608 tmp_join = make_ssa_name (tmp_var);
609 }
610 else
611 {
612 tmp_then = tmp_var;
613 tmp_else = tmp_var;
614 tmp_join = tmp_var;
615 }
616
617 e = split_block_after_labels (bb);
618 cond_bb = e->src;
619 bb = e->dest;
620 remove_edge (e);
621
622 then_bb = create_empty_bb (cond_bb);
623 else_bb = create_empty_bb (then_bb);
624 set_immediate_dominator (CDI_DOMINATORS, then_bb, cond_bb);
625 set_immediate_dominator (CDI_DOMINATORS, else_bb, cond_bb);
626
627 stmt = gimple_build_cond_empty (cond);
628 gsi = gsi_start_bb (cond_bb);
629 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
630
631 gsi = gsi_start_bb (then_bb);
632 expand_omp_build_assign (&gsi, tmp_then, val, true);
633
634 gsi = gsi_start_bb (else_bb);
635 expand_omp_build_assign (&gsi, tmp_else,
636 build_int_cst (unsigned_type_node, 1),
637 true);
638
639 make_edge (cond_bb, then_bb, EDGE_TRUE_VALUE);
640 make_edge (cond_bb, else_bb, EDGE_FALSE_VALUE);
641 add_bb_to_loop (then_bb, cond_bb->loop_father);
642 add_bb_to_loop (else_bb, cond_bb->loop_father);
643 e_then = make_edge (then_bb, bb, EDGE_FALLTHRU);
644 e_else = make_edge (else_bb, bb, EDGE_FALLTHRU);
645
646 if (gimple_in_ssa_p (cfun))
647 {
648 gphi *phi = create_phi_node (tmp_join, bb);
649 add_phi_arg (phi, tmp_then, e_then, UNKNOWN_LOCATION);
650 add_phi_arg (phi, tmp_else, e_else, UNKNOWN_LOCATION);
651 }
652
653 val = tmp_join;
654 }
655
656 gsi = gsi_start_bb (bb);
657 val = force_gimple_operand_gsi (&gsi, val, true, NULL_TREE,
658 false, GSI_CONTINUE_LINKING);
659 }
660
661 gsi = gsi_last_bb (bb);
662 t = gimple_omp_parallel_data_arg (entry_stmt);
663 if (t == NULL)
664 t1 = null_pointer_node;
665 else
666 t1 = build_fold_addr_expr (t);
667 tree child_fndecl = gimple_omp_parallel_child_fn (entry_stmt);
668 t2 = build_fold_addr_expr (child_fndecl);
669
670 vec_alloc (args, 4 + vec_safe_length (ws_args));
671 args->quick_push (t2);
672 args->quick_push (t1);
673 args->quick_push (val);
674 if (ws_args)
675 args->splice (*ws_args);
676 args->quick_push (flags);
677
678 t = build_call_expr_loc_vec (UNKNOWN_LOCATION,
679 builtin_decl_explicit (start_ix), args);
680
681 force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
682 false, GSI_CONTINUE_LINKING);
683
684 if (hsa_gen_requested_p ()
685 && parallel_needs_hsa_kernel_p (region))
686 {
687 cgraph_node *child_cnode = cgraph_node::get (child_fndecl);
688 hsa_register_kernel (child_cnode);
689 }
690 }
691
692 /* Insert a function call whose name is FUNC_NAME with the information from
693 ENTRY_STMT into the basic_block BB. */
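
/* The emitted call has the shape FUNC_NAME (child_fn, &data, count, grain),
   where FUNC_NAME and GRAIN are taken from WS_ARGS; FUNC_NAME typically
   names the Cilk runtime's __cilkrts_cilk_for_32 or _64 entry point
   (illustration only).  */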
694
695 static void
696 expand_cilk_for_call (basic_block bb, gomp_parallel *entry_stmt,
697 vec <tree, va_gc> *ws_args)
698 {
699 tree t, t1, t2;
700 gimple_stmt_iterator gsi;
701 vec <tree, va_gc> *args;
702
703 gcc_assert (vec_safe_length (ws_args) == 2);
704 tree func_name = (*ws_args)[0];
705 tree grain = (*ws_args)[1];
706
707 tree clauses = gimple_omp_parallel_clauses (entry_stmt);
708 tree count = omp_find_clause (clauses, OMP_CLAUSE__CILK_FOR_COUNT_);
709 gcc_assert (count != NULL_TREE);
710 count = OMP_CLAUSE_OPERAND (count, 0);
711
712 gsi = gsi_last_bb (bb);
713 t = gimple_omp_parallel_data_arg (entry_stmt);
714 if (t == NULL)
715 t1 = null_pointer_node;
716 else
717 t1 = build_fold_addr_expr (t);
718 t2 = build_fold_addr_expr (gimple_omp_parallel_child_fn (entry_stmt));
719
720 vec_alloc (args, 4);
721 args->quick_push (t2);
722 args->quick_push (t1);
723 args->quick_push (count);
724 args->quick_push (grain);
725 t = build_call_expr_loc_vec (UNKNOWN_LOCATION, func_name, args);
726
727 force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, false,
728 GSI_CONTINUE_LINKING);
729 }
730
731 /* Build the function call to GOMP_task to actually
732 generate the task operation. BB is the block where to insert the code. */
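
/* For illustration only: a plain "#pragma omp task" ends up as roughly

   GOMP_task (foo._omp_fn.1, &.omp_data_o, cpyfn, arg_size, arg_align,
              if_clause, flags, depend, priority);

   while a taskloop instead calls GOMP_taskloop or GOMP_taskloop_ull, which
   additionally receives the number of tasks and the start, end and step of
   the iteration space (the 11-argument call built below).  */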
733
734 static void
735 expand_task_call (struct omp_region *region, basic_block bb,
736 gomp_task *entry_stmt)
737 {
738 tree t1, t2, t3;
739 gimple_stmt_iterator gsi;
740 location_t loc = gimple_location (entry_stmt);
741
742 tree clauses = gimple_omp_task_clauses (entry_stmt);
743
744 tree ifc = omp_find_clause (clauses, OMP_CLAUSE_IF);
745 tree untied = omp_find_clause (clauses, OMP_CLAUSE_UNTIED);
746 tree mergeable = omp_find_clause (clauses, OMP_CLAUSE_MERGEABLE);
747 tree depend = omp_find_clause (clauses, OMP_CLAUSE_DEPEND);
748 tree finalc = omp_find_clause (clauses, OMP_CLAUSE_FINAL);
749 tree priority = omp_find_clause (clauses, OMP_CLAUSE_PRIORITY);
750
751 unsigned int iflags
752 = (untied ? GOMP_TASK_FLAG_UNTIED : 0)
753 | (mergeable ? GOMP_TASK_FLAG_MERGEABLE : 0)
754 | (depend ? GOMP_TASK_FLAG_DEPEND : 0);
755
756 bool taskloop_p = gimple_omp_task_taskloop_p (entry_stmt);
757 tree startvar = NULL_TREE, endvar = NULL_TREE, step = NULL_TREE;
758 tree num_tasks = NULL_TREE;
759 bool ull = false;
760 if (taskloop_p)
761 {
762 gimple *g = last_stmt (region->outer->entry);
763 gcc_assert (gimple_code (g) == GIMPLE_OMP_FOR
764 && gimple_omp_for_kind (g) == GF_OMP_FOR_KIND_TASKLOOP);
765 struct omp_for_data fd;
766 omp_extract_for_data (as_a <gomp_for *> (g), &fd, NULL);
767 startvar = omp_find_clause (clauses, OMP_CLAUSE__LOOPTEMP_);
768 endvar = omp_find_clause (OMP_CLAUSE_CHAIN (startvar),
769 OMP_CLAUSE__LOOPTEMP_);
770 startvar = OMP_CLAUSE_DECL (startvar);
771 endvar = OMP_CLAUSE_DECL (endvar);
772 step = fold_convert_loc (loc, fd.iter_type, fd.loop.step);
773 if (fd.loop.cond_code == LT_EXPR)
774 iflags |= GOMP_TASK_FLAG_UP;
775 tree tclauses = gimple_omp_for_clauses (g);
776 num_tasks = omp_find_clause (tclauses, OMP_CLAUSE_NUM_TASKS);
777 if (num_tasks)
778 num_tasks = OMP_CLAUSE_NUM_TASKS_EXPR (num_tasks);
779 else
780 {
781 num_tasks = omp_find_clause (tclauses, OMP_CLAUSE_GRAINSIZE);
782 if (num_tasks)
783 {
784 iflags |= GOMP_TASK_FLAG_GRAINSIZE;
785 num_tasks = OMP_CLAUSE_GRAINSIZE_EXPR (num_tasks);
786 }
787 else
788 num_tasks = integer_zero_node;
789 }
790 num_tasks = fold_convert_loc (loc, long_integer_type_node, num_tasks);
791 if (ifc == NULL_TREE)
792 iflags |= GOMP_TASK_FLAG_IF;
793 if (omp_find_clause (tclauses, OMP_CLAUSE_NOGROUP))
794 iflags |= GOMP_TASK_FLAG_NOGROUP;
795 ull = fd.iter_type == long_long_unsigned_type_node;
796 }
797 else if (priority)
798 iflags |= GOMP_TASK_FLAG_PRIORITY;
799
800 tree flags = build_int_cst (unsigned_type_node, iflags);
801
802 tree cond = boolean_true_node;
803 if (ifc)
804 {
805 if (taskloop_p)
806 {
807 tree t = gimple_boolify (OMP_CLAUSE_IF_EXPR (ifc));
808 t = fold_build3_loc (loc, COND_EXPR, unsigned_type_node, t,
809 build_int_cst (unsigned_type_node,
810 GOMP_TASK_FLAG_IF),
811 build_int_cst (unsigned_type_node, 0));
812 flags = fold_build2_loc (loc, PLUS_EXPR, unsigned_type_node,
813 flags, t);
814 }
815 else
816 cond = gimple_boolify (OMP_CLAUSE_IF_EXPR (ifc));
817 }
818
819 if (finalc)
820 {
821 tree t = gimple_boolify (OMP_CLAUSE_FINAL_EXPR (finalc));
822 t = fold_build3_loc (loc, COND_EXPR, unsigned_type_node, t,
823 build_int_cst (unsigned_type_node,
824 GOMP_TASK_FLAG_FINAL),
825 build_int_cst (unsigned_type_node, 0));
826 flags = fold_build2_loc (loc, PLUS_EXPR, unsigned_type_node, flags, t);
827 }
828 if (depend)
829 depend = OMP_CLAUSE_DECL (depend);
830 else
831 depend = build_int_cst (ptr_type_node, 0);
832 if (priority)
833 priority = fold_convert (integer_type_node,
834 OMP_CLAUSE_PRIORITY_EXPR (priority));
835 else
836 priority = integer_zero_node;
837
838 gsi = gsi_last_bb (bb);
839 tree t = gimple_omp_task_data_arg (entry_stmt);
840 if (t == NULL)
841 t2 = null_pointer_node;
842 else
843 t2 = build_fold_addr_expr_loc (loc, t);
844 t1 = build_fold_addr_expr_loc (loc, gimple_omp_task_child_fn (entry_stmt));
845 t = gimple_omp_task_copy_fn (entry_stmt);
846 if (t == NULL)
847 t3 = null_pointer_node;
848 else
849 t3 = build_fold_addr_expr_loc (loc, t);
850
851 if (taskloop_p)
852 t = build_call_expr (ull
853 ? builtin_decl_explicit (BUILT_IN_GOMP_TASKLOOP_ULL)
854 : builtin_decl_explicit (BUILT_IN_GOMP_TASKLOOP),
855 11, t1, t2, t3,
856 gimple_omp_task_arg_size (entry_stmt),
857 gimple_omp_task_arg_align (entry_stmt), flags,
858 num_tasks, priority, startvar, endvar, step);
859 else
860 t = build_call_expr (builtin_decl_explicit (BUILT_IN_GOMP_TASK),
861 9, t1, t2, t3,
862 gimple_omp_task_arg_size (entry_stmt),
863 gimple_omp_task_arg_align (entry_stmt), cond, flags,
864 depend, priority);
865
866 force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
867 false, GSI_CONTINUE_LINKING);
868 }
869
870 /* Chain all the DECLs in LIST by their TREE_CHAIN fields. */
871
872 static tree
873 vec2chain (vec<tree, va_gc> *v)
874 {
875 tree chain = NULL_TREE, t;
876 unsigned ix;
877
878 FOR_EACH_VEC_SAFE_ELT_REVERSE (v, ix, t)
879 {
880 DECL_CHAIN (t) = chain;
881 chain = t;
882 }
883
884 return chain;
885 }
886
887 /* Remove barriers in REGION->EXIT's block. Note that this is only
888 valid for GIMPLE_OMP_PARALLEL regions. Since the end of a parallel region
889 is an implicit barrier, any barrier that a workshare inside the
890 GIMPLE_OMP_PARALLEL left at the end of the region can now be
891 removed. */
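
/* For instance, in

   #pragma omp parallel
     #pragma omp for
       ...

   the barrier implied at the end of the loop is immediately followed by the
   parallel region's own join barrier, so it is redundant -- unless queued
   tasks might still need it, which the code below checks for.  */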
892
893 static void
894 remove_exit_barrier (struct omp_region *region)
895 {
896 gimple_stmt_iterator gsi;
897 basic_block exit_bb;
898 edge_iterator ei;
899 edge e;
900 gimple *stmt;
901 int any_addressable_vars = -1;
902
903 exit_bb = region->exit;
904
905 /* If the parallel region doesn't return, we don't have REGION->EXIT
906 block at all. */
907 if (! exit_bb)
908 return;
909
910 /* The last insn in the block will be the parallel's GIMPLE_OMP_RETURN. The
911 workshare's GIMPLE_OMP_RETURN will be in a preceding block. The kinds of
912 statements that can appear in between are extremely limited -- no
913 memory operations at all. Here, we allow nothing at all, so the
914 only thing we allow to precede this GIMPLE_OMP_RETURN is a label. */
915 gsi = gsi_last_bb (exit_bb);
916 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_RETURN);
917 gsi_prev (&gsi);
918 if (!gsi_end_p (gsi) && gimple_code (gsi_stmt (gsi)) != GIMPLE_LABEL)
919 return;
920
921 FOR_EACH_EDGE (e, ei, exit_bb->preds)
922 {
923 gsi = gsi_last_bb (e->src);
924 if (gsi_end_p (gsi))
925 continue;
926 stmt = gsi_stmt (gsi);
927 if (gimple_code (stmt) == GIMPLE_OMP_RETURN
928 && !gimple_omp_return_nowait_p (stmt))
929 {
930 /* OpenMP 3.0 tasks unfortunately prevent this optimization
931 in many cases. If there could be tasks queued, the barrier
932 might be needed to let the tasks run before some local
933 variable of the parallel that the task uses as shared
934 runs out of scope. The task can be spawned either
935 from within the current function (this would be easy to check)
936 or from some function it calls and gets passed an address
937 of such a variable. */
938 if (any_addressable_vars < 0)
939 {
940 gomp_parallel *parallel_stmt
941 = as_a <gomp_parallel *> (last_stmt (region->entry));
942 tree child_fun = gimple_omp_parallel_child_fn (parallel_stmt);
943 tree local_decls, block, decl;
944 unsigned ix;
945
946 any_addressable_vars = 0;
947 FOR_EACH_LOCAL_DECL (DECL_STRUCT_FUNCTION (child_fun), ix, decl)
948 if (TREE_ADDRESSABLE (decl))
949 {
950 any_addressable_vars = 1;
951 break;
952 }
953 for (block = gimple_block (stmt);
954 !any_addressable_vars
955 && block
956 && TREE_CODE (block) == BLOCK;
957 block = BLOCK_SUPERCONTEXT (block))
958 {
959 for (local_decls = BLOCK_VARS (block);
960 local_decls;
961 local_decls = DECL_CHAIN (local_decls))
962 if (TREE_ADDRESSABLE (local_decls))
963 {
964 any_addressable_vars = 1;
965 break;
966 }
967 if (block == gimple_block (parallel_stmt))
968 break;
969 }
970 }
971 if (!any_addressable_vars)
972 gimple_omp_return_set_nowait (stmt);
973 }
974 }
975 }
976
977 static void
978 remove_exit_barriers (struct omp_region *region)
979 {
980 if (region->type == GIMPLE_OMP_PARALLEL)
981 remove_exit_barrier (region);
982
983 if (region->inner)
984 {
985 region = region->inner;
986 remove_exit_barriers (region);
987 while (region->next)
988 {
989 region = region->next;
990 remove_exit_barriers (region);
991 }
992 }
993 }
994
995 /* Optimize omp_get_thread_num () and omp_get_num_threads ()
996 calls. These can't be declared as const functions, but
997 within one parallel body they are constant, so they can be
998 transformed there into __builtin_omp_get_{thread_num,num_threads} ()
999 which are declared const. Similarly for task body, except
1000 that in untied task omp_get_thread_num () can change at any task
1001 scheduling point. */
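
/* E.g. a call to the external omp_get_num_threads () inside an outlined
   parallel body is redirected to __builtin_omp_get_num_threads (), which is
   declared const, so later passes can CSE repeated calls.  */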
1002
1003 static void
1004 optimize_omp_library_calls (gimple *entry_stmt)
1005 {
1006 basic_block bb;
1007 gimple_stmt_iterator gsi;
1008 tree thr_num_tree = builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM);
1009 tree thr_num_id = DECL_ASSEMBLER_NAME (thr_num_tree);
1010 tree num_thr_tree = builtin_decl_explicit (BUILT_IN_OMP_GET_NUM_THREADS);
1011 tree num_thr_id = DECL_ASSEMBLER_NAME (num_thr_tree);
1012 bool untied_task = (gimple_code (entry_stmt) == GIMPLE_OMP_TASK
1013 && omp_find_clause (gimple_omp_task_clauses (entry_stmt),
1014 OMP_CLAUSE_UNTIED) != NULL);
1015
1016 FOR_EACH_BB_FN (bb, cfun)
1017 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
1018 {
1019 gimple *call = gsi_stmt (gsi);
1020 tree decl;
1021
1022 if (is_gimple_call (call)
1023 && (decl = gimple_call_fndecl (call))
1024 && DECL_EXTERNAL (decl)
1025 && TREE_PUBLIC (decl)
1026 && DECL_INITIAL (decl) == NULL)
1027 {
1028 tree built_in;
1029
1030 if (DECL_NAME (decl) == thr_num_id)
1031 {
1032 /* In #pragma omp task untied omp_get_thread_num () can change
1033 during the execution of the task region. */
1034 if (untied_task)
1035 continue;
1036 built_in = builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM);
1037 }
1038 else if (DECL_NAME (decl) == num_thr_id)
1039 built_in = builtin_decl_explicit (BUILT_IN_OMP_GET_NUM_THREADS);
1040 else
1041 continue;
1042
1043 if (DECL_ASSEMBLER_NAME (decl) != DECL_ASSEMBLER_NAME (built_in)
1044 || gimple_call_num_args (call) != 0)
1045 continue;
1046
1047 if (flag_exceptions && !TREE_NOTHROW (decl))
1048 continue;
1049
1050 if (TREE_CODE (TREE_TYPE (decl)) != FUNCTION_TYPE
1051 || !types_compatible_p (TREE_TYPE (TREE_TYPE (decl)),
1052 TREE_TYPE (TREE_TYPE (built_in))))
1053 continue;
1054
1055 gimple_call_set_fndecl (call, built_in);
1056 }
1057 }
1058 }
1059
1060 /* Callback for expand_omp_build_assign. Return non-NULL if *tp needs to be
1061 regimplified. */
1062
1063 static tree
1064 expand_omp_regimplify_p (tree *tp, int *walk_subtrees, void *)
1065 {
1066 tree t = *tp;
1067
1068 /* Any variable with DECL_VALUE_EXPR needs to be regimplified. */
1069 if (VAR_P (t) && DECL_HAS_VALUE_EXPR_P (t))
1070 return t;
1071
1072 if (TREE_CODE (t) == ADDR_EXPR)
1073 recompute_tree_invariant_for_addr_expr (t);
1074
1075 *walk_subtrees = !TYPE_P (t) && !DECL_P (t);
1076 return NULL_TREE;
1077 }
1078
1079 /* Prepend or append TO = FROM assignment before or after *GSI_P. */
1080
1081 static void
1082 expand_omp_build_assign (gimple_stmt_iterator *gsi_p, tree to, tree from,
1083 bool after)
1084 {
1085 bool simple_p = DECL_P (to) && TREE_ADDRESSABLE (to);
1086 from = force_gimple_operand_gsi (gsi_p, from, simple_p, NULL_TREE,
1087 !after, after ? GSI_CONTINUE_LINKING
1088 : GSI_SAME_STMT);
1089 gimple *stmt = gimple_build_assign (to, from);
1090 if (after)
1091 gsi_insert_after (gsi_p, stmt, GSI_CONTINUE_LINKING);
1092 else
1093 gsi_insert_before (gsi_p, stmt, GSI_SAME_STMT);
1094 if (walk_tree (&from, expand_omp_regimplify_p, NULL, NULL)
1095 || walk_tree (&to, expand_omp_regimplify_p, NULL, NULL))
1096 {
1097 gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
1098 gimple_regimplify_operands (stmt, &gsi);
1099 }
1100 }
1101
1102 /* Expand the OpenMP parallel or task directive starting at REGION. */
1103
1104 static void
1105 expand_omp_taskreg (struct omp_region *region)
1106 {
1107 basic_block entry_bb, exit_bb, new_bb;
1108 struct function *child_cfun;
1109 tree child_fn, block, t;
1110 gimple_stmt_iterator gsi;
1111 gimple *entry_stmt, *stmt;
1112 edge e;
1113 vec<tree, va_gc> *ws_args;
1114
1115 entry_stmt = last_stmt (region->entry);
1116 child_fn = gimple_omp_taskreg_child_fn (entry_stmt);
1117 child_cfun = DECL_STRUCT_FUNCTION (child_fn);
1118
1119 entry_bb = region->entry;
1120 if (gimple_code (entry_stmt) == GIMPLE_OMP_TASK)
1121 exit_bb = region->cont;
1122 else
1123 exit_bb = region->exit;
1124
1125 bool is_cilk_for
1126 = (flag_cilkplus
1127 && gimple_code (entry_stmt) == GIMPLE_OMP_PARALLEL
1128 && omp_find_clause (gimple_omp_parallel_clauses (entry_stmt),
1129 OMP_CLAUSE__CILK_FOR_COUNT_) != NULL_TREE);
1130
1131 if (is_cilk_for)
1132 /* If it is a _Cilk_for statement, it is modelled *like* a parallel for,
1133 and the inner statement contains the name of the built-in function
1134 and grain. */
1135 ws_args = region->inner->ws_args;
1136 else if (is_combined_parallel (region))
1137 ws_args = region->ws_args;
1138 else
1139 ws_args = NULL;
1140
1141 if (child_cfun->cfg)
1142 {
1143 /* Due to inlining, it may happen that we have already outlined
1144 the region, in which case all we need to do is make the
1145 sub-graph unreachable and emit the parallel call. */
1146 edge entry_succ_e, exit_succ_e;
1147
1148 entry_succ_e = single_succ_edge (entry_bb);
1149
1150 gsi = gsi_last_bb (entry_bb);
1151 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_PARALLEL
1152 || gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_TASK);
1153 gsi_remove (&gsi, true);
1154
1155 new_bb = entry_bb;
1156 if (exit_bb)
1157 {
1158 exit_succ_e = single_succ_edge (exit_bb);
1159 make_edge (new_bb, exit_succ_e->dest, EDGE_FALLTHRU);
1160 }
1161 remove_edge_and_dominated_blocks (entry_succ_e);
1162 }
1163 else
1164 {
1165 unsigned srcidx, dstidx, num;
1166
1167 /* If the parallel region needs data sent from the parent
1168 function, then the very first statement (except possible
1169 tree profile counter updates) of the parallel body
1170 is a copy assignment .OMP_DATA_I = &.OMP_DATA_O. Since
1171 &.OMP_DATA_O is passed as an argument to the child function,
1172 we need to replace it with the argument as seen by the child
1173 function.
1174
1175 In most cases, this will end up being the identity assignment
1176 .OMP_DATA_I = .OMP_DATA_I. However, if the parallel body had
1177 a function call that has been inlined, the original PARM_DECL
1178 .OMP_DATA_I may have been converted into a different local
1179 variable, in which case we need to keep the assignment. */
1180 if (gimple_omp_taskreg_data_arg (entry_stmt))
1181 {
1182 basic_block entry_succ_bb
1183 = single_succ_p (entry_bb) ? single_succ (entry_bb)
1184 : FALLTHRU_EDGE (entry_bb)->dest;
1185 tree arg;
1186 gimple *parcopy_stmt = NULL;
1187
1188 for (gsi = gsi_start_bb (entry_succ_bb); ; gsi_next (&gsi))
1189 {
1190 gimple *stmt;
1191
1192 gcc_assert (!gsi_end_p (gsi));
1193 stmt = gsi_stmt (gsi);
1194 if (gimple_code (stmt) != GIMPLE_ASSIGN)
1195 continue;
1196
1197 if (gimple_num_ops (stmt) == 2)
1198 {
1199 tree arg = gimple_assign_rhs1 (stmt);
1200
1201 /* We're ignoring the subcode because we're
1202 effectively doing a STRIP_NOPS. */
1203
1204 if (TREE_CODE (arg) == ADDR_EXPR
1205 && TREE_OPERAND (arg, 0)
1206 == gimple_omp_taskreg_data_arg (entry_stmt))
1207 {
1208 parcopy_stmt = stmt;
1209 break;
1210 }
1211 }
1212 }
1213
1214 gcc_assert (parcopy_stmt != NULL);
1215 arg = DECL_ARGUMENTS (child_fn);
1216
1217 if (!gimple_in_ssa_p (cfun))
1218 {
1219 if (gimple_assign_lhs (parcopy_stmt) == arg)
1220 gsi_remove (&gsi, true);
1221 else
1222 {
1223 /* ?? Is setting the subcode really necessary ?? */
1224 gimple_omp_set_subcode (parcopy_stmt, TREE_CODE (arg));
1225 gimple_assign_set_rhs1 (parcopy_stmt, arg);
1226 }
1227 }
1228 else
1229 {
1230 tree lhs = gimple_assign_lhs (parcopy_stmt);
1231 gcc_assert (SSA_NAME_VAR (lhs) == arg);
1232 /* We'd like to set the rhs to the default def in the child_fn,
1233 but it's too early to create ssa names in the child_fn.
1234 Instead, we set the rhs to the parm. In
1235 move_sese_region_to_fn, we introduce a default def for the
1236 parm, map the parm to its default def, and once we encounter
1237 this stmt, replace the parm with the default def. */
1238 gimple_assign_set_rhs1 (parcopy_stmt, arg);
1239 update_stmt (parcopy_stmt);
1240 }
1241 }
1242
1243 /* Declare local variables needed in CHILD_CFUN. */
1244 block = DECL_INITIAL (child_fn);
1245 BLOCK_VARS (block) = vec2chain (child_cfun->local_decls);
1246 /* The gimplifier could record temporaries in the parallel/task block
1247 rather than in the containing function's local_decls chain,
1248 which would mean cgraph missed finalizing them. Do it now. */
1249 for (t = BLOCK_VARS (block); t; t = DECL_CHAIN (t))
1250 if (VAR_P (t) && TREE_STATIC (t) && !DECL_EXTERNAL (t))
1251 varpool_node::finalize_decl (t);
1252 DECL_SAVED_TREE (child_fn) = NULL;
1253 /* We'll create a CFG for child_fn, so no gimple body is needed. */
1254 gimple_set_body (child_fn, NULL);
1255 TREE_USED (block) = 1;
1256
1257 /* Reset DECL_CONTEXT on function arguments. */
1258 for (t = DECL_ARGUMENTS (child_fn); t; t = DECL_CHAIN (t))
1259 DECL_CONTEXT (t) = child_fn;
1260
1261 /* Split ENTRY_BB at GIMPLE_OMP_PARALLEL or GIMPLE_OMP_TASK,
1262 so that it can be moved to the child function. */
1263 gsi = gsi_last_bb (entry_bb);
1264 stmt = gsi_stmt (gsi);
1265 gcc_assert (stmt && (gimple_code (stmt) == GIMPLE_OMP_PARALLEL
1266 || gimple_code (stmt) == GIMPLE_OMP_TASK));
1267 e = split_block (entry_bb, stmt);
1268 gsi_remove (&gsi, true);
1269 entry_bb = e->dest;
1270 edge e2 = NULL;
1271 if (gimple_code (entry_stmt) == GIMPLE_OMP_PARALLEL)
1272 single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
1273 else
1274 {
1275 e2 = make_edge (e->src, BRANCH_EDGE (entry_bb)->dest, EDGE_ABNORMAL);
1276 gcc_assert (e2->dest == region->exit);
1277 remove_edge (BRANCH_EDGE (entry_bb));
1278 set_immediate_dominator (CDI_DOMINATORS, e2->dest, e->src);
1279 gsi = gsi_last_bb (region->exit);
1280 gcc_assert (!gsi_end_p (gsi)
1281 && gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_RETURN);
1282 gsi_remove (&gsi, true);
1283 }
1284
1285 /* Convert GIMPLE_OMP_{RETURN,CONTINUE} into a RETURN_EXPR. */
1286 if (exit_bb)
1287 {
1288 gsi = gsi_last_bb (exit_bb);
1289 gcc_assert (!gsi_end_p (gsi)
1290 && (gimple_code (gsi_stmt (gsi))
1291 == (e2 ? GIMPLE_OMP_CONTINUE : GIMPLE_OMP_RETURN)));
1292 stmt = gimple_build_return (NULL);
1293 gsi_insert_after (&gsi, stmt, GSI_SAME_STMT);
1294 gsi_remove (&gsi, true);
1295 }
1296
1297 /* Move the parallel region into CHILD_CFUN. */
1298
1299 if (gimple_in_ssa_p (cfun))
1300 {
1301 init_tree_ssa (child_cfun);
1302 init_ssa_operands (child_cfun);
1303 child_cfun->gimple_df->in_ssa_p = true;
1304 block = NULL_TREE;
1305 }
1306 else
1307 block = gimple_block (entry_stmt);
1308
1309 /* Make sure to generate early debug for the function before
1310 outlining anything. */
1311 if (! gimple_in_ssa_p (cfun))
1312 (*debug_hooks->early_global_decl) (cfun->decl);
1313
1314 new_bb = move_sese_region_to_fn (child_cfun, entry_bb, exit_bb, block);
1315 if (exit_bb)
1316 single_succ_edge (new_bb)->flags = EDGE_FALLTHRU;
1317 if (e2)
1318 {
1319 basic_block dest_bb = e2->dest;
1320 if (!exit_bb)
1321 make_edge (new_bb, dest_bb, EDGE_FALLTHRU);
1322 remove_edge (e2);
1323 set_immediate_dominator (CDI_DOMINATORS, dest_bb, new_bb);
1324 }
1325 /* When the OMP expansion process cannot guarantee an up-to-date
1326 loop tree, arrange for the child function to fix up its loops. */
1327 if (loops_state_satisfies_p (LOOPS_NEED_FIXUP))
1328 child_cfun->x_current_loops->state |= LOOPS_NEED_FIXUP;
1329
1330 /* Remove non-local VAR_DECLs from child_cfun->local_decls list. */
1331 num = vec_safe_length (child_cfun->local_decls);
1332 for (srcidx = 0, dstidx = 0; srcidx < num; srcidx++)
1333 {
1334 t = (*child_cfun->local_decls)[srcidx];
1335 if (DECL_CONTEXT (t) == cfun->decl)
1336 continue;
1337 if (srcidx != dstidx)
1338 (*child_cfun->local_decls)[dstidx] = t;
1339 dstidx++;
1340 }
1341 if (dstidx != num)
1342 vec_safe_truncate (child_cfun->local_decls, dstidx);
1343
1344 /* Inform the callgraph about the new function. */
1345 child_cfun->curr_properties = cfun->curr_properties;
1346 child_cfun->has_simduid_loops |= cfun->has_simduid_loops;
1347 child_cfun->has_force_vectorize_loops |= cfun->has_force_vectorize_loops;
1348 cgraph_node *node = cgraph_node::get_create (child_fn);
1349 node->parallelized_function = 1;
1350 cgraph_node::add_new_function (child_fn, true);
1351
1352 bool need_asm = DECL_ASSEMBLER_NAME_SET_P (current_function_decl)
1353 && !DECL_ASSEMBLER_NAME_SET_P (child_fn);
1354
1355 /* Fix the callgraph edges for child_cfun. Those for cfun will be
1356 fixed in a following pass. */
1357 push_cfun (child_cfun);
1358 if (need_asm)
1359 assign_assembler_name_if_needed (child_fn);
1360
1361 if (optimize)
1362 optimize_omp_library_calls (entry_stmt);
1363 cgraph_edge::rebuild_edges ();
1364
1365 /* Some EH regions might become dead, see PR34608. If
1366 pass_cleanup_cfg isn't the first pass to happen with the
1367 new child, these dead EH edges might cause problems.
1368 Clean them up now. */
1369 if (flag_exceptions)
1370 {
1371 basic_block bb;
1372 bool changed = false;
1373
1374 FOR_EACH_BB_FN (bb, cfun)
1375 changed |= gimple_purge_dead_eh_edges (bb);
1376 if (changed)
1377 cleanup_tree_cfg ();
1378 }
1379 if (gimple_in_ssa_p (cfun))
1380 update_ssa (TODO_update_ssa);
1381 if (flag_checking && !loops_state_satisfies_p (LOOPS_NEED_FIXUP))
1382 verify_loop_structure ();
1383 pop_cfun ();
1384
1385 if (dump_file && !gimple_in_ssa_p (cfun))
1386 {
1387 omp_any_child_fn_dumped = true;
1388 dump_function_header (dump_file, child_fn, dump_flags);
1389 dump_function_to_file (child_fn, dump_file, dump_flags);
1390 }
1391 }
1392
1393 /* Emit a library call to launch the children threads. */
1394 if (is_cilk_for)
1395 expand_cilk_for_call (new_bb,
1396 as_a <gomp_parallel *> (entry_stmt), ws_args);
1397 else if (gimple_code (entry_stmt) == GIMPLE_OMP_PARALLEL)
1398 expand_parallel_call (region, new_bb,
1399 as_a <gomp_parallel *> (entry_stmt), ws_args);
1400 else
1401 expand_task_call (region, new_bb, as_a <gomp_task *> (entry_stmt));
1402 if (gimple_in_ssa_p (cfun))
1403 update_ssa (TODO_update_ssa_only_virtuals);
1404 }
1405
1406 /* Information about members of an OpenACC collapsed loop nest. */
1407
1408 struct oacc_collapse
1409 {
1410 tree base; /* Base value. */
1411 tree iters; /* Number of steps. */
1412 tree step; /* Step size. */
1413 };
1414
1415 /* Helper for expand_oacc_for. Determine collapsed loop information.
1416 Fill in COUNTS array. Emit any initialization code before GSI.
1417 Return the calculated outer loop bound of BOUND_TYPE. */
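
/* Roughly, each member loop contributes

   iters = (|n2 - n1| - 1 + |step|) / |step|

   computed in a signed type to avoid unsigned wrap-around, and the returned
   outer bound is the product of all the member ITERS values.  */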
1418
1419 static tree
1420 expand_oacc_collapse_init (const struct omp_for_data *fd,
1421 gimple_stmt_iterator *gsi,
1422 oacc_collapse *counts, tree bound_type)
1423 {
1424 tree total = build_int_cst (bound_type, 1);
1425 int ix;
1426
1427 gcc_assert (integer_onep (fd->loop.step));
1428 gcc_assert (integer_zerop (fd->loop.n1));
1429
1430 for (ix = 0; ix != fd->collapse; ix++)
1431 {
1432 const omp_for_data_loop *loop = &fd->loops[ix];
1433
1434 tree iter_type = TREE_TYPE (loop->v);
1435 tree diff_type = iter_type;
1436 tree plus_type = iter_type;
1437
1438 gcc_assert (loop->cond_code == fd->loop.cond_code);
1439
1440 if (POINTER_TYPE_P (iter_type))
1441 plus_type = sizetype;
1442 if (POINTER_TYPE_P (diff_type) || TYPE_UNSIGNED (diff_type))
1443 diff_type = signed_type_for (diff_type);
1444
1445 tree b = loop->n1;
1446 tree e = loop->n2;
1447 tree s = loop->step;
1448 bool up = loop->cond_code == LT_EXPR;
1449 tree dir = build_int_cst (diff_type, up ? +1 : -1);
1450 bool negating;
1451 tree expr;
1452
1453 b = force_gimple_operand_gsi (gsi, b, true, NULL_TREE,
1454 true, GSI_SAME_STMT);
1455 e = force_gimple_operand_gsi (gsi, e, true, NULL_TREE,
1456 true, GSI_SAME_STMT);
1457
1458 /* Convert the step, avoiding possible unsigned->signed overflow. */
1459 negating = !up && TYPE_UNSIGNED (TREE_TYPE (s));
1460 if (negating)
1461 s = fold_build1 (NEGATE_EXPR, TREE_TYPE (s), s);
1462 s = fold_convert (diff_type, s);
1463 if (negating)
1464 s = fold_build1 (NEGATE_EXPR, diff_type, s);
1465 s = force_gimple_operand_gsi (gsi, s, true, NULL_TREE,
1466 true, GSI_SAME_STMT);
1467
1468 /* Determine the range, avoiding possible unsigned->signed overflow. */
1469 negating = !up && TYPE_UNSIGNED (iter_type);
1470 expr = fold_build2 (MINUS_EXPR, plus_type,
1471 fold_convert (plus_type, negating ? b : e),
1472 fold_convert (plus_type, negating ? e : b));
1473 expr = fold_convert (diff_type, expr);
1474 if (negating)
1475 expr = fold_build1 (NEGATE_EXPR, diff_type, expr);
1476 tree range = force_gimple_operand_gsi
1477 (gsi, expr, true, NULL_TREE, true, GSI_SAME_STMT);
1478
1479 /* Determine number of iterations. */
1480 expr = fold_build2 (MINUS_EXPR, diff_type, range, dir);
1481 expr = fold_build2 (PLUS_EXPR, diff_type, expr, s);
1482 expr = fold_build2 (TRUNC_DIV_EXPR, diff_type, expr, s);
1483
1484 tree iters = force_gimple_operand_gsi (gsi, expr, true, NULL_TREE,
1485 true, GSI_SAME_STMT);
1486
1487 counts[ix].base = b;
1488 counts[ix].iters = iters;
1489 counts[ix].step = s;
1490
1491 total = fold_build2 (MULT_EXPR, bound_type, total,
1492 fold_convert (bound_type, iters));
1493 }
1494
1495 return total;
1496 }
1497
1498 /* Emit initializers for collapsed loop members. IVAR is the outer
1499 loop iteration variable, from which collapsed loop iteration values
1500 are calculated. COUNTS array has been initialized by
1501 expand_oacc_collapse_init. */
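
/* Roughly, for a collapse of two the innermost variable becomes

   v1 = base1 + (ivar % iters1) * step1

   and after dividing IVAR by iters1 the outer one becomes

   v0 = base0 + ((ivar / iters1) % iters0) * step0

   (illustrative names; pointer-typed variables use POINTER_PLUS_EXPR).  */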
1502
1503 static void
1504 expand_oacc_collapse_vars (const struct omp_for_data *fd,
1505 gimple_stmt_iterator *gsi,
1506 const oacc_collapse *counts, tree ivar)
1507 {
1508 tree ivar_type = TREE_TYPE (ivar);
1509
1510 /* The most rapidly changing iteration variable is the innermost
1511 one. */
1512 for (int ix = fd->collapse; ix--;)
1513 {
1514 const omp_for_data_loop *loop = &fd->loops[ix];
1515 const oacc_collapse *collapse = &counts[ix];
1516 tree iter_type = TREE_TYPE (loop->v);
1517 tree diff_type = TREE_TYPE (collapse->step);
1518 tree plus_type = iter_type;
1519 enum tree_code plus_code = PLUS_EXPR;
1520 tree expr;
1521
1522 if (POINTER_TYPE_P (iter_type))
1523 {
1524 plus_code = POINTER_PLUS_EXPR;
1525 plus_type = sizetype;
1526 }
1527
1528 expr = fold_build2 (TRUNC_MOD_EXPR, ivar_type, ivar,
1529 fold_convert (ivar_type, collapse->iters));
1530 expr = fold_build2 (MULT_EXPR, diff_type, fold_convert (diff_type, expr),
1531 collapse->step);
1532 expr = fold_build2 (plus_code, iter_type, collapse->base,
1533 fold_convert (plus_type, expr));
1534 expr = force_gimple_operand_gsi (gsi, expr, false, NULL_TREE,
1535 true, GSI_SAME_STMT);
1536 gassign *ass = gimple_build_assign (loop->v, expr);
1537 gsi_insert_before (gsi, ass, GSI_SAME_STMT);
1538
1539 if (ix)
1540 {
1541 expr = fold_build2 (TRUNC_DIV_EXPR, ivar_type, ivar,
1542 fold_convert (ivar_type, collapse->iters));
1543 ivar = force_gimple_operand_gsi (gsi, expr, true, NULL_TREE,
1544 true, GSI_SAME_STMT);
1545 }
1546 }
1547 }
1548
1549 /* Helper function for expand_omp_{for_*,simd}. If this is the outermost
1550 of the combined collapse > 1 loop constructs, generate code like:
1551 if (__builtin_expect (N32 cond3 N31, 0)) goto ZERO_ITER_BB;
1552 if (cond3 is <)
1553 adj = STEP3 - 1;
1554 else
1555 adj = STEP3 + 1;
1556 count3 = (adj + N32 - N31) / STEP3;
1557 if (__builtin_expect (N22 cond2 N21, 0)) goto ZERO_ITER_BB;
1558 if (cond2 is <)
1559 adj = STEP2 - 1;
1560 else
1561 adj = STEP2 + 1;
1562 count2 = (adj + N22 - N21) / STEP2;
1563 if (__builtin_expect (N12 cond1 N11, 0)) goto ZERO_ITER_BB;
1564 if (cond1 is <)
1565 adj = STEP1 - 1;
1566 else
1567 adj = STEP1 + 1;
1568 count1 = (adj + N12 - N11) / STEP1;
1569 count = count1 * count2 * count3;
1570 Furthermore, if ZERO_ITER_BB is NULL, create a BB which does:
1571 count = 0;
1572 and set ZERO_ITER_BB to that bb. If this isn't the outermost
1573 of the combined loop constructs, just initialize COUNTS array
1574 from the _looptemp_ clauses. */
1575
1576 /* NOTE: It *could* be better to moosh all of the BBs together,
1577 creating one larger BB with all the computation and the unexpected
1578 jump at the end. I.e.
1579
1580 bool zero3, zero2, zero1, zero;
1581
1582 zero3 = N32 c3 N31;
1583 count3 = (N32 - N31) /[cl] STEP3;
1584 zero2 = N22 c2 N21;
1585 count2 = (N22 - N21) /[cl] STEP2;
1586 zero1 = N12 c1 N11;
1587 count1 = (N12 - N11) /[cl] STEP1;
1588 zero = zero3 || zero2 || zero1;
1589 count = count1 * count2 * count3;
1590 if (__builtin_expect(zero, false)) goto zero_iter_bb;
1591
1592 After all, we expect the zero=false, and thus we expect to have to
1593 evaluate all of the comparison expressions, so short-circuiting
1594 oughtn't be a win. Since the condition isn't protecting a
1595 denominator, we're not concerned about divide-by-zero, so we can
1596 fully evaluate count even if a numerator turned out to be wrong.
1597
1598 It seems like putting this all together would create much better
1599 scheduling opportunities, and less pressure on the chip's branch
1600 predictor. */
1601
1602 static void
1603 expand_omp_for_init_counts (struct omp_for_data *fd, gimple_stmt_iterator *gsi,
1604 basic_block &entry_bb, tree *counts,
1605 basic_block &zero_iter1_bb, int &first_zero_iter1,
1606 basic_block &zero_iter2_bb, int &first_zero_iter2,
1607 basic_block &l2_dom_bb)
1608 {
1609 tree t, type = TREE_TYPE (fd->loop.v);
1610 edge e, ne;
1611 int i;
1612
1613 /* Collapsed loops need work for expansion into SSA form. */
1614 gcc_assert (!gimple_in_ssa_p (cfun));
1615
1616 if (gimple_omp_for_combined_into_p (fd->for_stmt)
1617 && TREE_CODE (fd->loop.n2) != INTEGER_CST)
1618 {
1619 gcc_assert (fd->ordered == 0);
1620 /* First two _looptemp_ clauses are for istart/iend, counts[0]
1621 isn't supposed to be handled, as the inner loop doesn't
1622 use it. */
1623 tree innerc = omp_find_clause (gimple_omp_for_clauses (fd->for_stmt),
1624 OMP_CLAUSE__LOOPTEMP_);
1625 gcc_assert (innerc);
1626 for (i = 0; i < fd->collapse; i++)
1627 {
1628 innerc = omp_find_clause (OMP_CLAUSE_CHAIN (innerc),
1629 OMP_CLAUSE__LOOPTEMP_);
1630 gcc_assert (innerc);
1631 if (i)
1632 counts[i] = OMP_CLAUSE_DECL (innerc);
1633 else
1634 counts[0] = NULL_TREE;
1635 }
1636 return;
1637 }
1638
1639 for (i = fd->collapse; i < fd->ordered; i++)
1640 {
1641 tree itype = TREE_TYPE (fd->loops[i].v);
1642 counts[i] = NULL_TREE;
1643 t = fold_binary (fd->loops[i].cond_code, boolean_type_node,
1644 fold_convert (itype, fd->loops[i].n1),
1645 fold_convert (itype, fd->loops[i].n2));
1646 if (t && integer_zerop (t))
1647 {
1648 for (i = fd->collapse; i < fd->ordered; i++)
1649 counts[i] = build_int_cst (type, 0);
1650 break;
1651 }
1652 }
1653 for (i = 0; i < (fd->ordered ? fd->ordered : fd->collapse); i++)
1654 {
1655 tree itype = TREE_TYPE (fd->loops[i].v);
1656
1657 if (i >= fd->collapse && counts[i])
1658 continue;
1659 if ((SSA_VAR_P (fd->loop.n2) || i >= fd->collapse)
1660 && ((t = fold_binary (fd->loops[i].cond_code, boolean_type_node,
1661 fold_convert (itype, fd->loops[i].n1),
1662 fold_convert (itype, fd->loops[i].n2)))
1663 == NULL_TREE || !integer_onep (t)))
1664 {
1665 gcond *cond_stmt;
1666 tree n1, n2;
1667 n1 = fold_convert (itype, unshare_expr (fd->loops[i].n1));
1668 n1 = force_gimple_operand_gsi (gsi, n1, true, NULL_TREE,
1669 true, GSI_SAME_STMT);
1670 n2 = fold_convert (itype, unshare_expr (fd->loops[i].n2));
1671 n2 = force_gimple_operand_gsi (gsi, n2, true, NULL_TREE,
1672 true, GSI_SAME_STMT);
1673 cond_stmt = gimple_build_cond (fd->loops[i].cond_code, n1, n2,
1674 NULL_TREE, NULL_TREE);
1675 gsi_insert_before (gsi, cond_stmt, GSI_SAME_STMT);
1676 if (walk_tree (gimple_cond_lhs_ptr (cond_stmt),
1677 expand_omp_regimplify_p, NULL, NULL)
1678 || walk_tree (gimple_cond_rhs_ptr (cond_stmt),
1679 expand_omp_regimplify_p, NULL, NULL))
1680 {
1681 *gsi = gsi_for_stmt (cond_stmt);
1682 gimple_regimplify_operands (cond_stmt, gsi);
1683 }
1684 e = split_block (entry_bb, cond_stmt);
1685 basic_block &zero_iter_bb
1686 = i < fd->collapse ? zero_iter1_bb : zero_iter2_bb;
1687 int &first_zero_iter
1688 = i < fd->collapse ? first_zero_iter1 : first_zero_iter2;
1689 if (zero_iter_bb == NULL)
1690 {
1691 gassign *assign_stmt;
1692 first_zero_iter = i;
1693 zero_iter_bb = create_empty_bb (entry_bb);
1694 add_bb_to_loop (zero_iter_bb, entry_bb->loop_father);
1695 *gsi = gsi_after_labels (zero_iter_bb);
1696 if (i < fd->collapse)
1697 assign_stmt = gimple_build_assign (fd->loop.n2,
1698 build_zero_cst (type));
1699 else
1700 {
1701 counts[i] = create_tmp_reg (type, ".count");
1702 assign_stmt
1703 = gimple_build_assign (counts[i], build_zero_cst (type));
1704 }
1705 gsi_insert_before (gsi, assign_stmt, GSI_SAME_STMT);
1706 set_immediate_dominator (CDI_DOMINATORS, zero_iter_bb,
1707 entry_bb);
1708 }
1709 ne = make_edge (entry_bb, zero_iter_bb, EDGE_FALSE_VALUE);
1710 ne->probability = REG_BR_PROB_BASE / 2000 - 1;
1711 e->flags = EDGE_TRUE_VALUE;
1712 e->probability = REG_BR_PROB_BASE - ne->probability;
1713 if (l2_dom_bb == NULL)
1714 l2_dom_bb = entry_bb;
1715 entry_bb = e->dest;
1716 *gsi = gsi_last_bb (entry_bb);
1717 }
1718
1719 if (POINTER_TYPE_P (itype))
1720 itype = signed_type_for (itype);
1721 t = build_int_cst (itype, (fd->loops[i].cond_code == LT_EXPR
1722 ? -1 : 1));
1723 t = fold_build2 (PLUS_EXPR, itype,
1724 fold_convert (itype, fd->loops[i].step), t);
1725 t = fold_build2 (PLUS_EXPR, itype, t,
1726 fold_convert (itype, fd->loops[i].n2));
1727 t = fold_build2 (MINUS_EXPR, itype, t,
1728 fold_convert (itype, fd->loops[i].n1));
1729 /* ?? We could probably use CEIL_DIV_EXPR instead of
1730 TRUNC_DIV_EXPR and adjust by hand. Unless we can't
1731 generate the same code in the end because generically we
1732 don't know that the values involved must be negative for
1733 GT?? */
1734 if (TYPE_UNSIGNED (itype) && fd->loops[i].cond_code == GT_EXPR)
1735 t = fold_build2 (TRUNC_DIV_EXPR, itype,
1736 fold_build1 (NEGATE_EXPR, itype, t),
1737 fold_build1 (NEGATE_EXPR, itype,
1738 fold_convert (itype,
1739 fd->loops[i].step)));
1740 else
1741 t = fold_build2 (TRUNC_DIV_EXPR, itype, t,
1742 fold_convert (itype, fd->loops[i].step));
1743 t = fold_convert (type, t);
1744 if (TREE_CODE (t) == INTEGER_CST)
1745 counts[i] = t;
1746 else
1747 {
1748 if (i < fd->collapse || i != first_zero_iter2)
1749 counts[i] = create_tmp_reg (type, ".count");
1750 expand_omp_build_assign (gsi, counts[i], t);
1751 }
1752 if (SSA_VAR_P (fd->loop.n2) && i < fd->collapse)
1753 {
1754 if (i == 0)
1755 t = counts[0];
1756 else
1757 t = fold_build2 (MULT_EXPR, type, fd->loop.n2, counts[i]);
1758 expand_omp_build_assign (gsi, fd->loop.n2, t);
1759 }
1760 }
1761 }
1762
1763 /* Helper function for expand_omp_{for_*,simd}. Generate code like:
1764 T = V;
1765 V3 = N31 + (T % count3) * STEP3;
1766 T = T / count3;
1767 V2 = N21 + (T % count2) * STEP2;
1768 T = T / count2;
1769 V1 = N11 + T * STEP1;
1770 if this loop doesn't have an inner loop construct combined with it.
1771 If it does have an inner loop construct combined with it and the
1772 iteration count isn't a known constant, store values from the counts array
1773 into its _looptemp_ temporaries instead. */
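/* A worked example of the decomposition above, with illustrative values
   only (not taken from any particular test case): for collapse(3) with
   zero lower bounds, unit steps, count2 == 4 and count3 == 5, the flat
   iteration number T = 17 expands as
     V3 = 0 + (17 % 5) * 1 = 2;  T = 17 / 5 = 3;
     V2 = 0 + (3 % 4) * 1 = 3;   T = 3 / 4 = 0;
     V1 = 0 + 0 * 1 = 0;
   i.e. flat iteration 17 corresponds to (V1, V2, V3) = (0, 3, 2),
   consistent with 17 == V1 * 4 * 5 + V2 * 5 + V3.  */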
1774
1775 static void
1776 expand_omp_for_init_vars (struct omp_for_data *fd, gimple_stmt_iterator *gsi,
1777 tree *counts, gimple *inner_stmt, tree startvar)
1778 {
1779 int i;
1780 if (gimple_omp_for_combined_p (fd->for_stmt))
1781 {
1782 /* If fd->loop.n2 is constant, then no propagation of the counts
1783 is needed; they are constant. */
1784 if (TREE_CODE (fd->loop.n2) == INTEGER_CST)
1785 return;
1786
1787 tree clauses = gimple_code (inner_stmt) != GIMPLE_OMP_FOR
1788 ? gimple_omp_taskreg_clauses (inner_stmt)
1789 : gimple_omp_for_clauses (inner_stmt);
1790 /* The first two _looptemp_ clauses are for istart/iend; counts[0]
1791 isn't supposed to be handled, as the inner loop doesn't
1792 use it. */
1793 tree innerc = omp_find_clause (clauses, OMP_CLAUSE__LOOPTEMP_);
1794 gcc_assert (innerc);
1795 for (i = 0; i < fd->collapse; i++)
1796 {
1797 innerc = omp_find_clause (OMP_CLAUSE_CHAIN (innerc),
1798 OMP_CLAUSE__LOOPTEMP_);
1799 gcc_assert (innerc);
1800 if (i)
1801 {
1802 tree tem = OMP_CLAUSE_DECL (innerc);
1803 tree t = fold_convert (TREE_TYPE (tem), counts[i]);
1804 t = force_gimple_operand_gsi (gsi, t, false, NULL_TREE,
1805 false, GSI_CONTINUE_LINKING);
1806 gassign *stmt = gimple_build_assign (tem, t);
1807 gsi_insert_after (gsi, stmt, GSI_CONTINUE_LINKING);
1808 }
1809 }
1810 return;
1811 }
1812
1813 tree type = TREE_TYPE (fd->loop.v);
1814 tree tem = create_tmp_reg (type, ".tem");
1815 gassign *stmt = gimple_build_assign (tem, startvar);
1816 gsi_insert_after (gsi, stmt, GSI_CONTINUE_LINKING);
1817
1818 for (i = fd->collapse - 1; i >= 0; i--)
1819 {
1820 tree vtype = TREE_TYPE (fd->loops[i].v), itype, t;
1821 itype = vtype;
1822 if (POINTER_TYPE_P (vtype))
1823 itype = signed_type_for (vtype);
1824 if (i != 0)
1825 t = fold_build2 (TRUNC_MOD_EXPR, type, tem, counts[i]);
1826 else
1827 t = tem;
1828 t = fold_convert (itype, t);
1829 t = fold_build2 (MULT_EXPR, itype, t,
1830 fold_convert (itype, fd->loops[i].step));
1831 if (POINTER_TYPE_P (vtype))
1832 t = fold_build_pointer_plus (fd->loops[i].n1, t);
1833 else
1834 t = fold_build2 (PLUS_EXPR, itype, fd->loops[i].n1, t);
1835 t = force_gimple_operand_gsi (gsi, t,
1836 DECL_P (fd->loops[i].v)
1837 && TREE_ADDRESSABLE (fd->loops[i].v),
1838 NULL_TREE, false,
1839 GSI_CONTINUE_LINKING);
1840 stmt = gimple_build_assign (fd->loops[i].v, t);
1841 gsi_insert_after (gsi, stmt, GSI_CONTINUE_LINKING);
1842 if (i != 0)
1843 {
1844 t = fold_build2 (TRUNC_DIV_EXPR, type, tem, counts[i]);
1845 t = force_gimple_operand_gsi (gsi, t, false, NULL_TREE,
1846 false, GSI_CONTINUE_LINKING);
1847 stmt = gimple_build_assign (tem, t);
1848 gsi_insert_after (gsi, stmt, GSI_CONTINUE_LINKING);
1849 }
1850 }
1851 }
1852
1853 /* Helper function for expand_omp_for_*. Generate code like:
1854 L10:
1855 V3 += STEP3;
1856 if (V3 cond3 N32) goto BODY_BB; else goto L11;
1857 L11:
1858 V3 = N31;
1859 V2 += STEP2;
1860 if (V2 cond2 N22) goto BODY_BB; else goto L12;
1861 L12:
1862 V2 = N21;
1863 V1 += STEP1;
1864 goto BODY_BB; */
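/* For instance, with illustrative bounds N31 == 0, N32 == 3 and
   STEP3 == 1, reaching L10 with V3 == 2 executes V3 += 1; the test
   3 cond3 3 fails, so V3 is reset to N31 and the next outer variable
   V2 is stepped and tested the same way before control returns to
   BODY_BB.  */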
1865
1866 static basic_block
1867 extract_omp_for_update_vars (struct omp_for_data *fd, basic_block cont_bb,
1868 basic_block body_bb)
1869 {
1870 basic_block last_bb, bb, collapse_bb = NULL;
1871 int i;
1872 gimple_stmt_iterator gsi;
1873 edge e;
1874 tree t;
1875 gimple *stmt;
1876
1877 last_bb = cont_bb;
1878 for (i = fd->collapse - 1; i >= 0; i--)
1879 {
1880 tree vtype = TREE_TYPE (fd->loops[i].v);
1881
1882 bb = create_empty_bb (last_bb);
1883 add_bb_to_loop (bb, last_bb->loop_father);
1884 gsi = gsi_start_bb (bb);
1885
1886 if (i < fd->collapse - 1)
1887 {
1888 e = make_edge (last_bb, bb, EDGE_FALSE_VALUE);
1889 e->probability = REG_BR_PROB_BASE / 8;
1890
1891 t = fd->loops[i + 1].n1;
1892 t = force_gimple_operand_gsi (&gsi, t,
1893 DECL_P (fd->loops[i + 1].v)
1894 && TREE_ADDRESSABLE (fd->loops[i
1895 + 1].v),
1896 NULL_TREE, false,
1897 GSI_CONTINUE_LINKING);
1898 stmt = gimple_build_assign (fd->loops[i + 1].v, t);
1899 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
1900 }
1901 else
1902 collapse_bb = bb;
1903
1904 set_immediate_dominator (CDI_DOMINATORS, bb, last_bb);
1905
1906 if (POINTER_TYPE_P (vtype))
1907 t = fold_build_pointer_plus (fd->loops[i].v, fd->loops[i].step);
1908 else
1909 t = fold_build2 (PLUS_EXPR, vtype, fd->loops[i].v, fd->loops[i].step);
1910 t = force_gimple_operand_gsi (&gsi, t,
1911 DECL_P (fd->loops[i].v)
1912 && TREE_ADDRESSABLE (fd->loops[i].v),
1913 NULL_TREE, false, GSI_CONTINUE_LINKING);
1914 stmt = gimple_build_assign (fd->loops[i].v, t);
1915 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
1916
1917 if (i > 0)
1918 {
1919 t = fd->loops[i].n2;
1920 t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
1921 false, GSI_CONTINUE_LINKING);
1922 tree v = fd->loops[i].v;
1923 if (DECL_P (v) && TREE_ADDRESSABLE (v))
1924 v = force_gimple_operand_gsi (&gsi, v, true, NULL_TREE,
1925 false, GSI_CONTINUE_LINKING);
1926 t = fold_build2 (fd->loops[i].cond_code, boolean_type_node, v, t);
1927 stmt = gimple_build_cond_empty (t);
1928 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
1929 e = make_edge (bb, body_bb, EDGE_TRUE_VALUE);
1930 e->probability = REG_BR_PROB_BASE * 7 / 8;
1931 }
1932 else
1933 make_edge (bb, body_bb, EDGE_FALLTHRU);
1934 last_bb = bb;
1935 }
1936
1937 return collapse_bb;
1938 }
1939
1940 /* Expand #pragma omp ordered depend(source). */
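/* Sketch of the expansion (see expand_omp_ordered_source_sink below for
   the bookkeeping it relies on): the directive becomes a single call to
   GOMP_doacross_post (or GOMP_doacross_ull_post for unsigned long long
   iteration types), passing the address of the ".orditera" array that
   holds the current iteration numbers.  */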
1941
1942 static void
1943 expand_omp_ordered_source (gimple_stmt_iterator *gsi, struct omp_for_data *fd,
1944 tree *counts, location_t loc)
1945 {
1946 enum built_in_function source_ix
1947 = fd->iter_type == long_integer_type_node
1948 ? BUILT_IN_GOMP_DOACROSS_POST : BUILT_IN_GOMP_DOACROSS_ULL_POST;
1949 gimple *g
1950 = gimple_build_call (builtin_decl_explicit (source_ix), 1,
1951 build_fold_addr_expr (counts[fd->ordered]));
1952 gimple_set_location (g, loc);
1953 gsi_insert_before (gsi, g, GSI_SAME_STMT);
1954 }
1955
1956 /* Expand a single depend from #pragma omp ordered depend(sink:...). */
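/* Rough sketch, assuming an ordered(2) loop nest with unit steps, "<"
   conditions and lower bounds here called n1_0 and n1_1 (placeholder
   names):
     #pragma omp ordered depend(sink: i - 1, j)
   is expanded to approximately
     if (i - 1 >= n1_0)
       GOMP_doacross_wait ((i - n1_0) - 1, j - n1_1);
   i.e. a guarded call waiting for the indicated logical iteration, with
   each argument normalized to an iteration count.  */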
1957
1958 static void
1959 expand_omp_ordered_sink (gimple_stmt_iterator *gsi, struct omp_for_data *fd,
1960 tree *counts, tree c, location_t loc)
1961 {
1962 auto_vec<tree, 10> args;
1963 enum built_in_function sink_ix
1964 = fd->iter_type == long_integer_type_node
1965 ? BUILT_IN_GOMP_DOACROSS_WAIT : BUILT_IN_GOMP_DOACROSS_ULL_WAIT;
1966 tree t, off, coff = NULL_TREE, deps = OMP_CLAUSE_DECL (c), cond = NULL_TREE;
1967 int i;
1968 gimple_stmt_iterator gsi2 = *gsi;
1969 bool warned_step = false;
1970
1971 for (i = 0; i < fd->ordered; i++)
1972 {
1973 tree step = NULL_TREE;
1974 off = TREE_PURPOSE (deps);
1975 if (TREE_CODE (off) == TRUNC_DIV_EXPR)
1976 {
1977 step = TREE_OPERAND (off, 1);
1978 off = TREE_OPERAND (off, 0);
1979 }
1980 if (!integer_zerop (off))
1981 {
1982 gcc_assert (fd->loops[i].cond_code == LT_EXPR
1983 || fd->loops[i].cond_code == GT_EXPR);
1984 bool forward = fd->loops[i].cond_code == LT_EXPR;
1985 if (step)
1986 {
1987 /* Non-simple Fortran DO loops. If step is variable,
1988 we don't know even the direction at compile time, so we
1989 can't warn. */
1990 if (TREE_CODE (step) != INTEGER_CST)
1991 break;
1992 forward = tree_int_cst_sgn (step) != -1;
1993 }
1994 if (forward ^ OMP_CLAUSE_DEPEND_SINK_NEGATIVE (deps))
1995 warning_at (loc, 0, "%<depend(sink)%> clause waiting for "
1996 "lexically later iteration");
1997 break;
1998 }
1999 deps = TREE_CHAIN (deps);
2000 }
2001 /* If all offsets corresponding to the collapsed loops are zero,
2002 this depend clause can be ignored. FIXME: but there is still a
2003 flush needed. We need to emit one __sync_synchronize () for it
2004 though (perhaps conditionally)? Solve this together with the
2005 conservative dependence folding optimization.
2006 if (i >= fd->collapse)
2007 return; */
2008
2009 deps = OMP_CLAUSE_DECL (c);
2010 gsi_prev (&gsi2);
2011 edge e1 = split_block (gsi_bb (gsi2), gsi_stmt (gsi2));
2012 edge e2 = split_block_after_labels (e1->dest);
2013
2014 gsi2 = gsi_after_labels (e1->dest);
2015 *gsi = gsi_last_bb (e1->src);
2016 for (i = 0; i < fd->ordered; i++)
2017 {
2018 tree itype = TREE_TYPE (fd->loops[i].v);
2019 tree step = NULL_TREE;
2020 tree orig_off = NULL_TREE;
2021 if (POINTER_TYPE_P (itype))
2022 itype = sizetype;
2023 if (i)
2024 deps = TREE_CHAIN (deps);
2025 off = TREE_PURPOSE (deps);
2026 if (TREE_CODE (off) == TRUNC_DIV_EXPR)
2027 {
2028 step = TREE_OPERAND (off, 1);
2029 off = TREE_OPERAND (off, 0);
2030 gcc_assert (fd->loops[i].cond_code == LT_EXPR
2031 && integer_onep (fd->loops[i].step)
2032 && !POINTER_TYPE_P (TREE_TYPE (fd->loops[i].v)));
2033 }
2034 tree s = fold_convert_loc (loc, itype, step ? step : fd->loops[i].step);
2035 if (step)
2036 {
2037 off = fold_convert_loc (loc, itype, off);
2038 orig_off = off;
2039 off = fold_build2_loc (loc, TRUNC_DIV_EXPR, itype, off, s);
2040 }
2041
2042 if (integer_zerop (off))
2043 t = boolean_true_node;
2044 else
2045 {
2046 tree a;
2047 tree co = fold_convert_loc (loc, itype, off);
2048 if (POINTER_TYPE_P (TREE_TYPE (fd->loops[i].v)))
2049 {
2050 if (OMP_CLAUSE_DEPEND_SINK_NEGATIVE (deps))
2051 co = fold_build1_loc (loc, NEGATE_EXPR, itype, co);
2052 a = fold_build2_loc (loc, POINTER_PLUS_EXPR,
2053 TREE_TYPE (fd->loops[i].v), fd->loops[i].v,
2054 co);
2055 }
2056 else if (OMP_CLAUSE_DEPEND_SINK_NEGATIVE (deps))
2057 a = fold_build2_loc (loc, MINUS_EXPR, TREE_TYPE (fd->loops[i].v),
2058 fd->loops[i].v, co);
2059 else
2060 a = fold_build2_loc (loc, PLUS_EXPR, TREE_TYPE (fd->loops[i].v),
2061 fd->loops[i].v, co);
2062 if (step)
2063 {
2064 tree t1, t2;
2065 if (OMP_CLAUSE_DEPEND_SINK_NEGATIVE (deps))
2066 t1 = fold_build2_loc (loc, GE_EXPR, boolean_type_node, a,
2067 fd->loops[i].n1);
2068 else
2069 t1 = fold_build2_loc (loc, LT_EXPR, boolean_type_node, a,
2070 fd->loops[i].n2);
2071 if (OMP_CLAUSE_DEPEND_SINK_NEGATIVE (deps))
2072 t2 = fold_build2_loc (loc, LT_EXPR, boolean_type_node, a,
2073 fd->loops[i].n2);
2074 else
2075 t2 = fold_build2_loc (loc, GE_EXPR, boolean_type_node, a,
2076 fd->loops[i].n1);
2077 t = fold_build2_loc (loc, LT_EXPR, boolean_type_node,
2078 step, build_int_cst (TREE_TYPE (step), 0));
2079 if (TREE_CODE (step) != INTEGER_CST)
2080 {
2081 t1 = unshare_expr (t1);
2082 t1 = force_gimple_operand_gsi (gsi, t1, true, NULL_TREE,
2083 false, GSI_CONTINUE_LINKING);
2084 t2 = unshare_expr (t2);
2085 t2 = force_gimple_operand_gsi (gsi, t2, true, NULL_TREE,
2086 false, GSI_CONTINUE_LINKING);
2087 }
2088 t = fold_build3_loc (loc, COND_EXPR, boolean_type_node,
2089 t, t2, t1);
2090 }
2091 else if (fd->loops[i].cond_code == LT_EXPR)
2092 {
2093 if (OMP_CLAUSE_DEPEND_SINK_NEGATIVE (deps))
2094 t = fold_build2_loc (loc, GE_EXPR, boolean_type_node, a,
2095 fd->loops[i].n1);
2096 else
2097 t = fold_build2_loc (loc, LT_EXPR, boolean_type_node, a,
2098 fd->loops[i].n2);
2099 }
2100 else if (OMP_CLAUSE_DEPEND_SINK_NEGATIVE (deps))
2101 t = fold_build2_loc (loc, GT_EXPR, boolean_type_node, a,
2102 fd->loops[i].n2);
2103 else
2104 t = fold_build2_loc (loc, LE_EXPR, boolean_type_node, a,
2105 fd->loops[i].n1);
2106 }
2107 if (cond)
2108 cond = fold_build2_loc (loc, BIT_AND_EXPR, boolean_type_node, cond, t);
2109 else
2110 cond = t;
2111
2112 off = fold_convert_loc (loc, itype, off);
2113
2114 if (step
2115 || (fd->loops[i].cond_code == LT_EXPR
2116 ? !integer_onep (fd->loops[i].step)
2117 : !integer_minus_onep (fd->loops[i].step)))
2118 {
2119 if (step == NULL_TREE
2120 && TYPE_UNSIGNED (itype)
2121 && fd->loops[i].cond_code == GT_EXPR)
2122 t = fold_build2_loc (loc, TRUNC_MOD_EXPR, itype, off,
2123 fold_build1_loc (loc, NEGATE_EXPR, itype,
2124 s));
2125 else
2126 t = fold_build2_loc (loc, TRUNC_MOD_EXPR, itype,
2127 orig_off ? orig_off : off, s);
2128 t = fold_build2_loc (loc, EQ_EXPR, boolean_type_node, t,
2129 build_int_cst (itype, 0));
2130 if (integer_zerop (t) && !warned_step)
2131 {
2132 warning_at (loc, 0, "%<depend(sink)%> refers to iteration never "
2133 "in the iteration space");
2134 warned_step = true;
2135 }
2136 cond = fold_build2_loc (loc, BIT_AND_EXPR, boolean_type_node,
2137 cond, t);
2138 }
2139
2140 if (i <= fd->collapse - 1 && fd->collapse > 1)
2141 t = fd->loop.v;
2142 else if (counts[i])
2143 t = counts[i];
2144 else
2145 {
2146 t = fold_build2_loc (loc, MINUS_EXPR, TREE_TYPE (fd->loops[i].v),
2147 fd->loops[i].v, fd->loops[i].n1);
2148 t = fold_convert_loc (loc, fd->iter_type, t);
2149 }
2150 if (step)
2151 /* We have divided off by step already earlier. */;
2152 else if (TYPE_UNSIGNED (itype) && fd->loops[i].cond_code == GT_EXPR)
2153 off = fold_build2_loc (loc, TRUNC_DIV_EXPR, itype, off,
2154 fold_build1_loc (loc, NEGATE_EXPR, itype,
2155 s));
2156 else
2157 off = fold_build2_loc (loc, TRUNC_DIV_EXPR, itype, off, s);
2158 if (OMP_CLAUSE_DEPEND_SINK_NEGATIVE (deps))
2159 off = fold_build1_loc (loc, NEGATE_EXPR, itype, off);
2160 off = fold_convert_loc (loc, fd->iter_type, off);
2161 if (i <= fd->collapse - 1 && fd->collapse > 1)
2162 {
2163 if (i)
2164 off = fold_build2_loc (loc, PLUS_EXPR, fd->iter_type, coff,
2165 off);
2166 if (i < fd->collapse - 1)
2167 {
2168 coff = fold_build2_loc (loc, MULT_EXPR, fd->iter_type, off,
2169 counts[i]);
2170 continue;
2171 }
2172 }
2173 off = unshare_expr (off);
2174 t = fold_build2_loc (loc, PLUS_EXPR, fd->iter_type, t, off);
2175 t = force_gimple_operand_gsi (&gsi2, t, true, NULL_TREE,
2176 true, GSI_SAME_STMT);
2177 args.safe_push (t);
2178 }
2179 gimple *g = gimple_build_call_vec (builtin_decl_explicit (sink_ix), args);
2180 gimple_set_location (g, loc);
2181 gsi_insert_before (&gsi2, g, GSI_SAME_STMT);
2182
2183 cond = unshare_expr (cond);
2184 cond = force_gimple_operand_gsi (gsi, cond, true, NULL_TREE, false,
2185 GSI_CONTINUE_LINKING);
2186 gsi_insert_after (gsi, gimple_build_cond_empty (cond), GSI_NEW_STMT);
2187 edge e3 = make_edge (e1->src, e2->dest, EDGE_FALSE_VALUE);
2188 e3->probability = REG_BR_PROB_BASE / 8;
2189 e1->probability = REG_BR_PROB_BASE - e3->probability;
2190 e1->flags = EDGE_TRUE_VALUE;
2191 set_immediate_dominator (CDI_DOMINATORS, e2->dest, e1->src);
2192
2193 *gsi = gsi_after_labels (e2->dest);
2194 }
2195
2196 /* Expand all #pragma omp ordered depend(source) and
2197 #pragma omp ordered depend(sink:...) constructs in the current
2198 #pragma omp for ordered(n) region. */
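/* Sketch of the bookkeeping set up here (names as created below):
   counts[fd->collapse - 1] through counts[fd->ordered - 1] hold
   per-dimension iteration counters (".orditer" temporaries, or NULL_TREE
   when the loop variable itself suffices), and counts[fd->ordered] is
   the ".orditera" array of fd->ordered - fd->collapse + 1 elements whose
   address is passed to the GOMP_doacross_*post calls.  */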
2199
2200 static void
2201 expand_omp_ordered_source_sink (struct omp_region *region,
2202 struct omp_for_data *fd, tree *counts,
2203 basic_block cont_bb)
2204 {
2205 struct omp_region *inner;
2206 int i;
2207 for (i = fd->collapse - 1; i < fd->ordered; i++)
2208 if (i == fd->collapse - 1 && fd->collapse > 1)
2209 counts[i] = NULL_TREE;
2210 else if (i >= fd->collapse && !cont_bb)
2211 counts[i] = build_zero_cst (fd->iter_type);
2212 else if (!POINTER_TYPE_P (TREE_TYPE (fd->loops[i].v))
2213 && integer_onep (fd->loops[i].step))
2214 counts[i] = NULL_TREE;
2215 else
2216 counts[i] = create_tmp_var (fd->iter_type, ".orditer");
2217 tree atype
2218 = build_array_type_nelts (fd->iter_type, fd->ordered - fd->collapse + 1);
2219 counts[fd->ordered] = create_tmp_var (atype, ".orditera");
2220 TREE_ADDRESSABLE (counts[fd->ordered]) = 1;
2221
2222 for (inner = region->inner; inner; inner = inner->next)
2223 if (inner->type == GIMPLE_OMP_ORDERED)
2224 {
2225 gomp_ordered *ord_stmt = inner->ord_stmt;
2226 gimple_stmt_iterator gsi = gsi_for_stmt (ord_stmt);
2227 location_t loc = gimple_location (ord_stmt);
2228 tree c;
2229 for (c = gimple_omp_ordered_clauses (ord_stmt);
2230 c; c = OMP_CLAUSE_CHAIN (c))
2231 if (OMP_CLAUSE_DEPEND_KIND (c) == OMP_CLAUSE_DEPEND_SOURCE)
2232 break;
2233 if (c)
2234 expand_omp_ordered_source (&gsi, fd, counts, loc);
2235 for (c = gimple_omp_ordered_clauses (ord_stmt);
2236 c; c = OMP_CLAUSE_CHAIN (c))
2237 if (OMP_CLAUSE_DEPEND_KIND (c) == OMP_CLAUSE_DEPEND_SINK)
2238 expand_omp_ordered_sink (&gsi, fd, counts, c, loc);
2239 gsi_remove (&gsi, true);
2240 }
2241 }
2242
2243 /* Wrap the body into fd->ordered - fd->collapse loops that aren't
2244 collapsed. */
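/* Sketch: for ordered(2) without a collapse clause (fd->collapse == 1)
   the workshared body B is wrapped in one additional sequential loop,
   roughly
     for (V2 = N21; V2 cond2 N22; V2 += STEP2)
       B;
   while keeping .orditera[1] updated with V2's iteration number so that
   depend(source)/depend(sink) observe the current position.  */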
2245
2246 static basic_block
2247 expand_omp_for_ordered_loops (struct omp_for_data *fd, tree *counts,
2248 basic_block cont_bb, basic_block body_bb,
2249 bool ordered_lastprivate)
2250 {
2251 if (fd->ordered == fd->collapse)
2252 return cont_bb;
2253
2254 if (!cont_bb)
2255 {
2256 gimple_stmt_iterator gsi = gsi_after_labels (body_bb);
2257 for (int i = fd->collapse; i < fd->ordered; i++)
2258 {
2259 tree type = TREE_TYPE (fd->loops[i].v);
2260 tree n1 = fold_convert (type, fd->loops[i].n1);
2261 expand_omp_build_assign (&gsi, fd->loops[i].v, n1);
2262 tree aref = build4 (ARRAY_REF, fd->iter_type, counts[fd->ordered],
2263 size_int (i - fd->collapse + 1),
2264 NULL_TREE, NULL_TREE);
2265 expand_omp_build_assign (&gsi, aref, build_zero_cst (fd->iter_type));
2266 }
2267 return NULL;
2268 }
2269
2270 for (int i = fd->ordered - 1; i >= fd->collapse; i--)
2271 {
2272 tree t, type = TREE_TYPE (fd->loops[i].v);
2273 gimple_stmt_iterator gsi = gsi_after_labels (body_bb);
2274 expand_omp_build_assign (&gsi, fd->loops[i].v,
2275 fold_convert (type, fd->loops[i].n1));
2276 if (counts[i])
2277 expand_omp_build_assign (&gsi, counts[i],
2278 build_zero_cst (fd->iter_type));
2279 tree aref = build4 (ARRAY_REF, fd->iter_type, counts[fd->ordered],
2280 size_int (i - fd->collapse + 1),
2281 NULL_TREE, NULL_TREE);
2282 expand_omp_build_assign (&gsi, aref, build_zero_cst (fd->iter_type));
2283 if (!gsi_end_p (gsi))
2284 gsi_prev (&gsi);
2285 else
2286 gsi = gsi_last_bb (body_bb);
2287 edge e1 = split_block (body_bb, gsi_stmt (gsi));
2288 basic_block new_body = e1->dest;
2289 if (body_bb == cont_bb)
2290 cont_bb = new_body;
2291 edge e2 = NULL;
2292 basic_block new_header;
2293 if (EDGE_COUNT (cont_bb->preds) > 0)
2294 {
2295 gsi = gsi_last_bb (cont_bb);
2296 if (POINTER_TYPE_P (type))
2297 t = fold_build_pointer_plus (fd->loops[i].v,
2298 fold_convert (sizetype,
2299 fd->loops[i].step));
2300 else
2301 t = fold_build2 (PLUS_EXPR, type, fd->loops[i].v,
2302 fold_convert (type, fd->loops[i].step));
2303 expand_omp_build_assign (&gsi, fd->loops[i].v, t);
2304 if (counts[i])
2305 {
2306 t = fold_build2 (PLUS_EXPR, fd->iter_type, counts[i],
2307 build_int_cst (fd->iter_type, 1));
2308 expand_omp_build_assign (&gsi, counts[i], t);
2309 t = counts[i];
2310 }
2311 else
2312 {
2313 t = fold_build2 (MINUS_EXPR, TREE_TYPE (fd->loops[i].v),
2314 fd->loops[i].v, fd->loops[i].n1);
2315 t = fold_convert (fd->iter_type, t);
2316 t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
2317 true, GSI_SAME_STMT);
2318 }
2319 aref = build4 (ARRAY_REF, fd->iter_type, counts[fd->ordered],
2320 size_int (i - fd->collapse + 1),
2321 NULL_TREE, NULL_TREE);
2322 expand_omp_build_assign (&gsi, aref, t);
2323 gsi_prev (&gsi);
2324 e2 = split_block (cont_bb, gsi_stmt (gsi));
2325 new_header = e2->dest;
2326 }
2327 else
2328 new_header = cont_bb;
2329 gsi = gsi_after_labels (new_header);
2330 tree v = force_gimple_operand_gsi (&gsi, fd->loops[i].v, true, NULL_TREE,
2331 true, GSI_SAME_STMT);
2332 tree n2
2333 = force_gimple_operand_gsi (&gsi, fold_convert (type, fd->loops[i].n2),
2334 true, NULL_TREE, true, GSI_SAME_STMT);
2335 t = build2 (fd->loops[i].cond_code, boolean_type_node, v, n2);
2336 gsi_insert_before (&gsi, gimple_build_cond_empty (t), GSI_NEW_STMT);
2337 edge e3 = split_block (new_header, gsi_stmt (gsi));
2338 cont_bb = e3->dest;
2339 remove_edge (e1);
2340 make_edge (body_bb, new_header, EDGE_FALLTHRU);
2341 e3->flags = EDGE_FALSE_VALUE;
2342 e3->probability = REG_BR_PROB_BASE / 8;
2343 e1 = make_edge (new_header, new_body, EDGE_TRUE_VALUE);
2344 e1->probability = REG_BR_PROB_BASE - e3->probability;
2345
2346 set_immediate_dominator (CDI_DOMINATORS, new_header, body_bb);
2347 set_immediate_dominator (CDI_DOMINATORS, new_body, new_header);
2348
2349 if (e2)
2350 {
2351 struct loop *loop = alloc_loop ();
2352 loop->header = new_header;
2353 loop->latch = e2->src;
2354 add_loop (loop, body_bb->loop_father);
2355 }
2356 }
2357
2358 /* If there are any lastprivate clauses and it is possible some loops
2359 might have zero iterations, ensure all the decls are initialized;
2360 otherwise we could crash evaluating C++ class iterators with lastprivate
2361 clauses. */
2362 bool need_inits = false;
2363 for (int i = fd->collapse; ordered_lastprivate && i < fd->ordered; i++)
2364 if (need_inits)
2365 {
2366 tree type = TREE_TYPE (fd->loops[i].v);
2367 gimple_stmt_iterator gsi = gsi_after_labels (body_bb);
2368 expand_omp_build_assign (&gsi, fd->loops[i].v,
2369 fold_convert (type, fd->loops[i].n1));
2370 }
2371 else
2372 {
2373 tree type = TREE_TYPE (fd->loops[i].v);
2374 tree this_cond = fold_build2 (fd->loops[i].cond_code,
2375 boolean_type_node,
2376 fold_convert (type, fd->loops[i].n1),
2377 fold_convert (type, fd->loops[i].n2));
2378 if (!integer_onep (this_cond))
2379 need_inits = true;
2380 }
2381
2382 return cont_bb;
2383 }
2384
2385 /* A subroutine of expand_omp_for. Generate code for a parallel
2386 loop with any schedule. Given parameters:
2387
2388 for (V = N1; V cond N2; V += STEP) BODY;
2389
2390 where COND is "<" or ">", we generate pseudocode
2391
2392 more = GOMP_loop_foo_start (N1, N2, STEP, CHUNK, &istart0, &iend0);
2393 if (more) goto L0; else goto L3;
2394 L0:
2395 V = istart0;
2396 iend = iend0;
2397 L1:
2398 BODY;
2399 V += STEP;
2400 if (V cond iend) goto L1; else goto L2;
2401 L2:
2402 if (GOMP_loop_foo_next (&istart0, &iend0)) goto L0; else goto L3;
2403 L3:
2404
2405 If this is a combined omp parallel loop, instead of the call to
2406 GOMP_loop_foo_start, we call GOMP_loop_foo_next.
2407 If this is gimple_omp_for_combined_p loop, then instead of assigning
2408 V and iend in L0 we assign the first two _looptemp_ clause decls of the
2409 inner GIMPLE_OMP_FOR and V += STEP; and
2410 if (V cond iend) goto L1; else goto L2; are removed.
2411
2412 For collapsed loops, given parameters:
2413 collapse(3)
2414 for (V1 = N11; V1 cond1 N12; V1 += STEP1)
2415 for (V2 = N21; V2 cond2 N22; V2 += STEP2)
2416 for (V3 = N31; V3 cond3 N32; V3 += STEP3)
2417 BODY;
2418
2419 we generate pseudocode
2420
2421 if (__builtin_expect (N32 cond3 N31, 0)) goto Z0;
2422 if (cond3 is <)
2423 adj = STEP3 - 1;
2424 else
2425 adj = STEP3 + 1;
2426 count3 = (adj + N32 - N31) / STEP3;
2427 if (__builtin_expect (N22 cond2 N21, 0)) goto Z0;
2428 if (cond2 is <)
2429 adj = STEP2 - 1;
2430 else
2431 adj = STEP2 + 1;
2432 count2 = (adj + N22 - N21) / STEP2;
2433 if (__builtin_expect (N12 cond1 N11, 0)) goto Z0;
2434 if (cond1 is <)
2435 adj = STEP1 - 1;
2436 else
2437 adj = STEP1 + 1;
2438 count1 = (adj + N12 - N11) / STEP1;
2439 count = count1 * count2 * count3;
2440 goto Z1;
2441 Z0:
2442 count = 0;
2443 Z1:
2444 more = GOMP_loop_foo_start (0, count, 1, CHUNK, &istart0, &iend0);
2445 if (more) goto L0; else goto L3;
2446 L0:
2447 V = istart0;
2448 T = V;
2449 V3 = N31 + (T % count3) * STEP3;
2450 T = T / count3;
2451 V2 = N21 + (T % count2) * STEP2;
2452 T = T / count2;
2453 V1 = N11 + T * STEP1;
2454 iend = iend0;
2455 L1:
2456 BODY;
2457 V += 1;
2458 if (V < iend) goto L10; else goto L2;
2459 L10:
2460 V3 += STEP3;
2461 if (V3 cond3 N32) goto L1; else goto L11;
2462 L11:
2463 V3 = N31;
2464 V2 += STEP2;
2465 if (V2 cond2 N22) goto L1; else goto L12;
2466 L12:
2467 V2 = N21;
2468 V1 += STEP1;
2469 goto L1;
2470 L2:
2471 if (GOMP_loop_foo_next (&istart0, &iend0)) goto L0; else goto L3;
2472 L3:
2473
2474 */
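/* As a concrete illustration of the pseudocode above (a sketch, with the
   actual start/next functions chosen by the caller): a loop such as
     #pragma omp for schedule(dynamic, 4)
     for (i = 0; i < n; i++) body (i);
   uses start_fn == BUILT_IN_GOMP_LOOP_DYNAMIC_START and
   next_fn == BUILT_IN_GOMP_LOOP_DYNAMIC_NEXT, i.e. roughly
     if (GOMP_loop_dynamic_start (0, n, 1, 4, &istart0, &iend0))
       do
         for (i = istart0; i < iend0; i++) body (i);
       while (GOMP_loop_dynamic_next (&istart0, &iend0));
     GOMP_loop_end ();
   with GOMP_loop_end_nowait used instead when nowait is in effect.  */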
2475
2476 static void
2477 expand_omp_for_generic (struct omp_region *region,
2478 struct omp_for_data *fd,
2479 enum built_in_function start_fn,
2480 enum built_in_function next_fn,
2481 gimple *inner_stmt)
2482 {
2483 tree type, istart0, iend0, iend;
2484 tree t, vmain, vback, bias = NULL_TREE;
2485 basic_block entry_bb, cont_bb, exit_bb, l0_bb, l1_bb, collapse_bb;
2486 basic_block l2_bb = NULL, l3_bb = NULL;
2487 gimple_stmt_iterator gsi;
2488 gassign *assign_stmt;
2489 bool in_combined_parallel = is_combined_parallel (region);
2490 bool broken_loop = region->cont == NULL;
2491 edge e, ne;
2492 tree *counts = NULL;
2493 int i;
2494 bool ordered_lastprivate = false;
2495
2496 gcc_assert (!broken_loop || !in_combined_parallel);
2497 gcc_assert (fd->iter_type == long_integer_type_node
2498 || !in_combined_parallel);
2499
2500 entry_bb = region->entry;
2501 cont_bb = region->cont;
2502 collapse_bb = NULL;
2503 gcc_assert (EDGE_COUNT (entry_bb->succs) == 2);
2504 gcc_assert (broken_loop
2505 || BRANCH_EDGE (entry_bb)->dest == FALLTHRU_EDGE (cont_bb)->dest);
2506 l0_bb = split_edge (FALLTHRU_EDGE (entry_bb));
2507 l1_bb = single_succ (l0_bb);
2508 if (!broken_loop)
2509 {
2510 l2_bb = create_empty_bb (cont_bb);
2511 gcc_assert (BRANCH_EDGE (cont_bb)->dest == l1_bb
2512 || (single_succ_edge (BRANCH_EDGE (cont_bb)->dest)->dest
2513 == l1_bb));
2514 gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
2515 }
2516 else
2517 l2_bb = NULL;
2518 l3_bb = BRANCH_EDGE (entry_bb)->dest;
2519 exit_bb = region->exit;
2520
2521 gsi = gsi_last_bb (entry_bb);
2522
2523 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
2524 if (fd->ordered
2525 && omp_find_clause (gimple_omp_for_clauses (gsi_stmt (gsi)),
2526 OMP_CLAUSE_LASTPRIVATE))
2527 ordered_lastprivate = true;
2528 if (fd->collapse > 1 || fd->ordered)
2529 {
2530 int first_zero_iter1 = -1, first_zero_iter2 = -1;
2531 basic_block zero_iter1_bb = NULL, zero_iter2_bb = NULL, l2_dom_bb = NULL;
2532
2533 counts = XALLOCAVEC (tree, fd->ordered ? fd->ordered + 1 : fd->collapse);
2534 expand_omp_for_init_counts (fd, &gsi, entry_bb, counts,
2535 zero_iter1_bb, first_zero_iter1,
2536 zero_iter2_bb, first_zero_iter2, l2_dom_bb);
2537
2538 if (zero_iter1_bb)
2539 {
2540 /* Some counts[i] vars might be uninitialized if
2541 some loop has zero iterations. But the body shouldn't
2542 be executed in that case, so just avoid uninit warnings. */
2543 for (i = first_zero_iter1;
2544 i < (fd->ordered ? fd->ordered : fd->collapse); i++)
2545 if (SSA_VAR_P (counts[i]))
2546 TREE_NO_WARNING (counts[i]) = 1;
2547 gsi_prev (&gsi);
2548 e = split_block (entry_bb, gsi_stmt (gsi));
2549 entry_bb = e->dest;
2550 make_edge (zero_iter1_bb, entry_bb, EDGE_FALLTHRU);
2551 gsi = gsi_last_bb (entry_bb);
2552 set_immediate_dominator (CDI_DOMINATORS, entry_bb,
2553 get_immediate_dominator (CDI_DOMINATORS,
2554 zero_iter1_bb));
2555 }
2556 if (zero_iter2_bb)
2557 {
2558 /* Some counts[i] vars might be uninitialized if
2559 some loop has zero iterations. But the body shouldn't
2560 be executed in that case, so just avoid uninit warnings. */
2561 for (i = first_zero_iter2; i < fd->ordered; i++)
2562 if (SSA_VAR_P (counts[i]))
2563 TREE_NO_WARNING (counts[i]) = 1;
2564 if (zero_iter1_bb)
2565 make_edge (zero_iter2_bb, entry_bb, EDGE_FALLTHRU);
2566 else
2567 {
2568 gsi_prev (&gsi);
2569 e = split_block (entry_bb, gsi_stmt (gsi));
2570 entry_bb = e->dest;
2571 make_edge (zero_iter2_bb, entry_bb, EDGE_FALLTHRU);
2572 gsi = gsi_last_bb (entry_bb);
2573 set_immediate_dominator (CDI_DOMINATORS, entry_bb,
2574 get_immediate_dominator
2575 (CDI_DOMINATORS, zero_iter2_bb));
2576 }
2577 }
2578 if (fd->collapse == 1)
2579 {
2580 counts[0] = fd->loop.n2;
2581 fd->loop = fd->loops[0];
2582 }
2583 }
2584
2585 type = TREE_TYPE (fd->loop.v);
2586 istart0 = create_tmp_var (fd->iter_type, ".istart0");
2587 iend0 = create_tmp_var (fd->iter_type, ".iend0");
2588 TREE_ADDRESSABLE (istart0) = 1;
2589 TREE_ADDRESSABLE (iend0) = 1;
2590
2591 /* See if we need to bias by LLONG_MIN. */
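/* (Roughly: adding the bias, i.e. TYPE_MIN_VALUE of the iterator type
   interpreted modulo 2^64, shifts the signed iteration range into the
   unsigned long long range in an order-preserving way, so the *_ull_*
   runtime entry points can be used when the bounds might differ in sign
   or aren't compile-time constants.)  */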
2592 if (fd->iter_type == long_long_unsigned_type_node
2593 && TREE_CODE (type) == INTEGER_TYPE
2594 && !TYPE_UNSIGNED (type)
2595 && fd->ordered == 0)
2596 {
2597 tree n1, n2;
2598
2599 if (fd->loop.cond_code == LT_EXPR)
2600 {
2601 n1 = fd->loop.n1;
2602 n2 = fold_build2 (PLUS_EXPR, type, fd->loop.n2, fd->loop.step);
2603 }
2604 else
2605 {
2606 n1 = fold_build2 (MINUS_EXPR, type, fd->loop.n2, fd->loop.step);
2607 n2 = fd->loop.n1;
2608 }
2609 if (TREE_CODE (n1) != INTEGER_CST
2610 || TREE_CODE (n2) != INTEGER_CST
2611 || ((tree_int_cst_sgn (n1) < 0) ^ (tree_int_cst_sgn (n2) < 0)))
2612 bias = fold_convert (fd->iter_type, TYPE_MIN_VALUE (type));
2613 }
2614
2615 gimple_stmt_iterator gsif = gsi;
2616 gsi_prev (&gsif);
2617
2618 tree arr = NULL_TREE;
2619 if (in_combined_parallel)
2620 {
2621 gcc_assert (fd->ordered == 0);
2622 /* In a combined parallel loop, emit a call to
2623 GOMP_loop_foo_next. */
2624 t = build_call_expr (builtin_decl_explicit (next_fn), 2,
2625 build_fold_addr_expr (istart0),
2626 build_fold_addr_expr (iend0));
2627 }
2628 else
2629 {
2630 tree t0, t1, t2, t3, t4;
2631 /* If this is not a combined parallel loop, emit a call to
2632 GOMP_loop_foo_start in ENTRY_BB. */
2633 t4 = build_fold_addr_expr (iend0);
2634 t3 = build_fold_addr_expr (istart0);
2635 if (fd->ordered)
2636 {
2637 t0 = build_int_cst (unsigned_type_node,
2638 fd->ordered - fd->collapse + 1);
2639 arr = create_tmp_var (build_array_type_nelts (fd->iter_type,
2640 fd->ordered
2641 - fd->collapse + 1),
2642 ".omp_counts");
2643 DECL_NAMELESS (arr) = 1;
2644 TREE_ADDRESSABLE (arr) = 1;
2645 TREE_STATIC (arr) = 1;
2646 vec<constructor_elt, va_gc> *v;
2647 vec_alloc (v, fd->ordered - fd->collapse + 1);
2648 int idx;
2649
2650 for (idx = 0; idx < fd->ordered - fd->collapse + 1; idx++)
2651 {
2652 tree c;
2653 if (idx == 0 && fd->collapse > 1)
2654 c = fd->loop.n2;
2655 else
2656 c = counts[idx + fd->collapse - 1];
2657 tree purpose = size_int (idx);
2658 CONSTRUCTOR_APPEND_ELT (v, purpose, c);
2659 if (TREE_CODE (c) != INTEGER_CST)
2660 TREE_STATIC (arr) = 0;
2661 }
2662
2663 DECL_INITIAL (arr) = build_constructor (TREE_TYPE (arr), v);
2664 if (!TREE_STATIC (arr))
2665 force_gimple_operand_gsi (&gsi, build1 (DECL_EXPR,
2666 void_type_node, arr),
2667 true, NULL_TREE, true, GSI_SAME_STMT);
2668 t1 = build_fold_addr_expr (arr);
2669 t2 = NULL_TREE;
2670 }
2671 else
2672 {
2673 t2 = fold_convert (fd->iter_type, fd->loop.step);
2674 t1 = fd->loop.n2;
2675 t0 = fd->loop.n1;
2676 if (gimple_omp_for_combined_into_p (fd->for_stmt))
2677 {
2678 tree innerc
2679 = omp_find_clause (gimple_omp_for_clauses (fd->for_stmt),
2680 OMP_CLAUSE__LOOPTEMP_);
2681 gcc_assert (innerc);
2682 t0 = OMP_CLAUSE_DECL (innerc);
2683 innerc = omp_find_clause (OMP_CLAUSE_CHAIN (innerc),
2684 OMP_CLAUSE__LOOPTEMP_);
2685 gcc_assert (innerc);
2686 t1 = OMP_CLAUSE_DECL (innerc);
2687 }
2688 if (POINTER_TYPE_P (TREE_TYPE (t0))
2689 && TYPE_PRECISION (TREE_TYPE (t0))
2690 != TYPE_PRECISION (fd->iter_type))
2691 {
2692 /* Avoid casting pointers to an integer of a different size. */
2693 tree itype = signed_type_for (type);
2694 t1 = fold_convert (fd->iter_type, fold_convert (itype, t1));
2695 t0 = fold_convert (fd->iter_type, fold_convert (itype, t0));
2696 }
2697 else
2698 {
2699 t1 = fold_convert (fd->iter_type, t1);
2700 t0 = fold_convert (fd->iter_type, t0);
2701 }
2702 if (bias)
2703 {
2704 t1 = fold_build2 (PLUS_EXPR, fd->iter_type, t1, bias);
2705 t0 = fold_build2 (PLUS_EXPR, fd->iter_type, t0, bias);
2706 }
2707 }
2708 if (fd->iter_type == long_integer_type_node || fd->ordered)
2709 {
2710 if (fd->chunk_size)
2711 {
2712 t = fold_convert (fd->iter_type, fd->chunk_size);
2713 t = omp_adjust_chunk_size (t, fd->simd_schedule);
2714 if (fd->ordered)
2715 t = build_call_expr (builtin_decl_explicit (start_fn),
2716 5, t0, t1, t, t3, t4);
2717 else
2718 t = build_call_expr (builtin_decl_explicit (start_fn),
2719 6, t0, t1, t2, t, t3, t4);
2720 }
2721 else if (fd->ordered)
2722 t = build_call_expr (builtin_decl_explicit (start_fn),
2723 4, t0, t1, t3, t4);
2724 else
2725 t = build_call_expr (builtin_decl_explicit (start_fn),
2726 5, t0, t1, t2, t3, t4);
2727 }
2728 else
2729 {
2730 tree t5;
2731 tree c_bool_type;
2732 tree bfn_decl;
2733
2734 /* The GOMP_loop_ull_*start functions have an additional boolean
2735 argument, true for < loops and false for > loops.
2736 In Fortran, the C bool type can be different from
2737 boolean_type_node. */
2738 bfn_decl = builtin_decl_explicit (start_fn);
2739 c_bool_type = TREE_TYPE (TREE_TYPE (bfn_decl));
2740 t5 = build_int_cst (c_bool_type,
2741 fd->loop.cond_code == LT_EXPR ? 1 : 0);
2742 if (fd->chunk_size)
2743 {
2744 tree bfn_decl = builtin_decl_explicit (start_fn);
2745 t = fold_convert (fd->iter_type, fd->chunk_size);
2746 t = omp_adjust_chunk_size (t, fd->simd_schedule);
2747 t = build_call_expr (bfn_decl, 7, t5, t0, t1, t2, t, t3, t4);
2748 }
2749 else
2750 t = build_call_expr (builtin_decl_explicit (start_fn),
2751 6, t5, t0, t1, t2, t3, t4);
2752 }
2753 }
2754 if (TREE_TYPE (t) != boolean_type_node)
2755 t = fold_build2 (NE_EXPR, boolean_type_node,
2756 t, build_int_cst (TREE_TYPE (t), 0));
2757 t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
2758 true, GSI_SAME_STMT);
2759 if (arr && !TREE_STATIC (arr))
2760 {
2761 tree clobber = build_constructor (TREE_TYPE (arr), NULL);
2762 TREE_THIS_VOLATILE (clobber) = 1;
2763 gsi_insert_before (&gsi, gimple_build_assign (arr, clobber),
2764 GSI_SAME_STMT);
2765 }
2766 gsi_insert_after (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT);
2767
2768 /* Remove the GIMPLE_OMP_FOR statement. */
2769 gsi_remove (&gsi, true);
2770
2771 if (gsi_end_p (gsif))
2772 gsif = gsi_after_labels (gsi_bb (gsif));
2773 gsi_next (&gsif);
2774
2775 /* Iteration setup for sequential loop goes in L0_BB. */
2776 tree startvar = fd->loop.v;
2777 tree endvar = NULL_TREE;
2778
2779 if (gimple_omp_for_combined_p (fd->for_stmt))
2780 {
2781 gcc_assert (gimple_code (inner_stmt) == GIMPLE_OMP_FOR
2782 && gimple_omp_for_kind (inner_stmt)
2783 == GF_OMP_FOR_KIND_SIMD);
2784 tree innerc = omp_find_clause (gimple_omp_for_clauses (inner_stmt),
2785 OMP_CLAUSE__LOOPTEMP_);
2786 gcc_assert (innerc);
2787 startvar = OMP_CLAUSE_DECL (innerc);
2788 innerc = omp_find_clause (OMP_CLAUSE_CHAIN (innerc),
2789 OMP_CLAUSE__LOOPTEMP_);
2790 gcc_assert (innerc);
2791 endvar = OMP_CLAUSE_DECL (innerc);
2792 }
2793
2794 gsi = gsi_start_bb (l0_bb);
2795 t = istart0;
2796 if (fd->ordered && fd->collapse == 1)
2797 t = fold_build2 (MULT_EXPR, fd->iter_type, t,
2798 fold_convert (fd->iter_type, fd->loop.step));
2799 else if (bias)
2800 t = fold_build2 (MINUS_EXPR, fd->iter_type, t, bias);
2801 if (fd->ordered && fd->collapse == 1)
2802 {
2803 if (POINTER_TYPE_P (TREE_TYPE (startvar)))
2804 t = fold_build2 (POINTER_PLUS_EXPR, TREE_TYPE (startvar),
2805 fd->loop.n1, fold_convert (sizetype, t));
2806 else
2807 {
2808 t = fold_convert (TREE_TYPE (startvar), t);
2809 t = fold_build2 (PLUS_EXPR, TREE_TYPE (startvar),
2810 fd->loop.n1, t);
2811 }
2812 }
2813 else
2814 {
2815 if (POINTER_TYPE_P (TREE_TYPE (startvar)))
2816 t = fold_convert (signed_type_for (TREE_TYPE (startvar)), t);
2817 t = fold_convert (TREE_TYPE (startvar), t);
2818 }
2819 t = force_gimple_operand_gsi (&gsi, t,
2820 DECL_P (startvar)
2821 && TREE_ADDRESSABLE (startvar),
2822 NULL_TREE, false, GSI_CONTINUE_LINKING);
2823 assign_stmt = gimple_build_assign (startvar, t);
2824 gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING);
2825
2826 t = iend0;
2827 if (fd->ordered && fd->collapse == 1)
2828 t = fold_build2 (MULT_EXPR, fd->iter_type, t,
2829 fold_convert (fd->iter_type, fd->loop.step));
2830 else if (bias)
2831 t = fold_build2 (MINUS_EXPR, fd->iter_type, t, bias);
2832 if (fd->ordered && fd->collapse == 1)
2833 {
2834 if (POINTER_TYPE_P (TREE_TYPE (startvar)))
2835 t = fold_build2 (POINTER_PLUS_EXPR, TREE_TYPE (startvar),
2836 fd->loop.n1, fold_convert (sizetype, t));
2837 else
2838 {
2839 t = fold_convert (TREE_TYPE (startvar), t);
2840 t = fold_build2 (PLUS_EXPR, TREE_TYPE (startvar),
2841 fd->loop.n1, t);
2842 }
2843 }
2844 else
2845 {
2846 if (POINTER_TYPE_P (TREE_TYPE (startvar)))
2847 t = fold_convert (signed_type_for (TREE_TYPE (startvar)), t);
2848 t = fold_convert (TREE_TYPE (startvar), t);
2849 }
2850 iend = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
2851 false, GSI_CONTINUE_LINKING);
2852 if (endvar)
2853 {
2854 assign_stmt = gimple_build_assign (endvar, iend);
2855 gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING);
2856 if (useless_type_conversion_p (TREE_TYPE (fd->loop.v), TREE_TYPE (iend)))
2857 assign_stmt = gimple_build_assign (fd->loop.v, iend);
2858 else
2859 assign_stmt = gimple_build_assign (fd->loop.v, NOP_EXPR, iend);
2860 gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING);
2861 }
2862 /* Handle linear clause adjustments. */
2863 tree itercnt = NULL_TREE;
2864 if (gimple_omp_for_kind (fd->for_stmt) == GF_OMP_FOR_KIND_FOR)
2865 for (tree c = gimple_omp_for_clauses (fd->for_stmt);
2866 c; c = OMP_CLAUSE_CHAIN (c))
2867 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LINEAR
2868 && !OMP_CLAUSE_LINEAR_NO_COPYIN (c))
2869 {
2870 tree d = OMP_CLAUSE_DECL (c);
2871 bool is_ref = omp_is_reference (d);
2872 tree t = d, a, dest;
2873 if (is_ref)
2874 t = build_simple_mem_ref_loc (OMP_CLAUSE_LOCATION (c), t);
2875 tree type = TREE_TYPE (t);
2876 if (POINTER_TYPE_P (type))
2877 type = sizetype;
2878 dest = unshare_expr (t);
2879 tree v = create_tmp_var (TREE_TYPE (t), NULL);
2880 expand_omp_build_assign (&gsif, v, t);
2881 if (itercnt == NULL_TREE)
2882 {
2883 itercnt = startvar;
2884 tree n1 = fd->loop.n1;
2885 if (POINTER_TYPE_P (TREE_TYPE (itercnt)))
2886 {
2887 itercnt
2888 = fold_convert (signed_type_for (TREE_TYPE (itercnt)),
2889 itercnt);
2890 n1 = fold_convert (TREE_TYPE (itercnt), n1);
2891 }
2892 itercnt = fold_build2 (MINUS_EXPR, TREE_TYPE (itercnt),
2893 itercnt, n1);
2894 itercnt = fold_build2 (EXACT_DIV_EXPR, TREE_TYPE (itercnt),
2895 itercnt, fd->loop.step);
2896 itercnt = force_gimple_operand_gsi (&gsi, itercnt, true,
2897 NULL_TREE, false,
2898 GSI_CONTINUE_LINKING);
2899 }
2900 a = fold_build2 (MULT_EXPR, type,
2901 fold_convert (type, itercnt),
2902 fold_convert (type, OMP_CLAUSE_LINEAR_STEP (c)));
2903 t = fold_build2 (type == TREE_TYPE (t) ? PLUS_EXPR
2904 : POINTER_PLUS_EXPR, TREE_TYPE (t), v, a);
2905 t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
2906 false, GSI_CONTINUE_LINKING);
2907 assign_stmt = gimple_build_assign (dest, t);
2908 gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING);
2909 }
2910 if (fd->collapse > 1)
2911 expand_omp_for_init_vars (fd, &gsi, counts, inner_stmt, startvar);
2912
2913 if (fd->ordered)
2914 {
2915 /* Until now, the counts array contained the number of iterations
2916 (or a variable holding it) for the ith loop. From now on, we need
2917 those counts only for the collapsed loops, and only for the 2nd
2918 through the last collapsed one. Move them one element earlier;
2919 we'll use counts[fd->collapse - 1] for the first source/sink
2920 iteration counter and so on, and counts[fd->ordered]
2921 as the array holding the current counter values for
2922 depend(source). */
2923 if (fd->collapse > 1)
2924 memmove (counts, counts + 1, (fd->collapse - 1) * sizeof (counts[0]));
2925 if (broken_loop)
2926 {
2927 int i;
2928 for (i = fd->collapse; i < fd->ordered; i++)
2929 {
2930 tree type = TREE_TYPE (fd->loops[i].v);
2931 tree this_cond
2932 = fold_build2 (fd->loops[i].cond_code, boolean_type_node,
2933 fold_convert (type, fd->loops[i].n1),
2934 fold_convert (type, fd->loops[i].n2));
2935 if (!integer_onep (this_cond))
2936 break;
2937 }
2938 if (i < fd->ordered)
2939 {
2940 cont_bb
2941 = create_empty_bb (EXIT_BLOCK_PTR_FOR_FN (cfun)->prev_bb);
2942 add_bb_to_loop (cont_bb, l1_bb->loop_father);
2943 gimple_stmt_iterator gsi = gsi_after_labels (cont_bb);
2944 gimple *g = gimple_build_omp_continue (fd->loop.v, fd->loop.v);
2945 gsi_insert_before (&gsi, g, GSI_SAME_STMT);
2946 make_edge (cont_bb, l3_bb, EDGE_FALLTHRU);
2947 make_edge (cont_bb, l1_bb, 0);
2948 l2_bb = create_empty_bb (cont_bb);
2949 broken_loop = false;
2950 }
2951 }
2952 expand_omp_ordered_source_sink (region, fd, counts, cont_bb);
2953 cont_bb = expand_omp_for_ordered_loops (fd, counts, cont_bb, l1_bb,
2954 ordered_lastprivate);
2955 if (counts[fd->collapse - 1])
2956 {
2957 gcc_assert (fd->collapse == 1);
2958 gsi = gsi_last_bb (l0_bb);
2959 expand_omp_build_assign (&gsi, counts[fd->collapse - 1],
2960 istart0, true);
2961 gsi = gsi_last_bb (cont_bb);
2962 t = fold_build2 (PLUS_EXPR, fd->iter_type, counts[fd->collapse - 1],
2963 build_int_cst (fd->iter_type, 1));
2964 expand_omp_build_assign (&gsi, counts[fd->collapse - 1], t);
2965 tree aref = build4 (ARRAY_REF, fd->iter_type, counts[fd->ordered],
2966 size_zero_node, NULL_TREE, NULL_TREE);
2967 expand_omp_build_assign (&gsi, aref, counts[fd->collapse - 1]);
2968 t = counts[fd->collapse - 1];
2969 }
2970 else if (fd->collapse > 1)
2971 t = fd->loop.v;
2972 else
2973 {
2974 t = fold_build2 (MINUS_EXPR, TREE_TYPE (fd->loops[0].v),
2975 fd->loops[0].v, fd->loops[0].n1);
2976 t = fold_convert (fd->iter_type, t);
2977 }
2978 gsi = gsi_last_bb (l0_bb);
2979 tree aref = build4 (ARRAY_REF, fd->iter_type, counts[fd->ordered],
2980 size_zero_node, NULL_TREE, NULL_TREE);
2981 t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
2982 false, GSI_CONTINUE_LINKING);
2983 expand_omp_build_assign (&gsi, aref, t, true);
2984 }
2985
2986 if (!broken_loop)
2987 {
2988 /* Code to control the increment and predicate for the sequential
2989 loop goes in the CONT_BB. */
2990 gsi = gsi_last_bb (cont_bb);
2991 gomp_continue *cont_stmt = as_a <gomp_continue *> (gsi_stmt (gsi));
2992 gcc_assert (gimple_code (cont_stmt) == GIMPLE_OMP_CONTINUE);
2993 vmain = gimple_omp_continue_control_use (cont_stmt);
2994 vback = gimple_omp_continue_control_def (cont_stmt);
2995
2996 if (!gimple_omp_for_combined_p (fd->for_stmt))
2997 {
2998 if (POINTER_TYPE_P (type))
2999 t = fold_build_pointer_plus (vmain, fd->loop.step);
3000 else
3001 t = fold_build2 (PLUS_EXPR, type, vmain, fd->loop.step);
3002 t = force_gimple_operand_gsi (&gsi, t,
3003 DECL_P (vback)
3004 && TREE_ADDRESSABLE (vback),
3005 NULL_TREE, true, GSI_SAME_STMT);
3006 assign_stmt = gimple_build_assign (vback, t);
3007 gsi_insert_before (&gsi, assign_stmt, GSI_SAME_STMT);
3008
3009 if (fd->ordered && counts[fd->collapse - 1] == NULL_TREE)
3010 {
3011 if (fd->collapse > 1)
3012 t = fd->loop.v;
3013 else
3014 {
3015 t = fold_build2 (MINUS_EXPR, TREE_TYPE (fd->loops[0].v),
3016 fd->loops[0].v, fd->loops[0].n1);
3017 t = fold_convert (fd->iter_type, t);
3018 }
3019 tree aref = build4 (ARRAY_REF, fd->iter_type,
3020 counts[fd->ordered], size_zero_node,
3021 NULL_TREE, NULL_TREE);
3022 t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3023 true, GSI_SAME_STMT);
3024 expand_omp_build_assign (&gsi, aref, t);
3025 }
3026
3027 t = build2 (fd->loop.cond_code, boolean_type_node,
3028 DECL_P (vback) && TREE_ADDRESSABLE (vback) ? t : vback,
3029 iend);
3030 gcond *cond_stmt = gimple_build_cond_empty (t);
3031 gsi_insert_before (&gsi, cond_stmt, GSI_SAME_STMT);
3032 }
3033
3034 /* Remove GIMPLE_OMP_CONTINUE. */
3035 gsi_remove (&gsi, true);
3036
3037 if (fd->collapse > 1 && !gimple_omp_for_combined_p (fd->for_stmt))
3038 collapse_bb = extract_omp_for_update_vars (fd, cont_bb, l1_bb);
3039
3040 /* Emit code to get the next parallel iteration in L2_BB. */
3041 gsi = gsi_start_bb (l2_bb);
3042
3043 t = build_call_expr (builtin_decl_explicit (next_fn), 2,
3044 build_fold_addr_expr (istart0),
3045 build_fold_addr_expr (iend0));
3046 t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3047 false, GSI_CONTINUE_LINKING);
3048 if (TREE_TYPE (t) != boolean_type_node)
3049 t = fold_build2 (NE_EXPR, boolean_type_node,
3050 t, build_int_cst (TREE_TYPE (t), 0));
3051 gcond *cond_stmt = gimple_build_cond_empty (t);
3052 gsi_insert_after (&gsi, cond_stmt, GSI_CONTINUE_LINKING);
3053 }
3054
3055 /* Add the loop cleanup function. */
3056 gsi = gsi_last_bb (exit_bb);
3057 if (gimple_omp_return_nowait_p (gsi_stmt (gsi)))
3058 t = builtin_decl_explicit (BUILT_IN_GOMP_LOOP_END_NOWAIT);
3059 else if (gimple_omp_return_lhs (gsi_stmt (gsi)))
3060 t = builtin_decl_explicit (BUILT_IN_GOMP_LOOP_END_CANCEL);
3061 else
3062 t = builtin_decl_explicit (BUILT_IN_GOMP_LOOP_END);
3063 gcall *call_stmt = gimple_build_call (t, 0);
3064 if (gimple_omp_return_lhs (gsi_stmt (gsi)))
3065 gimple_call_set_lhs (call_stmt, gimple_omp_return_lhs (gsi_stmt (gsi)));
3066 gsi_insert_after (&gsi, call_stmt, GSI_SAME_STMT);
3067 if (fd->ordered)
3068 {
3069 tree arr = counts[fd->ordered];
3070 tree clobber = build_constructor (TREE_TYPE (arr), NULL);
3071 TREE_THIS_VOLATILE (clobber) = 1;
3072 gsi_insert_after (&gsi, gimple_build_assign (arr, clobber),
3073 GSI_SAME_STMT);
3074 }
3075 gsi_remove (&gsi, true);
3076
3077 /* Connect the new blocks. */
3078 find_edge (entry_bb, l0_bb)->flags = EDGE_TRUE_VALUE;
3079 find_edge (entry_bb, l3_bb)->flags = EDGE_FALSE_VALUE;
3080
3081 if (!broken_loop)
3082 {
3083 gimple_seq phis;
3084
3085 e = find_edge (cont_bb, l3_bb);
3086 ne = make_edge (l2_bb, l3_bb, EDGE_FALSE_VALUE);
3087
3088 phis = phi_nodes (l3_bb);
3089 for (gsi = gsi_start (phis); !gsi_end_p (gsi); gsi_next (&gsi))
3090 {
3091 gimple *phi = gsi_stmt (gsi);
3092 SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, ne),
3093 PHI_ARG_DEF_FROM_EDGE (phi, e));
3094 }
3095 remove_edge (e);
3096
3097 make_edge (cont_bb, l2_bb, EDGE_FALSE_VALUE);
3098 e = find_edge (cont_bb, l1_bb);
3099 if (e == NULL)
3100 {
3101 e = BRANCH_EDGE (cont_bb);
3102 gcc_assert (single_succ (e->dest) == l1_bb);
3103 }
3104 if (gimple_omp_for_combined_p (fd->for_stmt))
3105 {
3106 remove_edge (e);
3107 e = NULL;
3108 }
3109 else if (fd->collapse > 1)
3110 {
3111 remove_edge (e);
3112 e = make_edge (cont_bb, collapse_bb, EDGE_TRUE_VALUE);
3113 }
3114 else
3115 e->flags = EDGE_TRUE_VALUE;
3116 if (e)
3117 {
3118 e->probability = REG_BR_PROB_BASE * 7 / 8;
3119 find_edge (cont_bb, l2_bb)->probability = REG_BR_PROB_BASE / 8;
3120 }
3121 else
3122 {
3123 e = find_edge (cont_bb, l2_bb);
3124 e->flags = EDGE_FALLTHRU;
3125 }
3126 make_edge (l2_bb, l0_bb, EDGE_TRUE_VALUE);
3127
3128 if (gimple_in_ssa_p (cfun))
3129 {
3130 /* Add phis to the outer loop that connect to the phis in the inner,
3131 original loop, and move the loop entry value of the inner phi to
3132 the loop entry value of the outer phi. */
3133 gphi_iterator psi;
3134 for (psi = gsi_start_phis (l3_bb); !gsi_end_p (psi); gsi_next (&psi))
3135 {
3136 source_location locus;
3137 gphi *nphi;
3138 gphi *exit_phi = psi.phi ();
3139
3140 edge l2_to_l3 = find_edge (l2_bb, l3_bb);
3141 tree exit_res = PHI_ARG_DEF_FROM_EDGE (exit_phi, l2_to_l3);
3142
3143 basic_block latch = BRANCH_EDGE (cont_bb)->dest;
3144 edge latch_to_l1 = find_edge (latch, l1_bb);
3145 gphi *inner_phi
3146 = find_phi_with_arg_on_edge (exit_res, latch_to_l1);
3147
3148 tree t = gimple_phi_result (exit_phi);
3149 tree new_res = copy_ssa_name (t, NULL);
3150 nphi = create_phi_node (new_res, l0_bb);
3151
3152 edge l0_to_l1 = find_edge (l0_bb, l1_bb);
3153 t = PHI_ARG_DEF_FROM_EDGE (inner_phi, l0_to_l1);
3154 locus = gimple_phi_arg_location_from_edge (inner_phi, l0_to_l1);
3155 edge entry_to_l0 = find_edge (entry_bb, l0_bb);
3156 add_phi_arg (nphi, t, entry_to_l0, locus);
3157
3158 edge l2_to_l0 = find_edge (l2_bb, l0_bb);
3159 add_phi_arg (nphi, exit_res, l2_to_l0, UNKNOWN_LOCATION);
3160
3161 add_phi_arg (inner_phi, new_res, l0_to_l1, UNKNOWN_LOCATION);
3162 }
3163 }
3164
3165 set_immediate_dominator (CDI_DOMINATORS, l2_bb,
3166 recompute_dominator (CDI_DOMINATORS, l2_bb));
3167 set_immediate_dominator (CDI_DOMINATORS, l3_bb,
3168 recompute_dominator (CDI_DOMINATORS, l3_bb));
3169 set_immediate_dominator (CDI_DOMINATORS, l0_bb,
3170 recompute_dominator (CDI_DOMINATORS, l0_bb));
3171 set_immediate_dominator (CDI_DOMINATORS, l1_bb,
3172 recompute_dominator (CDI_DOMINATORS, l1_bb));
3173
3174 /* We enter expand_omp_for_generic with a loop. This original loop may
3175 have its own loop struct, or it may be part of an outer loop struct
3176 (which may be the fake loop). */
3177 struct loop *outer_loop = entry_bb->loop_father;
3178 bool orig_loop_has_loop_struct = l1_bb->loop_father != outer_loop;
3179
3180 add_bb_to_loop (l2_bb, outer_loop);
3181
3182 /* We've added a new loop around the original loop. Allocate the
3183 corresponding loop struct. */
3184 struct loop *new_loop = alloc_loop ();
3185 new_loop->header = l0_bb;
3186 new_loop->latch = l2_bb;
3187 add_loop (new_loop, outer_loop);
3188
3189 /* Allocate a loop structure for the original loop unless we already
3190 had one. */
3191 if (!orig_loop_has_loop_struct
3192 && !gimple_omp_for_combined_p (fd->for_stmt))
3193 {
3194 struct loop *orig_loop = alloc_loop ();
3195 orig_loop->header = l1_bb;
3196 /* The loop may have multiple latches. */
3197 add_loop (orig_loop, new_loop);
3198 }
3199 }
3200 }
3201
3202 /* A subroutine of expand_omp_for. Generate code for a parallel
3203 loop with static schedule and no specified chunk size. Given
3204 parameters:
3205
3206 for (V = N1; V cond N2; V += STEP) BODY;
3207
3208 where COND is "<" or ">", we generate pseudocode
3209
3210 if ((__typeof (V)) -1 > 0 && N2 cond N1) goto L2;
3211 if (cond is <)
3212 adj = STEP - 1;
3213 else
3214 adj = STEP + 1;
3215 if ((__typeof (V)) -1 > 0 && cond is >)
3216 n = -(adj + N2 - N1) / -STEP;
3217 else
3218 n = (adj + N2 - N1) / STEP;
3219 q = n / nthreads;
3220 tt = n % nthreads;
3221 if (threadid < tt) goto L3; else goto L4;
3222 L3:
3223 tt = 0;
3224 q = q + 1;
3225 L4:
3226 s0 = q * threadid + tt;
3227 e0 = s0 + q;
3228 V = s0 * STEP + N1;
3229 if (s0 >= e0) goto L2; else goto L0;
3230 L0:
3231 e = e0 * STEP + N1;
3232 L1:
3233 BODY;
3234 V += STEP;
3235 if (V cond e) goto L1;
3236 L2:
3237 */
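/* Worked example with illustrative numbers: for n == 10 iterations and
   nthreads == 4, q == 2 and tt == 2, so threads 0 and 1 take q + 1 == 3
   iterations each and threads 2 and 3 take 2 each:
     thread 0: s0 = 0, e0 = 3
     thread 1: s0 = 3, e0 = 6
     thread 2: s0 = 6, e0 = 8
     thread 3: s0 = 8, e0 = 10
   covering [0, 10) exactly once with a balanced split.  */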
3238
3239 static void
3240 expand_omp_for_static_nochunk (struct omp_region *region,
3241 struct omp_for_data *fd,
3242 gimple *inner_stmt)
3243 {
3244 tree n, q, s0, e0, e, t, tt, nthreads, threadid;
3245 tree type, itype, vmain, vback;
3246 basic_block entry_bb, second_bb, third_bb, exit_bb, seq_start_bb;
3247 basic_block body_bb, cont_bb, collapse_bb = NULL;
3248 basic_block fin_bb;
3249 gimple_stmt_iterator gsi;
3250 edge ep;
3251 bool broken_loop = region->cont == NULL;
3252 tree *counts = NULL;
3253 tree n1, n2, step;
3254
3255 itype = type = TREE_TYPE (fd->loop.v);
3256 if (POINTER_TYPE_P (type))
3257 itype = signed_type_for (type);
3258
3259 entry_bb = region->entry;
3260 cont_bb = region->cont;
3261 gcc_assert (EDGE_COUNT (entry_bb->succs) == 2);
3262 fin_bb = BRANCH_EDGE (entry_bb)->dest;
3263 gcc_assert (broken_loop
3264 || (fin_bb == FALLTHRU_EDGE (cont_bb)->dest));
3265 seq_start_bb = split_edge (FALLTHRU_EDGE (entry_bb));
3266 body_bb = single_succ (seq_start_bb);
3267 if (!broken_loop)
3268 {
3269 gcc_assert (BRANCH_EDGE (cont_bb)->dest == body_bb
3270 || single_succ (BRANCH_EDGE (cont_bb)->dest) == body_bb);
3271 gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
3272 }
3273 exit_bb = region->exit;
3274
3275 /* Iteration space partitioning goes in ENTRY_BB. */
3276 gsi = gsi_last_bb (entry_bb);
3277 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
3278
3279 if (fd->collapse > 1)
3280 {
3281 int first_zero_iter = -1, dummy = -1;
3282 basic_block l2_dom_bb = NULL, dummy_bb = NULL;
3283
3284 counts = XALLOCAVEC (tree, fd->collapse);
3285 expand_omp_for_init_counts (fd, &gsi, entry_bb, counts,
3286 fin_bb, first_zero_iter,
3287 dummy_bb, dummy, l2_dom_bb);
3288 t = NULL_TREE;
3289 }
3290 else if (gimple_omp_for_combined_into_p (fd->for_stmt))
3291 t = integer_one_node;
3292 else
3293 t = fold_binary (fd->loop.cond_code, boolean_type_node,
3294 fold_convert (type, fd->loop.n1),
3295 fold_convert (type, fd->loop.n2));
3296 if (fd->collapse == 1
3297 && TYPE_UNSIGNED (type)
3298 && (t == NULL_TREE || !integer_onep (t)))
3299 {
3300 n1 = fold_convert (type, unshare_expr (fd->loop.n1));
3301 n1 = force_gimple_operand_gsi (&gsi, n1, true, NULL_TREE,
3302 true, GSI_SAME_STMT);
3303 n2 = fold_convert (type, unshare_expr (fd->loop.n2));
3304 n2 = force_gimple_operand_gsi (&gsi, n2, true, NULL_TREE,
3305 true, GSI_SAME_STMT);
3306 gcond *cond_stmt = gimple_build_cond (fd->loop.cond_code, n1, n2,
3307 NULL_TREE, NULL_TREE);
3308 gsi_insert_before (&gsi, cond_stmt, GSI_SAME_STMT);
3309 if (walk_tree (gimple_cond_lhs_ptr (cond_stmt),
3310 expand_omp_regimplify_p, NULL, NULL)
3311 || walk_tree (gimple_cond_rhs_ptr (cond_stmt),
3312 expand_omp_regimplify_p, NULL, NULL))
3313 {
3314 gsi = gsi_for_stmt (cond_stmt);
3315 gimple_regimplify_operands (cond_stmt, &gsi);
3316 }
3317 ep = split_block (entry_bb, cond_stmt);
3318 ep->flags = EDGE_TRUE_VALUE;
3319 entry_bb = ep->dest;
3320 ep->probability = REG_BR_PROB_BASE - (REG_BR_PROB_BASE / 2000 - 1);
3321 ep = make_edge (ep->src, fin_bb, EDGE_FALSE_VALUE);
3322 ep->probability = REG_BR_PROB_BASE / 2000 - 1;
3323 if (gimple_in_ssa_p (cfun))
3324 {
3325 int dest_idx = find_edge (entry_bb, fin_bb)->dest_idx;
3326 for (gphi_iterator gpi = gsi_start_phis (fin_bb);
3327 !gsi_end_p (gpi); gsi_next (&gpi))
3328 {
3329 gphi *phi = gpi.phi ();
3330 add_phi_arg (phi, gimple_phi_arg_def (phi, dest_idx),
3331 ep, UNKNOWN_LOCATION);
3332 }
3333 }
3334 gsi = gsi_last_bb (entry_bb);
3335 }
3336
3337 switch (gimple_omp_for_kind (fd->for_stmt))
3338 {
3339 case GF_OMP_FOR_KIND_FOR:
3340 nthreads = builtin_decl_explicit (BUILT_IN_OMP_GET_NUM_THREADS);
3341 threadid = builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM);
3342 break;
3343 case GF_OMP_FOR_KIND_DISTRIBUTE:
3344 nthreads = builtin_decl_explicit (BUILT_IN_OMP_GET_NUM_TEAMS);
3345 threadid = builtin_decl_explicit (BUILT_IN_OMP_GET_TEAM_NUM);
3346 break;
3347 default:
3348 gcc_unreachable ();
3349 }
3350 nthreads = build_call_expr (nthreads, 0);
3351 nthreads = fold_convert (itype, nthreads);
3352 nthreads = force_gimple_operand_gsi (&gsi, nthreads, true, NULL_TREE,
3353 true, GSI_SAME_STMT);
3354 threadid = build_call_expr (threadid, 0);
3355 threadid = fold_convert (itype, threadid);
3356 threadid = force_gimple_operand_gsi (&gsi, threadid, true, NULL_TREE,
3357 true, GSI_SAME_STMT);
3358
3359 n1 = fd->loop.n1;
3360 n2 = fd->loop.n2;
3361 step = fd->loop.step;
3362 if (gimple_omp_for_combined_into_p (fd->for_stmt))
3363 {
3364 tree innerc = omp_find_clause (gimple_omp_for_clauses (fd->for_stmt),
3365 OMP_CLAUSE__LOOPTEMP_);
3366 gcc_assert (innerc);
3367 n1 = OMP_CLAUSE_DECL (innerc);
3368 innerc = omp_find_clause (OMP_CLAUSE_CHAIN (innerc),
3369 OMP_CLAUSE__LOOPTEMP_);
3370 gcc_assert (innerc);
3371 n2 = OMP_CLAUSE_DECL (innerc);
3372 }
3373 n1 = force_gimple_operand_gsi (&gsi, fold_convert (type, n1),
3374 true, NULL_TREE, true, GSI_SAME_STMT);
3375 n2 = force_gimple_operand_gsi (&gsi, fold_convert (itype, n2),
3376 true, NULL_TREE, true, GSI_SAME_STMT);
3377 step = force_gimple_operand_gsi (&gsi, fold_convert (itype, step),
3378 true, NULL_TREE, true, GSI_SAME_STMT);
3379
3380 t = build_int_cst (itype, (fd->loop.cond_code == LT_EXPR ? -1 : 1));
3381 t = fold_build2 (PLUS_EXPR, itype, step, t);
3382 t = fold_build2 (PLUS_EXPR, itype, t, n2);
3383 t = fold_build2 (MINUS_EXPR, itype, t, fold_convert (itype, n1));
3384 if (TYPE_UNSIGNED (itype) && fd->loop.cond_code == GT_EXPR)
3385 t = fold_build2 (TRUNC_DIV_EXPR, itype,
3386 fold_build1 (NEGATE_EXPR, itype, t),
3387 fold_build1 (NEGATE_EXPR, itype, step));
3388 else
3389 t = fold_build2 (TRUNC_DIV_EXPR, itype, t, step);
3390 t = fold_convert (itype, t);
3391 n = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);
3392
3393 q = create_tmp_reg (itype, "q");
3394 t = fold_build2 (TRUNC_DIV_EXPR, itype, n, nthreads);
3395 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE, true, GSI_SAME_STMT);
3396 gsi_insert_before (&gsi, gimple_build_assign (q, t), GSI_SAME_STMT);
3397
3398 tt = create_tmp_reg (itype, "tt");
3399 t = fold_build2 (TRUNC_MOD_EXPR, itype, n, nthreads);
3400 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE, true, GSI_SAME_STMT);
3401 gsi_insert_before (&gsi, gimple_build_assign (tt, t), GSI_SAME_STMT);
3402
3403 t = build2 (LT_EXPR, boolean_type_node, threadid, tt);
3404 gcond *cond_stmt = gimple_build_cond_empty (t);
3405 gsi_insert_before (&gsi, cond_stmt, GSI_SAME_STMT);
3406
3407 second_bb = split_block (entry_bb, cond_stmt)->dest;
3408 gsi = gsi_last_bb (second_bb);
3409 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
3410
3411 gsi_insert_before (&gsi, gimple_build_assign (tt, build_int_cst (itype, 0)),
3412 GSI_SAME_STMT);
3413 gassign *assign_stmt
3414 = gimple_build_assign (q, PLUS_EXPR, q, build_int_cst (itype, 1));
3415 gsi_insert_before (&gsi, assign_stmt, GSI_SAME_STMT);
3416
3417 third_bb = split_block (second_bb, assign_stmt)->dest;
3418 gsi = gsi_last_bb (third_bb);
3419 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
3420
3421 t = build2 (MULT_EXPR, itype, q, threadid);
3422 t = build2 (PLUS_EXPR, itype, t, tt);
3423 s0 = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);
3424
3425 t = fold_build2 (PLUS_EXPR, itype, s0, q);
3426 e0 = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);
3427
3428 t = build2 (GE_EXPR, boolean_type_node, s0, e0);
3429 gsi_insert_before (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT);
3430
3431 /* Remove the GIMPLE_OMP_FOR statement. */
3432 gsi_remove (&gsi, true);
3433
3434 /* Setup code for sequential iteration goes in SEQ_START_BB. */
3435 gsi = gsi_start_bb (seq_start_bb);
3436
3437 tree startvar = fd->loop.v;
3438 tree endvar = NULL_TREE;
3439
3440 if (gimple_omp_for_combined_p (fd->for_stmt))
3441 {
3442 tree clauses = gimple_code (inner_stmt) == GIMPLE_OMP_PARALLEL
3443 ? gimple_omp_parallel_clauses (inner_stmt)
3444 : gimple_omp_for_clauses (inner_stmt);
3445 tree innerc = omp_find_clause (clauses, OMP_CLAUSE__LOOPTEMP_);
3446 gcc_assert (innerc);
3447 startvar = OMP_CLAUSE_DECL (innerc);
3448 innerc = omp_find_clause (OMP_CLAUSE_CHAIN (innerc),
3449 OMP_CLAUSE__LOOPTEMP_);
3450 gcc_assert (innerc);
3451 endvar = OMP_CLAUSE_DECL (innerc);
3452 if (fd->collapse > 1 && TREE_CODE (fd->loop.n2) != INTEGER_CST
3453 && gimple_omp_for_kind (fd->for_stmt) == GF_OMP_FOR_KIND_DISTRIBUTE)
3454 {
3455 int i;
3456 for (i = 1; i < fd->collapse; i++)
3457 {
3458 innerc = omp_find_clause (OMP_CLAUSE_CHAIN (innerc),
3459 OMP_CLAUSE__LOOPTEMP_);
3460 gcc_assert (innerc);
3461 }
3462 innerc = omp_find_clause (OMP_CLAUSE_CHAIN (innerc),
3463 OMP_CLAUSE__LOOPTEMP_);
3464 if (innerc)
3465 {
3466 /* If needed (distribute parallel for with lastprivate),
3467 propagate down the total number of iterations. */
3468 tree t = fold_convert (TREE_TYPE (OMP_CLAUSE_DECL (innerc)),
3469 fd->loop.n2);
3470 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE, false,
3471 GSI_CONTINUE_LINKING);
3472 assign_stmt = gimple_build_assign (OMP_CLAUSE_DECL (innerc), t);
3473 gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING);
3474 }
3475 }
3476 }
3477 t = fold_convert (itype, s0);
3478 t = fold_build2 (MULT_EXPR, itype, t, step);
3479 if (POINTER_TYPE_P (type))
3480 t = fold_build_pointer_plus (n1, t);
3481 else
3482 t = fold_build2 (PLUS_EXPR, type, t, n1);
3483 t = fold_convert (TREE_TYPE (startvar), t);
3484 t = force_gimple_operand_gsi (&gsi, t,
3485 DECL_P (startvar)
3486 && TREE_ADDRESSABLE (startvar),
3487 NULL_TREE, false, GSI_CONTINUE_LINKING);
3488 assign_stmt = gimple_build_assign (startvar, t);
3489 gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING);
3490
3491 t = fold_convert (itype, e0);
3492 t = fold_build2 (MULT_EXPR, itype, t, step);
3493 if (POINTER_TYPE_P (type))
3494 t = fold_build_pointer_plus (n1, t);
3495 else
3496 t = fold_build2 (PLUS_EXPR, type, t, n1);
3497 t = fold_convert (TREE_TYPE (startvar), t);
3498 e = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3499 false, GSI_CONTINUE_LINKING);
3500 if (endvar)
3501 {
3502 assign_stmt = gimple_build_assign (endvar, e);
3503 gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING);
3504 if (useless_type_conversion_p (TREE_TYPE (fd->loop.v), TREE_TYPE (e)))
3505 assign_stmt = gimple_build_assign (fd->loop.v, e);
3506 else
3507 assign_stmt = gimple_build_assign (fd->loop.v, NOP_EXPR, e);
3508 gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING);
3509 }
3510 /* Handle linear clause adjustments. */
3511 tree itercnt = NULL_TREE;
3512 if (gimple_omp_for_kind (fd->for_stmt) == GF_OMP_FOR_KIND_FOR)
3513 for (tree c = gimple_omp_for_clauses (fd->for_stmt);
3514 c; c = OMP_CLAUSE_CHAIN (c))
3515 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LINEAR
3516 && !OMP_CLAUSE_LINEAR_NO_COPYIN (c))
3517 {
3518 tree d = OMP_CLAUSE_DECL (c);
3519 bool is_ref = omp_is_reference (d);
3520 tree t = d, a, dest;
3521 if (is_ref)
3522 t = build_simple_mem_ref_loc (OMP_CLAUSE_LOCATION (c), t);
3523 if (itercnt == NULL_TREE)
3524 {
3525 if (gimple_omp_for_combined_into_p (fd->for_stmt))
3526 {
3527 itercnt = fold_build2 (MINUS_EXPR, itype,
3528 fold_convert (itype, n1),
3529 fold_convert (itype, fd->loop.n1));
3530 itercnt = fold_build2 (EXACT_DIV_EXPR, itype, itercnt, step);
3531 itercnt = fold_build2 (PLUS_EXPR, itype, itercnt, s0);
3532 itercnt = force_gimple_operand_gsi (&gsi, itercnt, true,
3533 NULL_TREE, false,
3534 GSI_CONTINUE_LINKING);
3535 }
3536 else
3537 itercnt = s0;
3538 }
3539 tree type = TREE_TYPE (t);
3540 if (POINTER_TYPE_P (type))
3541 type = sizetype;
3542 a = fold_build2 (MULT_EXPR, type,
3543 fold_convert (type, itercnt),
3544 fold_convert (type, OMP_CLAUSE_LINEAR_STEP (c)));
3545 dest = unshare_expr (t);
3546 t = fold_build2 (type == TREE_TYPE (t) ? PLUS_EXPR
3547 : POINTER_PLUS_EXPR, TREE_TYPE (t), t, a);
3548 t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3549 false, GSI_CONTINUE_LINKING);
3550 assign_stmt = gimple_build_assign (dest, t);
3551 gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING);
3552 }
3553 if (fd->collapse > 1)
3554 expand_omp_for_init_vars (fd, &gsi, counts, inner_stmt, startvar);
3555
3556 if (!broken_loop)
3557 {
3558 /* The code controlling the sequential loop replaces the
3559 GIMPLE_OMP_CONTINUE. */
3560 gsi = gsi_last_bb (cont_bb);
3561 gomp_continue *cont_stmt = as_a <gomp_continue *> (gsi_stmt (gsi));
3562 gcc_assert (gimple_code (cont_stmt) == GIMPLE_OMP_CONTINUE);
3563 vmain = gimple_omp_continue_control_use (cont_stmt);
3564 vback = gimple_omp_continue_control_def (cont_stmt);
3565
3566 if (!gimple_omp_for_combined_p (fd->for_stmt))
3567 {
3568 if (POINTER_TYPE_P (type))
3569 t = fold_build_pointer_plus (vmain, step);
3570 else
3571 t = fold_build2 (PLUS_EXPR, type, vmain, step);
3572 t = force_gimple_operand_gsi (&gsi, t,
3573 DECL_P (vback)
3574 && TREE_ADDRESSABLE (vback),
3575 NULL_TREE, true, GSI_SAME_STMT);
3576 assign_stmt = gimple_build_assign (vback, t);
3577 gsi_insert_before (&gsi, assign_stmt, GSI_SAME_STMT);
3578
3579 t = build2 (fd->loop.cond_code, boolean_type_node,
3580 DECL_P (vback) && TREE_ADDRESSABLE (vback)
3581 ? t : vback, e);
3582 gsi_insert_before (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT);
3583 }
3584
3585 /* Remove the GIMPLE_OMP_CONTINUE statement. */
3586 gsi_remove (&gsi, true);
3587
3588 if (fd->collapse > 1 && !gimple_omp_for_combined_p (fd->for_stmt))
3589 collapse_bb = extract_omp_for_update_vars (fd, cont_bb, body_bb);
3590 }
3591
3592 /* Replace the GIMPLE_OMP_RETURN with a barrier, or nothing. */
3593 gsi = gsi_last_bb (exit_bb);
3594 if (!gimple_omp_return_nowait_p (gsi_stmt (gsi)))
3595 {
3596 t = gimple_omp_return_lhs (gsi_stmt (gsi));
3597 gsi_insert_after (&gsi, omp_build_barrier (t), GSI_SAME_STMT);
3598 }
3599 gsi_remove (&gsi, true);
3600
3601 /* Connect all the blocks. */
3602 ep = make_edge (entry_bb, third_bb, EDGE_FALSE_VALUE);
3603 ep->probability = REG_BR_PROB_BASE / 4 * 3;
3604 ep = find_edge (entry_bb, second_bb);
3605 ep->flags = EDGE_TRUE_VALUE;
3606 ep->probability = REG_BR_PROB_BASE / 4;
3607 find_edge (third_bb, seq_start_bb)->flags = EDGE_FALSE_VALUE;
3608 find_edge (third_bb, fin_bb)->flags = EDGE_TRUE_VALUE;
3609
3610 if (!broken_loop)
3611 {
3612 ep = find_edge (cont_bb, body_bb);
3613 if (ep == NULL)
3614 {
3615 ep = BRANCH_EDGE (cont_bb);
3616 gcc_assert (single_succ (ep->dest) == body_bb);
3617 }
3618 if (gimple_omp_for_combined_p (fd->for_stmt))
3619 {
3620 remove_edge (ep);
3621 ep = NULL;
3622 }
3623 else if (fd->collapse > 1)
3624 {
3625 remove_edge (ep);
3626 ep = make_edge (cont_bb, collapse_bb, EDGE_TRUE_VALUE);
3627 }
3628 else
3629 ep->flags = EDGE_TRUE_VALUE;
3630 find_edge (cont_bb, fin_bb)->flags
3631 = ep ? EDGE_FALSE_VALUE : EDGE_FALLTHRU;
3632 }
3633
3634 set_immediate_dominator (CDI_DOMINATORS, second_bb, entry_bb);
3635 set_immediate_dominator (CDI_DOMINATORS, third_bb, entry_bb);
3636 set_immediate_dominator (CDI_DOMINATORS, seq_start_bb, third_bb);
3637
3638 set_immediate_dominator (CDI_DOMINATORS, body_bb,
3639 recompute_dominator (CDI_DOMINATORS, body_bb));
3640 set_immediate_dominator (CDI_DOMINATORS, fin_bb,
3641 recompute_dominator (CDI_DOMINATORS, fin_bb));
3642
3643 struct loop *loop = body_bb->loop_father;
3644 if (loop != entry_bb->loop_father)
3645 {
3646 gcc_assert (broken_loop || loop->header == body_bb);
3647 gcc_assert (broken_loop
3648 || loop->latch == region->cont
3649 || single_pred (loop->latch) == region->cont);
3650 return;
3651 }
3652
3653 if (!broken_loop && !gimple_omp_for_combined_p (fd->for_stmt))
3654 {
3655 loop = alloc_loop ();
3656 loop->header = body_bb;
3657 if (collapse_bb == NULL)
3658 loop->latch = cont_bb;
3659 add_loop (loop, body_bb->loop_father);
3660 }
3661 }
3662
3663 /* Return phi in E->DEST with ARG on edge E. */
3664
3665 static gphi *
3666 find_phi_with_arg_on_edge (tree arg, edge e)
3667 {
3668 basic_block bb = e->dest;
3669
3670 for (gphi_iterator gpi = gsi_start_phis (bb);
3671 !gsi_end_p (gpi);
3672 gsi_next (&gpi))
3673 {
3674 gphi *phi = gpi.phi ();
3675 if (PHI_ARG_DEF_FROM_EDGE (phi, e) == arg)
3676 return phi;
3677 }
3678
3679 return NULL;
3680 }
3681
3682 /* A subroutine of expand_omp_for. Generate code for a parallel
3683 loop with static schedule and a specified chunk size. Given
3684 parameters:
3685
3686 for (V = N1; V cond N2; V += STEP) BODY;
3687
3688 where COND is "<" or ">", we generate pseudocode
3689
3690 if ((__typeof (V)) -1 > 0 && N2 cond N1) goto L2;
3691 if (cond is <)
3692 adj = STEP - 1;
3693 else
3694 adj = STEP + 1;
3695 if ((__typeof (V)) -1 > 0 && cond is >)
3696 n = -(adj + N2 - N1) / -STEP;
3697 else
3698 n = (adj + N2 - N1) / STEP;
3699 trip = 0;
3700 V = threadid * CHUNK * STEP + N1; -- this extra definition of V is
3701 here so that V is defined
3702 if the loop is not entered
3703 L0:
3704 s0 = (trip * nthreads + threadid) * CHUNK;
3705 e0 = min (s0 + CHUNK, n);
3706 if (s0 < n) goto L1; else goto L4;
3707 L1:
3708 V = s0 * STEP + N1;
3709 e = e0 * STEP + N1;
3710 L2:
3711 BODY;
3712 V += STEP;
3713 if (V cond e) goto L2; else goto L3;
3714 L3:
3715 trip += 1;
3716 goto L0;
3717 L4:
3718 */
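/* Illustrative sketch (added for clarity; not part of the original sources
   and not used by the compiler): a plain C model of how the pseudocode
   above hands out chunks.  On each trip the thread numbered THREADID gets
   the half-open range [s0, e0) with s0 = (trip * nthreads + threadid) * chunk
   and e0 = min (s0 + chunk, n).  For example, with n = 10, nthreads = 2 and
   chunk = 3, thread 0 receives [0, 3) and [6, 9) while thread 1 receives
   [3, 6) and [9, 10).  */
#if 0
/* Hypothetical helper, provided only as an illustration of the schedule.  */
static void
example_static_chunk_ranges (unsigned long n, unsigned long nthreads,
                             unsigned long threadid, unsigned long chunk,
                             void (*visit) (unsigned long, unsigned long))
{
  for (unsigned long trip = 0; ; trip++)
    {
      unsigned long s0 = (trip * nthreads + threadid) * chunk;
      if (s0 >= n)
        break;
      unsigned long e0 = s0 + chunk;
      if (e0 > n)
        e0 = n;
      visit (s0, e0);
    }
}
#endif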
3719
3720 static void
3721 expand_omp_for_static_chunk (struct omp_region *region,
3722 struct omp_for_data *fd, gimple *inner_stmt)
3723 {
3724 tree n, s0, e0, e, t;
3725 tree trip_var, trip_init, trip_main, trip_back, nthreads, threadid;
3726 tree type, itype, vmain, vback, vextra;
3727 basic_block entry_bb, exit_bb, body_bb, seq_start_bb, iter_part_bb;
3728 basic_block trip_update_bb = NULL, cont_bb, collapse_bb = NULL, fin_bb;
3729 gimple_stmt_iterator gsi;
3730 edge se;
3731 bool broken_loop = region->cont == NULL;
3732 tree *counts = NULL;
3733 tree n1, n2, step;
3734
3735 itype = type = TREE_TYPE (fd->loop.v);
3736 if (POINTER_TYPE_P (type))
3737 itype = signed_type_for (type);
3738
3739 entry_bb = region->entry;
3740 se = split_block (entry_bb, last_stmt (entry_bb));
3741 entry_bb = se->src;
3742 iter_part_bb = se->dest;
3743 cont_bb = region->cont;
3744 gcc_assert (EDGE_COUNT (iter_part_bb->succs) == 2);
3745 fin_bb = BRANCH_EDGE (iter_part_bb)->dest;
3746 gcc_assert (broken_loop
3747 || fin_bb == FALLTHRU_EDGE (cont_bb)->dest);
3748 seq_start_bb = split_edge (FALLTHRU_EDGE (iter_part_bb));
3749 body_bb = single_succ (seq_start_bb);
3750 if (!broken_loop)
3751 {
3752 gcc_assert (BRANCH_EDGE (cont_bb)->dest == body_bb
3753 || single_succ (BRANCH_EDGE (cont_bb)->dest) == body_bb);
3754 gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
3755 trip_update_bb = split_edge (FALLTHRU_EDGE (cont_bb));
3756 }
3757 exit_bb = region->exit;
3758
3759 /* Trip and adjustment setup goes in ENTRY_BB. */
3760 gsi = gsi_last_bb (entry_bb);
3761 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
3762
3763 if (fd->collapse > 1)
3764 {
3765 int first_zero_iter = -1, dummy = -1;
3766 basic_block l2_dom_bb = NULL, dummy_bb = NULL;
3767
3768 counts = XALLOCAVEC (tree, fd->collapse);
3769 expand_omp_for_init_counts (fd, &gsi, entry_bb, counts,
3770 fin_bb, first_zero_iter,
3771 dummy_bb, dummy, l2_dom_bb);
3772 t = NULL_TREE;
3773 }
3774 else if (gimple_omp_for_combined_into_p (fd->for_stmt))
3775 t = integer_one_node;
3776 else
3777 t = fold_binary (fd->loop.cond_code, boolean_type_node,
3778 fold_convert (type, fd->loop.n1),
3779 fold_convert (type, fd->loop.n2));
3780 if (fd->collapse == 1
3781 && TYPE_UNSIGNED (type)
3782 && (t == NULL_TREE || !integer_onep (t)))
3783 {
3784 n1 = fold_convert (type, unshare_expr (fd->loop.n1));
3785 n1 = force_gimple_operand_gsi (&gsi, n1, true, NULL_TREE,
3786 true, GSI_SAME_STMT);
3787 n2 = fold_convert (type, unshare_expr (fd->loop.n2));
3788 n2 = force_gimple_operand_gsi (&gsi, n2, true, NULL_TREE,
3789 true, GSI_SAME_STMT);
3790 gcond *cond_stmt = gimple_build_cond (fd->loop.cond_code, n1, n2,
3791 NULL_TREE, NULL_TREE);
3792 gsi_insert_before (&gsi, cond_stmt, GSI_SAME_STMT);
3793 if (walk_tree (gimple_cond_lhs_ptr (cond_stmt),
3794 expand_omp_regimplify_p, NULL, NULL)
3795 || walk_tree (gimple_cond_rhs_ptr (cond_stmt),
3796 expand_omp_regimplify_p, NULL, NULL))
3797 {
3798 gsi = gsi_for_stmt (cond_stmt);
3799 gimple_regimplify_operands (cond_stmt, &gsi);
3800 }
3801 se = split_block (entry_bb, cond_stmt);
3802 se->flags = EDGE_TRUE_VALUE;
3803 entry_bb = se->dest;
3804 se->probability = REG_BR_PROB_BASE - (REG_BR_PROB_BASE / 2000 - 1);
3805 se = make_edge (se->src, fin_bb, EDGE_FALSE_VALUE);
3806 se->probability = REG_BR_PROB_BASE / 2000 - 1;
3807 if (gimple_in_ssa_p (cfun))
3808 {
3809 int dest_idx = find_edge (iter_part_bb, fin_bb)->dest_idx;
3810 for (gphi_iterator gpi = gsi_start_phis (fin_bb);
3811 !gsi_end_p (gpi); gsi_next (&gpi))
3812 {
3813 gphi *phi = gpi.phi ();
3814 add_phi_arg (phi, gimple_phi_arg_def (phi, dest_idx),
3815 se, UNKNOWN_LOCATION);
3816 }
3817 }
3818 gsi = gsi_last_bb (entry_bb);
3819 }
3820
3821 switch (gimple_omp_for_kind (fd->for_stmt))
3822 {
3823 case GF_OMP_FOR_KIND_FOR:
3824 nthreads = builtin_decl_explicit (BUILT_IN_OMP_GET_NUM_THREADS);
3825 threadid = builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM);
3826 break;
3827 case GF_OMP_FOR_KIND_DISTRIBUTE:
3828 nthreads = builtin_decl_explicit (BUILT_IN_OMP_GET_NUM_TEAMS);
3829 threadid = builtin_decl_explicit (BUILT_IN_OMP_GET_TEAM_NUM);
3830 break;
3831 default:
3832 gcc_unreachable ();
3833 }
3834 nthreads = build_call_expr (nthreads, 0);
3835 nthreads = fold_convert (itype, nthreads);
3836 nthreads = force_gimple_operand_gsi (&gsi, nthreads, true, NULL_TREE,
3837 true, GSI_SAME_STMT);
3838 threadid = build_call_expr (threadid, 0);
3839 threadid = fold_convert (itype, threadid);
3840 threadid = force_gimple_operand_gsi (&gsi, threadid, true, NULL_TREE,
3841 true, GSI_SAME_STMT);
3842
3843 n1 = fd->loop.n1;
3844 n2 = fd->loop.n2;
3845 step = fd->loop.step;
3846 if (gimple_omp_for_combined_into_p (fd->for_stmt))
3847 {
3848 tree innerc = omp_find_clause (gimple_omp_for_clauses (fd->for_stmt),
3849 OMP_CLAUSE__LOOPTEMP_);
3850 gcc_assert (innerc);
3851 n1 = OMP_CLAUSE_DECL (innerc);
3852 innerc = omp_find_clause (OMP_CLAUSE_CHAIN (innerc),
3853 OMP_CLAUSE__LOOPTEMP_);
3854 gcc_assert (innerc);
3855 n2 = OMP_CLAUSE_DECL (innerc);
3856 }
3857 n1 = force_gimple_operand_gsi (&gsi, fold_convert (type, n1),
3858 true, NULL_TREE, true, GSI_SAME_STMT);
3859 n2 = force_gimple_operand_gsi (&gsi, fold_convert (itype, n2),
3860 true, NULL_TREE, true, GSI_SAME_STMT);
3861 step = force_gimple_operand_gsi (&gsi, fold_convert (itype, step),
3862 true, NULL_TREE, true, GSI_SAME_STMT);
3863 tree chunk_size = fold_convert (itype, fd->chunk_size);
3864 chunk_size = omp_adjust_chunk_size (chunk_size, fd->simd_schedule);
3865 chunk_size
3866 = force_gimple_operand_gsi (&gsi, chunk_size, true, NULL_TREE, true,
3867 GSI_SAME_STMT);
3868
3869 t = build_int_cst (itype, (fd->loop.cond_code == LT_EXPR ? -1 : 1));
3870 t = fold_build2 (PLUS_EXPR, itype, step, t);
3871 t = fold_build2 (PLUS_EXPR, itype, t, n2);
3872 t = fold_build2 (MINUS_EXPR, itype, t, fold_convert (itype, n1));
3873 if (TYPE_UNSIGNED (itype) && fd->loop.cond_code == GT_EXPR)
3874 t = fold_build2 (TRUNC_DIV_EXPR, itype,
3875 fold_build1 (NEGATE_EXPR, itype, t),
3876 fold_build1 (NEGATE_EXPR, itype, step));
3877 else
3878 t = fold_build2 (TRUNC_DIV_EXPR, itype, t, step);
3879 t = fold_convert (itype, t);
3880 n = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3881 true, GSI_SAME_STMT);
3882
3883 trip_var = create_tmp_reg (itype, ".trip");
3884 if (gimple_in_ssa_p (cfun))
3885 {
3886 trip_init = make_ssa_name (trip_var);
3887 trip_main = make_ssa_name (trip_var);
3888 trip_back = make_ssa_name (trip_var);
3889 }
3890 else
3891 {
3892 trip_init = trip_var;
3893 trip_main = trip_var;
3894 trip_back = trip_var;
3895 }
3896
3897 gassign *assign_stmt
3898 = gimple_build_assign (trip_init, build_int_cst (itype, 0));
3899 gsi_insert_before (&gsi, assign_stmt, GSI_SAME_STMT);
3900
3901 t = fold_build2 (MULT_EXPR, itype, threadid, chunk_size);
3902 t = fold_build2 (MULT_EXPR, itype, t, step);
3903 if (POINTER_TYPE_P (type))
3904 t = fold_build_pointer_plus (n1, t);
3905 else
3906 t = fold_build2 (PLUS_EXPR, type, t, n1);
3907 vextra = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3908 true, GSI_SAME_STMT);
3909
3910 /* Remove the GIMPLE_OMP_FOR. */
3911 gsi_remove (&gsi, true);
3912
3913 gimple_stmt_iterator gsif = gsi;
3914
3915 /* Iteration space partitioning goes in ITER_PART_BB. */
3916 gsi = gsi_last_bb (iter_part_bb);
3917
3918 t = fold_build2 (MULT_EXPR, itype, trip_main, nthreads);
3919 t = fold_build2 (PLUS_EXPR, itype, t, threadid);
3920 t = fold_build2 (MULT_EXPR, itype, t, chunk_size);
3921 s0 = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3922 false, GSI_CONTINUE_LINKING);
3923
3924 t = fold_build2 (PLUS_EXPR, itype, s0, chunk_size);
3925 t = fold_build2 (MIN_EXPR, itype, t, n);
3926 e0 = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3927 false, GSI_CONTINUE_LINKING);
3928
3929 t = build2 (LT_EXPR, boolean_type_node, s0, n);
3930 gsi_insert_after (&gsi, gimple_build_cond_empty (t), GSI_CONTINUE_LINKING);
3931
3932 /* Setup code for sequential iteration goes in SEQ_START_BB. */
3933 gsi = gsi_start_bb (seq_start_bb);
3934
3935 tree startvar = fd->loop.v;
3936 tree endvar = NULL_TREE;
3937
3938 if (gimple_omp_for_combined_p (fd->for_stmt))
3939 {
3940 tree clauses = gimple_code (inner_stmt) == GIMPLE_OMP_PARALLEL
3941 ? gimple_omp_parallel_clauses (inner_stmt)
3942 : gimple_omp_for_clauses (inner_stmt);
3943 tree innerc = omp_find_clause (clauses, OMP_CLAUSE__LOOPTEMP_);
3944 gcc_assert (innerc);
3945 startvar = OMP_CLAUSE_DECL (innerc);
3946 innerc = omp_find_clause (OMP_CLAUSE_CHAIN (innerc),
3947 OMP_CLAUSE__LOOPTEMP_);
3948 gcc_assert (innerc);
3949 endvar = OMP_CLAUSE_DECL (innerc);
3950 if (fd->collapse > 1 && TREE_CODE (fd->loop.n2) != INTEGER_CST
3951 && gimple_omp_for_kind (fd->for_stmt) == GF_OMP_FOR_KIND_DISTRIBUTE)
3952 {
3953 int i;
3954 for (i = 1; i < fd->collapse; i++)
3955 {
3956 innerc = omp_find_clause (OMP_CLAUSE_CHAIN (innerc),
3957 OMP_CLAUSE__LOOPTEMP_);
3958 gcc_assert (innerc);
3959 }
3960 innerc = omp_find_clause (OMP_CLAUSE_CHAIN (innerc),
3961 OMP_CLAUSE__LOOPTEMP_);
3962 if (innerc)
3963 {
3964 /* If needed (distribute parallel for with lastprivate),
3965 propagate down the total number of iterations. */
3966 tree t = fold_convert (TREE_TYPE (OMP_CLAUSE_DECL (innerc)),
3967 fd->loop.n2);
3968 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE, false,
3969 GSI_CONTINUE_LINKING);
3970 assign_stmt = gimple_build_assign (OMP_CLAUSE_DECL (innerc), t);
3971 gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING);
3972 }
3973 }
3974 }
3975
3976 t = fold_convert (itype, s0);
3977 t = fold_build2 (MULT_EXPR, itype, t, step);
3978 if (POINTER_TYPE_P (type))
3979 t = fold_build_pointer_plus (n1, t);
3980 else
3981 t = fold_build2 (PLUS_EXPR, type, t, n1);
3982 t = fold_convert (TREE_TYPE (startvar), t);
3983 t = force_gimple_operand_gsi (&gsi, t,
3984 DECL_P (startvar)
3985 && TREE_ADDRESSABLE (startvar),
3986 NULL_TREE, false, GSI_CONTINUE_LINKING);
3987 assign_stmt = gimple_build_assign (startvar, t);
3988 gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING);
3989
3990 t = fold_convert (itype, e0);
3991 t = fold_build2 (MULT_EXPR, itype, t, step);
3992 if (POINTER_TYPE_P (type))
3993 t = fold_build_pointer_plus (n1, t);
3994 else
3995 t = fold_build2 (PLUS_EXPR, type, t, n1);
3996 t = fold_convert (TREE_TYPE (startvar), t);
3997 e = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3998 false, GSI_CONTINUE_LINKING);
3999 if (endvar)
4000 {
4001 assign_stmt = gimple_build_assign (endvar, e);
4002 gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING);
4003 if (useless_type_conversion_p (TREE_TYPE (fd->loop.v), TREE_TYPE (e)))
4004 assign_stmt = gimple_build_assign (fd->loop.v, e);
4005 else
4006 assign_stmt = gimple_build_assign (fd->loop.v, NOP_EXPR, e);
4007 gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING);
4008 }
4009 /* Handle linear clause adjustments. */
4010 tree itercnt = NULL_TREE, itercntbias = NULL_TREE;
4011 if (gimple_omp_for_kind (fd->for_stmt) == GF_OMP_FOR_KIND_FOR)
4012 for (tree c = gimple_omp_for_clauses (fd->for_stmt);
4013 c; c = OMP_CLAUSE_CHAIN (c))
4014 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LINEAR
4015 && !OMP_CLAUSE_LINEAR_NO_COPYIN (c))
4016 {
4017 tree d = OMP_CLAUSE_DECL (c);
4018 bool is_ref = omp_is_reference (d);
4019 tree t = d, a, dest;
4020 if (is_ref)
4021 t = build_simple_mem_ref_loc (OMP_CLAUSE_LOCATION (c), t);
4022 tree type = TREE_TYPE (t);
4023 if (POINTER_TYPE_P (type))
4024 type = sizetype;
4025 dest = unshare_expr (t);
4026 tree v = create_tmp_var (TREE_TYPE (t), NULL);
4027 expand_omp_build_assign (&gsif, v, t);
4028 if (itercnt == NULL_TREE)
4029 {
4030 if (gimple_omp_for_combined_into_p (fd->for_stmt))
4031 {
4032 itercntbias
4033 = fold_build2 (MINUS_EXPR, itype, fold_convert (itype, n1),
4034 fold_convert (itype, fd->loop.n1));
4035 itercntbias = fold_build2 (EXACT_DIV_EXPR, itype,
4036 itercntbias, step);
4037 itercntbias
4038 = force_gimple_operand_gsi (&gsif, itercntbias, true,
4039 NULL_TREE, true,
4040 GSI_SAME_STMT);
4041 itercnt = fold_build2 (PLUS_EXPR, itype, itercntbias, s0);
4042 itercnt = force_gimple_operand_gsi (&gsi, itercnt, true,
4043 NULL_TREE, false,
4044 GSI_CONTINUE_LINKING);
4045 }
4046 else
4047 itercnt = s0;
4048 }
4049 a = fold_build2 (MULT_EXPR, type,
4050 fold_convert (type, itercnt),
4051 fold_convert (type, OMP_CLAUSE_LINEAR_STEP (c)));
4052 t = fold_build2 (type == TREE_TYPE (t) ? PLUS_EXPR
4053 : POINTER_PLUS_EXPR, TREE_TYPE (t), v, a);
4054 t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
4055 false, GSI_CONTINUE_LINKING);
4056 assign_stmt = gimple_build_assign (dest, t);
4057 gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING);
4058 }
4059 if (fd->collapse > 1)
4060 expand_omp_for_init_vars (fd, &gsi, counts, inner_stmt, startvar);
4061
4062 if (!broken_loop)
4063 {
4064 /* The code controlling the sequential loop goes in CONT_BB,
4065 replacing the GIMPLE_OMP_CONTINUE. */
4066 gsi = gsi_last_bb (cont_bb);
4067 gomp_continue *cont_stmt = as_a <gomp_continue *> (gsi_stmt (gsi));
4068 vmain = gimple_omp_continue_control_use (cont_stmt);
4069 vback = gimple_omp_continue_control_def (cont_stmt);
4070
4071 if (!gimple_omp_for_combined_p (fd->for_stmt))
4072 {
4073 if (POINTER_TYPE_P (type))
4074 t = fold_build_pointer_plus (vmain, step);
4075 else
4076 t = fold_build2 (PLUS_EXPR, type, vmain, step);
4077 if (DECL_P (vback) && TREE_ADDRESSABLE (vback))
4078 t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
4079 true, GSI_SAME_STMT);
4080 assign_stmt = gimple_build_assign (vback, t);
4081 gsi_insert_before (&gsi, assign_stmt, GSI_SAME_STMT);
4082
4083 if (tree_int_cst_equal (fd->chunk_size, integer_one_node))
4084 t = build2 (EQ_EXPR, boolean_type_node,
4085 build_int_cst (itype, 0),
4086 build_int_cst (itype, 1));
4087 else
4088 t = build2 (fd->loop.cond_code, boolean_type_node,
4089 DECL_P (vback) && TREE_ADDRESSABLE (vback)
4090 ? t : vback, e);
4091 gsi_insert_before (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT);
4092 }
4093
4094 /* Remove GIMPLE_OMP_CONTINUE. */
4095 gsi_remove (&gsi, true);
4096
4097 if (fd->collapse > 1 && !gimple_omp_for_combined_p (fd->for_stmt))
4098 collapse_bb = extract_omp_for_update_vars (fd, cont_bb, body_bb);
4099
4100 /* Trip update code goes into TRIP_UPDATE_BB. */
4101 gsi = gsi_start_bb (trip_update_bb);
4102
4103 t = build_int_cst (itype, 1);
4104 t = build2 (PLUS_EXPR, itype, trip_main, t);
4105 assign_stmt = gimple_build_assign (trip_back, t);
4106 gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING);
4107 }
4108
4109 /* Replace the GIMPLE_OMP_RETURN with a barrier, or nothing. */
4110 gsi = gsi_last_bb (exit_bb);
4111 if (!gimple_omp_return_nowait_p (gsi_stmt (gsi)))
4112 {
4113 t = gimple_omp_return_lhs (gsi_stmt (gsi));
4114 gsi_insert_after (&gsi, omp_build_barrier (t), GSI_SAME_STMT);
4115 }
4116 gsi_remove (&gsi, true);
4117
4118 /* Connect the new blocks. */
4119 find_edge (iter_part_bb, seq_start_bb)->flags = EDGE_TRUE_VALUE;
4120 find_edge (iter_part_bb, fin_bb)->flags = EDGE_FALSE_VALUE;
4121
4122 if (!broken_loop)
4123 {
4124 se = find_edge (cont_bb, body_bb);
4125 if (se == NULL)
4126 {
4127 se = BRANCH_EDGE (cont_bb);
4128 gcc_assert (single_succ (se->dest) == body_bb);
4129 }
4130 if (gimple_omp_for_combined_p (fd->for_stmt))
4131 {
4132 remove_edge (se);
4133 se = NULL;
4134 }
4135 else if (fd->collapse > 1)
4136 {
4137 remove_edge (se);
4138 se = make_edge (cont_bb, collapse_bb, EDGE_TRUE_VALUE);
4139 }
4140 else
4141 se->flags = EDGE_TRUE_VALUE;
4142 find_edge (cont_bb, trip_update_bb)->flags
4143 = se ? EDGE_FALSE_VALUE : EDGE_FALLTHRU;
4144
4145 redirect_edge_and_branch (single_succ_edge (trip_update_bb),
4146 iter_part_bb);
4147 }
4148
4149 if (gimple_in_ssa_p (cfun))
4150 {
4151 gphi_iterator psi;
4152 gphi *phi;
4153 edge re, ene;
4154 edge_var_map *vm;
4155 size_t i;
4156
4157 gcc_assert (fd->collapse == 1 && !broken_loop);
4158
4159 /* When we redirect the edge from trip_update_bb to iter_part_bb, we
4160 remove arguments of the phi nodes in fin_bb. We need to create
4161 appropriate phi nodes in iter_part_bb instead. */
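      /* Added note (not part of the original sources): after that
         redirection, iter_part_bb heads the trip loop with one predecessor
         edge from entry_bb and one from trip_update_bb, so a value that
         previously merged only in fin_bb must now merge in iter_part_bb
         first.  The loop below builds those phis and, where fin_bb keeps
         other predecessors, feeds their results into the original fin_bb
         phis.  */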
4162 se = find_edge (iter_part_bb, fin_bb);
4163 re = single_succ_edge (trip_update_bb);
4164 vec<edge_var_map> *head = redirect_edge_var_map_vector (re);
4165 ene = single_succ_edge (entry_bb);
4166
4167 psi = gsi_start_phis (fin_bb);
4168 for (i = 0; !gsi_end_p (psi) && head->iterate (i, &vm);
4169 gsi_next (&psi), ++i)
4170 {
4171 gphi *nphi;
4172 source_location locus;
4173
4174 phi = psi.phi ();
4175 t = gimple_phi_result (phi);
4176 gcc_assert (t == redirect_edge_var_map_result (vm));
4177
4178 if (!single_pred_p (fin_bb))
4179 t = copy_ssa_name (t, phi);
4180
4181 nphi = create_phi_node (t, iter_part_bb);
4182
4183 t = PHI_ARG_DEF_FROM_EDGE (phi, se);
4184 locus = gimple_phi_arg_location_from_edge (phi, se);
4185
4186 /* A special case -- fd->loop.v is not yet computed in
4187 iter_part_bb, we need to use vextra instead. */
4188 if (t == fd->loop.v)
4189 t = vextra;
4190 add_phi_arg (nphi, t, ene, locus);
4191 locus = redirect_edge_var_map_location (vm);
4192 tree back_arg = redirect_edge_var_map_def (vm);
4193 add_phi_arg (nphi, back_arg, re, locus);
4194 edge ce = find_edge (cont_bb, body_bb);
4195 if (ce == NULL)
4196 {
4197 ce = BRANCH_EDGE (cont_bb);
4198 gcc_assert (single_succ (ce->dest) == body_bb);
4199 ce = single_succ_edge (ce->dest);
4200 }
4201 gphi *inner_loop_phi = find_phi_with_arg_on_edge (back_arg, ce);
4202 gcc_assert (inner_loop_phi != NULL);
4203 add_phi_arg (inner_loop_phi, gimple_phi_result (nphi),
4204 find_edge (seq_start_bb, body_bb), locus);
4205
4206 if (!single_pred_p (fin_bb))
4207 add_phi_arg (phi, gimple_phi_result (nphi), se, locus);
4208 }
4209 gcc_assert (gsi_end_p (psi) && (head == NULL || i == head->length ()));
4210 redirect_edge_var_map_clear (re);
4211 if (single_pred_p (fin_bb))
4212 while (1)
4213 {
4214 psi = gsi_start_phis (fin_bb);
4215 if (gsi_end_p (psi))
4216 break;
4217 remove_phi_node (&psi, false);
4218 }
4219
4220 /* Make phi node for trip. */
4221 phi = create_phi_node (trip_main, iter_part_bb);
4222 add_phi_arg (phi, trip_back, single_succ_edge (trip_update_bb),
4223 UNKNOWN_LOCATION);
4224 add_phi_arg (phi, trip_init, single_succ_edge (entry_bb),
4225 UNKNOWN_LOCATION);
4226 }
4227
4228 if (!broken_loop)
4229 set_immediate_dominator (CDI_DOMINATORS, trip_update_bb, cont_bb);
4230 set_immediate_dominator (CDI_DOMINATORS, iter_part_bb,
4231 recompute_dominator (CDI_DOMINATORS, iter_part_bb));
4232 set_immediate_dominator (CDI_DOMINATORS, fin_bb,
4233 recompute_dominator (CDI_DOMINATORS, fin_bb));
4234 set_immediate_dominator (CDI_DOMINATORS, seq_start_bb,
4235 recompute_dominator (CDI_DOMINATORS, seq_start_bb));
4236 set_immediate_dominator (CDI_DOMINATORS, body_bb,
4237 recompute_dominator (CDI_DOMINATORS, body_bb));
4238
4239 if (!broken_loop)
4240 {
4241 struct loop *loop = body_bb->loop_father;
4242 struct loop *trip_loop = alloc_loop ();
4243 trip_loop->header = iter_part_bb;
4244 trip_loop->latch = trip_update_bb;
4245 add_loop (trip_loop, iter_part_bb->loop_father);
4246
4247 if (loop != entry_bb->loop_father)
4248 {
4249 gcc_assert (loop->header == body_bb);
4250 gcc_assert (loop->latch == region->cont
4251 || single_pred (loop->latch) == region->cont);
4252 trip_loop->inner = loop;
4253 return;
4254 }
4255
4256 if (!gimple_omp_for_combined_p (fd->for_stmt))
4257 {
4258 loop = alloc_loop ();
4259 loop->header = body_bb;
4260 if (collapse_bb == NULL)
4261 loop->latch = cont_bb;
4262 add_loop (loop, trip_loop);
4263 }
4264 }
4265 }
4266
4267 /* A subroutine of expand_omp_for. Generate code for _Cilk_for loop.
4268 Given parameters:
4269 for (V = N1; V cond N2; V += STEP) BODY;
4270
4271 where COND is "<" or ">" or "!=", we generate pseudocode
4272
4273 for (ind_var = low; ind_var < high; ind_var++)
4274 {
4275 V = n1 + (ind_var * STEP)
4276
4277 <BODY>
4278 }
4279
4280 In the above pseudocode, low and high are function parameters of the
4281    child function. In the function below, we insert a temporary
4282    variable that will make calls to two OMP functions that will not be
4283 found in the body of _Cilk_for (since OMP_FOR cannot be mixed
4284 with _Cilk_for). These functions are replaced with low and high
4285 by the function that handles taskreg. */
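/* Illustrative example (added for clarity, not part of the original
   sources): for a source loop such as

       _Cilk_for (V = 0; V < 30; V += 3) BODY;

   the child function receives __low and __high and, following the
   pseudocode above, executes

       for (ind_var = __low; ind_var < __high; ind_var++)
         {
           V = 0 + ind_var * 3;
           BODY;
         }

   leaving the runtime free to split the [__low, __high) range across
   workers as it sees fit.  */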
4286
4287
4288 static void
4289 expand_cilk_for (struct omp_region *region, struct omp_for_data *fd)
4290 {
4291 bool broken_loop = region->cont == NULL;
4292 basic_block entry_bb = region->entry;
4293 basic_block cont_bb = region->cont;
4294
4295 gcc_assert (EDGE_COUNT (entry_bb->succs) == 2);
4296 gcc_assert (broken_loop
4297 || BRANCH_EDGE (entry_bb)->dest == FALLTHRU_EDGE (cont_bb)->dest);
4298 basic_block l0_bb = FALLTHRU_EDGE (entry_bb)->dest;
4299 basic_block l1_bb, l2_bb;
4300
4301 if (!broken_loop)
4302 {
4303 gcc_assert (BRANCH_EDGE (cont_bb)->dest == l0_bb);
4304 gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
4305 l1_bb = split_block (cont_bb, last_stmt (cont_bb))->dest;
4306 l2_bb = BRANCH_EDGE (entry_bb)->dest;
4307 }
4308 else
4309 {
4310 BRANCH_EDGE (entry_bb)->flags &= ~EDGE_ABNORMAL;
4311 l1_bb = split_edge (BRANCH_EDGE (entry_bb));
4312 l2_bb = single_succ (l1_bb);
4313 }
4314 basic_block exit_bb = region->exit;
4315 basic_block l2_dom_bb = NULL;
4316
4317 gimple_stmt_iterator gsi = gsi_last_bb (entry_bb);
4318
4319    /* The statements below, up to the "tree high_val = ..." one, are pseudo
4320       statements used to pass information to expand_omp_taskreg.
4321 low_val and high_val will be replaced by the __low and __high
4322 parameter from the child function.
4323
4324       The call_exprs part is a place-holder; it is mainly used
4325 to distinctly identify to the top-level part that this is
4326 where we should put low and high (reasoning given in header
4327 comment). */
4328
4329 gomp_parallel *par_stmt
4330 = as_a <gomp_parallel *> (last_stmt (region->outer->entry));
4331 tree child_fndecl = gimple_omp_parallel_child_fn (par_stmt);
4332 tree t, low_val = NULL_TREE, high_val = NULL_TREE;
4333 for (t = DECL_ARGUMENTS (child_fndecl); t; t = TREE_CHAIN (t))
4334 {
4335 if (!strcmp (IDENTIFIER_POINTER (DECL_NAME (t)), "__high"))
4336 high_val = t;
4337 else if (!strcmp (IDENTIFIER_POINTER (DECL_NAME (t)), "__low"))
4338 low_val = t;
4339 }
4340 gcc_assert (low_val && high_val);
4341
4342 tree type = TREE_TYPE (low_val);
4343 tree ind_var = create_tmp_reg (type, "__cilk_ind_var");
4344 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
4345
4346 /* Not needed in SSA form right now. */
4347 gcc_assert (!gimple_in_ssa_p (cfun));
4348 if (l2_dom_bb == NULL)
4349 l2_dom_bb = l1_bb;
4350
4351 tree n1 = low_val;
4352 tree n2 = high_val;
4353
4354 gimple *stmt = gimple_build_assign (ind_var, n1);
4355
4356 /* Replace the GIMPLE_OMP_FOR statement. */
4357 gsi_replace (&gsi, stmt, true);
4358
4359 if (!broken_loop)
4360 {
4361 /* Code to control the increment goes in the CONT_BB. */
4362 gsi = gsi_last_bb (cont_bb);
4363 stmt = gsi_stmt (gsi);
4364 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_CONTINUE);
4365 stmt = gimple_build_assign (ind_var, PLUS_EXPR, ind_var,
4366 build_one_cst (type));
4367
4368 /* Replace GIMPLE_OMP_CONTINUE. */
4369 gsi_replace (&gsi, stmt, true);
4370 }
4371
4372 /* Emit the condition in L1_BB. */
4373 gsi = gsi_after_labels (l1_bb);
4374 t = fold_build2 (MULT_EXPR, TREE_TYPE (fd->loop.step),
4375 fold_convert (TREE_TYPE (fd->loop.step), ind_var),
4376 fd->loop.step);
4377 if (POINTER_TYPE_P (TREE_TYPE (fd->loop.n1)))
4378 t = fold_build2 (POINTER_PLUS_EXPR, TREE_TYPE (fd->loop.n1),
4379 fd->loop.n1, fold_convert (sizetype, t));
4380 else
4381 t = fold_build2 (PLUS_EXPR, TREE_TYPE (fd->loop.n1),
4382 fd->loop.n1, fold_convert (TREE_TYPE (fd->loop.n1), t));
4383 t = fold_convert (TREE_TYPE (fd->loop.v), t);
4384 expand_omp_build_assign (&gsi, fd->loop.v, t);
4385
4386 /* The condition is always '<' since the runtime will fill in the low
4387 and high values. */
4388 stmt = gimple_build_cond (LT_EXPR, ind_var, n2, NULL_TREE, NULL_TREE);
4389 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
4390
4391 /* Remove GIMPLE_OMP_RETURN. */
4392 gsi = gsi_last_bb (exit_bb);
4393 gsi_remove (&gsi, true);
4394
4395 /* Connect the new blocks. */
4396 remove_edge (FALLTHRU_EDGE (entry_bb));
4397
4398 edge e, ne;
4399 if (!broken_loop)
4400 {
4401 remove_edge (BRANCH_EDGE (entry_bb));
4402 make_edge (entry_bb, l1_bb, EDGE_FALLTHRU);
4403
4404 e = BRANCH_EDGE (l1_bb);
4405 ne = FALLTHRU_EDGE (l1_bb);
4406 e->flags = EDGE_TRUE_VALUE;
4407 }
4408 else
4409 {
4410 single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
4411
4412 ne = single_succ_edge (l1_bb);
4413 e = make_edge (l1_bb, l0_bb, EDGE_TRUE_VALUE);
4414
4415 }
4416 ne->flags = EDGE_FALSE_VALUE;
4417 e->probability = REG_BR_PROB_BASE * 7 / 8;
4418 ne->probability = REG_BR_PROB_BASE / 8;
4419
4420 set_immediate_dominator (CDI_DOMINATORS, l1_bb, entry_bb);
4421 set_immediate_dominator (CDI_DOMINATORS, l2_bb, l2_dom_bb);
4422 set_immediate_dominator (CDI_DOMINATORS, l0_bb, l1_bb);
4423
4424 if (!broken_loop)
4425 {
4426 struct loop *loop = alloc_loop ();
4427 loop->header = l1_bb;
4428 loop->latch = cont_bb;
4429 add_loop (loop, l1_bb->loop_father);
4430 loop->safelen = INT_MAX;
4431 }
4432
4433 /* Pick the correct library function based on the precision of the
4434 induction variable type. */
4435 tree lib_fun = NULL_TREE;
4436 if (TYPE_PRECISION (type) == 32)
4437 lib_fun = cilk_for_32_fndecl;
4438 else if (TYPE_PRECISION (type) == 64)
4439 lib_fun = cilk_for_64_fndecl;
4440 else
4441 gcc_unreachable ();
4442
4443 gcc_assert (fd->sched_kind == OMP_CLAUSE_SCHEDULE_CILKFOR);
4444
4445    /* WS_ARGS contains the library function flavor to call
4446       (__libcilkrts_cilk_for_64 or __libcilkrts_cilk_for_32), and the
4447 user-defined grain value. If the user does not define one, then zero
4448 is passed in by the parser. */
4449 vec_alloc (region->ws_args, 2);
4450 region->ws_args->quick_push (lib_fun);
4451 region->ws_args->quick_push (fd->chunk_size);
4452 }
4453
4454 /* A subroutine of expand_omp_for. Generate code for a simd non-worksharing
4455 loop. Given parameters:
4456
4457 for (V = N1; V cond N2; V += STEP) BODY;
4458
4459 where COND is "<" or ">", we generate pseudocode
4460
4461 V = N1;
4462 goto L1;
4463 L0:
4464 BODY;
4465 V += STEP;
4466 L1:
4467 if (V cond N2) goto L0; else goto L2;
4468 L2:
4469
4470 For collapsed loops, given parameters:
4471 collapse(3)
4472 for (V1 = N11; V1 cond1 N12; V1 += STEP1)
4473 for (V2 = N21; V2 cond2 N22; V2 += STEP2)
4474 for (V3 = N31; V3 cond3 N32; V3 += STEP3)
4475 BODY;
4476
4477 we generate pseudocode
4478
4479 if (cond3 is <)
4480 adj = STEP3 - 1;
4481 else
4482 adj = STEP3 + 1;
4483 count3 = (adj + N32 - N31) / STEP3;
4484 if (cond2 is <)
4485 adj = STEP2 - 1;
4486 else
4487 adj = STEP2 + 1;
4488 count2 = (adj + N22 - N21) / STEP2;
4489 if (cond1 is <)
4490 adj = STEP1 - 1;
4491 else
4492 adj = STEP1 + 1;
4493 count1 = (adj + N12 - N11) / STEP1;
4494 count = count1 * count2 * count3;
4495 V = 0;
4496 V1 = N11;
4497 V2 = N21;
4498 V3 = N31;
4499 goto L1;
4500 L0:
4501 BODY;
4502 V += 1;
4503 V3 += STEP3;
4504 V2 += (V3 cond3 N32) ? 0 : STEP2;
4505 V3 = (V3 cond3 N32) ? V3 : N31;
4506 V1 += (V2 cond2 N22) ? 0 : STEP1;
4507 V2 = (V2 cond2 N22) ? V2 : N21;
4508 L1:
4509 if (V < count) goto L0; else goto L2;
4510 L2:
4511
4512 */
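/* Worked example (added for clarity, not part of the original sources):
   with collapse(2), N11 = 0, N12 = 2, STEP1 = 1 and N21 = 0, N22 = 3,
   STEP2 = 1, the scheme above gives count1 = 2, count2 = 3 and count = 6.
   The single linear counter V runs 0..5 while the pair (V1, V2) steps
   through (0,0) (0,1) (0,2) (1,0) (1,1) (1,2): the innermost variable
   wraps back to N21 and the outer one advances by STEP1 exactly when
   "V2 cond2 N22" becomes false.  */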
4513
4514 static void
4515 expand_omp_simd (struct omp_region *region, struct omp_for_data *fd)
4516 {
4517 tree type, t;
4518 basic_block entry_bb, cont_bb, exit_bb, l0_bb, l1_bb, l2_bb, l2_dom_bb;
4519 gimple_stmt_iterator gsi;
4520 gimple *stmt;
4521 gcond *cond_stmt;
4522 bool broken_loop = region->cont == NULL;
4523 edge e, ne;
4524 tree *counts = NULL;
4525 int i;
4526 int safelen_int = INT_MAX;
4527 tree safelen = omp_find_clause (gimple_omp_for_clauses (fd->for_stmt),
4528 OMP_CLAUSE_SAFELEN);
4529 tree simduid = omp_find_clause (gimple_omp_for_clauses (fd->for_stmt),
4530 OMP_CLAUSE__SIMDUID_);
4531 tree n1, n2;
4532
4533 if (safelen)
4534 {
4535 safelen = OMP_CLAUSE_SAFELEN_EXPR (safelen);
4536 if (TREE_CODE (safelen) != INTEGER_CST)
4537 safelen_int = 0;
4538 else if (tree_fits_uhwi_p (safelen) && tree_to_uhwi (safelen) < INT_MAX)
4539 safelen_int = tree_to_uhwi (safelen);
4540 if (safelen_int == 1)
4541 safelen_int = 0;
4542 }
4543 type = TREE_TYPE (fd->loop.v);
4544 entry_bb = region->entry;
4545 cont_bb = region->cont;
4546 gcc_assert (EDGE_COUNT (entry_bb->succs) == 2);
4547 gcc_assert (broken_loop
4548 || BRANCH_EDGE (entry_bb)->dest == FALLTHRU_EDGE (cont_bb)->dest);
4549 l0_bb = FALLTHRU_EDGE (entry_bb)->dest;
4550 if (!broken_loop)
4551 {
4552 gcc_assert (BRANCH_EDGE (cont_bb)->dest == l0_bb);
4553 gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
4554 l1_bb = split_block (cont_bb, last_stmt (cont_bb))->dest;
4555 l2_bb = BRANCH_EDGE (entry_bb)->dest;
4556 }
4557 else
4558 {
4559 BRANCH_EDGE (entry_bb)->flags &= ~EDGE_ABNORMAL;
4560 l1_bb = split_edge (BRANCH_EDGE (entry_bb));
4561 l2_bb = single_succ (l1_bb);
4562 }
4563 exit_bb = region->exit;
4564 l2_dom_bb = NULL;
4565
4566 gsi = gsi_last_bb (entry_bb);
4567
4568 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
4569 /* Not needed in SSA form right now. */
4570 gcc_assert (!gimple_in_ssa_p (cfun));
4571 if (fd->collapse > 1)
4572 {
4573 int first_zero_iter = -1, dummy = -1;
4574 basic_block zero_iter_bb = l2_bb, dummy_bb = NULL;
4575
4576 counts = XALLOCAVEC (tree, fd->collapse);
4577 expand_omp_for_init_counts (fd, &gsi, entry_bb, counts,
4578 zero_iter_bb, first_zero_iter,
4579 dummy_bb, dummy, l2_dom_bb);
4580 }
4581 if (l2_dom_bb == NULL)
4582 l2_dom_bb = l1_bb;
4583
4584 n1 = fd->loop.n1;
4585 n2 = fd->loop.n2;
4586 if (gimple_omp_for_combined_into_p (fd->for_stmt))
4587 {
4588 tree innerc = omp_find_clause (gimple_omp_for_clauses (fd->for_stmt),
4589 OMP_CLAUSE__LOOPTEMP_);
4590 gcc_assert (innerc);
4591 n1 = OMP_CLAUSE_DECL (innerc);
4592 innerc = omp_find_clause (OMP_CLAUSE_CHAIN (innerc),
4593 OMP_CLAUSE__LOOPTEMP_);
4594 gcc_assert (innerc);
4595 n2 = OMP_CLAUSE_DECL (innerc);
4596 }
4597 tree step = fd->loop.step;
4598
4599 bool is_simt = omp_find_clause (gimple_omp_for_clauses (fd->for_stmt),
4600 OMP_CLAUSE__SIMT_);
4601 if (is_simt)
4602 {
4603 cfun->curr_properties &= ~PROP_gimple_lomp_dev;
4604 is_simt = safelen_int > 1;
4605 }
4606 tree simt_lane = NULL_TREE, simt_maxlane = NULL_TREE;
4607 if (is_simt)
4608 {
4609 simt_lane = create_tmp_var (unsigned_type_node);
4610 gimple *g = gimple_build_call_internal (IFN_GOMP_SIMT_LANE, 0);
4611 gimple_call_set_lhs (g, simt_lane);
4612 gsi_insert_before (&gsi, g, GSI_SAME_STMT);
4613 tree offset = fold_build2 (MULT_EXPR, TREE_TYPE (step), step,
4614 fold_convert (TREE_TYPE (step), simt_lane));
4615 n1 = fold_convert (type, n1);
4616 if (POINTER_TYPE_P (type))
4617 n1 = fold_build_pointer_plus (n1, offset);
4618 else
4619 n1 = fold_build2 (PLUS_EXPR, type, n1, fold_convert (type, offset));
4620
4621 /* Collapsed loops not handled for SIMT yet: limit to one lane only. */
4622 if (fd->collapse > 1)
4623 simt_maxlane = build_one_cst (unsigned_type_node);
4624 else if (safelen_int < omp_max_simt_vf ())
4625 simt_maxlane = build_int_cst (unsigned_type_node, safelen_int);
4626 tree vf
4627 = build_call_expr_internal_loc (UNKNOWN_LOCATION, IFN_GOMP_SIMT_VF,
4628 unsigned_type_node, 0);
4629 if (simt_maxlane)
4630 vf = fold_build2 (MIN_EXPR, unsigned_type_node, vf, simt_maxlane);
4631 vf = fold_convert (TREE_TYPE (step), vf);
4632 step = fold_build2 (MULT_EXPR, TREE_TYPE (step), step, vf);
4633 }
4634
4635 expand_omp_build_assign (&gsi, fd->loop.v, fold_convert (type, n1));
4636 if (fd->collapse > 1)
4637 {
4638 if (gimple_omp_for_combined_into_p (fd->for_stmt))
4639 {
4640 gsi_prev (&gsi);
4641 expand_omp_for_init_vars (fd, &gsi, counts, NULL, n1);
4642 gsi_next (&gsi);
4643 }
4644 else
4645 for (i = 0; i < fd->collapse; i++)
4646 {
4647 tree itype = TREE_TYPE (fd->loops[i].v);
4648 if (POINTER_TYPE_P (itype))
4649 itype = signed_type_for (itype);
4650 t = fold_convert (TREE_TYPE (fd->loops[i].v), fd->loops[i].n1);
4651 expand_omp_build_assign (&gsi, fd->loops[i].v, t);
4652 }
4653 }
4654
4655 /* Remove the GIMPLE_OMP_FOR statement. */
4656 gsi_remove (&gsi, true);
4657
4658 if (!broken_loop)
4659 {
4660 /* Code to control the increment goes in the CONT_BB. */
4661 gsi = gsi_last_bb (cont_bb);
4662 stmt = gsi_stmt (gsi);
4663 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_CONTINUE);
4664
4665 if (POINTER_TYPE_P (type))
4666 t = fold_build_pointer_plus (fd->loop.v, step);
4667 else
4668 t = fold_build2 (PLUS_EXPR, type, fd->loop.v, step);
4669 expand_omp_build_assign (&gsi, fd->loop.v, t);
4670
4671 if (fd->collapse > 1)
4672 {
4673 i = fd->collapse - 1;
4674 if (POINTER_TYPE_P (TREE_TYPE (fd->loops[i].v)))
4675 {
4676 t = fold_convert (sizetype, fd->loops[i].step);
4677 t = fold_build_pointer_plus (fd->loops[i].v, t);
4678 }
4679 else
4680 {
4681 t = fold_convert (TREE_TYPE (fd->loops[i].v),
4682 fd->loops[i].step);
4683 t = fold_build2 (PLUS_EXPR, TREE_TYPE (fd->loops[i].v),
4684 fd->loops[i].v, t);
4685 }
4686 expand_omp_build_assign (&gsi, fd->loops[i].v, t);
4687
4688 for (i = fd->collapse - 1; i > 0; i--)
4689 {
4690 tree itype = TREE_TYPE (fd->loops[i].v);
4691 tree itype2 = TREE_TYPE (fd->loops[i - 1].v);
4692 if (POINTER_TYPE_P (itype2))
4693 itype2 = signed_type_for (itype2);
4694 t = build3 (COND_EXPR, itype2,
4695 build2 (fd->loops[i].cond_code, boolean_type_node,
4696 fd->loops[i].v,
4697 fold_convert (itype, fd->loops[i].n2)),
4698 build_int_cst (itype2, 0),
4699 fold_convert (itype2, fd->loops[i - 1].step));
4700 if (POINTER_TYPE_P (TREE_TYPE (fd->loops[i - 1].v)))
4701 t = fold_build_pointer_plus (fd->loops[i - 1].v, t);
4702 else
4703 t = fold_build2 (PLUS_EXPR, itype2, fd->loops[i - 1].v, t);
4704 expand_omp_build_assign (&gsi, fd->loops[i - 1].v, t);
4705
4706 t = build3 (COND_EXPR, itype,
4707 build2 (fd->loops[i].cond_code, boolean_type_node,
4708 fd->loops[i].v,
4709 fold_convert (itype, fd->loops[i].n2)),
4710 fd->loops[i].v,
4711 fold_convert (itype, fd->loops[i].n1));
4712 expand_omp_build_assign (&gsi, fd->loops[i].v, t);
4713 }
4714 }
4715
4716 /* Remove GIMPLE_OMP_CONTINUE. */
4717 gsi_remove (&gsi, true);
4718 }
4719
4720 /* Emit the condition in L1_BB. */
4721 gsi = gsi_start_bb (l1_bb);
4722
4723 t = fold_convert (type, n2);
4724 t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
4725 false, GSI_CONTINUE_LINKING);
4726 tree v = fd->loop.v;
4727 if (DECL_P (v) && TREE_ADDRESSABLE (v))
4728 v = force_gimple_operand_gsi (&gsi, v, true, NULL_TREE,
4729 false, GSI_CONTINUE_LINKING);
4730 t = build2 (fd->loop.cond_code, boolean_type_node, v, t);
4731 cond_stmt = gimple_build_cond_empty (t);
4732 gsi_insert_after (&gsi, cond_stmt, GSI_CONTINUE_LINKING);
4733 if (walk_tree (gimple_cond_lhs_ptr (cond_stmt), expand_omp_regimplify_p,
4734 NULL, NULL)
4735 || walk_tree (gimple_cond_rhs_ptr (cond_stmt), expand_omp_regimplify_p,
4736 NULL, NULL))
4737 {
4738 gsi = gsi_for_stmt (cond_stmt);
4739 gimple_regimplify_operands (cond_stmt, &gsi);
4740 }
4741
4742 /* Add 'V -= STEP * (SIMT_VF - 1)' after the loop. */
4743 if (is_simt)
4744 {
4745 gsi = gsi_start_bb (l2_bb);
4746 step = fold_build2 (MINUS_EXPR, TREE_TYPE (step), fd->loop.step, step);
4747 if (POINTER_TYPE_P (type))
4748 t = fold_build_pointer_plus (fd->loop.v, step);
4749 else
4750 t = fold_build2 (PLUS_EXPR, type, fd->loop.v, step);
4751 expand_omp_build_assign (&gsi, fd->loop.v, t);
4752 }
4753
4754 /* Remove GIMPLE_OMP_RETURN. */
4755 gsi = gsi_last_bb (exit_bb);
4756 gsi_remove (&gsi, true);
4757
4758 /* Connect the new blocks. */
4759 remove_edge (FALLTHRU_EDGE (entry_bb));
4760
4761 if (!broken_loop)
4762 {
4763 remove_edge (BRANCH_EDGE (entry_bb));
4764 make_edge (entry_bb, l1_bb, EDGE_FALLTHRU);
4765
4766 e = BRANCH_EDGE (l1_bb);
4767 ne = FALLTHRU_EDGE (l1_bb);
4768 e->flags = EDGE_TRUE_VALUE;
4769 }
4770 else
4771 {
4772 single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
4773
4774 ne = single_succ_edge (l1_bb);
4775 e = make_edge (l1_bb, l0_bb, EDGE_TRUE_VALUE);
4776
4777 }
4778 ne->flags = EDGE_FALSE_VALUE;
4779 e->probability = REG_BR_PROB_BASE * 7 / 8;
4780 ne->probability = REG_BR_PROB_BASE / 8;
4781
4782 set_immediate_dominator (CDI_DOMINATORS, l1_bb, entry_bb);
4783 set_immediate_dominator (CDI_DOMINATORS, l0_bb, l1_bb);
4784
4785 if (simt_maxlane)
4786 {
4787 cond_stmt = gimple_build_cond (LT_EXPR, simt_lane, simt_maxlane,
4788 NULL_TREE, NULL_TREE);
4789 gsi = gsi_last_bb (entry_bb);
4790 gsi_insert_after (&gsi, cond_stmt, GSI_NEW_STMT);
4791 make_edge (entry_bb, l2_bb, EDGE_FALSE_VALUE);
4792 FALLTHRU_EDGE (entry_bb)->flags = EDGE_TRUE_VALUE;
4793 FALLTHRU_EDGE (entry_bb)->probability = REG_BR_PROB_BASE * 7 / 8;
4794 BRANCH_EDGE (entry_bb)->probability = REG_BR_PROB_BASE / 8;
4795 l2_dom_bb = entry_bb;
4796 }
4797 set_immediate_dominator (CDI_DOMINATORS, l2_bb, l2_dom_bb);
4798
4799 if (!broken_loop)
4800 {
4801 struct loop *loop = alloc_loop ();
4802 loop->header = l1_bb;
4803 loop->latch = cont_bb;
4804 add_loop (loop, l1_bb->loop_father);
4805 loop->safelen = safelen_int;
4806 if (simduid)
4807 {
4808 loop->simduid = OMP_CLAUSE__SIMDUID__DECL (simduid);
4809 cfun->has_simduid_loops = true;
4810 }
4811 /* If not -fno-tree-loop-vectorize, hint that we want to vectorize
4812 the loop. */
4813 if ((flag_tree_loop_vectorize
4814 || (!global_options_set.x_flag_tree_loop_vectorize
4815 && !global_options_set.x_flag_tree_vectorize))
4816 && flag_tree_loop_optimize
4817 && loop->safelen > 1)
4818 {
4819 loop->force_vectorize = true;
4820 cfun->has_force_vectorize_loops = true;
4821 }
4822 }
4823 else if (simduid)
4824 cfun->has_simduid_loops = true;
4825 }
4826
4827 /* Taskloop construct is represented after gimplification with
4828 two GIMPLE_OMP_FOR constructs with GIMPLE_OMP_TASK sandwiched
4829 in between them. This routine expands the outer GIMPLE_OMP_FOR,
4830 which should just compute all the needed loop temporaries
4831 for GIMPLE_OMP_TASK. */
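/* Illustrative note (added for clarity, not part of the original sources):
   for "#pragma omp taskloop" the gimplified nest is roughly

       GIMPLE_OMP_FOR            <- outer, expanded by this routine
         GIMPLE_OMP_TASK
           GIMPLE_OMP_FOR        <- inner, expanded by expand_omp_taskloop_for_inner
             BODY

   The outer construct never iterates by itself; it only fills in the
   _looptemp_ start/end temporaries, and GOMP_taskloop{,_ull} later hands
   each created task a single sub-range through them.  */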
4832
4833 static void
4834 expand_omp_taskloop_for_outer (struct omp_region *region,
4835 struct omp_for_data *fd,
4836 gimple *inner_stmt)
4837 {
4838 tree type, bias = NULL_TREE;
4839 basic_block entry_bb, cont_bb, exit_bb;
4840 gimple_stmt_iterator gsi;
4841 gassign *assign_stmt;
4842 tree *counts = NULL;
4843 int i;
4844
4845 gcc_assert (inner_stmt);
4846 gcc_assert (region->cont);
4847 gcc_assert (gimple_code (inner_stmt) == GIMPLE_OMP_TASK
4848 && gimple_omp_task_taskloop_p (inner_stmt));
4849 type = TREE_TYPE (fd->loop.v);
4850
4851 /* See if we need to bias by LLONG_MIN. */
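  /* Added clarification (not part of the original sources): when the
     iteration type selected for the runtime call is unsigned long long
     (the GOMP_taskloop_ull interface) but the loop variable is signed and
     its endpoints might straddle zero, a bias of TYPE_MIN_VALUE is added
     to both bounds below.  The addition wraps in the unsigned type and
     maps the signed range monotonically into it, so the runtime's unsigned
     comparisons still order the bounds correctly.  The bias is skipped
     when both endpoints are constants on the same side of zero.  */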
4852 if (fd->iter_type == long_long_unsigned_type_node
4853 && TREE_CODE (type) == INTEGER_TYPE
4854 && !TYPE_UNSIGNED (type))
4855 {
4856 tree n1, n2;
4857
4858 if (fd->loop.cond_code == LT_EXPR)
4859 {
4860 n1 = fd->loop.n1;
4861 n2 = fold_build2 (PLUS_EXPR, type, fd->loop.n2, fd->loop.step);
4862 }
4863 else
4864 {
4865 n1 = fold_build2 (MINUS_EXPR, type, fd->loop.n2, fd->loop.step);
4866 n2 = fd->loop.n1;
4867 }
4868 if (TREE_CODE (n1) != INTEGER_CST
4869 || TREE_CODE (n2) != INTEGER_CST
4870 || ((tree_int_cst_sgn (n1) < 0) ^ (tree_int_cst_sgn (n2) < 0)))
4871 bias = fold_convert (fd->iter_type, TYPE_MIN_VALUE (type));
4872 }
4873
4874 entry_bb = region->entry;
4875 cont_bb = region->cont;
4876 gcc_assert (EDGE_COUNT (entry_bb->succs) == 2);
4877 gcc_assert (BRANCH_EDGE (entry_bb)->dest == FALLTHRU_EDGE (cont_bb)->dest);
4878 exit_bb = region->exit;
4879
4880 gsi = gsi_last_bb (entry_bb);
4881 gimple *for_stmt = gsi_stmt (gsi);
4882 gcc_assert (gimple_code (for_stmt) == GIMPLE_OMP_FOR);
4883 if (fd->collapse > 1)
4884 {
4885 int first_zero_iter = -1, dummy = -1;
4886 basic_block zero_iter_bb = NULL, dummy_bb = NULL, l2_dom_bb = NULL;
4887
4888 counts = XALLOCAVEC (tree, fd->collapse);
4889 expand_omp_for_init_counts (fd, &gsi, entry_bb, counts,
4890 zero_iter_bb, first_zero_iter,
4891 dummy_bb, dummy, l2_dom_bb);
4892
4893 if (zero_iter_bb)
4894 {
4895 /* Some counts[i] vars might be uninitialized if
4896 some loop has zero iterations. But the body shouldn't
4897 be executed in that case, so just avoid uninit warnings. */
4898 for (i = first_zero_iter; i < fd->collapse; i++)
4899 if (SSA_VAR_P (counts[i]))
4900 TREE_NO_WARNING (counts[i]) = 1;
4901 gsi_prev (&gsi);
4902 edge e = split_block (entry_bb, gsi_stmt (gsi));
4903 entry_bb = e->dest;
4904 make_edge (zero_iter_bb, entry_bb, EDGE_FALLTHRU);
4905 gsi = gsi_last_bb (entry_bb);
4906 set_immediate_dominator (CDI_DOMINATORS, entry_bb,
4907 get_immediate_dominator (CDI_DOMINATORS,
4908 zero_iter_bb));
4909 }
4910 }
4911
4912 tree t0, t1;
4913 t1 = fd->loop.n2;
4914 t0 = fd->loop.n1;
4915 if (POINTER_TYPE_P (TREE_TYPE (t0))
4916 && TYPE_PRECISION (TREE_TYPE (t0))
4917 != TYPE_PRECISION (fd->iter_type))
4918 {
4919 /* Avoid casting pointers to integer of a different size. */
4920 tree itype = signed_type_for (type);
4921 t1 = fold_convert (fd->iter_type, fold_convert (itype, t1));
4922 t0 = fold_convert (fd->iter_type, fold_convert (itype, t0));
4923 }
4924 else
4925 {
4926 t1 = fold_convert (fd->iter_type, t1);
4927 t0 = fold_convert (fd->iter_type, t0);
4928 }
4929 if (bias)
4930 {
4931 t1 = fold_build2 (PLUS_EXPR, fd->iter_type, t1, bias);
4932 t0 = fold_build2 (PLUS_EXPR, fd->iter_type, t0, bias);
4933 }
4934
4935 tree innerc = omp_find_clause (gimple_omp_task_clauses (inner_stmt),
4936 OMP_CLAUSE__LOOPTEMP_);
4937 gcc_assert (innerc);
4938 tree startvar = OMP_CLAUSE_DECL (innerc);
4939 innerc = omp_find_clause (OMP_CLAUSE_CHAIN (innerc), OMP_CLAUSE__LOOPTEMP_);
4940 gcc_assert (innerc);
4941 tree endvar = OMP_CLAUSE_DECL (innerc);
4942 if (fd->collapse > 1 && TREE_CODE (fd->loop.n2) != INTEGER_CST)
4943 {
4944 gcc_assert (innerc);
4945 for (i = 1; i < fd->collapse; i++)
4946 {
4947 innerc = omp_find_clause (OMP_CLAUSE_CHAIN (innerc),
4948 OMP_CLAUSE__LOOPTEMP_);
4949 gcc_assert (innerc);
4950 }
4951 innerc = omp_find_clause (OMP_CLAUSE_CHAIN (innerc),
4952 OMP_CLAUSE__LOOPTEMP_);
4953 if (innerc)
4954 {
4955 /* If needed (inner taskloop has lastprivate clause), propagate
4956 down the total number of iterations. */
4957 tree t = force_gimple_operand_gsi (&gsi, fd->loop.n2, false,
4958 NULL_TREE, false,
4959 GSI_CONTINUE_LINKING);
4960 assign_stmt = gimple_build_assign (OMP_CLAUSE_DECL (innerc), t);
4961 gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING);
4962 }
4963 }
4964
4965 t0 = force_gimple_operand_gsi (&gsi, t0, false, NULL_TREE, false,
4966 GSI_CONTINUE_LINKING);
4967 assign_stmt = gimple_build_assign (startvar, t0);
4968 gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING);
4969
4970 t1 = force_gimple_operand_gsi (&gsi, t1, false, NULL_TREE, false,
4971 GSI_CONTINUE_LINKING);
4972 assign_stmt = gimple_build_assign (endvar, t1);
4973 gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING);
4974 if (fd->collapse > 1)
4975 expand_omp_for_init_vars (fd, &gsi, counts, inner_stmt, startvar);
4976
4977 /* Remove the GIMPLE_OMP_FOR statement. */
4978 gsi = gsi_for_stmt (for_stmt);
4979 gsi_remove (&gsi, true);
4980
4981 gsi = gsi_last_bb (cont_bb);
4982 gsi_remove (&gsi, true);
4983
4984 gsi = gsi_last_bb (exit_bb);
4985 gsi_remove (&gsi, true);
4986
4987 FALLTHRU_EDGE (entry_bb)->probability = REG_BR_PROB_BASE;
4988 remove_edge (BRANCH_EDGE (entry_bb));
4989 FALLTHRU_EDGE (cont_bb)->probability = REG_BR_PROB_BASE;
4990 remove_edge (BRANCH_EDGE (cont_bb));
4991 set_immediate_dominator (CDI_DOMINATORS, exit_bb, cont_bb);
4992 set_immediate_dominator (CDI_DOMINATORS, region->entry,
4993 recompute_dominator (CDI_DOMINATORS, region->entry));
4994 }
4995
4996 /* Taskloop construct is represented after gimplification with
4997 two GIMPLE_OMP_FOR constructs with GIMPLE_OMP_TASK sandwiched
4998 in between them. This routine expands the inner GIMPLE_OMP_FOR.
4999 GOMP_taskloop{,_ull} function arranges for each task to be given just
5000 a single range of iterations. */
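/* Added note (not part of the original sources): concretely, the code
   below reads this task's start and end from the first two _looptemp_
   clauses, compensates for the LLONG_MIN bias when the outer expansion
   applied one, assigns the values to the loop's start/end variables and
   then emits the sequential loop in place of GIMPLE_OMP_CONTINUE.  */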
5001
5002 static void
5003 expand_omp_taskloop_for_inner (struct omp_region *region,
5004 struct omp_for_data *fd,
5005 gimple *inner_stmt)
5006 {
5007 tree e, t, type, itype, vmain, vback, bias = NULL_TREE;
5008 basic_block entry_bb, exit_bb, body_bb, cont_bb, collapse_bb = NULL;
5009 basic_block fin_bb;
5010 gimple_stmt_iterator gsi;
5011 edge ep;
5012 bool broken_loop = region->cont == NULL;
5013 tree *counts = NULL;
5014 tree n1, n2, step;
5015
5016 itype = type = TREE_TYPE (fd->loop.v);
5017 if (POINTER_TYPE_P (type))
5018 itype = signed_type_for (type);
5019
5020 /* See if we need to bias by LLONG_MIN. */
5021 if (fd->iter_type == long_long_unsigned_type_node
5022 && TREE_CODE (type) == INTEGER_TYPE
5023 && !TYPE_UNSIGNED (type))
5024 {
5025 tree n1, n2;
5026
5027 if (fd->loop.cond_code == LT_EXPR)
5028 {
5029 n1 = fd->loop.n1;
5030 n2 = fold_build2 (PLUS_EXPR, type, fd->loop.n2, fd->loop.step);
5031 }
5032 else
5033 {
5034 n1 = fold_build2 (MINUS_EXPR, type, fd->loop.n2, fd->loop.step);
5035 n2 = fd->loop.n1;
5036 }
5037 if (TREE_CODE (n1) != INTEGER_CST
5038 || TREE_CODE (n2) != INTEGER_CST
5039 || ((tree_int_cst_sgn (n1) < 0) ^ (tree_int_cst_sgn (n2) < 0)))
5040 bias = fold_convert (fd->iter_type, TYPE_MIN_VALUE (type));
5041 }
5042
5043 entry_bb = region->entry;
5044 cont_bb = region->cont;
5045 gcc_assert (EDGE_COUNT (entry_bb->succs) == 2);
5046 fin_bb = BRANCH_EDGE (entry_bb)->dest;
5047 gcc_assert (broken_loop
5048 || (fin_bb == FALLTHRU_EDGE (cont_bb)->dest));
5049 body_bb = FALLTHRU_EDGE (entry_bb)->dest;
5050 if (!broken_loop)
5051 {
5052 gcc_assert (BRANCH_EDGE (cont_bb)->dest == body_bb);
5053 gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
5054 }
5055 exit_bb = region->exit;
5056
5057 /* Iteration space partitioning goes in ENTRY_BB. */
5058 gsi = gsi_last_bb (entry_bb);
5059 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
5060
5061 if (fd->collapse > 1)
5062 {
5063 int first_zero_iter = -1, dummy = -1;
5064 basic_block l2_dom_bb = NULL, dummy_bb = NULL;
5065
5066 counts = XALLOCAVEC (tree, fd->collapse);
5067 expand_omp_for_init_counts (fd, &gsi, entry_bb, counts,
5068 fin_bb, first_zero_iter,
5069 dummy_bb, dummy, l2_dom_bb);
5070 t = NULL_TREE;
5071 }
5072 else
5073 t = integer_one_node;
5074
5075 step = fd->loop.step;
5076 tree innerc = omp_find_clause (gimple_omp_for_clauses (fd->for_stmt),
5077 OMP_CLAUSE__LOOPTEMP_);
5078 gcc_assert (innerc);
5079 n1 = OMP_CLAUSE_DECL (innerc);
5080 innerc = omp_find_clause (OMP_CLAUSE_CHAIN (innerc), OMP_CLAUSE__LOOPTEMP_);
5081 gcc_assert (innerc);
5082 n2 = OMP_CLAUSE_DECL (innerc);
5083 if (bias)
5084 {
5085 n1 = fold_build2 (PLUS_EXPR, fd->iter_type, n1, bias);
5086 n2 = fold_build2 (PLUS_EXPR, fd->iter_type, n2, bias);
5087 }
5088 n1 = force_gimple_operand_gsi (&gsi, fold_convert (type, n1),
5089 true, NULL_TREE, true, GSI_SAME_STMT);
5090 n2 = force_gimple_operand_gsi (&gsi, fold_convert (itype, n2),
5091 true, NULL_TREE, true, GSI_SAME_STMT);
5092 step = force_gimple_operand_gsi (&gsi, fold_convert (itype, step),
5093 true, NULL_TREE, true, GSI_SAME_STMT);
5094
5095 tree startvar = fd->loop.v;
5096 tree endvar = NULL_TREE;
5097
5098 if (gimple_omp_for_combined_p (fd->for_stmt))
5099 {
5100 tree clauses = gimple_omp_for_clauses (inner_stmt);
5101 tree innerc = omp_find_clause (clauses, OMP_CLAUSE__LOOPTEMP_);
5102 gcc_assert (innerc);
5103 startvar = OMP_CLAUSE_DECL (innerc);
5104 innerc = omp_find_clause (OMP_CLAUSE_CHAIN (innerc),
5105 OMP_CLAUSE__LOOPTEMP_);
5106 gcc_assert (innerc);
5107 endvar = OMP_CLAUSE_DECL (innerc);
5108 }
5109 t = fold_convert (TREE_TYPE (startvar), n1);
5110 t = force_gimple_operand_gsi (&gsi, t,
5111 DECL_P (startvar)
5112 && TREE_ADDRESSABLE (startvar),
5113 NULL_TREE, false, GSI_CONTINUE_LINKING);
5114 gimple *assign_stmt = gimple_build_assign (startvar, t);
5115 gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING);
5116
5117 t = fold_convert (TREE_TYPE (startvar), n2);
5118 e = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
5119 false, GSI_CONTINUE_LINKING);
5120 if (endvar)
5121 {
5122 assign_stmt = gimple_build_assign (endvar, e);
5123 gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING);
5124 if (useless_type_conversion_p (TREE_TYPE (fd->loop.v), TREE_TYPE (e)))
5125 assign_stmt = gimple_build_assign (fd->loop.v, e);
5126 else
5127 assign_stmt = gimple_build_assign (fd->loop.v, NOP_EXPR, e);
5128 gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING);
5129 }
5130 if (fd->collapse > 1)
5131 expand_omp_for_init_vars (fd, &gsi, counts, inner_stmt, startvar);
5132
5133 if (!broken_loop)
5134 {
5135 /* The code controlling the sequential loop replaces the
5136 GIMPLE_OMP_CONTINUE. */
5137 gsi = gsi_last_bb (cont_bb);
5138 gomp_continue *cont_stmt = as_a <gomp_continue *> (gsi_stmt (gsi));
5139 gcc_assert (gimple_code (cont_stmt) == GIMPLE_OMP_CONTINUE);
5140 vmain = gimple_omp_continue_control_use (cont_stmt);
5141 vback = gimple_omp_continue_control_def (cont_stmt);
5142
5143 if (!gimple_omp_for_combined_p (fd->for_stmt))
5144 {
5145 if (POINTER_TYPE_P (type))
5146 t = fold_build_pointer_plus (vmain, step);
5147 else
5148 t = fold_build2 (PLUS_EXPR, type, vmain, step);
5149 t = force_gimple_operand_gsi (&gsi, t,
5150 DECL_P (vback)
5151 && TREE_ADDRESSABLE (vback),
5152 NULL_TREE, true, GSI_SAME_STMT);
5153 assign_stmt = gimple_build_assign (vback, t);
5154 gsi_insert_before (&gsi, assign_stmt, GSI_SAME_STMT);
5155
5156 t = build2 (fd->loop.cond_code, boolean_type_node,
5157 DECL_P (vback) && TREE_ADDRESSABLE (vback)
5158 ? t : vback, e);
5159 gsi_insert_before (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT);
5160 }
5161
5162 /* Remove the GIMPLE_OMP_CONTINUE statement. */
5163 gsi_remove (&gsi, true);
5164
5165 if (fd->collapse > 1 && !gimple_omp_for_combined_p (fd->for_stmt))
5166 collapse_bb = extract_omp_for_update_vars (fd, cont_bb, body_bb);
5167 }
5168
5169 /* Remove the GIMPLE_OMP_FOR statement. */
5170 gsi = gsi_for_stmt (fd->for_stmt);
5171 gsi_remove (&gsi, true);
5172
5173 /* Remove the GIMPLE_OMP_RETURN statement. */
5174 gsi = gsi_last_bb (exit_bb);
5175 gsi_remove (&gsi, true);
5176
5177 FALLTHRU_EDGE (entry_bb)->probability = REG_BR_PROB_BASE;
5178 if (!broken_loop)
5179 remove_edge (BRANCH_EDGE (entry_bb));
5180 else
5181 {
5182 remove_edge_and_dominated_blocks (BRANCH_EDGE (entry_bb));
5183 region->outer->cont = NULL;
5184 }
5185
5186 /* Connect all the blocks. */
5187 if (!broken_loop)
5188 {
5189 ep = find_edge (cont_bb, body_bb);
5190 if (gimple_omp_for_combined_p (fd->for_stmt))
5191 {
5192 remove_edge (ep);
5193 ep = NULL;
5194 }
5195 else if (fd->collapse > 1)
5196 {
5197 remove_edge (ep);
5198 ep = make_edge (cont_bb, collapse_bb, EDGE_TRUE_VALUE);
5199 }
5200 else
5201 ep->flags = EDGE_TRUE_VALUE;
5202 find_edge (cont_bb, fin_bb)->flags
5203 = ep ? EDGE_FALSE_VALUE : EDGE_FALLTHRU;
5204 }
5205
5206 set_immediate_dominator (CDI_DOMINATORS, body_bb,
5207 recompute_dominator (CDI_DOMINATORS, body_bb));
5208 if (!broken_loop)
5209 set_immediate_dominator (CDI_DOMINATORS, fin_bb,
5210 recompute_dominator (CDI_DOMINATORS, fin_bb));
5211
5212 if (!broken_loop && !gimple_omp_for_combined_p (fd->for_stmt))
5213 {
5214 struct loop *loop = alloc_loop ();
5215 loop->header = body_bb;
5216 if (collapse_bb == NULL)
5217 loop->latch = cont_bb;
5218 add_loop (loop, body_bb->loop_father);
5219 }
5220 }
5221
5222 /* A subroutine of expand_omp_for. Generate code for an OpenACC
5223 partitioned loop. The lowering here is abstracted, in that the
5224 loop parameters are passed through internal functions, which are
5225 further lowered by oacc_device_lower, once we get to the target
5226 compiler. The loop is of the form:
5227
5228 for (V = B; V LTGT E; V += S) {BODY}
5229
5230 where LTGT is < or >. We may have a specified chunking size, CHUNK_SIZE
5231 (constant 0 for no chunking), and we will have a GWV partitioning
5232 mask, specifying dimensions over which the loop is to be
5233 partitioned (see note below). We generate code that looks like:
5234
5235 <entry_bb> [incoming FALL->body, BRANCH->exit]
5236 typedef signedintify (typeof (V)) T; // underlying signed integral type
5237 T range = E - B;
5238 T chunk_no = 0;
5239 T DIR = LTGT == '<' ? +1 : -1;
5240 T chunk_max = GOACC_LOOP_CHUNK (dir, range, S, CHUNK_SIZE, GWV);
5241 T step = GOACC_LOOP_STEP (dir, range, S, CHUNK_SIZE, GWV);
5242
5243 <head_bb> [created by splitting end of entry_bb]
5244 T offset = GOACC_LOOP_OFFSET (dir, range, S, CHUNK_SIZE, GWV, chunk_no);
5245 T bound = GOACC_LOOP_BOUND (dir, range, S, CHUNK_SIZE, GWV, offset);
5246 if (!(offset LTGT bound)) goto bottom_bb;
5247
5248 <body_bb> [incoming]
5249 V = B + offset;
5250 {BODY}
5251
5252 <cont_bb> [incoming, may == body_bb FALL->exit_bb, BRANCH->body_bb]
5253 offset += step;
5254 if (offset LTGT bound) goto body_bb; [*]
5255
5256 <bottom_bb> [created by splitting start of exit_bb] insert BRANCH->head_bb
5257 chunk_no++;
5258 if (chunk_no < chunk_max) goto head_bb;
5259
5260 <exit_bb> [incoming]
5261 V = B + ((range -/+ 1) / S +/- 1) * S [*]
5262
5263 [*] Needed if V live at end of loop
5264
5265 Note: CHUNKING & GWV mask are specified explicitly here. This is a
5266 transition, and will be specified by a more general mechanism shortly.
5267 */
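/* For illustration (a rough sketch, not verbatim output), a loop such as

     #pragma acc loop gang
     for (i = 0; i < n; i++)
       a[i] = b[i];

   reaches this routine with B = 0, E = n, S = 1 and LTGT being '<'; the
   expansion above then materializes the chunk/offset/bound computations as
   IFN_GOACC_LOOP internal calls, which oacc_device_lower later specializes
   for the gang/worker/vector partitioning actually selected.  */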
5268
5269 static void
5270 expand_oacc_for (struct omp_region *region, struct omp_for_data *fd)
5271 {
5272 tree v = fd->loop.v;
5273 enum tree_code cond_code = fd->loop.cond_code;
5274 enum tree_code plus_code = PLUS_EXPR;
5275
5276 tree chunk_size = integer_minus_one_node;
5277 tree gwv = integer_zero_node;
5278 tree iter_type = TREE_TYPE (v);
5279 tree diff_type = iter_type;
5280 tree plus_type = iter_type;
5281 struct oacc_collapse *counts = NULL;
5282
5283 gcc_checking_assert (gimple_omp_for_kind (fd->for_stmt)
5284 == GF_OMP_FOR_KIND_OACC_LOOP);
5285 gcc_assert (!gimple_omp_for_combined_into_p (fd->for_stmt));
5286 gcc_assert (cond_code == LT_EXPR || cond_code == GT_EXPR);
5287
5288 if (POINTER_TYPE_P (iter_type))
5289 {
5290 plus_code = POINTER_PLUS_EXPR;
5291 plus_type = sizetype;
5292 }
5293 if (POINTER_TYPE_P (diff_type) || TYPE_UNSIGNED (diff_type))
5294 diff_type = signed_type_for (diff_type);
5295
5296 basic_block entry_bb = region->entry; /* BB ending in OMP_FOR */
5297 basic_block exit_bb = region->exit; /* BB ending in OMP_RETURN */
5298 basic_block cont_bb = region->cont; /* BB ending in OMP_CONTINUE */
5299 basic_block bottom_bb = NULL;
5300
5301 /* entry_bb has two successors; the branch edge goes to the exit
5302 block, the fallthrough edge to the body. */
5303 gcc_assert (EDGE_COUNT (entry_bb->succs) == 2
5304 && BRANCH_EDGE (entry_bb)->dest == exit_bb);
5305
5306 /* If cont_bb non-NULL, it has 2 successors. The branch successor is
5307 body_bb, or to a block whose only successor is the body_bb. Its
5308 fallthrough successor is the final block (same as the branch
5309 successor of the entry_bb). */
5310 if (cont_bb)
5311 {
5312 basic_block body_bb = FALLTHRU_EDGE (entry_bb)->dest;
5313 basic_block bed = BRANCH_EDGE (cont_bb)->dest;
5314
5315 gcc_assert (FALLTHRU_EDGE (cont_bb)->dest == exit_bb);
5316 gcc_assert (bed == body_bb || single_succ_edge (bed)->dest == body_bb);
5317 }
5318 else
5319 gcc_assert (!gimple_in_ssa_p (cfun));
5320
5321 /* The exit block only has entry_bb and cont_bb as predecessors. */
5322 gcc_assert (EDGE_COUNT (exit_bb->preds) == 1 + (cont_bb != NULL));
5323
5324 tree chunk_no;
5325 tree chunk_max = NULL_TREE;
5326 tree bound, offset;
5327 tree step = create_tmp_var (diff_type, ".step");
5328 bool up = cond_code == LT_EXPR;
5329 tree dir = build_int_cst (diff_type, up ? +1 : -1);
5330 bool chunking = !gimple_in_ssa_p (cfun);
5331 bool negating;
5332
5333 /* SSA instances. */
5334 tree offset_incr = NULL_TREE;
5335 tree offset_init = NULL_TREE;
5336
5337 gimple_stmt_iterator gsi;
5338 gassign *ass;
5339 gcall *call;
5340 gimple *stmt;
5341 tree expr;
5342 location_t loc;
5343 edge split, be, fte;
5344
5345 /* Split the end of entry_bb to create head_bb. */
5346 split = split_block (entry_bb, last_stmt (entry_bb));
5347 basic_block head_bb = split->dest;
5348 entry_bb = split->src;
5349
5350 /* Chunk setup goes at end of entry_bb, replacing the omp_for. */
5351 gsi = gsi_last_bb (entry_bb);
5352 gomp_for *for_stmt = as_a <gomp_for *> (gsi_stmt (gsi));
5353 loc = gimple_location (for_stmt);
5354
5355 if (gimple_in_ssa_p (cfun))
5356 {
5357 offset_init = gimple_omp_for_index (for_stmt, 0);
5358 gcc_assert (integer_zerop (fd->loop.n1));
5359 /* The SSA parallelizer does gang parallelism. */
5360 gwv = build_int_cst (integer_type_node, GOMP_DIM_MASK (GOMP_DIM_GANG));
5361 }
5362
5363 if (fd->collapse > 1)
5364 {
5365 counts = XALLOCAVEC (struct oacc_collapse, fd->collapse);
5366 tree total = expand_oacc_collapse_init (fd, &gsi, counts,
5367 TREE_TYPE (fd->loop.n2));
5368
5369 if (SSA_VAR_P (fd->loop.n2))
5370 {
5371 total = force_gimple_operand_gsi (&gsi, total, false, NULL_TREE,
5372 true, GSI_SAME_STMT);
5373 ass = gimple_build_assign (fd->loop.n2, total);
5374 gsi_insert_before (&gsi, ass, GSI_SAME_STMT);
5375 }
5376
5377 }
5378
5379 tree b = fd->loop.n1;
5380 tree e = fd->loop.n2;
5381 tree s = fd->loop.step;
5382
5383 b = force_gimple_operand_gsi (&gsi, b, true, NULL_TREE, true, GSI_SAME_STMT);
5384 e = force_gimple_operand_gsi (&gsi, e, true, NULL_TREE, true, GSI_SAME_STMT);
5385
5386 /* Convert the step, avoiding possible unsigned->signed overflow. */
5387 negating = !up && TYPE_UNSIGNED (TREE_TYPE (s));
5388 if (negating)
5389 s = fold_build1 (NEGATE_EXPR, TREE_TYPE (s), s);
5390 s = fold_convert (diff_type, s);
5391 if (negating)
5392 s = fold_build1 (NEGATE_EXPR, diff_type, s);
5393 s = force_gimple_operand_gsi (&gsi, s, true, NULL_TREE, true, GSI_SAME_STMT);
5394
5395 if (!chunking)
5396 chunk_size = integer_zero_node;
5397 expr = fold_convert (diff_type, chunk_size);
5398 chunk_size = force_gimple_operand_gsi (&gsi, expr, true,
5399 NULL_TREE, true, GSI_SAME_STMT);
5400 /* Determine the range, avoiding possible unsigned->signed overflow. */
5401 negating = !up && TYPE_UNSIGNED (iter_type);
5402 expr = fold_build2 (MINUS_EXPR, plus_type,
5403 fold_convert (plus_type, negating ? b : e),
5404 fold_convert (plus_type, negating ? e : b));
5405 expr = fold_convert (diff_type, expr);
5406 if (negating)
5407 expr = fold_build1 (NEGATE_EXPR, diff_type, expr);
5408 tree range = force_gimple_operand_gsi (&gsi, expr, true,
5409 NULL_TREE, true, GSI_SAME_STMT);
5410
5411 chunk_no = build_int_cst (diff_type, 0);
5412 if (chunking)
5413 {
5414 gcc_assert (!gimple_in_ssa_p (cfun));
5415
5416 expr = chunk_no;
5417 chunk_max = create_tmp_var (diff_type, ".chunk_max");
5418 chunk_no = create_tmp_var (diff_type, ".chunk_no");
5419
5420 ass = gimple_build_assign (chunk_no, expr);
5421 gsi_insert_before (&gsi, ass, GSI_SAME_STMT);
5422
5423 call = gimple_build_call_internal (IFN_GOACC_LOOP, 6,
5424 build_int_cst (integer_type_node,
5425 IFN_GOACC_LOOP_CHUNKS),
5426 dir, range, s, chunk_size, gwv);
5427 gimple_call_set_lhs (call, chunk_max);
5428 gimple_set_location (call, loc);
5429 gsi_insert_before (&gsi, call, GSI_SAME_STMT);
5430 }
5431 else
5432 chunk_size = chunk_no;
5433
5434 call = gimple_build_call_internal (IFN_GOACC_LOOP, 6,
5435 build_int_cst (integer_type_node,
5436 IFN_GOACC_LOOP_STEP),
5437 dir, range, s, chunk_size, gwv);
5438 gimple_call_set_lhs (call, step);
5439 gimple_set_location (call, loc);
5440 gsi_insert_before (&gsi, call, GSI_SAME_STMT);
5441
5442 /* Remove the GIMPLE_OMP_FOR. */
5443 gsi_remove (&gsi, true);
5444
5445 /* Fixup edges from head_bb. */
5446 be = BRANCH_EDGE (head_bb);
5447 fte = FALLTHRU_EDGE (head_bb);
5448 be->flags |= EDGE_FALSE_VALUE;
5449 fte->flags ^= EDGE_FALLTHRU | EDGE_TRUE_VALUE;
5450
5451 basic_block body_bb = fte->dest;
5452
5453 if (gimple_in_ssa_p (cfun))
5454 {
5455 gsi = gsi_last_bb (cont_bb);
5456 gomp_continue *cont_stmt = as_a <gomp_continue *> (gsi_stmt (gsi));
5457
5458 offset = gimple_omp_continue_control_use (cont_stmt);
5459 offset_incr = gimple_omp_continue_control_def (cont_stmt);
5460 }
5461 else
5462 {
5463 offset = create_tmp_var (diff_type, ".offset");
5464 offset_init = offset_incr = offset;
5465 }
5466 bound = create_tmp_var (TREE_TYPE (offset), ".bound");
5467
5468 /* Loop offset & bound go into head_bb. */
5469 gsi = gsi_start_bb (head_bb);
5470
5471 call = gimple_build_call_internal (IFN_GOACC_LOOP, 7,
5472 build_int_cst (integer_type_node,
5473 IFN_GOACC_LOOP_OFFSET),
5474 dir, range, s,
5475 chunk_size, gwv, chunk_no);
5476 gimple_call_set_lhs (call, offset_init);
5477 gimple_set_location (call, loc);
5478 gsi_insert_after (&gsi, call, GSI_CONTINUE_LINKING);
5479
5480 call = gimple_build_call_internal (IFN_GOACC_LOOP, 7,
5481 build_int_cst (integer_type_node,
5482 IFN_GOACC_LOOP_BOUND),
5483 dir, range, s,
5484 chunk_size, gwv, offset_init);
5485 gimple_call_set_lhs (call, bound);
5486 gimple_set_location (call, loc);
5487 gsi_insert_after (&gsi, call, GSI_CONTINUE_LINKING);
5488
5489 expr = build2 (cond_code, boolean_type_node, offset_init, bound);
5490 gsi_insert_after (&gsi, gimple_build_cond_empty (expr),
5491 GSI_CONTINUE_LINKING);
5492
5493 /* V assignment goes into body_bb. */
5494 if (!gimple_in_ssa_p (cfun))
5495 {
5496 gsi = gsi_start_bb (body_bb);
5497
5498 expr = build2 (plus_code, iter_type, b,
5499 fold_convert (plus_type, offset));
5500 expr = force_gimple_operand_gsi (&gsi, expr, false, NULL_TREE,
5501 true, GSI_SAME_STMT);
5502 ass = gimple_build_assign (v, expr);
5503 gsi_insert_before (&gsi, ass, GSI_SAME_STMT);
5504 if (fd->collapse > 1)
5505 expand_oacc_collapse_vars (fd, &gsi, counts, v);
5506 }
5507
5508 /* Loop increment goes into cont_bb. If this is not a loop, we
5509 will have spawned threads as if it were, and each one will
5510 execute one iteration. The specification is not explicit about
5511 whether such constructs are ill-formed or not, and they can
5512 occur, especially when noreturn routines are involved. */
5513 if (cont_bb)
5514 {
5515 gsi = gsi_last_bb (cont_bb);
5516 gomp_continue *cont_stmt = as_a <gomp_continue *> (gsi_stmt (gsi));
5517 loc = gimple_location (cont_stmt);
5518
5519 /* Increment offset. */
5520 if (gimple_in_ssa_p (cfun))
5521 expr = build2 (plus_code, iter_type, offset,
5522 fold_convert (plus_type, step));
5523 else
5524 expr = build2 (PLUS_EXPR, diff_type, offset, step);
5525 expr = force_gimple_operand_gsi (&gsi, expr, false, NULL_TREE,
5526 true, GSI_SAME_STMT);
5527 ass = gimple_build_assign (offset_incr, expr);
5528 gsi_insert_before (&gsi, ass, GSI_SAME_STMT);
5529 expr = build2 (cond_code, boolean_type_node, offset_incr, bound);
5530 gsi_insert_before (&gsi, gimple_build_cond_empty (expr), GSI_SAME_STMT);
5531
5532 /* Remove the GIMPLE_OMP_CONTINUE. */
5533 gsi_remove (&gsi, true);
5534
5535 /* Fixup edges from cont_bb. */
5536 be = BRANCH_EDGE (cont_bb);
5537 fte = FALLTHRU_EDGE (cont_bb);
5538 be->flags |= EDGE_TRUE_VALUE;
5539 fte->flags ^= EDGE_FALLTHRU | EDGE_FALSE_VALUE;
5540
5541 if (chunking)
5542 {
5543 /* Split the beginning of exit_bb to make bottom_bb. We
5544 need to insert a nop at the start, because splitting is
5545 after a stmt, not before. */
5546 gsi = gsi_start_bb (exit_bb);
5547 stmt = gimple_build_nop ();
5548 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
5549 split = split_block (exit_bb, stmt);
5550 bottom_bb = split->src;
5551 exit_bb = split->dest;
5552 gsi = gsi_last_bb (bottom_bb);
5553
5554 /* Chunk increment and test goes into bottom_bb. */
5555 expr = build2 (PLUS_EXPR, diff_type, chunk_no,
5556 build_int_cst (diff_type, 1));
5557 ass = gimple_build_assign (chunk_no, expr);
5558 gsi_insert_after (&gsi, ass, GSI_CONTINUE_LINKING);
5559
5560 /* Chunk test at end of bottom_bb. */
5561 expr = build2 (LT_EXPR, boolean_type_node, chunk_no, chunk_max);
5562 gsi_insert_after (&gsi, gimple_build_cond_empty (expr),
5563 GSI_CONTINUE_LINKING);
5564
5565 /* Fixup edges from bottom_bb. */
5566 split->flags ^= EDGE_FALLTHRU | EDGE_FALSE_VALUE;
5567 make_edge (bottom_bb, head_bb, EDGE_TRUE_VALUE);
5568 }
5569 }
5570
5571 gsi = gsi_last_bb (exit_bb);
5572 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_RETURN);
5573 loc = gimple_location (gsi_stmt (gsi));
5574
5575 if (!gimple_in_ssa_p (cfun))
5576 {
5577 /* Insert the final value of V, in case it is live. This is the
5578 value for the only thread that survives past the join. */
5579 expr = fold_build2 (MINUS_EXPR, diff_type, range, dir);
5580 expr = fold_build2 (PLUS_EXPR, diff_type, expr, s);
5581 expr = fold_build2 (TRUNC_DIV_EXPR, diff_type, expr, s);
5582 expr = fold_build2 (MULT_EXPR, diff_type, expr, s);
5583 expr = build2 (plus_code, iter_type, b, fold_convert (plus_type, expr));
5584 expr = force_gimple_operand_gsi (&gsi, expr, false, NULL_TREE,
5585 true, GSI_SAME_STMT);
5586 ass = gimple_build_assign (v, expr);
5587 gsi_insert_before (&gsi, ass, GSI_SAME_STMT);
5588 }
5589
5590 /* Remove the OMP_RETURN. */
5591 gsi_remove (&gsi, true);
5592
5593 if (cont_bb)
5594 {
5595 /* We now have one or two nested loops. Update the loop
5596 structures. */
5597 struct loop *parent = entry_bb->loop_father;
5598 struct loop *body = body_bb->loop_father;
5599
5600 if (chunking)
5601 {
5602 struct loop *chunk_loop = alloc_loop ();
5603 chunk_loop->header = head_bb;
5604 chunk_loop->latch = bottom_bb;
5605 add_loop (chunk_loop, parent);
5606 parent = chunk_loop;
5607 }
5608 else if (parent != body)
5609 {
5610 gcc_assert (body->header == body_bb);
5611 gcc_assert (body->latch == cont_bb
5612 || single_pred (body->latch) == cont_bb);
5613 parent = NULL;
5614 }
5615
5616 if (parent)
5617 {
5618 struct loop *body_loop = alloc_loop ();
5619 body_loop->header = body_bb;
5620 body_loop->latch = cont_bb;
5621 add_loop (body_loop, parent);
5622 }
5623 }
5624 }
5625
5626 /* Expand the OMP loop defined by REGION. */
5627
5628 static void
5629 expand_omp_for (struct omp_region *region, gimple *inner_stmt)
5630 {
5631 struct omp_for_data fd;
5632 struct omp_for_data_loop *loops;
5633
5634 loops
5635 = (struct omp_for_data_loop *)
5636 alloca (gimple_omp_for_collapse (last_stmt (region->entry))
5637 * sizeof (struct omp_for_data_loop));
5638 omp_extract_for_data (as_a <gomp_for *> (last_stmt (region->entry)),
5639 &fd, loops);
5640 region->sched_kind = fd.sched_kind;
5641 region->sched_modifiers = fd.sched_modifiers;
5642
5643 gcc_assert (EDGE_COUNT (region->entry->succs) == 2);
5644 BRANCH_EDGE (region->entry)->flags &= ~EDGE_ABNORMAL;
5645 FALLTHRU_EDGE (region->entry)->flags &= ~EDGE_ABNORMAL;
5646 if (region->cont)
5647 {
5648 gcc_assert (EDGE_COUNT (region->cont->succs) == 2);
5649 BRANCH_EDGE (region->cont)->flags &= ~EDGE_ABNORMAL;
5650 FALLTHRU_EDGE (region->cont)->flags &= ~EDGE_ABNORMAL;
5651 }
5652 else
5653 /* If there isn't a continue then this is a degenerate case where
5654 the introduction of abnormal edges during lowering will prevent
5655 original loops from being detected. Fix that up. */
5656 loops_state_set (LOOPS_NEED_FIXUP);
5657
5658 if (gimple_omp_for_kind (fd.for_stmt) & GF_OMP_FOR_SIMD)
5659 expand_omp_simd (region, &fd);
5660 else if (gimple_omp_for_kind (fd.for_stmt) == GF_OMP_FOR_KIND_CILKFOR)
5661 expand_cilk_for (region, &fd);
5662 else if (gimple_omp_for_kind (fd.for_stmt) == GF_OMP_FOR_KIND_OACC_LOOP)
5663 {
5664 gcc_assert (!inner_stmt);
5665 expand_oacc_for (region, &fd);
5666 }
5667 else if (gimple_omp_for_kind (fd.for_stmt) == GF_OMP_FOR_KIND_TASKLOOP)
5668 {
5669 if (gimple_omp_for_combined_into_p (fd.for_stmt))
5670 expand_omp_taskloop_for_inner (region, &fd, inner_stmt);
5671 else
5672 expand_omp_taskloop_for_outer (region, &fd, inner_stmt);
5673 }
5674 else if (fd.sched_kind == OMP_CLAUSE_SCHEDULE_STATIC
5675 && !fd.have_ordered)
5676 {
5677 if (fd.chunk_size == NULL)
5678 expand_omp_for_static_nochunk (region, &fd, inner_stmt);
5679 else
5680 expand_omp_for_static_chunk (region, &fd, inner_stmt);
5681 }
5682 else
5683 {
5684 int fn_index, start_ix, next_ix;
5685
5686 gcc_assert (gimple_omp_for_kind (fd.for_stmt)
5687 == GF_OMP_FOR_KIND_FOR);
5688 if (fd.chunk_size == NULL
5689 && fd.sched_kind == OMP_CLAUSE_SCHEDULE_STATIC)
5690 fd.chunk_size = integer_zero_node;
5691 gcc_assert (fd.sched_kind != OMP_CLAUSE_SCHEDULE_AUTO);
5692 switch (fd.sched_kind)
5693 {
5694 case OMP_CLAUSE_SCHEDULE_RUNTIME:
5695 fn_index = 3;
5696 break;
5697 case OMP_CLAUSE_SCHEDULE_DYNAMIC:
5698 case OMP_CLAUSE_SCHEDULE_GUIDED:
5699 if ((fd.sched_modifiers & OMP_CLAUSE_SCHEDULE_NONMONOTONIC)
5700 && !fd.ordered
5701 && !fd.have_ordered)
5702 {
5703 fn_index = 3 + fd.sched_kind;
5704 break;
5705 }
5706 /* FALLTHRU */
5707 default:
5708 fn_index = fd.sched_kind;
5709 break;
5710 }
5711 if (!fd.ordered)
5712 fn_index += fd.have_ordered * 6;
5713 if (fd.ordered)
5714 start_ix = ((int)BUILT_IN_GOMP_LOOP_DOACROSS_STATIC_START) + fn_index;
5715 else
5716 start_ix = ((int)BUILT_IN_GOMP_LOOP_STATIC_START) + fn_index;
5717 next_ix = ((int)BUILT_IN_GOMP_LOOP_STATIC_NEXT) + fn_index;
5718 if (fd.iter_type == long_long_unsigned_type_node)
5719 {
5720 start_ix += ((int)BUILT_IN_GOMP_LOOP_ULL_STATIC_START
5721 - (int)BUILT_IN_GOMP_LOOP_STATIC_START);
5722 next_ix += ((int)BUILT_IN_GOMP_LOOP_ULL_STATIC_NEXT
5723 - (int)BUILT_IN_GOMP_LOOP_STATIC_NEXT);
5724 }
5725 expand_omp_for_generic (region, &fd, (enum built_in_function) start_ix,
5726 (enum built_in_function) next_ix, inner_stmt);
5727 }
5728
5729 if (gimple_in_ssa_p (cfun))
5730 update_ssa (TODO_update_ssa_only_virtuals);
5731 }
5732
5733 /* Expand code for an OpenMP sections directive. In pseudo code, we generate
5734
5735 v = GOMP_sections_start (n);
5736 L0:
5737 switch (v)
5738 {
5739 case 0:
5740 goto L2;
5741 case 1:
5742 section 1;
5743 goto L1;
5744 case 2:
5745 ...
5746 case n:
5747 ...
5748 default:
5749 abort ();
5750 }
5751 L1:
5752 v = GOMP_sections_next ();
5753 goto L0;
5754 L2:
5755 reduction;
5756
5757 If this is a combined parallel sections, replace the call to
5758 GOMP_sections_start with call to GOMP_sections_next. */
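/* For illustration (a rough sketch, not verbatim output), given

     #pragma omp sections
     {
       #pragma omp section
	 foo ();
       #pragma omp section
	 bar ();
     }

   the scheme above produces v = GOMP_sections_start (2) followed by a
   switch on v: case 1 runs foo (), case 2 runs bar (), case 0 leaves the
   construct, and each section branches back to the GOMP_sections_next ()
   call until no work remains.  */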
5759
5760 static void
5761 expand_omp_sections (struct omp_region *region)
5762 {
5763 tree t, u, vin = NULL, vmain, vnext, l2;
5764 unsigned len;
5765 basic_block entry_bb, l0_bb, l1_bb, l2_bb, default_bb;
5766 gimple_stmt_iterator si, switch_si;
5767 gomp_sections *sections_stmt;
5768 gimple *stmt;
5769 gomp_continue *cont;
5770 edge_iterator ei;
5771 edge e;
5772 struct omp_region *inner;
5773 unsigned i, casei;
5774 bool exit_reachable = region->cont != NULL;
5775
5776 gcc_assert (region->exit != NULL);
5777 entry_bb = region->entry;
5778 l0_bb = single_succ (entry_bb);
5779 l1_bb = region->cont;
5780 l2_bb = region->exit;
5781 if (single_pred_p (l2_bb) && single_pred (l2_bb) == l0_bb)
5782 l2 = gimple_block_label (l2_bb);
5783 else
5784 {
5785 /* This can happen if there are reductions. */
5786 len = EDGE_COUNT (l0_bb->succs);
5787 gcc_assert (len > 0);
5788 e = EDGE_SUCC (l0_bb, len - 1);
5789 si = gsi_last_bb (e->dest);
5790 l2 = NULL_TREE;
5791 if (gsi_end_p (si)
5792 || gimple_code (gsi_stmt (si)) != GIMPLE_OMP_SECTION)
5793 l2 = gimple_block_label (e->dest);
5794 else
5795 FOR_EACH_EDGE (e, ei, l0_bb->succs)
5796 {
5797 si = gsi_last_bb (e->dest);
5798 if (gsi_end_p (si)
5799 || gimple_code (gsi_stmt (si)) != GIMPLE_OMP_SECTION)
5800 {
5801 l2 = gimple_block_label (e->dest);
5802 break;
5803 }
5804 }
5805 }
5806 if (exit_reachable)
5807 default_bb = create_empty_bb (l1_bb->prev_bb);
5808 else
5809 default_bb = create_empty_bb (l0_bb);
5810
5811 /* We will build a switch() with enough cases for all the
5812 GIMPLE_OMP_SECTION regions, a '0' case taken when there is no more
5813 work, and a default case to abort if something goes wrong. */
5814 len = EDGE_COUNT (l0_bb->succs);
5815
5816 /* Use vec::quick_push on label_vec throughout, since we know the size
5817 in advance. */
5818 auto_vec<tree> label_vec (len);
5819
5820 /* The call to GOMP_sections_start goes in ENTRY_BB, replacing the
5821 GIMPLE_OMP_SECTIONS statement. */
5822 si = gsi_last_bb (entry_bb);
5823 sections_stmt = as_a <gomp_sections *> (gsi_stmt (si));
5824 gcc_assert (gimple_code (sections_stmt) == GIMPLE_OMP_SECTIONS);
5825 vin = gimple_omp_sections_control (sections_stmt);
5826 if (!is_combined_parallel (region))
5827 {
5828 /* If we are not inside a combined parallel+sections region,
5829 call GOMP_sections_start. */
5830 t = build_int_cst (unsigned_type_node, len - 1);
5831 u = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_START);
5832 stmt = gimple_build_call (u, 1, t);
5833 }
5834 else
5835 {
5836 /* Otherwise, call GOMP_sections_next. */
5837 u = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_NEXT);
5838 stmt = gimple_build_call (u, 0);
5839 }
5840 gimple_call_set_lhs (stmt, vin);
5841 gsi_insert_after (&si, stmt, GSI_SAME_STMT);
5842 gsi_remove (&si, true);
5843
5844 /* The switch() statement replacing GIMPLE_OMP_SECTIONS_SWITCH goes in
5845 L0_BB. */
5846 switch_si = gsi_last_bb (l0_bb);
5847 gcc_assert (gimple_code (gsi_stmt (switch_si)) == GIMPLE_OMP_SECTIONS_SWITCH);
5848 if (exit_reachable)
5849 {
5850 cont = as_a <gomp_continue *> (last_stmt (l1_bb));
5851 gcc_assert (gimple_code (cont) == GIMPLE_OMP_CONTINUE);
5852 vmain = gimple_omp_continue_control_use (cont);
5853 vnext = gimple_omp_continue_control_def (cont);
5854 }
5855 else
5856 {
5857 vmain = vin;
5858 vnext = NULL_TREE;
5859 }
5860
5861 t = build_case_label (build_int_cst (unsigned_type_node, 0), NULL, l2);
5862 label_vec.quick_push (t);
5863 i = 1;
5864
5865 /* Convert each GIMPLE_OMP_SECTION into a CASE_LABEL_EXPR. */
5866 for (inner = region->inner, casei = 1;
5867 inner;
5868 inner = inner->next, i++, casei++)
5869 {
5870 basic_block s_entry_bb, s_exit_bb;
5871
5872 /* Skip optional reduction region. */
5873 if (inner->type == GIMPLE_OMP_ATOMIC_LOAD)
5874 {
5875 --i;
5876 --casei;
5877 continue;
5878 }
5879
5880 s_entry_bb = inner->entry;
5881 s_exit_bb = inner->exit;
5882
5883 t = gimple_block_label (s_entry_bb);
5884 u = build_int_cst (unsigned_type_node, casei);
5885 u = build_case_label (u, NULL, t);
5886 label_vec.quick_push (u);
5887
5888 si = gsi_last_bb (s_entry_bb);
5889 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SECTION);
5890 gcc_assert (i < len || gimple_omp_section_last_p (gsi_stmt (si)));
5891 gsi_remove (&si, true);
5892 single_succ_edge (s_entry_bb)->flags = EDGE_FALLTHRU;
5893
5894 if (s_exit_bb == NULL)
5895 continue;
5896
5897 si = gsi_last_bb (s_exit_bb);
5898 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_RETURN);
5899 gsi_remove (&si, true);
5900
5901 single_succ_edge (s_exit_bb)->flags = EDGE_FALLTHRU;
5902 }
5903
5904 /* Error handling code goes in DEFAULT_BB. */
5905 t = gimple_block_label (default_bb);
5906 u = build_case_label (NULL, NULL, t);
5907 make_edge (l0_bb, default_bb, 0);
5908 add_bb_to_loop (default_bb, current_loops->tree_root);
5909
5910 stmt = gimple_build_switch (vmain, u, label_vec);
5911 gsi_insert_after (&switch_si, stmt, GSI_SAME_STMT);
5912 gsi_remove (&switch_si, true);
5913
5914 si = gsi_start_bb (default_bb);
5915 stmt = gimple_build_call (builtin_decl_explicit (BUILT_IN_TRAP), 0);
5916 gsi_insert_after (&si, stmt, GSI_CONTINUE_LINKING);
5917
5918 if (exit_reachable)
5919 {
5920 tree bfn_decl;
5921
5922 /* Code to get the next section goes in L1_BB. */
5923 si = gsi_last_bb (l1_bb);
5924 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_CONTINUE);
5925
5926 bfn_decl = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_NEXT);
5927 stmt = gimple_build_call (bfn_decl, 0);
5928 gimple_call_set_lhs (stmt, vnext);
5929 gsi_insert_after (&si, stmt, GSI_SAME_STMT);
5930 gsi_remove (&si, true);
5931
5932 single_succ_edge (l1_bb)->flags = EDGE_FALLTHRU;
5933 }
5934
5935 /* Cleanup function replaces GIMPLE_OMP_RETURN in EXIT_BB. */
5936 si = gsi_last_bb (l2_bb);
5937 if (gimple_omp_return_nowait_p (gsi_stmt (si)))
5938 t = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_END_NOWAIT);
5939 else if (gimple_omp_return_lhs (gsi_stmt (si)))
5940 t = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_END_CANCEL);
5941 else
5942 t = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_END);
5943 stmt = gimple_build_call (t, 0);
5944 if (gimple_omp_return_lhs (gsi_stmt (si)))
5945 gimple_call_set_lhs (stmt, gimple_omp_return_lhs (gsi_stmt (si)));
5946 gsi_insert_after (&si, stmt, GSI_SAME_STMT);
5947 gsi_remove (&si, true);
5948
5949 set_immediate_dominator (CDI_DOMINATORS, default_bb, l0_bb);
5950 }
5951
5952 /* Expand code for an OpenMP single directive. We've already expanded
5953 much of the code, here we simply place the GOMP_barrier call. */
5954
5955 static void
5956 expand_omp_single (struct omp_region *region)
5957 {
5958 basic_block entry_bb, exit_bb;
5959 gimple_stmt_iterator si;
5960
5961 entry_bb = region->entry;
5962 exit_bb = region->exit;
5963
5964 si = gsi_last_bb (entry_bb);
5965 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SINGLE);
5966 gsi_remove (&si, true);
5967 single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
5968
5969 si = gsi_last_bb (exit_bb);
5970 if (!gimple_omp_return_nowait_p (gsi_stmt (si)))
5971 {
5972 tree t = gimple_omp_return_lhs (gsi_stmt (si));
5973 gsi_insert_after (&si, omp_build_barrier (t), GSI_SAME_STMT);
5974 }
5975 gsi_remove (&si, true);
5976 single_succ_edge (exit_bb)->flags = EDGE_FALLTHRU;
5977 }
5978
5979 /* Generic expansion for OpenMP synchronization directives: master,
5980 ordered and critical. All we need to do here is remove the entry
5981 and exit markers for REGION. */
5982
5983 static void
5984 expand_omp_synch (struct omp_region *region)
5985 {
5986 basic_block entry_bb, exit_bb;
5987 gimple_stmt_iterator si;
5988
5989 entry_bb = region->entry;
5990 exit_bb = region->exit;
5991
5992 si = gsi_last_bb (entry_bb);
5993 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SINGLE
5994 || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_MASTER
5995 || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_TASKGROUP
5996 || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ORDERED
5997 || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_CRITICAL
5998 || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_TEAMS);
5999 gsi_remove (&si, true);
6000 single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
6001
6002 if (exit_bb)
6003 {
6004 si = gsi_last_bb (exit_bb);
6005 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_RETURN);
6006 gsi_remove (&si, true);
6007 single_succ_edge (exit_bb)->flags = EDGE_FALLTHRU;
6008 }
6009 }
6010
6011 /* A subroutine of expand_omp_atomic. Attempt to implement the atomic
6012 operation as a normal volatile load. */
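/* For illustration (a rough sketch, not verbatim output): with a 4-byte
   int x,

       #pragma omp atomic read
       v = x;

   becomes approximately

       v = __atomic_load_4 (&x, MEMMODEL_RELAXED);

   with MEMMODEL_SEQ_CST used instead when the seq_cst clause is present.  */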
6013
6014 static bool
6015 expand_omp_atomic_load (basic_block load_bb, tree addr,
6016 tree loaded_val, int index)
6017 {
6018 enum built_in_function tmpbase;
6019 gimple_stmt_iterator gsi;
6020 basic_block store_bb;
6021 location_t loc;
6022 gimple *stmt;
6023 tree decl, call, type, itype;
6024
6025 gsi = gsi_last_bb (load_bb);
6026 stmt = gsi_stmt (gsi);
6027 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_ATOMIC_LOAD);
6028 loc = gimple_location (stmt);
6029
6030 /* ??? If the target does not implement atomic_load_optab[mode], and mode
6031 is smaller than word size, then expand_atomic_load assumes that the load
6032 is atomic. We could avoid the builtin entirely in this case. */
6033
6034 tmpbase = (enum built_in_function) (BUILT_IN_ATOMIC_LOAD_N + index + 1);
6035 decl = builtin_decl_explicit (tmpbase);
6036 if (decl == NULL_TREE)
6037 return false;
6038
6039 type = TREE_TYPE (loaded_val);
6040 itype = TREE_TYPE (TREE_TYPE (decl));
6041
6042 call = build_call_expr_loc (loc, decl, 2, addr,
6043 build_int_cst (NULL,
6044 gimple_omp_atomic_seq_cst_p (stmt)
6045 ? MEMMODEL_SEQ_CST
6046 : MEMMODEL_RELAXED));
6047 if (!useless_type_conversion_p (type, itype))
6048 call = fold_build1_loc (loc, VIEW_CONVERT_EXPR, type, call);
6049 call = build2_loc (loc, MODIFY_EXPR, void_type_node, loaded_val, call);
6050
6051 force_gimple_operand_gsi (&gsi, call, true, NULL_TREE, true, GSI_SAME_STMT);
6052 gsi_remove (&gsi, true);
6053
6054 store_bb = single_succ (load_bb);
6055 gsi = gsi_last_bb (store_bb);
6056 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_ATOMIC_STORE);
6057 gsi_remove (&gsi, true);
6058
6059 if (gimple_in_ssa_p (cfun))
6060 update_ssa (TODO_update_ssa_no_phi);
6061
6062 return true;
6063 }
6064
6065 /* A subroutine of expand_omp_atomic. Attempt to implement the atomic
6066 operation as a normal volatile store. */
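/* For illustration (a rough sketch, not verbatim output): with a 4-byte
   int x,

       #pragma omp atomic write
       x = expr;

   becomes approximately

       __atomic_store_4 (&x, expr, MEMMODEL_RELAXED);

   and when the old value is also needed the expansion instead uses
   __atomic_exchange_4, falling back to other strategies if the target
   cannot do an atomic exchange in that mode.  */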
6067
6068 static bool
6069 expand_omp_atomic_store (basic_block load_bb, tree addr,
6070 tree loaded_val, tree stored_val, int index)
6071 {
6072 enum built_in_function tmpbase;
6073 gimple_stmt_iterator gsi;
6074 basic_block store_bb = single_succ (load_bb);
6075 location_t loc;
6076 gimple *stmt;
6077 tree decl, call, type, itype;
6078 machine_mode imode;
6079 bool exchange;
6080
6081 gsi = gsi_last_bb (load_bb);
6082 stmt = gsi_stmt (gsi);
6083 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_ATOMIC_LOAD);
6084
6085 /* If the load value is needed, then this isn't a store but an exchange. */
6086 exchange = gimple_omp_atomic_need_value_p (stmt);
6087
6088 gsi = gsi_last_bb (store_bb);
6089 stmt = gsi_stmt (gsi);
6090 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_ATOMIC_STORE);
6091 loc = gimple_location (stmt);
6092
6093 /* ??? If the target does not implement atomic_store_optab[mode], and mode
6094 is smaller than word size, then expand_atomic_store assumes that the store
6095 is atomic. We could avoid the builtin entirely in this case. */
6096
6097 tmpbase = (exchange ? BUILT_IN_ATOMIC_EXCHANGE_N : BUILT_IN_ATOMIC_STORE_N);
6098 tmpbase = (enum built_in_function) ((int) tmpbase + index + 1);
6099 decl = builtin_decl_explicit (tmpbase);
6100 if (decl == NULL_TREE)
6101 return false;
6102
6103 type = TREE_TYPE (stored_val);
6104
6105 /* Dig out the type of the function's second argument. */
6106 itype = TREE_TYPE (decl);
6107 itype = TYPE_ARG_TYPES (itype);
6108 itype = TREE_CHAIN (itype);
6109 itype = TREE_VALUE (itype);
6110 imode = TYPE_MODE (itype);
6111
6112 if (exchange && !can_atomic_exchange_p (imode, true))
6113 return false;
6114
6115 if (!useless_type_conversion_p (itype, type))
6116 stored_val = fold_build1_loc (loc, VIEW_CONVERT_EXPR, itype, stored_val);
6117 call = build_call_expr_loc (loc, decl, 3, addr, stored_val,
6118 build_int_cst (NULL,
6119 gimple_omp_atomic_seq_cst_p (stmt)
6120 ? MEMMODEL_SEQ_CST
6121 : MEMMODEL_RELAXED));
6122 if (exchange)
6123 {
6124 if (!useless_type_conversion_p (type, itype))
6125 call = build1_loc (loc, VIEW_CONVERT_EXPR, type, call);
6126 call = build2_loc (loc, MODIFY_EXPR, void_type_node, loaded_val, call);
6127 }
6128
6129 force_gimple_operand_gsi (&gsi, call, true, NULL_TREE, true, GSI_SAME_STMT);
6130 gsi_remove (&gsi, true);
6131
6132 /* Remove the GIMPLE_OMP_ATOMIC_LOAD that we verified above. */
6133 gsi = gsi_last_bb (load_bb);
6134 gsi_remove (&gsi, true);
6135
6136 if (gimple_in_ssa_p (cfun))
6137 update_ssa (TODO_update_ssa_no_phi);
6138
6139 return true;
6140 }
6141
6142 /* A subroutine of expand_omp_atomic. Attempt to implement the atomic
6143 operation as a __atomic_fetch_op builtin. INDEX is log2 of the
6144 size of the data type, and thus usable to find the index of the builtin
6145 decl. Returns false if the expression is not of the proper form. */
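/* For illustration (a rough sketch, not verbatim output): with a 4-byte
   int x,

       #pragma omp atomic
       x += y;

   matches the PLUS_EXPR case below and becomes approximately

       __atomic_fetch_add_4 (&x, y, MEMMODEL_RELAXED);

   the *_FETCH (new-value) builtin is chosen instead when the updated value
   is needed, and MEMMODEL_SEQ_CST when seq_cst was requested.  */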
6146
6147 static bool
6148 expand_omp_atomic_fetch_op (basic_block load_bb,
6149 tree addr, tree loaded_val,
6150 tree stored_val, int index)
6151 {
6152 enum built_in_function oldbase, newbase, tmpbase;
6153 tree decl, itype, call;
6154 tree lhs, rhs;
6155 basic_block store_bb = single_succ (load_bb);
6156 gimple_stmt_iterator gsi;
6157 gimple *stmt;
6158 location_t loc;
6159 enum tree_code code;
6160 bool need_old, need_new;
6161 machine_mode imode;
6162 bool seq_cst;
6163
6164 /* We expect to find the following sequences:
6165
6166 load_bb:
6167 GIMPLE_OMP_ATOMIC_LOAD (tmp, mem)
6168
6169 store_bb:
6170 val = tmp OP something; (or: something OP tmp)
6171 GIMPLE_OMP_STORE (val)
6172
6173 ???FIXME: Allow a more flexible sequence.
6174 Perhaps use data flow to pick the statements.
6175
6176 */
6177
6178 gsi = gsi_after_labels (store_bb);
6179 stmt = gsi_stmt (gsi);
6180 loc = gimple_location (stmt);
6181 if (!is_gimple_assign (stmt))
6182 return false;
6183 gsi_next (&gsi);
6184 if (gimple_code (gsi_stmt (gsi)) != GIMPLE_OMP_ATOMIC_STORE)
6185 return false;
6186 need_new = gimple_omp_atomic_need_value_p (gsi_stmt (gsi));
6187 need_old = gimple_omp_atomic_need_value_p (last_stmt (load_bb));
6188 seq_cst = gimple_omp_atomic_seq_cst_p (last_stmt (load_bb));
6189 gcc_checking_assert (!need_old || !need_new);
6190
6191 if (!operand_equal_p (gimple_assign_lhs (stmt), stored_val, 0))
6192 return false;
6193
6194 /* Check for one of the supported fetch-op operations. */
6195 code = gimple_assign_rhs_code (stmt);
6196 switch (code)
6197 {
6198 case PLUS_EXPR:
6199 case POINTER_PLUS_EXPR:
6200 oldbase = BUILT_IN_ATOMIC_FETCH_ADD_N;
6201 newbase = BUILT_IN_ATOMIC_ADD_FETCH_N;
6202 break;
6203 case MINUS_EXPR:
6204 oldbase = BUILT_IN_ATOMIC_FETCH_SUB_N;
6205 newbase = BUILT_IN_ATOMIC_SUB_FETCH_N;
6206 break;
6207 case BIT_AND_EXPR:
6208 oldbase = BUILT_IN_ATOMIC_FETCH_AND_N;
6209 newbase = BUILT_IN_ATOMIC_AND_FETCH_N;
6210 break;
6211 case BIT_IOR_EXPR:
6212 oldbase = BUILT_IN_ATOMIC_FETCH_OR_N;
6213 newbase = BUILT_IN_ATOMIC_OR_FETCH_N;
6214 break;
6215 case BIT_XOR_EXPR:
6216 oldbase = BUILT_IN_ATOMIC_FETCH_XOR_N;
6217 newbase = BUILT_IN_ATOMIC_XOR_FETCH_N;
6218 break;
6219 default:
6220 return false;
6221 }
6222
6223 /* Make sure the expression is of the proper form. */
6224 if (operand_equal_p (gimple_assign_rhs1 (stmt), loaded_val, 0))
6225 rhs = gimple_assign_rhs2 (stmt);
6226 else if (commutative_tree_code (gimple_assign_rhs_code (stmt))
6227 && operand_equal_p (gimple_assign_rhs2 (stmt), loaded_val, 0))
6228 rhs = gimple_assign_rhs1 (stmt);
6229 else
6230 return false;
6231
6232 tmpbase = ((enum built_in_function)
6233 ((need_new ? newbase : oldbase) + index + 1));
6234 decl = builtin_decl_explicit (tmpbase);
6235 if (decl == NULL_TREE)
6236 return false;
6237 itype = TREE_TYPE (TREE_TYPE (decl));
6238 imode = TYPE_MODE (itype);
6239
6240 /* We could test all of the various optabs involved, but the fact of the
6241 matter is that (with the exception of i486 vs i586 and xadd) all targets
6242 that support any atomic operation optab also implement compare-and-swap.
6243 Let optabs.c take care of expanding any compare-and-swap loop. */
6244 if (!can_compare_and_swap_p (imode, true) || !can_atomic_load_p (imode))
6245 return false;
6246
6247 gsi = gsi_last_bb (load_bb);
6248 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_ATOMIC_LOAD);
6249
6250 /* OpenMP does not imply any barrier-like semantics on its atomic ops.
6251 It only requires that the operation happen atomically. Thus we can
6252 use the RELAXED memory model. */
6253 call = build_call_expr_loc (loc, decl, 3, addr,
6254 fold_convert_loc (loc, itype, rhs),
6255 build_int_cst (NULL,
6256 seq_cst ? MEMMODEL_SEQ_CST
6257 : MEMMODEL_RELAXED));
6258
6259 if (need_old || need_new)
6260 {
6261 lhs = need_old ? loaded_val : stored_val;
6262 call = fold_convert_loc (loc, TREE_TYPE (lhs), call);
6263 call = build2_loc (loc, MODIFY_EXPR, void_type_node, lhs, call);
6264 }
6265 else
6266 call = fold_convert_loc (loc, void_type_node, call);
6267 force_gimple_operand_gsi (&gsi, call, true, NULL_TREE, true, GSI_SAME_STMT);
6268 gsi_remove (&gsi, true);
6269
6270 gsi = gsi_last_bb (store_bb);
6271 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_ATOMIC_STORE);
6272 gsi_remove (&gsi, true);
6273 gsi = gsi_last_bb (store_bb);
6274 stmt = gsi_stmt (gsi);
6275 gsi_remove (&gsi, true);
6276
6277 if (gimple_in_ssa_p (cfun))
6278 {
6279 release_defs (stmt);
6280 update_ssa (TODO_update_ssa_no_phi);
6281 }
6282
6283 return true;
6284 }
6285
6286 /* A subroutine of expand_omp_atomic. Implement the atomic operation as:
6287
6288 oldval = *addr;
6289 repeat:
6290 newval = rhs; // with oldval replacing *addr in rhs
6291 oldval = __sync_val_compare_and_swap (addr, oldval, newval);
6292 if (oldval != newval)
6293 goto repeat;
6294
6295 INDEX is log2 of the size of the data type, and thus usable to find the
6296 index of the builtin decl. */
6297
6298 static bool
6299 expand_omp_atomic_pipeline (basic_block load_bb, basic_block store_bb,
6300 tree addr, tree loaded_val, tree stored_val,
6301 int index)
6302 {
6303 tree loadedi, storedi, initial, new_storedi, old_vali;
6304 tree type, itype, cmpxchg, iaddr;
6305 gimple_stmt_iterator si;
6306 basic_block loop_header = single_succ (load_bb);
6307 gimple *phi, *stmt;
6308 edge e;
6309 enum built_in_function fncode;
6310
6311 /* ??? We need a non-pointer interface to __atomic_compare_exchange in
6312 order to use the RELAXED memory model effectively. */
6313 fncode = (enum built_in_function)((int)BUILT_IN_SYNC_VAL_COMPARE_AND_SWAP_N
6314 + index + 1);
6315 cmpxchg = builtin_decl_explicit (fncode);
6316 if (cmpxchg == NULL_TREE)
6317 return false;
6318 type = TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (addr)));
6319 itype = TREE_TYPE (TREE_TYPE (cmpxchg));
6320
6321 if (!can_compare_and_swap_p (TYPE_MODE (itype), true)
6322 || !can_atomic_load_p (TYPE_MODE (itype)))
6323 return false;
6324
6325 /* Load the initial value, replacing the GIMPLE_OMP_ATOMIC_LOAD. */
6326 si = gsi_last_bb (load_bb);
6327 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_LOAD);
6328
6329 /* For floating-point values, we'll need to view-convert them to integers
6330 so that we can perform the atomic compare and swap. Simplify the
6331 following code by always setting up the "i"ntegral variables. */
6332 if (!INTEGRAL_TYPE_P (type) && !POINTER_TYPE_P (type))
6333 {
6334 tree iaddr_val;
6335
6336 iaddr = create_tmp_reg (build_pointer_type_for_mode (itype, ptr_mode,
6337 true));
6338 iaddr_val
6339 = force_gimple_operand_gsi (&si,
6340 fold_convert (TREE_TYPE (iaddr), addr),
6341 false, NULL_TREE, true, GSI_SAME_STMT);
6342 stmt = gimple_build_assign (iaddr, iaddr_val);
6343 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
6344 loadedi = create_tmp_var (itype);
6345 if (gimple_in_ssa_p (cfun))
6346 loadedi = make_ssa_name (loadedi);
6347 }
6348 else
6349 {
6350 iaddr = addr;
6351 loadedi = loaded_val;
6352 }
6353
6354 fncode = (enum built_in_function) (BUILT_IN_ATOMIC_LOAD_N + index + 1);
6355 tree loaddecl = builtin_decl_explicit (fncode);
6356 if (loaddecl)
6357 initial
6358 = fold_convert (TREE_TYPE (TREE_TYPE (iaddr)),
6359 build_call_expr (loaddecl, 2, iaddr,
6360 build_int_cst (NULL_TREE,
6361 MEMMODEL_RELAXED)));
6362 else
6363 initial = build2 (MEM_REF, TREE_TYPE (TREE_TYPE (iaddr)), iaddr,
6364 build_int_cst (TREE_TYPE (iaddr), 0));
6365
6366 initial
6367 = force_gimple_operand_gsi (&si, initial, true, NULL_TREE, true,
6368 GSI_SAME_STMT);
6369
6370 /* Move the value to the LOADEDI temporary. */
6371 if (gimple_in_ssa_p (cfun))
6372 {
6373 gcc_assert (gimple_seq_empty_p (phi_nodes (loop_header)));
6374 phi = create_phi_node (loadedi, loop_header);
6375 SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, single_succ_edge (load_bb)),
6376 initial);
6377 }
6378 else
6379 gsi_insert_before (&si,
6380 gimple_build_assign (loadedi, initial),
6381 GSI_SAME_STMT);
6382 if (loadedi != loaded_val)
6383 {
6384 gimple_stmt_iterator gsi2;
6385 tree x;
6386
6387 x = build1 (VIEW_CONVERT_EXPR, type, loadedi);
6388 gsi2 = gsi_start_bb (loop_header);
6389 if (gimple_in_ssa_p (cfun))
6390 {
6391 gassign *stmt;
6392 x = force_gimple_operand_gsi (&gsi2, x, true, NULL_TREE,
6393 true, GSI_SAME_STMT);
6394 stmt = gimple_build_assign (loaded_val, x);
6395 gsi_insert_before (&gsi2, stmt, GSI_SAME_STMT);
6396 }
6397 else
6398 {
6399 x = build2 (MODIFY_EXPR, TREE_TYPE (loaded_val), loaded_val, x);
6400 force_gimple_operand_gsi (&gsi2, x, true, NULL_TREE,
6401 true, GSI_SAME_STMT);
6402 }
6403 }
6404 gsi_remove (&si, true);
6405
6406 si = gsi_last_bb (store_bb);
6407 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_STORE);
6408
6409 if (iaddr == addr)
6410 storedi = stored_val;
6411 else
6412 storedi
6413 = force_gimple_operand_gsi (&si,
6414 build1 (VIEW_CONVERT_EXPR, itype,
6415 stored_val), true, NULL_TREE, true,
6416 GSI_SAME_STMT);
6417
6418 /* Build the compare&swap statement. */
6419 new_storedi = build_call_expr (cmpxchg, 3, iaddr, loadedi, storedi);
6420 new_storedi = force_gimple_operand_gsi (&si,
6421 fold_convert (TREE_TYPE (loadedi),
6422 new_storedi),
6423 true, NULL_TREE,
6424 true, GSI_SAME_STMT);
6425
6426 if (gimple_in_ssa_p (cfun))
6427 old_vali = loadedi;
6428 else
6429 {
6430 old_vali = create_tmp_var (TREE_TYPE (loadedi));
6431 stmt = gimple_build_assign (old_vali, loadedi);
6432 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
6433
6434 stmt = gimple_build_assign (loadedi, new_storedi);
6435 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
6436 }
6437
6438 /* Note that we always perform the comparison as an integer, even for
6439 floating point. This allows the atomic operation to properly
6440 succeed even with NaNs and -0.0. */
6441 tree ne = build2 (NE_EXPR, boolean_type_node, new_storedi, old_vali);
6442 stmt = gimple_build_cond_empty (ne);
6443 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
6444
6445 /* Update cfg. */
6446 e = single_succ_edge (store_bb);
6447 e->flags &= ~EDGE_FALLTHRU;
6448 e->flags |= EDGE_FALSE_VALUE;
6449
6450 e = make_edge (store_bb, loop_header, EDGE_TRUE_VALUE);
6451
6452 /* Copy the new value to loadedi (we already did that before the condition
6453 if we are not in SSA). */
6454 if (gimple_in_ssa_p (cfun))
6455 {
6456 phi = gimple_seq_first_stmt (phi_nodes (loop_header));
6457 SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, e), new_storedi);
6458 }
6459
6460 /* Remove GIMPLE_OMP_ATOMIC_STORE. */
6461 gsi_remove (&si, true);
6462
6463 struct loop *loop = alloc_loop ();
6464 loop->header = loop_header;
6465 loop->latch = store_bb;
6466 add_loop (loop, loop_header->loop_father);
6467
6468 if (gimple_in_ssa_p (cfun))
6469 update_ssa (TODO_update_ssa_no_phi);
6470
6471 return true;
6472 }
6473
6474 /* A subroutine of expand_omp_atomic. Implement the atomic operation as:
6475
6476 GOMP_atomic_start ();
6477 *addr = rhs;
6478 GOMP_atomic_end ();
6479
6480 The result is not globally atomic, but works so long as all parallel
6481 references are within #pragma omp atomic directives. According to
6482 responses received from omp@openmp.org, this appears to be within spec.
6483 That makes sense, since that's how several other compilers handle
6484 this situation as well.
6485 LOADED_VAL and ADDR are the operands of GIMPLE_OMP_ATOMIC_LOAD we're
6486 expanding. STORED_VAL is the operand of the matching
6487 GIMPLE_OMP_ATOMIC_STORE.
6488
6489 We replace
6490 GIMPLE_OMP_ATOMIC_LOAD (loaded_val, addr) with
6491 loaded_val = *addr;
6492
6493 and replace
6494 GIMPLE_OMP_ATOMIC_STORE (stored_val) with
6495 *addr = stored_val;
6496 */
6497
6498 static bool
6499 expand_omp_atomic_mutex (basic_block load_bb, basic_block store_bb,
6500 tree addr, tree loaded_val, tree stored_val)
6501 {
6502 gimple_stmt_iterator si;
6503 gassign *stmt;
6504 tree t;
6505
6506 si = gsi_last_bb (load_bb);
6507 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_LOAD);
6508
6509 t = builtin_decl_explicit (BUILT_IN_GOMP_ATOMIC_START);
6510 t = build_call_expr (t, 0);
6511 force_gimple_operand_gsi (&si, t, true, NULL_TREE, true, GSI_SAME_STMT);
6512
6513 stmt = gimple_build_assign (loaded_val, build_simple_mem_ref (addr));
6514 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
6515 gsi_remove (&si, true);
6516
6517 si = gsi_last_bb (store_bb);
6518 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_STORE);
6519
6520 stmt = gimple_build_assign (build_simple_mem_ref (unshare_expr (addr)),
6521 stored_val);
6522 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
6523
6524 t = builtin_decl_explicit (BUILT_IN_GOMP_ATOMIC_END);
6525 t = build_call_expr (t, 0);
6526 force_gimple_operand_gsi (&si, t, true, NULL_TREE, true, GSI_SAME_STMT);
6527 gsi_remove (&si, true);
6528
6529 if (gimple_in_ssa_p (cfun))
6530 update_ssa (TODO_update_ssa_no_phi);
6531 return true;
6532 }
6533
6534 /* Expand a GIMPLE_OMP_ATOMIC statement. We try to expand
6535 using expand_omp_atomic_fetch_op. If that fails, we try to
6536 call expand_omp_atomic_pipeline, and if that fails too, the
6537 ultimate fallback is wrapping the operation in a mutex
6538 (expand_omp_atomic_mutex). REGION is the atomic region built
6539 by build_omp_regions_1(). */
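/* A rough sketch of the size handling below: INDEX is the log2 of the
   operand size in bytes, so a 4-byte int gives INDEX == 2 and selects the
   _4 family of builtins.  Only power-of-two sizes up to 16 bytes
   (INDEX <= 4) with sufficient alignment are eligible for the
   builtin-based strategies; everything else drops straight to the mutex
   fallback.  */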
6540
6541 static void
6542 expand_omp_atomic (struct omp_region *region)
6543 {
6544 basic_block load_bb = region->entry, store_bb = region->exit;
6545 gomp_atomic_load *load = as_a <gomp_atomic_load *> (last_stmt (load_bb));
6546 gomp_atomic_store *store = as_a <gomp_atomic_store *> (last_stmt (store_bb));
6547 tree loaded_val = gimple_omp_atomic_load_lhs (load);
6548 tree addr = gimple_omp_atomic_load_rhs (load);
6549 tree stored_val = gimple_omp_atomic_store_val (store);
6550 tree type = TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (addr)));
6551 HOST_WIDE_INT index;
6552
6553 /* Make sure the type is one of the supported sizes. */
6554 index = tree_to_uhwi (TYPE_SIZE_UNIT (type));
6555 index = exact_log2 (index);
6556 if (index >= 0 && index <= 4)
6557 {
6558 unsigned int align = TYPE_ALIGN_UNIT (type);
6559
6560 /* __sync builtins require strict data alignment. */
6561 if (exact_log2 (align) >= index)
6562 {
6563 /* Atomic load. */
6564 if (loaded_val == stored_val
6565 && (GET_MODE_CLASS (TYPE_MODE (type)) == MODE_INT
6566 || GET_MODE_CLASS (TYPE_MODE (type)) == MODE_FLOAT)
6567 && GET_MODE_BITSIZE (TYPE_MODE (type)) <= BITS_PER_WORD
6568 && expand_omp_atomic_load (load_bb, addr, loaded_val, index))
6569 return;
6570
6571 /* Atomic store. */
6572 if ((GET_MODE_CLASS (TYPE_MODE (type)) == MODE_INT
6573 || GET_MODE_CLASS (TYPE_MODE (type)) == MODE_FLOAT)
6574 && GET_MODE_BITSIZE (TYPE_MODE (type)) <= BITS_PER_WORD
6575 && store_bb == single_succ (load_bb)
6576 && first_stmt (store_bb) == store
6577 && expand_omp_atomic_store (load_bb, addr, loaded_val,
6578 stored_val, index))
6579 return;
6580
6581 /* When possible, use specialized atomic update functions. */
6582 if ((INTEGRAL_TYPE_P (type) || POINTER_TYPE_P (type))
6583 && store_bb == single_succ (load_bb)
6584 && expand_omp_atomic_fetch_op (load_bb, addr,
6585 loaded_val, stored_val, index))
6586 return;
6587
6588 /* If we don't have specialized __sync builtins, try and implement
6589 as a compare and swap loop. */
6590 if (expand_omp_atomic_pipeline (load_bb, store_bb, addr,
6591 loaded_val, stored_val, index))
6592 return;
6593 }
6594 }
6595
6596 /* The ultimate fallback is wrapping the operation in a mutex. */
6597 expand_omp_atomic_mutex (load_bb, store_bb, addr, loaded_val, stored_val);
6598 }
6599
6600 /* Mark the loops inside the kernels region starting at REGION_ENTRY and ending
6601 at REGION_EXIT. */
6602
6603 static void
6604 mark_loops_in_oacc_kernels_region (basic_block region_entry,
6605 basic_block region_exit)
6606 {
6607 struct loop *outer = region_entry->loop_father;
6608 gcc_assert (region_exit == NULL || outer == region_exit->loop_father);
6609
6610 /* Don't parallelize the kernels region if it contains more than one outer
6611 loop. */
6612 unsigned int nr_outer_loops = 0;
6613 struct loop *single_outer = NULL;
6614 for (struct loop *loop = outer->inner; loop != NULL; loop = loop->next)
6615 {
6616 gcc_assert (loop_outer (loop) == outer);
6617
6618 if (!dominated_by_p (CDI_DOMINATORS, loop->header, region_entry))
6619 continue;
6620
6621 if (region_exit != NULL
6622 && dominated_by_p (CDI_DOMINATORS, loop->header, region_exit))
6623 continue;
6624
6625 nr_outer_loops++;
6626 single_outer = loop;
6627 }
6628 if (nr_outer_loops != 1)
6629 return;
6630
6631 for (struct loop *loop = single_outer->inner;
6632 loop != NULL;
6633 loop = loop->inner)
6634 if (loop->next)
6635 return;
6636
6637 /* Mark the loops in the region. */
6638 for (struct loop *loop = single_outer; loop != NULL; loop = loop->inner)
6639 loop->in_oacc_kernels_region = true;
6640 }
6641
6642 /* Types used to pass grid and workgroup sizes to kernel invocation. */
6643
6644 struct GTY(()) grid_launch_attributes_trees
6645 {
6646 tree kernel_dim_array_type;
6647 tree kernel_lattrs_dimnum_decl;
6648 tree kernel_lattrs_grid_decl;
6649 tree kernel_lattrs_group_decl;
6650 tree kernel_launch_attributes_type;
6651 };
6652
6653 static GTY(()) struct grid_launch_attributes_trees *grid_attr_trees;
6654
6655 /* Create types used to pass kernel launch attributes to target. */
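/* Roughly, the record built below corresponds to the following layout
   (a sketch; the exact type is constructed programmatically):

     struct __gomp_kernel_launch_attributes
     {
       uint32_t ndim;
       uint32_t grid_size[3];
       uint32_t group_size[3];
     };  */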
6656
6657 static void
6658 grid_create_kernel_launch_attr_types (void)
6659 {
6660 if (grid_attr_trees)
6661 return;
6662 grid_attr_trees = ggc_alloc <grid_launch_attributes_trees> ();
6663
6664 tree dim_arr_index_type
6665 = build_index_type (build_int_cst (integer_type_node, 2));
6666 grid_attr_trees->kernel_dim_array_type
6667 = build_array_type (uint32_type_node, dim_arr_index_type);
6668
6669 grid_attr_trees->kernel_launch_attributes_type = make_node (RECORD_TYPE);
6670 grid_attr_trees->kernel_lattrs_dimnum_decl
6671 = build_decl (BUILTINS_LOCATION, FIELD_DECL, get_identifier ("ndim"),
6672 uint32_type_node);
6673 DECL_CHAIN (grid_attr_trees->kernel_lattrs_dimnum_decl) = NULL_TREE;
6674
6675 grid_attr_trees->kernel_lattrs_grid_decl
6676 = build_decl (BUILTINS_LOCATION, FIELD_DECL, get_identifier ("grid_size"),
6677 grid_attr_trees->kernel_dim_array_type);
6678 DECL_CHAIN (grid_attr_trees->kernel_lattrs_grid_decl)
6679 = grid_attr_trees->kernel_lattrs_dimnum_decl;
6680 grid_attr_trees->kernel_lattrs_group_decl
6681 = build_decl (BUILTINS_LOCATION, FIELD_DECL, get_identifier ("group_size"),
6682 grid_attr_trees->kernel_dim_array_type);
6683 DECL_CHAIN (grid_attr_trees->kernel_lattrs_group_decl)
6684 = grid_attr_trees->kernel_lattrs_grid_decl;
6685 finish_builtin_struct (grid_attr_trees->kernel_launch_attributes_type,
6686 "__gomp_kernel_launch_attributes",
6687 grid_attr_trees->kernel_lattrs_group_decl, NULL_TREE);
6688 }
6689
6690 /* Insert before the current statement in GSI a store of VALUE into element
6691 INDEX of the array field FLD_DECL (of type kernel_dim_array_type) of
6692 RANGE_VAR. VALUE must be of type uint32_type_node. */
6693
6694 static void
6695 grid_insert_store_range_dim (gimple_stmt_iterator *gsi, tree range_var,
6696 tree fld_decl, int index, tree value)
6697 {
6698 tree ref = build4 (ARRAY_REF, uint32_type_node,
6699 build3 (COMPONENT_REF,
6700 grid_attr_trees->kernel_dim_array_type,
6701 range_var, fld_decl, NULL_TREE),
6702 build_int_cst (integer_type_node, index),
6703 NULL_TREE, NULL_TREE);
6704 gsi_insert_before (gsi, gimple_build_assign (ref, value), GSI_SAME_STMT);
6705 }
6706
6707 /* Return a tree representation of a pointer to a structure with grid and
6708 work-group size information. Statements filling that information will be
6709 inserted before GSI; TGT_STMT is the target statement which has the
6710 necessary information in it. */
6711
6712 static tree
6713 grid_get_kernel_launch_attributes (gimple_stmt_iterator *gsi,
6714 gomp_target *tgt_stmt)
6715 {
6716 grid_create_kernel_launch_attr_types ();
6717 tree lattrs = create_tmp_var (grid_attr_trees->kernel_launch_attributes_type,
6718 "__kernel_launch_attrs");
6719
6720 unsigned max_dim = 0;
6721 for (tree clause = gimple_omp_target_clauses (tgt_stmt);
6722 clause;
6723 clause = OMP_CLAUSE_CHAIN (clause))
6724 {
6725 if (OMP_CLAUSE_CODE (clause) != OMP_CLAUSE__GRIDDIM_)
6726 continue;
6727
6728 unsigned dim = OMP_CLAUSE__GRIDDIM__DIMENSION (clause);
6729 max_dim = MAX (dim, max_dim);
6730
6731 grid_insert_store_range_dim (gsi, lattrs,
6732 grid_attr_trees->kernel_lattrs_grid_decl,
6733 dim, OMP_CLAUSE__GRIDDIM__SIZE (clause));
6734 grid_insert_store_range_dim (gsi, lattrs,
6735 grid_attr_trees->kernel_lattrs_group_decl,
6736 dim, OMP_CLAUSE__GRIDDIM__GROUP (clause));
6737 }
6738
6739 tree dimref = build3 (COMPONENT_REF, uint32_type_node, lattrs,
6740 grid_attr_trees->kernel_lattrs_dimnum_decl, NULL_TREE);
6741 gcc_checking_assert (max_dim <= 2);
6742 tree dimensions = build_int_cstu (uint32_type_node, max_dim + 1);
6743 gsi_insert_before (gsi, gimple_build_assign (dimref, dimensions),
6744 GSI_SAME_STMT);
6745 TREE_ADDRESSABLE (lattrs) = 1;
6746 return build_fold_addr_expr (lattrs);
6747 }
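
/* For illustration: for a kernel gridified over a single dimension, the
   statements inserted above amount roughly to

       __kernel_launch_attrs.grid_size[0]  = <griddim size 0>;
       __kernel_launch_attrs.group_size[0] = <griddim group 0>;
       __kernel_launch_attrs.ndim          = 1;

   and the function returns &__kernel_launch_attrs.  */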
6748
6749 /* Build target argument identifier from the DEVICE identifier, value
6750 identifier ID and whether the element also has a SUBSEQUENT_PARAM. */
6751
6752 static tree
6753 get_target_argument_identifier_1 (int device, bool subseqent_param, int id)
6754 {
6755 tree t = build_int_cst (integer_type_node, device);
6756 if (subseqent_param)
6757 t = fold_build2 (BIT_IOR_EXPR, integer_type_node, t,
6758 build_int_cst (integer_type_node,
6759 GOMP_TARGET_ARG_SUBSEQUENT_PARAM));
6760 t = fold_build2 (BIT_IOR_EXPR, integer_type_node, t,
6761 build_int_cst (integer_type_node, id));
6762 return t;
6763 }
6764
6765 /* Like above, but return it in a type that can be directly stored as an
6766 element of the argument array.  */
6767
6768 static tree
6769 get_target_argument_identifier (int device, bool subseqent_param, int id)
6770 {
6771 tree t = get_target_argument_identifier_1 (device, subseqent_param, id);
6772 return fold_convert (ptr_type_node, t);
6773 }
6774
6775 /* Return a target argument consisting of DEVICE identifier, value identifier
6776 ID, and the actual VALUE. */
6777
6778 static tree
6779 get_target_argument_value (gimple_stmt_iterator *gsi, int device, int id,
6780 tree value)
6781 {
6782 tree t = fold_build2 (LSHIFT_EXPR, integer_type_node,
6783 fold_convert (integer_type_node, value),
6784 build_int_cst (unsigned_type_node,
6785 GOMP_TARGET_ARG_VALUE_SHIFT));
6786 t = fold_build2 (BIT_IOR_EXPR, integer_type_node, t,
6787 get_target_argument_identifier_1 (device, false, id));
6788 t = fold_convert (ptr_type_node, t);
6789 return force_gimple_operand_gsi (gsi, t, true, NULL, true, GSI_SAME_STMT);
6790 }
6791
6792 /* If VALUE is an integer constant greater than -2^15 and smaller than 2^15,
6793 push one argument to ARGS with the DEVICE, ID and VALUE embedded in it;
6794 otherwise push an identifier (with DEVICE and ID) and the VALUE in two
6795 arguments. */
6796
6797 static void
6798 push_target_argument_according_to_value (gimple_stmt_iterator *gsi, int device,
6799 int id, tree value, vec <tree> *args)
6800 {
6801 if (tree_fits_shwi_p (value)
6802 && tree_to_shwi (value) > -(1 << 15)
6803 && tree_to_shwi (value) < (1 << 15))
6804 args->quick_push (get_target_argument_value (gsi, device, id, value));
6805 else
6806 {
6807 args->quick_push (get_target_argument_identifier (device, true, id));
6808 value = fold_convert (ptr_type_node, value);
6809 value = force_gimple_operand_gsi (gsi, value, true, NULL, true,
6810 GSI_SAME_STMT);
6811 args->quick_push (value);
6812 }
6813 }
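
/* For illustration: the helpers above pack, into a single pointer-sized
   array element, the device selector in the low bits, the value identifier
   ID, the GOMP_TARGET_ARG_SUBSEQUENT_PARAM flag when the value follows in a
   separate element, and, for small constants, the value itself shifted left
   by GOMP_TARGET_ARG_VALUE_SHIFT.  A rough sketch of the one-element case:

       element = (value << GOMP_TARGET_ARG_VALUE_SHIFT) | device | id;

   and of the two-element case:

       element[0] = device | GOMP_TARGET_ARG_SUBSEQUENT_PARAM | id;
       element[1] = (uintptr_t) value;  */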
6814
6815 /* Create an array of arguments that is then passed to GOMP_target. */
6816
6817 static tree
6818 get_target_arguments (gimple_stmt_iterator *gsi, gomp_target *tgt_stmt)
6819 {
6820 auto_vec <tree, 6> args;
6821 tree clauses = gimple_omp_target_clauses (tgt_stmt);
6822 tree t, c = omp_find_clause (clauses, OMP_CLAUSE_NUM_TEAMS);
6823 if (c)
6824 t = OMP_CLAUSE_NUM_TEAMS_EXPR (c);
6825 else
6826 t = integer_minus_one_node;
6827 push_target_argument_according_to_value (gsi, GOMP_TARGET_ARG_DEVICE_ALL,
6828 GOMP_TARGET_ARG_NUM_TEAMS, t, &args);
6829
6830 c = omp_find_clause (clauses, OMP_CLAUSE_THREAD_LIMIT);
6831 if (c)
6832 t = OMP_CLAUSE_THREAD_LIMIT_EXPR (c);
6833 else
6834 t = integer_minus_one_node;
6835 push_target_argument_according_to_value (gsi, GOMP_TARGET_ARG_DEVICE_ALL,
6836 GOMP_TARGET_ARG_THREAD_LIMIT, t,
6837 &args);
6838
6839 /* Add HSA-specific grid sizes, if available. */
6840 if (omp_find_clause (gimple_omp_target_clauses (tgt_stmt),
6841 OMP_CLAUSE__GRIDDIM_))
6842 {
6843 int id = GOMP_TARGET_ARG_HSA_KERNEL_ATTRIBUTES;
6844 t = get_target_argument_identifier (GOMP_DEVICE_HSA, true, id);
6845 args.quick_push (t);
6846 args.quick_push (grid_get_kernel_launch_attributes (gsi, tgt_stmt));
6847 }
6848
6849 /* Produce more, perhaps device specific, arguments here. */
6850
6851 tree argarray = create_tmp_var (build_array_type_nelts (ptr_type_node,
6852 args.length () + 1),
6853 ".omp_target_args");
6854 for (unsigned i = 0; i < args.length (); i++)
6855 {
6856 tree ref = build4 (ARRAY_REF, ptr_type_node, argarray,
6857 build_int_cst (integer_type_node, i),
6858 NULL_TREE, NULL_TREE);
6859 gsi_insert_before (gsi, gimple_build_assign (ref, args[i]),
6860 GSI_SAME_STMT);
6861 }
6862 tree ref = build4 (ARRAY_REF, ptr_type_node, argarray,
6863 build_int_cst (integer_type_node, args.length ()),
6864 NULL_TREE, NULL_TREE);
6865 gsi_insert_before (gsi, gimple_build_assign (ref, null_pointer_node),
6866 GSI_SAME_STMT);
6867 TREE_ADDRESSABLE (argarray) = 1;
6868 return build_fold_addr_expr (argarray);
6869 }
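
/* For illustration: for a typical 'omp target' region the array built above
   therefore looks roughly like

       .omp_target_args = { <num_teams arg(s)>, <thread_limit arg(s)>,
                            <HSA launch attributes, if gridified>, NULL };

   where each <...> is either one packed element or an identifier/value pair,
   as described above push_target_argument_according_to_value.  */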
6870
6871 /* Expand the GIMPLE_OMP_TARGET starting at REGION. */
6872
6873 static void
6874 expand_omp_target (struct omp_region *region)
6875 {
6876 basic_block entry_bb, exit_bb, new_bb;
6877 struct function *child_cfun;
6878 tree child_fn, block, t;
6879 gimple_stmt_iterator gsi;
6880 gomp_target *entry_stmt;
6881 gimple *stmt;
6882 edge e;
6883 bool offloaded, data_region;
6884
6885 entry_stmt = as_a <gomp_target *> (last_stmt (region->entry));
6886 new_bb = region->entry;
6887
6888 offloaded = is_gimple_omp_offloaded (entry_stmt);
6889 switch (gimple_omp_target_kind (entry_stmt))
6890 {
6891 case GF_OMP_TARGET_KIND_REGION:
6892 case GF_OMP_TARGET_KIND_UPDATE:
6893 case GF_OMP_TARGET_KIND_ENTER_DATA:
6894 case GF_OMP_TARGET_KIND_EXIT_DATA:
6895 case GF_OMP_TARGET_KIND_OACC_PARALLEL:
6896 case GF_OMP_TARGET_KIND_OACC_KERNELS:
6897 case GF_OMP_TARGET_KIND_OACC_UPDATE:
6898 case GF_OMP_TARGET_KIND_OACC_ENTER_EXIT_DATA:
6899 case GF_OMP_TARGET_KIND_OACC_DECLARE:
6900 data_region = false;
6901 break;
6902 case GF_OMP_TARGET_KIND_DATA:
6903 case GF_OMP_TARGET_KIND_OACC_DATA:
6904 case GF_OMP_TARGET_KIND_OACC_HOST_DATA:
6905 data_region = true;
6906 break;
6907 default:
6908 gcc_unreachable ();
6909 }
6910
6911 child_fn = NULL_TREE;
6912 child_cfun = NULL;
6913 if (offloaded)
6914 {
6915 child_fn = gimple_omp_target_child_fn (entry_stmt);
6916 child_cfun = DECL_STRUCT_FUNCTION (child_fn);
6917 }
6918
6919 /* Supported by expand_omp_taskreg, but not here. */
6920 if (child_cfun != NULL)
6921 gcc_checking_assert (!child_cfun->cfg);
6922 gcc_checking_assert (!gimple_in_ssa_p (cfun));
6923
6924 entry_bb = region->entry;
6925 exit_bb = region->exit;
6926
6927 if (gimple_omp_target_kind (entry_stmt) == GF_OMP_TARGET_KIND_OACC_KERNELS)
6928 mark_loops_in_oacc_kernels_region (region->entry, region->exit);
6929
6930 if (offloaded)
6931 {
6932 unsigned srcidx, dstidx, num;
6933
6934 /* If the offloading region needs data sent from the parent
6935 function, then the very first statement (except possible
6936 tree profile counter updates) of the offloading body
6937 is a copy assignment .OMP_DATA_I = &.OMP_DATA_O. Since
6938 &.OMP_DATA_O is passed as an argument to the child function,
6939 we need to replace it with the argument as seen by the child
6940 function.
6941
6942 In most cases, this will end up being the identity assignment
6943 .OMP_DATA_I = .OMP_DATA_I. However, if the offloading body had
6944 a function call that has been inlined, the original PARM_DECL
6945 .OMP_DATA_I may have been converted into a different local
6946 variable, in which case we need to keep the assignment.  */
6947 tree data_arg = gimple_omp_target_data_arg (entry_stmt);
6948 if (data_arg)
6949 {
6950 basic_block entry_succ_bb = single_succ (entry_bb);
6951 gimple_stmt_iterator gsi;
6952 tree arg;
6953 gimple *tgtcopy_stmt = NULL;
6954 tree sender = TREE_VEC_ELT (data_arg, 0);
6955
6956 for (gsi = gsi_start_bb (entry_succ_bb); ; gsi_next (&gsi))
6957 {
6958 gcc_assert (!gsi_end_p (gsi));
6959 stmt = gsi_stmt (gsi);
6960 if (gimple_code (stmt) != GIMPLE_ASSIGN)
6961 continue;
6962
6963 if (gimple_num_ops (stmt) == 2)
6964 {
6965 tree arg = gimple_assign_rhs1 (stmt);
6966
6967 /* We're ignoring the subcode because we're
6968 effectively doing a STRIP_NOPS. */
6969
6970 if (TREE_CODE (arg) == ADDR_EXPR
6971 && TREE_OPERAND (arg, 0) == sender)
6972 {
6973 tgtcopy_stmt = stmt;
6974 break;
6975 }
6976 }
6977 }
6978
6979 gcc_assert (tgtcopy_stmt != NULL);
6980 arg = DECL_ARGUMENTS (child_fn);
6981
6982 gcc_assert (gimple_assign_lhs (tgtcopy_stmt) == arg);
6983 gsi_remove (&gsi, true);
6984 }
6985
6986 /* Declare local variables needed in CHILD_CFUN. */
6987 block = DECL_INITIAL (child_fn);
6988 BLOCK_VARS (block) = vec2chain (child_cfun->local_decls);
6989 /* The gimplifier could record temporaries in the offloading block
6990 rather than in the containing function's local_decls chain,
6991 which would mean cgraph missed finalizing them. Do it now. */
6992 for (t = BLOCK_VARS (block); t; t = DECL_CHAIN (t))
6993 if (VAR_P (t) && TREE_STATIC (t) && !DECL_EXTERNAL (t))
6994 varpool_node::finalize_decl (t);
6995 DECL_SAVED_TREE (child_fn) = NULL;
6996 /* We'll create a CFG for child_fn, so no gimple body is needed. */
6997 gimple_set_body (child_fn, NULL);
6998 TREE_USED (block) = 1;
6999
7000 /* Reset DECL_CONTEXT on function arguments. */
7001 for (t = DECL_ARGUMENTS (child_fn); t; t = DECL_CHAIN (t))
7002 DECL_CONTEXT (t) = child_fn;
7003
7004 /* Split ENTRY_BB at GIMPLE_*,
7005 so that it can be moved to the child function. */
7006 gsi = gsi_last_bb (entry_bb);
7007 stmt = gsi_stmt (gsi);
7008 gcc_assert (stmt
7009 && gimple_code (stmt) == gimple_code (entry_stmt));
7010 e = split_block (entry_bb, stmt);
7011 gsi_remove (&gsi, true);
7012 entry_bb = e->dest;
7013 single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
7014
7015 /* Convert GIMPLE_OMP_RETURN into a RETURN_EXPR. */
7016 if (exit_bb)
7017 {
7018 gsi = gsi_last_bb (exit_bb);
7019 gcc_assert (!gsi_end_p (gsi)
7020 && gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_RETURN);
7021 stmt = gimple_build_return (NULL);
7022 gsi_insert_after (&gsi, stmt, GSI_SAME_STMT);
7023 gsi_remove (&gsi, true);
7024 }
7025
7026 /* Make sure to generate early debug for the function before
7027 outlining anything. */
7028 if (! gimple_in_ssa_p (cfun))
7029 (*debug_hooks->early_global_decl) (cfun->decl);
7030
7031 /* Move the offloading region into CHILD_CFUN. */
7032
7033 block = gimple_block (entry_stmt);
7034
7035 new_bb = move_sese_region_to_fn (child_cfun, entry_bb, exit_bb, block);
7036 if (exit_bb)
7037 single_succ_edge (new_bb)->flags = EDGE_FALLTHRU;
7038 /* When the OMP expansion process cannot guarantee an up-to-date
7039 loop tree, arrange for the child function to fix up loops.  */
7040 if (loops_state_satisfies_p (LOOPS_NEED_FIXUP))
7041 child_cfun->x_current_loops->state |= LOOPS_NEED_FIXUP;
7042
7043 /* Remove non-local VAR_DECLs from child_cfun->local_decls list. */
7044 num = vec_safe_length (child_cfun->local_decls);
7045 for (srcidx = 0, dstidx = 0; srcidx < num; srcidx++)
7046 {
7047 t = (*child_cfun->local_decls)[srcidx];
7048 if (DECL_CONTEXT (t) == cfun->decl)
7049 continue;
7050 if (srcidx != dstidx)
7051 (*child_cfun->local_decls)[dstidx] = t;
7052 dstidx++;
7053 }
7054 if (dstidx != num)
7055 vec_safe_truncate (child_cfun->local_decls, dstidx);
7056
7057 /* Inform the callgraph about the new function. */
7058 child_cfun->curr_properties = cfun->curr_properties;
7059 child_cfun->has_simduid_loops |= cfun->has_simduid_loops;
7060 child_cfun->has_force_vectorize_loops |= cfun->has_force_vectorize_loops;
7061 cgraph_node *node = cgraph_node::get_create (child_fn);
7062 node->parallelized_function = 1;
7063 cgraph_node::add_new_function (child_fn, true);
7064
7065 /* Add the new function to the offload table. */
7066 if (ENABLE_OFFLOADING)
7067 vec_safe_push (offload_funcs, child_fn);
7068
7069 bool need_asm = DECL_ASSEMBLER_NAME_SET_P (current_function_decl)
7070 && !DECL_ASSEMBLER_NAME_SET_P (child_fn);
7071
7072 /* Fix the callgraph edges for child_cfun. Those for cfun will be
7073 fixed in a following pass. */
7074 push_cfun (child_cfun);
7075 if (need_asm)
7076 assign_assembler_name_if_needed (child_fn);
7077 cgraph_edge::rebuild_edges ();
7078
7079 /* Some EH regions might become dead, see PR34608. If
7080 pass_cleanup_cfg isn't the first pass to happen with the
7081 new child, these dead EH edges might cause problems.
7082 Clean them up now. */
7083 if (flag_exceptions)
7084 {
7085 basic_block bb;
7086 bool changed = false;
7087
7088 FOR_EACH_BB_FN (bb, cfun)
7089 changed |= gimple_purge_dead_eh_edges (bb);
7090 if (changed)
7091 cleanup_tree_cfg ();
7092 }
7093 if (flag_checking && !loops_state_satisfies_p (LOOPS_NEED_FIXUP))
7094 verify_loop_structure ();
7095 pop_cfun ();
7096
7097 if (dump_file && !gimple_in_ssa_p (cfun))
7098 {
7099 omp_any_child_fn_dumped = true;
7100 dump_function_header (dump_file, child_fn, dump_flags);
7101 dump_function_to_file (child_fn, dump_file, dump_flags);
7102 }
7103 }
7104
7105 /* Emit a library call to launch the offloading region, or do data
7106 transfers. */
7107 tree t1, t2, t3, t4, device, cond, depend, c, clauses;
7108 enum built_in_function start_ix;
7109 location_t clause_loc;
7110 unsigned int flags_i = 0;
7111 bool oacc_kernels_p = false;
7112
7113 switch (gimple_omp_target_kind (entry_stmt))
7114 {
7115 case GF_OMP_TARGET_KIND_REGION:
7116 start_ix = BUILT_IN_GOMP_TARGET;
7117 break;
7118 case GF_OMP_TARGET_KIND_DATA:
7119 start_ix = BUILT_IN_GOMP_TARGET_DATA;
7120 break;
7121 case GF_OMP_TARGET_KIND_UPDATE:
7122 start_ix = BUILT_IN_GOMP_TARGET_UPDATE;
7123 break;
7124 case GF_OMP_TARGET_KIND_ENTER_DATA:
7125 start_ix = BUILT_IN_GOMP_TARGET_ENTER_EXIT_DATA;
7126 break;
7127 case GF_OMP_TARGET_KIND_EXIT_DATA:
7128 start_ix = BUILT_IN_GOMP_TARGET_ENTER_EXIT_DATA;
7129 flags_i |= GOMP_TARGET_FLAG_EXIT_DATA;
7130 break;
7131 case GF_OMP_TARGET_KIND_OACC_KERNELS:
7132 oacc_kernels_p = true;
7133 /* FALLTHROUGH */
7134 case GF_OMP_TARGET_KIND_OACC_PARALLEL:
7135 start_ix = BUILT_IN_GOACC_PARALLEL;
7136 break;
7137 case GF_OMP_TARGET_KIND_OACC_DATA:
7138 case GF_OMP_TARGET_KIND_OACC_HOST_DATA:
7139 start_ix = BUILT_IN_GOACC_DATA_START;
7140 break;
7141 case GF_OMP_TARGET_KIND_OACC_UPDATE:
7142 start_ix = BUILT_IN_GOACC_UPDATE;
7143 break;
7144 case GF_OMP_TARGET_KIND_OACC_ENTER_EXIT_DATA:
7145 start_ix = BUILT_IN_GOACC_ENTER_EXIT_DATA;
7146 break;
7147 case GF_OMP_TARGET_KIND_OACC_DECLARE:
7148 start_ix = BUILT_IN_GOACC_DECLARE;
7149 break;
7150 default:
7151 gcc_unreachable ();
7152 }
7153
7154 clauses = gimple_omp_target_clauses (entry_stmt);
7155
7156 /* By default, the value of DEVICE is GOMP_DEVICE_ICV (let the runtime
7157 library choose) and there is no conditional. */
7158 cond = NULL_TREE;
7159 device = build_int_cst (integer_type_node, GOMP_DEVICE_ICV);
7160
7161 c = omp_find_clause (clauses, OMP_CLAUSE_IF);
7162 if (c)
7163 cond = OMP_CLAUSE_IF_EXPR (c);
7164
7165 c = omp_find_clause (clauses, OMP_CLAUSE_DEVICE);
7166 if (c)
7167 {
7168 /* Even if we pass it to all library function calls, it is currently only
7169 defined/used for the OpenMP target ones. */
7170 gcc_checking_assert (start_ix == BUILT_IN_GOMP_TARGET
7171 || start_ix == BUILT_IN_GOMP_TARGET_DATA
7172 || start_ix == BUILT_IN_GOMP_TARGET_UPDATE
7173 || start_ix == BUILT_IN_GOMP_TARGET_ENTER_EXIT_DATA);
7174
7175 device = OMP_CLAUSE_DEVICE_ID (c);
7176 clause_loc = OMP_CLAUSE_LOCATION (c);
7177 }
7178 else
7179 clause_loc = gimple_location (entry_stmt);
7180
7181 c = omp_find_clause (clauses, OMP_CLAUSE_NOWAIT);
7182 if (c)
7183 flags_i |= GOMP_TARGET_FLAG_NOWAIT;
7184
7185 /* Ensure 'device' is of the correct type. */
7186 device = fold_convert_loc (clause_loc, integer_type_node, device);
7187
7188 /* If we found the clause 'if (cond)', build
7189 (cond ? device : GOMP_DEVICE_HOST_FALLBACK). */
7190 if (cond)
7191 {
7192 cond = gimple_boolify (cond);
7193
7194 basic_block cond_bb, then_bb, else_bb;
7195 edge e;
7196 tree tmp_var;
7197
7198 tmp_var = create_tmp_var (TREE_TYPE (device));
7199 if (offloaded)
7200 e = split_block_after_labels (new_bb);
7201 else
7202 {
7203 gsi = gsi_last_bb (new_bb);
7204 gsi_prev (&gsi);
7205 e = split_block (new_bb, gsi_stmt (gsi));
7206 }
7207 cond_bb = e->src;
7208 new_bb = e->dest;
7209 remove_edge (e);
7210
7211 then_bb = create_empty_bb (cond_bb);
7212 else_bb = create_empty_bb (then_bb);
7213 set_immediate_dominator (CDI_DOMINATORS, then_bb, cond_bb);
7214 set_immediate_dominator (CDI_DOMINATORS, else_bb, cond_bb);
7215
7216 stmt = gimple_build_cond_empty (cond);
7217 gsi = gsi_last_bb (cond_bb);
7218 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
7219
7220 gsi = gsi_start_bb (then_bb);
7221 stmt = gimple_build_assign (tmp_var, device);
7222 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
7223
7224 gsi = gsi_start_bb (else_bb);
7225 stmt = gimple_build_assign (tmp_var,
7226 build_int_cst (integer_type_node,
7227 GOMP_DEVICE_HOST_FALLBACK));
7228 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
7229
7230 make_edge (cond_bb, then_bb, EDGE_TRUE_VALUE);
7231 make_edge (cond_bb, else_bb, EDGE_FALSE_VALUE);
7232 add_bb_to_loop (then_bb, cond_bb->loop_father);
7233 add_bb_to_loop (else_bb, cond_bb->loop_father);
7234 make_edge (then_bb, new_bb, EDGE_FALLTHRU);
7235 make_edge (else_bb, new_bb, EDGE_FALLTHRU);
7236
7237 device = tmp_var;
7238 gsi = gsi_last_bb (new_bb);
7239 }
7240 else
7241 {
7242 gsi = gsi_last_bb (new_bb);
7243 device = force_gimple_operand_gsi (&gsi, device, true, NULL_TREE,
7244 true, GSI_SAME_STMT);
7245 }
7246
7247 t = gimple_omp_target_data_arg (entry_stmt);
7248 if (t == NULL)
7249 {
7250 t1 = size_zero_node;
7251 t2 = build_zero_cst (ptr_type_node);
7252 t3 = t2;
7253 t4 = t2;
7254 }
7255 else
7256 {
7257 t1 = TYPE_MAX_VALUE (TYPE_DOMAIN (TREE_TYPE (TREE_VEC_ELT (t, 1))));
7258 t1 = size_binop (PLUS_EXPR, t1, size_int (1));
7259 t2 = build_fold_addr_expr (TREE_VEC_ELT (t, 0));
7260 t3 = build_fold_addr_expr (TREE_VEC_ELT (t, 1));
7261 t4 = build_fold_addr_expr (TREE_VEC_ELT (t, 2));
7262 }
7263
7264 gimple *g;
7265 bool tagging = false;
7266 /* The maximum number used by any start_ix, without varargs. */
7267 auto_vec<tree, 11> args;
7268 args.quick_push (device);
7269 if (offloaded)
7270 args.quick_push (build_fold_addr_expr (child_fn));
7271 args.quick_push (t1);
7272 args.quick_push (t2);
7273 args.quick_push (t3);
7274 args.quick_push (t4);
7275 switch (start_ix)
7276 {
7277 case BUILT_IN_GOACC_DATA_START:
7278 case BUILT_IN_GOACC_DECLARE:
7279 case BUILT_IN_GOMP_TARGET_DATA:
7280 break;
7281 case BUILT_IN_GOMP_TARGET:
7282 case BUILT_IN_GOMP_TARGET_UPDATE:
7283 case BUILT_IN_GOMP_TARGET_ENTER_EXIT_DATA:
7284 args.quick_push (build_int_cst (unsigned_type_node, flags_i));
7285 c = omp_find_clause (clauses, OMP_CLAUSE_DEPEND);
7286 if (c)
7287 depend = OMP_CLAUSE_DECL (c);
7288 else
7289 depend = build_int_cst (ptr_type_node, 0);
7290 args.quick_push (depend);
7291 if (start_ix == BUILT_IN_GOMP_TARGET)
7292 args.quick_push (get_target_arguments (&gsi, entry_stmt));
7293 break;
7294 case BUILT_IN_GOACC_PARALLEL:
7295 {
7296 oacc_set_fn_attrib (child_fn, clauses, oacc_kernels_p, &args);
7297 tagging = true;
7298 }
7299 /* FALLTHRU */
7300 case BUILT_IN_GOACC_ENTER_EXIT_DATA:
7301 case BUILT_IN_GOACC_UPDATE:
7302 {
7303 tree t_async = NULL_TREE;
7304
7305 /* If present, use the value specified by the respective
7306 clause, making sure that is of the correct type. */
7307 c = omp_find_clause (clauses, OMP_CLAUSE_ASYNC);
7308 if (c)
7309 t_async = fold_convert_loc (OMP_CLAUSE_LOCATION (c),
7310 integer_type_node,
7311 OMP_CLAUSE_ASYNC_EXPR (c));
7312 else if (!tagging)
7313 /* Default values for t_async. */
7314 t_async = fold_convert_loc (gimple_location (entry_stmt),
7315 integer_type_node,
7316 build_int_cst (integer_type_node,
7317 GOMP_ASYNC_SYNC));
7318 if (tagging && t_async)
7319 {
7320 unsigned HOST_WIDE_INT i_async = GOMP_LAUNCH_OP_MAX;
7321
7322 if (TREE_CODE (t_async) == INTEGER_CST)
7323 {
7324 /* See if we can pack the async arg into the tag's
7325 operand. */
7326 i_async = TREE_INT_CST_LOW (t_async);
7327 if (i_async < GOMP_LAUNCH_OP_MAX)
7328 t_async = NULL_TREE;
7329 else
7330 i_async = GOMP_LAUNCH_OP_MAX;
7331 }
7332 args.safe_push (oacc_launch_pack (GOMP_LAUNCH_ASYNC, NULL_TREE,
7333 i_async));
7334 }
7335 if (t_async)
7336 args.safe_push (t_async);
7337
7338 /* Save the argument index, and ... */
7339 unsigned t_wait_idx = args.length ();
7340 unsigned num_waits = 0;
7341 c = omp_find_clause (clauses, OMP_CLAUSE_WAIT);
7342 if (!tagging || c)
7343 /* ... push a placeholder. */
7344 args.safe_push (integer_zero_node);
7345
7346 for (; c; c = OMP_CLAUSE_CHAIN (c))
7347 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_WAIT)
7348 {
7349 args.safe_push (fold_convert_loc (OMP_CLAUSE_LOCATION (c),
7350 integer_type_node,
7351 OMP_CLAUSE_WAIT_EXPR (c)));
7352 num_waits++;
7353 }
7354
7355 if (!tagging || num_waits)
7356 {
7357 tree len;
7358
7359 /* Now that we know the number, update the placeholder. */
7360 if (tagging)
7361 len = oacc_launch_pack (GOMP_LAUNCH_WAIT, NULL_TREE, num_waits);
7362 else
7363 len = build_int_cst (integer_type_node, num_waits);
7364 len = fold_convert_loc (gimple_location (entry_stmt),
7365 unsigned_type_node, len);
7366 args[t_wait_idx] = len;
7367 }
7368 }
7369 break;
7370 default:
7371 gcc_unreachable ();
7372 }
7373 if (tagging)
7374 /* Push terminal marker - zero. */
7375 args.safe_push (oacc_launch_pack (0, NULL_TREE, 0));
7376
7377 g = gimple_build_call_vec (builtin_decl_explicit (start_ix), args);
7378 gimple_set_location (g, gimple_location (entry_stmt));
7379 gsi_insert_before (&gsi, g, GSI_SAME_STMT);
7380 if (!offloaded)
7381 {
7382 g = gsi_stmt (gsi);
7383 gcc_assert (g && gimple_code (g) == GIMPLE_OMP_TARGET);
7384 gsi_remove (&gsi, true);
7385 }
7386 if (data_region && region->exit)
7387 {
7388 gsi = gsi_last_bb (region->exit);
7389 g = gsi_stmt (gsi);
7390 gcc_assert (g && gimple_code (g) == GIMPLE_OMP_RETURN);
7391 gsi_remove (&gsi, true);
7392 }
7393 }
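
/* For illustration: for a plain 'omp target' region (start_ix ==
   BUILT_IN_GOMP_TARGET) the call emitted above has roughly the shape

       GOMP_target_ext (device, child_fn, map_count, hostaddrs, sizes,
                        kinds, flags, depend, .omp_target_args);

   assuming BUILT_IN_GOMP_TARGET resolves to libgomp's GOMP_target_ext entry
   point; the OpenACC and data/update variants receive the subset of these
   arguments pushed for them above.  */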
7394
7395 /* Expand KFOR loop as an HSA gridified kernel, i.e. as a body only with an
7396 iteration variable derived from the thread number.  INTRA_GROUP means this
7397 is an expansion of a loop iterating over work-items within a separate
7398 iteration over groups. */
7399
7400 static void
7401 grid_expand_omp_for_loop (struct omp_region *kfor, bool intra_group)
7402 {
7403 gimple_stmt_iterator gsi;
7404 gomp_for *for_stmt = as_a <gomp_for *> (last_stmt (kfor->entry));
7405 gcc_checking_assert (gimple_omp_for_kind (for_stmt)
7406 == GF_OMP_FOR_KIND_GRID_LOOP);
7407 size_t collapse = gimple_omp_for_collapse (for_stmt);
7408 struct omp_for_data_loop *loops
7409 = XALLOCAVEC (struct omp_for_data_loop,
7410 gimple_omp_for_collapse (for_stmt));
7411 struct omp_for_data fd;
7412
7413 remove_edge (BRANCH_EDGE (kfor->entry));
7414 basic_block body_bb = FALLTHRU_EDGE (kfor->entry)->dest;
7415
7416 gcc_assert (kfor->cont);
7417 omp_extract_for_data (for_stmt, &fd, loops);
7418
7419 gsi = gsi_start_bb (body_bb);
7420
7421 for (size_t dim = 0; dim < collapse; dim++)
7422 {
7423 tree type, itype;
7424 itype = type = TREE_TYPE (fd.loops[dim].v);
7425 if (POINTER_TYPE_P (type))
7426 itype = signed_type_for (type);
7427
7428 tree n1 = fd.loops[dim].n1;
7429 tree step = fd.loops[dim].step;
7430 n1 = force_gimple_operand_gsi (&gsi, fold_convert (type, n1),
7431 true, NULL_TREE, true, GSI_SAME_STMT);
7432 step = force_gimple_operand_gsi (&gsi, fold_convert (itype, step),
7433 true, NULL_TREE, true, GSI_SAME_STMT);
7434 tree threadid;
7435 if (gimple_omp_for_grid_group_iter (for_stmt))
7436 {
7437 gcc_checking_assert (!intra_group);
7438 threadid = build_call_expr (builtin_decl_explicit
7439 (BUILT_IN_HSA_WORKGROUPID), 1,
7440 build_int_cstu (unsigned_type_node, dim));
7441 }
7442 else if (intra_group)
7443 threadid = build_call_expr (builtin_decl_explicit
7444 (BUILT_IN_HSA_WORKITEMID), 1,
7445 build_int_cstu (unsigned_type_node, dim));
7446 else
7447 threadid = build_call_expr (builtin_decl_explicit
7448 (BUILT_IN_HSA_WORKITEMABSID), 1,
7449 build_int_cstu (unsigned_type_node, dim));
7450 threadid = fold_convert (itype, threadid);
7451 threadid = force_gimple_operand_gsi (&gsi, threadid, true, NULL_TREE,
7452 true, GSI_SAME_STMT);
7453
7454 tree startvar = fd.loops[dim].v;
7455 tree t = fold_build2 (MULT_EXPR, itype, threadid, step);
7456 if (POINTER_TYPE_P (type))
7457 t = fold_build_pointer_plus (n1, t);
7458 else
7459 t = fold_build2 (PLUS_EXPR, type, t, n1);
7460 t = fold_convert (type, t);
7461 t = force_gimple_operand_gsi (&gsi, t,
7462 DECL_P (startvar)
7463 && TREE_ADDRESSABLE (startvar),
7464 NULL_TREE, true, GSI_SAME_STMT);
7465 gassign *assign_stmt = gimple_build_assign (startvar, t);
7466 gsi_insert_before (&gsi, assign_stmt, GSI_SAME_STMT);
7467 }
7468 /* Remove the omp for statement. */
7469 gsi = gsi_last_bb (kfor->entry);
7470 gsi_remove (&gsi, true);
7471
7472 /* Remove the GIMPLE_OMP_CONTINUE statement. */
7473 gsi = gsi_last_bb (kfor->cont);
7474 gcc_assert (!gsi_end_p (gsi)
7475 && gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_CONTINUE);
7476 gsi_remove (&gsi, true);
7477
7478 /* Replace the GIMPLE_OMP_RETURN with a barrier, if necessary. */
7479 gsi = gsi_last_bb (kfor->exit);
7480 gcc_assert (!gsi_end_p (gsi)
7481 && gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_RETURN);
7482 if (intra_group)
7483 gsi_insert_before (&gsi, omp_build_barrier (NULL_TREE), GSI_SAME_STMT);
7484 gsi_remove (&gsi, true);
7485
7486 /* Fixup the much simpler CFG. */
7487 remove_edge (find_edge (kfor->cont, body_bb));
7488
7489 if (kfor->cont != body_bb)
7490 set_immediate_dominator (CDI_DOMINATORS, kfor->cont, body_bb);
7491 set_immediate_dominator (CDI_DOMINATORS, kfor->exit, kfor->cont);
7492 }
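
/* For illustration: the transformation above turns each collapsed dimension
   of the gridified loop, conceptually

       for (v = N1; v <cond> N2; v += STEP)
         BODY;

   into a straight-line computation of the iteration variable from the HSA
   work-item (or work-group) id,

       v = N1 + threadid * STEP;
       BODY;

   relying on the runtime launching one work-item per loop iteration.  */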
7493
7494 /* Structure passed to grid_remap_kernel_arg_accesses so that it can remap
7495 argument_decls. */
7496
7497 struct grid_arg_decl_map
7498 {
7499 tree old_arg;
7500 tree new_arg;
7501 };
7502
7503 /* Invoked through walk_gimple_op, will remap all PARM_DECLs to the ones
7504 pertaining to the kernel function.  */
7505
7506 static tree
7507 grid_remap_kernel_arg_accesses (tree *tp, int *walk_subtrees, void *data)
7508 {
7509 struct walk_stmt_info *wi = (struct walk_stmt_info *) data;
7510 struct grid_arg_decl_map *adm = (struct grid_arg_decl_map *) wi->info;
7511 tree t = *tp;
7512
7513 if (t == adm->old_arg)
7514 *tp = adm->new_arg;
7515 *walk_subtrees = !TYPE_P (t) && !DECL_P (t);
7516 return NULL_TREE;
7517 }
7518
7519 /* If the TARGET region contains a gridified kernel body 'for' loop, remove
7520 its region from TARGET and expand it in HSA gridified kernel fashion.  */
7521
7522 static void
7523 grid_expand_target_grid_body (struct omp_region *target)
7524 {
7525 if (!hsa_gen_requested_p ())
7526 return;
7527
7528 gomp_target *tgt_stmt = as_a <gomp_target *> (last_stmt (target->entry));
7529 struct omp_region **pp;
7530
7531 for (pp = &target->inner; *pp; pp = &(*pp)->next)
7532 if ((*pp)->type == GIMPLE_OMP_GRID_BODY)
7533 break;
7534
7535 struct omp_region *gpukernel = *pp;
7536
7537 tree orig_child_fndecl = gimple_omp_target_child_fn (tgt_stmt);
7538 if (!gpukernel)
7539 {
7540 /* HSA cannot handle OACC stuff. */
7541 if (gimple_omp_target_kind (tgt_stmt) != GF_OMP_TARGET_KIND_REGION)
7542 return;
7543 gcc_checking_assert (orig_child_fndecl);
7544 gcc_assert (!omp_find_clause (gimple_omp_target_clauses (tgt_stmt),
7545 OMP_CLAUSE__GRIDDIM_));
7546 cgraph_node *n = cgraph_node::get (orig_child_fndecl);
7547
7548 hsa_register_kernel (n);
7549 return;
7550 }
7551
7552 gcc_assert (omp_find_clause (gimple_omp_target_clauses (tgt_stmt),
7553 OMP_CLAUSE__GRIDDIM_));
7554 tree inside_block
7555 = gimple_block (first_stmt (single_succ (gpukernel->entry)));
7556 *pp = gpukernel->next;
7557 for (pp = &gpukernel->inner; *pp; pp = &(*pp)->next)
7558 if ((*pp)->type == GIMPLE_OMP_FOR)
7559 break;
7560
7561 struct omp_region *kfor = *pp;
7562 gcc_assert (kfor);
7563 gomp_for *for_stmt = as_a <gomp_for *> (last_stmt (kfor->entry));
7564 gcc_assert (gimple_omp_for_kind (for_stmt) == GF_OMP_FOR_KIND_GRID_LOOP);
7565 *pp = kfor->next;
7566 if (kfor->inner)
7567 {
7568 if (gimple_omp_for_grid_group_iter (for_stmt))
7569 {
7570 struct omp_region **next_pp;
7571 for (pp = &kfor->inner; *pp; pp = next_pp)
7572 {
7573 next_pp = &(*pp)->next;
7574 if ((*pp)->type != GIMPLE_OMP_FOR)
7575 continue;
7576 gomp_for *inner = as_a <gomp_for *> (last_stmt ((*pp)->entry));
7577 gcc_assert (gimple_omp_for_kind (inner)
7578 == GF_OMP_FOR_KIND_GRID_LOOP);
7579 grid_expand_omp_for_loop (*pp, true);
7580 *pp = (*pp)->next;
7581 next_pp = pp;
7582 }
7583 }
7584 expand_omp (kfor->inner);
7585 }
7586 if (gpukernel->inner)
7587 expand_omp (gpukernel->inner);
7588
7589 tree kern_fndecl = copy_node (orig_child_fndecl);
7590 DECL_NAME (kern_fndecl) = clone_function_name (kern_fndecl, "kernel");
7591 SET_DECL_ASSEMBLER_NAME (kern_fndecl, DECL_NAME (kern_fndecl));
7592 tree tgtblock = gimple_block (tgt_stmt);
7593 tree fniniblock = make_node (BLOCK);
7594 BLOCK_ABSTRACT_ORIGIN (fniniblock) = tgtblock;
7595 BLOCK_SOURCE_LOCATION (fniniblock) = BLOCK_SOURCE_LOCATION (tgtblock);
7596 BLOCK_SOURCE_END_LOCATION (fniniblock) = BLOCK_SOURCE_END_LOCATION (tgtblock);
7597 BLOCK_SUPERCONTEXT (fniniblock) = kern_fndecl;
7598 DECL_INITIAL (kern_fndecl) = fniniblock;
7599 push_struct_function (kern_fndecl);
7600 cfun->function_end_locus = gimple_location (tgt_stmt);
7601 init_tree_ssa (cfun);
7602 pop_cfun ();
7603
7604 /* Make sure to generate early debug for the function before
7605 outlining anything. */
7606 if (! gimple_in_ssa_p (cfun))
7607 (*debug_hooks->early_global_decl) (cfun->decl);
7608
7609 tree old_parm_decl = DECL_ARGUMENTS (kern_fndecl);
7610 gcc_assert (!DECL_CHAIN (old_parm_decl));
7611 tree new_parm_decl = copy_node (DECL_ARGUMENTS (kern_fndecl));
7612 DECL_CONTEXT (new_parm_decl) = kern_fndecl;
7613 DECL_ARGUMENTS (kern_fndecl) = new_parm_decl;
7614 gcc_assert (VOID_TYPE_P (TREE_TYPE (DECL_RESULT (kern_fndecl))));
7615 DECL_RESULT (kern_fndecl) = copy_node (DECL_RESULT (kern_fndecl));
7616 DECL_CONTEXT (DECL_RESULT (kern_fndecl)) = kern_fndecl;
7617 struct function *kern_cfun = DECL_STRUCT_FUNCTION (kern_fndecl);
7618 kern_cfun->curr_properties = cfun->curr_properties;
7619
7620 grid_expand_omp_for_loop (kfor, false);
7621
7622 /* Remove the omp for statement. */
7623 gimple_stmt_iterator gsi = gsi_last_bb (gpukernel->entry);
7624 gsi_remove (&gsi, true);
7625 /* Replace the GIMPLE_OMP_RETURN at the end of the kernel region with a real
7626 return. */
7627 gsi = gsi_last_bb (gpukernel->exit);
7628 gcc_assert (!gsi_end_p (gsi)
7629 && gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_RETURN);
7630 gimple *ret_stmt = gimple_build_return (NULL);
7631 gsi_insert_after (&gsi, ret_stmt, GSI_SAME_STMT);
7632 gsi_remove (&gsi, true);
7633
7634 /* Statements in the first BB in the target construct have been produced by
7635 target lowering and must be copied inside the GPUKERNEL, with the two
7636 exceptions of the first OMP statement and the OMP_DATA assignment
7637 statement. */
7638 gsi = gsi_start_bb (single_succ (gpukernel->entry));
7639 tree data_arg = gimple_omp_target_data_arg (tgt_stmt);
7640 tree sender = data_arg ? TREE_VEC_ELT (data_arg, 0) : NULL;
7641 for (gimple_stmt_iterator tsi = gsi_start_bb (single_succ (target->entry));
7642 !gsi_end_p (tsi); gsi_next (&tsi))
7643 {
7644 gimple *stmt = gsi_stmt (tsi);
7645 if (is_gimple_omp (stmt))
7646 break;
7647 if (sender
7648 && is_gimple_assign (stmt)
7649 && TREE_CODE (gimple_assign_rhs1 (stmt)) == ADDR_EXPR
7650 && TREE_OPERAND (gimple_assign_rhs1 (stmt), 0) == sender)
7651 continue;
7652 gimple *copy = gimple_copy (stmt);
7653 gsi_insert_before (&gsi, copy, GSI_SAME_STMT);
7654 gimple_set_block (copy, fniniblock);
7655 }
7656
7657 move_sese_region_to_fn (kern_cfun, single_succ (gpukernel->entry),
7658 gpukernel->exit, inside_block);
7659
7660 cgraph_node *kcn = cgraph_node::get_create (kern_fndecl);
7661 kcn->mark_force_output ();
7662 cgraph_node *orig_child = cgraph_node::get (orig_child_fndecl);
7663
7664 hsa_register_kernel (kcn, orig_child);
7665
7666 cgraph_node::add_new_function (kern_fndecl, true);
7667 push_cfun (kern_cfun);
7668 cgraph_edge::rebuild_edges ();
7669
7670 /* Re-map any mention of the PARM_DECL of the original function to the
7671 PARM_DECL of the new one.
7672
7673 TODO: It would be great if lowering produced references into the GPU
7674 kernel decl straight away and we did not have to do this. */
7675 struct grid_arg_decl_map adm;
7676 adm.old_arg = old_parm_decl;
7677 adm.new_arg = new_parm_decl;
7678 basic_block bb;
7679 FOR_EACH_BB_FN (bb, kern_cfun)
7680 {
7681 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
7682 {
7683 gimple *stmt = gsi_stmt (gsi);
7684 struct walk_stmt_info wi;
7685 memset (&wi, 0, sizeof (wi));
7686 wi.info = &adm;
7687 walk_gimple_op (stmt, grid_remap_kernel_arg_accesses, &wi);
7688 }
7689 }
7690 pop_cfun ();
7691
7692 return;
7693 }
7694
7695 /* Expand the parallel region tree rooted at REGION. Expansion
7696 proceeds in depth-first order. Innermost regions are expanded
7697 first. This way, parallel regions that require a new function to
7698 be created (e.g., GIMPLE_OMP_PARALLEL) can be expanded without having any
7699 internal dependencies in their body. */
7700
7701 static void
7702 expand_omp (struct omp_region *region)
7703 {
7704 omp_any_child_fn_dumped = false;
7705 while (region)
7706 {
7707 location_t saved_location;
7708 gimple *inner_stmt = NULL;
7709
7710 /* First, determine whether this is a combined parallel+workshare
7711 region. */
7712 if (region->type == GIMPLE_OMP_PARALLEL)
7713 determine_parallel_type (region);
7714 else if (region->type == GIMPLE_OMP_TARGET)
7715 grid_expand_target_grid_body (region);
7716
7717 if (region->type == GIMPLE_OMP_FOR
7718 && gimple_omp_for_combined_p (last_stmt (region->entry)))
7719 inner_stmt = last_stmt (region->inner->entry);
7720
7721 if (region->inner)
7722 expand_omp (region->inner);
7723
7724 saved_location = input_location;
7725 if (gimple_has_location (last_stmt (region->entry)))
7726 input_location = gimple_location (last_stmt (region->entry));
7727
7728 switch (region->type)
7729 {
7730 case GIMPLE_OMP_PARALLEL:
7731 case GIMPLE_OMP_TASK:
7732 expand_omp_taskreg (region);
7733 break;
7734
7735 case GIMPLE_OMP_FOR:
7736 expand_omp_for (region, inner_stmt);
7737 break;
7738
7739 case GIMPLE_OMP_SECTIONS:
7740 expand_omp_sections (region);
7741 break;
7742
7743 case GIMPLE_OMP_SECTION:
7744 /* Individual omp sections are handled together with their
7745 parent GIMPLE_OMP_SECTIONS region. */
7746 break;
7747
7748 case GIMPLE_OMP_SINGLE:
7749 expand_omp_single (region);
7750 break;
7751
7752 case GIMPLE_OMP_ORDERED:
7753 {
7754 gomp_ordered *ord_stmt
7755 = as_a <gomp_ordered *> (last_stmt (region->entry));
7756 if (omp_find_clause (gimple_omp_ordered_clauses (ord_stmt),
7757 OMP_CLAUSE_DEPEND))
7758 {
7759 /* We'll expand these when expanding corresponding
7760 worksharing region with ordered(n) clause. */
7761 gcc_assert (region->outer
7762 && region->outer->type == GIMPLE_OMP_FOR);
7763 region->ord_stmt = ord_stmt;
7764 break;
7765 }
7766 }
7767 /* FALLTHRU */
7768 case GIMPLE_OMP_MASTER:
7769 case GIMPLE_OMP_TASKGROUP:
7770 case GIMPLE_OMP_CRITICAL:
7771 case GIMPLE_OMP_TEAMS:
7772 expand_omp_synch (region);
7773 break;
7774
7775 case GIMPLE_OMP_ATOMIC_LOAD:
7776 expand_omp_atomic (region);
7777 break;
7778
7779 case GIMPLE_OMP_TARGET:
7780 expand_omp_target (region);
7781 break;
7782
7783 default:
7784 gcc_unreachable ();
7785 }
7786
7787 input_location = saved_location;
7788 region = region->next;
7789 }
7790 if (omp_any_child_fn_dumped)
7791 {
7792 if (dump_file)
7793 dump_function_header (dump_file, current_function_decl, dump_flags);
7794 omp_any_child_fn_dumped = false;
7795 }
7796 }
7797
7798 /* Helper for build_omp_regions. Scan the dominator tree starting at
7799 block BB. PARENT is the region that contains BB. If SINGLE_TREE is
7800 true, the function ends once a single tree is built (otherwise, a whole
7801 forest of OMP constructs may be built). */
7802
7803 static void
7804 build_omp_regions_1 (basic_block bb, struct omp_region *parent,
7805 bool single_tree)
7806 {
7807 gimple_stmt_iterator gsi;
7808 gimple *stmt;
7809 basic_block son;
7810
7811 gsi = gsi_last_bb (bb);
7812 if (!gsi_end_p (gsi) && is_gimple_omp (gsi_stmt (gsi)))
7813 {
7814 struct omp_region *region;
7815 enum gimple_code code;
7816
7817 stmt = gsi_stmt (gsi);
7818 code = gimple_code (stmt);
7819 if (code == GIMPLE_OMP_RETURN)
7820 {
7821 /* STMT is the return point out of region PARENT. Mark it
7822 as the exit point and make PARENT the immediately
7823 enclosing region. */
7824 gcc_assert (parent);
7825 region = parent;
7826 region->exit = bb;
7827 parent = parent->outer;
7828 }
7829 else if (code == GIMPLE_OMP_ATOMIC_STORE)
7830 {
7831 /* GIMPLE_OMP_ATOMIC_STORE is analogous to
7832 GIMPLE_OMP_RETURN, but matches with
7833 GIMPLE_OMP_ATOMIC_LOAD. */
7834 gcc_assert (parent);
7835 gcc_assert (parent->type == GIMPLE_OMP_ATOMIC_LOAD);
7836 region = parent;
7837 region->exit = bb;
7838 parent = parent->outer;
7839 }
7840 else if (code == GIMPLE_OMP_CONTINUE)
7841 {
7842 gcc_assert (parent);
7843 parent->cont = bb;
7844 }
7845 else if (code == GIMPLE_OMP_SECTIONS_SWITCH)
7846 {
7847 /* GIMPLE_OMP_SECTIONS_SWITCH is part of
7848 GIMPLE_OMP_SECTIONS, and we do nothing for it. */
7849 }
7850 else
7851 {
7852 region = new_omp_region (bb, code, parent);
7853 /* Otherwise... */
7854 if (code == GIMPLE_OMP_TARGET)
7855 {
7856 switch (gimple_omp_target_kind (stmt))
7857 {
7858 case GF_OMP_TARGET_KIND_REGION:
7859 case GF_OMP_TARGET_KIND_DATA:
7860 case GF_OMP_TARGET_KIND_OACC_PARALLEL:
7861 case GF_OMP_TARGET_KIND_OACC_KERNELS:
7862 case GF_OMP_TARGET_KIND_OACC_DATA:
7863 case GF_OMP_TARGET_KIND_OACC_HOST_DATA:
7864 break;
7865 case GF_OMP_TARGET_KIND_UPDATE:
7866 case GF_OMP_TARGET_KIND_ENTER_DATA:
7867 case GF_OMP_TARGET_KIND_EXIT_DATA:
7868 case GF_OMP_TARGET_KIND_OACC_UPDATE:
7869 case GF_OMP_TARGET_KIND_OACC_ENTER_EXIT_DATA:
7870 case GF_OMP_TARGET_KIND_OACC_DECLARE:
7871 /* ..., other than for those stand-alone directives... */
7872 region = NULL;
7873 break;
7874 default:
7875 gcc_unreachable ();
7876 }
7877 }
7878 else if (code == GIMPLE_OMP_ORDERED
7879 && omp_find_clause (gimple_omp_ordered_clauses
7880 (as_a <gomp_ordered *> (stmt)),
7881 OMP_CLAUSE_DEPEND))
7882 /* #pragma omp ordered depend is also just a stand-alone
7883 directive. */
7884 region = NULL;
7885 /* ..., this directive becomes the parent for a new region. */
7886 if (region)
7887 parent = region;
7888 }
7889 }
7890
7891 if (single_tree && !parent)
7892 return;
7893
7894 for (son = first_dom_son (CDI_DOMINATORS, bb);
7895 son;
7896 son = next_dom_son (CDI_DOMINATORS, son))
7897 build_omp_regions_1 (son, parent, single_tree);
7898 }
7899
7900 /* Builds the tree of OMP regions rooted at ROOT, storing it to
7901 root_omp_region. */
7902
7903 static void
7904 build_omp_regions_root (basic_block root)
7905 {
7906 gcc_assert (root_omp_region == NULL);
7907 build_omp_regions_1 (root, NULL, true);
7908 gcc_assert (root_omp_region != NULL);
7909 }
7910
7911 /* Expands omp construct (and its subconstructs) starting in HEAD. */
7912
7913 void
7914 omp_expand_local (basic_block head)
7915 {
7916 build_omp_regions_root (head);
7917 if (dump_file && (dump_flags & TDF_DETAILS))
7918 {
7919 fprintf (dump_file, "\nOMP region tree\n\n");
7920 dump_omp_region (dump_file, root_omp_region, 0);
7921 fprintf (dump_file, "\n");
7922 }
7923
7924 remove_exit_barriers (root_omp_region);
7925 expand_omp (root_omp_region);
7926
7927 omp_free_regions ();
7928 }
7929
7930 /* Scan the CFG and build a tree of OMP regions, storing the root of
7931 the OMP region tree in root_omp_region.  */
7932
7933 static void
7934 build_omp_regions (void)
7935 {
7936 gcc_assert (root_omp_region == NULL);
7937 calculate_dominance_info (CDI_DOMINATORS);
7938 build_omp_regions_1 (ENTRY_BLOCK_PTR_FOR_FN (cfun), NULL, false);
7939 }
7940
7941 /* Main entry point for expanding OMP-GIMPLE into runtime calls. */
7942
7943 static unsigned int
7944 execute_expand_omp (void)
7945 {
7946 build_omp_regions ();
7947
7948 if (!root_omp_region)
7949 return 0;
7950
7951 if (dump_file)
7952 {
7953 fprintf (dump_file, "\nOMP region tree\n\n");
7954 dump_omp_region (dump_file, root_omp_region, 0);
7955 fprintf (dump_file, "\n");
7956 }
7957
7958 remove_exit_barriers (root_omp_region);
7959
7960 expand_omp (root_omp_region);
7961
7962 if (flag_checking && !loops_state_satisfies_p (LOOPS_NEED_FIXUP))
7963 verify_loop_structure ();
7964 cleanup_tree_cfg ();
7965
7966 omp_free_regions ();
7967
7968 return 0;
7969 }
7970
7971 /* OMP expansion -- the default pass, run before creation of SSA form. */
7972
7973 namespace {
7974
7975 const pass_data pass_data_expand_omp =
7976 {
7977 GIMPLE_PASS, /* type */
7978 "ompexp", /* name */
7979 OPTGROUP_OPENMP, /* optinfo_flags */
7980 TV_NONE, /* tv_id */
7981 PROP_gimple_any, /* properties_required */
7982 PROP_gimple_eomp, /* properties_provided */
7983 0, /* properties_destroyed */
7984 0, /* todo_flags_start */
7985 0, /* todo_flags_finish */
7986 };
7987
7988 class pass_expand_omp : public gimple_opt_pass
7989 {
7990 public:
7991 pass_expand_omp (gcc::context *ctxt)
7992 : gimple_opt_pass (pass_data_expand_omp, ctxt)
7993 {}
7994
7995 /* opt_pass methods: */
7996 virtual unsigned int execute (function *)
7997 {
7998 bool gate = ((flag_cilkplus != 0 || flag_openacc != 0 || flag_openmp != 0
7999 || flag_openmp_simd != 0)
8000 && !seen_error ());
8001
8002 /* This pass always runs, to provide PROP_gimple_eomp.
8003 But often, there is nothing to do. */
8004 if (!gate)
8005 return 0;
8006
8007 return execute_expand_omp ();
8008 }
8009
8010 }; // class pass_expand_omp
8011
8012 } // anon namespace
8013
8014 gimple_opt_pass *
8015 make_pass_expand_omp (gcc::context *ctxt)
8016 {
8017 return new pass_expand_omp (ctxt);
8018 }
8019
8020 namespace {
8021
8022 const pass_data pass_data_expand_omp_ssa =
8023 {
8024 GIMPLE_PASS, /* type */
8025 "ompexpssa", /* name */
8026 OPTGROUP_OPENMP, /* optinfo_flags */
8027 TV_NONE, /* tv_id */
8028 PROP_cfg | PROP_ssa, /* properties_required */
8029 PROP_gimple_eomp, /* properties_provided */
8030 0, /* properties_destroyed */
8031 0, /* todo_flags_start */
8032 TODO_cleanup_cfg | TODO_rebuild_alias, /* todo_flags_finish */
8033 };
8034
8035 class pass_expand_omp_ssa : public gimple_opt_pass
8036 {
8037 public:
8038 pass_expand_omp_ssa (gcc::context *ctxt)
8039 : gimple_opt_pass (pass_data_expand_omp_ssa, ctxt)
8040 {}
8041
8042 /* opt_pass methods: */
8043 virtual bool gate (function *fun)
8044 {
8045 return !(fun->curr_properties & PROP_gimple_eomp);
8046 }
8047 virtual unsigned int execute (function *) { return execute_expand_omp (); }
8048 opt_pass * clone () { return new pass_expand_omp_ssa (m_ctxt); }
8049
8050 }; // class pass_expand_omp_ssa
8051
8052 } // anon namespace
8053
8054 gimple_opt_pass *
8055 make_pass_expand_omp_ssa (gcc::context *ctxt)
8056 {
8057 return new pass_expand_omp_ssa (ctxt);
8058 }
8059
8060 /* Called from tree-cfg.c::make_edges to create cfg edges for all relevant
8061 GIMPLE_* codes. */
8062
8063 bool
8064 omp_make_gimple_edges (basic_block bb, struct omp_region **region,
8065 int *region_idx)
8066 {
8067 gimple *last = last_stmt (bb);
8068 enum gimple_code code = gimple_code (last);
8069 struct omp_region *cur_region = *region;
8070 bool fallthru = false;
8071
8072 switch (code)
8073 {
8074 case GIMPLE_OMP_PARALLEL:
8075 case GIMPLE_OMP_TASK:
8076 case GIMPLE_OMP_FOR:
8077 case GIMPLE_OMP_SINGLE:
8078 case GIMPLE_OMP_TEAMS:
8079 case GIMPLE_OMP_MASTER:
8080 case GIMPLE_OMP_TASKGROUP:
8081 case GIMPLE_OMP_CRITICAL:
8082 case GIMPLE_OMP_SECTION:
8083 case GIMPLE_OMP_GRID_BODY:
8084 cur_region = new_omp_region (bb, code, cur_region);
8085 fallthru = true;
8086 break;
8087
8088 case GIMPLE_OMP_ORDERED:
8089 cur_region = new_omp_region (bb, code, cur_region);
8090 fallthru = true;
8091 if (omp_find_clause (gimple_omp_ordered_clauses
8092 (as_a <gomp_ordered *> (last)),
8093 OMP_CLAUSE_DEPEND))
8094 cur_region = cur_region->outer;
8095 break;
8096
8097 case GIMPLE_OMP_TARGET:
8098 cur_region = new_omp_region (bb, code, cur_region);
8099 fallthru = true;
8100 switch (gimple_omp_target_kind (last))
8101 {
8102 case GF_OMP_TARGET_KIND_REGION:
8103 case GF_OMP_TARGET_KIND_DATA:
8104 case GF_OMP_TARGET_KIND_OACC_PARALLEL:
8105 case GF_OMP_TARGET_KIND_OACC_KERNELS:
8106 case GF_OMP_TARGET_KIND_OACC_DATA:
8107 case GF_OMP_TARGET_KIND_OACC_HOST_DATA:
8108 break;
8109 case GF_OMP_TARGET_KIND_UPDATE:
8110 case GF_OMP_TARGET_KIND_ENTER_DATA:
8111 case GF_OMP_TARGET_KIND_EXIT_DATA:
8112 case GF_OMP_TARGET_KIND_OACC_UPDATE:
8113 case GF_OMP_TARGET_KIND_OACC_ENTER_EXIT_DATA:
8114 case GF_OMP_TARGET_KIND_OACC_DECLARE:
8115 cur_region = cur_region->outer;
8116 break;
8117 default:
8118 gcc_unreachable ();
8119 }
8120 break;
8121
8122 case GIMPLE_OMP_SECTIONS:
8123 cur_region = new_omp_region (bb, code, cur_region);
8124 fallthru = true;
8125 break;
8126
8127 case GIMPLE_OMP_SECTIONS_SWITCH:
8128 fallthru = false;
8129 break;
8130
8131 case GIMPLE_OMP_ATOMIC_LOAD:
8132 case GIMPLE_OMP_ATOMIC_STORE:
8133 fallthru = true;
8134 break;
8135
8136 case GIMPLE_OMP_RETURN:
8137 /* In the case of a GIMPLE_OMP_SECTION, the edge will go
8138 somewhere other than the next block. This will be
8139 created later. */
8140 cur_region->exit = bb;
8141 if (cur_region->type == GIMPLE_OMP_TASK)
8142 /* Add an edge corresponding to not scheduling the task
8143 immediately. */
8144 make_edge (cur_region->entry, bb, EDGE_ABNORMAL);
8145 fallthru = cur_region->type != GIMPLE_OMP_SECTION;
8146 cur_region = cur_region->outer;
8147 break;
8148
8149 case GIMPLE_OMP_CONTINUE:
8150 cur_region->cont = bb;
8151 switch (cur_region->type)
8152 {
8153 case GIMPLE_OMP_FOR:
8154 /* Mark all GIMPLE_OMP_FOR and GIMPLE_OMP_CONTINUE
8155 successor edges as abnormal to prevent splitting
8156 them. */
8157 single_succ_edge (cur_region->entry)->flags |= EDGE_ABNORMAL;
8158 /* Make the loopback edge. */
8159 make_edge (bb, single_succ (cur_region->entry),
8160 EDGE_ABNORMAL);
8161
8162 /* Create an edge from GIMPLE_OMP_FOR to exit, which
8163 corresponds to the case that the body of the loop
8164 is not executed at all. */
8165 make_edge (cur_region->entry, bb->next_bb, EDGE_ABNORMAL);
8166 make_edge (bb, bb->next_bb, EDGE_FALLTHRU | EDGE_ABNORMAL);
8167 fallthru = false;
8168 break;
8169
8170 case GIMPLE_OMP_SECTIONS:
8171 /* Wire up the edges into and out of the nested sections. */
8172 {
8173 basic_block switch_bb = single_succ (cur_region->entry);
8174
8175 struct omp_region *i;
8176 for (i = cur_region->inner; i ; i = i->next)
8177 {
8178 gcc_assert (i->type == GIMPLE_OMP_SECTION);
8179 make_edge (switch_bb, i->entry, 0);
8180 make_edge (i->exit, bb, EDGE_FALLTHRU);
8181 }
8182
8183 /* Make the loopback edge to the block with
8184 GIMPLE_OMP_SECTIONS_SWITCH. */
8185 make_edge (bb, switch_bb, 0);
8186
8187 /* Make the edge from the switch to exit. */
8188 make_edge (switch_bb, bb->next_bb, 0);
8189 fallthru = false;
8190 }
8191 break;
8192
8193 case GIMPLE_OMP_TASK:
8194 fallthru = true;
8195 break;
8196
8197 default:
8198 gcc_unreachable ();
8199 }
8200 break;
8201
8202 default:
8203 gcc_unreachable ();
8204 }
8205
8206 if (*region != cur_region)
8207 {
8208 *region = cur_region;
8209 if (cur_region)
8210 *region_idx = cur_region->entry->index;
8211 else
8212 *region_idx = 0;
8213 }
8214
8215 return fallthru;
8216 }
8217
8218 #include "gt-omp-expand.h"