/* (Web-viewer residue removed: this file is gcc/omp-expand.c as rendered by
   the git.ipfire.org gitweb interface; commit subject was
   "Fix diff_type in expand_oacc_for char iter_type".)  */
1 /* Expansion pass for OMP directives. Outlines regions of certain OMP
2 directives to separate functions, converts others into explicit calls to the
3 runtime library (libgomp) and so forth
4
5 Copyright (C) 2005-2017 Free Software Foundation, Inc.
6
7 This file is part of GCC.
8
9 GCC is free software; you can redistribute it and/or modify it under
10 the terms of the GNU General Public License as published by the Free
11 Software Foundation; either version 3, or (at your option) any later
12 version.
13
14 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
15 WARRANTY; without even the implied warranty of MERCHANTABILITY or
16 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
17 for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with GCC; see the file COPYING3. If not see
21 <http://www.gnu.org/licenses/>. */
22
23 #include "config.h"
24 #include "system.h"
25 #include "coretypes.h"
26 #include "memmodel.h"
27 #include "backend.h"
28 #include "target.h"
29 #include "rtl.h"
30 #include "tree.h"
31 #include "gimple.h"
32 #include "cfghooks.h"
33 #include "tree-pass.h"
34 #include "ssa.h"
35 #include "optabs.h"
36 #include "cgraph.h"
37 #include "pretty-print.h"
38 #include "diagnostic-core.h"
39 #include "fold-const.h"
40 #include "stor-layout.h"
41 #include "cfganal.h"
42 #include "internal-fn.h"
43 #include "gimplify.h"
44 #include "gimple-iterator.h"
45 #include "gimplify-me.h"
46 #include "gimple-walk.h"
47 #include "tree-cfg.h"
48 #include "tree-into-ssa.h"
49 #include "tree-ssa.h"
50 #include "splay-tree.h"
51 #include "cfgloop.h"
52 #include "omp-general.h"
53 #include "omp-offload.h"
54 #include "tree-cfgcleanup.h"
55 #include "symbol-summary.h"
56 #include "cilk.h"
57 #include "gomp-constants.h"
58 #include "gimple-pretty-print.h"
59 #include "hsa-common.h"
60 #include "debug.h"
61
62
/* OMP region information.  Every parallel and workshare
   directive is enclosed between two markers, the OMP_* directive
   and a corresponding GIMPLE_OMP_RETURN statement.  Regions form a
   tree (outer/inner/next) mirroring the nesting of the directives.  */

struct omp_region
{
  /* The enclosing region, or NULL for a toplevel region.  */
  struct omp_region *outer;

  /* First child region.  */
  struct omp_region *inner;

  /* Next peer region.  */
  struct omp_region *next;

  /* Block containing the omp directive as its last stmt.  */
  basic_block entry;

  /* Block containing the GIMPLE_OMP_RETURN as its last stmt.  */
  basic_block exit;

  /* Block containing the GIMPLE_OMP_CONTINUE as its last stmt.  */
  basic_block cont;

  /* If this is a combined parallel+workshare region, this is a list
     of additional arguments needed by the combined parallel+workshare
     library call.  */
  vec<tree, va_gc> *ws_args;

  /* The code for the omp directive of this region.  */
  enum gimple_code type;

  /* Schedule kind, only used for GIMPLE_OMP_FOR type regions.  */
  enum omp_clause_schedule_kind sched_kind;

  /* Schedule modifiers (OMP_CLAUSE_SCHEDULE_* modifier bits).  */
  unsigned char sched_modifiers;

  /* True if this is a combined parallel+workshare region.  */
  bool is_combined_parallel;

  /* The ordered stmt if type is GIMPLE_OMP_ORDERED and it has
     a depend clause.  */
  gomp_ordered *ord_stmt;
};
108
/* Root of the omp_region tree for the current function.  */
static struct omp_region *root_omp_region;
/* Set when an outlined child function has been dumped; presumably
   consulted later in this file to finish the dump — confirm below.  */
static bool omp_any_child_fn_dumped;

/* Forward declarations for routines defined later in this file.  */
static void expand_omp_build_assign (gimple_stmt_iterator *, tree, tree,
				     bool = false);
static gphi *find_phi_with_arg_on_edge (tree, edge);
static void expand_omp (struct omp_region *region);
116
117 /* Return true if REGION is a combined parallel+workshare region. */
118
119 static inline bool
120 is_combined_parallel (struct omp_region *region)
121 {
122 return region->is_combined_parallel;
123 }
124
125 /* Given two blocks PAR_ENTRY_BB and WS_ENTRY_BB such that WS_ENTRY_BB
126 is the immediate dominator of PAR_ENTRY_BB, return true if there
127 are no data dependencies that would prevent expanding the parallel
128 directive at PAR_ENTRY_BB as a combined parallel+workshare region.
129
130 When expanding a combined parallel+workshare region, the call to
131 the child function may need additional arguments in the case of
132 GIMPLE_OMP_FOR regions. In some cases, these arguments are
133 computed out of variables passed in from the parent to the child
134 via 'struct .omp_data_s'. For instance:
135
136 #pragma omp parallel for schedule (guided, i * 4)
137 for (j ...)
138
139 Is lowered into:
140
141 # BLOCK 2 (PAR_ENTRY_BB)
142 .omp_data_o.i = i;
143 #pragma omp parallel [child fn: bar.omp_fn.0 ( ..., D.1598)
144
145 # BLOCK 3 (WS_ENTRY_BB)
146 .omp_data_i = &.omp_data_o;
147 D.1667 = .omp_data_i->i;
148 D.1598 = D.1667 * 4;
149 #pragma omp for schedule (guided, D.1598)
150
151 When we outline the parallel region, the call to the child function
152 'bar.omp_fn.0' will need the value D.1598 in its argument list, but
153 that value is computed *after* the call site. So, in principle we
154 cannot do the transformation.
155
156 To see whether the code in WS_ENTRY_BB blocks the combined
157 parallel+workshare call, we collect all the variables used in the
158 GIMPLE_OMP_FOR header check whether they appear on the LHS of any
159 statement in WS_ENTRY_BB. If so, then we cannot emit the combined
160 call.
161
162 FIXME. If we had the SSA form built at this point, we could merely
163 hoist the code in block 3 into block 2 and be done with it. But at
164 this point we don't have dataflow information and though we could
165 hack something up here, it is really not worth the aggravation. */
166
167 static bool
168 workshare_safe_to_combine_p (basic_block ws_entry_bb)
169 {
170 struct omp_for_data fd;
171 gimple *ws_stmt = last_stmt (ws_entry_bb);
172
173 if (gimple_code (ws_stmt) == GIMPLE_OMP_SECTIONS)
174 return true;
175
176 gcc_assert (gimple_code (ws_stmt) == GIMPLE_OMP_FOR);
177
178 omp_extract_for_data (as_a <gomp_for *> (ws_stmt), &fd, NULL);
179
180 if (fd.collapse > 1 && TREE_CODE (fd.loop.n2) != INTEGER_CST)
181 return false;
182 if (fd.iter_type != long_integer_type_node)
183 return false;
184
185 /* FIXME. We give up too easily here. If any of these arguments
186 are not constants, they will likely involve variables that have
187 been mapped into fields of .omp_data_s for sharing with the child
188 function. With appropriate data flow, it would be possible to
189 see through this. */
190 if (!is_gimple_min_invariant (fd.loop.n1)
191 || !is_gimple_min_invariant (fd.loop.n2)
192 || !is_gimple_min_invariant (fd.loop.step)
193 || (fd.chunk_size && !is_gimple_min_invariant (fd.chunk_size)))
194 return false;
195
196 return true;
197 }
198
199 /* Adjust CHUNK_SIZE from SCHEDULE clause, depending on simd modifier
200 presence (SIMD_SCHEDULE). */
201
202 static tree
203 omp_adjust_chunk_size (tree chunk_size, bool simd_schedule)
204 {
205 if (!simd_schedule)
206 return chunk_size;
207
208 int vf = omp_max_vf ();
209 if (vf == 1)
210 return chunk_size;
211
212 tree type = TREE_TYPE (chunk_size);
213 chunk_size = fold_build2 (PLUS_EXPR, type, chunk_size,
214 build_int_cst (type, vf - 1));
215 return fold_build2 (BIT_AND_EXPR, type, chunk_size,
216 build_int_cst (type, -vf));
217 }
218
/* Collect additional arguments needed to emit a combined
   parallel+workshare call.  WS_STMT is the workshare directive being
   expanded; PAR_STMT is the enclosing parallel directive, consulted
   only for its _looptemp_ clauses.  Returns a GC-allocated vector:
   (n1, n2, step[, chunk]) for a loop, (section count) for sections.  */

static vec<tree, va_gc> *
get_ws_args_for (gimple *par_stmt, gimple *ws_stmt)
{
  tree t;
  location_t loc = gimple_location (ws_stmt);
  vec<tree, va_gc> *ws_args;

  if (gomp_for *for_stmt = dyn_cast <gomp_for *> (ws_stmt))
    {
      struct omp_for_data fd;
      tree n1, n2;

      omp_extract_for_data (for_stmt, &fd, NULL);
      n1 = fd.loop.n1;
      n2 = fd.loop.n2;

      /* For a loop combined into the parallel, the bounds live in the
	 parallel's first two _looptemp_ clauses; use those instead.  */
      if (gimple_omp_for_combined_into_p (for_stmt))
	{
	  tree innerc
	    = omp_find_clause (gimple_omp_parallel_clauses (par_stmt),
			       OMP_CLAUSE__LOOPTEMP_);
	  gcc_assert (innerc);
	  n1 = OMP_CLAUSE_DECL (innerc);
	  innerc = omp_find_clause (OMP_CLAUSE_CHAIN (innerc),
				    OMP_CLAUSE__LOOPTEMP_);
	  gcc_assert (innerc);
	  n2 = OMP_CLAUSE_DECL (innerc);
	}

      /* Three mandatory arguments plus an optional chunk size; the
	 allocation count must match the quick_push calls below.  */
      vec_alloc (ws_args, 3 + (fd.chunk_size != 0));

      t = fold_convert_loc (loc, long_integer_type_node, n1);
      ws_args->quick_push (t);

      t = fold_convert_loc (loc, long_integer_type_node, n2);
      ws_args->quick_push (t);

      t = fold_convert_loc (loc, long_integer_type_node, fd.loop.step);
      ws_args->quick_push (t);

      if (fd.chunk_size)
	{
	  t = fold_convert_loc (loc, long_integer_type_node, fd.chunk_size);
	  /* Round the chunk up for the simd schedule modifier.  */
	  t = omp_adjust_chunk_size (t, fd.simd_schedule);
	  ws_args->quick_push (t);
	}

      return ws_args;
    }
  else if (gimple_code (ws_stmt) == GIMPLE_OMP_SECTIONS)
    {
      /* Number of sections is equal to the number of edges from the
	 GIMPLE_OMP_SECTIONS_SWITCH statement, except for the one to
	 the exit of the sections region.  */
      basic_block bb = single_succ (gimple_bb (ws_stmt));
      t = build_int_cst (unsigned_type_node, EDGE_COUNT (bb->succs) - 1);
      vec_alloc (ws_args, 1);
      ws_args->quick_push (t);
      return ws_args;
    }

  gcc_unreachable ();
}
286
/* Discover whether REGION is a combined parallel+workshare region.
   On success, set is_combined_parallel on both REGION and its inner
   workshare region, and record the extra library-call arguments in
   REGION->ws_args.  */

static void
determine_parallel_type (struct omp_region *region)
{
  basic_block par_entry_bb, par_exit_bb;
  basic_block ws_entry_bb, ws_exit_bb;

  /* Need a complete parallel region with a complete inner region.  */
  if (region == NULL || region->inner == NULL
      || region->exit == NULL || region->inner->exit == NULL
      || region->inner->cont == NULL)
    return;

  /* We only support parallel+for and parallel+sections.  */
  if (region->type != GIMPLE_OMP_PARALLEL
      || (region->inner->type != GIMPLE_OMP_FOR
	  && region->inner->type != GIMPLE_OMP_SECTIONS))
    return;

  /* Check for perfect nesting PAR_ENTRY_BB -> WS_ENTRY_BB and
     WS_EXIT_BB -> PAR_EXIT_BB.  */
  par_entry_bb = region->entry;
  par_exit_bb = region->exit;
  ws_entry_bb = region->inner->entry;
  ws_exit_bb = region->inner->exit;

  /* Either the front end already marked the parallel as combined, or
     the entry/exit blocks contain nothing but the directives.  */
  if (single_succ (par_entry_bb) == ws_entry_bb
      && single_succ (ws_exit_bb) == par_exit_bb
      && workshare_safe_to_combine_p (ws_entry_bb)
      && (gimple_omp_parallel_combined_p (last_stmt (par_entry_bb))
	  || (last_and_only_stmt (ws_entry_bb)
	      && last_and_only_stmt (par_exit_bb))))
    {
      gimple *par_stmt = last_stmt (par_entry_bb);
      gimple *ws_stmt = last_stmt (ws_entry_bb);

      if (region->inner->type == GIMPLE_OMP_FOR)
	{
	  /* If this is a combined parallel loop, we need to determine
	     whether or not to use the combined library calls.  There
	     are two cases where we do not apply the transformation:
	     static loops and any kind of ordered loop.  In the first
	     case, we already open code the loop so there is no need
	     to do anything else.  In the latter case, the combined
	     parallel loop call would still need extra synchronization
	     to implement ordered semantics, so there would not be any
	     gain in using the combined call.  */
	  tree clauses = gimple_omp_for_clauses (ws_stmt);
	  tree c = omp_find_clause (clauses, OMP_CLAUSE_SCHEDULE);
	  if (c == NULL
	      || ((OMP_CLAUSE_SCHEDULE_KIND (c) & OMP_CLAUSE_SCHEDULE_MASK)
		  == OMP_CLAUSE_SCHEDULE_STATIC)
	      || omp_find_clause (clauses, OMP_CLAUSE_ORDERED))
	    {
	      region->is_combined_parallel = false;
	      region->inner->is_combined_parallel = false;
	      return;
	    }
	}

      region->is_combined_parallel = true;
      region->inner->is_combined_parallel = true;
      region->ws_args = get_ws_args_for (par_stmt, ws_stmt);
    }
}
352
353 /* Debugging dumps for parallel regions. */
354 void dump_omp_region (FILE *, struct omp_region *, int);
355 void debug_omp_region (struct omp_region *);
356 void debug_all_omp_regions (void);
357
358 /* Dump the parallel region tree rooted at REGION. */
359
360 void
361 dump_omp_region (FILE *file, struct omp_region *region, int indent)
362 {
363 fprintf (file, "%*sbb %d: %s\n", indent, "", region->entry->index,
364 gimple_code_name[region->type]);
365
366 if (region->inner)
367 dump_omp_region (file, region->inner, indent + 4);
368
369 if (region->cont)
370 {
371 fprintf (file, "%*sbb %d: GIMPLE_OMP_CONTINUE\n", indent, "",
372 region->cont->index);
373 }
374
375 if (region->exit)
376 fprintf (file, "%*sbb %d: GIMPLE_OMP_RETURN\n", indent, "",
377 region->exit->index);
378 else
379 fprintf (file, "%*s[no exit marker]\n", indent, "");
380
381 if (region->next)
382 dump_omp_region (file, region->next, indent);
383 }
384
385 DEBUG_FUNCTION void
386 debug_omp_region (struct omp_region *region)
387 {
388 dump_omp_region (stderr, region, 0);
389 }
390
391 DEBUG_FUNCTION void
392 debug_all_omp_regions (void)
393 {
394 dump_omp_region (stderr, root_omp_region, 0);
395 }
396
397 /* Create a new parallel region starting at STMT inside region PARENT. */
398
399 static struct omp_region *
400 new_omp_region (basic_block bb, enum gimple_code type,
401 struct omp_region *parent)
402 {
403 struct omp_region *region = XCNEW (struct omp_region);
404
405 region->outer = parent;
406 region->entry = bb;
407 region->type = type;
408
409 if (parent)
410 {
411 /* This is a nested region. Add it to the list of inner
412 regions in PARENT. */
413 region->next = parent->inner;
414 parent->inner = region;
415 }
416 else
417 {
418 /* This is a toplevel region. Add it to the list of toplevel
419 regions in ROOT_OMP_REGION. */
420 region->next = root_omp_region;
421 root_omp_region = region;
422 }
423
424 return region;
425 }
426
427 /* Release the memory associated with the region tree rooted at REGION. */
428
429 static void
430 free_omp_region_1 (struct omp_region *region)
431 {
432 struct omp_region *i, *n;
433
434 for (i = region->inner; i ; i = n)
435 {
436 n = i->next;
437 free_omp_region_1 (i);
438 }
439
440 free (region);
441 }
442
443 /* Release the memory for the entire omp region tree. */
444
445 void
446 omp_free_regions (void)
447 {
448 struct omp_region *r, *n;
449 for (r = root_omp_region; r ; r = n)
450 {
451 n = r->next;
452 free_omp_region_1 (r);
453 }
454 root_omp_region = NULL;
455 }
456
457 /* A convenience function to build an empty GIMPLE_COND with just the
458 condition. */
459
460 static gcond *
461 gimple_build_cond_empty (tree cond)
462 {
463 enum tree_code pred_code;
464 tree lhs, rhs;
465
466 gimple_cond_get_ops_from_tree (cond, &pred_code, &lhs, &rhs);
467 return gimple_build_cond (pred_code, lhs, rhs, NULL_TREE, NULL_TREE);
468 }
469
470 /* Return true if a parallel REGION is within a declare target function or
471 within a target region and is not a part of a gridified target. */
472
473 static bool
474 parallel_needs_hsa_kernel_p (struct omp_region *region)
475 {
476 bool indirect = false;
477 for (region = region->outer; region; region = region->outer)
478 {
479 if (region->type == GIMPLE_OMP_PARALLEL)
480 indirect = true;
481 else if (region->type == GIMPLE_OMP_TARGET)
482 {
483 gomp_target *tgt_stmt
484 = as_a <gomp_target *> (last_stmt (region->entry));
485
486 if (omp_find_clause (gimple_omp_target_clauses (tgt_stmt),
487 OMP_CLAUSE__GRIDDIM_))
488 return indirect;
489 else
490 return true;
491 }
492 }
493
494 if (lookup_attribute ("omp declare target",
495 DECL_ATTRIBUTES (current_function_decl)))
496 return true;
497
498 return false;
499 }
500
/* Build the function calls to GOMP_parallel_start etc to actually
   generate the parallel operation.  REGION is the parallel region
   being expanded.  BB is the block where to insert the code.  WS_ARGS
   will be set if this is a call to a combined parallel+workshare
   construct, it contains the list of additional arguments needed by
   the workshare construct.  */

static void
expand_parallel_call (struct omp_region *region, basic_block bb,
		      gomp_parallel *entry_stmt,
		      vec<tree, va_gc> *ws_args)
{
  tree t, t1, t2, val, cond, c, clauses, flags;
  gimple_stmt_iterator gsi;
  gimple *stmt;
  enum built_in_function start_ix;
  int start_ix2;
  location_t clause_loc;
  vec<tree, va_gc> *args;

  clauses = gimple_omp_parallel_clauses (entry_stmt);

  /* Determine what flavor of GOMP_parallel we will be
     emitting.  */
  start_ix = BUILT_IN_GOMP_PARALLEL;
  if (is_combined_parallel (region))
    {
      switch (region->inner->type)
	{
	case GIMPLE_OMP_FOR:
	  gcc_assert (region->inner->sched_kind != OMP_CLAUSE_SCHEDULE_AUTO);
	  /* Compute an offset from BUILT_IN_GOMP_PARALLEL_LOOP_STATIC;
	     this relies on the builtins for the schedule kinds being
	     declared consecutively in that order.  */
	  switch (region->inner->sched_kind)
	    {
	    case OMP_CLAUSE_SCHEDULE_RUNTIME:
	      start_ix2 = 3;
	      break;
	    case OMP_CLAUSE_SCHEDULE_DYNAMIC:
	    case OMP_CLAUSE_SCHEDULE_GUIDED:
	      /* The nonmonotonic variants live 3 slots past their
		 monotonic counterparts.  */
	      if (region->inner->sched_modifiers
		  & OMP_CLAUSE_SCHEDULE_NONMONOTONIC)
		{
		  start_ix2 = 3 + region->inner->sched_kind;
		  break;
		}
	      /* FALLTHRU */
	    default:
	      start_ix2 = region->inner->sched_kind;
	      break;
	    }
	  start_ix2 += (int) BUILT_IN_GOMP_PARALLEL_LOOP_STATIC;
	  start_ix = (enum built_in_function) start_ix2;
	  break;
	case GIMPLE_OMP_SECTIONS:
	  start_ix = BUILT_IN_GOMP_PARALLEL_SECTIONS;
	  break;
	default:
	  gcc_unreachable ();
	}
    }

  /* By default, the value of NUM_THREADS is zero (selected at run time)
     and there is no conditional.  */
  cond = NULL_TREE;
  val = build_int_cst (unsigned_type_node, 0);
  flags = build_int_cst (unsigned_type_node, 0);

  c = omp_find_clause (clauses, OMP_CLAUSE_IF);
  if (c)
    cond = OMP_CLAUSE_IF_EXPR (c);

  c = omp_find_clause (clauses, OMP_CLAUSE_NUM_THREADS);
  if (c)
    {
      val = OMP_CLAUSE_NUM_THREADS_EXPR (c);
      clause_loc = OMP_CLAUSE_LOCATION (c);
    }
  else
    clause_loc = gimple_location (entry_stmt);

  c = omp_find_clause (clauses, OMP_CLAUSE_PROC_BIND);
  if (c)
    flags = build_int_cst (unsigned_type_node, OMP_CLAUSE_PROC_BIND_KIND (c));

  /* Ensure 'val' is of the correct type.  */
  val = fold_convert_loc (clause_loc, unsigned_type_node, val);

  /* If we found the clause 'if (cond)', build either
     (cond != 0) or (cond ? val : 1u).  */
  if (cond)
    {
      cond = gimple_boolify (cond);

      if (integer_zerop (val))
	val = fold_build2_loc (clause_loc,
			       EQ_EXPR, unsigned_type_node, cond,
			       build_int_cst (TREE_TYPE (cond), 0));
      else
	{
	  /* Build a CFG diamond: COND selects between the requested
	     thread count and 1 (serial execution).  */
	  basic_block cond_bb, then_bb, else_bb;
	  edge e, e_then, e_else;
	  tree tmp_then, tmp_else, tmp_join, tmp_var;

	  tmp_var = create_tmp_var (TREE_TYPE (val));
	  if (gimple_in_ssa_p (cfun))
	    {
	      tmp_then = make_ssa_name (tmp_var);
	      tmp_else = make_ssa_name (tmp_var);
	      tmp_join = make_ssa_name (tmp_var);
	    }
	  else
	    {
	      tmp_then = tmp_var;
	      tmp_else = tmp_var;
	      tmp_join = tmp_var;
	    }

	  e = split_block_after_labels (bb);
	  cond_bb = e->src;
	  bb = e->dest;
	  remove_edge (e);

	  then_bb = create_empty_bb (cond_bb);
	  else_bb = create_empty_bb (then_bb);
	  set_immediate_dominator (CDI_DOMINATORS, then_bb, cond_bb);
	  set_immediate_dominator (CDI_DOMINATORS, else_bb, cond_bb);

	  stmt = gimple_build_cond_empty (cond);
	  gsi = gsi_start_bb (cond_bb);
	  gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);

	  gsi = gsi_start_bb (then_bb);
	  expand_omp_build_assign (&gsi, tmp_then, val, true);

	  gsi = gsi_start_bb (else_bb);
	  expand_omp_build_assign (&gsi, tmp_else,
				   build_int_cst (unsigned_type_node, 1),
				   true);

	  make_edge (cond_bb, then_bb, EDGE_TRUE_VALUE);
	  make_edge (cond_bb, else_bb, EDGE_FALSE_VALUE);
	  add_bb_to_loop (then_bb, cond_bb->loop_father);
	  add_bb_to_loop (else_bb, cond_bb->loop_father);
	  e_then = make_edge (then_bb, bb, EDGE_FALLTHRU);
	  e_else = make_edge (else_bb, bb, EDGE_FALLTHRU);

	  /* In SSA form, merge the two arms with a PHI.  */
	  if (gimple_in_ssa_p (cfun))
	    {
	      gphi *phi = create_phi_node (tmp_join, bb);
	      add_phi_arg (phi, tmp_then, e_then, UNKNOWN_LOCATION);
	      add_phi_arg (phi, tmp_else, e_else, UNKNOWN_LOCATION);
	    }

	  val = tmp_join;
	}

      gsi = gsi_start_bb (bb);
      val = force_gimple_operand_gsi (&gsi, val, true, NULL_TREE,
				      false, GSI_CONTINUE_LINKING);
    }

  /* Emit the GOMP_parallel* call: (child_fn, data, num_threads,
     [ws_args...,] flags).  */
  gsi = gsi_last_bb (bb);
  t = gimple_omp_parallel_data_arg (entry_stmt);
  if (t == NULL)
    t1 = null_pointer_node;
  else
    t1 = build_fold_addr_expr (t);
  tree child_fndecl = gimple_omp_parallel_child_fn (entry_stmt);
  t2 = build_fold_addr_expr (child_fndecl);

  vec_alloc (args, 4 + vec_safe_length (ws_args));
  args->quick_push (t2);
  args->quick_push (t1);
  args->quick_push (val);
  if (ws_args)
    args->splice (*ws_args);
  args->quick_push (flags);

  t = build_call_expr_loc_vec (UNKNOWN_LOCATION,
			       builtin_decl_explicit (start_ix), args);

  force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
			    false, GSI_CONTINUE_LINKING);

  /* Register the outlined body as an HSA kernel when requested.  */
  if (hsa_gen_requested_p ()
      && parallel_needs_hsa_kernel_p (region))
    {
      cgraph_node *child_cnode = cgraph_node::get (child_fndecl);
      hsa_register_kernel (child_cnode);
    }
}
691
/* Insert a function call whose name is FUNC_NAME with the information from
   ENTRY_STMT into the basic_block BB.  WS_ARGS holds exactly two
   elements: the callee name and the grain size.  The emitted call is
   (child_fn, data, count, grain), where COUNT comes from the
   _Cilk_for count clause on the parallel.  */

static void
expand_cilk_for_call (basic_block bb, gomp_parallel *entry_stmt,
		      vec <tree, va_gc> *ws_args)
{
  tree t, t1, t2;
  gimple_stmt_iterator gsi;
  vec <tree, va_gc> *args;

  gcc_assert (vec_safe_length (ws_args) == 2);
  tree func_name = (*ws_args)[0];
  tree grain = (*ws_args)[1];

  tree clauses = gimple_omp_parallel_clauses (entry_stmt);
  tree count = omp_find_clause (clauses, OMP_CLAUSE__CILK_FOR_COUNT_);
  gcc_assert (count != NULL_TREE);
  count = OMP_CLAUSE_OPERAND (count, 0);

  gsi = gsi_last_bb (bb);
  t = gimple_omp_parallel_data_arg (entry_stmt);
  if (t == NULL)
    t1 = null_pointer_node;
  else
    t1 = build_fold_addr_expr (t);
  t2 = build_fold_addr_expr (gimple_omp_parallel_child_fn (entry_stmt));

  vec_alloc (args, 4);
  args->quick_push (t2);
  args->quick_push (t1);
  args->quick_push (count);
  args->quick_push (grain);
  t = build_call_expr_loc_vec (UNKNOWN_LOCATION, func_name, args);

  force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, false,
			    GSI_CONTINUE_LINKING);
}
730
/* Build the function call to GOMP_task (or GOMP_taskloop[_ull] for a
   taskloop) to actually generate the task operation.  BB is the block
   where to insert the code.  REGION is consulted only for a taskloop,
   whose enclosing GIMPLE_OMP_FOR supplies the iteration data.  */

static void
expand_task_call (struct omp_region *region, basic_block bb,
		  gomp_task *entry_stmt)
{
  tree t1, t2, t3;
  gimple_stmt_iterator gsi;
  location_t loc = gimple_location (entry_stmt);

  tree clauses = gimple_omp_task_clauses (entry_stmt);

  tree ifc = omp_find_clause (clauses, OMP_CLAUSE_IF);
  tree untied = omp_find_clause (clauses, OMP_CLAUSE_UNTIED);
  tree mergeable = omp_find_clause (clauses, OMP_CLAUSE_MERGEABLE);
  tree depend = omp_find_clause (clauses, OMP_CLAUSE_DEPEND);
  tree finalc = omp_find_clause (clauses, OMP_CLAUSE_FINAL);
  tree priority = omp_find_clause (clauses, OMP_CLAUSE_PRIORITY);

  /* Static flag bits; IF/FINAL may add runtime-computed bits below.  */
  unsigned int iflags
    = (untied ? GOMP_TASK_FLAG_UNTIED : 0)
      | (mergeable ? GOMP_TASK_FLAG_MERGEABLE : 0)
      | (depend ? GOMP_TASK_FLAG_DEPEND : 0);

  bool taskloop_p = gimple_omp_task_taskloop_p (entry_stmt);
  tree startvar = NULL_TREE, endvar = NULL_TREE, step = NULL_TREE;
  tree num_tasks = NULL_TREE;
  bool ull = false;
  if (taskloop_p)
    {
      /* The iteration space lives on the enclosing taskloop FOR stmt;
	 start/end are passed through _looptemp_ clauses.  */
      gimple *g = last_stmt (region->outer->entry);
      gcc_assert (gimple_code (g) == GIMPLE_OMP_FOR
		  && gimple_omp_for_kind (g) == GF_OMP_FOR_KIND_TASKLOOP);
      struct omp_for_data fd;
      omp_extract_for_data (as_a <gomp_for *> (g), &fd, NULL);
      startvar = omp_find_clause (clauses, OMP_CLAUSE__LOOPTEMP_);
      endvar = omp_find_clause (OMP_CLAUSE_CHAIN (startvar),
				OMP_CLAUSE__LOOPTEMP_);
      startvar = OMP_CLAUSE_DECL (startvar);
      endvar = OMP_CLAUSE_DECL (endvar);
      step = fold_convert_loc (loc, fd.iter_type, fd.loop.step);
      if (fd.loop.cond_code == LT_EXPR)
	iflags |= GOMP_TASK_FLAG_UP;
      tree tclauses = gimple_omp_for_clauses (g);
      /* num_tasks and grainsize share one argument slot; the GRAINSIZE
	 flag tells the runtime which one it received.  */
      num_tasks = omp_find_clause (tclauses, OMP_CLAUSE_NUM_TASKS);
      if (num_tasks)
	num_tasks = OMP_CLAUSE_NUM_TASKS_EXPR (num_tasks);
      else
	{
	  num_tasks = omp_find_clause (tclauses, OMP_CLAUSE_GRAINSIZE);
	  if (num_tasks)
	    {
	      iflags |= GOMP_TASK_FLAG_GRAINSIZE;
	      num_tasks = OMP_CLAUSE_GRAINSIZE_EXPR (num_tasks);
	    }
	  else
	    num_tasks = integer_zero_node;
	}
      num_tasks = fold_convert_loc (loc, long_integer_type_node, num_tasks);
      if (ifc == NULL_TREE)
	iflags |= GOMP_TASK_FLAG_IF;
      if (omp_find_clause (tclauses, OMP_CLAUSE_NOGROUP))
	iflags |= GOMP_TASK_FLAG_NOGROUP;
      ull = fd.iter_type == long_long_unsigned_type_node;
    }
  else if (priority)
    iflags |= GOMP_TASK_FLAG_PRIORITY;

  tree flags = build_int_cst (unsigned_type_node, iflags);

  tree cond = boolean_true_node;
  if (ifc)
    {
      if (taskloop_p)
	{
	  /* GOMP_taskloop has no COND argument; fold the IF clause
	     into the FLAG_IF bit at run time instead.  */
	  tree t = gimple_boolify (OMP_CLAUSE_IF_EXPR (ifc));
	  t = fold_build3_loc (loc, COND_EXPR, unsigned_type_node, t,
			       build_int_cst (unsigned_type_node,
					      GOMP_TASK_FLAG_IF),
			       build_int_cst (unsigned_type_node, 0));
	  flags = fold_build2_loc (loc, PLUS_EXPR, unsigned_type_node,
				   flags, t);
	}
      else
	cond = gimple_boolify (OMP_CLAUSE_IF_EXPR (ifc));
    }

  if (finalc)
    {
      /* FINAL is likewise folded into a runtime flag bit.  */
      tree t = gimple_boolify (OMP_CLAUSE_FINAL_EXPR (finalc));
      t = fold_build3_loc (loc, COND_EXPR, unsigned_type_node, t,
			   build_int_cst (unsigned_type_node,
					  GOMP_TASK_FLAG_FINAL),
			   build_int_cst (unsigned_type_node, 0));
      flags = fold_build2_loc (loc, PLUS_EXPR, unsigned_type_node, flags, t);
    }
  if (depend)
    depend = OMP_CLAUSE_DECL (depend);
  else
    depend = build_int_cst (ptr_type_node, 0);
  if (priority)
    priority = fold_convert (integer_type_node,
			     OMP_CLAUSE_PRIORITY_EXPR (priority));
  else
    priority = integer_zero_node;

  gsi = gsi_last_bb (bb);
  tree t = gimple_omp_task_data_arg (entry_stmt);
  if (t == NULL)
    t2 = null_pointer_node;
  else
    t2 = build_fold_addr_expr_loc (loc, t);
  t1 = build_fold_addr_expr_loc (loc, gimple_omp_task_child_fn (entry_stmt));
  t = gimple_omp_task_copy_fn (entry_stmt);
  if (t == NULL)
    t3 = null_pointer_node;
  else
    t3 = build_fold_addr_expr_loc (loc, t);

  /* GOMP_taskloop takes 11 arguments, GOMP_task takes 9; the first
     five (fn, data, cpyfn, size, align) are shared.  */
  if (taskloop_p)
    t = build_call_expr (ull
			 ? builtin_decl_explicit (BUILT_IN_GOMP_TASKLOOP_ULL)
			 : builtin_decl_explicit (BUILT_IN_GOMP_TASKLOOP),
			 11, t1, t2, t3,
			 gimple_omp_task_arg_size (entry_stmt),
			 gimple_omp_task_arg_align (entry_stmt), flags,
			 num_tasks, priority, startvar, endvar, step);
  else
    t = build_call_expr (builtin_decl_explicit (BUILT_IN_GOMP_TASK),
			 9, t1, t2, t3,
			 gimple_omp_task_arg_size (entry_stmt),
			 gimple_omp_task_arg_align (entry_stmt), cond, flags,
			 depend, priority);

  force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
			    false, GSI_CONTINUE_LINKING);
}
869
870 /* Chain all the DECLs in LIST by their TREE_CHAIN fields. */
871
872 static tree
873 vec2chain (vec<tree, va_gc> *v)
874 {
875 tree chain = NULL_TREE, t;
876 unsigned ix;
877
878 FOR_EACH_VEC_SAFE_ELT_REVERSE (v, ix, t)
879 {
880 DECL_CHAIN (t) = chain;
881 chain = t;
882 }
883
884 return chain;
885 }
886
/* Remove barriers in REGION->EXIT's block.  Note that this is only
   valid for GIMPLE_OMP_PARALLEL regions.  Since the end of a parallel region
   is an implicit barrier, any workshare inside the GIMPLE_OMP_PARALLEL that
   left a barrier at the end of the GIMPLE_OMP_PARALLEL region can now be
   removed.  */

static void
remove_exit_barrier (struct omp_region *region)
{
  gimple_stmt_iterator gsi;
  basic_block exit_bb;
  edge_iterator ei;
  edge e;
  gimple *stmt;
  /* Tri-state cache: -1 = not yet computed, 0/1 = result.  */
  int any_addressable_vars = -1;

  exit_bb = region->exit;

  /* If the parallel region doesn't return, we don't have REGION->EXIT
     block at all.  */
  if (! exit_bb)
    return;

  /* The last insn in the block will be the parallel's GIMPLE_OMP_RETURN.  The
     workshare's GIMPLE_OMP_RETURN will be in a preceding block.  The kinds of
     statements that can appear in between are extremely limited -- no
     memory operations at all.  Here, we allow nothing at all, so the
     only thing we allow to precede this GIMPLE_OMP_RETURN is a label.  */
  gsi = gsi_last_bb (exit_bb);
  gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_RETURN);
  gsi_prev (&gsi);
  if (!gsi_end_p (gsi) && gimple_code (gsi_stmt (gsi)) != GIMPLE_LABEL)
    return;

  /* Look at each predecessor: a workshare GIMPLE_OMP_RETURN ending
     there carries the barrier we may be able to drop.  */
  FOR_EACH_EDGE (e, ei, exit_bb->preds)
    {
      gsi = gsi_last_bb (e->src);
      if (gsi_end_p (gsi))
	continue;
      stmt = gsi_stmt (gsi);
      if (gimple_code (stmt) == GIMPLE_OMP_RETURN
	  && !gimple_omp_return_nowait_p (stmt))
	{
	  /* OpenMP 3.0 tasks unfortunately prevent this optimization
	     in many cases.  If there could be tasks queued, the barrier
	     might be needed to let the tasks run before some local
	     variable of the parallel that the task uses as shared
	     runs out of scope.  The task can be spawned either
	     from within current function (this would be easy to check)
	     or from some function it calls and gets passed an address
	     of such a variable.  */
	  if (any_addressable_vars < 0)
	    {
	      gomp_parallel *parallel_stmt
		= as_a <gomp_parallel *> (last_stmt (region->entry));
	      tree child_fun = gimple_omp_parallel_child_fn (parallel_stmt);
	      tree local_decls, block, decl;
	      unsigned ix;

	      any_addressable_vars = 0;
	      /* Check the child function's locals ...  */
	      FOR_EACH_LOCAL_DECL (DECL_STRUCT_FUNCTION (child_fun), ix, decl)
		if (TREE_ADDRESSABLE (decl))
		  {
		    any_addressable_vars = 1;
		    break;
		  }
	      /* ... and the BLOCK vars from the workshare's scope out
		 to the parallel's scope.  */
	      for (block = gimple_block (stmt);
		   !any_addressable_vars
		   && block
		   && TREE_CODE (block) == BLOCK;
		   block = BLOCK_SUPERCONTEXT (block))
		{
		  for (local_decls = BLOCK_VARS (block);
		       local_decls;
		       local_decls = DECL_CHAIN (local_decls))
		    if (TREE_ADDRESSABLE (local_decls))
		      {
			any_addressable_vars = 1;
			break;
		      }
		  if (block == gimple_block (parallel_stmt))
		    break;
		}
	    }
	  /* No addressable locals: a queued task cannot be observing
	     them, so the workshare barrier is redundant.  */
	  if (!any_addressable_vars)
	    gimple_omp_return_set_nowait (stmt);
	}
    }
}
976
977 static void
978 remove_exit_barriers (struct omp_region *region)
979 {
980 if (region->type == GIMPLE_OMP_PARALLEL)
981 remove_exit_barrier (region);
982
983 if (region->inner)
984 {
985 region = region->inner;
986 remove_exit_barriers (region);
987 while (region->next)
988 {
989 region = region->next;
990 remove_exit_barriers (region);
991 }
992 }
993 }
994
/* Optimize omp_get_thread_num () and omp_get_num_threads ()
   calls.  These can't be declared as const functions, but
   within one parallel body they are constant, so they can be
   transformed there into __builtin_omp_get_{thread_num,num_threads} ()
   which are declared const.  Similarly for task body, except
   that in untied task omp_get_thread_num () can change at any task
   scheduling point.  */

static void
optimize_omp_library_calls (gimple *entry_stmt)
{
  basic_block bb;
  gimple_stmt_iterator gsi;
  tree thr_num_tree = builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM);
  tree thr_num_id = DECL_ASSEMBLER_NAME (thr_num_tree);
  tree num_thr_tree = builtin_decl_explicit (BUILT_IN_OMP_GET_NUM_THREADS);
  tree num_thr_id = DECL_ASSEMBLER_NAME (num_thr_tree);
  bool untied_task = (gimple_code (entry_stmt) == GIMPLE_OMP_TASK
		      && omp_find_clause (gimple_omp_task_clauses (entry_stmt),
					  OMP_CLAUSE_UNTIED) != NULL);

  FOR_EACH_BB_FN (bb, cfun)
    for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
      {
	gimple *call = gsi_stmt (gsi);
	tree decl;

	/* Only consider calls to external, public functions with no
	   body — i.e. the library entry points themselves.  */
	if (is_gimple_call (call)
	    && (decl = gimple_call_fndecl (call))
	    && DECL_EXTERNAL (decl)
	    && TREE_PUBLIC (decl)
	    && DECL_INITIAL (decl) == NULL)
	  {
	    tree built_in;

	    if (DECL_NAME (decl) == thr_num_id)
	      {
		/* In #pragma omp task untied omp_get_thread_num () can change
		   during the execution of the task region.  */
		if (untied_task)
		  continue;
		built_in = builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM);
	      }
	    else if (DECL_NAME (decl) == num_thr_id)
	      built_in = builtin_decl_explicit (BUILT_IN_OMP_GET_NUM_THREADS);
	    else
	      continue;

	    /* Guard against a user function that merely shares the
	       name: assembler name, arity, nothrow-ness and return
	       type must all match the builtin.  */
	    if (DECL_ASSEMBLER_NAME (decl) != DECL_ASSEMBLER_NAME (built_in)
		|| gimple_call_num_args (call) != 0)
	      continue;

	    if (flag_exceptions && !TREE_NOTHROW (decl))
	      continue;

	    if (TREE_CODE (TREE_TYPE (decl)) != FUNCTION_TYPE
		|| !types_compatible_p (TREE_TYPE (TREE_TYPE (decl)),
					TREE_TYPE (TREE_TYPE (built_in))))
	      continue;

	    gimple_call_set_fndecl (call, built_in);
	  }
      }
}
1059
1060 /* Callback for expand_omp_build_assign. Return non-NULL if *tp needs to be
1061 regimplified. */
1062
1063 static tree
1064 expand_omp_regimplify_p (tree *tp, int *walk_subtrees, void *)
1065 {
1066 tree t = *tp;
1067
1068 /* Any variable with DECL_VALUE_EXPR needs to be regimplified. */
1069 if (VAR_P (t) && DECL_HAS_VALUE_EXPR_P (t))
1070 return t;
1071
1072 if (TREE_CODE (t) == ADDR_EXPR)
1073 recompute_tree_invariant_for_addr_expr (t);
1074
1075 *walk_subtrees = !TYPE_P (t) && !DECL_P (t);
1076 return NULL_TREE;
1077 }
1078
1079 /* Prepend or append TO = FROM assignment before or after *GSI_P. */
1080
1081 static void
1082 expand_omp_build_assign (gimple_stmt_iterator *gsi_p, tree to, tree from,
1083 bool after)
1084 {
1085 bool simple_p = DECL_P (to) && TREE_ADDRESSABLE (to);
1086 from = force_gimple_operand_gsi (gsi_p, from, simple_p, NULL_TREE,
1087 !after, after ? GSI_CONTINUE_LINKING
1088 : GSI_SAME_STMT);
1089 gimple *stmt = gimple_build_assign (to, from);
1090 if (after)
1091 gsi_insert_after (gsi_p, stmt, GSI_CONTINUE_LINKING);
1092 else
1093 gsi_insert_before (gsi_p, stmt, GSI_SAME_STMT);
1094 if (walk_tree (&from, expand_omp_regimplify_p, NULL, NULL)
1095 || walk_tree (&to, expand_omp_regimplify_p, NULL, NULL))
1096 {
1097 gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
1098 gimple_regimplify_operands (stmt, &gsi);
1099 }
1100 }
1101
1102 /* Expand the OpenMP parallel or task directive starting at REGION. */
1103
static void
expand_omp_taskreg (struct omp_region *region)
{
  basic_block entry_bb, exit_bb, new_bb;
  struct function *child_cfun;
  tree child_fn, block, t;
  gimple_stmt_iterator gsi;
  gimple *entry_stmt, *stmt;
  edge e;
  vec<tree, va_gc> *ws_args;

  /* The directive statement is the last statement of the entry block;
     the outlined body will be moved into its child function.  */
  entry_stmt = last_stmt (region->entry);
  child_fn = gimple_omp_taskreg_child_fn (entry_stmt);
  child_cfun = DECL_STRUCT_FUNCTION (child_fn);

  entry_bb = region->entry;
  if (gimple_code (entry_stmt) == GIMPLE_OMP_TASK)
    exit_bb = region->cont;
  else
    exit_bb = region->exit;

  bool is_cilk_for
    = (flag_cilkplus
       && gimple_code (entry_stmt) == GIMPLE_OMP_PARALLEL
       && omp_find_clause (gimple_omp_parallel_clauses (entry_stmt),
			   OMP_CLAUSE__CILK_FOR_COUNT_) != NULL_TREE);

  if (is_cilk_for)
    /* If it is a _Cilk_for statement, it is modelled *like* a parallel for,
       and the inner statement contains the name of the built-in function
       and grain.  */
    ws_args = region->inner->ws_args;
  else if (is_combined_parallel (region))
    ws_args = region->ws_args;
  else
    ws_args = NULL;

  if (child_cfun->cfg)
    {
      /* Due to inlining, it may happen that we have already outlined
	 the region, in which case all we need to do is make the
	 sub-graph unreachable and emit the parallel call.  */
      edge entry_succ_e, exit_succ_e;

      entry_succ_e = single_succ_edge (entry_bb);

      /* Drop the directive statement itself; the launch call replaces it.  */
      gsi = gsi_last_bb (entry_bb);
      gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_PARALLEL
		  || gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_TASK);
      gsi_remove (&gsi, true);

      new_bb = entry_bb;
      if (exit_bb)
	{
	  exit_succ_e = single_succ_edge (exit_bb);
	  make_edge (new_bb, exit_succ_e->dest, EDGE_FALLTHRU);
	}
      remove_edge_and_dominated_blocks (entry_succ_e);
    }
  else
    {
      unsigned srcidx, dstidx, num;

      /* If the parallel region needs data sent from the parent
	 function, then the very first statement (except possible
	 tree profile counter updates) of the parallel body
	 is a copy assignment .OMP_DATA_I = &.OMP_DATA_O.  Since
	 &.OMP_DATA_O is passed as an argument to the child function,
	 we need to replace it with the argument as seen by the child
	 function.

	 In most cases, this will end up being the identity assignment
	 .OMP_DATA_I = .OMP_DATA_I.  However, if the parallel body had
	 a function call that has been inlined, the original PARM_DECL
	 .OMP_DATA_I may have been converted into a different local
	 variable.  In which case, we need to keep the assignment.  */
      if (gimple_omp_taskreg_data_arg (entry_stmt))
	{
	  basic_block entry_succ_bb
	    = single_succ_p (entry_bb) ? single_succ (entry_bb)
				       : FALLTHRU_EDGE (entry_bb)->dest;
	  tree arg;
	  gimple *parcopy_stmt = NULL;

	  /* Scan forward for the .OMP_DATA_I = &.OMP_DATA_O copy; it must
	     exist, hence the assert inside the loop rather than a bound.  */
	  for (gsi = gsi_start_bb (entry_succ_bb); ; gsi_next (&gsi))
	    {
	      gimple *stmt;

	      gcc_assert (!gsi_end_p (gsi));
	      stmt = gsi_stmt (gsi);
	      if (gimple_code (stmt) != GIMPLE_ASSIGN)
		continue;

	      if (gimple_num_ops (stmt) == 2)
		{
		  tree arg = gimple_assign_rhs1 (stmt);

		  /* We ignore the subcode because we're
		     effectively doing a STRIP_NOPS.  */

		  if (TREE_CODE (arg) == ADDR_EXPR
		      && TREE_OPERAND (arg, 0)
			 == gimple_omp_taskreg_data_arg (entry_stmt))
		    {
		      parcopy_stmt = stmt;
		      break;
		    }
		}
	    }

	  gcc_assert (parcopy_stmt != NULL);
	  arg = DECL_ARGUMENTS (child_fn);

	  if (!gimple_in_ssa_p (cfun))
	    {
	      if (gimple_assign_lhs (parcopy_stmt) == arg)
		gsi_remove (&gsi, true);
	      else
		{
		  /* ?? Is setting the subcode really necessary ??  */
		  gimple_omp_set_subcode (parcopy_stmt, TREE_CODE (arg));
		  gimple_assign_set_rhs1 (parcopy_stmt, arg);
		}
	    }
	  else
	    {
	      tree lhs = gimple_assign_lhs (parcopy_stmt);
	      gcc_assert (SSA_NAME_VAR (lhs) == arg);
	      /* We'd like to set the rhs to the default def in the child_fn,
		 but it's too early to create ssa names in the child_fn.
		 Instead, we set the rhs to the parm.  In
		 move_sese_region_to_fn, we introduce a default def for the
		 parm, map the parm to it's default def, and once we encounter
		 this stmt, replace the parm with the default def.  */
	      gimple_assign_set_rhs1 (parcopy_stmt, arg);
	      update_stmt (parcopy_stmt);
	    }
	}

      /* Declare local variables needed in CHILD_CFUN.  */
      block = DECL_INITIAL (child_fn);
      BLOCK_VARS (block) = vec2chain (child_cfun->local_decls);
      /* The gimplifier could record temporaries in parallel/task block
	 rather than in containing function's local_decls chain,
	 which would mean cgraph missed finalizing them.  Do it now.  */
      for (t = BLOCK_VARS (block); t; t = DECL_CHAIN (t))
	if (VAR_P (t) && TREE_STATIC (t) && !DECL_EXTERNAL (t))
	  varpool_node::finalize_decl (t);
      DECL_SAVED_TREE (child_fn) = NULL;
      /* We'll create a CFG for child_fn, so no gimple body is needed.  */
      gimple_set_body (child_fn, NULL);
      TREE_USED (block) = 1;

      /* Reset DECL_CONTEXT on function arguments.  */
      for (t = DECL_ARGUMENTS (child_fn); t; t = DECL_CHAIN (t))
	DECL_CONTEXT (t) = child_fn;

      /* Split ENTRY_BB at GIMPLE_OMP_PARALLEL or GIMPLE_OMP_TASK,
	 so that it can be moved to the child function.  */
      gsi = gsi_last_bb (entry_bb);
      stmt = gsi_stmt (gsi);
      gcc_assert (stmt && (gimple_code (stmt) == GIMPLE_OMP_PARALLEL
			   || gimple_code (stmt) == GIMPLE_OMP_TASK));
      e = split_block (entry_bb, stmt);
      gsi_remove (&gsi, true);
      entry_bb = e->dest;
      edge e2 = NULL;
      if (gimple_code (entry_stmt) == GIMPLE_OMP_PARALLEL)
	single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
      else
	{
	  /* For a task, reroute control past the body to the region exit
	     and drop the GIMPLE_OMP_RETURN there; the body is reached
	     only via the runtime.  */
	  e2 = make_edge (e->src, BRANCH_EDGE (entry_bb)->dest, EDGE_ABNORMAL);
	  gcc_assert (e2->dest == region->exit);
	  remove_edge (BRANCH_EDGE (entry_bb));
	  set_immediate_dominator (CDI_DOMINATORS, e2->dest, e->src);
	  gsi = gsi_last_bb (region->exit);
	  gcc_assert (!gsi_end_p (gsi)
		      && gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_RETURN);
	  gsi_remove (&gsi, true);
	}

      /* Convert GIMPLE_OMP_{RETURN,CONTINUE} into a RETURN_EXPR.  */
      if (exit_bb)
	{
	  gsi = gsi_last_bb (exit_bb);
	  gcc_assert (!gsi_end_p (gsi)
		      && (gimple_code (gsi_stmt (gsi))
			  == (e2 ? GIMPLE_OMP_CONTINUE : GIMPLE_OMP_RETURN)));
	  stmt = gimple_build_return (NULL);
	  gsi_insert_after (&gsi, stmt, GSI_SAME_STMT);
	  gsi_remove (&gsi, true);
	}

      /* Move the parallel region into CHILD_CFUN.  */

      if (gimple_in_ssa_p (cfun))
	{
	  init_tree_ssa (child_cfun);
	  init_ssa_operands (child_cfun);
	  child_cfun->gimple_df->in_ssa_p = true;
	  block = NULL_TREE;
	}
      else
	block = gimple_block (entry_stmt);

      /* Make sure to generate early debug for the function before
	 outlining anything.  */
      if (! gimple_in_ssa_p (cfun))
	(*debug_hooks->early_global_decl) (cfun->decl);

      new_bb = move_sese_region_to_fn (child_cfun, entry_bb, exit_bb, block);
      if (exit_bb)
	single_succ_edge (new_bb)->flags = EDGE_FALLTHRU;
      if (e2)
	{
	  basic_block dest_bb = e2->dest;
	  if (!exit_bb)
	    make_edge (new_bb, dest_bb, EDGE_FALLTHRU);
	  remove_edge (e2);
	  set_immediate_dominator (CDI_DOMINATORS, dest_bb, new_bb);
	}
      /* When the OMP expansion process cannot guarantee an up-to-date
	 loop tree arrange for the child function to fixup loops.  */
      if (loops_state_satisfies_p (LOOPS_NEED_FIXUP))
	child_cfun->x_current_loops->state |= LOOPS_NEED_FIXUP;

      /* Remove non-local VAR_DECLs from child_cfun->local_decls list.
	 Done by compacting the vector in place and truncating it.  */
      num = vec_safe_length (child_cfun->local_decls);
      for (srcidx = 0, dstidx = 0; srcidx < num; srcidx++)
	{
	  t = (*child_cfun->local_decls)[srcidx];
	  if (DECL_CONTEXT (t) == cfun->decl)
	    continue;
	  if (srcidx != dstidx)
	    (*child_cfun->local_decls)[dstidx] = t;
	  dstidx++;
	}
      if (dstidx != num)
	vec_safe_truncate (child_cfun->local_decls, dstidx);

      /* Inform the callgraph about the new function.  */
      child_cfun->curr_properties = cfun->curr_properties;
      child_cfun->has_simduid_loops |= cfun->has_simduid_loops;
      child_cfun->has_force_vectorize_loops |= cfun->has_force_vectorize_loops;
      cgraph_node *node = cgraph_node::get_create (child_fn);
      node->parallelized_function = 1;
      cgraph_node::add_new_function (child_fn, true);

      bool need_asm = DECL_ASSEMBLER_NAME_SET_P (current_function_decl)
		      && !DECL_ASSEMBLER_NAME_SET_P (child_fn);

      /* Fix the callgraph edges for child_cfun.  Those for cfun will be
	 fixed in a following pass.  */
      push_cfun (child_cfun);
      if (need_asm)
	assign_assembler_name_if_needed (child_fn);

      if (optimize)
	optimize_omp_library_calls (entry_stmt);
      cgraph_edge::rebuild_edges ();

      /* Some EH regions might become dead, see PR34608.  If
	 pass_cleanup_cfg isn't the first pass to happen with the
	 new child, these dead EH edges might cause problems.
	 Clean them up now.  */
      if (flag_exceptions)
	{
	  basic_block bb;
	  bool changed = false;

	  FOR_EACH_BB_FN (bb, cfun)
	    changed |= gimple_purge_dead_eh_edges (bb);
	  if (changed)
	    cleanup_tree_cfg ();
	}
      if (gimple_in_ssa_p (cfun))
	update_ssa (TODO_update_ssa);
      if (flag_checking && !loops_state_satisfies_p (LOOPS_NEED_FIXUP))
	verify_loop_structure ();
      pop_cfun ();

      if (dump_file && !gimple_in_ssa_p (cfun))
	{
	  omp_any_child_fn_dumped = true;
	  dump_function_header (dump_file, child_fn, dump_flags);
	  dump_function_to_file (child_fn, dump_file, dump_flags);
	}
    }

  /* Emit a library call to launch the children threads.  */
  if (is_cilk_for)
    expand_cilk_for_call (new_bb,
			  as_a <gomp_parallel *> (entry_stmt), ws_args);
  else if (gimple_code (entry_stmt) == GIMPLE_OMP_PARALLEL)
    expand_parallel_call (region, new_bb,
			  as_a <gomp_parallel *> (entry_stmt), ws_args);
  else
    expand_task_call (region, new_bb, as_a <gomp_task *> (entry_stmt));
  if (gimple_in_ssa_p (cfun))
    update_ssa (TODO_update_ssa_only_virtuals);
}
1405
/* Information about members of an OpenACC collapsed loop nest.  One
   entry per collapsed loop, filled in by expand_oacc_collapse_init and
   consumed by expand_oacc_collapse_vars.  */

struct oacc_collapse
{
  tree base;  /* Base value (loop's N1).  */
  tree iters; /* Number of steps (iteration count).  */
  tree step;  /* Step size, converted to the signed difference type.  */
  tree tile;  /* Tile increment (if tiled).  */
  tree outer; /* Tile iterator var.  */
};
1416
1417 /* Helper for expand_oacc_for. Determine collapsed loop information.
1418 Fill in COUNTS array. Emit any initialization code before GSI.
1419 Return the calculated outer loop bound of BOUND_TYPE. */
1420
static tree
expand_oacc_collapse_init (const struct omp_for_data *fd,
			   gimple_stmt_iterator *gsi,
			   oacc_collapse *counts, tree bound_type,
			   location_t loc)
{
  tree tiling = fd->tiling;
  /* Running product of the per-loop iteration counts.  */
  tree total = build_int_cst (bound_type, 1);
  int ix;

  /* The combined loop has already been normalized to 0..N step 1.  */
  gcc_assert (integer_onep (fd->loop.step));
  gcc_assert (integer_zerop (fd->loop.n1));

  /* When tiling, the first operand of the tile clause applies to the
     innermost loop, and we work outwards from there.  Seems
     backwards, but whatever.  */
  for (ix = fd->collapse; ix--;)
    {
      const omp_for_data_loop *loop = &fd->loops[ix];

      tree iter_type = TREE_TYPE (loop->v);
      tree diff_type = iter_type;
      tree plus_type = iter_type;

      gcc_assert (loop->cond_code == fd->loop.cond_code);

      /* Pointer iteration vars advance by sizetype offsets; differences
	 are computed in a signed type to avoid unsigned wraparound.  */
      if (POINTER_TYPE_P (iter_type))
	plus_type = sizetype;
      if (POINTER_TYPE_P (diff_type) || TYPE_UNSIGNED (diff_type))
	diff_type = signed_type_for (diff_type);

      if (tiling)
	{
	  /* Ask the target for this loop's tile size via GOACC_TILE and
	     allocate the tile/outer iterator temporaries.  */
	  tree num = build_int_cst (integer_type_node, fd->collapse);
	  tree loop_no = build_int_cst (integer_type_node, ix);
	  tree tile = TREE_VALUE (tiling);
	  gcall *call
	    = gimple_build_call_internal (IFN_GOACC_TILE, 5, num, loop_no, tile,
					  /* gwv-outer=*/integer_zero_node,
					  /* gwv-inner=*/integer_zero_node);

	  counts[ix].outer = create_tmp_var (iter_type, ".outer");
	  counts[ix].tile = create_tmp_var (diff_type, ".tile");
	  gimple_call_set_lhs (call, counts[ix].tile);
	  gimple_set_location (call, loc);
	  gsi_insert_before (gsi, call, GSI_SAME_STMT);

	  tiling = TREE_CHAIN (tiling);
	}
      else
	{
	  counts[ix].tile = NULL;
	  counts[ix].outer = loop->v;
	}

      tree b = loop->n1;
      tree e = loop->n2;
      tree s = loop->step;
      bool up = loop->cond_code == LT_EXPR;
      tree dir = build_int_cst (diff_type, up ? +1 : -1);
      bool negating;
      tree expr;

      b = force_gimple_operand_gsi (gsi, b, true, NULL_TREE,
				    true, GSI_SAME_STMT);
      e = force_gimple_operand_gsi (gsi, e, true, NULL_TREE,
				    true, GSI_SAME_STMT);

      /* Convert the step, avoiding possible unsigned->signed overflow.  */
      negating = !up && TYPE_UNSIGNED (TREE_TYPE (s));
      if (negating)
	s = fold_build1 (NEGATE_EXPR, TREE_TYPE (s), s);
      s = fold_convert (diff_type, s);
      if (negating)
	s = fold_build1 (NEGATE_EXPR, diff_type, s);
      s = force_gimple_operand_gsi (gsi, s, true, NULL_TREE,
				    true, GSI_SAME_STMT);

      /* Determine the range, avoiding possible unsigned->signed overflow.  */
      negating = !up && TYPE_UNSIGNED (iter_type);
      expr = fold_build2 (MINUS_EXPR, plus_type,
			  fold_convert (plus_type, negating ? b : e),
			  fold_convert (plus_type, negating ? e : b));
      expr = fold_convert (diff_type, expr);
      if (negating)
	expr = fold_build1 (NEGATE_EXPR, diff_type, expr);
      tree range = force_gimple_operand_gsi
	(gsi, expr, true, NULL_TREE, true, GSI_SAME_STMT);

      /* Determine number of iterations: (range - dir + step) / step,
	 i.e. a division that rounds towards the direction of travel.  */
      expr = fold_build2 (MINUS_EXPR, diff_type, range, dir);
      expr = fold_build2 (PLUS_EXPR, diff_type, expr, s);
      expr = fold_build2 (TRUNC_DIV_EXPR, diff_type, expr, s);

      tree iters = force_gimple_operand_gsi (gsi, expr, true, NULL_TREE,
					     true, GSI_SAME_STMT);

      counts[ix].base = b;
      counts[ix].iters = iters;
      counts[ix].step = s;

      total = fold_build2 (MULT_EXPR, bound_type, total,
			   fold_convert (bound_type, iters));
    }

  return total;
}
1528
1529 /* Emit initializers for collapsed loop members. INNER is true if
1530 this is for the element loop of a TILE. IVAR is the outer
1531 loop iteration variable, from which collapsed loop iteration values
1532 are calculated. COUNTS array has been initialized by
1533 expand_oacc_collapse_inits. */
1534
static void
expand_oacc_collapse_vars (const struct omp_for_data *fd, bool inner,
			   gimple_stmt_iterator *gsi,
			   const oacc_collapse *counts, tree ivar)
{
  tree ivar_type = TREE_TYPE (ivar);

  /* The most rapidly changing iteration variable is the innermost
     one.  */
  for (int ix = fd->collapse; ix--;)
    {
      const omp_for_data_loop *loop = &fd->loops[ix];
      const oacc_collapse *collapse = &counts[ix];
      /* For the element loop of a tile, assign the user's loop var;
	 otherwise assign the (possibly synthetic) outer tile var.  */
      tree v = inner ? loop->v : collapse->outer;
      tree iter_type = TREE_TYPE (v);
      tree diff_type = TREE_TYPE (collapse->step);
      tree plus_type = iter_type;
      enum tree_code plus_code = PLUS_EXPR;
      tree expr;

      if (POINTER_TYPE_P (iter_type))
	{
	  plus_code = POINTER_PLUS_EXPR;
	  plus_type = sizetype;
	}

      /* Peel this loop's index off IVAR: the remainder modulo this
	 loop's iteration count is the local index, the quotient is
	 carried to the next-outer loop.  */
      expr = ivar;
      if (ix)
	{
	  tree mod = fold_convert (ivar_type, collapse->iters);
	  ivar = fold_build2 (TRUNC_DIV_EXPR, ivar_type, expr, mod);
	  expr = fold_build2 (TRUNC_MOD_EXPR, ivar_type, expr, mod);
	  ivar = force_gimple_operand_gsi (gsi, ivar, true, NULL_TREE,
					   true, GSI_SAME_STMT);
	}

      /* V = base + index * step.  */
      expr = fold_build2 (MULT_EXPR, diff_type, fold_convert (diff_type, expr),
			  collapse->step);
      expr = fold_build2 (plus_code, iter_type,
			  inner ? collapse->outer : collapse->base,
			  fold_convert (plus_type, expr));
      expr = force_gimple_operand_gsi (gsi, expr, false, NULL_TREE,
				       true, GSI_SAME_STMT);
      gassign *ass = gimple_build_assign (v, expr);
      gsi_insert_before (gsi, ass, GSI_SAME_STMT);
    }
}
1582
1583 /* Helper function for expand_omp_{for_*,simd}. If this is the outermost
1584 of the combined collapse > 1 loop constructs, generate code like:
1585 if (__builtin_expect (N32 cond3 N31, 0)) goto ZERO_ITER_BB;
1586 if (cond3 is <)
1587 adj = STEP3 - 1;
1588 else
1589 adj = STEP3 + 1;
1590 count3 = (adj + N32 - N31) / STEP3;
1591 if (__builtin_expect (N22 cond2 N21, 0)) goto ZERO_ITER_BB;
1592 if (cond2 is <)
1593 adj = STEP2 - 1;
1594 else
1595 adj = STEP2 + 1;
1596 count2 = (adj + N22 - N21) / STEP2;
1597 if (__builtin_expect (N12 cond1 N11, 0)) goto ZERO_ITER_BB;
1598 if (cond1 is <)
1599 adj = STEP1 - 1;
1600 else
1601 adj = STEP1 + 1;
1602 count1 = (adj + N12 - N11) / STEP1;
1603 count = count1 * count2 * count3;
1604 Furthermore, if ZERO_ITER_BB is NULL, create a BB which does:
1605 count = 0;
1606 and set ZERO_ITER_BB to that bb. If this isn't the outermost
1607 of the combined loop constructs, just initialize COUNTS array
1608 from the _looptemp_ clauses. */
1609
1610 /* NOTE: It *could* be better to moosh all of the BBs together,
1611 creating one larger BB with all the computation and the unexpected
1612 jump at the end. I.e.
1613
1614 bool zero3, zero2, zero1, zero;
1615
1616 zero3 = N32 c3 N31;
1617 count3 = (N32 - N31) /[cl] STEP3;
1618 zero2 = N22 c2 N21;
1619 count2 = (N22 - N21) /[cl] STEP2;
1620 zero1 = N12 c1 N11;
1621 count1 = (N12 - N11) /[cl] STEP1;
1622 zero = zero3 || zero2 || zero1;
1623 count = count1 * count2 * count3;
1624 if (__builtin_expect(zero, false)) goto zero_iter_bb;
1625
1626 After all, we expect the zero=false, and thus we expect to have to
1627 evaluate all of the comparison expressions, so short-circuiting
1628 oughtn't be a win. Since the condition isn't protecting a
1629 denominator, we're not concerned about divide-by-zero, so we can
1630 fully evaluate count even if a numerator turned out to be wrong.
1631
1632 It seems like putting this all together would create much better
1633 scheduling opportunities, and less pressure on the chip's branch
1634 predictor. */
1635
static void
expand_omp_for_init_counts (struct omp_for_data *fd, gimple_stmt_iterator *gsi,
			    basic_block &entry_bb, tree *counts,
			    basic_block &zero_iter1_bb, int &first_zero_iter1,
			    basic_block &zero_iter2_bb, int &first_zero_iter2,
			    basic_block &l2_dom_bb)
{
  tree t, type = TREE_TYPE (fd->loop.v);
  edge e, ne;
  int i;

  /* Collapsed loops need work for expansion into SSA form.  */
  gcc_assert (!gimple_in_ssa_p (cfun));

  if (gimple_omp_for_combined_into_p (fd->for_stmt)
      && TREE_CODE (fd->loop.n2) != INTEGER_CST)
    {
      gcc_assert (fd->ordered == 0);
      /* First two _looptemp_ clauses are for istart/iend, counts[0]
	 isn't supposed to be handled, as the inner loop doesn't
	 use it.  */
      tree innerc = omp_find_clause (gimple_omp_for_clauses (fd->for_stmt),
				     OMP_CLAUSE__LOOPTEMP_);
      gcc_assert (innerc);
      for (i = 0; i < fd->collapse; i++)
	{
	  innerc = omp_find_clause (OMP_CLAUSE_CHAIN (innerc),
				    OMP_CLAUSE__LOOPTEMP_);
	  gcc_assert (innerc);
	  if (i)
	    counts[i] = OMP_CLAUSE_DECL (innerc);
	  else
	    counts[0] = NULL_TREE;
	}
      return;
    }

  /* For the doacross (ordered) loops beyond the collapse depth: if any
     of them statically has zero iterations, all their counts are zero.  */
  for (i = fd->collapse; i < fd->ordered; i++)
    {
      tree itype = TREE_TYPE (fd->loops[i].v);
      counts[i] = NULL_TREE;
      t = fold_binary (fd->loops[i].cond_code, boolean_type_node,
		       fold_convert (itype, fd->loops[i].n1),
		       fold_convert (itype, fd->loops[i].n2));
      if (t && integer_zerop (t))
	{
	  for (i = fd->collapse; i < fd->ordered; i++)
	    counts[i] = build_int_cst (type, 0);
	  break;
	}
    }
  for (i = 0; i < (fd->ordered ? fd->ordered : fd->collapse); i++)
    {
      tree itype = TREE_TYPE (fd->loops[i].v);

      if (i >= fd->collapse && counts[i])
	continue;
      /* Unless the loop is statically known to iterate, emit a runtime
	 N1 cond N2 test that branches to the zero-iterations block.  */
      if ((SSA_VAR_P (fd->loop.n2) || i >= fd->collapse)
	  && ((t = fold_binary (fd->loops[i].cond_code, boolean_type_node,
				fold_convert (itype, fd->loops[i].n1),
				fold_convert (itype, fd->loops[i].n2)))
	      == NULL_TREE || !integer_onep (t)))
	{
	  gcond *cond_stmt;
	  tree n1, n2;
	  n1 = fold_convert (itype, unshare_expr (fd->loops[i].n1));
	  n1 = force_gimple_operand_gsi (gsi, n1, true, NULL_TREE,
					 true, GSI_SAME_STMT);
	  n2 = fold_convert (itype, unshare_expr (fd->loops[i].n2));
	  n2 = force_gimple_operand_gsi (gsi, n2, true, NULL_TREE,
					 true, GSI_SAME_STMT);
	  cond_stmt = gimple_build_cond (fd->loops[i].cond_code, n1, n2,
					 NULL_TREE, NULL_TREE);
	  gsi_insert_before (gsi, cond_stmt, GSI_SAME_STMT);
	  if (walk_tree (gimple_cond_lhs_ptr (cond_stmt),
			 expand_omp_regimplify_p, NULL, NULL)
	      || walk_tree (gimple_cond_rhs_ptr (cond_stmt),
			    expand_omp_regimplify_p, NULL, NULL))
	    {
	      *gsi = gsi_for_stmt (cond_stmt);
	      gimple_regimplify_operands (cond_stmt, gsi);
	    }
	  e = split_block (entry_bb, cond_stmt);
	  /* Collapsed loops share one zero-iteration block, doacross
	     loops another; create it lazily on first use.  */
	  basic_block &zero_iter_bb
	    = i < fd->collapse ? zero_iter1_bb : zero_iter2_bb;
	  int &first_zero_iter
	    = i < fd->collapse ? first_zero_iter1 : first_zero_iter2;
	  if (zero_iter_bb == NULL)
	    {
	      gassign *assign_stmt;
	      first_zero_iter = i;
	      zero_iter_bb = create_empty_bb (entry_bb);
	      add_bb_to_loop (zero_iter_bb, entry_bb->loop_father);
	      *gsi = gsi_after_labels (zero_iter_bb);
	      if (i < fd->collapse)
		assign_stmt = gimple_build_assign (fd->loop.n2,
						   build_zero_cst (type));
	      else
		{
		  counts[i] = create_tmp_reg (type, ".count");
		  assign_stmt
		    = gimple_build_assign (counts[i], build_zero_cst (type));
		}
	      gsi_insert_before (gsi, assign_stmt, GSI_SAME_STMT);
	      set_immediate_dominator (CDI_DOMINATORS, zero_iter_bb,
				       entry_bb);
	    }
	  ne = make_edge (entry_bb, zero_iter_bb, EDGE_FALSE_VALUE);
	  ne->probability = profile_probability::very_unlikely ();
	  e->flags = EDGE_TRUE_VALUE;
	  e->probability = ne->probability.invert ();
	  if (l2_dom_bb == NULL)
	    l2_dom_bb = entry_bb;
	  entry_bb = e->dest;
	  *gsi = gsi_last_bb (entry_bb);
	}

      if (POINTER_TYPE_P (itype))
	itype = signed_type_for (itype);
      /* count = (step +- 1 + n2 - n1) / step, computed so that the
	 adjustment rounds in the direction of the iteration.  */
      t = build_int_cst (itype, (fd->loops[i].cond_code == LT_EXPR
				 ? -1 : 1));
      t = fold_build2 (PLUS_EXPR, itype,
		       fold_convert (itype, fd->loops[i].step), t);
      t = fold_build2 (PLUS_EXPR, itype, t,
		       fold_convert (itype, fd->loops[i].n2));
      t = fold_build2 (MINUS_EXPR, itype, t,
		       fold_convert (itype, fd->loops[i].n1));
      /* ?? We could probably use CEIL_DIV_EXPR instead of
	 TRUNC_DIV_EXPR and adjusting by hand.  Unless we can't
	 generate the same code in the end because generically we
	 don't know that the values involved must be negative for
	 GT??  */
      if (TYPE_UNSIGNED (itype) && fd->loops[i].cond_code == GT_EXPR)
	t = fold_build2 (TRUNC_DIV_EXPR, itype,
			 fold_build1 (NEGATE_EXPR, itype, t),
			 fold_build1 (NEGATE_EXPR, itype,
				      fold_convert (itype,
						    fd->loops[i].step)));
      else
	t = fold_build2 (TRUNC_DIV_EXPR, itype, t,
			 fold_convert (itype, fd->loops[i].step));
      t = fold_convert (type, t);
      if (TREE_CODE (t) == INTEGER_CST)
	counts[i] = t;
      else
	{
	  if (i < fd->collapse || i != first_zero_iter2)
	    counts[i] = create_tmp_reg (type, ".count");
	  expand_omp_build_assign (gsi, counts[i], t);
	}
      /* Accumulate the total iteration count into fd->loop.n2.  */
      if (SSA_VAR_P (fd->loop.n2) && i < fd->collapse)
	{
	  if (i == 0)
	    t = counts[0];
	  else
	    t = fold_build2 (MULT_EXPR, type, fd->loop.n2, counts[i]);
	  expand_omp_build_assign (gsi, fd->loop.n2, t);
	}
    }
}
1796
1797 /* Helper function for expand_omp_{for_*,simd}. Generate code like:
1798 T = V;
1799 V3 = N31 + (T % count3) * STEP3;
1800 T = T / count3;
1801 V2 = N21 + (T % count2) * STEP2;
1802 T = T / count2;
1803 V1 = N11 + T * STEP1;
1804 if this loop doesn't have an inner loop construct combined with it.
1805 If it does have an inner loop construct combined with it and the
1806 iteration count isn't known constant, store values from counts array
1807 into its _looptemp_ temporaries instead. */
1808
static void
expand_omp_for_init_vars (struct omp_for_data *fd, gimple_stmt_iterator *gsi,
			  tree *counts, gimple *inner_stmt, tree startvar)
{
  int i;
  if (gimple_omp_for_combined_p (fd->for_stmt))
    {
      /* If fd->loop.n2 is constant, then no propagation of the counts
	 is needed, they are constant.  */
      if (TREE_CODE (fd->loop.n2) == INTEGER_CST)
	return;

      tree clauses = gimple_code (inner_stmt) != GIMPLE_OMP_FOR
		     ? gimple_omp_taskreg_clauses (inner_stmt)
		     : gimple_omp_for_clauses (inner_stmt);
      /* First two _looptemp_ clauses are for istart/iend, counts[0]
	 isn't supposed to be handled, as the inner loop doesn't
	 use it.  */
      tree innerc = omp_find_clause (clauses, OMP_CLAUSE__LOOPTEMP_);
      gcc_assert (innerc);
      for (i = 0; i < fd->collapse; i++)
	{
	  innerc = omp_find_clause (OMP_CLAUSE_CHAIN (innerc),
				    OMP_CLAUSE__LOOPTEMP_);
	  gcc_assert (innerc);
	  if (i)
	    {
	      /* Store counts[i] into the inner construct's _looptemp_.  */
	      tree tem = OMP_CLAUSE_DECL (innerc);
	      tree t = fold_convert (TREE_TYPE (tem), counts[i]);
	      t = force_gimple_operand_gsi (gsi, t, false, NULL_TREE,
					    false, GSI_CONTINUE_LINKING);
	      gassign *stmt = gimple_build_assign (tem, t);
	      gsi_insert_after (gsi, stmt, GSI_CONTINUE_LINKING);
	    }
	}
      return;
    }

  /* T holds the still-undecoded part of the linearized iteration
     number; peel off one loop's index per step, innermost first.  */
  tree type = TREE_TYPE (fd->loop.v);
  tree tem = create_tmp_reg (type, ".tem");
  gassign *stmt = gimple_build_assign (tem, startvar);
  gsi_insert_after (gsi, stmt, GSI_CONTINUE_LINKING);

  for (i = fd->collapse - 1; i >= 0; i--)
    {
      tree vtype = TREE_TYPE (fd->loops[i].v), itype, t;
      itype = vtype;
      if (POINTER_TYPE_P (vtype))
	itype = signed_type_for (vtype);
      if (i != 0)
	t = fold_build2 (TRUNC_MOD_EXPR, type, tem, counts[i]);
      else
	t = tem;
      /* Vi = N1i + (T % counti) * STEPi.  */
      t = fold_convert (itype, t);
      t = fold_build2 (MULT_EXPR, itype, t,
		       fold_convert (itype, fd->loops[i].step));
      if (POINTER_TYPE_P (vtype))
	t = fold_build_pointer_plus (fd->loops[i].n1, t);
      else
	t = fold_build2 (PLUS_EXPR, itype, fd->loops[i].n1, t);
      t = force_gimple_operand_gsi (gsi, t,
				    DECL_P (fd->loops[i].v)
				    && TREE_ADDRESSABLE (fd->loops[i].v),
				    NULL_TREE, false,
				    GSI_CONTINUE_LINKING);
      stmt = gimple_build_assign (fd->loops[i].v, t);
      gsi_insert_after (gsi, stmt, GSI_CONTINUE_LINKING);
      if (i != 0)
	{
	  /* T = T / counti for the next-outer loop.  */
	  t = fold_build2 (TRUNC_DIV_EXPR, type, tem, counts[i]);
	  t = force_gimple_operand_gsi (gsi, t, false, NULL_TREE,
					false, GSI_CONTINUE_LINKING);
	  stmt = gimple_build_assign (tem, t);
	  gsi_insert_after (gsi, stmt, GSI_CONTINUE_LINKING);
	}
    }
}
1886
1887 /* Helper function for expand_omp_for_*. Generate code like:
1888 L10:
1889 V3 += STEP3;
1890 if (V3 cond3 N32) goto BODY_BB; else goto L11;
1891 L11:
1892 V3 = N31;
1893 V2 += STEP2;
1894 if (V2 cond2 N22) goto BODY_BB; else goto L12;
1895 L12:
1896 V2 = N21;
1897 V1 += STEP1;
1898 goto BODY_BB; */
1899
static basic_block
extract_omp_for_update_vars (struct omp_for_data *fd, basic_block cont_bb,
			     basic_block body_bb)
{
  basic_block last_bb, bb, collapse_bb = NULL;
  int i;
  gimple_stmt_iterator gsi;
  edge e;
  tree t;
  gimple *stmt;

  /* Build one block per collapsed loop, innermost first, chaining them
     through the false edges of the per-loop exit conditions.  */
  last_bb = cont_bb;
  for (i = fd->collapse - 1; i >= 0; i--)
    {
      tree vtype = TREE_TYPE (fd->loops[i].v);

      bb = create_empty_bb (last_bb);
      add_bb_to_loop (bb, last_bb->loop_father);
      gsi = gsi_start_bb (bb);

      if (i < fd->collapse - 1)
	{
	  /* The inner loop has wrapped: reset its iteration variable
	     to N1 before stepping this one.  */
	  e = make_edge (last_bb, bb, EDGE_FALSE_VALUE);
	  e->probability = profile_probability::guessed_always ().apply_scale (1, 8);

	  t = fd->loops[i + 1].n1;
	  t = force_gimple_operand_gsi (&gsi, t,
					DECL_P (fd->loops[i + 1].v)
					&& TREE_ADDRESSABLE (fd->loops[i
								       + 1].v),
					NULL_TREE, false,
					GSI_CONTINUE_LINKING);
	  stmt = gimple_build_assign (fd->loops[i + 1].v, t);
	  gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
	}
      else
	collapse_bb = bb;

      set_immediate_dominator (CDI_DOMINATORS, bb, last_bb);

      /* Vi += STEPi.  */
      if (POINTER_TYPE_P (vtype))
	t = fold_build_pointer_plus (fd->loops[i].v, fd->loops[i].step);
      else
	t = fold_build2 (PLUS_EXPR, vtype, fd->loops[i].v, fd->loops[i].step);
      t = force_gimple_operand_gsi (&gsi, t,
				    DECL_P (fd->loops[i].v)
				    && TREE_ADDRESSABLE (fd->loops[i].v),
				    NULL_TREE, false, GSI_CONTINUE_LINKING);
      stmt = gimple_build_assign (fd->loops[i].v, t);
      gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);

      if (i > 0)
	{
	  /* if (Vi condi N2i) goto BODY_BB; else fall through to the
	     next-outer loop's update block.  */
	  t = fd->loops[i].n2;
	  t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
					false, GSI_CONTINUE_LINKING);
	  tree v = fd->loops[i].v;
	  if (DECL_P (v) && TREE_ADDRESSABLE (v))
	    v = force_gimple_operand_gsi (&gsi, v, true, NULL_TREE,
					  false, GSI_CONTINUE_LINKING);
	  t = fold_build2 (fd->loops[i].cond_code, boolean_type_node, v, t);
	  stmt = gimple_build_cond_empty (t);
	  gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
	  e = make_edge (bb, body_bb, EDGE_TRUE_VALUE);
	  e->probability = profile_probability::guessed_always ().apply_scale (7, 8);
	}
      else
	/* The outermost loop needs no test here; its exit condition is
	   checked elsewhere.  */
	make_edge (bb, body_bb, EDGE_FALLTHRU);
      last_bb = bb;
    }

  return collapse_bb;
}
1973
1974 /* Expand #pragma omp ordered depend(source). */
1975
1976 static void
1977 expand_omp_ordered_source (gimple_stmt_iterator *gsi, struct omp_for_data *fd,
1978 tree *counts, location_t loc)
1979 {
1980 enum built_in_function source_ix
1981 = fd->iter_type == long_integer_type_node
1982 ? BUILT_IN_GOMP_DOACROSS_POST : BUILT_IN_GOMP_DOACROSS_ULL_POST;
1983 gimple *g
1984 = gimple_build_call (builtin_decl_explicit (source_ix), 1,
1985 build_fold_addr_expr (counts[fd->ordered]));
1986 gimple_set_location (g, loc);
1987 gsi_insert_before (gsi, g, GSI_SAME_STMT);
1988 }
1989
1990 /* Expand a single depend from #pragma omp ordered depend(sink:...). */
1991
static void
expand_omp_ordered_sink (gimple_stmt_iterator *gsi, struct omp_for_data *fd,
			 tree *counts, tree c, location_t loc)
{
  auto_vec<tree, 10> args;
  /* Long vs. unsigned long long runtime entry point, following the
     iterator type of the loop nest.  */
  enum built_in_function sink_ix
    = fd->iter_type == long_integer_type_node
      ? BUILT_IN_GOMP_DOACROSS_WAIT : BUILT_IN_GOMP_DOACROSS_ULL_WAIT;
  tree t, off, coff = NULL_TREE, deps = OMP_CLAUSE_DECL (c), cond = NULL_TREE;
  int i;
  gimple_stmt_iterator gsi2 = *gsi;
  bool warned_step = false;

  /* First pass over the depend vector: find the first dimension with a
     non-zero offset and diagnose a wait on a lexically later iteration
     (that would deadlock or is at best suspicious).  */
  for (i = 0; i < fd->ordered; i++)
    {
      tree step = NULL_TREE;
      off = TREE_PURPOSE (deps);
      /* An offset wrapped in TRUNC_DIV_EXPR carries the (possibly
	 variable) step of a non-unit-stride loop in operand 1.  */
      if (TREE_CODE (off) == TRUNC_DIV_EXPR)
	{
	  step = TREE_OPERAND (off, 1);
	  off = TREE_OPERAND (off, 0);
	}
      if (!integer_zerop (off))
	{
	  gcc_assert (fd->loops[i].cond_code == LT_EXPR
		      || fd->loops[i].cond_code == GT_EXPR);
	  bool forward = fd->loops[i].cond_code == LT_EXPR;
	  if (step)
	    {
	      /* Non-simple Fortran DO loops.  If step is variable,
		 we don't know even the direction at compile time, so
		 can't warn.  */
	      if (TREE_CODE (step) != INTEGER_CST)
		break;
	      forward = tree_int_cst_sgn (step) != -1;
	    }
	  if (forward ^ OMP_CLAUSE_DEPEND_SINK_NEGATIVE (deps))
	    warning_at (loc, 0, "%<depend(sink)%> clause waiting for "
			"lexically later iteration");
	  break;
	}
      deps = TREE_CHAIN (deps);
    }
  /* If all offsets corresponding to the collapsed loops are zero,
     this depend clause can be ignored.  FIXME: but there is still a
     flush needed.  We need to emit one __sync_synchronize () for it
     though (perhaps conditionally)?  Solve this together with the
     conservative dependence folding optimization.
  if (i >= fd->collapse)
    return;  */

  /* Split the current block so that the GOMP_doacross_wait call (built
     in GSI2, inside E1->dest) can be guarded by COND: the call is only
     reached via the true edge added at the end of this function.  */
  deps = OMP_CLAUSE_DECL (c);
  gsi_prev (&gsi2);
  edge e1 = split_block (gsi_bb (gsi2), gsi_stmt (gsi2));
  edge e2 = split_block_after_labels (e1->dest);

  gsi2 = gsi_after_labels (e1->dest);
  *gsi = gsi_last_bb (e1->src);
  /* Second pass: for each ordered dimension compute both the guard
     condition (the waited-for iteration must lie inside the iteration
     space) and the argument to pass to the runtime (iteration count of
     this dimension plus the sink offset scaled to iterations).  */
  for (i = 0; i < fd->ordered; i++)
    {
      tree itype = TREE_TYPE (fd->loops[i].v);
      tree step = NULL_TREE;
      tree orig_off = NULL_TREE;
      if (POINTER_TYPE_P (itype))
	itype = sizetype;
      if (i)
	deps = TREE_CHAIN (deps);
      off = TREE_PURPOSE (deps);
      if (TREE_CODE (off) == TRUNC_DIV_EXPR)
	{
	  step = TREE_OPERAND (off, 1);
	  off = TREE_OPERAND (off, 0);
	  gcc_assert (fd->loops[i].cond_code == LT_EXPR
		      && integer_onep (fd->loops[i].step)
		      && !POINTER_TYPE_P (TREE_TYPE (fd->loops[i].v)));
	}
      tree s = fold_convert_loc (loc, itype, step ? step : fd->loops[i].step);
      if (step)
	{
	  /* Variable-step case: remember the raw offset for the modulo
	     check below, and divide by the step here.  */
	  off = fold_convert_loc (loc, itype, off);
	  orig_off = off;
	  off = fold_build2_loc (loc, TRUNC_DIV_EXPR, itype, off, s);
	}

      if (integer_zerop (off))
	t = boolean_true_node;
      else
	{
	  /* A = V +/- OFF is the iteration waited for in this
	     dimension; T becomes the in-bounds check for A.  */
	  tree a;
	  tree co = fold_convert_loc (loc, itype, off);
	  if (POINTER_TYPE_P (TREE_TYPE (fd->loops[i].v)))
	    {
	      if (OMP_CLAUSE_DEPEND_SINK_NEGATIVE (deps))
		co = fold_build1_loc (loc, NEGATE_EXPR, itype, co);
	      a = fold_build2_loc (loc, POINTER_PLUS_EXPR,
				   TREE_TYPE (fd->loops[i].v), fd->loops[i].v,
				   co);
	    }
	  else if (OMP_CLAUSE_DEPEND_SINK_NEGATIVE (deps))
	    a = fold_build2_loc (loc, MINUS_EXPR, TREE_TYPE (fd->loops[i].v),
				 fd->loops[i].v, co);
	  else
	    a = fold_build2_loc (loc, PLUS_EXPR, TREE_TYPE (fd->loops[i].v),
				 fd->loops[i].v, co);
	  if (step)
	    {
	      /* Variable step: the bound to compare against depends on
		 the runtime sign of STEP, so build both comparisons and
		 select with a COND_EXPR on step < 0.  */
	      tree t1, t2;
	      if (OMP_CLAUSE_DEPEND_SINK_NEGATIVE (deps))
		t1 = fold_build2_loc (loc, GE_EXPR, boolean_type_node, a,
				      fd->loops[i].n1);
	      else
		t1 = fold_build2_loc (loc, LT_EXPR, boolean_type_node, a,
				      fd->loops[i].n2);
	      if (OMP_CLAUSE_DEPEND_SINK_NEGATIVE (deps))
		t2 = fold_build2_loc (loc, LT_EXPR, boolean_type_node, a,
				      fd->loops[i].n2);
	      else
		t2 = fold_build2_loc (loc, GE_EXPR, boolean_type_node, a,
				      fd->loops[i].n1);
	      t = fold_build2_loc (loc, LT_EXPR, boolean_type_node,
				   step, build_int_cst (TREE_TYPE (step), 0));
	      if (TREE_CODE (step) != INTEGER_CST)
		{
		  t1 = unshare_expr (t1);
		  t1 = force_gimple_operand_gsi (gsi, t1, true, NULL_TREE,
						 false, GSI_CONTINUE_LINKING);
		  t2 = unshare_expr (t2);
		  t2 = force_gimple_operand_gsi (gsi, t2, true, NULL_TREE,
						 false, GSI_CONTINUE_LINKING);
		}
	      t = fold_build3_loc (loc, COND_EXPR, boolean_type_node,
				   t, t2, t1);
	    }
	  else if (fd->loops[i].cond_code == LT_EXPR)
	    {
	      if (OMP_CLAUSE_DEPEND_SINK_NEGATIVE (deps))
		t = fold_build2_loc (loc, GE_EXPR, boolean_type_node, a,
				     fd->loops[i].n1);
	      else
		t = fold_build2_loc (loc, LT_EXPR, boolean_type_node, a,
				     fd->loops[i].n2);
	    }
	  else if (OMP_CLAUSE_DEPEND_SINK_NEGATIVE (deps))
	    t = fold_build2_loc (loc, GT_EXPR, boolean_type_node, a,
				 fd->loops[i].n2);
	  else
	    t = fold_build2_loc (loc, LE_EXPR, boolean_type_node, a,
				 fd->loops[i].n1);
	}
      /* Accumulate all per-dimension checks into COND.  */
      if (cond)
	cond = fold_build2_loc (loc, BIT_AND_EXPR, boolean_type_node, cond, t);
      else
	cond = t;

      off = fold_convert_loc (loc, itype, off);

      /* For non-unit steps, additionally require OFF to be a multiple
	 of the step, otherwise the sink names an iteration that never
	 occurs.  */
      if (step
	  || (fd->loops[i].cond_code == LT_EXPR
	      ? !integer_onep (fd->loops[i].step)
	      : !integer_minus_onep (fd->loops[i].step)))
	{
	  if (step == NULL_TREE
	      && TYPE_UNSIGNED (itype)
	      && fd->loops[i].cond_code == GT_EXPR)
	    t = fold_build2_loc (loc, TRUNC_MOD_EXPR, itype, off,
				 fold_build1_loc (loc, NEGATE_EXPR, itype,
						  s));
	  else
	    t = fold_build2_loc (loc, TRUNC_MOD_EXPR, itype,
				 orig_off ? orig_off : off, s);
	  t = fold_build2_loc (loc, EQ_EXPR, boolean_type_node, t,
			       build_int_cst (itype, 0));
	  if (integer_zerop (t) && !warned_step)
	    {
	      warning_at (loc, 0, "%<depend(sink)%> refers to iteration never "
			  "in the iteration space");
	      warned_step = true;
	    }
	  cond = fold_build2_loc (loc, BIT_AND_EXPR, boolean_type_node,
				  cond, t);
	}

      /* T = current iteration count of this dimension: the combined
	 iterator for collapsed dimensions, the dedicated counter var,
	 or V - N1 when the variable itself can serve as the count.  */
      if (i <= fd->collapse - 1 && fd->collapse > 1)
	t = fd->loop.v;
      else if (counts[i])
	t = counts[i];
      else
	{
	  t = fold_build2_loc (loc, MINUS_EXPR, TREE_TYPE (fd->loops[i].v),
			       fd->loops[i].v, fd->loops[i].n1);
	  t = fold_convert_loc (loc, fd->iter_type, t);
	}
      if (step)
	/* We have divided off by step already earlier.  */;
      else if (TYPE_UNSIGNED (itype) && fd->loops[i].cond_code == GT_EXPR)
	off = fold_build2_loc (loc, TRUNC_DIV_EXPR, itype, off,
			       fold_build1_loc (loc, NEGATE_EXPR, itype,
						s));
      else
	off = fold_build2_loc (loc, TRUNC_DIV_EXPR, itype, off, s);
      if (OMP_CLAUSE_DEPEND_SINK_NEGATIVE (deps))
	off = fold_build1_loc (loc, NEGATE_EXPR, itype, off);
      off = fold_convert_loc (loc, fd->iter_type, off);
      /* Collapsed dimensions are folded into one combined argument via
	 COFF (horner-style accumulation over counts[]).  */
      if (i <= fd->collapse - 1 && fd->collapse > 1)
	{
	  if (i)
	    off = fold_build2_loc (loc, PLUS_EXPR, fd->iter_type, coff,
				   off);
	  if (i < fd->collapse - 1)
	    {
	      coff = fold_build2_loc (loc, MULT_EXPR, fd->iter_type, off,
				      counts[i]);
	      continue;
	    }
	}
      off = unshare_expr (off);
      t = fold_build2_loc (loc, PLUS_EXPR, fd->iter_type, t, off);
      t = force_gimple_operand_gsi (&gsi2, t, true, NULL_TREE,
				    true, GSI_SAME_STMT);
      args.safe_push (t);
    }
  /* Emit the guarded GOMP_doacross_wait call.  */
  gimple *g = gimple_build_call_vec (builtin_decl_explicit (sink_ix), args);
  gimple_set_location (g, loc);
  gsi_insert_before (&gsi2, g, GSI_SAME_STMT);

  /* Emit COND in E1->src and wire it up: true edge into the block with
     the call, false edge around it.  */
  cond = unshare_expr (cond);
  cond = force_gimple_operand_gsi (gsi, cond, true, NULL_TREE, false,
				   GSI_CONTINUE_LINKING);
  gsi_insert_after (gsi, gimple_build_cond_empty (cond), GSI_NEW_STMT);
  edge e3 = make_edge (e1->src, e2->dest, EDGE_FALSE_VALUE);
  e3->probability = profile_probability::guessed_always ().apply_scale (1, 8);
  e1->probability = e3->probability.invert ();
  e1->flags = EDGE_TRUE_VALUE;
  set_immediate_dominator (CDI_DOMINATORS, e2->dest, e1->src);

  *gsi = gsi_after_labels (e2->dest);
}
2229
2230 /* Expand all #pragma omp ordered depend(source) and
2231 #pragma omp ordered depend(sink:...) constructs in the current
2232 #pragma omp for ordered(n) region. */
2233
2234 static void
2235 expand_omp_ordered_source_sink (struct omp_region *region,
2236 struct omp_for_data *fd, tree *counts,
2237 basic_block cont_bb)
2238 {
2239 struct omp_region *inner;
2240 int i;
2241 for (i = fd->collapse - 1; i < fd->ordered; i++)
2242 if (i == fd->collapse - 1 && fd->collapse > 1)
2243 counts[i] = NULL_TREE;
2244 else if (i >= fd->collapse && !cont_bb)
2245 counts[i] = build_zero_cst (fd->iter_type);
2246 else if (!POINTER_TYPE_P (TREE_TYPE (fd->loops[i].v))
2247 && integer_onep (fd->loops[i].step))
2248 counts[i] = NULL_TREE;
2249 else
2250 counts[i] = create_tmp_var (fd->iter_type, ".orditer");
2251 tree atype
2252 = build_array_type_nelts (fd->iter_type, fd->ordered - fd->collapse + 1);
2253 counts[fd->ordered] = create_tmp_var (atype, ".orditera");
2254 TREE_ADDRESSABLE (counts[fd->ordered]) = 1;
2255
2256 for (inner = region->inner; inner; inner = inner->next)
2257 if (inner->type == GIMPLE_OMP_ORDERED)
2258 {
2259 gomp_ordered *ord_stmt = inner->ord_stmt;
2260 gimple_stmt_iterator gsi = gsi_for_stmt (ord_stmt);
2261 location_t loc = gimple_location (ord_stmt);
2262 tree c;
2263 for (c = gimple_omp_ordered_clauses (ord_stmt);
2264 c; c = OMP_CLAUSE_CHAIN (c))
2265 if (OMP_CLAUSE_DEPEND_KIND (c) == OMP_CLAUSE_DEPEND_SOURCE)
2266 break;
2267 if (c)
2268 expand_omp_ordered_source (&gsi, fd, counts, loc);
2269 for (c = gimple_omp_ordered_clauses (ord_stmt);
2270 c; c = OMP_CLAUSE_CHAIN (c))
2271 if (OMP_CLAUSE_DEPEND_KIND (c) == OMP_CLAUSE_DEPEND_SINK)
2272 expand_omp_ordered_sink (&gsi, fd, counts, c, loc);
2273 gsi_remove (&gsi, true);
2274 }
2275 }
2276
2277 /* Wrap the body into fd->ordered - fd->collapse loops that aren't
2278 collapsed. */
2279
static basic_block
expand_omp_for_ordered_loops (struct omp_for_data *fd, tree *counts,
			      basic_block cont_bb, basic_block body_bb,
			      bool ordered_lastprivate)
{
  /* Nothing to wrap if every ordered loop is also collapsed.  */
  if (fd->ordered == fd->collapse)
    return cont_bb;

  if (!cont_bb)
    {
      /* Broken loop (no continue block): just initialize the remaining
	 iteration variables and zero the corresponding slots of the
	 doacross counter array; no actual loops are built.  */
      gimple_stmt_iterator gsi = gsi_after_labels (body_bb);
      for (int i = fd->collapse; i < fd->ordered; i++)
	{
	  tree type = TREE_TYPE (fd->loops[i].v);
	  tree n1 = fold_convert (type, fd->loops[i].n1);
	  expand_omp_build_assign (&gsi, fd->loops[i].v, n1);
	  tree aref = build4 (ARRAY_REF, fd->iter_type, counts[fd->ordered],
			      size_int (i - fd->collapse + 1),
			      NULL_TREE, NULL_TREE);
	  expand_omp_build_assign (&gsi, aref, build_zero_cst (fd->iter_type));
	}
      return NULL;
    }

  /* Wrap the body in one loop per non-collapsed ordered dimension,
     building from the innermost dimension outwards.  */
  for (int i = fd->ordered - 1; i >= fd->collapse; i--)
    {
      tree t, type = TREE_TYPE (fd->loops[i].v);
      gimple_stmt_iterator gsi = gsi_after_labels (body_bb);
      /* V = N1; also reset the per-dimension counter (if any) and the
	 matching slot of the doacross array.  */
      expand_omp_build_assign (&gsi, fd->loops[i].v,
			       fold_convert (type, fd->loops[i].n1));
      if (counts[i])
	expand_omp_build_assign (&gsi, counts[i],
				 build_zero_cst (fd->iter_type));
      tree aref = build4 (ARRAY_REF, fd->iter_type, counts[fd->ordered],
			  size_int (i - fd->collapse + 1),
			  NULL_TREE, NULL_TREE);
      expand_omp_build_assign (&gsi, aref, build_zero_cst (fd->iter_type));
      if (!gsi_end_p (gsi))
	gsi_prev (&gsi);
      else
	gsi = gsi_last_bb (body_bb);
      /* Split after the initializations; E1->dest is the new loop
	 body.  */
      edge e1 = split_block (body_bb, gsi_stmt (gsi));
      basic_block new_body = e1->dest;
      if (body_bb == cont_bb)
	cont_bb = new_body;
      edge e2 = NULL;
      basic_block new_header;
      if (EDGE_COUNT (cont_bb->preds) > 0)
	{
	  /* Emit the increment V += STEP and store the updated
	     iteration count into the doacross array slot, all at the
	     end of the continue block.  */
	  gsi = gsi_last_bb (cont_bb);
	  if (POINTER_TYPE_P (type))
	    t = fold_build_pointer_plus (fd->loops[i].v,
					 fold_convert (sizetype,
						       fd->loops[i].step));
	  else
	    t = fold_build2 (PLUS_EXPR, type, fd->loops[i].v,
			     fold_convert (type, fd->loops[i].step));
	  expand_omp_build_assign (&gsi, fd->loops[i].v, t);
	  if (counts[i])
	    {
	      t = fold_build2 (PLUS_EXPR, fd->iter_type, counts[i],
			       build_int_cst (fd->iter_type, 1));
	      expand_omp_build_assign (&gsi, counts[i], t);
	      t = counts[i];
	    }
	  else
	    {
	      /* No dedicated counter: recompute it as V - N1.  */
	      t = fold_build2 (MINUS_EXPR, TREE_TYPE (fd->loops[i].v),
			       fd->loops[i].v, fd->loops[i].n1);
	      t = fold_convert (fd->iter_type, t);
	      t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
					    true, GSI_SAME_STMT);
	    }
	  aref = build4 (ARRAY_REF, fd->iter_type, counts[fd->ordered],
			 size_int (i - fd->collapse + 1),
			 NULL_TREE, NULL_TREE);
	  expand_omp_build_assign (&gsi, aref, t);
	  gsi_prev (&gsi);
	  e2 = split_block (cont_bb, gsi_stmt (gsi));
	  new_header = e2->dest;
	}
      else
	new_header = cont_bb;
      /* Build the exit test V cond N2 in the new loop header.  */
      gsi = gsi_after_labels (new_header);
      tree v = force_gimple_operand_gsi (&gsi, fd->loops[i].v, true, NULL_TREE,
					 true, GSI_SAME_STMT);
      tree n2
	= force_gimple_operand_gsi (&gsi, fold_convert (type, fd->loops[i].n2),
				    true, NULL_TREE, true, GSI_SAME_STMT);
      t = build2 (fd->loops[i].cond_code, boolean_type_node, v, n2);
      gsi_insert_before (&gsi, gimple_build_cond_empty (t), GSI_NEW_STMT);
      edge e3 = split_block (new_header, gsi_stmt (gsi));
      cont_bb = e3->dest;
      /* Rewire the CFG: the (old) body falls through into the header,
	 the true edge of the exit test enters the new loop body, the
	 false edge leaves the loop.  */
      remove_edge (e1);
      make_edge (body_bb, new_header, EDGE_FALLTHRU);
      e3->flags = EDGE_FALSE_VALUE;
      e3->probability = profile_probability::guessed_always ().apply_scale (1, 8);
      e1 = make_edge (new_header, new_body, EDGE_TRUE_VALUE);
      e1->probability = e3->probability.invert ();

      set_immediate_dominator (CDI_DOMINATORS, new_header, body_bb);
      set_immediate_dominator (CDI_DOMINATORS, new_body, new_header);

      if (e2)
	{
	  /* Register the newly created natural loop in the loop
	     tree.  */
	  struct loop *loop = alloc_loop ();
	  loop->header = new_header;
	  loop->latch = e2->src;
	  add_loop (loop, body_bb->loop_father);
	}
    }

  /* If there are any lastprivate clauses and it is possible some loops
     might have zero iterations, ensure all the decls are initialized,
     otherwise we could crash evaluating C++ class iterators with lastprivate
     clauses.  */
  bool need_inits = false;
  for (int i = fd->collapse; ordered_lastprivate && i < fd->ordered; i++)
    if (need_inits)
      {
	tree type = TREE_TYPE (fd->loops[i].v);
	gimple_stmt_iterator gsi = gsi_after_labels (body_bb);
	expand_omp_build_assign (&gsi, fd->loops[i].v,
				 fold_convert (type, fd->loops[i].n1));
      }
    else
      {
	/* Once some loop in the nest cannot be proven at compile time
	   to iterate at least once (N1 cond N2 doesn't fold to true),
	   all deeper loop variables need explicit initialization.  */
	tree type = TREE_TYPE (fd->loops[i].v);
	tree this_cond = fold_build2 (fd->loops[i].cond_code,
				      boolean_type_node,
				      fold_convert (type, fd->loops[i].n1),
				      fold_convert (type, fd->loops[i].n2));
	if (!integer_onep (this_cond))
	  need_inits = true;
      }

  return cont_bb;
}
2418
2419 /* A subroutine of expand_omp_for. Generate code for a parallel
2420 loop with any schedule. Given parameters:
2421
2422 for (V = N1; V cond N2; V += STEP) BODY;
2423
2424 where COND is "<" or ">", we generate pseudocode
2425
2426 more = GOMP_loop_foo_start (N1, N2, STEP, CHUNK, &istart0, &iend0);
2427 if (more) goto L0; else goto L3;
2428 L0:
2429 V = istart0;
2430 iend = iend0;
2431 L1:
2432 BODY;
2433 V += STEP;
2434 if (V cond iend) goto L1; else goto L2;
2435 L2:
2436 if (GOMP_loop_foo_next (&istart0, &iend0)) goto L0; else goto L3;
2437 L3:
2438
2439 If this is a combined omp parallel loop, instead of the call to
2440 GOMP_loop_foo_start, we call GOMP_loop_foo_next.
2441 If this is gimple_omp_for_combined_p loop, then instead of assigning
2442 V and iend in L0 we assign the first two _looptemp_ clause decls of the
2443 inner GIMPLE_OMP_FOR and V += STEP; and
2444 if (V cond iend) goto L1; else goto L2; are removed.
2445
2446 For collapsed loops, given parameters:
2447 collapse(3)
2448 for (V1 = N11; V1 cond1 N12; V1 += STEP1)
2449 for (V2 = N21; V2 cond2 N22; V2 += STEP2)
2450 for (V3 = N31; V3 cond3 N32; V3 += STEP3)
2451 BODY;
2452
2453 we generate pseudocode
2454
2455 if (__builtin_expect (N32 cond3 N31, 0)) goto Z0;
2456 if (cond3 is <)
2457 adj = STEP3 - 1;
2458 else
2459 adj = STEP3 + 1;
2460 count3 = (adj + N32 - N31) / STEP3;
2461 if (__builtin_expect (N22 cond2 N21, 0)) goto Z0;
2462 if (cond2 is <)
2463 adj = STEP2 - 1;
2464 else
2465 adj = STEP2 + 1;
2466 count2 = (adj + N22 - N21) / STEP2;
2467 if (__builtin_expect (N12 cond1 N11, 0)) goto Z0;
2468 if (cond1 is <)
2469 adj = STEP1 - 1;
2470 else
2471 adj = STEP1 + 1;
2472 count1 = (adj + N12 - N11) / STEP1;
2473 count = count1 * count2 * count3;
2474 goto Z1;
2475 Z0:
2476 count = 0;
2477 Z1:
2478 more = GOMP_loop_foo_start (0, count, 1, CHUNK, &istart0, &iend0);
2479 if (more) goto L0; else goto L3;
2480 L0:
2481 V = istart0;
2482 T = V;
2483 V3 = N31 + (T % count3) * STEP3;
2484 T = T / count3;
2485 V2 = N21 + (T % count2) * STEP2;
2486 T = T / count2;
2487 V1 = N11 + T * STEP1;
2488 iend = iend0;
2489 L1:
2490 BODY;
2491 V += 1;
2492 if (V < iend) goto L10; else goto L2;
2493 L10:
2494 V3 += STEP3;
2495 if (V3 cond3 N32) goto L1; else goto L11;
2496 L11:
2497 V3 = N31;
2498 V2 += STEP2;
2499 if (V2 cond2 N22) goto L1; else goto L12;
2500 L12:
2501 V2 = N21;
2502 V1 += STEP1;
2503 goto L1;
2504 L2:
2505 if (GOMP_loop_foo_next (&istart0, &iend0)) goto L0; else goto L3;
2506 L3:
2507
2508 */
2509
2510 static void
2511 expand_omp_for_generic (struct omp_region *region,
2512 struct omp_for_data *fd,
2513 enum built_in_function start_fn,
2514 enum built_in_function next_fn,
2515 gimple *inner_stmt)
2516 {
2517 tree type, istart0, iend0, iend;
2518 tree t, vmain, vback, bias = NULL_TREE;
2519 basic_block entry_bb, cont_bb, exit_bb, l0_bb, l1_bb, collapse_bb;
2520 basic_block l2_bb = NULL, l3_bb = NULL;
2521 gimple_stmt_iterator gsi;
2522 gassign *assign_stmt;
2523 bool in_combined_parallel = is_combined_parallel (region);
2524 bool broken_loop = region->cont == NULL;
2525 edge e, ne;
2526 tree *counts = NULL;
2527 int i;
2528 bool ordered_lastprivate = false;
2529
2530 gcc_assert (!broken_loop || !in_combined_parallel);
2531 gcc_assert (fd->iter_type == long_integer_type_node
2532 || !in_combined_parallel);
2533
2534 entry_bb = region->entry;
2535 cont_bb = region->cont;
2536 collapse_bb = NULL;
2537 gcc_assert (EDGE_COUNT (entry_bb->succs) == 2);
2538 gcc_assert (broken_loop
2539 || BRANCH_EDGE (entry_bb)->dest == FALLTHRU_EDGE (cont_bb)->dest);
2540 l0_bb = split_edge (FALLTHRU_EDGE (entry_bb));
2541 l1_bb = single_succ (l0_bb);
2542 if (!broken_loop)
2543 {
2544 l2_bb = create_empty_bb (cont_bb);
2545 gcc_assert (BRANCH_EDGE (cont_bb)->dest == l1_bb
2546 || (single_succ_edge (BRANCH_EDGE (cont_bb)->dest)->dest
2547 == l1_bb));
2548 gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
2549 }
2550 else
2551 l2_bb = NULL;
2552 l3_bb = BRANCH_EDGE (entry_bb)->dest;
2553 exit_bb = region->exit;
2554
2555 gsi = gsi_last_bb (entry_bb);
2556
2557 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
2558 if (fd->ordered
2559 && omp_find_clause (gimple_omp_for_clauses (gsi_stmt (gsi)),
2560 OMP_CLAUSE_LASTPRIVATE))
2561 ordered_lastprivate = false;
2562 if (fd->collapse > 1 || fd->ordered)
2563 {
2564 int first_zero_iter1 = -1, first_zero_iter2 = -1;
2565 basic_block zero_iter1_bb = NULL, zero_iter2_bb = NULL, l2_dom_bb = NULL;
2566
2567 counts = XALLOCAVEC (tree, fd->ordered ? fd->ordered + 1 : fd->collapse);
2568 expand_omp_for_init_counts (fd, &gsi, entry_bb, counts,
2569 zero_iter1_bb, first_zero_iter1,
2570 zero_iter2_bb, first_zero_iter2, l2_dom_bb);
2571
2572 if (zero_iter1_bb)
2573 {
2574 /* Some counts[i] vars might be uninitialized if
2575 some loop has zero iterations. But the body shouldn't
2576 be executed in that case, so just avoid uninit warnings. */
2577 for (i = first_zero_iter1;
2578 i < (fd->ordered ? fd->ordered : fd->collapse); i++)
2579 if (SSA_VAR_P (counts[i]))
2580 TREE_NO_WARNING (counts[i]) = 1;
2581 gsi_prev (&gsi);
2582 e = split_block (entry_bb, gsi_stmt (gsi));
2583 entry_bb = e->dest;
2584 make_edge (zero_iter1_bb, entry_bb, EDGE_FALLTHRU);
2585 gsi = gsi_last_bb (entry_bb);
2586 set_immediate_dominator (CDI_DOMINATORS, entry_bb,
2587 get_immediate_dominator (CDI_DOMINATORS,
2588 zero_iter1_bb));
2589 }
2590 if (zero_iter2_bb)
2591 {
2592 /* Some counts[i] vars might be uninitialized if
2593 some loop has zero iterations. But the body shouldn't
2594 be executed in that case, so just avoid uninit warnings. */
2595 for (i = first_zero_iter2; i < fd->ordered; i++)
2596 if (SSA_VAR_P (counts[i]))
2597 TREE_NO_WARNING (counts[i]) = 1;
2598 if (zero_iter1_bb)
2599 make_edge (zero_iter2_bb, entry_bb, EDGE_FALLTHRU);
2600 else
2601 {
2602 gsi_prev (&gsi);
2603 e = split_block (entry_bb, gsi_stmt (gsi));
2604 entry_bb = e->dest;
2605 make_edge (zero_iter2_bb, entry_bb, EDGE_FALLTHRU);
2606 gsi = gsi_last_bb (entry_bb);
2607 set_immediate_dominator (CDI_DOMINATORS, entry_bb,
2608 get_immediate_dominator
2609 (CDI_DOMINATORS, zero_iter2_bb));
2610 }
2611 }
2612 if (fd->collapse == 1)
2613 {
2614 counts[0] = fd->loop.n2;
2615 fd->loop = fd->loops[0];
2616 }
2617 }
2618
2619 type = TREE_TYPE (fd->loop.v);
2620 istart0 = create_tmp_var (fd->iter_type, ".istart0");
2621 iend0 = create_tmp_var (fd->iter_type, ".iend0");
2622 TREE_ADDRESSABLE (istart0) = 1;
2623 TREE_ADDRESSABLE (iend0) = 1;
2624
2625 /* See if we need to bias by LLONG_MIN. */
2626 if (fd->iter_type == long_long_unsigned_type_node
2627 && TREE_CODE (type) == INTEGER_TYPE
2628 && !TYPE_UNSIGNED (type)
2629 && fd->ordered == 0)
2630 {
2631 tree n1, n2;
2632
2633 if (fd->loop.cond_code == LT_EXPR)
2634 {
2635 n1 = fd->loop.n1;
2636 n2 = fold_build2 (PLUS_EXPR, type, fd->loop.n2, fd->loop.step);
2637 }
2638 else
2639 {
2640 n1 = fold_build2 (MINUS_EXPR, type, fd->loop.n2, fd->loop.step);
2641 n2 = fd->loop.n1;
2642 }
2643 if (TREE_CODE (n1) != INTEGER_CST
2644 || TREE_CODE (n2) != INTEGER_CST
2645 || ((tree_int_cst_sgn (n1) < 0) ^ (tree_int_cst_sgn (n2) < 0)))
2646 bias = fold_convert (fd->iter_type, TYPE_MIN_VALUE (type));
2647 }
2648
2649 gimple_stmt_iterator gsif = gsi;
2650 gsi_prev (&gsif);
2651
2652 tree arr = NULL_TREE;
2653 if (in_combined_parallel)
2654 {
2655 gcc_assert (fd->ordered == 0);
2656 /* In a combined parallel loop, emit a call to
2657 GOMP_loop_foo_next. */
2658 t = build_call_expr (builtin_decl_explicit (next_fn), 2,
2659 build_fold_addr_expr (istart0),
2660 build_fold_addr_expr (iend0));
2661 }
2662 else
2663 {
2664 tree t0, t1, t2, t3, t4;
2665 /* If this is not a combined parallel loop, emit a call to
2666 GOMP_loop_foo_start in ENTRY_BB. */
2667 t4 = build_fold_addr_expr (iend0);
2668 t3 = build_fold_addr_expr (istart0);
2669 if (fd->ordered)
2670 {
2671 t0 = build_int_cst (unsigned_type_node,
2672 fd->ordered - fd->collapse + 1);
2673 arr = create_tmp_var (build_array_type_nelts (fd->iter_type,
2674 fd->ordered
2675 - fd->collapse + 1),
2676 ".omp_counts");
2677 DECL_NAMELESS (arr) = 1;
2678 TREE_ADDRESSABLE (arr) = 1;
2679 TREE_STATIC (arr) = 1;
2680 vec<constructor_elt, va_gc> *v;
2681 vec_alloc (v, fd->ordered - fd->collapse + 1);
2682 int idx;
2683
2684 for (idx = 0; idx < fd->ordered - fd->collapse + 1; idx++)
2685 {
2686 tree c;
2687 if (idx == 0 && fd->collapse > 1)
2688 c = fd->loop.n2;
2689 else
2690 c = counts[idx + fd->collapse - 1];
2691 tree purpose = size_int (idx);
2692 CONSTRUCTOR_APPEND_ELT (v, purpose, c);
2693 if (TREE_CODE (c) != INTEGER_CST)
2694 TREE_STATIC (arr) = 0;
2695 }
2696
2697 DECL_INITIAL (arr) = build_constructor (TREE_TYPE (arr), v);
2698 if (!TREE_STATIC (arr))
2699 force_gimple_operand_gsi (&gsi, build1 (DECL_EXPR,
2700 void_type_node, arr),
2701 true, NULL_TREE, true, GSI_SAME_STMT);
2702 t1 = build_fold_addr_expr (arr);
2703 t2 = NULL_TREE;
2704 }
2705 else
2706 {
2707 t2 = fold_convert (fd->iter_type, fd->loop.step);
2708 t1 = fd->loop.n2;
2709 t0 = fd->loop.n1;
2710 if (gimple_omp_for_combined_into_p (fd->for_stmt))
2711 {
2712 tree innerc
2713 = omp_find_clause (gimple_omp_for_clauses (fd->for_stmt),
2714 OMP_CLAUSE__LOOPTEMP_);
2715 gcc_assert (innerc);
2716 t0 = OMP_CLAUSE_DECL (innerc);
2717 innerc = omp_find_clause (OMP_CLAUSE_CHAIN (innerc),
2718 OMP_CLAUSE__LOOPTEMP_);
2719 gcc_assert (innerc);
2720 t1 = OMP_CLAUSE_DECL (innerc);
2721 }
2722 if (POINTER_TYPE_P (TREE_TYPE (t0))
2723 && TYPE_PRECISION (TREE_TYPE (t0))
2724 != TYPE_PRECISION (fd->iter_type))
2725 {
2726 /* Avoid casting pointers to integer of a different size. */
2727 tree itype = signed_type_for (type);
2728 t1 = fold_convert (fd->iter_type, fold_convert (itype, t1));
2729 t0 = fold_convert (fd->iter_type, fold_convert (itype, t0));
2730 }
2731 else
2732 {
2733 t1 = fold_convert (fd->iter_type, t1);
2734 t0 = fold_convert (fd->iter_type, t0);
2735 }
2736 if (bias)
2737 {
2738 t1 = fold_build2 (PLUS_EXPR, fd->iter_type, t1, bias);
2739 t0 = fold_build2 (PLUS_EXPR, fd->iter_type, t0, bias);
2740 }
2741 }
2742 if (fd->iter_type == long_integer_type_node || fd->ordered)
2743 {
2744 if (fd->chunk_size)
2745 {
2746 t = fold_convert (fd->iter_type, fd->chunk_size);
2747 t = omp_adjust_chunk_size (t, fd->simd_schedule);
2748 if (fd->ordered)
2749 t = build_call_expr (builtin_decl_explicit (start_fn),
2750 5, t0, t1, t, t3, t4);
2751 else
2752 t = build_call_expr (builtin_decl_explicit (start_fn),
2753 6, t0, t1, t2, t, t3, t4);
2754 }
2755 else if (fd->ordered)
2756 t = build_call_expr (builtin_decl_explicit (start_fn),
2757 4, t0, t1, t3, t4);
2758 else
2759 t = build_call_expr (builtin_decl_explicit (start_fn),
2760 5, t0, t1, t2, t3, t4);
2761 }
2762 else
2763 {
2764 tree t5;
2765 tree c_bool_type;
2766 tree bfn_decl;
2767
2768 /* The GOMP_loop_ull_*start functions have additional boolean
2769 argument, true for < loops and false for > loops.
2770 In Fortran, the C bool type can be different from
2771 boolean_type_node. */
2772 bfn_decl = builtin_decl_explicit (start_fn);
2773 c_bool_type = TREE_TYPE (TREE_TYPE (bfn_decl));
2774 t5 = build_int_cst (c_bool_type,
2775 fd->loop.cond_code == LT_EXPR ? 1 : 0);
2776 if (fd->chunk_size)
2777 {
2778 tree bfn_decl = builtin_decl_explicit (start_fn);
2779 t = fold_convert (fd->iter_type, fd->chunk_size);
2780 t = omp_adjust_chunk_size (t, fd->simd_schedule);
2781 t = build_call_expr (bfn_decl, 7, t5, t0, t1, t2, t, t3, t4);
2782 }
2783 else
2784 t = build_call_expr (builtin_decl_explicit (start_fn),
2785 6, t5, t0, t1, t2, t3, t4);
2786 }
2787 }
2788 if (TREE_TYPE (t) != boolean_type_node)
2789 t = fold_build2 (NE_EXPR, boolean_type_node,
2790 t, build_int_cst (TREE_TYPE (t), 0));
2791 t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
2792 true, GSI_SAME_STMT);
2793 if (arr && !TREE_STATIC (arr))
2794 {
2795 tree clobber = build_constructor (TREE_TYPE (arr), NULL);
2796 TREE_THIS_VOLATILE (clobber) = 1;
2797 gsi_insert_before (&gsi, gimple_build_assign (arr, clobber),
2798 GSI_SAME_STMT);
2799 }
2800 gsi_insert_after (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT);
2801
2802 /* Remove the GIMPLE_OMP_FOR statement. */
2803 gsi_remove (&gsi, true);
2804
2805 if (gsi_end_p (gsif))
2806 gsif = gsi_after_labels (gsi_bb (gsif));
2807 gsi_next (&gsif);
2808
2809 /* Iteration setup for sequential loop goes in L0_BB. */
2810 tree startvar = fd->loop.v;
2811 tree endvar = NULL_TREE;
2812
2813 if (gimple_omp_for_combined_p (fd->for_stmt))
2814 {
2815 gcc_assert (gimple_code (inner_stmt) == GIMPLE_OMP_FOR
2816 && gimple_omp_for_kind (inner_stmt)
2817 == GF_OMP_FOR_KIND_SIMD);
2818 tree innerc = omp_find_clause (gimple_omp_for_clauses (inner_stmt),
2819 OMP_CLAUSE__LOOPTEMP_);
2820 gcc_assert (innerc);
2821 startvar = OMP_CLAUSE_DECL (innerc);
2822 innerc = omp_find_clause (OMP_CLAUSE_CHAIN (innerc),
2823 OMP_CLAUSE__LOOPTEMP_);
2824 gcc_assert (innerc);
2825 endvar = OMP_CLAUSE_DECL (innerc);
2826 }
2827
2828 gsi = gsi_start_bb (l0_bb);
2829 t = istart0;
2830 if (fd->ordered && fd->collapse == 1)
2831 t = fold_build2 (MULT_EXPR, fd->iter_type, t,
2832 fold_convert (fd->iter_type, fd->loop.step));
2833 else if (bias)
2834 t = fold_build2 (MINUS_EXPR, fd->iter_type, t, bias);
2835 if (fd->ordered && fd->collapse == 1)
2836 {
2837 if (POINTER_TYPE_P (TREE_TYPE (startvar)))
2838 t = fold_build2 (POINTER_PLUS_EXPR, TREE_TYPE (startvar),
2839 fd->loop.n1, fold_convert (sizetype, t));
2840 else
2841 {
2842 t = fold_convert (TREE_TYPE (startvar), t);
2843 t = fold_build2 (PLUS_EXPR, TREE_TYPE (startvar),
2844 fd->loop.n1, t);
2845 }
2846 }
2847 else
2848 {
2849 if (POINTER_TYPE_P (TREE_TYPE (startvar)))
2850 t = fold_convert (signed_type_for (TREE_TYPE (startvar)), t);
2851 t = fold_convert (TREE_TYPE (startvar), t);
2852 }
2853 t = force_gimple_operand_gsi (&gsi, t,
2854 DECL_P (startvar)
2855 && TREE_ADDRESSABLE (startvar),
2856 NULL_TREE, false, GSI_CONTINUE_LINKING);
2857 assign_stmt = gimple_build_assign (startvar, t);
2858 gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING);
2859
2860 t = iend0;
2861 if (fd->ordered && fd->collapse == 1)
2862 t = fold_build2 (MULT_EXPR, fd->iter_type, t,
2863 fold_convert (fd->iter_type, fd->loop.step));
2864 else if (bias)
2865 t = fold_build2 (MINUS_EXPR, fd->iter_type, t, bias);
2866 if (fd->ordered && fd->collapse == 1)
2867 {
2868 if (POINTER_TYPE_P (TREE_TYPE (startvar)))
2869 t = fold_build2 (POINTER_PLUS_EXPR, TREE_TYPE (startvar),
2870 fd->loop.n1, fold_convert (sizetype, t));
2871 else
2872 {
2873 t = fold_convert (TREE_TYPE (startvar), t);
2874 t = fold_build2 (PLUS_EXPR, TREE_TYPE (startvar),
2875 fd->loop.n1, t);
2876 }
2877 }
2878 else
2879 {
2880 if (POINTER_TYPE_P (TREE_TYPE (startvar)))
2881 t = fold_convert (signed_type_for (TREE_TYPE (startvar)), t);
2882 t = fold_convert (TREE_TYPE (startvar), t);
2883 }
2884 iend = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
2885 false, GSI_CONTINUE_LINKING);
2886 if (endvar)
2887 {
2888 assign_stmt = gimple_build_assign (endvar, iend);
2889 gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING);
2890 if (useless_type_conversion_p (TREE_TYPE (fd->loop.v), TREE_TYPE (iend)))
2891 assign_stmt = gimple_build_assign (fd->loop.v, iend);
2892 else
2893 assign_stmt = gimple_build_assign (fd->loop.v, NOP_EXPR, iend);
2894 gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING);
2895 }
2896 /* Handle linear clause adjustments. */
2897 tree itercnt = NULL_TREE;
2898 if (gimple_omp_for_kind (fd->for_stmt) == GF_OMP_FOR_KIND_FOR)
2899 for (tree c = gimple_omp_for_clauses (fd->for_stmt);
2900 c; c = OMP_CLAUSE_CHAIN (c))
2901 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LINEAR
2902 && !OMP_CLAUSE_LINEAR_NO_COPYIN (c))
2903 {
2904 tree d = OMP_CLAUSE_DECL (c);
2905 bool is_ref = omp_is_reference (d);
2906 tree t = d, a, dest;
2907 if (is_ref)
2908 t = build_simple_mem_ref_loc (OMP_CLAUSE_LOCATION (c), t);
2909 tree type = TREE_TYPE (t);
2910 if (POINTER_TYPE_P (type))
2911 type = sizetype;
2912 dest = unshare_expr (t);
2913 tree v = create_tmp_var (TREE_TYPE (t), NULL);
2914 expand_omp_build_assign (&gsif, v, t);
2915 if (itercnt == NULL_TREE)
2916 {
2917 itercnt = startvar;
2918 tree n1 = fd->loop.n1;
2919 if (POINTER_TYPE_P (TREE_TYPE (itercnt)))
2920 {
2921 itercnt
2922 = fold_convert (signed_type_for (TREE_TYPE (itercnt)),
2923 itercnt);
2924 n1 = fold_convert (TREE_TYPE (itercnt), n1);
2925 }
2926 itercnt = fold_build2 (MINUS_EXPR, TREE_TYPE (itercnt),
2927 itercnt, n1);
2928 itercnt = fold_build2 (EXACT_DIV_EXPR, TREE_TYPE (itercnt),
2929 itercnt, fd->loop.step);
2930 itercnt = force_gimple_operand_gsi (&gsi, itercnt, true,
2931 NULL_TREE, false,
2932 GSI_CONTINUE_LINKING);
2933 }
2934 a = fold_build2 (MULT_EXPR, type,
2935 fold_convert (type, itercnt),
2936 fold_convert (type, OMP_CLAUSE_LINEAR_STEP (c)));
2937 t = fold_build2 (type == TREE_TYPE (t) ? PLUS_EXPR
2938 : POINTER_PLUS_EXPR, TREE_TYPE (t), v, a);
2939 t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
2940 false, GSI_CONTINUE_LINKING);
2941 assign_stmt = gimple_build_assign (dest, t);
2942 gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING);
2943 }
2944 if (fd->collapse > 1)
2945 expand_omp_for_init_vars (fd, &gsi, counts, inner_stmt, startvar);
2946
2947 if (fd->ordered)
2948 {
2949 /* Until now, counts array contained number of iterations or
2950 variable containing it for ith loop. From now on, we need
2951 those counts only for collapsed loops, and only for the 2nd
2952 till the last collapsed one. Move those one element earlier,
2953 we'll use counts[fd->collapse - 1] for the first source/sink
2954 iteration counter and so on and counts[fd->ordered]
2955 as the array holding the current counter values for
2956 depend(source). */
2957 if (fd->collapse > 1)
2958 memmove (counts, counts + 1, (fd->collapse - 1) * sizeof (counts[0]));
2959 if (broken_loop)
2960 {
2961 int i;
2962 for (i = fd->collapse; i < fd->ordered; i++)
2963 {
2964 tree type = TREE_TYPE (fd->loops[i].v);
2965 tree this_cond
2966 = fold_build2 (fd->loops[i].cond_code, boolean_type_node,
2967 fold_convert (type, fd->loops[i].n1),
2968 fold_convert (type, fd->loops[i].n2));
2969 if (!integer_onep (this_cond))
2970 break;
2971 }
2972 if (i < fd->ordered)
2973 {
2974 cont_bb
2975 = create_empty_bb (EXIT_BLOCK_PTR_FOR_FN (cfun)->prev_bb);
2976 add_bb_to_loop (cont_bb, l1_bb->loop_father);
2977 gimple_stmt_iterator gsi = gsi_after_labels (cont_bb);
2978 gimple *g = gimple_build_omp_continue (fd->loop.v, fd->loop.v);
2979 gsi_insert_before (&gsi, g, GSI_SAME_STMT);
2980 make_edge (cont_bb, l3_bb, EDGE_FALLTHRU);
2981 make_edge (cont_bb, l1_bb, 0);
2982 l2_bb = create_empty_bb (cont_bb);
2983 broken_loop = false;
2984 }
2985 }
2986 expand_omp_ordered_source_sink (region, fd, counts, cont_bb);
2987 cont_bb = expand_omp_for_ordered_loops (fd, counts, cont_bb, l1_bb,
2988 ordered_lastprivate);
2989 if (counts[fd->collapse - 1])
2990 {
2991 gcc_assert (fd->collapse == 1);
2992 gsi = gsi_last_bb (l0_bb);
2993 expand_omp_build_assign (&gsi, counts[fd->collapse - 1],
2994 istart0, true);
2995 gsi = gsi_last_bb (cont_bb);
2996 t = fold_build2 (PLUS_EXPR, fd->iter_type, counts[fd->collapse - 1],
2997 build_int_cst (fd->iter_type, 1));
2998 expand_omp_build_assign (&gsi, counts[fd->collapse - 1], t);
2999 tree aref = build4 (ARRAY_REF, fd->iter_type, counts[fd->ordered],
3000 size_zero_node, NULL_TREE, NULL_TREE);
3001 expand_omp_build_assign (&gsi, aref, counts[fd->collapse - 1]);
3002 t = counts[fd->collapse - 1];
3003 }
3004 else if (fd->collapse > 1)
3005 t = fd->loop.v;
3006 else
3007 {
3008 t = fold_build2 (MINUS_EXPR, TREE_TYPE (fd->loops[0].v),
3009 fd->loops[0].v, fd->loops[0].n1);
3010 t = fold_convert (fd->iter_type, t);
3011 }
3012 gsi = gsi_last_bb (l0_bb);
3013 tree aref = build4 (ARRAY_REF, fd->iter_type, counts[fd->ordered],
3014 size_zero_node, NULL_TREE, NULL_TREE);
3015 t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3016 false, GSI_CONTINUE_LINKING);
3017 expand_omp_build_assign (&gsi, aref, t, true);
3018 }
3019
3020 if (!broken_loop)
3021 {
3022 /* Code to control the increment and predicate for the sequential
3023 loop goes in the CONT_BB. */
3024 gsi = gsi_last_bb (cont_bb);
3025 gomp_continue *cont_stmt = as_a <gomp_continue *> (gsi_stmt (gsi));
3026 gcc_assert (gimple_code (cont_stmt) == GIMPLE_OMP_CONTINUE);
3027 vmain = gimple_omp_continue_control_use (cont_stmt);
3028 vback = gimple_omp_continue_control_def (cont_stmt);
3029
3030 if (!gimple_omp_for_combined_p (fd->for_stmt))
3031 {
3032 if (POINTER_TYPE_P (type))
3033 t = fold_build_pointer_plus (vmain, fd->loop.step);
3034 else
3035 t = fold_build2 (PLUS_EXPR, type, vmain, fd->loop.step);
3036 t = force_gimple_operand_gsi (&gsi, t,
3037 DECL_P (vback)
3038 && TREE_ADDRESSABLE (vback),
3039 NULL_TREE, true, GSI_SAME_STMT);
3040 assign_stmt = gimple_build_assign (vback, t);
3041 gsi_insert_before (&gsi, assign_stmt, GSI_SAME_STMT);
3042
3043 if (fd->ordered && counts[fd->collapse - 1] == NULL_TREE)
3044 {
3045 if (fd->collapse > 1)
3046 t = fd->loop.v;
3047 else
3048 {
3049 t = fold_build2 (MINUS_EXPR, TREE_TYPE (fd->loops[0].v),
3050 fd->loops[0].v, fd->loops[0].n1);
3051 t = fold_convert (fd->iter_type, t);
3052 }
3053 tree aref = build4 (ARRAY_REF, fd->iter_type,
3054 counts[fd->ordered], size_zero_node,
3055 NULL_TREE, NULL_TREE);
3056 t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3057 true, GSI_SAME_STMT);
3058 expand_omp_build_assign (&gsi, aref, t);
3059 }
3060
3061 t = build2 (fd->loop.cond_code, boolean_type_node,
3062 DECL_P (vback) && TREE_ADDRESSABLE (vback) ? t : vback,
3063 iend);
3064 gcond *cond_stmt = gimple_build_cond_empty (t);
3065 gsi_insert_before (&gsi, cond_stmt, GSI_SAME_STMT);
3066 }
3067
3068 /* Remove GIMPLE_OMP_CONTINUE. */
3069 gsi_remove (&gsi, true);
3070
3071 if (fd->collapse > 1 && !gimple_omp_for_combined_p (fd->for_stmt))
3072 collapse_bb = extract_omp_for_update_vars (fd, cont_bb, l1_bb);
3073
3074 /* Emit code to get the next parallel iteration in L2_BB. */
3075 gsi = gsi_start_bb (l2_bb);
3076
3077 t = build_call_expr (builtin_decl_explicit (next_fn), 2,
3078 build_fold_addr_expr (istart0),
3079 build_fold_addr_expr (iend0));
3080 t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3081 false, GSI_CONTINUE_LINKING);
3082 if (TREE_TYPE (t) != boolean_type_node)
3083 t = fold_build2 (NE_EXPR, boolean_type_node,
3084 t, build_int_cst (TREE_TYPE (t), 0));
3085 gcond *cond_stmt = gimple_build_cond_empty (t);
3086 gsi_insert_after (&gsi, cond_stmt, GSI_CONTINUE_LINKING);
3087 }
3088
3089 /* Add the loop cleanup function. */
3090 gsi = gsi_last_bb (exit_bb);
3091 if (gimple_omp_return_nowait_p (gsi_stmt (gsi)))
3092 t = builtin_decl_explicit (BUILT_IN_GOMP_LOOP_END_NOWAIT);
3093 else if (gimple_omp_return_lhs (gsi_stmt (gsi)))
3094 t = builtin_decl_explicit (BUILT_IN_GOMP_LOOP_END_CANCEL);
3095 else
3096 t = builtin_decl_explicit (BUILT_IN_GOMP_LOOP_END);
3097 gcall *call_stmt = gimple_build_call (t, 0);
3098 if (gimple_omp_return_lhs (gsi_stmt (gsi)))
3099 gimple_call_set_lhs (call_stmt, gimple_omp_return_lhs (gsi_stmt (gsi)));
3100 gsi_insert_after (&gsi, call_stmt, GSI_SAME_STMT);
3101 if (fd->ordered)
3102 {
3103 tree arr = counts[fd->ordered];
3104 tree clobber = build_constructor (TREE_TYPE (arr), NULL);
3105 TREE_THIS_VOLATILE (clobber) = 1;
3106 gsi_insert_after (&gsi, gimple_build_assign (arr, clobber),
3107 GSI_SAME_STMT);
3108 }
3109 gsi_remove (&gsi, true);
3110
3111 /* Connect the new blocks. */
3112 find_edge (entry_bb, l0_bb)->flags = EDGE_TRUE_VALUE;
3113 find_edge (entry_bb, l3_bb)->flags = EDGE_FALSE_VALUE;
3114
3115 if (!broken_loop)
3116 {
3117 gimple_seq phis;
3118
3119 e = find_edge (cont_bb, l3_bb);
3120 ne = make_edge (l2_bb, l3_bb, EDGE_FALSE_VALUE);
3121
3122 phis = phi_nodes (l3_bb);
3123 for (gsi = gsi_start (phis); !gsi_end_p (gsi); gsi_next (&gsi))
3124 {
3125 gimple *phi = gsi_stmt (gsi);
3126 SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, ne),
3127 PHI_ARG_DEF_FROM_EDGE (phi, e));
3128 }
3129 remove_edge (e);
3130
3131 make_edge (cont_bb, l2_bb, EDGE_FALSE_VALUE);
3132 e = find_edge (cont_bb, l1_bb);
3133 if (e == NULL)
3134 {
3135 e = BRANCH_EDGE (cont_bb);
3136 gcc_assert (single_succ (e->dest) == l1_bb);
3137 }
3138 if (gimple_omp_for_combined_p (fd->for_stmt))
3139 {
3140 remove_edge (e);
3141 e = NULL;
3142 }
3143 else if (fd->collapse > 1)
3144 {
3145 remove_edge (e);
3146 e = make_edge (cont_bb, collapse_bb, EDGE_TRUE_VALUE);
3147 }
3148 else
3149 e->flags = EDGE_TRUE_VALUE;
3150 if (e)
3151 {
3152 e->probability = profile_probability::guessed_always ().apply_scale (7, 8);
3153 find_edge (cont_bb, l2_bb)->probability = e->probability.invert ();
3154 }
3155 else
3156 {
3157 e = find_edge (cont_bb, l2_bb);
3158 e->flags = EDGE_FALLTHRU;
3159 }
3160 make_edge (l2_bb, l0_bb, EDGE_TRUE_VALUE);
3161
3162 if (gimple_in_ssa_p (cfun))
3163 {
3164 /* Add phis to the outer loop that connect to the phis in the inner,
3165 original loop, and move the loop entry value of the inner phi to
3166 the loop entry value of the outer phi. */
3167 gphi_iterator psi;
3168 for (psi = gsi_start_phis (l3_bb); !gsi_end_p (psi); gsi_next (&psi))
3169 {
3170 source_location locus;
3171 gphi *nphi;
3172 gphi *exit_phi = psi.phi ();
3173
3174 edge l2_to_l3 = find_edge (l2_bb, l3_bb);
3175 tree exit_res = PHI_ARG_DEF_FROM_EDGE (exit_phi, l2_to_l3);
3176
3177 basic_block latch = BRANCH_EDGE (cont_bb)->dest;
3178 edge latch_to_l1 = find_edge (latch, l1_bb);
3179 gphi *inner_phi
3180 = find_phi_with_arg_on_edge (exit_res, latch_to_l1);
3181
3182 tree t = gimple_phi_result (exit_phi);
3183 tree new_res = copy_ssa_name (t, NULL);
3184 nphi = create_phi_node (new_res, l0_bb);
3185
3186 edge l0_to_l1 = find_edge (l0_bb, l1_bb);
3187 t = PHI_ARG_DEF_FROM_EDGE (inner_phi, l0_to_l1);
3188 locus = gimple_phi_arg_location_from_edge (inner_phi, l0_to_l1);
3189 edge entry_to_l0 = find_edge (entry_bb, l0_bb);
3190 add_phi_arg (nphi, t, entry_to_l0, locus);
3191
3192 edge l2_to_l0 = find_edge (l2_bb, l0_bb);
3193 add_phi_arg (nphi, exit_res, l2_to_l0, UNKNOWN_LOCATION);
3194
3195 add_phi_arg (inner_phi, new_res, l0_to_l1, UNKNOWN_LOCATION);
3196 };
3197 }
3198
3199 set_immediate_dominator (CDI_DOMINATORS, l2_bb,
3200 recompute_dominator (CDI_DOMINATORS, l2_bb));
3201 set_immediate_dominator (CDI_DOMINATORS, l3_bb,
3202 recompute_dominator (CDI_DOMINATORS, l3_bb));
3203 set_immediate_dominator (CDI_DOMINATORS, l0_bb,
3204 recompute_dominator (CDI_DOMINATORS, l0_bb));
3205 set_immediate_dominator (CDI_DOMINATORS, l1_bb,
3206 recompute_dominator (CDI_DOMINATORS, l1_bb));
3207
3208 /* We enter expand_omp_for_generic with a loop. This original loop may
3209 have its own loop struct, or it may be part of an outer loop struct
3210 (which may be the fake loop). */
3211 struct loop *outer_loop = entry_bb->loop_father;
3212 bool orig_loop_has_loop_struct = l1_bb->loop_father != outer_loop;
3213
3214 add_bb_to_loop (l2_bb, outer_loop);
3215
3216 /* We've added a new loop around the original loop. Allocate the
3217 corresponding loop struct. */
3218 struct loop *new_loop = alloc_loop ();
3219 new_loop->header = l0_bb;
3220 new_loop->latch = l2_bb;
3221 add_loop (new_loop, outer_loop);
3222
3223 /* Allocate a loop structure for the original loop unless we already
3224 had one. */
3225 if (!orig_loop_has_loop_struct
3226 && !gimple_omp_for_combined_p (fd->for_stmt))
3227 {
3228 struct loop *orig_loop = alloc_loop ();
3229 orig_loop->header = l1_bb;
3230 /* The loop may have multiple latches. */
3231 add_loop (orig_loop, new_loop);
3232 }
3233 }
3234 }
3235
3236 /* A subroutine of expand_omp_for. Generate code for a parallel
3237 loop with static schedule and no specified chunk size. Given
3238 parameters:
3239
3240 for (V = N1; V cond N2; V += STEP) BODY;
3241
3242 where COND is "<" or ">", we generate pseudocode
3243
3244 if ((__typeof (V)) -1 > 0 && N2 cond N1) goto L2;
3245 if (cond is <)
3246 adj = STEP - 1;
3247 else
3248 adj = STEP + 1;
3249 if ((__typeof (V)) -1 > 0 && cond is >)
3250 n = -(adj + N2 - N1) / -STEP;
3251 else
3252 n = (adj + N2 - N1) / STEP;
3253 q = n / nthreads;
3254 tt = n % nthreads;
3255 if (threadid < tt) goto L3; else goto L4;
3256 L3:
3257 tt = 0;
3258 q = q + 1;
3259 L4:
3260 s0 = q * threadid + tt;
3261 e0 = s0 + q;
3262 V = s0 * STEP + N1;
3263 if (s0 >= e0) goto L2; else goto L0;
3264 L0:
3265 e = e0 * STEP + N1;
3266 L1:
3267 BODY;
3268 V += STEP;
3269 if (V cond e) goto L1;
3270 L2:
3271 */
3272
static void
expand_omp_for_static_nochunk (struct omp_region *region,
			       struct omp_for_data *fd,
			       gimple *inner_stmt)
{
  tree n, q, s0, e0, e, t, tt, nthreads, threadid;
  tree type, itype, vmain, vback;
  basic_block entry_bb, second_bb, third_bb, exit_bb, seq_start_bb;
  basic_block body_bb, cont_bb, collapse_bb = NULL;
  basic_block fin_bb;
  gimple_stmt_iterator gsi;
  edge ep;
  bool broken_loop = region->cont == NULL;
  tree *counts = NULL;
  tree n1, n2, step;

  /* All index arithmetic is done in ITYPE; for pointer-typed iteration
     variables use the corresponding signed integer type.  */
  itype = type = TREE_TYPE (fd->loop.v);
  if (POINTER_TYPE_P (type))
    itype = signed_type_for (type);

  /* Locate the blocks of the region: ENTRY_BB holds the GIMPLE_OMP_FOR,
     FIN_BB is where control goes when a thread has no iterations,
     SEQ_START_BB (freshly split off) will receive the per-thread setup
     code, and BODY_BB is the loop body.  A "broken" loop (no continue
     statement, e.g. body ends in a call that doesn't return) has no
     CONT_BB.  */
  entry_bb = region->entry;
  cont_bb = region->cont;
  gcc_assert (EDGE_COUNT (entry_bb->succs) == 2);
  fin_bb = BRANCH_EDGE (entry_bb)->dest;
  gcc_assert (broken_loop
	      || (fin_bb == FALLTHRU_EDGE (cont_bb)->dest));
  seq_start_bb = split_edge (FALLTHRU_EDGE (entry_bb));
  body_bb = single_succ (seq_start_bb);
  if (!broken_loop)
    {
      gcc_assert (BRANCH_EDGE (cont_bb)->dest == body_bb
		  || single_succ (BRANCH_EDGE (cont_bb)->dest) == body_bb);
      gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
    }
  exit_bb = region->exit;

  /* Iteration space partitioning goes in ENTRY_BB.  */
  gsi = gsi_last_bb (entry_bb);
  gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);

  /* T ends up as the "are there any iterations at all" condition:
     NULL_TREE when it couldn't be folded to a constant, otherwise a
     folded boolean.  For collapsed loops the counts array is computed
     instead.  */
  if (fd->collapse > 1)
    {
      int first_zero_iter = -1, dummy = -1;
      basic_block l2_dom_bb = NULL, dummy_bb = NULL;

      counts = XALLOCAVEC (tree, fd->collapse);
      expand_omp_for_init_counts (fd, &gsi, entry_bb, counts,
				  fin_bb, first_zero_iter,
				  dummy_bb, dummy, l2_dom_bb);
      t = NULL_TREE;
    }
  else if (gimple_omp_for_combined_into_p (fd->for_stmt))
    t = integer_one_node;
  else
    t = fold_binary (fd->loop.cond_code, boolean_type_node,
		     fold_convert (type, fd->loop.n1),
		     fold_convert (type, fd->loop.n2));
  /* For an unsigned single loop whose emptiness couldn't be proven at
     compile time, emit an explicit "if (N1 cond N2)" runtime guard
     before the partitioning code (the "(__typeof (V)) -1 > 0 && N2 cond
     N1" test from the pseudocode above), branching straight to FIN_BB
     when the loop runs zero times.  */
  if (fd->collapse == 1
      && TYPE_UNSIGNED (type)
      && (t == NULL_TREE || !integer_onep (t)))
    {
      n1 = fold_convert (type, unshare_expr (fd->loop.n1));
      n1 = force_gimple_operand_gsi (&gsi, n1, true, NULL_TREE,
				     true, GSI_SAME_STMT);
      n2 = fold_convert (type, unshare_expr (fd->loop.n2));
      n2 = force_gimple_operand_gsi (&gsi, n2, true, NULL_TREE,
				     true, GSI_SAME_STMT);
      gcond *cond_stmt = gimple_build_cond (fd->loop.cond_code, n1, n2,
					    NULL_TREE, NULL_TREE);
      gsi_insert_before (&gsi, cond_stmt, GSI_SAME_STMT);
      if (walk_tree (gimple_cond_lhs_ptr (cond_stmt),
		     expand_omp_regimplify_p, NULL, NULL)
	  || walk_tree (gimple_cond_rhs_ptr (cond_stmt),
			expand_omp_regimplify_p, NULL, NULL))
	{
	  gsi = gsi_for_stmt (cond_stmt);
	  gimple_regimplify_operands (cond_stmt, &gsi);
	}
      ep = split_block (entry_bb, cond_stmt);
      ep->flags = EDGE_TRUE_VALUE;
      entry_bb = ep->dest;
      ep->probability = profile_probability::very_likely ();
      ep = make_edge (ep->src, fin_bb, EDGE_FALSE_VALUE);
      ep->probability = profile_probability::very_unlikely ();
      if (gimple_in_ssa_p (cfun))
	{
	  /* FIN_BB gained a new predecessor edge; extend its PHIs with
	     the values they already carry on the existing edge.  */
	  int dest_idx = find_edge (entry_bb, fin_bb)->dest_idx;
	  for (gphi_iterator gpi = gsi_start_phis (fin_bb);
	       !gsi_end_p (gpi); gsi_next (&gpi))
	    {
	      gphi *phi = gpi.phi ();
	      add_phi_arg (phi, gimple_phi_arg_def (phi, dest_idx),
			   ep, UNKNOWN_LOCATION);
	    }
	}
      gsi = gsi_last_bb (entry_bb);
    }

  /* Pick the runtime queries for the partitioning context: threads
     within a team for worksharing loops, teams for distribute.  */
  switch (gimple_omp_for_kind (fd->for_stmt))
    {
    case GF_OMP_FOR_KIND_FOR:
      nthreads = builtin_decl_explicit (BUILT_IN_OMP_GET_NUM_THREADS);
      threadid = builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM);
      break;
    case GF_OMP_FOR_KIND_DISTRIBUTE:
      nthreads = builtin_decl_explicit (BUILT_IN_OMP_GET_NUM_TEAMS);
      threadid = builtin_decl_explicit (BUILT_IN_OMP_GET_TEAM_NUM);
      break;
    default:
      gcc_unreachable ();
    }
  nthreads = build_call_expr (nthreads, 0);
  nthreads = fold_convert (itype, nthreads);
  nthreads = force_gimple_operand_gsi (&gsi, nthreads, true, NULL_TREE,
				       true, GSI_SAME_STMT);
  threadid = build_call_expr (threadid, 0);
  threadid = fold_convert (itype, threadid);
  threadid = force_gimple_operand_gsi (&gsi, threadid, true, NULL_TREE,
				       true, GSI_SAME_STMT);

  n1 = fd->loop.n1;
  n2 = fd->loop.n2;
  step = fd->loop.step;
  if (gimple_omp_for_combined_into_p (fd->for_stmt))
    {
      /* For a loop combined into an enclosing construct the actual
	 bounds are passed in via the first two _looptemp_ clauses.  */
      tree innerc = omp_find_clause (gimple_omp_for_clauses (fd->for_stmt),
				     OMP_CLAUSE__LOOPTEMP_);
      gcc_assert (innerc);
      n1 = OMP_CLAUSE_DECL (innerc);
      innerc = omp_find_clause (OMP_CLAUSE_CHAIN (innerc),
				OMP_CLAUSE__LOOPTEMP_);
      gcc_assert (innerc);
      n2 = OMP_CLAUSE_DECL (innerc);
    }
  n1 = force_gimple_operand_gsi (&gsi, fold_convert (type, n1),
				 true, NULL_TREE, true, GSI_SAME_STMT);
  n2 = force_gimple_operand_gsi (&gsi, fold_convert (itype, n2),
				 true, NULL_TREE, true, GSI_SAME_STMT);
  step = force_gimple_operand_gsi (&gsi, fold_convert (itype, step),
				   true, NULL_TREE, true, GSI_SAME_STMT);

  /* Compute the total iteration count
       n = (adj + N2 - N1) / STEP   with adj = STEP -/+ 1,
     negating both operands for an unsigned downward loop so the
     division rounds in the right direction.  */
  t = build_int_cst (itype, (fd->loop.cond_code == LT_EXPR ? -1 : 1));
  t = fold_build2 (PLUS_EXPR, itype, step, t);
  t = fold_build2 (PLUS_EXPR, itype, t, n2);
  t = fold_build2 (MINUS_EXPR, itype, t, fold_convert (itype, n1));
  if (TYPE_UNSIGNED (itype) && fd->loop.cond_code == GT_EXPR)
    t = fold_build2 (TRUNC_DIV_EXPR, itype,
		     fold_build1 (NEGATE_EXPR, itype, t),
		     fold_build1 (NEGATE_EXPR, itype, step));
  else
    t = fold_build2 (TRUNC_DIV_EXPR, itype, t, step);
  t = fold_convert (itype, t);
  n = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);

  /* q = n / nthreads; tt = n % nthreads.  The first TT threads get
     Q + 1 iterations, the rest Q (see the pseudocode above).  */
  q = create_tmp_reg (itype, "q");
  t = fold_build2 (TRUNC_DIV_EXPR, itype, n, nthreads);
  t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE, true, GSI_SAME_STMT);
  gsi_insert_before (&gsi, gimple_build_assign (q, t), GSI_SAME_STMT);

  tt = create_tmp_reg (itype, "tt");
  t = fold_build2 (TRUNC_MOD_EXPR, itype, n, nthreads);
  t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE, true, GSI_SAME_STMT);
  gsi_insert_before (&gsi, gimple_build_assign (tt, t), GSI_SAME_STMT);

  t = build2 (LT_EXPR, boolean_type_node, threadid, tt);
  gcond *cond_stmt = gimple_build_cond_empty (t);
  gsi_insert_before (&gsi, cond_stmt, GSI_SAME_STMT);

  /* SECOND_BB is the "threadid < tt" branch: tt = 0; q++.  */
  second_bb = split_block (entry_bb, cond_stmt)->dest;
  gsi = gsi_last_bb (second_bb);
  gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);

  gsi_insert_before (&gsi, gimple_build_assign (tt, build_int_cst (itype, 0)),
		     GSI_SAME_STMT);
  gassign *assign_stmt
    = gimple_build_assign (q, PLUS_EXPR, q, build_int_cst (itype, 1));
  gsi_insert_before (&gsi, assign_stmt, GSI_SAME_STMT);

  /* THIRD_BB computes the per-thread range [s0, e0) and tests whether
     this thread has any iterations at all.  */
  third_bb = split_block (second_bb, assign_stmt)->dest;
  gsi = gsi_last_bb (third_bb);
  gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);

  t = build2 (MULT_EXPR, itype, q, threadid);
  t = build2 (PLUS_EXPR, itype, t, tt);
  s0 = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);

  t = fold_build2 (PLUS_EXPR, itype, s0, q);
  e0 = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);

  t = build2 (GE_EXPR, boolean_type_node, s0, e0);
  gsi_insert_before (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT);

  /* Remove the GIMPLE_OMP_FOR statement.  */
  gsi_remove (&gsi, true);

  /* Setup code for sequential iteration goes in SEQ_START_BB.  */
  gsi = gsi_start_bb (seq_start_bb);

  tree startvar = fd->loop.v;
  tree endvar = NULL_TREE;

  if (gimple_omp_for_combined_p (fd->for_stmt))
    {
      /* When this loop is combined with an inner construct, the start
	 and end of the range are communicated to it through the inner
	 construct's _looptemp_ clauses instead of fd->loop.v.  */
      tree clauses = gimple_code (inner_stmt) == GIMPLE_OMP_PARALLEL
		     ? gimple_omp_parallel_clauses (inner_stmt)
		     : gimple_omp_for_clauses (inner_stmt);
      tree innerc = omp_find_clause (clauses, OMP_CLAUSE__LOOPTEMP_);
      gcc_assert (innerc);
      startvar = OMP_CLAUSE_DECL (innerc);
      innerc = omp_find_clause (OMP_CLAUSE_CHAIN (innerc),
				OMP_CLAUSE__LOOPTEMP_);
      gcc_assert (innerc);
      endvar = OMP_CLAUSE_DECL (innerc);
      if (fd->collapse > 1 && TREE_CODE (fd->loop.n2) != INTEGER_CST
	  && gimple_omp_for_kind (fd->for_stmt) == GF_OMP_FOR_KIND_DISTRIBUTE)
	{
	  /* Skip over the per-collapsed-loop _looptemp_ clauses to
	     reach an optional extra one.  */
	  int i;
	  for (i = 1; i < fd->collapse; i++)
	    {
	      innerc = omp_find_clause (OMP_CLAUSE_CHAIN (innerc),
					OMP_CLAUSE__LOOPTEMP_);
	      gcc_assert (innerc);
	    }
	  innerc = omp_find_clause (OMP_CLAUSE_CHAIN (innerc),
				    OMP_CLAUSE__LOOPTEMP_);
	  if (innerc)
	    {
	      /* If needed (distribute parallel for with lastprivate),
		 propagate down the total number of iterations.  */
	      tree t = fold_convert (TREE_TYPE (OMP_CLAUSE_DECL (innerc)),
				     fd->loop.n2);
	      t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE, false,
					    GSI_CONTINUE_LINKING);
	      assign_stmt = gimple_build_assign (OMP_CLAUSE_DECL (innerc), t);
	      gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING);
	    }
	}
    }
  /* V = s0 * STEP + N1.  */
  t = fold_convert (itype, s0);
  t = fold_build2 (MULT_EXPR, itype, t, step);
  if (POINTER_TYPE_P (type))
    t = fold_build_pointer_plus (n1, t);
  else
    t = fold_build2 (PLUS_EXPR, type, t, n1);
  t = fold_convert (TREE_TYPE (startvar), t);
  t = force_gimple_operand_gsi (&gsi, t,
				DECL_P (startvar)
				&& TREE_ADDRESSABLE (startvar),
				NULL_TREE, false, GSI_CONTINUE_LINKING);
  assign_stmt = gimple_build_assign (startvar, t);
  gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING);

  /* e = e0 * STEP + N1.  */
  t = fold_convert (itype, e0);
  t = fold_build2 (MULT_EXPR, itype, t, step);
  if (POINTER_TYPE_P (type))
    t = fold_build_pointer_plus (n1, t);
  else
    t = fold_build2 (PLUS_EXPR, type, t, n1);
  t = fold_convert (TREE_TYPE (startvar), t);
  e = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
				false, GSI_CONTINUE_LINKING);
  if (endvar)
    {
      assign_stmt = gimple_build_assign (endvar, e);
      gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING);
      if (useless_type_conversion_p (TREE_TYPE (fd->loop.v), TREE_TYPE (e)))
	assign_stmt = gimple_build_assign (fd->loop.v, e);
      else
	assign_stmt = gimple_build_assign (fd->loop.v, NOP_EXPR, e);
      gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING);
    }
  /* Handle linear clause adjustments.  */
  tree itercnt = NULL_TREE;
  if (gimple_omp_for_kind (fd->for_stmt) == GF_OMP_FOR_KIND_FOR)
    for (tree c = gimple_omp_for_clauses (fd->for_stmt);
	 c; c = OMP_CLAUSE_CHAIN (c))
      if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LINEAR
	  && !OMP_CLAUSE_LINEAR_NO_COPYIN (c))
	{
	  tree d = OMP_CLAUSE_DECL (c);
	  bool is_ref = omp_is_reference (d);
	  tree t = d, a, dest;
	  if (is_ref)
	    t = build_simple_mem_ref_loc (OMP_CLAUSE_LOCATION (c), t);
	  /* ITERCNT is the logical iteration number of S0; computed
	     once and shared by all linear clauses.  */
	  if (itercnt == NULL_TREE)
	    {
	      if (gimple_omp_for_combined_into_p (fd->for_stmt))
		{
		  itercnt = fold_build2 (MINUS_EXPR, itype,
					 fold_convert (itype, n1),
					 fold_convert (itype, fd->loop.n1));
		  itercnt = fold_build2 (EXACT_DIV_EXPR, itype, itercnt, step);
		  itercnt = fold_build2 (PLUS_EXPR, itype, itercnt, s0);
		  itercnt = force_gimple_operand_gsi (&gsi, itercnt, true,
						      NULL_TREE, false,
						      GSI_CONTINUE_LINKING);
		}
	      else
		itercnt = s0;
	    }
	  tree type = TREE_TYPE (t);
	  if (POINTER_TYPE_P (type))
	    type = sizetype;
	  /* dest = d + itercnt * linear-step.  */
	  a = fold_build2 (MULT_EXPR, type,
			   fold_convert (type, itercnt),
			   fold_convert (type, OMP_CLAUSE_LINEAR_STEP (c)));
	  dest = unshare_expr (t);
	  t = fold_build2 (type == TREE_TYPE (t) ? PLUS_EXPR
			   : POINTER_PLUS_EXPR, TREE_TYPE (t), t, a);
	  t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
					false, GSI_CONTINUE_LINKING);
	  assign_stmt = gimple_build_assign (dest, t);
	  gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING);
	}
  if (fd->collapse > 1)
    expand_omp_for_init_vars (fd, &gsi, counts, inner_stmt, startvar);

  if (!broken_loop)
    {
      /* The code controlling the sequential loop replaces the
	 GIMPLE_OMP_CONTINUE.  */
      gsi = gsi_last_bb (cont_bb);
      gomp_continue *cont_stmt = as_a <gomp_continue *> (gsi_stmt (gsi));
      gcc_assert (gimple_code (cont_stmt) == GIMPLE_OMP_CONTINUE);
      vmain = gimple_omp_continue_control_use (cont_stmt);
      vback = gimple_omp_continue_control_def (cont_stmt);

      if (!gimple_omp_for_combined_p (fd->for_stmt))
	{
	  /* V += STEP; if (V cond e) goto body.  */
	  if (POINTER_TYPE_P (type))
	    t = fold_build_pointer_plus (vmain, step);
	  else
	    t = fold_build2 (PLUS_EXPR, type, vmain, step);
	  t = force_gimple_operand_gsi (&gsi, t,
					DECL_P (vback)
					&& TREE_ADDRESSABLE (vback),
					NULL_TREE, true, GSI_SAME_STMT);
	  assign_stmt = gimple_build_assign (vback, t);
	  gsi_insert_before (&gsi, assign_stmt, GSI_SAME_STMT);

	  t = build2 (fd->loop.cond_code, boolean_type_node,
		      DECL_P (vback) && TREE_ADDRESSABLE (vback)
		      ? t : vback, e);
	  gsi_insert_before (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT);
	}

      /* Remove the GIMPLE_OMP_CONTINUE statement.  */
      gsi_remove (&gsi, true);

      if (fd->collapse > 1 && !gimple_omp_for_combined_p (fd->for_stmt))
	collapse_bb = extract_omp_for_update_vars (fd, cont_bb, body_bb);
    }

  /* Replace the GIMPLE_OMP_RETURN with a barrier, or nothing.  */
  gsi = gsi_last_bb (exit_bb);
  if (!gimple_omp_return_nowait_p (gsi_stmt (gsi)))
    {
      t = gimple_omp_return_lhs (gsi_stmt (gsi));
      gsi_insert_after (&gsi, omp_build_barrier (t), GSI_SAME_STMT);
    }
  gsi_remove (&gsi, true);

  /* Connect all the blocks.  */
  ep = make_edge (entry_bb, third_bb, EDGE_FALSE_VALUE);
  ep->probability = profile_probability::guessed_always ().apply_scale (3, 4);
  ep = find_edge (entry_bb, second_bb);
  ep->flags = EDGE_TRUE_VALUE;
  ep->probability = profile_probability::guessed_always ().apply_scale (1, 4);
  find_edge (third_bb, seq_start_bb)->flags = EDGE_FALSE_VALUE;
  find_edge (third_bb, fin_bb)->flags = EDGE_TRUE_VALUE;

  if (!broken_loop)
    {
      ep = find_edge (cont_bb, body_bb);
      if (ep == NULL)
	{
	  ep = BRANCH_EDGE (cont_bb);
	  gcc_assert (single_succ (ep->dest) == body_bb);
	}
      if (gimple_omp_for_combined_p (fd->for_stmt))
	{
	  remove_edge (ep);
	  ep = NULL;
	}
      else if (fd->collapse > 1)
	{
	  remove_edge (ep);
	  ep = make_edge (cont_bb, collapse_bb, EDGE_TRUE_VALUE);
	}
      else
	ep->flags = EDGE_TRUE_VALUE;
      find_edge (cont_bb, fin_bb)->flags
	= ep ? EDGE_FALSE_VALUE : EDGE_FALLTHRU;
    }

  set_immediate_dominator (CDI_DOMINATORS, second_bb, entry_bb);
  set_immediate_dominator (CDI_DOMINATORS, third_bb, entry_bb);
  set_immediate_dominator (CDI_DOMINATORS, seq_start_bb, third_bb);

  set_immediate_dominator (CDI_DOMINATORS, body_bb,
			   recompute_dominator (CDI_DOMINATORS, body_bb));
  set_immediate_dominator (CDI_DOMINATORS, fin_bb,
			   recompute_dominator (CDI_DOMINATORS, fin_bb));

  /* If the body already belongs to a different loop struct (created by
     an earlier transformation, presumably — verify against callers),
     just sanity-check it; otherwise allocate one for the new loop.  */
  struct loop *loop = body_bb->loop_father;
  if (loop != entry_bb->loop_father)
    {
      gcc_assert (broken_loop || loop->header == body_bb);
      gcc_assert (broken_loop
		  || loop->latch == region->cont
		  || single_pred (loop->latch) == region->cont);
      return;
    }

  if (!broken_loop && !gimple_omp_for_combined_p (fd->for_stmt))
    {
      loop = alloc_loop ();
      loop->header = body_bb;
      if (collapse_bb == NULL)
	loop->latch = cont_bb;
      add_loop (loop, body_bb->loop_father);
    }
}
3696
3697 /* Return phi in E->DEST with ARG on edge E. */
3698
3699 static gphi *
3700 find_phi_with_arg_on_edge (tree arg, edge e)
3701 {
3702 basic_block bb = e->dest;
3703
3704 for (gphi_iterator gpi = gsi_start_phis (bb);
3705 !gsi_end_p (gpi);
3706 gsi_next (&gpi))
3707 {
3708 gphi *phi = gpi.phi ();
3709 if (PHI_ARG_DEF_FROM_EDGE (phi, e) == arg)
3710 return phi;
3711 }
3712
3713 return NULL;
3714 }
3715
3716 /* A subroutine of expand_omp_for. Generate code for a parallel
3717 loop with static schedule and a specified chunk size. Given
3718 parameters:
3719
3720 for (V = N1; V cond N2; V += STEP) BODY;
3721
3722 where COND is "<" or ">", we generate pseudocode
3723
3724 if ((__typeof (V)) -1 > 0 && N2 cond N1) goto L2;
3725 if (cond is <)
3726 adj = STEP - 1;
3727 else
3728 adj = STEP + 1;
3729 if ((__typeof (V)) -1 > 0 && cond is >)
3730 n = -(adj + N2 - N1) / -STEP;
3731 else
3732 n = (adj + N2 - N1) / STEP;
3733 trip = 0;
3734 V = threadid * CHUNK * STEP + N1; -- this extra definition of V is
3735 here so that V is defined
3736 if the loop is not entered
3737 L0:
3738 s0 = (trip * nthreads + threadid) * CHUNK;
3739 e0 = min (s0 + CHUNK, n);
3740 if (s0 < n) goto L1; else goto L4;
3741 L1:
3742 V = s0 * STEP + N1;
3743 e = e0 * STEP + N1;
3744 L2:
3745 BODY;
3746 V += STEP;
3747 if (V cond e) goto L2; else goto L3;
3748 L3:
3749 trip += 1;
3750 goto L0;
3751 L4:
3752 */
3753
3754 static void
3755 expand_omp_for_static_chunk (struct omp_region *region,
3756 struct omp_for_data *fd, gimple *inner_stmt)
3757 {
3758 tree n, s0, e0, e, t;
3759 tree trip_var, trip_init, trip_main, trip_back, nthreads, threadid;
3760 tree type, itype, vmain, vback, vextra;
3761 basic_block entry_bb, exit_bb, body_bb, seq_start_bb, iter_part_bb;
3762 basic_block trip_update_bb = NULL, cont_bb, collapse_bb = NULL, fin_bb;
3763 gimple_stmt_iterator gsi;
3764 edge se;
3765 bool broken_loop = region->cont == NULL;
3766 tree *counts = NULL;
3767 tree n1, n2, step;
3768
3769 itype = type = TREE_TYPE (fd->loop.v);
3770 if (POINTER_TYPE_P (type))
3771 itype = signed_type_for (type);
3772
3773 entry_bb = region->entry;
3774 se = split_block (entry_bb, last_stmt (entry_bb));
3775 entry_bb = se->src;
3776 iter_part_bb = se->dest;
3777 cont_bb = region->cont;
3778 gcc_assert (EDGE_COUNT (iter_part_bb->succs) == 2);
3779 fin_bb = BRANCH_EDGE (iter_part_bb)->dest;
3780 gcc_assert (broken_loop
3781 || fin_bb == FALLTHRU_EDGE (cont_bb)->dest);
3782 seq_start_bb = split_edge (FALLTHRU_EDGE (iter_part_bb));
3783 body_bb = single_succ (seq_start_bb);
3784 if (!broken_loop)
3785 {
3786 gcc_assert (BRANCH_EDGE (cont_bb)->dest == body_bb
3787 || single_succ (BRANCH_EDGE (cont_bb)->dest) == body_bb);
3788 gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
3789 trip_update_bb = split_edge (FALLTHRU_EDGE (cont_bb));
3790 }
3791 exit_bb = region->exit;
3792
3793 /* Trip and adjustment setup goes in ENTRY_BB. */
3794 gsi = gsi_last_bb (entry_bb);
3795 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
3796
3797 if (fd->collapse > 1)
3798 {
3799 int first_zero_iter = -1, dummy = -1;
3800 basic_block l2_dom_bb = NULL, dummy_bb = NULL;
3801
3802 counts = XALLOCAVEC (tree, fd->collapse);
3803 expand_omp_for_init_counts (fd, &gsi, entry_bb, counts,
3804 fin_bb, first_zero_iter,
3805 dummy_bb, dummy, l2_dom_bb);
3806 t = NULL_TREE;
3807 }
3808 else if (gimple_omp_for_combined_into_p (fd->for_stmt))
3809 t = integer_one_node;
3810 else
3811 t = fold_binary (fd->loop.cond_code, boolean_type_node,
3812 fold_convert (type, fd->loop.n1),
3813 fold_convert (type, fd->loop.n2));
3814 if (fd->collapse == 1
3815 && TYPE_UNSIGNED (type)
3816 && (t == NULL_TREE || !integer_onep (t)))
3817 {
3818 n1 = fold_convert (type, unshare_expr (fd->loop.n1));
3819 n1 = force_gimple_operand_gsi (&gsi, n1, true, NULL_TREE,
3820 true, GSI_SAME_STMT);
3821 n2 = fold_convert (type, unshare_expr (fd->loop.n2));
3822 n2 = force_gimple_operand_gsi (&gsi, n2, true, NULL_TREE,
3823 true, GSI_SAME_STMT);
3824 gcond *cond_stmt = gimple_build_cond (fd->loop.cond_code, n1, n2,
3825 NULL_TREE, NULL_TREE);
3826 gsi_insert_before (&gsi, cond_stmt, GSI_SAME_STMT);
3827 if (walk_tree (gimple_cond_lhs_ptr (cond_stmt),
3828 expand_omp_regimplify_p, NULL, NULL)
3829 || walk_tree (gimple_cond_rhs_ptr (cond_stmt),
3830 expand_omp_regimplify_p, NULL, NULL))
3831 {
3832 gsi = gsi_for_stmt (cond_stmt);
3833 gimple_regimplify_operands (cond_stmt, &gsi);
3834 }
3835 se = split_block (entry_bb, cond_stmt);
3836 se->flags = EDGE_TRUE_VALUE;
3837 entry_bb = se->dest;
3838 se->probability = profile_probability::very_likely ();
3839 se = make_edge (se->src, fin_bb, EDGE_FALSE_VALUE);
3840 se->probability = profile_probability::very_unlikely ();
3841 if (gimple_in_ssa_p (cfun))
3842 {
3843 int dest_idx = find_edge (iter_part_bb, fin_bb)->dest_idx;
3844 for (gphi_iterator gpi = gsi_start_phis (fin_bb);
3845 !gsi_end_p (gpi); gsi_next (&gpi))
3846 {
3847 gphi *phi = gpi.phi ();
3848 add_phi_arg (phi, gimple_phi_arg_def (phi, dest_idx),
3849 se, UNKNOWN_LOCATION);
3850 }
3851 }
3852 gsi = gsi_last_bb (entry_bb);
3853 }
3854
3855 switch (gimple_omp_for_kind (fd->for_stmt))
3856 {
3857 case GF_OMP_FOR_KIND_FOR:
3858 nthreads = builtin_decl_explicit (BUILT_IN_OMP_GET_NUM_THREADS);
3859 threadid = builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM);
3860 break;
3861 case GF_OMP_FOR_KIND_DISTRIBUTE:
3862 nthreads = builtin_decl_explicit (BUILT_IN_OMP_GET_NUM_TEAMS);
3863 threadid = builtin_decl_explicit (BUILT_IN_OMP_GET_TEAM_NUM);
3864 break;
3865 default:
3866 gcc_unreachable ();
3867 }
3868 nthreads = build_call_expr (nthreads, 0);
3869 nthreads = fold_convert (itype, nthreads);
3870 nthreads = force_gimple_operand_gsi (&gsi, nthreads, true, NULL_TREE,
3871 true, GSI_SAME_STMT);
3872 threadid = build_call_expr (threadid, 0);
3873 threadid = fold_convert (itype, threadid);
3874 threadid = force_gimple_operand_gsi (&gsi, threadid, true, NULL_TREE,
3875 true, GSI_SAME_STMT);
3876
3877 n1 = fd->loop.n1;
3878 n2 = fd->loop.n2;
3879 step = fd->loop.step;
3880 if (gimple_omp_for_combined_into_p (fd->for_stmt))
3881 {
3882 tree innerc = omp_find_clause (gimple_omp_for_clauses (fd->for_stmt),
3883 OMP_CLAUSE__LOOPTEMP_);
3884 gcc_assert (innerc);
3885 n1 = OMP_CLAUSE_DECL (innerc);
3886 innerc = omp_find_clause (OMP_CLAUSE_CHAIN (innerc),
3887 OMP_CLAUSE__LOOPTEMP_);
3888 gcc_assert (innerc);
3889 n2 = OMP_CLAUSE_DECL (innerc);
3890 }
3891 n1 = force_gimple_operand_gsi (&gsi, fold_convert (type, n1),
3892 true, NULL_TREE, true, GSI_SAME_STMT);
3893 n2 = force_gimple_operand_gsi (&gsi, fold_convert (itype, n2),
3894 true, NULL_TREE, true, GSI_SAME_STMT);
3895 step = force_gimple_operand_gsi (&gsi, fold_convert (itype, step),
3896 true, NULL_TREE, true, GSI_SAME_STMT);
3897 tree chunk_size = fold_convert (itype, fd->chunk_size);
3898 chunk_size = omp_adjust_chunk_size (chunk_size, fd->simd_schedule);
3899 chunk_size
3900 = force_gimple_operand_gsi (&gsi, chunk_size, true, NULL_TREE, true,
3901 GSI_SAME_STMT);
3902
3903 t = build_int_cst (itype, (fd->loop.cond_code == LT_EXPR ? -1 : 1));
3904 t = fold_build2 (PLUS_EXPR, itype, step, t);
3905 t = fold_build2 (PLUS_EXPR, itype, t, n2);
3906 t = fold_build2 (MINUS_EXPR, itype, t, fold_convert (itype, n1));
3907 if (TYPE_UNSIGNED (itype) && fd->loop.cond_code == GT_EXPR)
3908 t = fold_build2 (TRUNC_DIV_EXPR, itype,
3909 fold_build1 (NEGATE_EXPR, itype, t),
3910 fold_build1 (NEGATE_EXPR, itype, step));
3911 else
3912 t = fold_build2 (TRUNC_DIV_EXPR, itype, t, step);
3913 t = fold_convert (itype, t);
3914 n = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3915 true, GSI_SAME_STMT);
3916
3917 trip_var = create_tmp_reg (itype, ".trip");
3918 if (gimple_in_ssa_p (cfun))
3919 {
3920 trip_init = make_ssa_name (trip_var);
3921 trip_main = make_ssa_name (trip_var);
3922 trip_back = make_ssa_name (trip_var);
3923 }
3924 else
3925 {
3926 trip_init = trip_var;
3927 trip_main = trip_var;
3928 trip_back = trip_var;
3929 }
3930
3931 gassign *assign_stmt
3932 = gimple_build_assign (trip_init, build_int_cst (itype, 0));
3933 gsi_insert_before (&gsi, assign_stmt, GSI_SAME_STMT);
3934
3935 t = fold_build2 (MULT_EXPR, itype, threadid, chunk_size);
3936 t = fold_build2 (MULT_EXPR, itype, t, step);
3937 if (POINTER_TYPE_P (type))
3938 t = fold_build_pointer_plus (n1, t);
3939 else
3940 t = fold_build2 (PLUS_EXPR, type, t, n1);
3941 vextra = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3942 true, GSI_SAME_STMT);
3943
3944 /* Remove the GIMPLE_OMP_FOR. */
3945 gsi_remove (&gsi, true);
3946
3947 gimple_stmt_iterator gsif = gsi;
3948
3949 /* Iteration space partitioning goes in ITER_PART_BB. */
3950 gsi = gsi_last_bb (iter_part_bb);
3951
3952 t = fold_build2 (MULT_EXPR, itype, trip_main, nthreads);
3953 t = fold_build2 (PLUS_EXPR, itype, t, threadid);
3954 t = fold_build2 (MULT_EXPR, itype, t, chunk_size);
3955 s0 = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3956 false, GSI_CONTINUE_LINKING);
3957
3958 t = fold_build2 (PLUS_EXPR, itype, s0, chunk_size);
3959 t = fold_build2 (MIN_EXPR, itype, t, n);
3960 e0 = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3961 false, GSI_CONTINUE_LINKING);
3962
3963 t = build2 (LT_EXPR, boolean_type_node, s0, n);
3964 gsi_insert_after (&gsi, gimple_build_cond_empty (t), GSI_CONTINUE_LINKING);
3965
3966 /* Setup code for sequential iteration goes in SEQ_START_BB. */
3967 gsi = gsi_start_bb (seq_start_bb);
3968
3969 tree startvar = fd->loop.v;
3970 tree endvar = NULL_TREE;
3971
3972 if (gimple_omp_for_combined_p (fd->for_stmt))
3973 {
3974 tree clauses = gimple_code (inner_stmt) == GIMPLE_OMP_PARALLEL
3975 ? gimple_omp_parallel_clauses (inner_stmt)
3976 : gimple_omp_for_clauses (inner_stmt);
3977 tree innerc = omp_find_clause (clauses, OMP_CLAUSE__LOOPTEMP_);
3978 gcc_assert (innerc);
3979 startvar = OMP_CLAUSE_DECL (innerc);
3980 innerc = omp_find_clause (OMP_CLAUSE_CHAIN (innerc),
3981 OMP_CLAUSE__LOOPTEMP_);
3982 gcc_assert (innerc);
3983 endvar = OMP_CLAUSE_DECL (innerc);
3984 if (fd->collapse > 1 && TREE_CODE (fd->loop.n2) != INTEGER_CST
3985 && gimple_omp_for_kind (fd->for_stmt) == GF_OMP_FOR_KIND_DISTRIBUTE)
3986 {
3987 int i;
3988 for (i = 1; i < fd->collapse; i++)
3989 {
3990 innerc = omp_find_clause (OMP_CLAUSE_CHAIN (innerc),
3991 OMP_CLAUSE__LOOPTEMP_);
3992 gcc_assert (innerc);
3993 }
3994 innerc = omp_find_clause (OMP_CLAUSE_CHAIN (innerc),
3995 OMP_CLAUSE__LOOPTEMP_);
3996 if (innerc)
3997 {
3998 /* If needed (distribute parallel for with lastprivate),
3999 propagate down the total number of iterations. */
4000 tree t = fold_convert (TREE_TYPE (OMP_CLAUSE_DECL (innerc)),
4001 fd->loop.n2);
4002 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE, false,
4003 GSI_CONTINUE_LINKING);
4004 assign_stmt = gimple_build_assign (OMP_CLAUSE_DECL (innerc), t);
4005 gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING);
4006 }
4007 }
4008 }
4009
4010 t = fold_convert (itype, s0);
4011 t = fold_build2 (MULT_EXPR, itype, t, step);
4012 if (POINTER_TYPE_P (type))
4013 t = fold_build_pointer_plus (n1, t);
4014 else
4015 t = fold_build2 (PLUS_EXPR, type, t, n1);
4016 t = fold_convert (TREE_TYPE (startvar), t);
4017 t = force_gimple_operand_gsi (&gsi, t,
4018 DECL_P (startvar)
4019 && TREE_ADDRESSABLE (startvar),
4020 NULL_TREE, false, GSI_CONTINUE_LINKING);
4021 assign_stmt = gimple_build_assign (startvar, t);
4022 gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING);
4023
4024 t = fold_convert (itype, e0);
4025 t = fold_build2 (MULT_EXPR, itype, t, step);
4026 if (POINTER_TYPE_P (type))
4027 t = fold_build_pointer_plus (n1, t);
4028 else
4029 t = fold_build2 (PLUS_EXPR, type, t, n1);
4030 t = fold_convert (TREE_TYPE (startvar), t);
4031 e = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
4032 false, GSI_CONTINUE_LINKING);
4033 if (endvar)
4034 {
4035 assign_stmt = gimple_build_assign (endvar, e);
4036 gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING);
4037 if (useless_type_conversion_p (TREE_TYPE (fd->loop.v), TREE_TYPE (e)))
4038 assign_stmt = gimple_build_assign (fd->loop.v, e);
4039 else
4040 assign_stmt = gimple_build_assign (fd->loop.v, NOP_EXPR, e);
4041 gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING);
4042 }
4043 /* Handle linear clause adjustments. */
4044 tree itercnt = NULL_TREE, itercntbias = NULL_TREE;
4045 if (gimple_omp_for_kind (fd->for_stmt) == GF_OMP_FOR_KIND_FOR)
4046 for (tree c = gimple_omp_for_clauses (fd->for_stmt);
4047 c; c = OMP_CLAUSE_CHAIN (c))
4048 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LINEAR
4049 && !OMP_CLAUSE_LINEAR_NO_COPYIN (c))
4050 {
4051 tree d = OMP_CLAUSE_DECL (c);
4052 bool is_ref = omp_is_reference (d);
4053 tree t = d, a, dest;
4054 if (is_ref)
4055 t = build_simple_mem_ref_loc (OMP_CLAUSE_LOCATION (c), t);
4056 tree type = TREE_TYPE (t);
4057 if (POINTER_TYPE_P (type))
4058 type = sizetype;
4059 dest = unshare_expr (t);
4060 tree v = create_tmp_var (TREE_TYPE (t), NULL);
4061 expand_omp_build_assign (&gsif, v, t);
4062 if (itercnt == NULL_TREE)
4063 {
4064 if (gimple_omp_for_combined_into_p (fd->for_stmt))
4065 {
4066 itercntbias
4067 = fold_build2 (MINUS_EXPR, itype, fold_convert (itype, n1),
4068 fold_convert (itype, fd->loop.n1));
4069 itercntbias = fold_build2 (EXACT_DIV_EXPR, itype,
4070 itercntbias, step);
4071 itercntbias
4072 = force_gimple_operand_gsi (&gsif, itercntbias, true,
4073 NULL_TREE, true,
4074 GSI_SAME_STMT);
4075 itercnt = fold_build2 (PLUS_EXPR, itype, itercntbias, s0);
4076 itercnt = force_gimple_operand_gsi (&gsi, itercnt, true,
4077 NULL_TREE, false,
4078 GSI_CONTINUE_LINKING);
4079 }
4080 else
4081 itercnt = s0;
4082 }
4083 a = fold_build2 (MULT_EXPR, type,
4084 fold_convert (type, itercnt),
4085 fold_convert (type, OMP_CLAUSE_LINEAR_STEP (c)));
4086 t = fold_build2 (type == TREE_TYPE (t) ? PLUS_EXPR
4087 : POINTER_PLUS_EXPR, TREE_TYPE (t), v, a);
4088 t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
4089 false, GSI_CONTINUE_LINKING);
4090 assign_stmt = gimple_build_assign (dest, t);
4091 gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING);
4092 }
4093 if (fd->collapse > 1)
4094 expand_omp_for_init_vars (fd, &gsi, counts, inner_stmt, startvar);
4095
4096 if (!broken_loop)
4097 {
4098 /* The code controlling the sequential loop goes in CONT_BB,
4099 replacing the GIMPLE_OMP_CONTINUE. */
4100 gsi = gsi_last_bb (cont_bb);
4101 gomp_continue *cont_stmt = as_a <gomp_continue *> (gsi_stmt (gsi));
4102 vmain = gimple_omp_continue_control_use (cont_stmt);
4103 vback = gimple_omp_continue_control_def (cont_stmt);
4104
4105 if (!gimple_omp_for_combined_p (fd->for_stmt))
4106 {
4107 if (POINTER_TYPE_P (type))
4108 t = fold_build_pointer_plus (vmain, step);
4109 else
4110 t = fold_build2 (PLUS_EXPR, type, vmain, step);
4111 if (DECL_P (vback) && TREE_ADDRESSABLE (vback))
4112 t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
4113 true, GSI_SAME_STMT);
4114 assign_stmt = gimple_build_assign (vback, t);
4115 gsi_insert_before (&gsi, assign_stmt, GSI_SAME_STMT);
4116
4117 if (tree_int_cst_equal (fd->chunk_size, integer_one_node))
4118 t = build2 (EQ_EXPR, boolean_type_node,
4119 build_int_cst (itype, 0),
4120 build_int_cst (itype, 1));
4121 else
4122 t = build2 (fd->loop.cond_code, boolean_type_node,
4123 DECL_P (vback) && TREE_ADDRESSABLE (vback)
4124 ? t : vback, e);
4125 gsi_insert_before (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT);
4126 }
4127
4128 /* Remove GIMPLE_OMP_CONTINUE. */
4129 gsi_remove (&gsi, true);
4130
4131 if (fd->collapse > 1 && !gimple_omp_for_combined_p (fd->for_stmt))
4132 collapse_bb = extract_omp_for_update_vars (fd, cont_bb, body_bb);
4133
4134 /* Trip update code goes into TRIP_UPDATE_BB. */
4135 gsi = gsi_start_bb (trip_update_bb);
4136
4137 t = build_int_cst (itype, 1);
4138 t = build2 (PLUS_EXPR, itype, trip_main, t);
4139 assign_stmt = gimple_build_assign (trip_back, t);
4140 gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING);
4141 }
4142
4143 /* Replace the GIMPLE_OMP_RETURN with a barrier, or nothing. */
4144 gsi = gsi_last_bb (exit_bb);
4145 if (!gimple_omp_return_nowait_p (gsi_stmt (gsi)))
4146 {
4147 t = gimple_omp_return_lhs (gsi_stmt (gsi));
4148 gsi_insert_after (&gsi, omp_build_barrier (t), GSI_SAME_STMT);
4149 }
4150 gsi_remove (&gsi, true);
4151
4152 /* Connect the new blocks. */
4153 find_edge (iter_part_bb, seq_start_bb)->flags = EDGE_TRUE_VALUE;
4154 find_edge (iter_part_bb, fin_bb)->flags = EDGE_FALSE_VALUE;
4155
4156 if (!broken_loop)
4157 {
4158 se = find_edge (cont_bb, body_bb);
4159 if (se == NULL)
4160 {
4161 se = BRANCH_EDGE (cont_bb);
4162 gcc_assert (single_succ (se->dest) == body_bb);
4163 }
4164 if (gimple_omp_for_combined_p (fd->for_stmt))
4165 {
4166 remove_edge (se);
4167 se = NULL;
4168 }
4169 else if (fd->collapse > 1)
4170 {
4171 remove_edge (se);
4172 se = make_edge (cont_bb, collapse_bb, EDGE_TRUE_VALUE);
4173 }
4174 else
4175 se->flags = EDGE_TRUE_VALUE;
4176 find_edge (cont_bb, trip_update_bb)->flags
4177 = se ? EDGE_FALSE_VALUE : EDGE_FALLTHRU;
4178
4179 redirect_edge_and_branch (single_succ_edge (trip_update_bb),
4180 iter_part_bb);
4181 }
4182
4183 if (gimple_in_ssa_p (cfun))
4184 {
4185 gphi_iterator psi;
4186 gphi *phi;
4187 edge re, ene;
4188 edge_var_map *vm;
4189 size_t i;
4190
4191 gcc_assert (fd->collapse == 1 && !broken_loop);
4192
4193 /* When we redirect the edge from trip_update_bb to iter_part_bb, we
4194 remove arguments of the phi nodes in fin_bb. We need to create
4195 appropriate phi nodes in iter_part_bb instead. */
4196 se = find_edge (iter_part_bb, fin_bb);
4197 re = single_succ_edge (trip_update_bb);
4198 vec<edge_var_map> *head = redirect_edge_var_map_vector (re);
4199 ene = single_succ_edge (entry_bb);
4200
4201 psi = gsi_start_phis (fin_bb);
4202 for (i = 0; !gsi_end_p (psi) && head->iterate (i, &vm);
4203 gsi_next (&psi), ++i)
4204 {
4205 gphi *nphi;
4206 source_location locus;
4207
4208 phi = psi.phi ();
4209 if (operand_equal_p (gimple_phi_arg_def (phi, 0),
4210 redirect_edge_var_map_def (vm), 0))
4211 continue;
4212
4213 t = gimple_phi_result (phi);
4214 gcc_assert (t == redirect_edge_var_map_result (vm));
4215
4216 if (!single_pred_p (fin_bb))
4217 t = copy_ssa_name (t, phi);
4218
4219 nphi = create_phi_node (t, iter_part_bb);
4220
4221 t = PHI_ARG_DEF_FROM_EDGE (phi, se);
4222 locus = gimple_phi_arg_location_from_edge (phi, se);
4223
4224 /* A special case -- fd->loop.v is not yet computed in
4225 iter_part_bb, we need to use vextra instead. */
4226 if (t == fd->loop.v)
4227 t = vextra;
4228 add_phi_arg (nphi, t, ene, locus);
4229 locus = redirect_edge_var_map_location (vm);
4230 tree back_arg = redirect_edge_var_map_def (vm);
4231 add_phi_arg (nphi, back_arg, re, locus);
4232 edge ce = find_edge (cont_bb, body_bb);
4233 if (ce == NULL)
4234 {
4235 ce = BRANCH_EDGE (cont_bb);
4236 gcc_assert (single_succ (ce->dest) == body_bb);
4237 ce = single_succ_edge (ce->dest);
4238 }
4239 gphi *inner_loop_phi = find_phi_with_arg_on_edge (back_arg, ce);
4240 gcc_assert (inner_loop_phi != NULL);
4241 add_phi_arg (inner_loop_phi, gimple_phi_result (nphi),
4242 find_edge (seq_start_bb, body_bb), locus);
4243
4244 if (!single_pred_p (fin_bb))
4245 add_phi_arg (phi, gimple_phi_result (nphi), se, locus);
4246 }
4247 gcc_assert (gsi_end_p (psi) && (head == NULL || i == head->length ()));
4248 redirect_edge_var_map_clear (re);
4249 if (single_pred_p (fin_bb))
4250 while (1)
4251 {
4252 psi = gsi_start_phis (fin_bb);
4253 if (gsi_end_p (psi))
4254 break;
4255 remove_phi_node (&psi, false);
4256 }
4257
4258 /* Make phi node for trip. */
4259 phi = create_phi_node (trip_main, iter_part_bb);
4260 add_phi_arg (phi, trip_back, single_succ_edge (trip_update_bb),
4261 UNKNOWN_LOCATION);
4262 add_phi_arg (phi, trip_init, single_succ_edge (entry_bb),
4263 UNKNOWN_LOCATION);
4264 }
4265
4266 if (!broken_loop)
4267 set_immediate_dominator (CDI_DOMINATORS, trip_update_bb, cont_bb);
4268 set_immediate_dominator (CDI_DOMINATORS, iter_part_bb,
4269 recompute_dominator (CDI_DOMINATORS, iter_part_bb));
4270 set_immediate_dominator (CDI_DOMINATORS, fin_bb,
4271 recompute_dominator (CDI_DOMINATORS, fin_bb));
4272 set_immediate_dominator (CDI_DOMINATORS, seq_start_bb,
4273 recompute_dominator (CDI_DOMINATORS, seq_start_bb));
4274 set_immediate_dominator (CDI_DOMINATORS, body_bb,
4275 recompute_dominator (CDI_DOMINATORS, body_bb));
4276
4277 if (!broken_loop)
4278 {
4279 struct loop *loop = body_bb->loop_father;
4280 struct loop *trip_loop = alloc_loop ();
4281 trip_loop->header = iter_part_bb;
4282 trip_loop->latch = trip_update_bb;
4283 add_loop (trip_loop, iter_part_bb->loop_father);
4284
4285 if (loop != entry_bb->loop_father)
4286 {
4287 gcc_assert (loop->header == body_bb);
4288 gcc_assert (loop->latch == region->cont
4289 || single_pred (loop->latch) == region->cont);
4290 trip_loop->inner = loop;
4291 return;
4292 }
4293
4294 if (!gimple_omp_for_combined_p (fd->for_stmt))
4295 {
4296 loop = alloc_loop ();
4297 loop->header = body_bb;
4298 if (collapse_bb == NULL)
4299 loop->latch = cont_bb;
4300 add_loop (loop, trip_loop);
4301 }
4302 }
4303 }
4304
4305 /* A subroutine of expand_omp_for. Generate code for _Cilk_for loop.
4306 Given parameters:
4307 for (V = N1; V cond N2; V += STEP) BODY;
4308
4309 where COND is "<" or ">" or "!=", we generate pseudocode
4310
4311 for (ind_var = low; ind_var < high; ind_var++)
4312 {
4313 V = n1 + (ind_var * STEP)
4314
4315 <BODY>
4316 }
4317
4318 In the above pseudocode, low and high are function parameters of the
4319 child function. In the function below, we insert a temporary
4320 variable whose initialization and bound refer to two placeholder values
4321 that cannot appear in the body of _Cilk_for (since OMP_FOR cannot be mixed
4322 with _Cilk_for). These placeholders are later replaced with the __low and
4323 __high parameters by the function that handles taskreg. */
4324
4325
4326 static void
4327 expand_cilk_for (struct omp_region *region, struct omp_for_data *fd)
4328 {
/* Rewrite the GIMPLE_OMP_FOR of a _Cilk_for REGION into a plain counted
   loop over a fresh induction variable running from __low to __high, and
   record in REGION->ws_args which __libcilkrts_cilk_for_* flavor (plus
   the grain value) the taskreg expander should call.  */
4329 bool broken_loop = region->cont == NULL;
4330 basic_block entry_bb = region->entry;
4331 basic_block cont_bb = region->cont;
4332
4333 gcc_assert (EDGE_COUNT (entry_bb->succs) == 2);
4334 gcc_assert (broken_loop
4335 || BRANCH_EDGE (entry_bb)->dest == FALLTHRU_EDGE (cont_bb)->dest);
4336 basic_block l0_bb = FALLTHRU_EDGE (entry_bb)->dest;
4337 basic_block l1_bb, l2_bb;
4338
/* Carve out L1 (loop test) and L2 (loop exit) blocks.  When the loop is
   "broken" (no continue/latch, e.g. body never falls through), L1 is
   synthesized by splitting the entry's branch edge instead.  */
4339 if (!broken_loop)
4340 {
4341 gcc_assert (BRANCH_EDGE (cont_bb)->dest == l0_bb);
4342 gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
4343 l1_bb = split_block (cont_bb, last_stmt (cont_bb))->dest;
4344 l2_bb = BRANCH_EDGE (entry_bb)->dest;
4345 }
4346 else
4347 {
4348 BRANCH_EDGE (entry_bb)->flags &= ~EDGE_ABNORMAL;
4349 l1_bb = split_edge (BRANCH_EDGE (entry_bb));
4350 l2_bb = single_succ (l1_bb);
4351 }
4352 basic_block exit_bb = region->exit;
4353 basic_block l2_dom_bb = NULL;
4354
4355 gimple_stmt_iterator gsi = gsi_last_bb (entry_bb);
4356
4357 /* Below statements until the "tree high_val = ..." are pseudo statements
4358 used to pass information to be used by expand_omp_taskreg.
4359 low_val and high_val will be replaced by the __low and __high
4360 parameter from the child function.
4361
4362 The call_exprs part is a place-holder, it is mainly used
4363 to distinctly identify to the top-level part that this is
4364 where we should put low and high (reasoning given in header
4365 comment). */
4366
/* Locate the __low/__high parameters on the already-outlined child
   function of the enclosing parallel; they bound this chunk's
   iteration range at run time.  */
4367 gomp_parallel *par_stmt
4368 = as_a <gomp_parallel *> (last_stmt (region->outer->entry));
4369 tree child_fndecl = gimple_omp_parallel_child_fn (par_stmt);
4370 tree t, low_val = NULL_TREE, high_val = NULL_TREE;
4371 for (t = DECL_ARGUMENTS (child_fndecl); t; t = TREE_CHAIN (t))
4372 {
4373 if (id_equal (DECL_NAME (t), "__high"))
4374 high_val = t;
4375 else if (id_equal (DECL_NAME (t), "__low"))
4376 low_val = t;
4377 }
4378 gcc_assert (low_val && high_val);
4379
4380 tree type = TREE_TYPE (low_val);
4381 tree ind_var = create_tmp_reg (type, "__cilk_ind_var");
4382 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
4383
4384 /* Not needed in SSA form right now. */
4385 gcc_assert (!gimple_in_ssa_p (cfun));
4386 if (l2_dom_bb == NULL)
4387 l2_dom_bb = l1_bb;
4388
4389 tree n1 = low_val;
4390 tree n2 = high_val;
4391
/* ind_var = __low; the loop test below iterates while ind_var < __high.  */
4392 gimple *stmt = gimple_build_assign (ind_var, n1);
4393
4394 /* Replace the GIMPLE_OMP_FOR statement. */
4395 gsi_replace (&gsi, stmt, true);
4396
4397 if (!broken_loop)
4398 {
4399 /* Code to control the increment goes in the CONT_BB. */
4400 gsi = gsi_last_bb (cont_bb);
4401 stmt = gsi_stmt (gsi);
4402 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_CONTINUE);
/* ind_var += 1 replaces the OMP continue; the user's V is recomputed
   from ind_var at the top of each iteration instead.  */
4403 stmt = gimple_build_assign (ind_var, PLUS_EXPR, ind_var,
4404 build_one_cst (type));
4405
4406 /* Replace GIMPLE_OMP_CONTINUE. */
4407 gsi_replace (&gsi, stmt, true);
4408 }
4409
4410 /* Emit the condition in L1_BB. */
4411 gsi = gsi_after_labels (l1_bb);
/* V = N1 + ind_var * STEP, converted to V's type (POINTER_PLUS_EXPR
   with a sizetype offset when V is a pointer).  */
4412 t = fold_build2 (MULT_EXPR, TREE_TYPE (fd->loop.step),
4413 fold_convert (TREE_TYPE (fd->loop.step), ind_var),
4414 fd->loop.step);
4415 if (POINTER_TYPE_P (TREE_TYPE (fd->loop.n1)))
4416 t = fold_build2 (POINTER_PLUS_EXPR, TREE_TYPE (fd->loop.n1),
4417 fd->loop.n1, fold_convert (sizetype, t));
4418 else
4419 t = fold_build2 (PLUS_EXPR, TREE_TYPE (fd->loop.n1),
4420 fd->loop.n1, fold_convert (TREE_TYPE (fd->loop.n1), t));
4421 t = fold_convert (TREE_TYPE (fd->loop.v), t);
4422 expand_omp_build_assign (&gsi, fd->loop.v, t);
4423
4424 /* The condition is always '<' since the runtime will fill in the low
4425 and high values. */
4426 stmt = gimple_build_cond (LT_EXPR, ind_var, n2, NULL_TREE, NULL_TREE);
4427 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
4428
4429 /* Remove GIMPLE_OMP_RETURN. */
4430 gsi = gsi_last_bb (exit_bb);
4431 gsi_remove (&gsi, true);
4432
4433 /* Connect the new blocks. */
4434 remove_edge (FALLTHRU_EDGE (entry_bb));
4435
/* E becomes the loop-back (condition true) edge into the body, NE the
   exit (condition false) edge out of L1.  */
4436 edge e, ne;
4437 if (!broken_loop)
4438 {
4439 remove_edge (BRANCH_EDGE (entry_bb));
4440 make_edge (entry_bb, l1_bb, EDGE_FALLTHRU);
4441
4442 e = BRANCH_EDGE (l1_bb);
4443 ne = FALLTHRU_EDGE (l1_bb);
4444 e->flags = EDGE_TRUE_VALUE;
4445 }
4446 else
4447 {
4448 single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
4449
4450 ne = single_succ_edge (l1_bb);
4451 e = make_edge (l1_bb, l0_bb, EDGE_TRUE_VALUE);
4452
4453 }
4454 ne->flags = EDGE_FALSE_VALUE;
/* Guess the loop continues on ~7 of 8 visits to the test.  */
4455 e->probability = profile_probability::guessed_always ().apply_scale (7, 8);
4456 ne->probability = e->probability.invert ();
4457
4458 set_immediate_dominator (CDI_DOMINATORS, l1_bb, entry_bb);
4459 set_immediate_dominator (CDI_DOMINATORS, l2_bb, l2_dom_bb);
4460 set_immediate_dominator (CDI_DOMINATORS, l0_bb, l1_bb);
4461
4462 if (!broken_loop)
4463 {
/* Register the new natural loop; safelen = INT_MAX marks it as having
   no vectorization-limiting dependences — presumably because _Cilk_for
   iterations are independent by language rule (NOTE(review): confirm).  */
4464 struct loop *loop = alloc_loop ();
4465 loop->header = l1_bb;
4466 loop->latch = cont_bb;
4467 add_loop (loop, l1_bb->loop_father);
4468 loop->safelen = INT_MAX;
4469 }
4470
4471 /* Pick the correct library function based on the precision of the
4472 induction variable type. */
4473 tree lib_fun = NULL_TREE;
4474 if (TYPE_PRECISION (type) == 32)
4475 lib_fun = cilk_for_32_fndecl;
4476 else if (TYPE_PRECISION (type) == 64)
4477 lib_fun = cilk_for_64_fndecl;
4478 else
4479 gcc_unreachable ();
4480
4481 gcc_assert (fd->sched_kind == OMP_CLAUSE_SCHEDULE_CILKFOR);
4482
4483 /* WS_ARGS contains the library function flavor to call:
4484 __libcilkrts_cilk_for_64 or __libcilkrts_cilk_for_32), and the
4485 user-defined grain value. If the user does not define one, then zero
4486 is passed in by the parser. */
4487 vec_alloc (region->ws_args, 2);
4488 region->ws_args->quick_push (lib_fun);
4489 region->ws_args->quick_push (fd->chunk_size);
4490 }
4491
4492 /* A subroutine of expand_omp_for. Generate code for a simd non-worksharing
4493 loop. Given parameters:
4494
4495 for (V = N1; V cond N2; V += STEP) BODY;
4496
4497 where COND is "<" or ">", we generate pseudocode
4498
4499 V = N1;
4500 goto L1;
4501 L0:
4502 BODY;
4503 V += STEP;
4504 L1:
4505 if (V cond N2) goto L0; else goto L2;
4506 L2:
4507
4508 For collapsed loops, given parameters:
4509 collapse(3)
4510 for (V1 = N11; V1 cond1 N12; V1 += STEP1)
4511 for (V2 = N21; V2 cond2 N22; V2 += STEP2)
4512 for (V3 = N31; V3 cond3 N32; V3 += STEP3)
4513 BODY;
4514
4515 we generate pseudocode
4516
4517 if (cond3 is <)
4518 adj = STEP3 - 1;
4519 else
4520 adj = STEP3 + 1;
4521 count3 = (adj + N32 - N31) / STEP3;
4522 if (cond2 is <)
4523 adj = STEP2 - 1;
4524 else
4525 adj = STEP2 + 1;
4526 count2 = (adj + N22 - N21) / STEP2;
4527 if (cond1 is <)
4528 adj = STEP1 - 1;
4529 else
4530 adj = STEP1 + 1;
4531 count1 = (adj + N12 - N11) / STEP1;
4532 count = count1 * count2 * count3;
4533 V = 0;
4534 V1 = N11;
4535 V2 = N21;
4536 V3 = N31;
4537 goto L1;
4538 L0:
4539 BODY;
4540 V += 1;
4541 V3 += STEP3;
4542 V2 += (V3 cond3 N32) ? 0 : STEP2;
4543 V3 = (V3 cond3 N32) ? V3 : N31;
4544 V1 += (V2 cond2 N22) ? 0 : STEP1;
4545 V2 = (V2 cond2 N22) ? V2 : N21;
4546 L1:
4547 if (V < count) goto L0; else goto L2;
4548 L2:
4549
4550 */
4551
4552 static void
4553 expand_omp_simd (struct omp_region *region, struct omp_for_data *fd)
4554 {
4555 tree type, t;
4556 basic_block entry_bb, cont_bb, exit_bb, l0_bb, l1_bb, l2_bb, l2_dom_bb;
4557 gimple_stmt_iterator gsi;
4558 gimple *stmt;
4559 gcond *cond_stmt;
4560 bool broken_loop = region->cont == NULL;
4561 edge e, ne;
4562 tree *counts = NULL;
4563 int i;
4564 int safelen_int = INT_MAX;
4565 tree safelen = omp_find_clause (gimple_omp_for_clauses (fd->for_stmt),
4566 OMP_CLAUSE_SAFELEN);
4567 tree simduid = omp_find_clause (gimple_omp_for_clauses (fd->for_stmt),
4568 OMP_CLAUSE__SIMDUID_);
4569 tree n1, n2;
4570
4571 if (safelen)
4572 {
4573 safelen = OMP_CLAUSE_SAFELEN_EXPR (safelen);
4574 if (TREE_CODE (safelen) != INTEGER_CST)
4575 safelen_int = 0;
4576 else if (tree_fits_uhwi_p (safelen) && tree_to_uhwi (safelen) < INT_MAX)
4577 safelen_int = tree_to_uhwi (safelen);
4578 if (safelen_int == 1)
4579 safelen_int = 0;
4580 }
4581 type = TREE_TYPE (fd->loop.v);
4582 entry_bb = region->entry;
4583 cont_bb = region->cont;
4584 gcc_assert (EDGE_COUNT (entry_bb->succs) == 2);
4585 gcc_assert (broken_loop
4586 || BRANCH_EDGE (entry_bb)->dest == FALLTHRU_EDGE (cont_bb)->dest);
4587 l0_bb = FALLTHRU_EDGE (entry_bb)->dest;
4588 if (!broken_loop)
4589 {
4590 gcc_assert (BRANCH_EDGE (cont_bb)->dest == l0_bb);
4591 gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
4592 l1_bb = split_block (cont_bb, last_stmt (cont_bb))->dest;
4593 l2_bb = BRANCH_EDGE (entry_bb)->dest;
4594 }
4595 else
4596 {
4597 BRANCH_EDGE (entry_bb)->flags &= ~EDGE_ABNORMAL;
4598 l1_bb = split_edge (BRANCH_EDGE (entry_bb));
4599 l2_bb = single_succ (l1_bb);
4600 }
4601 exit_bb = region->exit;
4602 l2_dom_bb = NULL;
4603
4604 gsi = gsi_last_bb (entry_bb);
4605
4606 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
4607 /* Not needed in SSA form right now. */
4608 gcc_assert (!gimple_in_ssa_p (cfun));
4609 if (fd->collapse > 1)
4610 {
4611 int first_zero_iter = -1, dummy = -1;
4612 basic_block zero_iter_bb = l2_bb, dummy_bb = NULL;
4613
4614 counts = XALLOCAVEC (tree, fd->collapse);
4615 expand_omp_for_init_counts (fd, &gsi, entry_bb, counts,
4616 zero_iter_bb, first_zero_iter,
4617 dummy_bb, dummy, l2_dom_bb);
4618 }
4619 if (l2_dom_bb == NULL)
4620 l2_dom_bb = l1_bb;
4621
4622 n1 = fd->loop.n1;
4623 n2 = fd->loop.n2;
4624 if (gimple_omp_for_combined_into_p (fd->for_stmt))
4625 {
4626 tree innerc = omp_find_clause (gimple_omp_for_clauses (fd->for_stmt),
4627 OMP_CLAUSE__LOOPTEMP_);
4628 gcc_assert (innerc);
4629 n1 = OMP_CLAUSE_DECL (innerc);
4630 innerc = omp_find_clause (OMP_CLAUSE_CHAIN (innerc),
4631 OMP_CLAUSE__LOOPTEMP_);
4632 gcc_assert (innerc);
4633 n2 = OMP_CLAUSE_DECL (innerc);
4634 }
4635 tree step = fd->loop.step;
4636
4637 bool is_simt = omp_find_clause (gimple_omp_for_clauses (fd->for_stmt),
4638 OMP_CLAUSE__SIMT_);
4639 if (is_simt)
4640 {
4641 cfun->curr_properties &= ~PROP_gimple_lomp_dev;
4642 is_simt = safelen_int > 1;
4643 }
4644 tree simt_lane = NULL_TREE, simt_maxlane = NULL_TREE;
4645 if (is_simt)
4646 {
4647 simt_lane = create_tmp_var (unsigned_type_node);
4648 gimple *g = gimple_build_call_internal (IFN_GOMP_SIMT_LANE, 0);
4649 gimple_call_set_lhs (g, simt_lane);
4650 gsi_insert_before (&gsi, g, GSI_SAME_STMT);
4651 tree offset = fold_build2 (MULT_EXPR, TREE_TYPE (step), step,
4652 fold_convert (TREE_TYPE (step), simt_lane));
4653 n1 = fold_convert (type, n1);
4654 if (POINTER_TYPE_P (type))
4655 n1 = fold_build_pointer_plus (n1, offset);
4656 else
4657 n1 = fold_build2 (PLUS_EXPR, type, n1, fold_convert (type, offset));
4658
4659 /* Collapsed loops not handled for SIMT yet: limit to one lane only. */
4660 if (fd->collapse > 1)
4661 simt_maxlane = build_one_cst (unsigned_type_node);
4662 else if (safelen_int < omp_max_simt_vf ())
4663 simt_maxlane = build_int_cst (unsigned_type_node, safelen_int);
4664 tree vf
4665 = build_call_expr_internal_loc (UNKNOWN_LOCATION, IFN_GOMP_SIMT_VF,
4666 unsigned_type_node, 0);
4667 if (simt_maxlane)
4668 vf = fold_build2 (MIN_EXPR, unsigned_type_node, vf, simt_maxlane);
4669 vf = fold_convert (TREE_TYPE (step), vf);
4670 step = fold_build2 (MULT_EXPR, TREE_TYPE (step), step, vf);
4671 }
4672
4673 expand_omp_build_assign (&gsi, fd->loop.v, fold_convert (type, n1));
4674 if (fd->collapse > 1)
4675 {
4676 if (gimple_omp_for_combined_into_p (fd->for_stmt))
4677 {
4678 gsi_prev (&gsi);
4679 expand_omp_for_init_vars (fd, &gsi, counts, NULL, n1);
4680 gsi_next (&gsi);
4681 }
4682 else
4683 for (i = 0; i < fd->collapse; i++)
4684 {
4685 tree itype = TREE_TYPE (fd->loops[i].v);
4686 if (POINTER_TYPE_P (itype))
4687 itype = signed_type_for (itype);
4688 t = fold_convert (TREE_TYPE (fd->loops[i].v), fd->loops[i].n1);
4689 expand_omp_build_assign (&gsi, fd->loops[i].v, t);
4690 }
4691 }
4692
4693 /* Remove the GIMPLE_OMP_FOR statement. */
4694 gsi_remove (&gsi, true);
4695
4696 if (!broken_loop)
4697 {
4698 /* Code to control the increment goes in the CONT_BB. */
4699 gsi = gsi_last_bb (cont_bb);
4700 stmt = gsi_stmt (gsi);
4701 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_CONTINUE);
4702
4703 if (POINTER_TYPE_P (type))
4704 t = fold_build_pointer_plus (fd->loop.v, step);
4705 else
4706 t = fold_build2 (PLUS_EXPR, type, fd->loop.v, step);
4707 expand_omp_build_assign (&gsi, fd->loop.v, t);
4708
4709 if (fd->collapse > 1)
4710 {
4711 i = fd->collapse - 1;
4712 if (POINTER_TYPE_P (TREE_TYPE (fd->loops[i].v)))
4713 {
4714 t = fold_convert (sizetype, fd->loops[i].step);
4715 t = fold_build_pointer_plus (fd->loops[i].v, t);
4716 }
4717 else
4718 {
4719 t = fold_convert (TREE_TYPE (fd->loops[i].v),
4720 fd->loops[i].step);
4721 t = fold_build2 (PLUS_EXPR, TREE_TYPE (fd->loops[i].v),
4722 fd->loops[i].v, t);
4723 }
4724 expand_omp_build_assign (&gsi, fd->loops[i].v, t);
4725
4726 for (i = fd->collapse - 1; i > 0; i--)
4727 {
4728 tree itype = TREE_TYPE (fd->loops[i].v);
4729 tree itype2 = TREE_TYPE (fd->loops[i - 1].v);
4730 if (POINTER_TYPE_P (itype2))
4731 itype2 = signed_type_for (itype2);
4732 t = build3 (COND_EXPR, itype2,
4733 build2 (fd->loops[i].cond_code, boolean_type_node,
4734 fd->loops[i].v,
4735 fold_convert (itype, fd->loops[i].n2)),
4736 build_int_cst (itype2, 0),
4737 fold_convert (itype2, fd->loops[i - 1].step));
4738 if (POINTER_TYPE_P (TREE_TYPE (fd->loops[i - 1].v)))
4739 t = fold_build_pointer_plus (fd->loops[i - 1].v, t);
4740 else
4741 t = fold_build2 (PLUS_EXPR, itype2, fd->loops[i - 1].v, t);
4742 expand_omp_build_assign (&gsi, fd->loops[i - 1].v, t);
4743
4744 t = build3 (COND_EXPR, itype,
4745 build2 (fd->loops[i].cond_code, boolean_type_node,
4746 fd->loops[i].v,
4747 fold_convert (itype, fd->loops[i].n2)),
4748 fd->loops[i].v,
4749 fold_convert (itype, fd->loops[i].n1));
4750 expand_omp_build_assign (&gsi, fd->loops[i].v, t);
4751 }
4752 }
4753
4754 /* Remove GIMPLE_OMP_CONTINUE. */
4755 gsi_remove (&gsi, true);
4756 }
4757
4758 /* Emit the condition in L1_BB. */
4759 gsi = gsi_start_bb (l1_bb);
4760
4761 t = fold_convert (type, n2);
4762 t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
4763 false, GSI_CONTINUE_LINKING);
4764 tree v = fd->loop.v;
4765 if (DECL_P (v) && TREE_ADDRESSABLE (v))
4766 v = force_gimple_operand_gsi (&gsi, v, true, NULL_TREE,
4767 false, GSI_CONTINUE_LINKING);
4768 t = build2 (fd->loop.cond_code, boolean_type_node, v, t);
4769 cond_stmt = gimple_build_cond_empty (t);
4770 gsi_insert_after (&gsi, cond_stmt, GSI_CONTINUE_LINKING);
4771 if (walk_tree (gimple_cond_lhs_ptr (cond_stmt), expand_omp_regimplify_p,
4772 NULL, NULL)
4773 || walk_tree (gimple_cond_rhs_ptr (cond_stmt), expand_omp_regimplify_p,
4774 NULL, NULL))
4775 {
4776 gsi = gsi_for_stmt (cond_stmt);
4777 gimple_regimplify_operands (cond_stmt, &gsi);
4778 }
4779
4780 /* Add 'V -= STEP * (SIMT_VF - 1)' after the loop. */
4781 if (is_simt)
4782 {
4783 gsi = gsi_start_bb (l2_bb);
4784 step = fold_build2 (MINUS_EXPR, TREE_TYPE (step), fd->loop.step, step);
4785 if (POINTER_TYPE_P (type))
4786 t = fold_build_pointer_plus (fd->loop.v, step);
4787 else
4788 t = fold_build2 (PLUS_EXPR, type, fd->loop.v, step);
4789 expand_omp_build_assign (&gsi, fd->loop.v, t);
4790 }
4791
4792 /* Remove GIMPLE_OMP_RETURN. */
4793 gsi = gsi_last_bb (exit_bb);
4794 gsi_remove (&gsi, true);
4795
4796 /* Connect the new blocks. */
4797 remove_edge (FALLTHRU_EDGE (entry_bb));
4798
4799 if (!broken_loop)
4800 {
4801 remove_edge (BRANCH_EDGE (entry_bb));
4802 make_edge (entry_bb, l1_bb, EDGE_FALLTHRU);
4803
4804 e = BRANCH_EDGE (l1_bb);
4805 ne = FALLTHRU_EDGE (l1_bb);
4806 e->flags = EDGE_TRUE_VALUE;
4807 }
4808 else
4809 {
4810 single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
4811
4812 ne = single_succ_edge (l1_bb);
4813 e = make_edge (l1_bb, l0_bb, EDGE_TRUE_VALUE);
4814
4815 }
4816 ne->flags = EDGE_FALSE_VALUE;
4817 e->probability = profile_probability::guessed_always ().apply_scale (7, 8);
4818 ne->probability = e->probability.invert ();
4819
4820 set_immediate_dominator (CDI_DOMINATORS, l1_bb, entry_bb);
4821 set_immediate_dominator (CDI_DOMINATORS, l0_bb, l1_bb);
4822
4823 if (simt_maxlane)
4824 {
4825 cond_stmt = gimple_build_cond (LT_EXPR, simt_lane, simt_maxlane,
4826 NULL_TREE, NULL_TREE);
4827 gsi = gsi_last_bb (entry_bb);
4828 gsi_insert_after (&gsi, cond_stmt, GSI_NEW_STMT);
4829 make_edge (entry_bb, l2_bb, EDGE_FALSE_VALUE);
4830 FALLTHRU_EDGE (entry_bb)->flags = EDGE_TRUE_VALUE;
4831 FALLTHRU_EDGE (entry_bb)->probability
4832 = profile_probability::guessed_always ().apply_scale (7, 8);
4833 BRANCH_EDGE (entry_bb)->probability
4834 = FALLTHRU_EDGE (entry_bb)->probability.invert ();
4835 l2_dom_bb = entry_bb;
4836 }
4837 set_immediate_dominator (CDI_DOMINATORS, l2_bb, l2_dom_bb);
4838
4839 if (!broken_loop)
4840 {
4841 struct loop *loop = alloc_loop ();
4842 loop->header = l1_bb;
4843 loop->latch = cont_bb;
4844 add_loop (loop, l1_bb->loop_father);
4845 loop->safelen = safelen_int;
4846 if (simduid)
4847 {
4848 loop->simduid = OMP_CLAUSE__SIMDUID__DECL (simduid);
4849 cfun->has_simduid_loops = true;
4850 }
4851 /* If not -fno-tree-loop-vectorize, hint that we want to vectorize
4852 the loop. */
4853 if ((flag_tree_loop_vectorize
4854 || !global_options_set.x_flag_tree_loop_vectorize)
4855 && flag_tree_loop_optimize
4856 && loop->safelen > 1)
4857 {
4858 loop->force_vectorize = true;
4859 cfun->has_force_vectorize_loops = true;
4860 }
4861 }
4862 else if (simduid)
4863 cfun->has_simduid_loops = true;
4864 }
4865
4866 /* Taskloop construct is represented after gimplification with
4867 two GIMPLE_OMP_FOR constructs with GIMPLE_OMP_TASK sandwiched
4868 in between them. This routine expands the outer GIMPLE_OMP_FOR,
4869 which should just compute all the needed loop temporaries
4870 for GIMPLE_OMP_TASK. */
4871
static void
expand_omp_taskloop_for_outer (struct omp_region *region,
			       struct omp_for_data *fd,
			       gimple *inner_stmt)
{
  tree type, bias = NULL_TREE;
  basic_block entry_bb, cont_bb, exit_bb;
  gimple_stmt_iterator gsi;
  gassign *assign_stmt;
  tree *counts = NULL;
  int i;

  /* The outer GIMPLE_OMP_FOR of a taskloop always wraps a
     GIMPLE_OMP_TASK with the taskloop flag set and has a continue
     region.  */
  gcc_assert (inner_stmt);
  gcc_assert (region->cont);
  gcc_assert (gimple_code (inner_stmt) == GIMPLE_OMP_TASK
	      && gimple_omp_task_taskloop_p (inner_stmt));
  type = TREE_TYPE (fd->loop.v);

  /* See if we need to bias by LLONG_MIN.  When the iteration variable
     is signed but the runtime iterator type is unsigned long long,
     shift the range by TYPE_MIN_VALUE so it fits in the unsigned
     type, unless constant bounds prove the values never change
     sign.  */
  if (fd->iter_type == long_long_unsigned_type_node
      && TREE_CODE (type) == INTEGER_TYPE
      && !TYPE_UNSIGNED (type))
    {
      tree n1, n2;

      if (fd->loop.cond_code == LT_EXPR)
	{
	  n1 = fd->loop.n1;
	  n2 = fold_build2 (PLUS_EXPR, type, fd->loop.n2, fd->loop.step);
	}
      else
	{
	  n1 = fold_build2 (MINUS_EXPR, type, fd->loop.n2, fd->loop.step);
	  n2 = fd->loop.n1;
	}
      if (TREE_CODE (n1) != INTEGER_CST
	  || TREE_CODE (n2) != INTEGER_CST
	  || ((tree_int_cst_sgn (n1) < 0) ^ (tree_int_cst_sgn (n2) < 0)))
	bias = fold_convert (fd->iter_type, TYPE_MIN_VALUE (type));
    }

  entry_bb = region->entry;
  cont_bb = region->cont;
  gcc_assert (EDGE_COUNT (entry_bb->succs) == 2);
  gcc_assert (BRANCH_EDGE (entry_bb)->dest == FALLTHRU_EDGE (cont_bb)->dest);
  exit_bb = region->exit;

  /* The GIMPLE_OMP_FOR is the last statement of the entry block.  */
  gsi = gsi_last_bb (entry_bb);
  gimple *for_stmt = gsi_stmt (gsi);
  gcc_assert (gimple_code (for_stmt) == GIMPLE_OMP_FOR);
  if (fd->collapse > 1)
    {
      int first_zero_iter = -1, dummy = -1;
      basic_block zero_iter_bb = NULL, dummy_bb = NULL, l2_dom_bb = NULL;

      /* For collapsed loops compute the per-loop iteration counts,
	 from which the combined trip count is derived.  */
      counts = XALLOCAVEC (tree, fd->collapse);
      expand_omp_for_init_counts (fd, &gsi, entry_bb, counts,
				  zero_iter_bb, first_zero_iter,
				  dummy_bb, dummy, l2_dom_bb);

      if (zero_iter_bb)
	{
	  /* Some counts[i] vars might be uninitialized if
	     some loop has zero iterations.  But the body shouldn't
	     be executed in that case, so just avoid uninit warnings.  */
	  for (i = first_zero_iter; i < fd->collapse; i++)
	    if (SSA_VAR_P (counts[i]))
	      TREE_NO_WARNING (counts[i]) = 1;
	  /* Split before the GIMPLE_OMP_FOR and route the zero-iteration
	     path into the new entry block, keeping dominators valid.  */
	  gsi_prev (&gsi);
	  edge e = split_block (entry_bb, gsi_stmt (gsi));
	  entry_bb = e->dest;
	  make_edge (zero_iter_bb, entry_bb, EDGE_FALLTHRU);
	  gsi = gsi_last_bb (entry_bb);
	  set_immediate_dominator (CDI_DOMINATORS, entry_bb,
				   get_immediate_dominator (CDI_DOMINATORS,
							    zero_iter_bb));
	}
    }

  /* Convert the bounds N1/N2 to the runtime iterator type, applying
     the LLONG_MIN bias when one was computed above.  */
  tree t0, t1;
  t1 = fd->loop.n2;
  t0 = fd->loop.n1;
  if (POINTER_TYPE_P (TREE_TYPE (t0))
      && TYPE_PRECISION (TREE_TYPE (t0))
	 != TYPE_PRECISION (fd->iter_type))
    {
      /* Avoid casting pointers to integer of a different size.  */
      tree itype = signed_type_for (type);
      t1 = fold_convert (fd->iter_type, fold_convert (itype, t1));
      t0 = fold_convert (fd->iter_type, fold_convert (itype, t0));
    }
  else
    {
      t1 = fold_convert (fd->iter_type, t1);
      t0 = fold_convert (fd->iter_type, t0);
    }
  if (bias)
    {
      t1 = fold_build2 (PLUS_EXPR, fd->iter_type, t1, bias);
      t0 = fold_build2 (PLUS_EXPR, fd->iter_type, t0, bias);
    }

  /* The first two _looptemp_ clauses on the inner GIMPLE_OMP_TASK
     receive the computed start and end of the iteration space.  */
  tree innerc = omp_find_clause (gimple_omp_task_clauses (inner_stmt),
				 OMP_CLAUSE__LOOPTEMP_);
  gcc_assert (innerc);
  tree startvar = OMP_CLAUSE_DECL (innerc);
  innerc = omp_find_clause (OMP_CLAUSE_CHAIN (innerc), OMP_CLAUSE__LOOPTEMP_);
  gcc_assert (innerc);
  tree endvar = OMP_CLAUSE_DECL (innerc);
  if (fd->collapse > 1 && TREE_CODE (fd->loop.n2) != INTEGER_CST)
    {
      gcc_assert (innerc);
      /* Skip the _looptemp_ clauses used for the collapsed loop
	 counts.  */
      for (i = 1; i < fd->collapse; i++)
	{
	  innerc = omp_find_clause (OMP_CLAUSE_CHAIN (innerc),
				    OMP_CLAUSE__LOOPTEMP_);
	  gcc_assert (innerc);
	}
      innerc = omp_find_clause (OMP_CLAUSE_CHAIN (innerc),
				OMP_CLAUSE__LOOPTEMP_);
      if (innerc)
	{
	  /* If needed (inner taskloop has lastprivate clause), propagate
	     down the total number of iterations.  */
	  tree t = force_gimple_operand_gsi (&gsi, fd->loop.n2, false,
					     NULL_TREE, false,
					     GSI_CONTINUE_LINKING);
	  assign_stmt = gimple_build_assign (OMP_CLAUSE_DECL (innerc), t);
	  gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING);
	}
    }

  /* Emit STARTVAR = T0 and ENDVAR = T1 so the task sees the
     iteration range.  */
  t0 = force_gimple_operand_gsi (&gsi, t0, false, NULL_TREE, false,
				 GSI_CONTINUE_LINKING);
  assign_stmt = gimple_build_assign (startvar, t0);
  gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING);

  t1 = force_gimple_operand_gsi (&gsi, t1, false, NULL_TREE, false,
				 GSI_CONTINUE_LINKING);
  assign_stmt = gimple_build_assign (endvar, t1);
  gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING);
  if (fd->collapse > 1)
    expand_omp_for_init_vars (fd, &gsi, counts, inner_stmt, startvar);

  /* Remove the GIMPLE_OMP_FOR statement.  */
  gsi = gsi_for_stmt (for_stmt);
  gsi_remove (&gsi, true);

  /* Remove the last statement of the continue block
     (the GIMPLE_OMP_CONTINUE).  */
  gsi = gsi_last_bb (cont_bb);
  gsi_remove (&gsi, true);

  /* Remove the last statement of the exit block
     (the GIMPLE_OMP_RETURN).  */
  gsi = gsi_last_bb (exit_bb);
  gsi_remove (&gsi, true);

  /* The outer construct does not loop: drop the branch edges so the
     region falls straight through, then fix up dominators.  */
  FALLTHRU_EDGE (entry_bb)->probability = profile_probability::always ();
  remove_edge (BRANCH_EDGE (entry_bb));
  FALLTHRU_EDGE (cont_bb)->probability = profile_probability::always ();
  remove_edge (BRANCH_EDGE (cont_bb));
  set_immediate_dominator (CDI_DOMINATORS, exit_bb, cont_bb);
  set_immediate_dominator (CDI_DOMINATORS, region->entry,
			   recompute_dominator (CDI_DOMINATORS, region->entry));
}
5034
5035 /* Taskloop construct is represented after gimplification with
5036 two GIMPLE_OMP_FOR constructs with GIMPLE_OMP_TASK sandwiched
5037 in between them. This routine expands the inner GIMPLE_OMP_FOR.
5038 GOMP_taskloop{,_ull} function arranges for each task to be given just
5039 a single range of iterations. */
5040
static void
expand_omp_taskloop_for_inner (struct omp_region *region,
			       struct omp_for_data *fd,
			       gimple *inner_stmt)
{
  tree e, t, type, itype, vmain, vback, bias = NULL_TREE;
  basic_block entry_bb, exit_bb, body_bb, cont_bb, collapse_bb = NULL;
  basic_block fin_bb;
  gimple_stmt_iterator gsi;
  edge ep;
  bool broken_loop = region->cont == NULL;
  tree *counts = NULL;
  tree n1, n2, step;

  /* For pointer iterators, do the increment arithmetic in the
     matching signed integer type.  */
  itype = type = TREE_TYPE (fd->loop.v);
  if (POINTER_TYPE_P (type))
    itype = signed_type_for (type);

  /* See if we need to bias by LLONG_MIN.  When the iteration variable
     is signed but the runtime iterator type is unsigned long long,
     shift the range by TYPE_MIN_VALUE so it fits in the unsigned
     type, unless constant bounds prove the values never change
     sign.  This must mirror the biasing done in the outer taskloop
     expansion.  */
  if (fd->iter_type == long_long_unsigned_type_node
      && TREE_CODE (type) == INTEGER_TYPE
      && !TYPE_UNSIGNED (type))
    {
      tree n1, n2;

      if (fd->loop.cond_code == LT_EXPR)
	{
	  n1 = fd->loop.n1;
	  n2 = fold_build2 (PLUS_EXPR, type, fd->loop.n2, fd->loop.step);
	}
      else
	{
	  n1 = fold_build2 (MINUS_EXPR, type, fd->loop.n2, fd->loop.step);
	  n2 = fd->loop.n1;
	}
      if (TREE_CODE (n1) != INTEGER_CST
	  || TREE_CODE (n2) != INTEGER_CST
	  || ((tree_int_cst_sgn (n1) < 0) ^ (tree_int_cst_sgn (n2) < 0)))
	bias = fold_convert (fd->iter_type, TYPE_MIN_VALUE (type));
    }

  /* Identify the blocks of the region: entry (ends in the
     GIMPLE_OMP_FOR), optional continue block (ends in the
     GIMPLE_OMP_CONTINUE), the loop body, the block after the loop
     and the exit block.  */
  entry_bb = region->entry;
  cont_bb = region->cont;
  gcc_assert (EDGE_COUNT (entry_bb->succs) == 2);
  fin_bb = BRANCH_EDGE (entry_bb)->dest;
  gcc_assert (broken_loop
	      || (fin_bb == FALLTHRU_EDGE (cont_bb)->dest));
  body_bb = FALLTHRU_EDGE (entry_bb)->dest;
  if (!broken_loop)
    {
      gcc_assert (BRANCH_EDGE (cont_bb)->dest == body_bb);
      gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
    }
  exit_bb = region->exit;

  /* Iteration space partitioning goes in ENTRY_BB.  */
  gsi = gsi_last_bb (entry_bb);
  gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);

  if (fd->collapse > 1)
    {
      int first_zero_iter = -1, dummy = -1;
      basic_block l2_dom_bb = NULL, dummy_bb = NULL;

      /* For collapsed loops compute the per-loop iteration counts.  */
      counts = XALLOCAVEC (tree, fd->collapse);
      expand_omp_for_init_counts (fd, &gsi, entry_bb, counts,
				  fin_bb, first_zero_iter,
				  dummy_bb, dummy, l2_dom_bb);
      t = NULL_TREE;
    }
  else
    t = integer_one_node;

  /* GOMP_taskloop{,_ull} hands each task a single range; it arrives
     through the first two _looptemp_ clauses of this GIMPLE_OMP_FOR.  */
  step = fd->loop.step;
  tree innerc = omp_find_clause (gimple_omp_for_clauses (fd->for_stmt),
				 OMP_CLAUSE__LOOPTEMP_);
  gcc_assert (innerc);
  n1 = OMP_CLAUSE_DECL (innerc);
  innerc = omp_find_clause (OMP_CLAUSE_CHAIN (innerc), OMP_CLAUSE__LOOPTEMP_);
  gcc_assert (innerc);
  n2 = OMP_CLAUSE_DECL (innerc);
  /* Undo the LLONG_MIN bias applied by the outer expansion.  */
  if (bias)
    {
      n1 = fold_build2 (PLUS_EXPR, fd->iter_type, n1, bias);
      n2 = fold_build2 (PLUS_EXPR, fd->iter_type, n2, bias);
    }
  n1 = force_gimple_operand_gsi (&gsi, fold_convert (type, n1),
				 true, NULL_TREE, true, GSI_SAME_STMT);
  n2 = force_gimple_operand_gsi (&gsi, fold_convert (itype, n2),
				 true, NULL_TREE, true, GSI_SAME_STMT);
  step = force_gimple_operand_gsi (&gsi, fold_convert (itype, step),
				   true, NULL_TREE, true, GSI_SAME_STMT);

  /* When combined with an inner construct, the start/end go into that
     construct's _looptemp_ variables instead of fd->loop.v.  */
  tree startvar = fd->loop.v;
  tree endvar = NULL_TREE;

  if (gimple_omp_for_combined_p (fd->for_stmt))
    {
      tree clauses = gimple_omp_for_clauses (inner_stmt);
      tree innerc = omp_find_clause (clauses, OMP_CLAUSE__LOOPTEMP_);
      gcc_assert (innerc);
      startvar = OMP_CLAUSE_DECL (innerc);
      innerc = omp_find_clause (OMP_CLAUSE_CHAIN (innerc),
				OMP_CLAUSE__LOOPTEMP_);
      gcc_assert (innerc);
      endvar = OMP_CLAUSE_DECL (innerc);
    }
  t = fold_convert (TREE_TYPE (startvar), n1);
  t = force_gimple_operand_gsi (&gsi, t,
				DECL_P (startvar)
				&& TREE_ADDRESSABLE (startvar),
				NULL_TREE, false, GSI_CONTINUE_LINKING);
  gimple *assign_stmt = gimple_build_assign (startvar, t);
  gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING);

  t = fold_convert (TREE_TYPE (startvar), n2);
  e = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
				false, GSI_CONTINUE_LINKING);
  if (endvar)
    {
      assign_stmt = gimple_build_assign (endvar, e);
      gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING);
      /* Also keep fd->loop.v current, converting if the types
	 differ.  */
      if (useless_type_conversion_p (TREE_TYPE (fd->loop.v), TREE_TYPE (e)))
	assign_stmt = gimple_build_assign (fd->loop.v, e);
      else
	assign_stmt = gimple_build_assign (fd->loop.v, NOP_EXPR, e);
      gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING);
    }
  if (fd->collapse > 1)
    expand_omp_for_init_vars (fd, &gsi, counts, inner_stmt, startvar);

  if (!broken_loop)
    {
      /* The code controlling the sequential loop replaces the
	 GIMPLE_OMP_CONTINUE.  */
      gsi = gsi_last_bb (cont_bb);
      gomp_continue *cont_stmt = as_a <gomp_continue *> (gsi_stmt (gsi));
      gcc_assert (gimple_code (cont_stmt) == GIMPLE_OMP_CONTINUE);
      vmain = gimple_omp_continue_control_use (cont_stmt);
      vback = gimple_omp_continue_control_def (cont_stmt);

      if (!gimple_omp_for_combined_p (fd->for_stmt))
	{
	  /* Emit VBACK = VMAIN + STEP and the latch condition
	     VBACK cond_code E.  */
	  if (POINTER_TYPE_P (type))
	    t = fold_build_pointer_plus (vmain, step);
	  else
	    t = fold_build2 (PLUS_EXPR, type, vmain, step);
	  t = force_gimple_operand_gsi (&gsi, t,
					DECL_P (vback)
					&& TREE_ADDRESSABLE (vback),
					NULL_TREE, true, GSI_SAME_STMT);
	  assign_stmt = gimple_build_assign (vback, t);
	  gsi_insert_before (&gsi, assign_stmt, GSI_SAME_STMT);

	  t = build2 (fd->loop.cond_code, boolean_type_node,
		      DECL_P (vback) && TREE_ADDRESSABLE (vback)
		      ? t : vback, e);
	  gsi_insert_before (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT);
	}

      /* Remove the GIMPLE_OMP_CONTINUE statement.  */
      gsi_remove (&gsi, true);

      if (fd->collapse > 1 && !gimple_omp_for_combined_p (fd->for_stmt))
	collapse_bb = extract_omp_for_update_vars (fd, cont_bb, body_bb);
    }

  /* Remove the GIMPLE_OMP_FOR statement.  */
  gsi = gsi_for_stmt (fd->for_stmt);
  gsi_remove (&gsi, true);

  /* Remove the GIMPLE_OMP_RETURN statement.  */
  gsi = gsi_last_bb (exit_bb);
  gsi_remove (&gsi, true);

  FALLTHRU_EDGE (entry_bb)->probability = profile_probability::always ();
  if (!broken_loop)
    remove_edge (BRANCH_EDGE (entry_bb));
  else
    {
      remove_edge_and_dominated_blocks (BRANCH_EDGE (entry_bb));
      region->outer->cont = NULL;
    }

  /* Connect all the blocks.  The latch edge target depends on whether
     the collapsed-loop update code was emitted in a separate block.  */
  if (!broken_loop)
    {
      ep = find_edge (cont_bb, body_bb);
      if (gimple_omp_for_combined_p (fd->for_stmt))
	{
	  remove_edge (ep);
	  ep = NULL;
	}
      else if (fd->collapse > 1)
	{
	  remove_edge (ep);
	  ep = make_edge (cont_bb, collapse_bb, EDGE_TRUE_VALUE);
	}
      else
	ep->flags = EDGE_TRUE_VALUE;
      find_edge (cont_bb, fin_bb)->flags
	= ep ? EDGE_FALSE_VALUE : EDGE_FALLTHRU;
    }

  set_immediate_dominator (CDI_DOMINATORS, body_bb,
			   recompute_dominator (CDI_DOMINATORS, body_bb));
  if (!broken_loop)
    set_immediate_dominator (CDI_DOMINATORS, fin_bb,
			     recompute_dominator (CDI_DOMINATORS, fin_bb));

  /* Register the natural loop with the loop tree unless the inner
     construct supplies its own control flow.  */
  if (!broken_loop && !gimple_omp_for_combined_p (fd->for_stmt))
    {
      struct loop *loop = alloc_loop ();
      loop->header = body_bb;
      if (collapse_bb == NULL)
	loop->latch = cont_bb;
      add_loop (loop, body_bb->loop_father);
    }
}
5260
5261 /* A subroutine of expand_omp_for. Generate code for an OpenACC
5262 partitioned loop. The lowering here is abstracted, in that the
5263 loop parameters are passed through internal functions, which are
5264 further lowered by oacc_device_lower, once we get to the target
5265 compiler. The loop is of the form:
5266
5267 for (V = B; V LTGT E; V += S) {BODY}
5268
5269 where LTGT is < or >. We may have a specified chunking size, CHUNKING
5270 (constant 0 for no chunking) and we will have a GWV partitioning
5271 mask, specifying dimensions over which the loop is to be
5272 partitioned (see note below). We generate code that looks like
5273 (this ignores tiling):
5274
5275 <entry_bb> [incoming FALL->body, BRANCH->exit]
5276 typedef signedintify (typeof (V)) T; // underlying signed integral type
5277 T range = E - B;
5278 T chunk_no = 0;
5279 T DIR = LTGT == '<' ? +1 : -1;
5280 T chunk_max = GOACC_LOOP_CHUNK (dir, range, S, CHUNK_SIZE, GWV);
5281 T step = GOACC_LOOP_STEP (dir, range, S, CHUNK_SIZE, GWV);
5282
5283 <head_bb> [created by splitting end of entry_bb]
5284 T offset = GOACC_LOOP_OFFSET (dir, range, S, CHUNK_SIZE, GWV, chunk_no);
5285 T bound = GOACC_LOOP_BOUND (dir, range, S, CHUNK_SIZE, GWV, offset);
5286 if (!(offset LTGT bound)) goto bottom_bb;
5287
5288 <body_bb> [incoming]
5289 V = B + offset;
5290 {BODY}
5291
5292 <cont_bb> [incoming, may == body_bb FALL->exit_bb, BRANCH->body_bb]
5293 offset += step;
5294 if (offset LTGT bound) goto body_bb; [*]
5295
5296 <bottom_bb> [created by splitting start of exit_bb] insert BRANCH->head_bb
5297 chunk_no++;
   if (chunk_no < chunk_max) goto head_bb;
5299
5300 <exit_bb> [incoming]
5301 V = B + ((range -/+ 1) / S +/- 1) * S [*]
5302
5303 [*] Needed if V live at end of loop. */
5304
5305 static void
5306 expand_oacc_for (struct omp_region *region, struct omp_for_data *fd)
5307 {
5308 tree v = fd->loop.v;
5309 enum tree_code cond_code = fd->loop.cond_code;
5310 enum tree_code plus_code = PLUS_EXPR;
5311
5312 tree chunk_size = integer_minus_one_node;
5313 tree gwv = integer_zero_node;
5314 tree iter_type = TREE_TYPE (v);
5315 tree diff_type = iter_type;
5316 tree plus_type = iter_type;
5317 struct oacc_collapse *counts = NULL;
5318
5319 gcc_checking_assert (gimple_omp_for_kind (fd->for_stmt)
5320 == GF_OMP_FOR_KIND_OACC_LOOP);
5321 gcc_assert (!gimple_omp_for_combined_into_p (fd->for_stmt));
5322 gcc_assert (cond_code == LT_EXPR || cond_code == GT_EXPR);
5323
5324 if (POINTER_TYPE_P (iter_type))
5325 {
5326 plus_code = POINTER_PLUS_EXPR;
5327 plus_type = sizetype;
5328 }
5329 if (POINTER_TYPE_P (diff_type) || TYPE_UNSIGNED (diff_type))
5330 diff_type = signed_type_for (diff_type);
5331 if (TYPE_PRECISION (diff_type) < TYPE_PRECISION (integer_type_node))
5332 diff_type = integer_type_node;
5333
5334 basic_block entry_bb = region->entry; /* BB ending in OMP_FOR */
5335 basic_block exit_bb = region->exit; /* BB ending in OMP_RETURN */
5336 basic_block cont_bb = region->cont; /* BB ending in OMP_CONTINUE */
5337 basic_block bottom_bb = NULL;
5338
  /* entry_bb has two successors; the branch edge is to the exit
5340 block, fallthrough edge to body. */
5341 gcc_assert (EDGE_COUNT (entry_bb->succs) == 2
5342 && BRANCH_EDGE (entry_bb)->dest == exit_bb);
5343
5344 /* If cont_bb non-NULL, it has 2 successors. The branch successor is
5345 body_bb, or to a block whose only successor is the body_bb. Its
5346 fallthrough successor is the final block (same as the branch
5347 successor of the entry_bb). */
5348 if (cont_bb)
5349 {
5350 basic_block body_bb = FALLTHRU_EDGE (entry_bb)->dest;
5351 basic_block bed = BRANCH_EDGE (cont_bb)->dest;
5352
5353 gcc_assert (FALLTHRU_EDGE (cont_bb)->dest == exit_bb);
5354 gcc_assert (bed == body_bb || single_succ_edge (bed)->dest == body_bb);
5355 }
5356 else
5357 gcc_assert (!gimple_in_ssa_p (cfun));
5358
5359 /* The exit block only has entry_bb and cont_bb as predecessors. */
5360 gcc_assert (EDGE_COUNT (exit_bb->preds) == 1 + (cont_bb != NULL));
5361
5362 tree chunk_no;
5363 tree chunk_max = NULL_TREE;
5364 tree bound, offset;
5365 tree step = create_tmp_var (diff_type, ".step");
5366 bool up = cond_code == LT_EXPR;
5367 tree dir = build_int_cst (diff_type, up ? +1 : -1);
5368 bool chunking = !gimple_in_ssa_p (cfun);
5369 bool negating;
5370
5371 /* Tiling vars. */
5372 tree tile_size = NULL_TREE;
5373 tree element_s = NULL_TREE;
5374 tree e_bound = NULL_TREE, e_offset = NULL_TREE, e_step = NULL_TREE;
5375 basic_block elem_body_bb = NULL;
5376 basic_block elem_cont_bb = NULL;
5377
5378 /* SSA instances. */
5379 tree offset_incr = NULL_TREE;
5380 tree offset_init = NULL_TREE;
5381
5382 gimple_stmt_iterator gsi;
5383 gassign *ass;
5384 gcall *call;
5385 gimple *stmt;
5386 tree expr;
5387 location_t loc;
5388 edge split, be, fte;
5389
5390 /* Split the end of entry_bb to create head_bb. */
5391 split = split_block (entry_bb, last_stmt (entry_bb));
5392 basic_block head_bb = split->dest;
5393 entry_bb = split->src;
5394
5395 /* Chunk setup goes at end of entry_bb, replacing the omp_for. */
5396 gsi = gsi_last_bb (entry_bb);
5397 gomp_for *for_stmt = as_a <gomp_for *> (gsi_stmt (gsi));
5398 loc = gimple_location (for_stmt);
5399
5400 if (gimple_in_ssa_p (cfun))
5401 {
5402 offset_init = gimple_omp_for_index (for_stmt, 0);
5403 gcc_assert (integer_zerop (fd->loop.n1));
5404 /* The SSA parallelizer does gang parallelism. */
5405 gwv = build_int_cst (integer_type_node, GOMP_DIM_MASK (GOMP_DIM_GANG));
5406 }
5407
5408 if (fd->collapse > 1 || fd->tiling)
5409 {
5410 gcc_assert (!gimple_in_ssa_p (cfun) && up);
5411 counts = XALLOCAVEC (struct oacc_collapse, fd->collapse);
5412 tree total = expand_oacc_collapse_init (fd, &gsi, counts,
5413 TREE_TYPE (fd->loop.n2), loc);
5414
5415 if (SSA_VAR_P (fd->loop.n2))
5416 {
5417 total = force_gimple_operand_gsi (&gsi, total, false, NULL_TREE,
5418 true, GSI_SAME_STMT);
5419 ass = gimple_build_assign (fd->loop.n2, total);
5420 gsi_insert_before (&gsi, ass, GSI_SAME_STMT);
5421 }
5422 }
5423
5424 tree b = fd->loop.n1;
5425 tree e = fd->loop.n2;
5426 tree s = fd->loop.step;
5427
5428 b = force_gimple_operand_gsi (&gsi, b, true, NULL_TREE, true, GSI_SAME_STMT);
5429 e = force_gimple_operand_gsi (&gsi, e, true, NULL_TREE, true, GSI_SAME_STMT);
5430
5431 /* Convert the step, avoiding possible unsigned->signed overflow. */
5432 negating = !up && TYPE_UNSIGNED (TREE_TYPE (s));
5433 if (negating)
5434 s = fold_build1 (NEGATE_EXPR, TREE_TYPE (s), s);
5435 s = fold_convert (diff_type, s);
5436 if (negating)
5437 s = fold_build1 (NEGATE_EXPR, diff_type, s);
5438 s = force_gimple_operand_gsi (&gsi, s, true, NULL_TREE, true, GSI_SAME_STMT);
5439
5440 if (!chunking)
5441 chunk_size = integer_zero_node;
5442 expr = fold_convert (diff_type, chunk_size);
5443 chunk_size = force_gimple_operand_gsi (&gsi, expr, true,
5444 NULL_TREE, true, GSI_SAME_STMT);
5445
5446 if (fd->tiling)
5447 {
5448 /* Determine the tile size and element step,
5449 modify the outer loop step size. */
5450 tile_size = create_tmp_var (diff_type, ".tile_size");
5451 expr = build_int_cst (diff_type, 1);
5452 for (int ix = 0; ix < fd->collapse; ix++)
5453 expr = fold_build2 (MULT_EXPR, diff_type, counts[ix].tile, expr);
5454 expr = force_gimple_operand_gsi (&gsi, expr, true,
5455 NULL_TREE, true, GSI_SAME_STMT);
5456 ass = gimple_build_assign (tile_size, expr);
5457 gsi_insert_before (&gsi, ass, GSI_SAME_STMT);
5458
5459 element_s = create_tmp_var (diff_type, ".element_s");
5460 ass = gimple_build_assign (element_s, s);
5461 gsi_insert_before (&gsi, ass, GSI_SAME_STMT);
5462
5463 expr = fold_build2 (MULT_EXPR, diff_type, s, tile_size);
5464 s = force_gimple_operand_gsi (&gsi, expr, true,
5465 NULL_TREE, true, GSI_SAME_STMT);
5466 }
5467
5468 /* Determine the range, avoiding possible unsigned->signed overflow. */
5469 negating = !up && TYPE_UNSIGNED (iter_type);
5470 expr = fold_build2 (MINUS_EXPR, plus_type,
5471 fold_convert (plus_type, negating ? b : e),
5472 fold_convert (plus_type, negating ? e : b));
5473 expr = fold_convert (diff_type, expr);
5474 if (negating)
5475 expr = fold_build1 (NEGATE_EXPR, diff_type, expr);
5476 tree range = force_gimple_operand_gsi (&gsi, expr, true,
5477 NULL_TREE, true, GSI_SAME_STMT);
5478
5479 chunk_no = build_int_cst (diff_type, 0);
5480 if (chunking)
5481 {
5482 gcc_assert (!gimple_in_ssa_p (cfun));
5483
5484 expr = chunk_no;
5485 chunk_max = create_tmp_var (diff_type, ".chunk_max");
5486 chunk_no = create_tmp_var (diff_type, ".chunk_no");
5487
5488 ass = gimple_build_assign (chunk_no, expr);
5489 gsi_insert_before (&gsi, ass, GSI_SAME_STMT);
5490
5491 call = gimple_build_call_internal (IFN_GOACC_LOOP, 6,
5492 build_int_cst (integer_type_node,
5493 IFN_GOACC_LOOP_CHUNKS),
5494 dir, range, s, chunk_size, gwv);
5495 gimple_call_set_lhs (call, chunk_max);
5496 gimple_set_location (call, loc);
5497 gsi_insert_before (&gsi, call, GSI_SAME_STMT);
5498 }
5499 else
5500 chunk_size = chunk_no;
5501
5502 call = gimple_build_call_internal (IFN_GOACC_LOOP, 6,
5503 build_int_cst (integer_type_node,
5504 IFN_GOACC_LOOP_STEP),
5505 dir, range, s, chunk_size, gwv);
5506 gimple_call_set_lhs (call, step);
5507 gimple_set_location (call, loc);
5508 gsi_insert_before (&gsi, call, GSI_SAME_STMT);
5509
5510 /* Remove the GIMPLE_OMP_FOR. */
5511 gsi_remove (&gsi, true);
5512
5513 /* Fixup edges from head_bb. */
5514 be = BRANCH_EDGE (head_bb);
5515 fte = FALLTHRU_EDGE (head_bb);
5516 be->flags |= EDGE_FALSE_VALUE;
5517 fte->flags ^= EDGE_FALLTHRU | EDGE_TRUE_VALUE;
5518
5519 basic_block body_bb = fte->dest;
5520
5521 if (gimple_in_ssa_p (cfun))
5522 {
5523 gsi = gsi_last_bb (cont_bb);
5524 gomp_continue *cont_stmt = as_a <gomp_continue *> (gsi_stmt (gsi));
5525
5526 offset = gimple_omp_continue_control_use (cont_stmt);
5527 offset_incr = gimple_omp_continue_control_def (cont_stmt);
5528 }
5529 else
5530 {
5531 offset = create_tmp_var (diff_type, ".offset");
5532 offset_init = offset_incr = offset;
5533 }
5534 bound = create_tmp_var (TREE_TYPE (offset), ".bound");
5535
5536 /* Loop offset & bound go into head_bb. */
5537 gsi = gsi_start_bb (head_bb);
5538
5539 call = gimple_build_call_internal (IFN_GOACC_LOOP, 7,
5540 build_int_cst (integer_type_node,
5541 IFN_GOACC_LOOP_OFFSET),
5542 dir, range, s,
5543 chunk_size, gwv, chunk_no);
5544 gimple_call_set_lhs (call, offset_init);
5545 gimple_set_location (call, loc);
5546 gsi_insert_after (&gsi, call, GSI_CONTINUE_LINKING);
5547
5548 call = gimple_build_call_internal (IFN_GOACC_LOOP, 7,
5549 build_int_cst (integer_type_node,
5550 IFN_GOACC_LOOP_BOUND),
5551 dir, range, s,
5552 chunk_size, gwv, offset_init);
5553 gimple_call_set_lhs (call, bound);
5554 gimple_set_location (call, loc);
5555 gsi_insert_after (&gsi, call, GSI_CONTINUE_LINKING);
5556
5557 expr = build2 (cond_code, boolean_type_node, offset_init, bound);
5558 gsi_insert_after (&gsi, gimple_build_cond_empty (expr),
5559 GSI_CONTINUE_LINKING);
5560
5561 /* V assignment goes into body_bb. */
5562 if (!gimple_in_ssa_p (cfun))
5563 {
5564 gsi = gsi_start_bb (body_bb);
5565
5566 expr = build2 (plus_code, iter_type, b,
5567 fold_convert (plus_type, offset));
5568 expr = force_gimple_operand_gsi (&gsi, expr, false, NULL_TREE,
5569 true, GSI_SAME_STMT);
5570 ass = gimple_build_assign (v, expr);
5571 gsi_insert_before (&gsi, ass, GSI_SAME_STMT);
5572
5573 if (fd->collapse > 1 || fd->tiling)
5574 expand_oacc_collapse_vars (fd, false, &gsi, counts, v);
5575
5576 if (fd->tiling)
5577 {
5578 /* Determine the range of the element loop -- usually simply
5579 the tile_size, but could be smaller if the final
5580 iteration of the outer loop is a partial tile. */
5581 tree e_range = create_tmp_var (diff_type, ".e_range");
5582
5583 expr = build2 (MIN_EXPR, diff_type,
5584 build2 (MINUS_EXPR, diff_type, bound, offset),
5585 build2 (MULT_EXPR, diff_type, tile_size,
5586 element_s));
5587 expr = force_gimple_operand_gsi (&gsi, expr, false, NULL_TREE,
5588 true, GSI_SAME_STMT);
5589 ass = gimple_build_assign (e_range, expr);
5590 gsi_insert_before (&gsi, ass, GSI_SAME_STMT);
5591
5592 /* Determine bound, offset & step of inner loop. */
5593 e_bound = create_tmp_var (diff_type, ".e_bound");
5594 e_offset = create_tmp_var (diff_type, ".e_offset");
5595 e_step = create_tmp_var (diff_type, ".e_step");
5596
5597 /* Mark these as element loops. */
5598 tree t, e_gwv = integer_minus_one_node;
5599 tree chunk = build_int_cst (diff_type, 0); /* Never chunked. */
5600
5601 t = build_int_cst (integer_type_node, IFN_GOACC_LOOP_OFFSET);
5602 call = gimple_build_call_internal (IFN_GOACC_LOOP, 7, t, dir, e_range,
5603 element_s, chunk, e_gwv, chunk);
5604 gimple_call_set_lhs (call, e_offset);
5605 gimple_set_location (call, loc);
5606 gsi_insert_before (&gsi, call, GSI_SAME_STMT);
5607
5608 t = build_int_cst (integer_type_node, IFN_GOACC_LOOP_BOUND);
5609 call = gimple_build_call_internal (IFN_GOACC_LOOP, 7, t, dir, e_range,
5610 element_s, chunk, e_gwv, e_offset);
5611 gimple_call_set_lhs (call, e_bound);
5612 gimple_set_location (call, loc);
5613 gsi_insert_before (&gsi, call, GSI_SAME_STMT);
5614
5615 t = build_int_cst (integer_type_node, IFN_GOACC_LOOP_STEP);
5616 call = gimple_build_call_internal (IFN_GOACC_LOOP, 6, t, dir, e_range,
5617 element_s, chunk, e_gwv);
5618 gimple_call_set_lhs (call, e_step);
5619 gimple_set_location (call, loc);
5620 gsi_insert_before (&gsi, call, GSI_SAME_STMT);
5621
5622 /* Add test and split block. */
5623 expr = build2 (cond_code, boolean_type_node, e_offset, e_bound);
5624 stmt = gimple_build_cond_empty (expr);
5625 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
5626 split = split_block (body_bb, stmt);
5627 elem_body_bb = split->dest;
5628 if (cont_bb == body_bb)
5629 cont_bb = elem_body_bb;
5630 body_bb = split->src;
5631
5632 split->flags ^= EDGE_FALLTHRU | EDGE_TRUE_VALUE;
5633
5634 /* Initialize the user's loop vars. */
5635 gsi = gsi_start_bb (elem_body_bb);
5636 expand_oacc_collapse_vars (fd, true, &gsi, counts, e_offset);
5637 }
5638 }
5639
5640 /* Loop increment goes into cont_bb. If this is not a loop, we
5641 will have spawned threads as if it was, and each one will
5642 execute one iteration. The specification is not explicit about
5643 whether such constructs are ill-formed or not, and they can
5644 occur, especially when noreturn routines are involved. */
5645 if (cont_bb)
5646 {
5647 gsi = gsi_last_bb (cont_bb);
5648 gomp_continue *cont_stmt = as_a <gomp_continue *> (gsi_stmt (gsi));
5649 loc = gimple_location (cont_stmt);
5650
5651 if (fd->tiling)
5652 {
5653 /* Insert element loop increment and test. */
5654 expr = build2 (PLUS_EXPR, diff_type, e_offset, e_step);
5655 expr = force_gimple_operand_gsi (&gsi, expr, false, NULL_TREE,
5656 true, GSI_SAME_STMT);
5657 ass = gimple_build_assign (e_offset, expr);
5658 gsi_insert_before (&gsi, ass, GSI_SAME_STMT);
5659 expr = build2 (cond_code, boolean_type_node, e_offset, e_bound);
5660
5661 stmt = gimple_build_cond_empty (expr);
5662 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
5663 split = split_block (cont_bb, stmt);
5664 elem_cont_bb = split->src;
5665 cont_bb = split->dest;
5666
5667 split->flags ^= EDGE_FALLTHRU | EDGE_FALSE_VALUE;
5668 split->probability = profile_probability::unlikely ().guessed ();
5669 edge latch_edge
5670 = make_edge (elem_cont_bb, elem_body_bb, EDGE_TRUE_VALUE);
5671 latch_edge->probability = profile_probability::likely ().guessed ();
5672
5673 edge skip_edge = make_edge (body_bb, cont_bb, EDGE_FALSE_VALUE);
5674 skip_edge->probability = profile_probability::unlikely ().guessed ();
5675 edge loop_entry_edge = EDGE_SUCC (body_bb, 1 - skip_edge->dest_idx);
5676 loop_entry_edge->probability
5677 = profile_probability::likely ().guessed ();
5678
5679 gsi = gsi_for_stmt (cont_stmt);
5680 }
5681
5682 /* Increment offset. */
5683 if (gimple_in_ssa_p (cfun))
5684 expr = build2 (plus_code, iter_type, offset,
5685 fold_convert (plus_type, step));
5686 else
5687 expr = build2 (PLUS_EXPR, diff_type, offset, step);
5688 expr = force_gimple_operand_gsi (&gsi, expr, false, NULL_TREE,
5689 true, GSI_SAME_STMT);
5690 ass = gimple_build_assign (offset_incr, expr);
5691 gsi_insert_before (&gsi, ass, GSI_SAME_STMT);
5692 expr = build2 (cond_code, boolean_type_node, offset_incr, bound);
5693 gsi_insert_before (&gsi, gimple_build_cond_empty (expr), GSI_SAME_STMT);
5694
5695 /* Remove the GIMPLE_OMP_CONTINUE. */
5696 gsi_remove (&gsi, true);
5697
5698 /* Fixup edges from cont_bb. */
5699 be = BRANCH_EDGE (cont_bb);
5700 fte = FALLTHRU_EDGE (cont_bb);
5701 be->flags |= EDGE_TRUE_VALUE;
5702 fte->flags ^= EDGE_FALLTHRU | EDGE_FALSE_VALUE;
5703
5704 if (chunking)
5705 {
5706 /* Split the beginning of exit_bb to make bottom_bb. We
5707 need to insert a nop at the start, because splitting is
5708 after a stmt, not before. */
5709 gsi = gsi_start_bb (exit_bb);
5710 stmt = gimple_build_nop ();
5711 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
5712 split = split_block (exit_bb, stmt);
5713 bottom_bb = split->src;
5714 exit_bb = split->dest;
5715 gsi = gsi_last_bb (bottom_bb);
5716
5717 /* Chunk increment and test goes into bottom_bb. */
5718 expr = build2 (PLUS_EXPR, diff_type, chunk_no,
5719 build_int_cst (diff_type, 1));
5720 ass = gimple_build_assign (chunk_no, expr);
5721 gsi_insert_after (&gsi, ass, GSI_CONTINUE_LINKING);
5722
5723 /* Chunk test at end of bottom_bb. */
5724 expr = build2 (LT_EXPR, boolean_type_node, chunk_no, chunk_max);
5725 gsi_insert_after (&gsi, gimple_build_cond_empty (expr),
5726 GSI_CONTINUE_LINKING);
5727
5728 /* Fixup edges from bottom_bb. */
5729 split->flags ^= EDGE_FALLTHRU | EDGE_FALSE_VALUE;
5730 split->probability = profile_probability::unlikely ().guessed ();
5731 edge latch_edge = make_edge (bottom_bb, head_bb, EDGE_TRUE_VALUE);
5732 latch_edge->probability = profile_probability::likely ().guessed ();
5733 }
5734 }
5735
5736 gsi = gsi_last_bb (exit_bb);
5737 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_RETURN);
5738 loc = gimple_location (gsi_stmt (gsi));
5739
5740 if (!gimple_in_ssa_p (cfun))
5741 {
5742 /* Insert the final value of V, in case it is live. This is the
5743 value for the only thread that survives past the join. */
5744 expr = fold_build2 (MINUS_EXPR, diff_type, range, dir);
5745 expr = fold_build2 (PLUS_EXPR, diff_type, expr, s);
5746 expr = fold_build2 (TRUNC_DIV_EXPR, diff_type, expr, s);
5747 expr = fold_build2 (MULT_EXPR, diff_type, expr, s);
5748 expr = build2 (plus_code, iter_type, b, fold_convert (plus_type, expr));
5749 expr = force_gimple_operand_gsi (&gsi, expr, false, NULL_TREE,
5750 true, GSI_SAME_STMT);
5751 ass = gimple_build_assign (v, expr);
5752 gsi_insert_before (&gsi, ass, GSI_SAME_STMT);
5753 }
5754
5755 /* Remove the OMP_RETURN. */
5756 gsi_remove (&gsi, true);
5757
5758 if (cont_bb)
5759 {
5760 /* We now have one, two or three nested loops. Update the loop
5761 structures. */
5762 struct loop *parent = entry_bb->loop_father;
5763 struct loop *body = body_bb->loop_father;
5764
5765 if (chunking)
5766 {
5767 struct loop *chunk_loop = alloc_loop ();
5768 chunk_loop->header = head_bb;
5769 chunk_loop->latch = bottom_bb;
5770 add_loop (chunk_loop, parent);
5771 parent = chunk_loop;
5772 }
5773 else if (parent != body)
5774 {
5775 gcc_assert (body->header == body_bb);
5776 gcc_assert (body->latch == cont_bb
5777 || single_pred (body->latch) == cont_bb);
5778 parent = NULL;
5779 }
5780
5781 if (parent)
5782 {
5783 struct loop *body_loop = alloc_loop ();
5784 body_loop->header = body_bb;
5785 body_loop->latch = cont_bb;
5786 add_loop (body_loop, parent);
5787
5788 if (fd->tiling)
5789 {
5790 /* Insert tiling's element loop. */
5791 struct loop *inner_loop = alloc_loop ();
5792 inner_loop->header = elem_body_bb;
5793 inner_loop->latch = elem_cont_bb;
5794 add_loop (inner_loop, body_loop);
5795 }
5796 }
5797 }
5798 }
5799
/* Expand the OMP loop defined by REGION.  INNER_STMT, if non-NULL, is an
   OMP statement nested inside the loop; it is forwarded to the expanders
   that handle combined constructs.  Dispatches on the loop kind recorded
   in the GIMPLE_OMP_FOR statement: simd, Cilk for, OpenACC loop,
   taskloop, static schedule, or one of the libgomp runtime schedules.  */

static void
expand_omp_for (struct omp_region *region, gimple *inner_stmt)
{
  struct omp_for_data fd;
  struct omp_for_data_loop *loops;

  /* One omp_for_data_loop per collapsed loop level; stack-allocated
     since the collapse count is typically tiny.  */
  loops
    = (struct omp_for_data_loop *)
      alloca (gimple_omp_for_collapse (last_stmt (region->entry))
	      * sizeof (struct omp_for_data_loop));
  omp_extract_for_data (as_a <gomp_for *> (last_stmt (region->entry)),
			&fd, loops);
  /* Cache schedule info on the region for later passes.  */
  region->sched_kind = fd.sched_kind;
  region->sched_modifiers = fd.sched_modifiers;

  /* Clear the abnormal flag that lowering put on the region edges so
     the CFG is usable for ordinary expansion.  */
  gcc_assert (EDGE_COUNT (region->entry->succs) == 2);
  BRANCH_EDGE (region->entry)->flags &= ~EDGE_ABNORMAL;
  FALLTHRU_EDGE (region->entry)->flags &= ~EDGE_ABNORMAL;
  if (region->cont)
    {
      gcc_assert (EDGE_COUNT (region->cont->succs) == 2);
      BRANCH_EDGE (region->cont)->flags &= ~EDGE_ABNORMAL;
      FALLTHRU_EDGE (region->cont)->flags &= ~EDGE_ABNORMAL;
    }
  else
    /* If there isn't a continue then this is a degenerate case where
       the introduction of abnormal edges during lowering will prevent
       original loops from being detected.  Fix that up.  */
    loops_state_set (LOOPS_NEED_FIXUP);

  if (gimple_omp_for_kind (fd.for_stmt) & GF_OMP_FOR_SIMD)
    expand_omp_simd (region, &fd);
  else if (gimple_omp_for_kind (fd.for_stmt) == GF_OMP_FOR_KIND_CILKFOR)
    expand_cilk_for (region, &fd);
  else if (gimple_omp_for_kind (fd.for_stmt) == GF_OMP_FOR_KIND_OACC_LOOP)
    {
      gcc_assert (!inner_stmt);
      expand_oacc_for (region, &fd);
    }
  else if (gimple_omp_for_kind (fd.for_stmt) == GF_OMP_FOR_KIND_TASKLOOP)
    {
      if (gimple_omp_for_combined_into_p (fd.for_stmt))
	expand_omp_taskloop_for_inner (region, &fd, inner_stmt);
      else
	expand_omp_taskloop_for_outer (region, &fd, inner_stmt);
    }
  else if (fd.sched_kind == OMP_CLAUSE_SCHEDULE_STATIC
	   && !fd.have_ordered)
    {
      if (fd.chunk_size == NULL)
	expand_omp_for_static_nochunk (region, &fd, inner_stmt);
      else
	expand_omp_for_static_chunk (region, &fd, inner_stmt);
    }
  else
    {
      /* Remaining schedules go through the generic libgomp interface:
	 compute indices into the GOMP_loop_*_start / *_next builtin
	 enumeration based on schedule kind, orderedness and chunking.  */
      int fn_index, start_ix, next_ix;

      gcc_assert (gimple_omp_for_kind (fd.for_stmt)
		  == GF_OMP_FOR_KIND_FOR);
      if (fd.chunk_size == NULL
	  && fd.sched_kind == OMP_CLAUSE_SCHEDULE_STATIC)
	fd.chunk_size = integer_zero_node;
      gcc_assert (fd.sched_kind != OMP_CLAUSE_SCHEDULE_AUTO);
      switch (fd.sched_kind)
	{
	case OMP_CLAUSE_SCHEDULE_RUNTIME:
	  fn_index = 3;
	  break;
	case OMP_CLAUSE_SCHEDULE_DYNAMIC:
	case OMP_CLAUSE_SCHEDULE_GUIDED:
	  /* Nonmonotonic dynamic/guided without ordered clauses use
	     the dedicated nonmonotonic entry points.  */
	  if ((fd.sched_modifiers & OMP_CLAUSE_SCHEDULE_NONMONOTONIC)
	      && !fd.ordered
	      && !fd.have_ordered)
	    {
	      fn_index = 3 + fd.sched_kind;
	      break;
	    }
	  /* FALLTHRU */
	default:
	  fn_index = fd.sched_kind;
	  break;
	}
      if (!fd.ordered)
	fn_index += fd.have_ordered * 6;
      if (fd.ordered)
	start_ix = ((int)BUILT_IN_GOMP_LOOP_DOACROSS_STATIC_START) + fn_index;
      else
	start_ix = ((int)BUILT_IN_GOMP_LOOP_STATIC_START) + fn_index;
      next_ix = ((int)BUILT_IN_GOMP_LOOP_STATIC_NEXT) + fn_index;
      /* Shift over to the unsigned-long-long variants when the
	 iteration type requires them.  */
      if (fd.iter_type == long_long_unsigned_type_node)
	{
	  start_ix += ((int)BUILT_IN_GOMP_LOOP_ULL_STATIC_START
		       - (int)BUILT_IN_GOMP_LOOP_STATIC_START);
	  next_ix += ((int)BUILT_IN_GOMP_LOOP_ULL_STATIC_NEXT
		      - (int)BUILT_IN_GOMP_LOOP_STATIC_NEXT);
	}
      expand_omp_for_generic (region, &fd, (enum built_in_function) start_ix,
			      (enum built_in_function) next_ix, inner_stmt);
    }

  if (gimple_in_ssa_p (cfun))
    update_ssa (TODO_update_ssa_only_virtuals);
}
5906
/* Expand code for an OpenMP sections directive.  In pseudo code, we generate

	v = GOMP_sections_start (n);
    L0:
	switch (v)
	  {
	  case 0:
	    goto L2;
	  case 1:
	    section 1;
	    goto L1;
	  case 2:
	    ...
	  case n:
	    ...
	  default:
	    abort ();
	  }
    L1:
	v = GOMP_sections_next ();
	goto L0;
    L2:
	reduction;

   If this is a combined parallel sections, replace the call to
   GOMP_sections_start with call to GOMP_sections_next.  */

static void
expand_omp_sections (struct omp_region *region)
{
  tree t, u, vin = NULL, vmain, vnext, l2;
  unsigned len;
  basic_block entry_bb, l0_bb, l1_bb, l2_bb, default_bb;
  gimple_stmt_iterator si, switch_si;
  gomp_sections *sections_stmt;
  gimple *stmt;
  gomp_continue *cont;
  edge_iterator ei;
  edge e;
  struct omp_region *inner;
  unsigned i, casei;
  bool exit_reachable = region->cont != NULL;

  gcc_assert (region->exit != NULL);
  entry_bb = region->entry;
  l0_bb = single_succ (entry_bb);
  l1_bb = region->cont;
  l2_bb = region->exit;
  /* Find the label L2 where control goes when no more sections remain.  */
  if (single_pred_p (l2_bb) && single_pred (l2_bb) == l0_bb)
    l2 = gimple_block_label (l2_bb);
  else
    {
      /* This can happen if there are reductions.  */
      len = EDGE_COUNT (l0_bb->succs);
      gcc_assert (len > 0);
      e = EDGE_SUCC (l0_bb, len - 1);
      si = gsi_last_bb (e->dest);
      l2 = NULL_TREE;
      if (gsi_end_p (si)
	  || gimple_code (gsi_stmt (si)) != GIMPLE_OMP_SECTION)
	l2 = gimple_block_label (e->dest);
      else
	/* Scan the successors of L0 for the one block that is not a
	   GIMPLE_OMP_SECTION body; that is the exit target.  */
	FOR_EACH_EDGE (e, ei, l0_bb->succs)
	  {
	    si = gsi_last_bb (e->dest);
	    if (gsi_end_p (si)
		|| gimple_code (gsi_stmt (si)) != GIMPLE_OMP_SECTION)
	      {
		l2 = gimple_block_label (e->dest);
		break;
	      }
	  }
    }
  if (exit_reachable)
    default_bb = create_empty_bb (l1_bb->prev_bb);
  else
    default_bb = create_empty_bb (l0_bb);

  /* We will build a switch() with enough cases for all the
     GIMPLE_OMP_SECTION regions, a '0' case to handle the end of more work
     and a default case to abort if something goes wrong.  */
  len = EDGE_COUNT (l0_bb->succs);

  /* Use vec::quick_push on label_vec throughout, since we know the size
     in advance.  */
  auto_vec<tree> label_vec (len);

  /* The call to GOMP_sections_start goes in ENTRY_BB, replacing the
     GIMPLE_OMP_SECTIONS statement.  */
  si = gsi_last_bb (entry_bb);
  sections_stmt = as_a <gomp_sections *> (gsi_stmt (si));
  gcc_assert (gimple_code (sections_stmt) == GIMPLE_OMP_SECTIONS);
  vin = gimple_omp_sections_control (sections_stmt);
  if (!is_combined_parallel (region))
    {
      /* If we are not inside a combined parallel+sections region,
	 call GOMP_sections_start.  */
      t = build_int_cst (unsigned_type_node, len - 1);
      u = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_START);
      stmt = gimple_build_call (u, 1, t);
    }
  else
    {
      /* Otherwise, call GOMP_sections_next.  */
      u = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_NEXT);
      stmt = gimple_build_call (u, 0);
    }
  gimple_call_set_lhs (stmt, vin);
  gsi_insert_after (&si, stmt, GSI_SAME_STMT);
  gsi_remove (&si, true);

  /* The switch() statement replacing GIMPLE_OMP_SECTIONS_SWITCH goes in
     L0_BB.  */
  switch_si = gsi_last_bb (l0_bb);
  gcc_assert (gimple_code (gsi_stmt (switch_si)) == GIMPLE_OMP_SECTIONS_SWITCH);
  if (exit_reachable)
    {
      cont = as_a <gomp_continue *> (last_stmt (l1_bb));
      gcc_assert (gimple_code (cont) == GIMPLE_OMP_CONTINUE);
      vmain = gimple_omp_continue_control_use (cont);
      vnext = gimple_omp_continue_control_def (cont);
    }
  else
    {
      vmain = vin;
      vnext = NULL_TREE;
    }

  /* Case 0: no more sections, jump to L2.  */
  t = build_case_label (build_int_cst (unsigned_type_node, 0), NULL, l2);
  label_vec.quick_push (t);
  i = 1;

  /* Convert each GIMPLE_OMP_SECTION into a CASE_LABEL_EXPR.  */
  for (inner = region->inner, casei = 1;
       inner;
       inner = inner->next, i++, casei++)
    {
      basic_block s_entry_bb, s_exit_bb;

      /* Skip optional reduction region.  */
      if (inner->type == GIMPLE_OMP_ATOMIC_LOAD)
	{
	  --i;
	  --casei;
	  continue;
	}

      s_entry_bb = inner->entry;
      s_exit_bb = inner->exit;

      t = gimple_block_label (s_entry_bb);
      u = build_int_cst (unsigned_type_node, casei);
      u = build_case_label (u, NULL, t);
      label_vec.quick_push (u);

      /* Drop the GIMPLE_OMP_SECTION marker at the section's entry.  */
      si = gsi_last_bb (s_entry_bb);
      gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SECTION);
      gcc_assert (i < len || gimple_omp_section_last_p (gsi_stmt (si)));
      gsi_remove (&si, true);
      single_succ_edge (s_entry_bb)->flags = EDGE_FALLTHRU;

      if (s_exit_bb == NULL)
	continue;

      /* Drop the GIMPLE_OMP_RETURN marker at the section's exit.  */
      si = gsi_last_bb (s_exit_bb);
      gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_RETURN);
      gsi_remove (&si, true);

      single_succ_edge (s_exit_bb)->flags = EDGE_FALLTHRU;
    }

  /* Error handling code goes in DEFAULT_BB.  */
  t = gimple_block_label (default_bb);
  u = build_case_label (NULL, NULL, t);
  make_edge (l0_bb, default_bb, 0);
  add_bb_to_loop (default_bb, current_loops->tree_root);

  stmt = gimple_build_switch (vmain, u, label_vec);
  gsi_insert_after (&switch_si, stmt, GSI_SAME_STMT);
  gsi_remove (&switch_si, true);

  si = gsi_start_bb (default_bb);
  stmt = gimple_build_call (builtin_decl_explicit (BUILT_IN_TRAP), 0);
  gsi_insert_after (&si, stmt, GSI_CONTINUE_LINKING);

  if (exit_reachable)
    {
      tree bfn_decl;

      /* Code to get the next section goes in L1_BB.  */
      si = gsi_last_bb (l1_bb);
      gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_CONTINUE);

      bfn_decl = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_NEXT);
      stmt = gimple_build_call (bfn_decl, 0);
      gimple_call_set_lhs (stmt, vnext);
      gsi_insert_after (&si, stmt, GSI_SAME_STMT);
      gsi_remove (&si, true);

      single_succ_edge (l1_bb)->flags = EDGE_FALLTHRU;
    }

  /* Cleanup function replaces GIMPLE_OMP_RETURN in EXIT_BB.  */
  si = gsi_last_bb (l2_bb);
  if (gimple_omp_return_nowait_p (gsi_stmt (si)))
    t = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_END_NOWAIT);
  else if (gimple_omp_return_lhs (gsi_stmt (si)))
    t = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_END_CANCEL);
  else
    t = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_END);
  stmt = gimple_build_call (t, 0);
  if (gimple_omp_return_lhs (gsi_stmt (si)))
    gimple_call_set_lhs (stmt, gimple_omp_return_lhs (gsi_stmt (si)));
  gsi_insert_after (&si, stmt, GSI_SAME_STMT);
  gsi_remove (&si, true);

  set_immediate_dominator (CDI_DOMINATORS, default_bb, l0_bb);
}
6125
/* Expand code for an OpenMP single directive.  We've already expanded
   much of the code, here we simply place the GOMP_barrier call.  */

static void
expand_omp_single (struct omp_region *region)
{
  basic_block entry_bb, exit_bb;
  gimple_stmt_iterator si;

  entry_bb = region->entry;
  exit_bb = region->exit;

  /* Remove the GIMPLE_OMP_SINGLE marker that opens the region.  */
  si = gsi_last_bb (entry_bb);
  gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SINGLE);
  gsi_remove (&si, true);
  single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;

  /* Unless the construct had a "nowait" clause, insert a barrier before
     removing the GIMPLE_OMP_RETURN; the return's lhs (if any) is passed
     to the barrier builder.  */
  si = gsi_last_bb (exit_bb);
  if (!gimple_omp_return_nowait_p (gsi_stmt (si)))
    {
      tree t = gimple_omp_return_lhs (gsi_stmt (si));
      gsi_insert_after (&si, omp_build_barrier (t), GSI_SAME_STMT);
    }
  gsi_remove (&si, true);
  single_succ_edge (exit_bb)->flags = EDGE_FALLTHRU;
}
6152
/* Generic expansion for OpenMP synchronization directives: master,
   ordered and critical.  All we need to do here is remove the entry
   and exit markers for REGION.  */

static void
expand_omp_synch (struct omp_region *region)
{
  basic_block entry_bb, exit_bb;
  gimple_stmt_iterator si;

  entry_bb = region->entry;
  exit_bb = region->exit;

  /* Remove the region-opening statement; any of these directive kinds
     may reach this generic path.  */
  si = gsi_last_bb (entry_bb);
  gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SINGLE
	      || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_MASTER
	      || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_TASKGROUP
	      || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ORDERED
	      || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_CRITICAL
	      || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_TEAMS);
  gsi_remove (&si, true);
  single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;

  /* Remove the matching GIMPLE_OMP_RETURN, when present.  */
  if (exit_bb)
    {
      si = gsi_last_bb (exit_bb);
      gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_RETURN);
      gsi_remove (&si, true);
      single_succ_edge (exit_bb)->flags = EDGE_FALLTHRU;
    }
}
6184
/* A subroutine of expand_omp_atomic.  Attempt to implement the atomic
   operation as a normal volatile load.  LOAD_BB holds the
   GIMPLE_OMP_ATOMIC_LOAD; ADDR is the location loaded from and
   LOADED_VAL receives the value.  INDEX is log2 of the size of the data
   type, selecting the size-specific __atomic_load builtin.  Returns
   false (leaving the IL untouched) if that builtin is unavailable.  */

static bool
expand_omp_atomic_load (basic_block load_bb, tree addr,
			tree loaded_val, int index)
{
  enum built_in_function tmpbase;
  gimple_stmt_iterator gsi;
  basic_block store_bb;
  location_t loc;
  gimple *stmt;
  tree decl, call, type, itype;

  gsi = gsi_last_bb (load_bb);
  stmt = gsi_stmt (gsi);
  gcc_assert (gimple_code (stmt) == GIMPLE_OMP_ATOMIC_LOAD);
  loc = gimple_location (stmt);

  /* ??? If the target does not implement atomic_load_optab[mode], and mode
     is smaller than word size, then expand_atomic_load assumes that the load
     is atomic.  We could avoid the builtin entirely in this case.  */

  tmpbase = (enum built_in_function) (BUILT_IN_ATOMIC_LOAD_N + index + 1);
  decl = builtin_decl_explicit (tmpbase);
  if (decl == NULL_TREE)
    return false;

  type = TREE_TYPE (loaded_val);
  itype = TREE_TYPE (TREE_TYPE (decl));

  /* Memory order: seq_cst when the directive requested it, otherwise
     relaxed.  */
  call = build_call_expr_loc (loc, decl, 2, addr,
			      build_int_cst (NULL,
					     gimple_omp_atomic_seq_cst_p (stmt)
					     ? MEMMODEL_SEQ_CST
					     : MEMMODEL_RELAXED));
  /* The builtin returns its integral type; view-convert if the user's
     type differs (e.g. floating point).  */
  if (!useless_type_conversion_p (type, itype))
    call = fold_build1_loc (loc, VIEW_CONVERT_EXPR, type, call);
  call = build2_loc (loc, MODIFY_EXPR, void_type_node, loaded_val, call);

  force_gimple_operand_gsi (&gsi, call, true, NULL_TREE, true, GSI_SAME_STMT);
  gsi_remove (&gsi, true);

  /* Also remove the matching GIMPLE_OMP_ATOMIC_STORE in the successor.  */
  store_bb = single_succ (load_bb);
  gsi = gsi_last_bb (store_bb);
  gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_ATOMIC_STORE);
  gsi_remove (&gsi, true);

  if (gimple_in_ssa_p (cfun))
    update_ssa (TODO_update_ssa_no_phi);

  return true;
}
6238
/* A subroutine of expand_omp_atomic.  Attempt to implement the atomic
   operation as a normal volatile store.  LOAD_BB holds the
   GIMPLE_OMP_ATOMIC_LOAD; its single successor holds the
   GIMPLE_OMP_ATOMIC_STORE of STORED_VAL to ADDR.  If the loaded value is
   needed the operation becomes an atomic exchange into LOADED_VAL.
   INDEX is log2 of the size of the data type.  Returns false (leaving
   the IL untouched) if no suitable builtin/optab is available.  */

static bool
expand_omp_atomic_store (basic_block load_bb, tree addr,
			 tree loaded_val, tree stored_val, int index)
{
  enum built_in_function tmpbase;
  gimple_stmt_iterator gsi;
  basic_block store_bb = single_succ (load_bb);
  location_t loc;
  gimple *stmt;
  tree decl, call, type, itype;
  machine_mode imode;
  bool exchange;

  gsi = gsi_last_bb (load_bb);
  stmt = gsi_stmt (gsi);
  gcc_assert (gimple_code (stmt) == GIMPLE_OMP_ATOMIC_LOAD);

  /* If the load value is needed, then this isn't a store but an exchange.  */
  exchange = gimple_omp_atomic_need_value_p (stmt);

  gsi = gsi_last_bb (store_bb);
  stmt = gsi_stmt (gsi);
  gcc_assert (gimple_code (stmt) == GIMPLE_OMP_ATOMIC_STORE);
  loc = gimple_location (stmt);

  /* ??? If the target does not implement atomic_store_optab[mode], and mode
     is smaller than word size, then expand_atomic_store assumes that the store
     is atomic.  We could avoid the builtin entirely in this case.  */

  tmpbase = (exchange ? BUILT_IN_ATOMIC_EXCHANGE_N : BUILT_IN_ATOMIC_STORE_N);
  tmpbase = (enum built_in_function) ((int) tmpbase + index + 1);
  decl = builtin_decl_explicit (tmpbase);
  if (decl == NULL_TREE)
    return false;

  type = TREE_TYPE (stored_val);

  /* Dig out the type of the function's second argument.  */
  itype = TREE_TYPE (decl);
  itype = TYPE_ARG_TYPES (itype);
  itype = TREE_CHAIN (itype);
  itype = TREE_VALUE (itype);
  imode = TYPE_MODE (itype);

  if (exchange && !can_atomic_exchange_p (imode, true))
    return false;

  /* View-convert between the user's type and the builtin's integral
     type where they differ.  */
  if (!useless_type_conversion_p (itype, type))
    stored_val = fold_build1_loc (loc, VIEW_CONVERT_EXPR, itype, stored_val);
  call = build_call_expr_loc (loc, decl, 3, addr, stored_val,
			      build_int_cst (NULL,
					     gimple_omp_atomic_seq_cst_p (stmt)
					     ? MEMMODEL_SEQ_CST
					     : MEMMODEL_RELAXED));
  if (exchange)
    {
      if (!useless_type_conversion_p (type, itype))
	call = build1_loc (loc, VIEW_CONVERT_EXPR, type, call);
      call = build2_loc (loc, MODIFY_EXPR, void_type_node, loaded_val, call);
    }

  force_gimple_operand_gsi (&gsi, call, true, NULL_TREE, true, GSI_SAME_STMT);
  gsi_remove (&gsi, true);

  /* Remove the GIMPLE_OMP_ATOMIC_LOAD that we verified above.  */
  gsi = gsi_last_bb (load_bb);
  gsi_remove (&gsi, true);

  if (gimple_in_ssa_p (cfun))
    update_ssa (TODO_update_ssa_no_phi);

  return true;
}
6315
/* A subroutine of expand_omp_atomic.  Attempt to implement the atomic
   operation as a __atomic_fetch_op builtin.  INDEX is log2 of the
   size of the data type, and thus usable to find the index of the builtin
   decl.  Returns false if the expression is not of the proper form.  */

static bool
expand_omp_atomic_fetch_op (basic_block load_bb,
			    tree addr, tree loaded_val,
			    tree stored_val, int index)
{
  enum built_in_function oldbase, newbase, tmpbase;
  tree decl, itype, call;
  tree lhs, rhs;
  basic_block store_bb = single_succ (load_bb);
  gimple_stmt_iterator gsi;
  gimple *stmt;
  location_t loc;
  enum tree_code code;
  bool need_old, need_new;
  machine_mode imode;
  bool seq_cst;

  /* We expect to find the following sequences:

   load_bb:
       GIMPLE_OMP_ATOMIC_LOAD (tmp, mem)

   store_bb:
       val = tmp OP something; (or: something OP tmp)
       GIMPLE_OMP_STORE (val)

  ???FIXME: Allow a more flexible sequence.
  Perhaps use data flow to pick the statements.

  */

  gsi = gsi_after_labels (store_bb);
  stmt = gsi_stmt (gsi);
  loc = gimple_location (stmt);
  if (!is_gimple_assign (stmt))
    return false;
  gsi_next (&gsi);
  if (gimple_code (gsi_stmt (gsi)) != GIMPLE_OMP_ATOMIC_STORE)
    return false;
  need_new = gimple_omp_atomic_need_value_p (gsi_stmt (gsi));
  need_old = gimple_omp_atomic_need_value_p (last_stmt (load_bb));
  seq_cst = gimple_omp_atomic_seq_cst_p (last_stmt (load_bb));
  gcc_checking_assert (!need_old || !need_new);

  if (!operand_equal_p (gimple_assign_lhs (stmt), stored_val, 0))
    return false;

  /* Check for one of the supported fetch-op operations.  */
  code = gimple_assign_rhs_code (stmt);
  switch (code)
    {
    case PLUS_EXPR:
    case POINTER_PLUS_EXPR:
      oldbase = BUILT_IN_ATOMIC_FETCH_ADD_N;
      newbase = BUILT_IN_ATOMIC_ADD_FETCH_N;
      break;
    case MINUS_EXPR:
      oldbase = BUILT_IN_ATOMIC_FETCH_SUB_N;
      newbase = BUILT_IN_ATOMIC_SUB_FETCH_N;
      break;
    case BIT_AND_EXPR:
      oldbase = BUILT_IN_ATOMIC_FETCH_AND_N;
      newbase = BUILT_IN_ATOMIC_AND_FETCH_N;
      break;
    case BIT_IOR_EXPR:
      oldbase = BUILT_IN_ATOMIC_FETCH_OR_N;
      newbase = BUILT_IN_ATOMIC_OR_FETCH_N;
      break;
    case BIT_XOR_EXPR:
      oldbase = BUILT_IN_ATOMIC_FETCH_XOR_N;
      newbase = BUILT_IN_ATOMIC_XOR_FETCH_N;
      break;
    default:
      return false;
    }

  /* Make sure the expression is of the proper form.  The loaded value
     must be one operand; for commutative codes it may be either one.  */
  if (operand_equal_p (gimple_assign_rhs1 (stmt), loaded_val, 0))
    rhs = gimple_assign_rhs2 (stmt);
  else if (commutative_tree_code (gimple_assign_rhs_code (stmt))
	   && operand_equal_p (gimple_assign_rhs2 (stmt), loaded_val, 0))
    rhs = gimple_assign_rhs1 (stmt);
  else
    return false;

  /* FETCH_OP returns the old value, OP_FETCH the new one; pick by which
     value the construct needs.  */
  tmpbase = ((enum built_in_function)
	     ((need_new ? newbase : oldbase) + index + 1));
  decl = builtin_decl_explicit (tmpbase);
  if (decl == NULL_TREE)
    return false;
  itype = TREE_TYPE (TREE_TYPE (decl));
  imode = TYPE_MODE (itype);

  /* We could test all of the various optabs involved, but the fact of the
     matter is that (with the exception of i486 vs i586 and xadd) all targets
     that support any atomic operation optab also implements compare-and-swap.
     Let optabs.c take care of expanding any compare-and-swap loop.  */
  if (!can_compare_and_swap_p (imode, true) || !can_atomic_load_p (imode))
    return false;

  gsi = gsi_last_bb (load_bb);
  gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_ATOMIC_LOAD);

  /* OpenMP does not imply any barrier-like semantics on its atomic ops.
     It only requires that the operation happen atomically.  Thus we can
     use the RELAXED memory model.  */
  call = build_call_expr_loc (loc, decl, 3, addr,
			      fold_convert_loc (loc, itype, rhs),
			      build_int_cst (NULL,
					     seq_cst ? MEMMODEL_SEQ_CST
						     : MEMMODEL_RELAXED));

  if (need_old || need_new)
    {
      lhs = need_old ? loaded_val : stored_val;
      call = fold_convert_loc (loc, TREE_TYPE (lhs), call);
      call = build2_loc (loc, MODIFY_EXPR, void_type_node, lhs, call);
    }
  else
    call = fold_convert_loc (loc, void_type_node, call);
  force_gimple_operand_gsi (&gsi, call, true, NULL_TREE, true, GSI_SAME_STMT);
  gsi_remove (&gsi, true);

  /* Remove the GIMPLE_OMP_ATOMIC_STORE and the now-dead assignment that
     computed the stored value.  */
  gsi = gsi_last_bb (store_bb);
  gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_ATOMIC_STORE);
  gsi_remove (&gsi, true);
  gsi = gsi_last_bb (store_bb);
  stmt = gsi_stmt (gsi);
  gsi_remove (&gsi, true);

  if (gimple_in_ssa_p (cfun))
    {
      release_defs (stmt);
      update_ssa (TODO_update_ssa_no_phi);
    }

  return true;
}
6459
/* A subroutine of expand_omp_atomic.  Implement the atomic operation as:

      oldval = *addr;
      repeat:
	newval = rhs;	 // with oldval replacing *addr in rhs
	oldval = __sync_val_compare_and_swap (addr, oldval, newval);
	if (oldval != newval)
	  goto repeat;

   INDEX is log2 of the size of the data type, and thus usable to find the
   index of the builtin decl.  */

static bool
expand_omp_atomic_pipeline (basic_block load_bb, basic_block store_bb,
			    tree addr, tree loaded_val, tree stored_val,
			    int index)
{
  tree loadedi, storedi, initial, new_storedi, old_vali;
  tree type, itype, cmpxchg, iaddr;
  gimple_stmt_iterator si;
  basic_block loop_header = single_succ (load_bb);
  gimple *phi, *stmt;
  edge e;
  enum built_in_function fncode;

  /* ??? We need a non-pointer interface to __atomic_compare_exchange in
     order to use the RELAXED memory model effectively.  */
  fncode = (enum built_in_function)((int)BUILT_IN_SYNC_VAL_COMPARE_AND_SWAP_N
				    + index + 1);
  cmpxchg = builtin_decl_explicit (fncode);
  if (cmpxchg == NULL_TREE)
    return false;
  type = TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (addr)));
  itype = TREE_TYPE (TREE_TYPE (cmpxchg));

  if (!can_compare_and_swap_p (TYPE_MODE (itype), true)
      || !can_atomic_load_p (TYPE_MODE (itype)))
    return false;

  /* Load the initial value, replacing the GIMPLE_OMP_ATOMIC_LOAD.  */
  si = gsi_last_bb (load_bb);
  gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_LOAD);

  /* For floating-point values, we'll need to view-convert them to integers
     so that we can perform the atomic compare and swap.  Simplify the
     following code by always setting up the "i"ntegral variables.  */
  if (!INTEGRAL_TYPE_P (type) && !POINTER_TYPE_P (type))
    {
      tree iaddr_val;

      iaddr = create_tmp_reg (build_pointer_type_for_mode (itype, ptr_mode,
							   true));
      iaddr_val
	= force_gimple_operand_gsi (&si,
				    fold_convert (TREE_TYPE (iaddr), addr),
				    false, NULL_TREE, true, GSI_SAME_STMT);
      stmt = gimple_build_assign (iaddr, iaddr_val);
      gsi_insert_before (&si, stmt, GSI_SAME_STMT);
      loadedi = create_tmp_var (itype);
      if (gimple_in_ssa_p (cfun))
	loadedi = make_ssa_name (loadedi);
    }
  else
    {
      iaddr = addr;
      loadedi = loaded_val;
    }

  /* Prefer an __atomic_load builtin for the initial read when the
     target provides one; otherwise fall back to a plain MEM_REF.  */
  fncode = (enum built_in_function) (BUILT_IN_ATOMIC_LOAD_N + index + 1);
  tree loaddecl = builtin_decl_explicit (fncode);
  if (loaddecl)
    initial
      = fold_convert (TREE_TYPE (TREE_TYPE (iaddr)),
		      build_call_expr (loaddecl, 2, iaddr,
				       build_int_cst (NULL_TREE,
						      MEMMODEL_RELAXED)));
  else
    initial = build2 (MEM_REF, TREE_TYPE (TREE_TYPE (iaddr)), iaddr,
		      build_int_cst (TREE_TYPE (iaddr), 0));

  initial
    = force_gimple_operand_gsi (&si, initial, true, NULL_TREE, true,
				GSI_SAME_STMT);

  /* Move the value to the LOADEDI temporary.  */
  if (gimple_in_ssa_p (cfun))
    {
      gcc_assert (gimple_seq_empty_p (phi_nodes (loop_header)));
      phi = create_phi_node (loadedi, loop_header);
      SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, single_succ_edge (load_bb)),
	       initial);
    }
  else
    gsi_insert_before (&si,
		       gimple_build_assign (loadedi, initial),
		       GSI_SAME_STMT);
  if (loadedi != loaded_val)
    {
      /* View-convert the integral working value back to the user's type
	 at the top of the loop.  */
      gimple_stmt_iterator gsi2;
      tree x;

      x = build1 (VIEW_CONVERT_EXPR, type, loadedi);
      gsi2 = gsi_start_bb (loop_header);
      if (gimple_in_ssa_p (cfun))
	{
	  gassign *stmt;
	  x = force_gimple_operand_gsi (&gsi2, x, true, NULL_TREE,
					true, GSI_SAME_STMT);
	  stmt = gimple_build_assign (loaded_val, x);
	  gsi_insert_before (&gsi2, stmt, GSI_SAME_STMT);
	}
      else
	{
	  x = build2 (MODIFY_EXPR, TREE_TYPE (loaded_val), loaded_val, x);
	  force_gimple_operand_gsi (&gsi2, x, true, NULL_TREE,
				    true, GSI_SAME_STMT);
	}
    }
  gsi_remove (&si, true);

  si = gsi_last_bb (store_bb);
  gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_STORE);

  if (iaddr == addr)
    storedi = stored_val;
  else
    storedi
      = force_gimple_operand_gsi (&si,
				  build1 (VIEW_CONVERT_EXPR, itype,
					  stored_val), true, NULL_TREE, true,
				  GSI_SAME_STMT);

  /* Build the compare&swap statement.  */
  new_storedi = build_call_expr (cmpxchg, 3, iaddr, loadedi, storedi);
  new_storedi = force_gimple_operand_gsi (&si,
					  fold_convert (TREE_TYPE (loadedi),
							new_storedi),
					  true, NULL_TREE,
					  true, GSI_SAME_STMT);

  if (gimple_in_ssa_p (cfun))
    old_vali = loadedi;
  else
    {
      old_vali = create_tmp_var (TREE_TYPE (loadedi));
      stmt = gimple_build_assign (old_vali, loadedi);
      gsi_insert_before (&si, stmt, GSI_SAME_STMT);

      stmt = gimple_build_assign (loadedi, new_storedi);
      gsi_insert_before (&si, stmt, GSI_SAME_STMT);
    }

  /* Note that we always perform the comparison as an integer, even for
     floating point.  This allows the atomic operation to properly
     succeed even with NaNs and -0.0.  */
  tree ne = build2 (NE_EXPR, boolean_type_node, new_storedi, old_vali);
  stmt = gimple_build_cond_empty (ne);
  gsi_insert_before (&si, stmt, GSI_SAME_STMT);

  /* Update cfg.  */
  e = single_succ_edge (store_bb);
  e->flags &= ~EDGE_FALLTHRU;
  e->flags |= EDGE_FALSE_VALUE;
  /* Expect no looping.  */
  e->probability = profile_probability::guessed_always ();

  e = make_edge (store_bb, loop_header, EDGE_TRUE_VALUE);
  e->probability = profile_probability::guessed_never ();

  /* Copy the new value to loadedi (we already did that before the condition
     if we are not in SSA).  */
  if (gimple_in_ssa_p (cfun))
    {
      phi = gimple_seq_first_stmt (phi_nodes (loop_header));
      SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, e), new_storedi);
    }

  /* Remove GIMPLE_OMP_ATOMIC_STORE.  */
  gsi_remove (&si, true);

  /* Register the retry loop in the loop tree.  */
  struct loop *loop = alloc_loop ();
  loop->header = loop_header;
  loop->latch = store_bb;
  add_loop (loop, loop_header->loop_father);

  if (gimple_in_ssa_p (cfun))
    update_ssa (TODO_update_ssa_no_phi);

  return true;
}
6650
6651 /* A subroutine of expand_omp_atomic. Implement the atomic operation as:
6652
6653 GOMP_atomic_start ();
6654 *addr = rhs;
6655 GOMP_atomic_end ();
6656
6657 The result is not globally atomic, but works so long as all parallel
6658 references are within #pragma omp atomic directives. According to
6659 responses received from omp@openmp.org, appears to be within spec.
6660 Which makes sense, since that's how several other compilers handle
6661 this situation as well.
6662 LOADED_VAL and ADDR are the operands of GIMPLE_OMP_ATOMIC_LOAD we're
6663 expanding. STORED_VAL is the operand of the matching
6664 GIMPLE_OMP_ATOMIC_STORE.
6665
6666 We replace
6667 GIMPLE_OMP_ATOMIC_LOAD (loaded_val, addr) with
6668 loaded_val = *addr;
6669
6670 and replace
6671 GIMPLE_OMP_ATOMIC_STORE (stored_val) with
6672 *addr = stored_val;
6673 */
6674
6675 static bool
6676 expand_omp_atomic_mutex (basic_block load_bb, basic_block store_bb,
6677 tree addr, tree loaded_val, tree stored_val)
6678 {
6679 gimple_stmt_iterator si;
6680 gassign *stmt;
6681 tree t;
6682
6683 si = gsi_last_bb (load_bb);
6684 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_LOAD);
6685
6686 t = builtin_decl_explicit (BUILT_IN_GOMP_ATOMIC_START);
6687 t = build_call_expr (t, 0);
6688 force_gimple_operand_gsi (&si, t, true, NULL_TREE, true, GSI_SAME_STMT);
6689
6690 stmt = gimple_build_assign (loaded_val, build_simple_mem_ref (addr));
6691 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
6692 gsi_remove (&si, true);
6693
6694 si = gsi_last_bb (store_bb);
6695 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_STORE);
6696
6697 stmt = gimple_build_assign (build_simple_mem_ref (unshare_expr (addr)),
6698 stored_val);
6699 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
6700
6701 t = builtin_decl_explicit (BUILT_IN_GOMP_ATOMIC_END);
6702 t = build_call_expr (t, 0);
6703 force_gimple_operand_gsi (&si, t, true, NULL_TREE, true, GSI_SAME_STMT);
6704 gsi_remove (&si, true);
6705
6706 if (gimple_in_ssa_p (cfun))
6707 update_ssa (TODO_update_ssa_no_phi);
6708 return true;
6709 }
6710
/* Expand an GIMPLE_OMP_ATOMIC statement.  We try to expand
   using expand_omp_atomic_fetch_op.  If it failed, we try to
   call expand_omp_atomic_pipeline, and if it fails too, the
   ultimate fallback is wrapping the operation in a mutex
   (expand_omp_atomic_mutex).  REGION is the atomic region built
   by build_omp_regions_1().  */

static void
expand_omp_atomic (struct omp_region *region)
{
  basic_block load_bb = region->entry, store_bb = region->exit;
  gomp_atomic_load *load = as_a <gomp_atomic_load *> (last_stmt (load_bb));
  gomp_atomic_store *store = as_a <gomp_atomic_store *> (last_stmt (store_bb));
  tree loaded_val = gimple_omp_atomic_load_lhs (load);
  tree addr = gimple_omp_atomic_load_rhs (load);
  tree stored_val = gimple_omp_atomic_store_val (store);
  tree type = TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (addr)));
  HOST_WIDE_INT index;

  /* Make sure the type is one of the supported sizes.  INDEX becomes
     log2 of the byte size; 0..4 covers the 1/2/4/8/16-byte __sync
     builtin variants.  */
  index = tree_to_uhwi (TYPE_SIZE_UNIT (type));
  index = exact_log2 (index);
  if (index >= 0 && index <= 4)
    {
      unsigned int align = TYPE_ALIGN_UNIT (type);

      /* __sync builtins require strict data alignment.  */
      if (exact_log2 (align) >= index)
	{
	  /* Atomic load.  LOADED_VAL == STORED_VAL signals a pure read
	     with no update.  */
	  if (loaded_val == stored_val
	      && (GET_MODE_CLASS (TYPE_MODE (type)) == MODE_INT
		  || GET_MODE_CLASS (TYPE_MODE (type)) == MODE_FLOAT)
	      && GET_MODE_BITSIZE (TYPE_MODE (type)) <= BITS_PER_WORD
	      && expand_omp_atomic_load (load_bb, addr, loaded_val, index))
	    return;

	  /* Atomic store.  Only applies when the store immediately
	     follows the load with nothing in between.  */
	  if ((GET_MODE_CLASS (TYPE_MODE (type)) == MODE_INT
	       || GET_MODE_CLASS (TYPE_MODE (type)) == MODE_FLOAT)
	      && GET_MODE_BITSIZE (TYPE_MODE (type)) <= BITS_PER_WORD
	      && store_bb == single_succ (load_bb)
	      && first_stmt (store_bb) == store
	      && expand_omp_atomic_store (load_bb, addr, loaded_val,
					  stored_val, index))
	    return;

	  /* When possible, use specialized atomic update functions.  */
	  if ((INTEGRAL_TYPE_P (type) || POINTER_TYPE_P (type))
	      && store_bb == single_succ (load_bb)
	      && expand_omp_atomic_fetch_op (load_bb, addr,
					     loaded_val, stored_val, index))
	    return;

	  /* If we don't have specialized __sync builtins, try and implement
	     as a compare and swap loop.  */
	  if (expand_omp_atomic_pipeline (load_bb, store_bb, addr,
					  loaded_val, stored_val, index))
	    return;
	}
    }

  /* The ultimate fallback is wrapping the operation in a mutex.  */
  expand_omp_atomic_mutex (load_bb, store_bb, addr, loaded_val, stored_val);
}
6776
6777 /* Mark the loops inside the kernels region starting at REGION_ENTRY and ending
6778 at REGION_EXIT. */
6779
6780 static void
6781 mark_loops_in_oacc_kernels_region (basic_block region_entry,
6782 basic_block region_exit)
6783 {
6784 struct loop *outer = region_entry->loop_father;
6785 gcc_assert (region_exit == NULL || outer == region_exit->loop_father);
6786
6787 /* Don't parallelize the kernels region if it contains more than one outer
6788 loop. */
6789 unsigned int nr_outer_loops = 0;
6790 struct loop *single_outer = NULL;
6791 for (struct loop *loop = outer->inner; loop != NULL; loop = loop->next)
6792 {
6793 gcc_assert (loop_outer (loop) == outer);
6794
6795 if (!dominated_by_p (CDI_DOMINATORS, loop->header, region_entry))
6796 continue;
6797
6798 if (region_exit != NULL
6799 && dominated_by_p (CDI_DOMINATORS, loop->header, region_exit))
6800 continue;
6801
6802 nr_outer_loops++;
6803 single_outer = loop;
6804 }
6805 if (nr_outer_loops != 1)
6806 return;
6807
6808 for (struct loop *loop = single_outer->inner;
6809 loop != NULL;
6810 loop = loop->inner)
6811 if (loop->next)
6812 return;
6813
6814 /* Mark the loops in the region. */
6815 for (struct loop *loop = single_outer; loop != NULL; loop = loop->inner)
6816 loop->in_oacc_kernels_region = true;
6817 }
6818
/* Types used to pass grid and workgroup sizes to kernel invocation.  */

struct GTY(()) grid_launch_attributes_trees
{
  /* Array type uint32_t[3], one element per possible grid dimension.  */
  tree kernel_dim_array_type;
  /* FIELD_DECL "ndim": number of dimensions actually used.  */
  tree kernel_lattrs_dimnum_decl;
  /* FIELD_DECL "grid_size": per-dimension grid sizes
     (kernel_dim_array_type).  */
  tree kernel_lattrs_grid_decl;
  /* FIELD_DECL "group_size": per-dimension work-group sizes
     (kernel_dim_array_type).  */
  tree kernel_lattrs_group_decl;
  /* The RECORD_TYPE "__gomp_kernel_launch_attributes" combining the
     fields above.  */
  tree kernel_launch_attributes_type;
};

/* Lazily allocated, GC-rooted singleton holding the trees above; built by
   grid_create_kernel_launch_attr_types.  */
static GTY(()) struct grid_launch_attributes_trees *grid_attr_trees;
6831
/* Create types used to pass kernel launch attributes to target.  */

static void
grid_create_kernel_launch_attr_types (void)
{
  /* The trees are built once per compilation and cached.  */
  if (grid_attr_trees)
    return;
  grid_attr_trees = ggc_alloc <grid_launch_attributes_trees> ();

  /* uint32_t[3]: index type 0..2, one slot per grid dimension.  */
  tree dim_arr_index_type
    = build_index_type (build_int_cst (integer_type_node, 2));
  grid_attr_trees->kernel_dim_array_type
    = build_array_type (uint32_type_node, dim_arr_index_type);

  grid_attr_trees->kernel_launch_attributes_type = make_node (RECORD_TYPE);
  grid_attr_trees->kernel_lattrs_dimnum_decl
    = build_decl (BUILTINS_LOCATION, FIELD_DECL, get_identifier ("ndim"),
		  uint32_type_node);
  DECL_CHAIN (grid_attr_trees->kernel_lattrs_dimnum_decl) = NULL_TREE;

  grid_attr_trees->kernel_lattrs_grid_decl
    = build_decl (BUILTINS_LOCATION, FIELD_DECL, get_identifier ("grid_size"),
		  grid_attr_trees->kernel_dim_array_type);
  DECL_CHAIN (grid_attr_trees->kernel_lattrs_grid_decl)
    = grid_attr_trees->kernel_lattrs_dimnum_decl;
  grid_attr_trees->kernel_lattrs_group_decl
    = build_decl (BUILTINS_LOCATION, FIELD_DECL, get_identifier ("group_size"),
		  grid_attr_trees->kernel_dim_array_type);
  DECL_CHAIN (grid_attr_trees->kernel_lattrs_group_decl)
    = grid_attr_trees->kernel_lattrs_grid_decl;
  /* The DECL_CHAIN links run last-to-first, so the final field is passed
     to finish_builtin_struct, which lays out the record.  */
  finish_builtin_struct (grid_attr_trees->kernel_launch_attributes_type,
			 "__gomp_kernel_launch_attributes",
			 grid_attr_trees->kernel_lattrs_group_decl, NULL_TREE);
}
6866
6867 /* Insert before the current statement in GSI a store of VALUE to INDEX of
6868 array (of type kernel_dim_array_type) FLD_DECL of RANGE_VAR. VALUE must be
6869 of type uint32_type_node. */
6870
6871 static void
6872 grid_insert_store_range_dim (gimple_stmt_iterator *gsi, tree range_var,
6873 tree fld_decl, int index, tree value)
6874 {
6875 tree ref = build4 (ARRAY_REF, uint32_type_node,
6876 build3 (COMPONENT_REF,
6877 grid_attr_trees->kernel_dim_array_type,
6878 range_var, fld_decl, NULL_TREE),
6879 build_int_cst (integer_type_node, index),
6880 NULL_TREE, NULL_TREE);
6881 gsi_insert_before (gsi, gimple_build_assign (ref, value), GSI_SAME_STMT);
6882 }
6883
/* Return a tree representation of a pointer to a structure with grid and
   work-group size information.  Statements filling that information will be
   inserted before GSI, TGT_STMT is the target statement which has the
   necessary information in it.  */

static tree
grid_get_kernel_launch_attributes (gimple_stmt_iterator *gsi,
				   gomp_target *tgt_stmt)
{
  grid_create_kernel_launch_attr_types ();
  tree lattrs = create_tmp_var (grid_attr_trees->kernel_launch_attributes_type,
				"__kernel_launch_attrs");

  /* Walk the _griddim_ clauses, storing grid and group sizes into the
     temporary and tracking the highest dimension seen.  */
  unsigned max_dim = 0;
  for (tree clause = gimple_omp_target_clauses (tgt_stmt);
       clause;
       clause = OMP_CLAUSE_CHAIN (clause))
    {
      if (OMP_CLAUSE_CODE (clause) != OMP_CLAUSE__GRIDDIM_)
	continue;

      unsigned dim = OMP_CLAUSE__GRIDDIM__DIMENSION (clause);
      max_dim = MAX (dim, max_dim);

      grid_insert_store_range_dim (gsi, lattrs,
				   grid_attr_trees->kernel_lattrs_grid_decl,
				   dim, OMP_CLAUSE__GRIDDIM__SIZE (clause));
      grid_insert_store_range_dim (gsi, lattrs,
				   grid_attr_trees->kernel_lattrs_group_decl,
				   dim, OMP_CLAUSE__GRIDDIM__GROUP (clause));
    }

  /* Store the dimension count (highest dimension index + 1) into the
     "ndim" field.  */
  tree dimref = build3 (COMPONENT_REF, uint32_type_node, lattrs,
			grid_attr_trees->kernel_lattrs_dimnum_decl, NULL_TREE);
  gcc_checking_assert (max_dim <= 2);
  tree dimensions = build_int_cstu (uint32_type_node, max_dim + 1);
  gsi_insert_before (gsi, gimple_build_assign (dimref, dimensions),
		     GSI_SAME_STMT);
  /* The address of the temporary escapes into the launch argument array.  */
  TREE_ADDRESSABLE (lattrs) = 1;
  return build_fold_addr_expr (lattrs);
}
6925
6926 /* Build target argument identifier from the DEVICE identifier, value
6927 identifier ID and whether the element also has a SUBSEQUENT_PARAM. */
6928
6929 static tree
6930 get_target_argument_identifier_1 (int device, bool subseqent_param, int id)
6931 {
6932 tree t = build_int_cst (integer_type_node, device);
6933 if (subseqent_param)
6934 t = fold_build2 (BIT_IOR_EXPR, integer_type_node, t,
6935 build_int_cst (integer_type_node,
6936 GOMP_TARGET_ARG_SUBSEQUENT_PARAM));
6937 t = fold_build2 (BIT_IOR_EXPR, integer_type_node, t,
6938 build_int_cst (integer_type_node, id));
6939 return t;
6940 }
6941
6942 /* Like above but return it in type that can be directly stored as an element
6943 of the argument array. */
6944
6945 static tree
6946 get_target_argument_identifier (int device, bool subseqent_param, int id)
6947 {
6948 tree t = get_target_argument_identifier_1 (device, subseqent_param, id);
6949 return fold_convert (ptr_type_node, t);
6950 }
6951
6952 /* Return a target argument consisting of DEVICE identifier, value identifier
6953 ID, and the actual VALUE. */
6954
6955 static tree
6956 get_target_argument_value (gimple_stmt_iterator *gsi, int device, int id,
6957 tree value)
6958 {
6959 tree t = fold_build2 (LSHIFT_EXPR, integer_type_node,
6960 fold_convert (integer_type_node, value),
6961 build_int_cst (unsigned_type_node,
6962 GOMP_TARGET_ARG_VALUE_SHIFT));
6963 t = fold_build2 (BIT_IOR_EXPR, integer_type_node, t,
6964 get_target_argument_identifier_1 (device, false, id));
6965 t = fold_convert (ptr_type_node, t);
6966 return force_gimple_operand_gsi (gsi, t, true, NULL, true, GSI_SAME_STMT);
6967 }
6968
6969 /* If VALUE is an integer constant greater than -2^15 and smaller than 2^15,
6970 push one argument to ARGS with both the DEVICE, ID and VALUE embedded in it,
6971 otherwise push an identifier (with DEVICE and ID) and the VALUE in two
6972 arguments. */
6973
6974 static void
6975 push_target_argument_according_to_value (gimple_stmt_iterator *gsi, int device,
6976 int id, tree value, vec <tree> *args)
6977 {
6978 if (tree_fits_shwi_p (value)
6979 && tree_to_shwi (value) > -(1 << 15)
6980 && tree_to_shwi (value) < (1 << 15))
6981 args->quick_push (get_target_argument_value (gsi, device, id, value));
6982 else
6983 {
6984 args->quick_push (get_target_argument_identifier (device, true, id));
6985 value = fold_convert (ptr_type_node, value);
6986 value = force_gimple_operand_gsi (gsi, value, true, NULL, true,
6987 GSI_SAME_STMT);
6988 args->quick_push (value);
6989 }
6990 }
6991
/* Create an array of arguments that is then passed to GOMP_target.  */

static tree
get_target_arguments (gimple_stmt_iterator *gsi, gomp_target *tgt_stmt)
{
  auto_vec <tree, 6> args;
  tree clauses = gimple_omp_target_clauses (tgt_stmt);
  /* num_teams argument; -1 means "not specified, let the runtime pick".  */
  tree t, c = omp_find_clause (clauses, OMP_CLAUSE_NUM_TEAMS);
  if (c)
    t = OMP_CLAUSE_NUM_TEAMS_EXPR (c);
  else
    t = integer_minus_one_node;
  push_target_argument_according_to_value (gsi, GOMP_TARGET_ARG_DEVICE_ALL,
					   GOMP_TARGET_ARG_NUM_TEAMS, t, &args);

  /* thread_limit argument; same -1 default as above.  */
  c = omp_find_clause (clauses, OMP_CLAUSE_THREAD_LIMIT);
  if (c)
    t = OMP_CLAUSE_THREAD_LIMIT_EXPR (c);
  else
    t = integer_minus_one_node;
  push_target_argument_according_to_value (gsi, GOMP_TARGET_ARG_DEVICE_ALL,
					   GOMP_TARGET_ARG_THREAD_LIMIT, t,
					   &args);

  /* Add HSA-specific grid sizes, if available.  */
  if (omp_find_clause (gimple_omp_target_clauses (tgt_stmt),
		       OMP_CLAUSE__GRIDDIM_))
    {
      int id = GOMP_TARGET_ARG_HSA_KERNEL_ATTRIBUTES;
      t = get_target_argument_identifier (GOMP_DEVICE_HSA, true, id);
      args.quick_push (t);
      args.quick_push (grid_get_kernel_launch_attributes (gsi, tgt_stmt));
    }

  /* Produce more, perhaps device specific, arguments here.  */

  /* Materialize the collected arguments into a NULL-terminated array of
     pointers that is passed to the runtime.  */
  tree argarray = create_tmp_var (build_array_type_nelts (ptr_type_node,
							  args.length () + 1),
				  ".omp_target_args");
  for (unsigned i = 0; i < args.length (); i++)
    {
      tree ref = build4 (ARRAY_REF, ptr_type_node, argarray,
			 build_int_cst (integer_type_node, i),
			 NULL_TREE, NULL_TREE);
      gsi_insert_before (gsi, gimple_build_assign (ref, args[i]),
			 GSI_SAME_STMT);
    }
  /* Terminating NULL element.  */
  tree ref = build4 (ARRAY_REF, ptr_type_node, argarray,
		     build_int_cst (integer_type_node, args.length ()),
		     NULL_TREE, NULL_TREE);
  gsi_insert_before (gsi, gimple_build_assign (ref, null_pointer_node),
		     GSI_SAME_STMT);
  TREE_ADDRESSABLE (argarray) = 1;
  return build_fold_addr_expr (argarray);
}
7047
/* Expand the GIMPLE_OMP_TARGET starting at REGION.  */

static void
expand_omp_target (struct omp_region *region)
{
  basic_block entry_bb, exit_bb, new_bb;
  struct function *child_cfun;
  tree child_fn, block, t;
  gimple_stmt_iterator gsi;
  gomp_target *entry_stmt;
  gimple *stmt;
  edge e;
  bool offloaded, data_region;

  entry_stmt = as_a <gomp_target *> (last_stmt (region->entry));
  new_bb = region->entry;

  /* Classify the construct: executable offload regions get their body
     outlined into a child function; data regions only emit runtime
     calls.  */
  offloaded = is_gimple_omp_offloaded (entry_stmt);
  switch (gimple_omp_target_kind (entry_stmt))
    {
    case GF_OMP_TARGET_KIND_REGION:
    case GF_OMP_TARGET_KIND_UPDATE:
    case GF_OMP_TARGET_KIND_ENTER_DATA:
    case GF_OMP_TARGET_KIND_EXIT_DATA:
    case GF_OMP_TARGET_KIND_OACC_PARALLEL:
    case GF_OMP_TARGET_KIND_OACC_KERNELS:
    case GF_OMP_TARGET_KIND_OACC_UPDATE:
    case GF_OMP_TARGET_KIND_OACC_ENTER_EXIT_DATA:
    case GF_OMP_TARGET_KIND_OACC_DECLARE:
      data_region = false;
      break;
    case GF_OMP_TARGET_KIND_DATA:
    case GF_OMP_TARGET_KIND_OACC_DATA:
    case GF_OMP_TARGET_KIND_OACC_HOST_DATA:
      data_region = true;
      break;
    default:
      gcc_unreachable ();
    }

  child_fn = NULL_TREE;
  child_cfun = NULL;
  if (offloaded)
    {
      child_fn = gimple_omp_target_child_fn (entry_stmt);
      child_cfun = DECL_STRUCT_FUNCTION (child_fn);
    }

  /* Supported by expand_omp_taskreg, but not here.  */
  if (child_cfun != NULL)
    gcc_checking_assert (!child_cfun->cfg);
  gcc_checking_assert (!gimple_in_ssa_p (cfun));

  entry_bb = region->entry;
  exit_bb = region->exit;

  if (gimple_omp_target_kind (entry_stmt) == GF_OMP_TARGET_KIND_OACC_KERNELS)
    {
      mark_loops_in_oacc_kernels_region (region->entry, region->exit);

      /* Further down, both OpenACC kernels and OpenACC parallel constructs
	 will be mapped to BUILT_IN_GOACC_PARALLEL, and to distinguish the
	 two, there is an "oacc kernels" attribute set for OpenACC kernels.  */
      DECL_ATTRIBUTES (child_fn)
	= tree_cons (get_identifier ("oacc kernels"),
		     NULL_TREE, DECL_ATTRIBUTES (child_fn));
    }

  if (offloaded)
    {
      unsigned srcidx, dstidx, num;

      /* If the offloading region needs data sent from the parent
	 function, then the very first statement (except possible
	 tree profile counter updates) of the offloading body
	 is a copy assignment .OMP_DATA_I = &.OMP_DATA_O.  Since
	 &.OMP_DATA_O is passed as an argument to the child function,
	 we need to replace it with the argument as seen by the child
	 function.

	 In most cases, this will end up being the identity assignment
	 .OMP_DATA_I = .OMP_DATA_I.  However, if the offloading body had
	 a function call that has been inlined, the original PARM_DECL
	 .OMP_DATA_I may have been converted into a different local
	 variable.  In which case, we need to keep the assignment.  */
      tree data_arg = gimple_omp_target_data_arg (entry_stmt);
      if (data_arg)
	{
	  basic_block entry_succ_bb = single_succ (entry_bb);
	  gimple_stmt_iterator gsi;
	  tree arg;
	  gimple *tgtcopy_stmt = NULL;
	  tree sender = TREE_VEC_ELT (data_arg, 0);

	  /* Locate the .OMP_DATA_I = &.OMP_DATA_O copy statement.  */
	  for (gsi = gsi_start_bb (entry_succ_bb); ; gsi_next (&gsi))
	    {
	      gcc_assert (!gsi_end_p (gsi));
	      stmt = gsi_stmt (gsi);
	      if (gimple_code (stmt) != GIMPLE_ASSIGN)
		continue;

	      if (gimple_num_ops (stmt) == 2)
		{
		  tree arg = gimple_assign_rhs1 (stmt);

		  /* We're ignoring the subcode because we're
		     effectively doing a STRIP_NOPS.  */

		  if (TREE_CODE (arg) == ADDR_EXPR
		      && TREE_OPERAND (arg, 0) == sender)
		    {
		      tgtcopy_stmt = stmt;
		      break;
		    }
		}
	    }

	  gcc_assert (tgtcopy_stmt != NULL);
	  arg = DECL_ARGUMENTS (child_fn);

	  gcc_assert (gimple_assign_lhs (tgtcopy_stmt) == arg);
	  gsi_remove (&gsi, true);
	}

      /* Declare local variables needed in CHILD_CFUN.  */
      block = DECL_INITIAL (child_fn);
      BLOCK_VARS (block) = vec2chain (child_cfun->local_decls);
      /* The gimplifier could record temporaries in the offloading block
	 rather than in containing function's local_decls chain,
	 which would mean cgraph missed finalizing them.  Do it now.  */
      for (t = BLOCK_VARS (block); t; t = DECL_CHAIN (t))
	if (VAR_P (t) && TREE_STATIC (t) && !DECL_EXTERNAL (t))
	  varpool_node::finalize_decl (t);
      DECL_SAVED_TREE (child_fn) = NULL;
      /* We'll create a CFG for child_fn, so no gimple body is needed.  */
      gimple_set_body (child_fn, NULL);
      TREE_USED (block) = 1;

      /* Reset DECL_CONTEXT on function arguments.  */
      for (t = DECL_ARGUMENTS (child_fn); t; t = DECL_CHAIN (t))
	DECL_CONTEXT (t) = child_fn;

      /* Split ENTRY_BB at GIMPLE_*,
	 so that it can be moved to the child function.  */
      gsi = gsi_last_bb (entry_bb);
      stmt = gsi_stmt (gsi);
      gcc_assert (stmt
		  && gimple_code (stmt) == gimple_code (entry_stmt));
      e = split_block (entry_bb, stmt);
      gsi_remove (&gsi, true);
      entry_bb = e->dest;
      single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;

      /* Convert GIMPLE_OMP_RETURN into a RETURN_EXPR.  */
      if (exit_bb)
	{
	  gsi = gsi_last_bb (exit_bb);
	  gcc_assert (!gsi_end_p (gsi)
		      && gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_RETURN);
	  stmt = gimple_build_return (NULL);
	  gsi_insert_after (&gsi, stmt, GSI_SAME_STMT);
	  gsi_remove (&gsi, true);
	}

      /* Make sure to generate early debug for the function before
	 outlining anything.  */
      if (! gimple_in_ssa_p (cfun))
	(*debug_hooks->early_global_decl) (cfun->decl);

      /* Move the offloading region into CHILD_CFUN.  */

      block = gimple_block (entry_stmt);

      new_bb = move_sese_region_to_fn (child_cfun, entry_bb, exit_bb, block);
      if (exit_bb)
	single_succ_edge (new_bb)->flags = EDGE_FALLTHRU;
      /* When the OMP expansion process cannot guarantee an up-to-date
	 loop tree arrange for the child function to fixup loops.  */
      if (loops_state_satisfies_p (LOOPS_NEED_FIXUP))
	child_cfun->x_current_loops->state |= LOOPS_NEED_FIXUP;

      /* Remove non-local VAR_DECLs from child_cfun->local_decls list.  */
      num = vec_safe_length (child_cfun->local_decls);
      for (srcidx = 0, dstidx = 0; srcidx < num; srcidx++)
	{
	  t = (*child_cfun->local_decls)[srcidx];
	  if (DECL_CONTEXT (t) == cfun->decl)
	    continue;
	  if (srcidx != dstidx)
	    (*child_cfun->local_decls)[dstidx] = t;
	  dstidx++;
	}
      if (dstidx != num)
	vec_safe_truncate (child_cfun->local_decls, dstidx);

      /* Inform the callgraph about the new function.  */
      child_cfun->curr_properties = cfun->curr_properties;
      child_cfun->has_simduid_loops |= cfun->has_simduid_loops;
      child_cfun->has_force_vectorize_loops |= cfun->has_force_vectorize_loops;
      cgraph_node *node = cgraph_node::get_create (child_fn);
      node->parallelized_function = 1;
      cgraph_node::add_new_function (child_fn, true);

      /* Add the new function to the offload table.  */
      if (ENABLE_OFFLOADING)
	vec_safe_push (offload_funcs, child_fn);

      bool need_asm = DECL_ASSEMBLER_NAME_SET_P (current_function_decl)
		      && !DECL_ASSEMBLER_NAME_SET_P (child_fn);

      /* Fix the callgraph edges for child_cfun.  Those for cfun will be
	 fixed in a following pass.  */
      push_cfun (child_cfun);
      if (need_asm)
	assign_assembler_name_if_needed (child_fn);
      cgraph_edge::rebuild_edges ();

      /* Some EH regions might become dead, see PR34608.  If
	 pass_cleanup_cfg isn't the first pass to happen with the
	 new child, these dead EH edges might cause problems.
	 Clean them up now.  */
      if (flag_exceptions)
	{
	  basic_block bb;
	  bool changed = false;

	  FOR_EACH_BB_FN (bb, cfun)
	    changed |= gimple_purge_dead_eh_edges (bb);
	  if (changed)
	    cleanup_tree_cfg ();
	}
      if (flag_checking && !loops_state_satisfies_p (LOOPS_NEED_FIXUP))
	verify_loop_structure ();
      pop_cfun ();

      if (dump_file && !gimple_in_ssa_p (cfun))
	{
	  omp_any_child_fn_dumped = true;
	  dump_function_header (dump_file, child_fn, dump_flags);
	  dump_function_to_file (child_fn, dump_file, dump_flags);
	}
    }

  /* Emit a library call to launch the offloading region, or do data
     transfers.  */
  tree t1, t2, t3, t4, device, cond, depend, c, clauses;
  enum built_in_function start_ix;
  location_t clause_loc;
  unsigned int flags_i = 0;

  /* Select the libgomp/libgoacc entry point for this construct kind.  */
  switch (gimple_omp_target_kind (entry_stmt))
    {
    case GF_OMP_TARGET_KIND_REGION:
      start_ix = BUILT_IN_GOMP_TARGET;
      break;
    case GF_OMP_TARGET_KIND_DATA:
      start_ix = BUILT_IN_GOMP_TARGET_DATA;
      break;
    case GF_OMP_TARGET_KIND_UPDATE:
      start_ix = BUILT_IN_GOMP_TARGET_UPDATE;
      break;
    case GF_OMP_TARGET_KIND_ENTER_DATA:
      start_ix = BUILT_IN_GOMP_TARGET_ENTER_EXIT_DATA;
      break;
    case GF_OMP_TARGET_KIND_EXIT_DATA:
      start_ix = BUILT_IN_GOMP_TARGET_ENTER_EXIT_DATA;
      flags_i |= GOMP_TARGET_FLAG_EXIT_DATA;
      break;
    case GF_OMP_TARGET_KIND_OACC_KERNELS:
    case GF_OMP_TARGET_KIND_OACC_PARALLEL:
      start_ix = BUILT_IN_GOACC_PARALLEL;
      break;
    case GF_OMP_TARGET_KIND_OACC_DATA:
    case GF_OMP_TARGET_KIND_OACC_HOST_DATA:
      start_ix = BUILT_IN_GOACC_DATA_START;
      break;
    case GF_OMP_TARGET_KIND_OACC_UPDATE:
      start_ix = BUILT_IN_GOACC_UPDATE;
      break;
    case GF_OMP_TARGET_KIND_OACC_ENTER_EXIT_DATA:
      start_ix = BUILT_IN_GOACC_ENTER_EXIT_DATA;
      break;
    case GF_OMP_TARGET_KIND_OACC_DECLARE:
      start_ix = BUILT_IN_GOACC_DECLARE;
      break;
    default:
      gcc_unreachable ();
    }

  clauses = gimple_omp_target_clauses (entry_stmt);

  /* By default, the value of DEVICE is GOMP_DEVICE_ICV (let runtime
     library choose) and there is no conditional.  */
  cond = NULL_TREE;
  device = build_int_cst (integer_type_node, GOMP_DEVICE_ICV);

  c = omp_find_clause (clauses, OMP_CLAUSE_IF);
  if (c)
    cond = OMP_CLAUSE_IF_EXPR (c);

  c = omp_find_clause (clauses, OMP_CLAUSE_DEVICE);
  if (c)
    {
      /* Even if we pass it to all library function calls, it is currently only
	 defined/used for the OpenMP target ones.  */
      gcc_checking_assert (start_ix == BUILT_IN_GOMP_TARGET
			   || start_ix == BUILT_IN_GOMP_TARGET_DATA
			   || start_ix == BUILT_IN_GOMP_TARGET_UPDATE
			   || start_ix == BUILT_IN_GOMP_TARGET_ENTER_EXIT_DATA);

      device = OMP_CLAUSE_DEVICE_ID (c);
      clause_loc = OMP_CLAUSE_LOCATION (c);
    }
  else
    clause_loc = gimple_location (entry_stmt);

  c = omp_find_clause (clauses, OMP_CLAUSE_NOWAIT);
  if (c)
    flags_i |= GOMP_TARGET_FLAG_NOWAIT;

  /* Ensure 'device' is of the correct type.  */
  device = fold_convert_loc (clause_loc, integer_type_node, device);

  /* If we found the clause 'if (cond)', build
     (cond ? device : GOMP_DEVICE_HOST_FALLBACK).  */
  if (cond)
    {
      cond = gimple_boolify (cond);

      basic_block cond_bb, then_bb, else_bb;
      edge e;
      tree tmp_var;

      /* Build a diamond: COND_BB tests the condition; THEN_BB keeps the
	 selected device, ELSE_BB forces host fallback.  */
      tmp_var = create_tmp_var (TREE_TYPE (device));
      if (offloaded)
	e = split_block_after_labels (new_bb);
      else
	{
	  /* For non-offloaded constructs split just before the target
	     statement itself, which is the last-but-one statement.  */
	  gsi = gsi_last_bb (new_bb);
	  gsi_prev (&gsi);
	  e = split_block (new_bb, gsi_stmt (gsi));
	}
      cond_bb = e->src;
      new_bb = e->dest;
      remove_edge (e);

      then_bb = create_empty_bb (cond_bb);
      else_bb = create_empty_bb (then_bb);
      set_immediate_dominator (CDI_DOMINATORS, then_bb, cond_bb);
      set_immediate_dominator (CDI_DOMINATORS, else_bb, cond_bb);

      stmt = gimple_build_cond_empty (cond);
      gsi = gsi_last_bb (cond_bb);
      gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);

      gsi = gsi_start_bb (then_bb);
      stmt = gimple_build_assign (tmp_var, device);
      gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);

      gsi = gsi_start_bb (else_bb);
      stmt = gimple_build_assign (tmp_var,
				  build_int_cst (integer_type_node,
						 GOMP_DEVICE_HOST_FALLBACK));
      gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);

      make_edge (cond_bb, then_bb, EDGE_TRUE_VALUE);
      make_edge (cond_bb, else_bb, EDGE_FALSE_VALUE);
      add_bb_to_loop (then_bb, cond_bb->loop_father);
      add_bb_to_loop (else_bb, cond_bb->loop_father);
      make_edge (then_bb, new_bb, EDGE_FALLTHRU);
      make_edge (else_bb, new_bb, EDGE_FALLTHRU);

      device = tmp_var;
      gsi = gsi_last_bb (new_bb);
    }
  else
    {
      gsi = gsi_last_bb (new_bb);
      device = force_gimple_operand_gsi (&gsi, device, true, NULL_TREE,
					 true, GSI_SAME_STMT);
    }

  /* Build the standard leading arguments: number of mapped entries and
     the three mapping arrays (addresses, sizes, kinds).  */
  t = gimple_omp_target_data_arg (entry_stmt);
  if (t == NULL)
    {
      t1 = size_zero_node;
      t2 = build_zero_cst (ptr_type_node);
      t3 = t2;
      t4 = t2;
    }
  else
    {
      t1 = TYPE_MAX_VALUE (TYPE_DOMAIN (TREE_TYPE (TREE_VEC_ELT (t, 1))));
      t1 = size_binop (PLUS_EXPR, t1, size_int (1));
      t2 = build_fold_addr_expr (TREE_VEC_ELT (t, 0));
      t3 = build_fold_addr_expr (TREE_VEC_ELT (t, 1));
      t4 = build_fold_addr_expr (TREE_VEC_ELT (t, 2));
    }

  gimple *g;
  bool tagging = false;
  /* The maximum number used by any start_ix, without varargs.  */
  auto_vec<tree, 11> args;
  args.quick_push (device);
  if (offloaded)
    args.quick_push (build_fold_addr_expr (child_fn));
  args.quick_push (t1);
  args.quick_push (t2);
  args.quick_push (t3);
  args.quick_push (t4);
  /* Append the entry-point-specific trailing arguments.  */
  switch (start_ix)
    {
    case BUILT_IN_GOACC_DATA_START:
    case BUILT_IN_GOACC_DECLARE:
    case BUILT_IN_GOMP_TARGET_DATA:
      break;
    case BUILT_IN_GOMP_TARGET:
    case BUILT_IN_GOMP_TARGET_UPDATE:
    case BUILT_IN_GOMP_TARGET_ENTER_EXIT_DATA:
      args.quick_push (build_int_cst (unsigned_type_node, flags_i));
      c = omp_find_clause (clauses, OMP_CLAUSE_DEPEND);
      if (c)
	depend = OMP_CLAUSE_DECL (c);
      else
	depend = build_int_cst (ptr_type_node, 0);
      args.quick_push (depend);
      if (start_ix == BUILT_IN_GOMP_TARGET)
	args.quick_push (get_target_arguments (&gsi, entry_stmt));
      break;
    case BUILT_IN_GOACC_PARALLEL:
      oacc_set_fn_attrib (child_fn, clauses, &args);
      tagging = true;
      /* FALLTHRU */
    case BUILT_IN_GOACC_ENTER_EXIT_DATA:
    case BUILT_IN_GOACC_UPDATE:
      {
	tree t_async = NULL_TREE;

	/* If present, use the value specified by the respective
	   clause, making sure that is of the correct type.  */
	c = omp_find_clause (clauses, OMP_CLAUSE_ASYNC);
	if (c)
	  t_async = fold_convert_loc (OMP_CLAUSE_LOCATION (c),
				      integer_type_node,
				      OMP_CLAUSE_ASYNC_EXPR (c));
	else if (!tagging)
	  /* Default values for t_async.  */
	  t_async = fold_convert_loc (gimple_location (entry_stmt),
				      integer_type_node,
				      build_int_cst (integer_type_node,
						     GOMP_ASYNC_SYNC));
	if (tagging && t_async)
	  {
	    unsigned HOST_WIDE_INT i_async = GOMP_LAUNCH_OP_MAX;

	    if (TREE_CODE (t_async) == INTEGER_CST)
	      {
		/* See if we can pack the async arg in to the tag's
		   operand.  */
		i_async = TREE_INT_CST_LOW (t_async);
		if (i_async < GOMP_LAUNCH_OP_MAX)
		  t_async = NULL_TREE;
		else
		  i_async = GOMP_LAUNCH_OP_MAX;
	      }
	    args.safe_push (oacc_launch_pack (GOMP_LAUNCH_ASYNC, NULL_TREE,
					      i_async));
	  }
	if (t_async)
	  args.safe_push (t_async);

	/* Save the argument index, and ... */
	unsigned t_wait_idx = args.length ();
	unsigned num_waits = 0;
	c = omp_find_clause (clauses, OMP_CLAUSE_WAIT);
	if (!tagging || c)
	  /* ... push a placeholder.  */
	  args.safe_push (integer_zero_node);

	for (; c; c = OMP_CLAUSE_CHAIN (c))
	  if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_WAIT)
	    {
	      args.safe_push (fold_convert_loc (OMP_CLAUSE_LOCATION (c),
						integer_type_node,
						OMP_CLAUSE_WAIT_EXPR (c)));
	      num_waits++;
	    }

	if (!tagging || num_waits)
	  {
	    tree len;

	    /* Now that we know the number, update the placeholder.  */
	    if (tagging)
	      len = oacc_launch_pack (GOMP_LAUNCH_WAIT, NULL_TREE, num_waits);
	    else
	      len = build_int_cst (integer_type_node, num_waits);
	    len = fold_convert_loc (gimple_location (entry_stmt),
				    unsigned_type_node, len);
	    args[t_wait_idx] = len;
	  }
      }
      break;
    default:
      gcc_unreachable ();
    }
  if (tagging)
    /* Push terminal marker - zero.  */
    args.safe_push (oacc_launch_pack (0, NULL_TREE, 0));

  g = gimple_build_call_vec (builtin_decl_explicit (start_ix), args);
  gimple_set_location (g, gimple_location (entry_stmt));
  gsi_insert_before (&gsi, g, GSI_SAME_STMT);
  if (!offloaded)
    {
      /* For non-offloaded constructs the GIMPLE_OMP_TARGET statement
	 itself is replaced by the call; remove it.  */
      g = gsi_stmt (gsi);
      gcc_assert (g && gimple_code (g) == GIMPLE_OMP_TARGET);
      gsi_remove (&gsi, true);
    }
  if (data_region && region->exit)
    {
      /* Data regions also carry a GIMPLE_OMP_RETURN at the exit that is
	 no longer needed.  */
      gsi = gsi_last_bb (region->exit);
      g = gsi_stmt (gsi);
      gcc_assert (g && gimple_code (g) == GIMPLE_OMP_RETURN);
      gsi_remove (&gsi, true);
    }
}
7575
/* Expand KFOR loop as an HSA gridified kernel, i.e. as a body only with
   iteration variable derived from the thread number.  INTRA_GROUP means this
   is an expansion of a loop iterating over work-items within a separate
   iteration over groups.  */
7580
static void
grid_expand_omp_for_loop (struct omp_region *kfor, bool intra_group)
{
  gimple_stmt_iterator gsi;
  gomp_for *for_stmt = as_a <gomp_for *> (last_stmt (kfor->entry));
  gcc_checking_assert (gimple_omp_for_kind (for_stmt)
		       == GF_OMP_FOR_KIND_GRID_LOOP);
  size_t collapse = gimple_omp_for_collapse (for_stmt);
  struct omp_for_data_loop *loops
    = XALLOCAVEC (struct omp_for_data_loop,
		  gimple_omp_for_collapse (for_stmt));
  struct omp_for_data fd;

  /* The body will run exactly once per work-item; delete the branch edge
     out of the entry block so that only the fall-through edge into the
     loop body remains.  */
  remove_edge (BRANCH_EDGE (kfor->entry));
  basic_block body_bb = FALLTHRU_EDGE (kfor->entry)->dest;

  gcc_assert (kfor->cont);
  omp_extract_for_data (for_stmt, &fd, loops);

  gsi = gsi_start_bb (body_bb);

  /* For each collapsed dimension, emit statements at the start of the body
     initializing the iteration variable V to N1 + ID * STEP, where ID is
     the HSA work-group id, work-item id or absolute work-item id for that
     dimension, depending on what kind of grid loop this is.  */
  for (size_t dim = 0; dim < collapse; dim++)
    {
      tree type, itype;
      itype = type = TREE_TYPE (fd.loops[dim].v);
      if (POINTER_TYPE_P (type))
	itype = signed_type_for (type);

      tree n1 = fd.loops[dim].n1;
      tree step = fd.loops[dim].step;
      n1 = force_gimple_operand_gsi (&gsi, fold_convert (type, n1),
				     true, NULL_TREE, true, GSI_SAME_STMT);
      step = force_gimple_operand_gsi (&gsi, fold_convert (itype, step),
				       true, NULL_TREE, true, GSI_SAME_STMT);
      tree threadid;
      if (gimple_omp_for_grid_group_iter (for_stmt))
	{
	  /* An iteration over groups never happens within a group.  */
	  gcc_checking_assert (!intra_group);
	  threadid = build_call_expr (builtin_decl_explicit
				      (BUILT_IN_HSA_WORKGROUPID), 1,
				      build_int_cstu (unsigned_type_node, dim));
	}
      else if (intra_group)
	threadid = build_call_expr (builtin_decl_explicit
				    (BUILT_IN_HSA_WORKITEMID), 1,
				    build_int_cstu (unsigned_type_node, dim));
      else
	threadid = build_call_expr (builtin_decl_explicit
				    (BUILT_IN_HSA_WORKITEMABSID), 1,
				    build_int_cstu (unsigned_type_node, dim));
      threadid = fold_convert (itype, threadid);
      threadid = force_gimple_operand_gsi (&gsi, threadid, true, NULL_TREE,
					   true, GSI_SAME_STMT);

      /* Assign V = N1 + THREADID * STEP, using pointer arithmetic when V
	 is a pointer.  */
      tree startvar = fd.loops[dim].v;
      tree t = fold_build2 (MULT_EXPR, itype, threadid, step);
      if (POINTER_TYPE_P (type))
	t = fold_build_pointer_plus (n1, t);
      else
	t = fold_build2 (PLUS_EXPR, type, t, n1);
      t = fold_convert (type, t);
      t = force_gimple_operand_gsi (&gsi, t,
				    DECL_P (startvar)
				    && TREE_ADDRESSABLE (startvar),
				    NULL_TREE, true, GSI_SAME_STMT);
      gassign *assign_stmt = gimple_build_assign (startvar, t);
      gsi_insert_before (&gsi, assign_stmt, GSI_SAME_STMT);
    }
  /* Remove the omp for statement.  */
  gsi = gsi_last_bb (kfor->entry);
  gsi_remove (&gsi, true);

  /* Remove the GIMPLE_OMP_CONTINUE statement.  */
  gsi = gsi_last_bb (kfor->cont);
  gcc_assert (!gsi_end_p (gsi)
	      && gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_CONTINUE);
  gsi_remove (&gsi, true);

  /* Replace the GIMPLE_OMP_RETURN with a barrier, if necessary.  */
  gsi = gsi_last_bb (kfor->exit);
  gcc_assert (!gsi_end_p (gsi)
	      && gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_RETURN);
  if (intra_group)
    gsi_insert_before (&gsi, omp_build_barrier (NULL_TREE), GSI_SAME_STMT);
  gsi_remove (&gsi, true);

  /* Fixup the much simpler CFG.  */
  remove_edge (find_edge (kfor->cont, body_bb));

  if (kfor->cont != body_bb)
    set_immediate_dominator (CDI_DOMINATORS, kfor->cont, body_bb);
  set_immediate_dominator (CDI_DOMINATORS, kfor->exit, kfor->cont);
}
7674
/* Structure passed to grid_remap_kernel_arg_accesses so that it can remap
   argument_decls.  */

struct grid_arg_decl_map
{
  tree old_arg;		/* PARM_DECL to be replaced.  */
  tree new_arg;		/* PARM_DECL of the kernel clone to use instead.  */
};
7683
7684 /* Invoked through walk_gimple_op, will remap all PARM_DECLs to the ones
7685 pertaining to kernel function. */
7686
7687 static tree
7688 grid_remap_kernel_arg_accesses (tree *tp, int *walk_subtrees, void *data)
7689 {
7690 struct walk_stmt_info *wi = (struct walk_stmt_info *) data;
7691 struct grid_arg_decl_map *adm = (struct grid_arg_decl_map *) wi->info;
7692 tree t = *tp;
7693
7694 if (t == adm->old_arg)
7695 *tp = adm->new_arg;
7696 *walk_subtrees = !TYPE_P (t) && !DECL_P (t);
7697 return NULL_TREE;
7698 }
7699
/* If TARGET region contains a GRID_BODY kernel region, remove it from the
   TARGET and expand it in HSA gridified kernel fashion.  */

static void
grid_expand_target_grid_body (struct omp_region *target)
{
  if (!hsa_gen_requested_p ())
    return;

  gomp_target *tgt_stmt = as_a <gomp_target *> (last_stmt (target->entry));
  struct omp_region **pp;

  /* Look for a GRID_BODY region directly nested in TARGET.  */
  for (pp = &target->inner; *pp; pp = &(*pp)->next)
    if ((*pp)->type == GIMPLE_OMP_GRID_BODY)
      break;

  struct omp_region *gpukernel = *pp;

  tree orig_child_fndecl = gimple_omp_target_child_fn (tgt_stmt);
  if (!gpukernel)
    {
      /* No gridified kernel: register the plain child function as the HSA
	 kernel, unless this is an OpenACC construct.  */
      /* HSA cannot handle OACC stuff.  */
      if (gimple_omp_target_kind (tgt_stmt) != GF_OMP_TARGET_KIND_REGION)
	return;
      gcc_checking_assert (orig_child_fndecl);
      gcc_assert (!omp_find_clause (gimple_omp_target_clauses (tgt_stmt),
				    OMP_CLAUSE__GRIDDIM_));
      cgraph_node *n = cgraph_node::get (orig_child_fndecl);

      hsa_register_kernel (n);
      return;
    }

  gcc_assert (omp_find_clause (gimple_omp_target_clauses (tgt_stmt),
			       OMP_CLAUSE__GRIDDIM_));
  tree inside_block
    = gimple_block (first_stmt (single_succ (gpukernel->entry)));
  /* Unlink the GRID_BODY region from TARGET and find the grid loop
     inside it.  */
  *pp = gpukernel->next;
  for (pp = &gpukernel->inner; *pp; pp = &(*pp)->next)
    if ((*pp)->type == GIMPLE_OMP_FOR)
      break;

  struct omp_region *kfor = *pp;
  gcc_assert (kfor);
  gomp_for *for_stmt = as_a <gomp_for *> (last_stmt (kfor->entry));
  gcc_assert (gimple_omp_for_kind (for_stmt) == GF_OMP_FOR_KIND_GRID_LOOP);
  *pp = kfor->next;
  if (kfor->inner)
    {
      if (gimple_omp_for_grid_group_iter (for_stmt))
	{
	  /* When the outer loop iterates over groups, expand each nested
	     grid loop as an intra-group loop and unlink its region,
	     keeping NEXT_PP correct across the removal.  */
	  struct omp_region **next_pp;
	  for (pp = &kfor->inner; *pp; pp = next_pp)
	    {
	      next_pp = &(*pp)->next;
	      if ((*pp)->type != GIMPLE_OMP_FOR)
		continue;
	      gomp_for *inner = as_a <gomp_for *> (last_stmt ((*pp)->entry));
	      gcc_assert (gimple_omp_for_kind (inner)
			  == GF_OMP_FOR_KIND_GRID_LOOP);
	      grid_expand_omp_for_loop (*pp, true);
	      *pp = (*pp)->next;
	      next_pp = pp;
	    }
	}
      expand_omp (kfor->inner);
    }
  if (gpukernel->inner)
    expand_omp (gpukernel->inner);

  /* Create the kernel function declaration as a clone of the target's
     child function, with its own BLOCK tree rooted at the target's
     block.  */
  tree kern_fndecl = copy_node (orig_child_fndecl);
  DECL_NAME (kern_fndecl) = clone_function_name (kern_fndecl, "kernel");
  SET_DECL_ASSEMBLER_NAME (kern_fndecl, DECL_NAME (kern_fndecl));
  tree tgtblock = gimple_block (tgt_stmt);
  tree fniniblock = make_node (BLOCK);
  BLOCK_ABSTRACT_ORIGIN (fniniblock) = tgtblock;
  BLOCK_SOURCE_LOCATION (fniniblock) = BLOCK_SOURCE_LOCATION (tgtblock);
  BLOCK_SOURCE_END_LOCATION (fniniblock) = BLOCK_SOURCE_END_LOCATION (tgtblock);
  BLOCK_SUPERCONTEXT (fniniblock) = kern_fndecl;
  DECL_INITIAL (kern_fndecl) = fniniblock;
  push_struct_function (kern_fndecl);
  cfun->function_end_locus = gimple_location (tgt_stmt);
  init_tree_ssa (cfun);
  pop_cfun ();

  /* Make sure to generate early debug for the function before
     outlining anything.  */
  if (! gimple_in_ssa_p (cfun))
    (*debug_hooks->early_global_decl) (cfun->decl);

  /* The child function has exactly one argument; give the kernel clone
     its own copy of that PARM_DECL and of DECL_RESULT.  */
  tree old_parm_decl = DECL_ARGUMENTS (kern_fndecl);
  gcc_assert (!DECL_CHAIN (old_parm_decl));
  tree new_parm_decl = copy_node (DECL_ARGUMENTS (kern_fndecl));
  DECL_CONTEXT (new_parm_decl) = kern_fndecl;
  DECL_ARGUMENTS (kern_fndecl) = new_parm_decl;
  gcc_assert (VOID_TYPE_P (TREE_TYPE (DECL_RESULT (kern_fndecl))));
  DECL_RESULT (kern_fndecl) = copy_node (DECL_RESULT (kern_fndecl));
  DECL_CONTEXT (DECL_RESULT (kern_fndecl)) = kern_fndecl;
  struct function *kern_cfun = DECL_STRUCT_FUNCTION (kern_fndecl);
  kern_cfun->curr_properties = cfun->curr_properties;

  grid_expand_omp_for_loop (kfor, false);

  /* Remove the omp for statement.  */
  gimple_stmt_iterator gsi = gsi_last_bb (gpukernel->entry);
  gsi_remove (&gsi, true);
  /* Replace the GIMPLE_OMP_RETURN at the end of the kernel region with a real
     return.  */
  gsi = gsi_last_bb (gpukernel->exit);
  gcc_assert (!gsi_end_p (gsi)
	      && gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_RETURN);
  gimple *ret_stmt = gimple_build_return (NULL);
  gsi_insert_after (&gsi, ret_stmt, GSI_SAME_STMT);
  gsi_remove (&gsi, true);

  /* Statements in the first BB in the target construct have been produced by
     target lowering and must be copied inside the GPUKERNEL, with the two
     exceptions of the first OMP statement and the OMP_DATA assignment
     statement.  */
  gsi = gsi_start_bb (single_succ (gpukernel->entry));
  tree data_arg = gimple_omp_target_data_arg (tgt_stmt);
  tree sender = data_arg ? TREE_VEC_ELT (data_arg, 0) : NULL;
  for (gimple_stmt_iterator tsi = gsi_start_bb (single_succ (target->entry));
       !gsi_end_p (tsi); gsi_next (&tsi))
    {
      gimple *stmt = gsi_stmt (tsi);
      if (is_gimple_omp (stmt))
	break;
      if (sender
	  && is_gimple_assign (stmt)
	  && TREE_CODE (gimple_assign_rhs1 (stmt)) == ADDR_EXPR
	  && TREE_OPERAND (gimple_assign_rhs1 (stmt), 0) == sender)
	continue;
      gimple *copy = gimple_copy (stmt);
      gsi_insert_before (&gsi, copy, GSI_SAME_STMT);
      gimple_set_block (copy, fniniblock);
    }

  /* Move the kernel's body into the newly created function.  */
  move_sese_region_to_fn (kern_cfun, single_succ (gpukernel->entry),
			  gpukernel->exit, inside_block);

  cgraph_node *kcn = cgraph_node::get_create (kern_fndecl);
  kcn->mark_force_output ();
  cgraph_node *orig_child = cgraph_node::get (orig_child_fndecl);

  hsa_register_kernel (kcn, orig_child);

  cgraph_node::add_new_function (kern_fndecl, true);
  push_cfun (kern_cfun);
  cgraph_edge::rebuild_edges ();

  /* Re-map any mention of the PARM_DECL of the original function to the
     PARM_DECL of the new one.

     TODO: It would be great if lowering produced references into the GPU
     kernel decl straight away and we did not have to do this.  */
  struct grid_arg_decl_map adm;
  adm.old_arg = old_parm_decl;
  adm.new_arg = new_parm_decl;
  basic_block bb;
  FOR_EACH_BB_FN (bb, kern_cfun)
    {
      for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
	{
	  gimple *stmt = gsi_stmt (gsi);
	  struct walk_stmt_info wi;
	  memset (&wi, 0, sizeof (wi));
	  wi.info = &adm;
	  walk_gimple_op (stmt, grid_remap_kernel_arg_accesses, &wi);
	}
    }
  pop_cfun ();

  return;
}
7875
/* Expand the parallel region tree rooted at REGION.  Expansion
   proceeds in depth-first order.  Innermost regions are expanded
   first.  This way, parallel regions that require a new function to
   be created (e.g., GIMPLE_OMP_PARALLEL) can be expanded without having any
   internal dependencies in their body.  */

static void
expand_omp (struct omp_region *region)
{
  omp_any_child_fn_dumped = false;
  while (region)
    {
      location_t saved_location;
      gimple *inner_stmt = NULL;

      /* First, determine whether this is a combined parallel+workshare
	 region.  */
      if (region->type == GIMPLE_OMP_PARALLEL)
	determine_parallel_type (region);
      else if (region->type == GIMPLE_OMP_TARGET)
	grid_expand_target_grid_body (region);

      /* For combined worksharing loops, remember the workshare statement
	 before the inner region is expanded away.  */
      if (region->type == GIMPLE_OMP_FOR
	  && gimple_omp_for_combined_p (last_stmt (region->entry)))
	inner_stmt = last_stmt (region->inner->entry);

      /* Depth-first: expand nested regions before this one.  */
      if (region->inner)
	expand_omp (region->inner);

      /* Point diagnostics at the directive being expanded, restoring
	 input_location afterwards.  */
      saved_location = input_location;
      if (gimple_has_location (last_stmt (region->entry)))
	input_location = gimple_location (last_stmt (region->entry));

      switch (region->type)
	{
	case GIMPLE_OMP_PARALLEL:
	case GIMPLE_OMP_TASK:
	  expand_omp_taskreg (region);
	  break;

	case GIMPLE_OMP_FOR:
	  expand_omp_for (region, inner_stmt);
	  break;

	case GIMPLE_OMP_SECTIONS:
	  expand_omp_sections (region);
	  break;

	case GIMPLE_OMP_SECTION:
	  /* Individual omp sections are handled together with their
	     parent GIMPLE_OMP_SECTIONS region.  */
	  break;

	case GIMPLE_OMP_SINGLE:
	  expand_omp_single (region);
	  break;

	case GIMPLE_OMP_ORDERED:
	  {
	    gomp_ordered *ord_stmt
	      = as_a <gomp_ordered *> (last_stmt (region->entry));
	    if (omp_find_clause (gimple_omp_ordered_clauses (ord_stmt),
				 OMP_CLAUSE_DEPEND))
	      {
		/* We'll expand these when expanding corresponding
		   worksharing region with ordered(n) clause.  */
		gcc_assert (region->outer
			    && region->outer->type == GIMPLE_OMP_FOR);
		region->ord_stmt = ord_stmt;
		break;
	      }
	  }
	  /* FALLTHRU */
	case GIMPLE_OMP_MASTER:
	case GIMPLE_OMP_TASKGROUP:
	case GIMPLE_OMP_CRITICAL:
	case GIMPLE_OMP_TEAMS:
	  expand_omp_synch (region);
	  break;

	case GIMPLE_OMP_ATOMIC_LOAD:
	  expand_omp_atomic (region);
	  break;

	case GIMPLE_OMP_TARGET:
	  expand_omp_target (region);
	  break;

	default:
	  gcc_unreachable ();
	}

      input_location = saved_location;
      region = region->next;
    }
  /* If a child function was dumped during expansion, re-emit the header
     of the current function so subsequent dump output is attributed
     correctly.  */
  if (omp_any_child_fn_dumped)
    {
      if (dump_file)
	dump_function_header (dump_file, current_function_decl, dump_flags);
      omp_any_child_fn_dumped = false;
    }
}
7978
/* Helper for build_omp_regions.  Scan the dominator tree starting at
   block BB.  PARENT is the region that contains BB.  If SINGLE_TREE is
   true, the function ends once a single tree is built (otherwise, whole
   forest of OMP constructs may be built).  */

static void
build_omp_regions_1 (basic_block bb, struct omp_region *parent,
		     bool single_tree)
{
  gimple_stmt_iterator gsi;
  gimple *stmt;
  basic_block son;

  /* Only a block ending in an OMP statement can open, continue or close
     a region.  */
  gsi = gsi_last_bb (bb);
  if (!gsi_end_p (gsi) && is_gimple_omp (gsi_stmt (gsi)))
    {
      struct omp_region *region;
      enum gimple_code code;

      stmt = gsi_stmt (gsi);
      code = gimple_code (stmt);
      if (code == GIMPLE_OMP_RETURN)
	{
	  /* STMT is the return point out of region PARENT.  Mark it
	     as the exit point and make PARENT the immediately
	     enclosing region.  */
	  gcc_assert (parent);
	  region = parent;
	  region->exit = bb;
	  parent = parent->outer;
	}
      else if (code == GIMPLE_OMP_ATOMIC_STORE)
	{
	  /* GIMPLE_OMP_ATOMIC_STORE is analogous to
	     GIMPLE_OMP_RETURN, but matches with
	     GIMPLE_OMP_ATOMIC_LOAD.  */
	  gcc_assert (parent);
	  gcc_assert (parent->type == GIMPLE_OMP_ATOMIC_LOAD);
	  region = parent;
	  region->exit = bb;
	  parent = parent->outer;
	}
      else if (code == GIMPLE_OMP_CONTINUE)
	{
	  /* Record the continue point of the enclosing region.  */
	  gcc_assert (parent);
	  parent->cont = bb;
	}
      else if (code == GIMPLE_OMP_SECTIONS_SWITCH)
	{
	  /* GIMPLE_OMP_SECTIONS_SWITCH is part of
	     GIMPLE_OMP_SECTIONS, and we do nothing for it.  */
	}
      else
	{
	  region = new_omp_region (bb, code, parent);
	  /* Otherwise...  */
	  if (code == GIMPLE_OMP_TARGET)
	    {
	      switch (gimple_omp_target_kind (stmt))
		{
		case GF_OMP_TARGET_KIND_REGION:
		case GF_OMP_TARGET_KIND_DATA:
		case GF_OMP_TARGET_KIND_OACC_PARALLEL:
		case GF_OMP_TARGET_KIND_OACC_KERNELS:
		case GF_OMP_TARGET_KIND_OACC_DATA:
		case GF_OMP_TARGET_KIND_OACC_HOST_DATA:
		  break;
		case GF_OMP_TARGET_KIND_UPDATE:
		case GF_OMP_TARGET_KIND_ENTER_DATA:
		case GF_OMP_TARGET_KIND_EXIT_DATA:
		case GF_OMP_TARGET_KIND_OACC_UPDATE:
		case GF_OMP_TARGET_KIND_OACC_ENTER_EXIT_DATA:
		case GF_OMP_TARGET_KIND_OACC_DECLARE:
		  /* ..., other than for those stand-alone directives...  */
		  region = NULL;
		  break;
		default:
		  gcc_unreachable ();
		}
	    }
	  else if (code == GIMPLE_OMP_ORDERED
		   && omp_find_clause (gimple_omp_ordered_clauses
					 (as_a <gomp_ordered *> (stmt)),
				       OMP_CLAUSE_DEPEND))
	    /* #pragma omp ordered depend is also just a stand-alone
	       directive.  */
	    region = NULL;
	  /* ..., this directive becomes the parent for a new region.  */
	  if (region)
	    parent = region;
	}
    }

  /* When building a single tree, stop once the tree rooted in the
     original PARENT has been closed.  */
  if (single_tree && !parent)
    return;

  /* Recurse into dominated blocks; dominance guarantees we see a region's
     entry before its interior and exit blocks.  */
  for (son = first_dom_son (CDI_DOMINATORS, bb);
       son;
       son = next_dom_son (CDI_DOMINATORS, son))
    build_omp_regions_1 (son, parent, single_tree);
}
8080
8081 /* Builds the tree of OMP regions rooted at ROOT, storing it to
8082 root_omp_region. */
8083
8084 static void
8085 build_omp_regions_root (basic_block root)
8086 {
8087 gcc_assert (root_omp_region == NULL);
8088 build_omp_regions_1 (root, NULL, true);
8089 gcc_assert (root_omp_region != NULL);
8090 }
8091
/* Expands omp construct (and its subconstructs) starting in HEAD.  */

void
omp_expand_local (basic_block head)
{
  /* Build a single region tree rooted at HEAD into root_omp_region.  */
  build_omp_regions_root (head);
  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "\nOMP region tree\n\n");
      dump_omp_region (dump_file, root_omp_region, 0);
      fprintf (dump_file, "\n");
    }

  /* Expand the tree and release the region structures again.  */
  remove_exit_barriers (root_omp_region);
  expand_omp (root_omp_region);

  omp_free_regions ();
}
8110
/* Scan the CFG and build a tree of OMP regions, storing it in
   root_omp_region.  */
8113
static void
build_omp_regions (void)
{
  gcc_assert (root_omp_region == NULL);
  /* The scan walks the dominator tree from the function's entry block,
     so dominance information must be up to date.  */
  calculate_dominance_info (CDI_DOMINATORS);
  build_omp_regions_1 (ENTRY_BLOCK_PTR_FOR_FN (cfun), NULL, false);
}
8121
/* Main entry point for expanding OMP-GIMPLE into runtime calls.  */

static unsigned int
execute_expand_omp (void)
{
  build_omp_regions ();

  /* Nothing to do when the function contains no OMP constructs.  */
  if (!root_omp_region)
    return 0;

  if (dump_file)
    {
      fprintf (dump_file, "\nOMP region tree\n\n");
      dump_omp_region (dump_file, root_omp_region, 0);
      fprintf (dump_file, "\n");
    }

  remove_exit_barriers (root_omp_region);

  expand_omp (root_omp_region);

  /* Expansion rewrites the CFG; verify loops (when not already marked for
     fixup) and clean up before releasing the region tree.  */
  if (flag_checking && !loops_state_satisfies_p (LOOPS_NEED_FIXUP))
    verify_loop_structure ();
  cleanup_tree_cfg ();

  omp_free_regions ();

  return 0;
}
8151
8152 /* OMP expansion -- the default pass, run before creation of SSA form. */
8153
8154 namespace {
8155
8156 const pass_data pass_data_expand_omp =
8157 {
8158 GIMPLE_PASS, /* type */
8159 "ompexp", /* name */
8160 OPTGROUP_OMP, /* optinfo_flags */
8161 TV_NONE, /* tv_id */
8162 PROP_gimple_any, /* properties_required */
8163 PROP_gimple_eomp, /* properties_provided */
8164 0, /* properties_destroyed */
8165 0, /* todo_flags_start */
8166 0, /* todo_flags_finish */
8167 };
8168
8169 class pass_expand_omp : public gimple_opt_pass
8170 {
8171 public:
8172 pass_expand_omp (gcc::context *ctxt)
8173 : gimple_opt_pass (pass_data_expand_omp, ctxt)
8174 {}
8175
8176 /* opt_pass methods: */
8177 virtual unsigned int execute (function *)
8178 {
8179 bool gate = ((flag_cilkplus != 0 || flag_openacc != 0 || flag_openmp != 0
8180 || flag_openmp_simd != 0)
8181 && !seen_error ());
8182
8183 /* This pass always runs, to provide PROP_gimple_eomp.
8184 But often, there is nothing to do. */
8185 if (!gate)
8186 return 0;
8187
8188 return execute_expand_omp ();
8189 }
8190
8191 }; // class pass_expand_omp
8192
8193 } // anon namespace
8194
/* Create an instance of the default "ompexp" pass.  */

gimple_opt_pass *
make_pass_expand_omp (gcc::context *ctxt)
{
  return new pass_expand_omp (ctxt);
}
8200
8201 namespace {
8202
8203 const pass_data pass_data_expand_omp_ssa =
8204 {
8205 GIMPLE_PASS, /* type */
8206 "ompexpssa", /* name */
8207 OPTGROUP_OMP, /* optinfo_flags */
8208 TV_NONE, /* tv_id */
8209 PROP_cfg | PROP_ssa, /* properties_required */
8210 PROP_gimple_eomp, /* properties_provided */
8211 0, /* properties_destroyed */
8212 0, /* todo_flags_start */
8213 TODO_cleanup_cfg | TODO_rebuild_alias, /* todo_flags_finish */
8214 };
8215
8216 class pass_expand_omp_ssa : public gimple_opt_pass
8217 {
8218 public:
8219 pass_expand_omp_ssa (gcc::context *ctxt)
8220 : gimple_opt_pass (pass_data_expand_omp_ssa, ctxt)
8221 {}
8222
8223 /* opt_pass methods: */
8224 virtual bool gate (function *fun)
8225 {
8226 return !(fun->curr_properties & PROP_gimple_eomp);
8227 }
8228 virtual unsigned int execute (function *) { return execute_expand_omp (); }
8229 opt_pass * clone () { return new pass_expand_omp_ssa (m_ctxt); }
8230
8231 }; // class pass_expand_omp_ssa
8232
8233 } // anon namespace
8234
/* Create an instance of the SSA-form "ompexpssa" pass.  */

gimple_opt_pass *
make_pass_expand_omp_ssa (gcc::context *ctxt)
{
  return new pass_expand_omp_ssa (ctxt);
}
8240
/* Called from tree-cfg.c::make_edges to create cfg edges for all relevant
   GIMPLE_* codes.  BB is a block whose last statement is an OMP statement.
   *REGION is the innermost open OMP region on entry and is updated as
   regions are entered and left; when it changes, *REGION_IDX is set to the
   index of the new region's entry block (0 when no region remains open).
   Return true if the caller should also create a fall-through edge out of
   BB.  */

bool
omp_make_gimple_edges (basic_block bb, struct omp_region **region,
		       int *region_idx)
{
  gimple *last = last_stmt (bb);
  enum gimple_code code = gimple_code (last);
  struct omp_region *cur_region = *region;
  bool fallthru = false;

  switch (code)
    {
    case GIMPLE_OMP_PARALLEL:
    case GIMPLE_OMP_TASK:
    case GIMPLE_OMP_FOR:
    case GIMPLE_OMP_SINGLE:
    case GIMPLE_OMP_TEAMS:
    case GIMPLE_OMP_MASTER:
    case GIMPLE_OMP_TASKGROUP:
    case GIMPLE_OMP_CRITICAL:
    case GIMPLE_OMP_SECTION:
    case GIMPLE_OMP_GRID_BODY:
      /* These all open a new region.  */
      cur_region = new_omp_region (bb, code, cur_region);
      fallthru = true;
      break;

    case GIMPLE_OMP_ORDERED:
      cur_region = new_omp_region (bb, code, cur_region);
      fallthru = true;
      /* #pragma omp ordered depend is a stand-alone directive; the region
	 it opened is closed again immediately.  */
      if (omp_find_clause (gimple_omp_ordered_clauses
			     (as_a <gomp_ordered *> (last)),
			   OMP_CLAUSE_DEPEND))
	cur_region = cur_region->outer;
      break;

    case GIMPLE_OMP_TARGET:
      cur_region = new_omp_region (bb, code, cur_region);
      fallthru = true;
      switch (gimple_omp_target_kind (last))
	{
	case GF_OMP_TARGET_KIND_REGION:
	case GF_OMP_TARGET_KIND_DATA:
	case GF_OMP_TARGET_KIND_OACC_PARALLEL:
	case GF_OMP_TARGET_KIND_OACC_KERNELS:
	case GF_OMP_TARGET_KIND_OACC_DATA:
	case GF_OMP_TARGET_KIND_OACC_HOST_DATA:
	  break;
	case GF_OMP_TARGET_KIND_UPDATE:
	case GF_OMP_TARGET_KIND_ENTER_DATA:
	case GF_OMP_TARGET_KIND_EXIT_DATA:
	case GF_OMP_TARGET_KIND_OACC_UPDATE:
	case GF_OMP_TARGET_KIND_OACC_ENTER_EXIT_DATA:
	case GF_OMP_TARGET_KIND_OACC_DECLARE:
	  /* Stand-alone target directives do not keep a region open.  */
	  cur_region = cur_region->outer;
	  break;
	default:
	  gcc_unreachable ();
	}
      break;

    case GIMPLE_OMP_SECTIONS:
      cur_region = new_omp_region (bb, code, cur_region);
      fallthru = true;
      break;

    case GIMPLE_OMP_SECTIONS_SWITCH:
      fallthru = false;
      break;

    case GIMPLE_OMP_ATOMIC_LOAD:
    case GIMPLE_OMP_ATOMIC_STORE:
      fallthru = true;
      break;

    case GIMPLE_OMP_RETURN:
      /* In the case of a GIMPLE_OMP_SECTION, the edge will go
	 somewhere other than the next block.  This will be
	 created later.  */
      cur_region->exit = bb;
      if (cur_region->type == GIMPLE_OMP_TASK)
	/* Add an edge corresponding to not scheduling the task
	   immediately.  */
	make_edge (cur_region->entry, bb, EDGE_ABNORMAL);
      fallthru = cur_region->type != GIMPLE_OMP_SECTION;
      cur_region = cur_region->outer;
      break;

    case GIMPLE_OMP_CONTINUE:
      cur_region->cont = bb;
      switch (cur_region->type)
	{
	case GIMPLE_OMP_FOR:
	  /* Mark all GIMPLE_OMP_FOR and GIMPLE_OMP_CONTINUE
	     succs edges as abnormal to prevent splitting
	     them.  */
	  single_succ_edge (cur_region->entry)->flags |= EDGE_ABNORMAL;
	  /* Make the loopback edge.  */
	  make_edge (bb, single_succ (cur_region->entry),
		     EDGE_ABNORMAL);

	  /* Create an edge from GIMPLE_OMP_FOR to exit, which
	     corresponds to the case that the body of the loop
	     is not executed at all.  */
	  make_edge (cur_region->entry, bb->next_bb, EDGE_ABNORMAL);
	  make_edge (bb, bb->next_bb, EDGE_FALLTHRU | EDGE_ABNORMAL);
	  fallthru = false;
	  break;

	case GIMPLE_OMP_SECTIONS:
	  /* Wire up the edges into and out of the nested sections.  */
	  {
	    basic_block switch_bb = single_succ (cur_region->entry);

	    struct omp_region *i;
	    for (i = cur_region->inner; i ; i = i->next)
	      {
		gcc_assert (i->type == GIMPLE_OMP_SECTION);
		make_edge (switch_bb, i->entry, 0);
		make_edge (i->exit, bb, EDGE_FALLTHRU);
	      }

	    /* Make the loopback edge to the block with
	       GIMPLE_OMP_SECTIONS_SWITCH.  */
	    make_edge (bb, switch_bb, 0);

	    /* Make the edge from the switch to exit.  */
	    make_edge (switch_bb, bb->next_bb, 0);
	    fallthru = false;
	  }
	  break;

	case GIMPLE_OMP_TASK:
	  fallthru = true;
	  break;

	default:
	  gcc_unreachable ();
	}
      break;

    default:
      gcc_unreachable ();
    }

  /* Report the (possibly changed) innermost region back to the caller.  */
  if (*region != cur_region)
    {
      *region = cur_region;
      if (cur_region)
	*region_idx = cur_region->entry->index;
      else
	*region_idx = 0;
    }

  return fallthru;
}
8398
8399 #include "gt-omp-expand.h"