/* Lowering pass for OpenMP directives.  Converts OpenMP directives
   into explicit calls to the runtime library (libgomp) and data
   marshalling to implement data sharing and copying clauses.
   Contributed by Diego Novillo <dnovillo@redhat.com>

   Copyright (C) 2005 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301, USA.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
#include "rtl.h"
#include "tree-gimple.h"
#include "tree-inline.h"
#include "langhooks.h"
#include "diagnostic.h"
#include "tree-flow.h"
#include "timevar.h"
#include "flags.h"
#include "function.h"
#include "expr.h"
#include "toplev.h"
#include "tree-pass.h"
#include "ggc.h"
#include "except.h"


/* Lowering of OpenMP parallel and workshare constructs proceeds in two
   phases.  The first phase scans the function looking for OMP statements
   and then for variables that must be replaced to satisfy data sharing
   clauses.  The second phase expands code for the constructs, as well as
   re-gimplifying things when variables have been replaced with complex
   expressions.

   Lowering of a parallel statement results in the contents of the
   parallel being moved to a new function, to be invoked by the thread
   library.  The variable remapping process is complex enough that only
   one level of parallel statement is handled at one time.  If there are
   nested parallel statements, those nested statements are handled when
   the new function is lowered and optimized.  The result is not 100%
   optimal, but lexically nested parallels effectively happen only in
   test suites.  */

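/* For illustration only (not part of the documented interface): a
   directive such as

       #pragma omp parallel shared(a)
         { body; }

   is lowered, roughly, into an outlined child function plus a launch
   sequence in the parent.  The names below are schematic; the actual
   names are generated by this pass:

       void foo.omp_fn.0 (struct .omp_data_s *.omp_data_i)
         { body, with 'a' accessed as .omp_data_i->a; }

       .omp_data_o.a = a;
       GOMP_parallel_start (foo.omp_fn.0, &.omp_data_o, 0);
       foo.omp_fn.0 (&.omp_data_o);
       GOMP_parallel_end ();  */
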
/* Context structure.  Used to store information about each parallel
   directive in the code.  */

typedef struct omp_context
{
  /* This field must be at the beginning, as we do "inheritance": Some
     callback functions for tree-inline.c (e.g., omp_copy_decl)
     receive a copy_body_data pointer that is up-casted to an
     omp_context pointer.  */
  copy_body_data cb;

  /* The tree of contexts corresponding to the encountered constructs.  */
  struct omp_context *outer;
  tree stmt;

  /* Map variables to fields in a structure that allows communication
     between sending and receiving threads.  */
  splay_tree field_map;
  tree record_type;
  tree sender_decl;
  tree receiver_decl;

  /* A chain of variables to add to the top-level block surrounding the
     construct.  In the case of a parallel, this is in the child function.  */
  tree block_vars;

  /* What to do with variables with implicitly determined sharing
     attributes.  */
  enum omp_clause_default_kind default_kind;

  /* Nesting depth of this context.  Used to beautify error messages re
     invalid gotos.  The outermost ctx is depth 1, with depth 0 being
     reserved for the main body of the function.  */
  int depth;

  /* Type of parallel construct.  Used to distinguish regular parallel
     regions from combined parallel+workshare directives (parallel,
     parallel loop and parallel sections).  */
  enum omp_parallel_type parallel_type;

  /* True if this parallel directive is nested within another.  */
  bool is_nested;

  /* For combined parallel constructs, the built-in index for the
     library call used to launch the children threads.  */
  int parallel_start_ix;

  /* If the combined parallel directive needs additional arguments for
     the call to GOMP_parallel_start_foo, they are added here.  */
  tree parallel_start_additional_args;
} omp_context;


/* A structure describing the main elements of a parallel loop.
   Mostly used to communicate between the various subroutines of
   expand_omp_for_1.  */

struct expand_omp_for_data
{
  tree v, n1, n2, step, chunk_size, for_stmt;
  enum tree_code cond_code;
  tree pre;
  omp_context *ctx;
  bool have_nowait, have_ordered;
  enum omp_clause_schedule_kind sched_kind;
};

static splay_tree all_contexts;
static int parallel_nesting_level;

static void scan_omp (tree *, omp_context *);
static void expand_omp (tree *, omp_context *);


/* Find an OpenMP clause of type KIND within CLAUSES.  */

tree
find_omp_clause (tree clauses, enum tree_code kind)
{
  for (; clauses ; clauses = OMP_CLAUSE_CHAIN (clauses))
    if (TREE_CODE (clauses) == kind)
      return clauses;

  return NULL_TREE;
}

/* Return true if CTX is for an omp parallel.  */

static inline bool
is_parallel_ctx (omp_context *ctx)
{
  return ctx->parallel_type != IS_NOT_PARALLEL;
}

/* Return true if CTX is inside a combined omp parallel + workshare.  */

static inline bool
is_in_combined_parallel_ctx (omp_context *ctx)
{
  return ctx->outer && ctx->outer->parallel_type == IS_COMBINED_PARALLEL;
}

/* Return true if EXPR is variable sized.  */

static inline bool
is_variable_sized (tree expr)
{
  return !TREE_CONSTANT (TYPE_SIZE_UNIT (TREE_TYPE (expr)));
}

/* Return true if DECL is a reference type.  */

static inline bool
is_reference (tree decl)
{
  return lang_hooks.decls.omp_privatize_by_reference (decl);
}

/* Look up variables in the decl or field splay trees.  The "maybe" form
   allows the variable not to have been entered; otherwise we assert
   that it must have been.  */

static inline tree
lookup_decl (tree var, omp_context *ctx)
{
  splay_tree_node n;
  n = splay_tree_lookup (ctx->cb.decl_map, (splay_tree_key) var);
  return (tree) n->value;
}

static inline tree
maybe_lookup_decl (tree var, omp_context *ctx)
{
  splay_tree_node n;
  n = splay_tree_lookup (ctx->cb.decl_map, (splay_tree_key) var);
  return n ? (tree) n->value : NULL_TREE;
}

static inline tree
lookup_field (tree var, omp_context *ctx)
{
  splay_tree_node n;
  n = splay_tree_lookup (ctx->field_map, (splay_tree_key) var);
  return (tree) n->value;
}

static inline tree
maybe_lookup_field (tree var, omp_context *ctx)
{
  splay_tree_node n;
  n = splay_tree_lookup (ctx->field_map, (splay_tree_key) var);
  return n ? (tree) n->value : NULL_TREE;
}

/* Return true if DECL should be copied by pointer.  SHARED_P is true
   if DECL is to be shared.  */

static bool
use_pointer_for_field (tree decl, bool shared_p)
{
  if (AGGREGATE_TYPE_P (TREE_TYPE (decl)))
    return true;

  /* We can only use copy-in/copy-out semantics for shared variables
     when we know the value is not accessible from an outer scope.  */
  if (shared_p)
    {
      /* ??? Trivially accessible from anywhere.  But why would we even
         be passing an address in this case?  Should we simply assert
         this to be false, or should we have a cleanup pass that removes
         these from the list of mappings?  */
      if (TREE_STATIC (decl) || DECL_EXTERNAL (decl))
        return true;

      /* For variables with DECL_HAS_VALUE_EXPR_P set, we cannot tell
         without analyzing the expression whether or not its location
         is accessible to anyone else.  In the case of nested parallel
         regions it certainly may be.  */
      if (DECL_HAS_VALUE_EXPR_P (decl))
        return true;

      /* Do not use copy-in/copy-out for variables that have their
         address taken.  */
      if (TREE_ADDRESSABLE (decl))
        return true;
    }

  return false;
}

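/* For illustration only: given "int a" (a local whose address is never
   taken) and "static int b", a 'shared(a)' clause may use
   copy-in/copy-out semantics, whereas 'b', being trivially accessible
   from any scope, is always communicated by pointer, as are aggregates,
   addressable variables, and variables with a DECL_VALUE_EXPR.  */
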
/* Construct a new automatic decl similar to VAR.  */

static tree
omp_copy_decl_2 (tree var, tree name, tree type, omp_context *ctx)
{
  tree copy = build_decl (VAR_DECL, name, type);

  TREE_ADDRESSABLE (copy) = TREE_ADDRESSABLE (var);
  DECL_COMPLEX_GIMPLE_REG_P (copy) = DECL_COMPLEX_GIMPLE_REG_P (var);
  DECL_ARTIFICIAL (copy) = DECL_ARTIFICIAL (var);
  DECL_IGNORED_P (copy) = DECL_IGNORED_P (var);
  TREE_USED (copy) = 1;
  DECL_CONTEXT (copy) = ctx->cb.dst_fn;
  DECL_SEEN_IN_BIND_EXPR_P (copy) = 1;

  TREE_CHAIN (copy) = ctx->block_vars;
  ctx->block_vars = copy;

  return copy;
}

static tree
omp_copy_decl_1 (tree var, omp_context *ctx)
{
  return omp_copy_decl_2 (var, DECL_NAME (var), TREE_TYPE (var), ctx);
}

/* Build tree nodes to access the field for VAR on the receiver side.  */

static tree
build_receiver_ref (tree var, bool by_ref, omp_context *ctx)
{
  tree x, field = lookup_field (var, ctx);

  /* If the receiver record type was remapped in the child function,
     remap the field into the new record type.  */
  x = maybe_lookup_field (field, ctx);
  if (x != NULL)
    field = x;

  x = build_fold_indirect_ref (ctx->receiver_decl);
  x = build3 (COMPONENT_REF, TREE_TYPE (field), x, field, NULL);
  if (by_ref)
    x = build_fold_indirect_ref (x);

  return x;
}

/* Build tree nodes to access VAR in the scope outer to CTX.  In the case
   of a parallel, this is a component reference; for workshare constructs
   this is some variable.  */

static tree
build_outer_var_ref (tree var, omp_context *ctx)
{
  tree x;

  if (is_global_var (var))
    x = var;
  else if (is_variable_sized (var))
    {
      x = TREE_OPERAND (DECL_VALUE_EXPR (var), 0);
      x = build_outer_var_ref (x, ctx);
      x = build_fold_indirect_ref (x);
    }
  else if (is_parallel_ctx (ctx))
    {
      bool by_ref = use_pointer_for_field (var, false);
      x = build_receiver_ref (var, by_ref, ctx);
    }
  else if (ctx->outer)
    x = lookup_decl (var, ctx->outer);
  else
    gcc_unreachable ();

  if (is_reference (var))
    x = build_fold_indirect_ref (x);

  return x;
}

/* Build tree nodes to access the field for VAR on the sender side.  */

static tree
build_sender_ref (tree var, omp_context *ctx)
{
  tree field = lookup_field (var, ctx);
  return build3 (COMPONENT_REF, TREE_TYPE (field),
                 ctx->sender_decl, field, NULL);
}

/* Add a new field for VAR inside the structure CTX->SENDER_DECL.  */

static void
install_var_field (tree var, bool by_ref, omp_context *ctx)
{
  tree field, type;

  gcc_assert (!splay_tree_lookup (ctx->field_map, (splay_tree_key) var));

  type = TREE_TYPE (var);
  if (by_ref)
    type = build_pointer_type (type);

  field = build_decl (FIELD_DECL, DECL_NAME (var), type);

  /* Remember what variable this field was created for.  This does have a
     side effect of making dwarf2out ignore this member, so for helpful
     debugging we clear it later in delete_omp_context.  */
  DECL_ABSTRACT_ORIGIN (field) = var;

  insert_field_into_struct (ctx->record_type, field);

  splay_tree_insert (ctx->field_map, (splay_tree_key) var,
                     (splay_tree_value) field);
}

static tree
install_var_local (tree var, omp_context *ctx)
{
  tree new_var = omp_copy_decl_1 (var, ctx);
  insert_decl_map (&ctx->cb, var, new_var);
  return new_var;
}

/* Adjust the replacement for DECL in CTX for the new context.  This means
   copying the DECL_VALUE_EXPR, and fixing up the type.  */

static void
fixup_remapped_decl (tree decl, omp_context *ctx, bool private_debug)
{
  tree new_decl, size;

  new_decl = lookup_decl (decl, ctx);

  TREE_TYPE (new_decl) = remap_type (TREE_TYPE (decl), &ctx->cb);

  if ((!TREE_CONSTANT (DECL_SIZE (new_decl)) || private_debug)
      && DECL_HAS_VALUE_EXPR_P (decl))
    {
      tree ve = DECL_VALUE_EXPR (decl);
      walk_tree (&ve, copy_body_r, &ctx->cb, NULL);
      SET_DECL_VALUE_EXPR (new_decl, ve);
      DECL_HAS_VALUE_EXPR_P (new_decl) = 1;
    }

  if (!TREE_CONSTANT (DECL_SIZE (new_decl)))
    {
      size = remap_decl (DECL_SIZE (decl), &ctx->cb);
      if (size == error_mark_node)
        size = TYPE_SIZE (TREE_TYPE (new_decl));
      DECL_SIZE (new_decl) = size;

      size = remap_decl (DECL_SIZE_UNIT (decl), &ctx->cb);
      if (size == error_mark_node)
        size = TYPE_SIZE_UNIT (TREE_TYPE (new_decl));
      DECL_SIZE_UNIT (new_decl) = size;
    }
}

/* The callback for remap_decl.  Search all containing contexts for a
   mapping of the variable; this avoids having to duplicate the splay
   tree ahead of time.  We know a mapping doesn't already exist in the
   given context.  Create new mappings to implement default semantics.  */

static tree
omp_copy_decl (tree var, copy_body_data *cb)
{
  omp_context *ctx = (omp_context *) cb;
  tree new_var;

  if (is_global_var (var) || decl_function_context (var) != ctx->cb.src_fn)
    return var;

  if (TREE_CODE (var) == LABEL_DECL)
    {
      new_var = create_artificial_label ();
      DECL_CONTEXT (new_var) = ctx->cb.dst_fn;
      insert_decl_map (&ctx->cb, var, new_var);
      return new_var;
    }

  while (!is_parallel_ctx (ctx))
    {
      ctx = ctx->outer;
      if (ctx == NULL)
        return var;
      new_var = maybe_lookup_decl (var, ctx);
      if (new_var)
        return new_var;
    }

  return error_mark_node;
}

/* Create a new context, with OUTER_CTX being the surrounding context.  */

static omp_context *
new_omp_context (tree stmt, omp_context *outer_ctx)
{
  omp_context *ctx = XCNEW (omp_context);

  splay_tree_insert (all_contexts, (splay_tree_key) stmt,
                     (splay_tree_value) ctx);
  ctx->stmt = stmt;

  if (outer_ctx)
    {
      ctx->outer = outer_ctx;
      ctx->cb = outer_ctx->cb;
      ctx->cb.block = NULL;
      ctx->depth = outer_ctx->depth + 1;
    }
  else
    {
      ctx->cb.src_fn = current_function_decl;
      ctx->cb.dst_fn = current_function_decl;
      ctx->cb.src_node = cgraph_node (current_function_decl);
      ctx->cb.dst_node = ctx->cb.src_node;
      ctx->cb.src_cfun = cfun;
      ctx->cb.copy_decl = omp_copy_decl;
      ctx->cb.eh_region = -1;
      ctx->cb.transform_call_graph_edges = CB_CGE_MOVE;
      ctx->depth = 1;
    }

  ctx->cb.decl_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);

  return ctx;
}

/* Destroy an omp_context data structure.  Called through the splay tree
   value delete callback.  */

static void
delete_omp_context (splay_tree_value value)
{
  omp_context *ctx = (omp_context *) value;

  splay_tree_delete (ctx->cb.decl_map);

  if (ctx->field_map)
    splay_tree_delete (ctx->field_map);

  /* We hijacked DECL_ABSTRACT_ORIGIN earlier.  We need to clear it before
     it produces corrupt debug information.  */
  if (ctx->record_type)
    {
      tree t;
      for (t = TYPE_FIELDS (ctx->record_type); t ; t = TREE_CHAIN (t))
        DECL_ABSTRACT_ORIGIN (t) = NULL;
    }

  XDELETE (ctx);
}

/* Fix up RECEIVER_DECL with a type that has been remapped to the child
   context.  */

static void
fixup_child_record_type (omp_context *ctx)
{
  tree f, type = ctx->record_type;

  /* ??? It isn't sufficient to just call remap_type here, because
     variably_modified_type_p doesn't work the way we expect for
     record types.  Testing each field for whether it needs remapping
     and creating a new record by hand works, however.  */
  for (f = TYPE_FIELDS (type); f ; f = TREE_CHAIN (f))
    if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
      break;
  if (f)
    {
      tree name, new_fields = NULL;

      type = lang_hooks.types.make_type (RECORD_TYPE);
      name = DECL_NAME (TYPE_NAME (ctx->record_type));
      name = build_decl (TYPE_DECL, name, type);
      TYPE_NAME (type) = name;

      for (f = TYPE_FIELDS (ctx->record_type); f ; f = TREE_CHAIN (f))
        {
          tree new_f = copy_node (f);
          DECL_CONTEXT (new_f) = type;
          TREE_TYPE (new_f) = remap_type (TREE_TYPE (f), &ctx->cb);
          TREE_CHAIN (new_f) = new_fields;
          new_fields = new_f;

          /* Arrange to be able to look up the receiver field
             given the sender field.  */
          splay_tree_insert (ctx->field_map, (splay_tree_key) f,
                             (splay_tree_value) new_f);
        }
      TYPE_FIELDS (type) = nreverse (new_fields);
      layout_type (type);
    }

  TREE_TYPE (ctx->receiver_decl) = build_pointer_type (type);
}

/* Instantiate decls as necessary in CTX to satisfy the data sharing
   specified by CLAUSES.  */

static void
scan_sharing_clauses (tree clauses, omp_context *ctx)
{
  tree c, decl;
  bool scan_array_reductions = false;

  for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
    {
      bool by_ref;

      switch (TREE_CODE (c))
        {
        case OMP_CLAUSE_PRIVATE:
          decl = OMP_CLAUSE_DECL (c);
          if (!is_variable_sized (decl))
            install_var_local (decl, ctx);
          break;

        case OMP_CLAUSE_SHARED:
          gcc_assert (is_parallel_ctx (ctx));
          decl = OMP_CLAUSE_DECL (c);
          gcc_assert (!is_variable_sized (decl));
          by_ref = use_pointer_for_field (decl, true);
          if (! TREE_READONLY (decl)
              || TREE_ADDRESSABLE (decl)
              || by_ref
              || is_reference (decl))
            {
              install_var_field (decl, by_ref, ctx);
              install_var_local (decl, ctx);
              break;
            }
          /* We don't need to copy const scalar vars back.  */
          TREE_SET_CODE (c, OMP_CLAUSE_FIRSTPRIVATE);
          goto do_private;

        case OMP_CLAUSE_LASTPRIVATE:
          /* Let the corresponding firstprivate clause create
             the variable.  */
          if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
            break;
          /* FALLTHRU */

        case OMP_CLAUSE_FIRSTPRIVATE:
        case OMP_CLAUSE_REDUCTION:
          decl = OMP_CLAUSE_DECL (c);
        do_private:
          if (is_variable_sized (decl))
            break;
          else if (is_parallel_ctx (ctx))
            {
              by_ref = use_pointer_for_field (decl, false);
              install_var_field (decl, by_ref, ctx);
            }
          install_var_local (decl, ctx);
          break;

        case OMP_CLAUSE_COPYPRIVATE:
          if (ctx->outer)
            scan_omp (&OMP_CLAUSE_DECL (c), ctx->outer);
          /* FALLTHRU */

        case OMP_CLAUSE_COPYIN:
          decl = OMP_CLAUSE_DECL (c);
          by_ref = use_pointer_for_field (decl, false);
          install_var_field (decl, by_ref, ctx);
          break;

        case OMP_CLAUSE_DEFAULT:
          ctx->default_kind = OMP_CLAUSE_DEFAULT_KIND (c);
          break;

        case OMP_CLAUSE_IF:
        case OMP_CLAUSE_NUM_THREADS:
        case OMP_CLAUSE_SCHEDULE:
          if (ctx->outer)
            scan_omp (&TREE_OPERAND (c, 0), ctx->outer);
          break;

        case OMP_CLAUSE_NOWAIT:
        case OMP_CLAUSE_ORDERED:
          break;

        default:
          gcc_unreachable ();
        }
    }

  for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
    {
      switch (TREE_CODE (c))
        {
        case OMP_CLAUSE_LASTPRIVATE:
          /* Let the corresponding firstprivate clause create
             the variable.  */
          if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
            break;
          /* FALLTHRU */

        case OMP_CLAUSE_PRIVATE:
        case OMP_CLAUSE_FIRSTPRIVATE:
        case OMP_CLAUSE_REDUCTION:
          decl = OMP_CLAUSE_DECL (c);
          if (is_variable_sized (decl))
            install_var_local (decl, ctx);
          fixup_remapped_decl (decl, ctx,
                               TREE_CODE (c) == OMP_CLAUSE_PRIVATE
                               && OMP_CLAUSE_PRIVATE_DEBUG (c));
          if (TREE_CODE (c) == OMP_CLAUSE_REDUCTION
              && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
            scan_array_reductions = true;
          break;

        case OMP_CLAUSE_SHARED:
          decl = OMP_CLAUSE_DECL (c);
          fixup_remapped_decl (decl, ctx, false);
          break;

        case OMP_CLAUSE_COPYPRIVATE:
        case OMP_CLAUSE_COPYIN:
        case OMP_CLAUSE_DEFAULT:
        case OMP_CLAUSE_IF:
        case OMP_CLAUSE_NUM_THREADS:
        case OMP_CLAUSE_SCHEDULE:
        case OMP_CLAUSE_NOWAIT:
        case OMP_CLAUSE_ORDERED:
          break;

        default:
          gcc_unreachable ();
        }
    }

  if (scan_array_reductions)
    for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
      if (TREE_CODE (c) == OMP_CLAUSE_REDUCTION
          && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
        {
          scan_omp (&OMP_CLAUSE_REDUCTION_INIT (c), ctx);
          scan_omp (&OMP_CLAUSE_REDUCTION_MERGE (c), ctx);
        }
}

/* Create a new name for the omp child function.  Returns an identifier.  */

static GTY(()) unsigned int tmp_ompfn_id_num;

static tree
create_omp_child_function_name (void)
{
  tree name = DECL_ASSEMBLER_NAME (current_function_decl);
  size_t len = IDENTIFIER_LENGTH (name);
  char *tmp_name, *prefix;

  prefix = alloca (len + sizeof ("_omp_fn"));
  memcpy (prefix, IDENTIFIER_POINTER (name), len);
  strcpy (prefix + len, "_omp_fn");
#ifndef NO_DOT_IN_LABEL
  prefix[len] = '.';
#elif !defined NO_DOLLAR_IN_LABEL
  prefix[len] = '$';
#endif
  ASM_FORMAT_PRIVATE_NAME (tmp_name, prefix, tmp_ompfn_id_num++);
  return get_identifier (tmp_name);
}

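/* For illustration only: for a parent function assembled as "foo", the
   prefix built above is "foo.omp_fn" (or "foo$omp_fn" or "foo_omp_fn",
   depending on which characters the target allows in labels), and
   ASM_FORMAT_PRIVATE_NAME then appends a counter, yielding names along
   the lines of "foo.omp_fn.0" for the first child function.  */
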
/* Build a decl for the omp child function.  It'll not contain a body
   yet, just the bare decl.  */

static void
create_omp_child_function (omp_context *ctx)
{
  tree decl, type, name, t;

  name = create_omp_child_function_name ();
  type = build_function_type_list (void_type_node, ptr_type_node, NULL_TREE);

  decl = build_decl (FUNCTION_DECL, name, type);
  decl = lang_hooks.decls.pushdecl (decl);

  ctx->cb.dst_fn = decl;

  TREE_STATIC (decl) = 1;
  TREE_USED (decl) = 1;
  DECL_ARTIFICIAL (decl) = 1;
  DECL_IGNORED_P (decl) = 0;
  TREE_PUBLIC (decl) = 0;
  DECL_UNINLINABLE (decl) = 1;
  DECL_EXTERNAL (decl) = 0;
  DECL_CONTEXT (decl) = NULL_TREE;

  t = build_decl (RESULT_DECL, NULL_TREE, void_type_node);
  DECL_ARTIFICIAL (t) = 1;
  DECL_IGNORED_P (t) = 1;
  DECL_RESULT (decl) = t;

  t = build_decl (PARM_DECL, get_identifier (".omp_data_i"), ptr_type_node);
  DECL_ARTIFICIAL (t) = 1;
  DECL_ARG_TYPE (t) = ptr_type_node;
  DECL_CONTEXT (t) = decl;
  TREE_USED (t) = 1;
  DECL_ARGUMENTS (decl) = t;
  ctx->receiver_decl = t;

  /* Allocate memory for the function structure.  The call to
     allocate_struct_function clobbers cfun, so we need to restore
     it afterward.  */
  allocate_struct_function (decl);
  DECL_SOURCE_LOCATION (decl) = EXPR_LOCATION (ctx->stmt);
  cfun->function_end_locus = EXPR_LOCATION (ctx->stmt);
  cfun = ctx->cb.src_cfun;
}

/* Given an OMP_PARALLEL statement, determine whether it is a combined
   parallel+worksharing directive.  This is simply done by examining
   the body of the directive.  If the body contains a single OMP_FOR
   or a single OMP_SECTIONS then this is a combined directive.
   Otherwise, it is a regular parallel directive.  */

enum omp_parallel_type
determine_parallel_type (tree stmt)
{
  enum omp_parallel_type par_type;
  tree body = BIND_EXPR_BODY (OMP_PARALLEL_BODY (stmt));
  tree t;

  par_type = IS_PARALLEL;

  t = expr_only (body);
  if (t && (TREE_CODE (t) == OMP_FOR || TREE_CODE (t) == OMP_SECTIONS))
    par_type = IS_COMBINED_PARALLEL;

  return par_type;
}

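/* For illustration only: a directive such as

       #pragma omp parallel for
       for (i = 0; i < n; i++) ...

   produces an OMP_PARALLEL whose body is a lone OMP_FOR, so it is
   classified as IS_COMBINED_PARALLEL; a parallel whose body holds any
   other statement sequence stays IS_PARALLEL.  */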

/* Scan an OpenMP parallel directive.  */

static void
scan_omp_parallel (tree *stmt_p, omp_context *outer_ctx)
{
  omp_context *ctx;
  tree name;

  /* Ignore parallel directives with empty bodies, unless there
     are copyin clauses.  */
  if (optimize > 0
      && empty_body_p (OMP_PARALLEL_BODY (*stmt_p))
      && find_omp_clause (OMP_CLAUSES (*stmt_p), OMP_CLAUSE_COPYIN) == NULL)
    {
      *stmt_p = build_empty_stmt ();
      return;
    }

  ctx = new_omp_context (*stmt_p, outer_ctx);
  ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
  ctx->parallel_type = determine_parallel_type (*stmt_p);
  ctx->default_kind = OMP_CLAUSE_DEFAULT_SHARED;
  ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
  ctx->parallel_start_ix = BUILT_IN_GOMP_PARALLEL_START;
  ctx->parallel_start_additional_args = NULL_TREE;
  name = create_tmp_var_name (".omp_data_s");
  name = build_decl (TYPE_DECL, name, ctx->record_type);
  TYPE_NAME (ctx->record_type) = name;
  create_omp_child_function (ctx);

  scan_sharing_clauses (OMP_PARALLEL_CLAUSES (*stmt_p), ctx);
  scan_omp (&OMP_PARALLEL_BODY (*stmt_p), ctx);

  if (TYPE_FIELDS (ctx->record_type) == NULL)
    ctx->record_type = ctx->receiver_decl = NULL;
  else
    {
      layout_type (ctx->record_type);
      fixup_child_record_type (ctx);
    }
}


/* Extract the header elements of parallel loop FOR_STMT and store
   them into *FD.  */

static void
extract_omp_for_data (tree for_stmt, omp_context *ctx,
                      struct expand_omp_for_data *fd)
{
  tree t;

  fd->for_stmt = for_stmt;
  fd->pre = NULL;
  fd->ctx = ctx;

  t = OMP_FOR_INIT (for_stmt);
  gcc_assert (TREE_CODE (t) == MODIFY_EXPR);
  fd->v = TREE_OPERAND (t, 0);
  gcc_assert (DECL_P (fd->v));
  gcc_assert (TREE_CODE (TREE_TYPE (fd->v)) == INTEGER_TYPE);
  fd->n1 = TREE_OPERAND (t, 1);

  t = OMP_FOR_COND (for_stmt);
  fd->cond_code = TREE_CODE (t);
  gcc_assert (TREE_OPERAND (t, 0) == fd->v);
  fd->n2 = TREE_OPERAND (t, 1);
  switch (fd->cond_code)
    {
    case LT_EXPR:
    case GT_EXPR:
      break;
    case LE_EXPR:
      fd->n2 = fold_build2 (PLUS_EXPR, TREE_TYPE (fd->n2), fd->n2,
                            build_int_cst (TREE_TYPE (fd->n2), 1));
      fd->cond_code = LT_EXPR;
      break;
    case GE_EXPR:
      fd->n2 = fold_build2 (MINUS_EXPR, TREE_TYPE (fd->n2), fd->n2,
                            build_int_cst (TREE_TYPE (fd->n2), 1));
      fd->cond_code = GT_EXPR;
      break;
    default:
      gcc_unreachable ();
    }

  t = OMP_FOR_INCR (fd->for_stmt);
  gcc_assert (TREE_CODE (t) == MODIFY_EXPR);
  gcc_assert (TREE_OPERAND (t, 0) == fd->v);
  t = TREE_OPERAND (t, 1);
  gcc_assert (TREE_OPERAND (t, 0) == fd->v);
  switch (TREE_CODE (t))
    {
    case PLUS_EXPR:
      fd->step = TREE_OPERAND (t, 1);
      break;
    case MINUS_EXPR:
      fd->step = TREE_OPERAND (t, 1);
      fd->step = fold_build1 (NEGATE_EXPR, TREE_TYPE (fd->step), fd->step);
      break;
    default:
      gcc_unreachable ();
    }

  fd->have_nowait = fd->have_ordered = false;
  fd->sched_kind = OMP_CLAUSE_SCHEDULE_STATIC;
  fd->chunk_size = NULL_TREE;

  for (t = OMP_FOR_CLAUSES (for_stmt); t ; t = OMP_CLAUSE_CHAIN (t))
    switch (TREE_CODE (t))
      {
      case OMP_CLAUSE_NOWAIT:
        fd->have_nowait = true;
        break;
      case OMP_CLAUSE_ORDERED:
        fd->have_ordered = true;
        break;
      case OMP_CLAUSE_SCHEDULE:
        fd->sched_kind = OMP_CLAUSE_SCHEDULE_KIND (t);
        fd->chunk_size = OMP_CLAUSE_SCHEDULE_CHUNK_EXPR (t);
        break;
      default:
        break;
      }

  if (fd->sched_kind == OMP_CLAUSE_SCHEDULE_RUNTIME)
    gcc_assert (fd->chunk_size == NULL);
  else if (fd->chunk_size == NULL)
    {
      /* We only need to compute a default chunk size for ordered
         static loops and dynamic loops.  */
      if (fd->sched_kind != OMP_CLAUSE_SCHEDULE_STATIC || fd->have_ordered)
        fd->chunk_size = (fd->sched_kind == OMP_CLAUSE_SCHEDULE_STATIC)
                         ? integer_zero_node : integer_one_node;
    }
}

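/* For illustration only: the canonicalization above turns

       for (i = 0; i <= N; i++)

   into the equivalent "i < N+1" form, i.e. FD->COND_CODE becomes
   LT_EXPR and FD->N2 becomes N + 1; likewise a decrement "i = i - 2"
   is recorded as a step of -2 via NEGATE_EXPR, so later code only
   deals with "<" or ">" conditions and an additive step.  */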

/* Scan an OpenMP loop directive.  */

static void
scan_omp_for (tree *stmt_p, omp_context *outer_ctx)
{
  omp_context *ctx;
  tree stmt = *stmt_p;

  ctx = new_omp_context (stmt, outer_ctx);

  /* If this is a combined parallel loop directive, we need to extract
     the bounds, step and chunk size for the loop so that we can build
     the call to GOMP_parallel_loop_foo_start.  Do this before
     scanning the loop header to avoid getting the mapped variables
     from the child context.  */
  if (is_in_combined_parallel_ctx (ctx))
    {
      struct expand_omp_for_data fd;
      tree t, additional_args;

      extract_omp_for_data (stmt, ctx, &fd);

      additional_args = NULL_TREE;
      if (fd.chunk_size)
        {
          t = fold_convert (long_integer_type_node, fd.chunk_size);
          additional_args = tree_cons (NULL, t, additional_args);
        }
      t = fold_convert (long_integer_type_node, fd.step);
      additional_args = tree_cons (NULL, t, additional_args);
      t = fold_convert (long_integer_type_node, fd.n2);
      additional_args = tree_cons (NULL, t, additional_args);
      t = fold_convert (long_integer_type_node, fd.n1);
      additional_args = tree_cons (NULL, t, additional_args);
      outer_ctx->parallel_start_additional_args = additional_args;
    }

  scan_sharing_clauses (OMP_FOR_CLAUSES (stmt), ctx);

  /* FIXME.  When expanding into a combined parallel loop, we may not
     need to map some of the variables in the loop header (in
     particular, FD.N1 and FD.N2 for dynamic loops).  */
  scan_omp (&OMP_FOR_PRE_BODY (stmt), ctx);
  scan_omp (&OMP_FOR_INIT (stmt), ctx);
  scan_omp (&OMP_FOR_COND (stmt), ctx);
  scan_omp (&OMP_FOR_INCR (stmt), ctx);
  scan_omp (&OMP_FOR_BODY (stmt), ctx);
}

/* Scan an OpenMP sections directive.  */

static void
scan_omp_sections (tree *stmt_p, omp_context *outer_ctx)
{
  tree stmt = *stmt_p;
  omp_context *ctx;

  ctx = new_omp_context (stmt, outer_ctx);
  scan_sharing_clauses (OMP_SECTIONS_CLAUSES (stmt), ctx);
  scan_omp (&OMP_SECTIONS_BODY (stmt), ctx);
}

/* Scan an OpenMP single directive.  */

static void
scan_omp_single (tree *stmt_p, omp_context *outer_ctx)
{
  tree stmt = *stmt_p;
  omp_context *ctx;
  tree name;

  ctx = new_omp_context (stmt, outer_ctx);
  ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
  ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
  name = create_tmp_var_name (".omp_copy_s");
  name = build_decl (TYPE_DECL, name, ctx->record_type);
  TYPE_NAME (ctx->record_type) = name;

  scan_sharing_clauses (OMP_SINGLE_CLAUSES (stmt), ctx);
  scan_omp (&OMP_SINGLE_BODY (stmt), ctx);

  if (TYPE_FIELDS (ctx->record_type) == NULL)
    ctx->record_type = NULL;
  else
    layout_type (ctx->record_type);
}

/* Similar, except this is either a parallel nested within another
   parallel, or a workshare construct nested within a nested parallel.
   In this case we want to do minimal processing, as the real work
   will be done during lowering of the function generated by the
   outermost parallel.

   The minimal amount of work is processing private clauses, and simply
   scanning the rest.  Private clauses are the only ones that don't
   also imply a reference in the outer parallel.  We must set up a
   translation lest the default behaviour in omp_copy_decl substitute
   error_mark_node.  */

static void
scan_omp_nested (tree *stmt_p, omp_context *outer_ctx)
{
  omp_context *ctx;
  tree var_sized_list = NULL;
  tree c, decl, stmt = *stmt_p;

  ctx = new_omp_context (stmt, outer_ctx);
  ctx->is_nested = true;

  for (c = OMP_CLAUSES (stmt); c ; c = OMP_CLAUSE_CHAIN (c))
    {
      switch (TREE_CODE (c))
        {
        case OMP_CLAUSE_PRIVATE:
          decl = OMP_CLAUSE_DECL (c);
          if (is_variable_sized (decl))
            var_sized_list = tree_cons (NULL, c, var_sized_list);
          OMP_CLAUSE_DECL (c) = install_var_local (decl, ctx);
          break;

        case OMP_CLAUSE_FIRSTPRIVATE:
        case OMP_CLAUSE_LASTPRIVATE:
        case OMP_CLAUSE_REDUCTION:
        case OMP_CLAUSE_SHARED:
        case OMP_CLAUSE_COPYPRIVATE:
        case OMP_CLAUSE_IF:
        case OMP_CLAUSE_NUM_THREADS:
        case OMP_CLAUSE_SCHEDULE:
          scan_omp (&TREE_OPERAND (c, 0), ctx->outer);
          break;

        case OMP_CLAUSE_COPYIN:
        case OMP_CLAUSE_NOWAIT:
        case OMP_CLAUSE_ORDERED:
        case OMP_CLAUSE_DEFAULT:
          break;

        default:
          gcc_unreachable ();
        }
    }

  /* Instantiate the VALUE_EXPR for variable sized variables.  We have
     to do this as a separate pass, since we need the pointer and size
     decls installed first.  */
  for (c = var_sized_list; c ; c = TREE_CHAIN (c))
    fixup_remapped_decl (OMP_CLAUSE_DECL (TREE_VALUE (c)), ctx,
                         OMP_CLAUSE_PRIVATE_DEBUG (TREE_VALUE (c)));

  scan_omp (&OMP_BODY (stmt), ctx);

  if (TREE_CODE (stmt) == OMP_FOR)
    {
      scan_omp (&OMP_FOR_PRE_BODY (stmt), ctx);
      scan_omp (&OMP_FOR_INIT (stmt), ctx);
      scan_omp (&OMP_FOR_COND (stmt), ctx);
      scan_omp (&OMP_FOR_INCR (stmt), ctx);
    }
}


/* Callback for walk_stmts used to scan for OpenMP directives at TP.  */

static tree
scan_omp_1 (tree *tp, int *walk_subtrees, void *data)
{
  struct walk_stmt_info *wi = data;
  omp_context *ctx = wi->info;
  tree t = *tp;

  if (EXPR_HAS_LOCATION (t))
    input_location = EXPR_LOCATION (t);

  *walk_subtrees = 0;
  switch (TREE_CODE (t))
    {
    case OMP_PARALLEL:
      if (++parallel_nesting_level == 1)
        scan_omp_parallel (tp, ctx);
      else
        scan_omp_nested (tp, ctx);
      parallel_nesting_level--;
      break;

    case OMP_FOR:
      if (parallel_nesting_level <= 1)
        scan_omp_for (tp, ctx);
      else
        scan_omp_nested (tp, ctx);
      break;

    case OMP_SECTIONS:
      if (parallel_nesting_level <= 1)
        scan_omp_sections (tp, ctx);
      else
        scan_omp_nested (tp, ctx);
      break;

    case OMP_SINGLE:
      if (parallel_nesting_level <= 1)
        scan_omp_single (tp, ctx);
      else
        scan_omp_nested (tp, ctx);
      break;

    case OMP_SECTION:
    case OMP_MASTER:
    case OMP_ORDERED:
    case OMP_CRITICAL:
      ctx = new_omp_context (*tp, ctx);
      scan_omp (&OMP_BODY (*tp), ctx);
      break;

    case BIND_EXPR:
      {
        tree var;
        *walk_subtrees = 1;

        for (var = BIND_EXPR_VARS (t); var ; var = TREE_CHAIN (var))
          {
            if (DECL_CONTEXT (var) == ctx->cb.src_fn)
              DECL_CONTEXT (var) = ctx->cb.dst_fn;
            insert_decl_map (&ctx->cb, var, var);
          }
      }
      break;

    case VAR_DECL:
    case PARM_DECL:
    case LABEL_DECL:
      if (ctx)
        *tp = remap_decl (t, &ctx->cb);
      break;

    default:
      if (ctx && TYPE_P (t))
        *tp = remap_type (t, &ctx->cb);
      else if (!DECL_P (t))
        *walk_subtrees = 1;
      break;
    }

  return NULL_TREE;
}


/* Scan all the statements starting at STMT_P.  CTX contains context
   information about the OpenMP directives and clauses found during
   the scan.  */

static void
scan_omp (tree *stmt_p, omp_context *ctx)
{
  location_t saved_location;
  struct walk_stmt_info wi;

  memset (&wi, 0, sizeof (wi));
  wi.callback = scan_omp_1;
  wi.info = ctx;
  wi.want_bind_expr = (ctx != NULL);
  wi.want_locations = true;

  saved_location = input_location;
  walk_stmts (&wi, stmt_p);
  input_location = saved_location;
}

/* Re-gimplification and code generation routines.  */

/* Build a call to GOMP_barrier.  */

static void
build_omp_barrier (tree *stmt_list)
{
  tree t;

  t = built_in_decls[BUILT_IN_GOMP_BARRIER];
  t = build_function_call_expr (t, NULL);
  gimplify_and_add (t, stmt_list);
}

/* If a context was created for STMT when it was scanned, return it.  */

static omp_context *
maybe_lookup_ctx (tree stmt)
{
  splay_tree_node n;
  n = splay_tree_lookup (all_contexts, (splay_tree_key) stmt);
  return n ? (omp_context *) n->value : NULL;
}

/* Construct the initialization value for reduction CLAUSE.  */

tree
omp_reduction_init (tree clause, tree type)
{
  switch (OMP_CLAUSE_REDUCTION_CODE (clause))
    {
    case PLUS_EXPR:
    case MINUS_EXPR:
    case BIT_IOR_EXPR:
    case BIT_XOR_EXPR:
    case TRUTH_OR_EXPR:
    case TRUTH_ORIF_EXPR:
    case TRUTH_XOR_EXPR:
    case NE_EXPR:
      return fold_convert (type, integer_zero_node);

    case MULT_EXPR:
    case TRUTH_AND_EXPR:
    case TRUTH_ANDIF_EXPR:
    case EQ_EXPR:
      return fold_convert (type, integer_one_node);

    case BIT_AND_EXPR:
      return fold_convert (type, integer_minus_one_node);

    case MAX_EXPR:
      if (SCALAR_FLOAT_TYPE_P (type))
        {
          REAL_VALUE_TYPE max, min;
          if (HONOR_INFINITIES (TYPE_MODE (type)))
            {
              real_inf (&max);
              real_arithmetic (&min, NEGATE_EXPR, &max, NULL);
            }
          else
            real_maxval (&min, 1, TYPE_MODE (type));
          return build_real (type, min);
        }
      else
        {
          gcc_assert (INTEGRAL_TYPE_P (type));
          return TYPE_MIN_VALUE (type);
        }

    case MIN_EXPR:
      if (SCALAR_FLOAT_TYPE_P (type))
        {
          REAL_VALUE_TYPE max;
          if (HONOR_INFINITIES (TYPE_MODE (type)))
            real_inf (&max);
          else
            real_maxval (&max, 0, TYPE_MODE (type));
          return build_real (type, max);
        }
      else
        {
          gcc_assert (INTEGRAL_TYPE_P (type));
          return TYPE_MAX_VALUE (type);
        }

    default:
      gcc_unreachable ();
    }
}

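/* For illustration only: a 'reduction(+:sum)' clause therefore starts
   each thread's private 'sum' at 0, 'reduction(*:p)' starts 'p' at 1,
   and a MAX_EXPR reduction starts its variable at the smallest
   representable value of its type (-inf for floats when infinities
   are honored).  */
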
/* Generate code to implement the input clauses, FIRSTPRIVATE and COPYIN,
   from the receiver (aka child) side and initializers for REFERENCE_TYPE
   private variables.  Initialization statements go in ILIST, while calls
   to destructors go in DLIST.  */

static void
expand_rec_input_clauses (tree clauses, tree *ilist, tree *dlist,
                          omp_context *ctx)
{
  tree_stmt_iterator diter;
  tree c, dtor, copyin_seq, x, args, ptr;
  bool copyin_by_ref = false;
  int pass;

  *dlist = alloc_stmt_list ();
  diter = tsi_start (*dlist);
  copyin_seq = NULL;

  /* Do all the fixed sized types in the first pass, and the variable sized
     types in the second pass.  This makes sure that the scalar arguments to
     the variable sized types are processed before we use them in the
     variable sized operations.  */
  for (pass = 0; pass < 2; ++pass)
    {
      for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
        {
          enum tree_code c_kind = TREE_CODE (c);
          tree var, new_var;
          bool by_ref;

          switch (c_kind)
            {
            case OMP_CLAUSE_PRIVATE:
              if (OMP_CLAUSE_PRIVATE_DEBUG (c))
                continue;
              break;
            case OMP_CLAUSE_SHARED:
            case OMP_CLAUSE_FIRSTPRIVATE:
            case OMP_CLAUSE_LASTPRIVATE:
            case OMP_CLAUSE_COPYIN:
            case OMP_CLAUSE_REDUCTION:
              break;
            default:
              continue;
            }

          new_var = var = OMP_CLAUSE_DECL (c);
          if (c_kind != OMP_CLAUSE_COPYIN)
            new_var = lookup_decl (var, ctx);

          if (c_kind == OMP_CLAUSE_SHARED || c_kind == OMP_CLAUSE_COPYIN)
            {
              if (pass != 0)
                continue;
            }
          /* For variable sized types, we need to allocate the actual
             storage here.  Call alloca and store the result in the pointer
             decl that we created elsewhere.  */
          else if (is_variable_sized (var))
            {
              if (pass == 0)
                continue;

              ptr = DECL_VALUE_EXPR (new_var);
              gcc_assert (TREE_CODE (ptr) == INDIRECT_REF);
              ptr = TREE_OPERAND (ptr, 0);
              gcc_assert (DECL_P (ptr));

              x = TYPE_SIZE_UNIT (TREE_TYPE (new_var));
              args = tree_cons (NULL, x, NULL);
              x = built_in_decls[BUILT_IN_ALLOCA];
              x = build_function_call_expr (x, args);
              x = fold_convert (TREE_TYPE (ptr), x);
              x = build2 (MODIFY_EXPR, void_type_node, ptr, x);
              gimplify_and_add (x, ilist);
            }
          /* For references that are being privatized for Fortran, allocate
             new backing storage for the new pointer variable.  This allows
             us to avoid changing all the code that expects a pointer to
             something that expects a direct variable.  Note that this
             doesn't apply to C++, since reference types are disallowed in
             data sharing clauses there.  */
          else if (is_reference (var))
            {
              if (pass == 0)
                continue;

              x = TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (new_var)));
              if (TREE_CONSTANT (x))
                {
                  const char *name = NULL;
                  if (DECL_NAME (var))
                    name = IDENTIFIER_POINTER (DECL_NAME (new_var));

                  x = create_tmp_var (TREE_TYPE (TREE_TYPE (new_var)), name);
                  x = build_fold_addr_expr_with_type (x, TREE_TYPE (new_var));
                }
              else
                {
                  args = tree_cons (NULL, x, NULL);
                  x = built_in_decls[BUILT_IN_ALLOCA];
                  x = build_function_call_expr (x, args);
                  x = fold_convert (TREE_TYPE (new_var), x);
                }

              x = build2 (MODIFY_EXPR, void_type_node, new_var, x);
              gimplify_and_add (x, ilist);

              new_var = build_fold_indirect_ref (new_var);
            }
          else if (c_kind == OMP_CLAUSE_REDUCTION
                   && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
            {
              if (pass == 0)
                continue;
            }
          else if (pass != 0)
            continue;

          switch (TREE_CODE (c))
            {
            case OMP_CLAUSE_SHARED:
              /* Set up the DECL_VALUE_EXPR for shared variables now.  This
                 needs to be delayed until after fixup_child_record_type so
                 that we get the correct type during the dereference.  */
              by_ref = use_pointer_for_field (var, true);
              x = build_receiver_ref (var, by_ref, ctx);
              SET_DECL_VALUE_EXPR (new_var, x);
              DECL_HAS_VALUE_EXPR_P (new_var) = 1;

              /* ??? If VAR is not passed by reference, and the variable
                 hasn't been initialized yet, then we'll get a warning for
                 the store into the omp_data_s structure.  Ideally, we'd be
                 able to notice this and not store anything at all, but
                 we're generating code too early.  Suppress the warning.  */
              if (!by_ref)
                TREE_NO_WARNING (var) = 1;
              break;

            case OMP_CLAUSE_LASTPRIVATE:
              if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
                break;
              /* FALLTHRU */

            case OMP_CLAUSE_PRIVATE:
              x = lang_hooks.decls.omp_clause_default_ctor (c, new_var);
              if (x)
                gimplify_and_add (x, ilist);
              /* FALLTHRU */

            do_dtor:
              x = lang_hooks.decls.omp_clause_dtor (c, new_var);
              if (x)
                {
                  dtor = x;
                  gimplify_stmt (&dtor);
                  tsi_link_before (&diter, dtor, TSI_SAME_STMT);
                }
              break;

            case OMP_CLAUSE_FIRSTPRIVATE:
              x = build_outer_var_ref (var, ctx);
              x = lang_hooks.decls.omp_clause_copy_ctor (c, new_var, x);
              gimplify_and_add (x, ilist);
              goto do_dtor;
              break;

            case OMP_CLAUSE_COPYIN:
              by_ref = use_pointer_for_field (var, false);
              x = build_receiver_ref (var, by_ref, ctx);
              x = lang_hooks.decls.omp_clause_assign_op (c, new_var, x);
              append_to_statement_list (x, &copyin_seq);
              copyin_by_ref |= by_ref;
              break;

            case OMP_CLAUSE_REDUCTION:
              if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
                {
                  gimplify_and_add (OMP_CLAUSE_REDUCTION_INIT (c), ilist);
                  OMP_CLAUSE_REDUCTION_INIT (c) = NULL;
                }
              else
                {
                  x = omp_reduction_init (c, TREE_TYPE (new_var));
                  gcc_assert (TREE_CODE (TREE_TYPE (new_var)) != ARRAY_TYPE);
                  x = build2 (MODIFY_EXPR, void_type_node, new_var, x);
                  gimplify_and_add (x, ilist);
                }
              break;

            default:
              gcc_unreachable ();
            }
        }
    }

  /* The copyin sequence is not to be executed by the main thread, since
     that would result in self-copies.  Perhaps not visible to scalars,
     but it certainly is to C++ operator=.  */
  if (copyin_seq)
    {
      x = built_in_decls[BUILT_IN_OMP_GET_THREAD_NUM];
      x = build_function_call_expr (x, NULL);
      x = build2 (NE_EXPR, boolean_type_node, x,
                  build_int_cst (TREE_TYPE (x), 0));
      x = build3 (COND_EXPR, void_type_node, x, copyin_seq, NULL);
      gimplify_and_add (x, ilist);
    }

  /* If any copyin variable is passed by reference, we must ensure the
     master thread doesn't modify it before it is copied over in all
     threads.  */
  if (copyin_by_ref)
    build_omp_barrier (ilist);
}

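/* For illustration only: for a clause such as 'firstprivate(x)' on a
   parallel, the child-side code generated above amounts to

       x_priv = .omp_data_i->x;   (via the language's copy constructor)

   with any matching destructor call queued on DLIST, while 'copyin(t)'
   copies the master's threadprivate 't' out of the record guarded by
   "if (omp_get_thread_num () != 0)" to avoid a self-copy.  */
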
/* Generate code to implement the LASTPRIVATE clauses.  This is used for
   both parallel and workshare constructs.  PREDICATE may be NULL if it's
   always true.  */

static void
expand_lastprivate_clauses (tree clauses, tree predicate, tree *stmt_list,
                            omp_context *ctx)
{
  tree sub_list, x, c;

  /* Early exit if there are no lastprivate clauses.  */
  clauses = find_omp_clause (clauses, OMP_CLAUSE_LASTPRIVATE);
  if (clauses == NULL)
    {
      /* If this was a workshare clause, see if it had been combined
         with its parallel.  In that case, look for the clauses on the
         parallel statement itself.  */
      if (is_parallel_ctx (ctx))
        return;

      ctx = ctx->outer;
      if (ctx == NULL || !is_parallel_ctx (ctx))
        return;

      clauses = find_omp_clause (OMP_PARALLEL_CLAUSES (ctx->stmt),
                                 OMP_CLAUSE_LASTPRIVATE);
      if (clauses == NULL)
        return;
    }

  sub_list = alloc_stmt_list ();

  for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
    {
      tree var, new_var;

      if (TREE_CODE (c) != OMP_CLAUSE_LASTPRIVATE)
        continue;

      var = OMP_CLAUSE_DECL (c);
      new_var = lookup_decl (var, ctx);

      x = build_outer_var_ref (var, ctx);
      if (is_reference (var))
        new_var = build_fold_indirect_ref (new_var);
      x = lang_hooks.decls.omp_clause_assign_op (c, x, new_var);
      append_to_statement_list (x, &sub_list);
    }

  if (predicate)
    x = build3 (COND_EXPR, void_type_node, predicate, sub_list, NULL);
  else
    x = sub_list;
  gimplify_and_add (x, stmt_list);
}

/* Generate code to implement the REDUCTION clauses.  */

static void
expand_reduction_clauses (tree clauses, tree *stmt_list, omp_context *ctx)
{
  tree sub_list = NULL, x, c;
  int count = 0;

  /* First see if there is exactly one reduction clause.  Use OMP_ATOMIC
     update in that case, otherwise use a lock.  */
  for (c = clauses; c && count < 2; c = OMP_CLAUSE_CHAIN (c))
    if (TREE_CODE (c) == OMP_CLAUSE_REDUCTION)
      {
        if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
          {
            /* Never use OMP_ATOMIC for array reductions.  */
            count = -1;
            break;
          }
        count++;
      }

  if (count == 0)
    return;

  for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
    {
      tree var, ref, new_var;
      enum tree_code code;

      if (TREE_CODE (c) != OMP_CLAUSE_REDUCTION)
        continue;

      var = OMP_CLAUSE_DECL (c);
      new_var = lookup_decl (var, ctx);
      if (is_reference (var))
        new_var = build_fold_indirect_ref (new_var);
      ref = build_outer_var_ref (var, ctx);
      code = OMP_CLAUSE_REDUCTION_CODE (c);
      /* reduction(-:var) sums up the partial results, so it acts identically
         to reduction(+:var).  */
      if (code == MINUS_EXPR)
        code = PLUS_EXPR;

      if (count == 1)
        {
          tree addr = build_fold_addr_expr (ref);

          addr = save_expr (addr);
          ref = build1 (INDIRECT_REF, TREE_TYPE (TREE_TYPE (addr)), addr);
          x = fold_build2 (code, TREE_TYPE (ref), ref, new_var);
          x = build2 (OMP_ATOMIC, void_type_node, addr, x);
          gimplify_and_add (x, stmt_list);
          return;
        }

      if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
        {
          tree placeholder = OMP_CLAUSE_REDUCTION_PLACEHOLDER (c);

          if (is_reference (var))
            ref = build_fold_addr_expr (ref);
          SET_DECL_VALUE_EXPR (placeholder, ref);
          DECL_HAS_VALUE_EXPR_P (placeholder) = 1;
          gimplify_and_add (OMP_CLAUSE_REDUCTION_MERGE (c), &sub_list);
          OMP_CLAUSE_REDUCTION_MERGE (c) = NULL;
          OMP_CLAUSE_REDUCTION_PLACEHOLDER (c) = NULL;
        }
      else
        {
          x = build2 (code, TREE_TYPE (ref), ref, new_var);
          ref = build_outer_var_ref (var, ctx);
          x = build2 (MODIFY_EXPR, void_type_node, ref, x);
          append_to_statement_list (x, &sub_list);
        }
    }

  x = built_in_decls[BUILT_IN_GOMP_ATOMIC_START];
  x = build_function_call_expr (x, NULL);
  gimplify_and_add (x, stmt_list);

  gimplify_and_add (sub_list, stmt_list);

  x = built_in_decls[BUILT_IN_GOMP_ATOMIC_END];
  x = build_function_call_expr (x, NULL);
  gimplify_and_add (x, stmt_list);
}

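/* For illustration only: with a single scalar 'reduction(+:sum)'
   clause, the merge above is emitted as one OMP_ATOMIC update,
   roughly "atomically: sum = sum + sum_priv", while two or more
   reductions (or an array reduction) are merged under a global lock:

       GOMP_atomic_start ();
       sum = sum + sum_priv;  prod = prod * prod_priv;
       GOMP_atomic_end ();  */
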
/* Generate code to implement the COPYPRIVATE clauses.  */

static void
expand_copyprivate_clauses (tree clauses, tree *slist, tree *rlist,
                            omp_context *ctx)
{
  tree c;

  for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
    {
      tree var, ref, x;
      bool by_ref;

      if (TREE_CODE (c) != OMP_CLAUSE_COPYPRIVATE)
        continue;

      var = OMP_CLAUSE_DECL (c);
      by_ref = use_pointer_for_field (var, false);

      ref = build_sender_ref (var, ctx);
      x = by_ref ? build_fold_addr_expr (var) : var;
      x = build2 (MODIFY_EXPR, void_type_node, ref, x);
      gimplify_and_add (x, slist);

      ref = build_receiver_ref (var, by_ref, ctx);
      if (is_reference (var))
        {
          ref = build_fold_indirect_ref (ref);
          var = build_fold_indirect_ref (var);
        }
      x = lang_hooks.decls.omp_clause_assign_op (c, var, ref);
      gimplify_and_add (x, rlist);
    }
}

/* Generate code to implement the clauses, FIRSTPRIVATE, COPYIN, LASTPRIVATE,
   and REDUCTION from the sender (aka parent) side.  */

static void
expand_send_clauses (tree clauses, tree *ilist, tree *olist, omp_context *ctx)
{
  tree c;

  for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
    {
      tree val, ref, x;
      bool by_ref, do_in = false, do_out = false;

      switch (TREE_CODE (c))
        {
        case OMP_CLAUSE_FIRSTPRIVATE:
        case OMP_CLAUSE_COPYIN:
        case OMP_CLAUSE_LASTPRIVATE:
        case OMP_CLAUSE_REDUCTION:
          break;
        default:
          continue;
        }

      val = OMP_CLAUSE_DECL (c);
      if (is_variable_sized (val))
        continue;
      by_ref = use_pointer_for_field (val, false);

      switch (TREE_CODE (c))
        {
        case OMP_CLAUSE_FIRSTPRIVATE:
        case OMP_CLAUSE_COPYIN:
          do_in = true;
          break;

        case OMP_CLAUSE_LASTPRIVATE:
          if (by_ref || is_reference (val))
            {
              if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
                continue;
              do_in = true;
            }
          else
            do_out = true;
          break;

        case OMP_CLAUSE_REDUCTION:
          do_in = true;
          do_out = !(by_ref || is_reference (val));
          break;

        default:
          gcc_unreachable ();
        }

      if (do_in)
        {
          ref = build_sender_ref (val, ctx);
          x = by_ref ? build_fold_addr_expr (val) : val;
          x = build2 (MODIFY_EXPR, void_type_node, ref, x);
          gimplify_and_add (x, ilist);
        }
      if (do_out)
        {
          ref = build_sender_ref (val, ctx);
          x = build2 (MODIFY_EXPR, void_type_node, val, ref);
          gimplify_and_add (x, olist);
        }
    }
}

/* Generate code to implement SHARED from the sender (aka parent) side.
   This is trickier, since OMP_PARALLEL_CLAUSES doesn't list things that
   got automatically shared.  */

static void
expand_send_shared_vars (tree *ilist, tree *olist, omp_context *ctx)
{
  tree ovar, nvar, f, x;

  if (ctx->record_type == NULL)
    return;

  for (f = TYPE_FIELDS (ctx->record_type); f ; f = TREE_CHAIN (f))
    {
      ovar = DECL_ABSTRACT_ORIGIN (f);
      nvar = maybe_lookup_decl (ovar, ctx);
      if (!nvar || !DECL_HAS_VALUE_EXPR_P (nvar))
        continue;

      if (use_pointer_for_field (ovar, true))
        {
          x = build_sender_ref (ovar, ctx);
          ovar = build_fold_addr_expr (ovar);
          x = build2 (MODIFY_EXPR, void_type_node, x, ovar);
          gimplify_and_add (x, ilist);
        }
      else
        {
          x = build_sender_ref (ovar, ctx);
          x = build2 (MODIFY_EXPR, void_type_node, x, ovar);
          gimplify_and_add (x, ilist);

          x = build_sender_ref (ovar, ctx);
          x = build2 (MODIFY_EXPR, void_type_node, ovar, x);
          gimplify_and_add (x, olist);
        }
    }
}

/* Build the function calls to GOMP_parallel_start etc to actually
   generate the parallel operation.  */

static void
build_parallel_call (tree clauses, tree *stmt_list, omp_context *ctx)
{
  tree t, args, val, cond, c;

  /* By default, the value of NUM_THREADS is zero (selected at run time)
     and there is no conditional.  */
  cond = NULL_TREE;
  val = build_int_cst (unsigned_type_node, 0);

  c = find_omp_clause (clauses, OMP_CLAUSE_IF);
  if (c)
    cond = OMP_CLAUSE_IF_EXPR (c);

  c = find_omp_clause (clauses, OMP_CLAUSE_NUM_THREADS);
  if (c)
    val = OMP_CLAUSE_NUM_THREADS_EXPR (c);

  /* Ensure 'val' is of the correct type.  */
  val = fold_convert (unsigned_type_node, val);

  /* If we found the clause 'if (cond)', build either
     (cond != 0) or (cond ? val : 1u).  */
  if (cond)
    {
      if (integer_zerop (val))
        val = build2 (EQ_EXPR, unsigned_type_node, cond,
                      build_int_cst (TREE_TYPE (cond), 0));
      else
        val = build3 (COND_EXPR, unsigned_type_node, cond, val,
                      build_int_cst (unsigned_type_node, 1));
    }

  args = tree_cons (NULL, val, NULL);
  t = ctx->sender_decl;
  if (t == NULL)
    t = null_pointer_node;
  else
    t = build_fold_addr_expr (t);
  args = tree_cons (NULL, t, args);
  t = build_fold_addr_expr (ctx->cb.dst_fn);
  args = tree_cons (NULL, t, args);
  if (ctx->parallel_start_additional_args)
    args = chainon (args, ctx->parallel_start_additional_args);
  t = built_in_decls[ctx->parallel_start_ix];
  t = build_function_call_expr (t, args);
  gimplify_and_add (t, stmt_list);

  t = ctx->sender_decl;
  if (t == NULL)
    t = null_pointer_node;
  else
    t = build_fold_addr_expr (t);
  args = tree_cons (NULL, t, NULL);
  t = build_function_call_expr (ctx->cb.dst_fn, args);
  gimplify_and_add (t, stmt_list);

  t = built_in_decls[BUILT_IN_GOMP_PARALLEL_END];
  t = build_function_call_expr (t, NULL);
  gimplify_and_add (t, stmt_list);
}

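/* For illustration only: for "#pragma omp parallel num_threads (4)"
   in a function foo, the sequence emitted above is roughly

       GOMP_parallel_start (foo.omp_fn.0, &.omp_data_o, 4);
       foo.omp_fn.0 (&.omp_data_o);
       GOMP_parallel_end ();

   The parent thread takes part via the direct call, and combined
   parallel+workshare regions switch PARALLEL_START_IX to a
   GOMP_parallel_loop_foo_start entry point with the extra loop
   arguments chained on.  */
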
/* If exceptions are enabled, wrap *STMT_P in a MUST_NOT_THROW catch
   handler.  This prevents programs from violating the structured
   block semantics with throws.  */

static void
maybe_catch_exception (tree *stmt_p)
{
  tree f, t;

  if (!flag_exceptions)
    return;

  if (lang_protect_cleanup_actions)
    t = lang_protect_cleanup_actions ();
  else
    {
      t = built_in_decls[BUILT_IN_TRAP];
      t = build_function_call_expr (t, NULL);
    }
  f = build2 (EH_FILTER_EXPR, void_type_node, NULL, NULL);
  EH_FILTER_MUST_NOT_THROW (f) = 1;
  gimplify_and_add (t, &EH_FILTER_FAILURE (f));

  t = build2 (TRY_CATCH_EXPR, void_type_node, *stmt_p, NULL);
  append_to_statement_list (f, &TREE_OPERAND (t, 1));

  *stmt_p = NULL;
  append_to_statement_list (t, stmt_p);
}


1890 /* Expand the OpenMP parallel directive pointed to by STMT_P. CTX
1891 holds context information for *STMT_P. Expansion proceeds in
1892 two main phases:
1893
1894 (1) The body of the parallel is expanded in-situ.
1895 All the input and reduction clauses are expanded (from the
1896 child's perspective). The body of the parallel is then
1897 inserted as the body of CTX->CB.DST_FUN (the function spawned
1898 to execute each child thread).
1899
1900 (2) Back in the original function, the original body of the
1901 directive is replaced with the expansion of clauses (from the
1902 parent's perspective), and the thread library call to launch
1903 all the children threads. */

static void
expand_omp_parallel (tree *stmt_p, omp_context *ctx)
{
  tree clauses, block, bind, body, olist;

  current_function_decl = ctx->cb.dst_fn;
  cfun = DECL_STRUCT_FUNCTION (current_function_decl);

  push_gimplify_context ();

  /* First phase.  Expand the body of the child threads, emit
     receiving code for data copying clauses.  */
  clauses = OMP_PARALLEL_CLAUSES (*stmt_p);
  bind = OMP_PARALLEL_BODY (*stmt_p);
  block = BIND_EXPR_BLOCK (bind);
  body = BIND_EXPR_BODY (bind);
  BIND_EXPR_BODY (bind) = alloc_stmt_list ();

  expand_rec_input_clauses (clauses, &BIND_EXPR_BODY (bind), &olist, ctx);

  expand_omp (&body, ctx);
  append_to_statement_list (body, &BIND_EXPR_BODY (bind));

  expand_reduction_clauses (clauses, &BIND_EXPR_BODY (bind), ctx);
  append_to_statement_list (olist, &BIND_EXPR_BODY (bind));
  maybe_catch_exception (&BIND_EXPR_BODY (bind));

  pop_gimplify_context (bind);
  BIND_EXPR_VARS (bind) = chainon (BIND_EXPR_VARS (bind), ctx->block_vars);
  BLOCK_VARS (block) = BIND_EXPR_VARS (bind);

  DECL_INITIAL (ctx->cb.dst_fn) = block;
  DECL_SAVED_TREE (ctx->cb.dst_fn) = bind;
  cgraph_add_new_function (ctx->cb.dst_fn);

  current_function_decl = ctx->cb.src_fn;
  cfun = DECL_STRUCT_FUNCTION (current_function_decl);

  block = make_node (BLOCK);
  bind = build3 (BIND_EXPR, void_type_node, NULL, NULL, block);
  *stmt_p = bind;

  push_gimplify_context ();

  /* Second phase.  Build the sender decl now that we're in the
     correct context.  Replace the original body of the directive with
     sending code for data copying clauses and the parallel call to
     launch child threads.  */
  if (ctx->record_type)
    ctx->sender_decl = create_tmp_var (ctx->record_type, ".omp_data_o");

  olist = NULL;
  expand_send_clauses (clauses, &BIND_EXPR_BODY (bind), &olist, ctx);
  expand_send_shared_vars (&BIND_EXPR_BODY (bind), &olist, ctx);
  build_parallel_call (clauses, &BIND_EXPR_BODY (bind), ctx);
  append_to_statement_list (olist, &BIND_EXPR_BODY (bind));

  pop_gimplify_context (bind);
  BLOCK_VARS (block) = BIND_EXPR_VARS (bind);
}
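
/* A sketch of the overall outlining with illustrative names (the
   struct/function identifiers below are assumptions, not what this
   pass emits verbatim):

       #pragma omp parallel shared (a)
         use (a);

   leaves behind, in the parent,

       struct omp_data_s { int *a; } data;
       data.a = &a;
       GOMP_parallel_start (child_fn, &data, 0);
       child_fn (&data);
       GOMP_parallel_end ();

   while the body moves into the new child function

       static void
       child_fn (struct omp_data_s *rdata)
       {
         use (*rdata->a);
       }

   The third argument to GOMP_parallel_start is the requested number
   of threads, 0 meaning "let the runtime decide".  */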

/* A subroutine of expand_omp_for_1.  Generate the code that gates
   a lastprivate clause.  Given a loop control predicate of
   (V cond N2), we gate the clause on (!(V cond N2)).  */

static void
expand_omp_for_lastprivate (struct expand_omp_for_data *fd)
{
  tree clauses, cond;
  enum tree_code cond_code;

  cond_code = fd->cond_code;
  cond_code = cond_code == LT_EXPR ? GE_EXPR : LE_EXPR;

  /* When possible, use a strict equality expression.  This can let VRP
     type optimizations deduce the value and remove a copy.  */
  if (host_integerp (fd->step, 0))
    {
      HOST_WIDE_INT step = TREE_INT_CST_LOW (fd->step);
      if (step == 1 || step == -1)
        cond_code = EQ_EXPR;
    }

  cond = build2 (cond_code, boolean_type_node, fd->v, fd->n2);

  clauses = OMP_FOR_CLAUSES (fd->for_stmt);
  expand_lastprivate_clauses (clauses, cond, &fd->pre, fd->ctx);
}
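
/* For example, given "for (i = 0; i < n; i++)" (cond_code LT_EXPR,
   step 1), only the thread that ran the final iteration ends with
   i == n, so the gate degrades to the cheap test

       if (i == n)
         ...copy lastprivate values out...

   With a non-unit step the per-thread exit values all differ and only
   the last-chunk thread can reach or pass N2, so the inverted
   condition (here i >= n) is used instead.  */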

/* A subroutine of expand_omp_for_1.  Generate code for a parallel
   loop with any schedule.  Given parameters:

       for (V = N1; V cond N2; V += STEP) BODY;

   where COND is "<" or ">", we generate pseudocode

       more = GOMP_loop_foo_start (N1, N2, STEP, CHUNK, &istart0, &iend0);
       if (more) goto L0; else goto L2;
   L0:
       V = istart0;
       iend = iend0;
   L1:
       BODY;
       V += STEP;
       if (V cond iend) goto L1;
       more = GOMP_loop_foo_next (&istart0, &iend0);
       if (more) goto L0;
       lastprivate;
   L2:

   If this is a combined omp parallel loop, we can skip the call
   to GOMP_loop_foo_start and generate

   L0:
       if (!GOMP_loop_foo_next (&istart0, &iend0)) goto L2;
       V = istart0;
       iend = iend0;
   L1:
       BODY;
       V += STEP;
       if (V cond iend) goto L1;
       goto L0;
   L2:
       lastprivate;
*/

static void
expand_omp_for_generic (struct expand_omp_for_data *fd,
                        enum built_in_function start_fn,
                        enum built_in_function next_fn)
{
  tree l0, l1, l2;
  tree type, istart0, iend0, iend;
  tree t, args;
  bool in_combined_parallel = is_in_combined_parallel_ctx (fd->ctx);

  type = TREE_TYPE (fd->v);

  istart0 = create_tmp_var (long_integer_type_node, ".istart0");
  iend0 = create_tmp_var (long_integer_type_node, ".iend0");

  l0 = create_artificial_label ();
  l1 = create_artificial_label ();
  l2 = create_artificial_label ();
  iend = create_tmp_var (type, NULL);

  /* If this is a combined parallel loop, skip the call to
     GOMP_loop_foo_start and call GOMP_loop_foo_next directly.  */
  if (in_combined_parallel)
    {
      t = build1 (LABEL_EXPR, void_type_node, l0);
      gimplify_and_add (t, &fd->pre);
      t = build_fold_addr_expr (iend0);
      args = tree_cons (NULL, t, NULL);
      t = build_fold_addr_expr (istart0);
      args = tree_cons (NULL, t, args);
      t = build_function_call_expr (built_in_decls[next_fn], args);
      t = build1 (TRUTH_NOT_EXPR, TREE_TYPE (t), t);
      t = build3 (COND_EXPR, void_type_node, t, build_and_jump (&l2), NULL);
      gimplify_and_add (t, &fd->pre);
    }
  else
    {
      t = build_fold_addr_expr (iend0);
      args = tree_cons (NULL, t, NULL);
      t = build_fold_addr_expr (istart0);
      args = tree_cons (NULL, t, args);
      if (fd->chunk_size)
        {
          t = fold_convert (long_integer_type_node, fd->chunk_size);
          args = tree_cons (NULL, t, args);
        }
      t = fold_convert (long_integer_type_node, fd->step);
      args = tree_cons (NULL, t, args);
      t = fold_convert (long_integer_type_node, fd->n2);
      args = tree_cons (NULL, t, args);
      t = fold_convert (long_integer_type_node, fd->n1);
      args = tree_cons (NULL, t, args);
      t = build_function_call_expr (built_in_decls[start_fn], args);
      t = build3 (COND_EXPR, void_type_node, t,
                  build_and_jump (&l0), build_and_jump (&l2));
      gimplify_and_add (t, &fd->pre);
      t = build1 (LABEL_EXPR, void_type_node, l0);
      gimplify_and_add (t, &fd->pre);
    }

  t = fold_convert (type, istart0);
  t = build2 (MODIFY_EXPR, void_type_node, fd->v, t);
  gimplify_and_add (t, &fd->pre);

  t = fold_convert (type, iend0);
  t = build2 (MODIFY_EXPR, void_type_node, iend, t);
  gimplify_and_add (t, &fd->pre);

  t = build1 (LABEL_EXPR, void_type_node, l1);
  gimplify_and_add (t, &fd->pre);

  append_to_statement_list (OMP_FOR_BODY (fd->for_stmt), &fd->pre);

  t = build2 (PLUS_EXPR, type, fd->v, fd->step);
  t = build2 (MODIFY_EXPR, void_type_node, fd->v, t);
  gimplify_and_add (t, &fd->pre);

  t = build2 (fd->cond_code, boolean_type_node, fd->v, iend);
  t = build3 (COND_EXPR, void_type_node, t, build_and_jump (&l1), NULL);
  gimplify_and_add (t, &fd->pre);

  /* If emitting a combined parallel loop, we only need to emit a jump
     back to L0 to call GOMP_loop_foo_next again.  */
  if (in_combined_parallel)
    {
      t = build_and_jump (&l0);
      gimplify_and_add (t, &fd->pre);
    }
  else
    {
      t = build_fold_addr_expr (iend0);
      args = tree_cons (NULL, t, NULL);
      t = build_fold_addr_expr (istart0);
      args = tree_cons (NULL, t, args);
      t = build_function_call_expr (built_in_decls[next_fn], args);
      t = build3 (COND_EXPR, void_type_node, t, build_and_jump (&l0), NULL);
      gimplify_and_add (t, &fd->pre);
    }

  expand_omp_for_lastprivate (fd);

  t = build1 (LABEL_EXPR, void_type_node, l2);
  gimplify_and_add (t, &fd->pre);
}
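
/* As a concrete instance of the pseudocode above, assuming the
   dynamic flavor is selected (start_fn/next_fn resolve to the libgomp
   entry points GOMP_loop_dynamic_start/_next), "schedule(dynamic, 8)"
   over [0, n) behaves like

       long istart0, iend0;
       _Bool more = GOMP_loop_dynamic_start (0, n, 1, 8,
                                             &istart0, &iend0);
       while (more)
         {
           for (long i = istart0; i < iend0; i++)
             BODY;
           more = GOMP_loop_dynamic_next (&istart0, &iend0);
         }

   with each successful call handing the thread another chunk of at
   most 8 iterations.  (A barrier follows separately unless the loop
   had a nowait clause; see expand_omp_for_1.)  */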


/* A subroutine of expand_omp_for_1.  Generate code for a parallel
   loop with static schedule and no specified chunk size.  Given
   parameters:

       for (V = N1; V cond N2; V += STEP) BODY;

   where COND is "<" or ">", we generate pseudocode

       if (cond is <)
         adj = STEP - 1;
       else
         adj = STEP + 1;
       n = (adj + N2 - N1) / STEP;
       q = n / nthreads;
       q += (q * nthreads != n);
       s0 = q * threadid;
       e0 = min(s0 + q, n);
       if (s0 >= e0) goto L2; else goto L0;
   L0:
       V = s0 * STEP + N1;
       e = e0 * STEP + N1;
   L1:
       BODY;
       V += STEP;
       if (V cond e) goto L1;
       lastprivate;
   L2:
*/

static void
expand_omp_for_static_nochunk (struct expand_omp_for_data *fd)
{
  tree l0, l1, l2, n, q, s0, e0, e, t, nthreads, threadid;
  tree type, utype;

  l0 = create_artificial_label ();
  l1 = create_artificial_label ();
  l2 = create_artificial_label ();

  type = TREE_TYPE (fd->v);
  utype = lang_hooks.types.unsigned_type (type);

  t = built_in_decls[BUILT_IN_OMP_GET_NUM_THREADS];
  t = build_function_call_expr (t, NULL);
  t = fold_convert (utype, t);
  nthreads = get_formal_tmp_var (t, &fd->pre);

  t = built_in_decls[BUILT_IN_OMP_GET_THREAD_NUM];
  t = build_function_call_expr (t, NULL);
  t = fold_convert (utype, t);
  threadid = get_formal_tmp_var (t, &fd->pre);

  fd->n1 = fold_convert (type, fd->n1);
  if (!is_gimple_val (fd->n1))
    fd->n1 = get_formal_tmp_var (fd->n1, &fd->pre);

  fd->n2 = fold_convert (type, fd->n2);
  if (!is_gimple_val (fd->n2))
    fd->n2 = get_formal_tmp_var (fd->n2, &fd->pre);

  fd->step = fold_convert (type, fd->step);
  if (!is_gimple_val (fd->step))
    fd->step = get_formal_tmp_var (fd->step, &fd->pre);

  t = build_int_cst (type, (fd->cond_code == LT_EXPR ? -1 : 1));
  t = fold_build2 (PLUS_EXPR, type, fd->step, t);
  t = fold_build2 (PLUS_EXPR, type, t, fd->n2);
  t = fold_build2 (MINUS_EXPR, type, t, fd->n1);
  t = fold_build2 (TRUNC_DIV_EXPR, type, t, fd->step);
  t = fold_convert (utype, t);
  if (is_gimple_val (t))
    n = t;
  else
    n = get_formal_tmp_var (t, &fd->pre);

  t = build2 (TRUNC_DIV_EXPR, utype, n, nthreads);
  q = get_formal_tmp_var (t, &fd->pre);

  t = build2 (MULT_EXPR, utype, q, nthreads);
  t = build2 (NE_EXPR, utype, t, n);
  t = build2 (PLUS_EXPR, utype, q, t);
  q = get_formal_tmp_var (t, &fd->pre);

  t = build2 (MULT_EXPR, utype, q, threadid);
  s0 = get_formal_tmp_var (t, &fd->pre);

  t = build2 (PLUS_EXPR, utype, s0, q);
  t = build2 (MIN_EXPR, utype, t, n);
  e0 = get_formal_tmp_var (t, &fd->pre);

  t = build2 (GE_EXPR, boolean_type_node, s0, e0);
  t = build3 (COND_EXPR, void_type_node, t,
              build_and_jump (&l2), build_and_jump (&l0));
  gimplify_and_add (t, &fd->pre);

  t = build1 (LABEL_EXPR, void_type_node, l0);
  gimplify_and_add (t, &fd->pre);

  t = fold_convert (type, s0);
  t = build2 (MULT_EXPR, type, t, fd->step);
  t = build2 (PLUS_EXPR, type, t, fd->n1);
  t = build2 (MODIFY_EXPR, void_type_node, fd->v, t);
  gimplify_and_add (t, &fd->pre);

  t = fold_convert (type, e0);
  t = build2 (MULT_EXPR, type, t, fd->step);
  t = build2 (PLUS_EXPR, type, t, fd->n1);
  e = get_formal_tmp_var (t, &fd->pre);

  t = build1 (LABEL_EXPR, void_type_node, l1);
  gimplify_and_add (t, &fd->pre);

  append_to_statement_list (OMP_FOR_BODY (fd->for_stmt), &fd->pre);

  t = build2 (PLUS_EXPR, type, fd->v, fd->step);
  t = build2 (MODIFY_EXPR, void_type_node, fd->v, t);
  gimplify_and_add (t, &fd->pre);

  t = build2 (fd->cond_code, boolean_type_node, fd->v, e);
  t = build3 (COND_EXPR, void_type_node, t, build_and_jump (&l1), NULL);
  gimplify_and_add (t, &fd->pre);

  expand_omp_for_lastprivate (fd);

  t = build1 (LABEL_EXPR, void_type_node, l2);
  gimplify_and_add (t, &fd->pre);
}
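
/* Worked example of the blocking arithmetic above: n = 9 iterations
   and nthreads = 4 give q = 9/4 = 2, then q += (2*4 != 9), so q = 3.
   Threads 0..3 compute [s0, e0) = [0,3), [3,6), [6,9) and [9,9);
   the last thread sees s0 >= e0 and jumps straight to L2, while the
   others each run their slice of the iteration space exactly once.  */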

/* A subroutine of expand_omp_for_1.  Generate code for a parallel
   loop with static schedule and a specified chunk size.  Given
   parameters:

       for (V = N1; V cond N2; V += STEP) BODY;

   where COND is "<" or ">", we generate pseudocode

       if (cond is <)
         adj = STEP - 1;
       else
         adj = STEP + 1;
       n = (adj + N2 - N1) / STEP;
       trip = 0;
   L0:
       s0 = (trip * nthreads + threadid) * CHUNK;
       e0 = min(s0 + CHUNK, n);
       if (s0 < n) goto L1; else goto L4;
   L1:
       V = s0 * STEP + N1;
       e = e0 * STEP + N1;
   L2:
       BODY;
       V += STEP;
       if (V cond e) goto L2; else goto L3;
   L3:
       trip += 1;
       goto L0;
   L4:
       if (trip == 0) goto L5;
       lastprivate;
   L5:
*/

static void
expand_omp_for_static_chunk (struct expand_omp_for_data *fd)
{
  tree l0, l1, l2, l3, l4, l5, n, s0, e0, e, t;
  tree trip, nthreads, threadid;
  tree type, utype;

  l0 = create_artificial_label ();
  l1 = create_artificial_label ();
  l2 = create_artificial_label ();
  l3 = create_artificial_label ();
  l4 = create_artificial_label ();
  l5 = create_artificial_label ();

  type = TREE_TYPE (fd->v);
  utype = lang_hooks.types.unsigned_type (type);

  t = built_in_decls[BUILT_IN_OMP_GET_NUM_THREADS];
  t = build_function_call_expr (t, NULL);
  t = fold_convert (utype, t);
  nthreads = get_formal_tmp_var (t, &fd->pre);

  t = built_in_decls[BUILT_IN_OMP_GET_THREAD_NUM];
  t = build_function_call_expr (t, NULL);
  t = fold_convert (utype, t);
  threadid = get_formal_tmp_var (t, &fd->pre);

  fd->n1 = fold_convert (type, fd->n1);
  if (!is_gimple_val (fd->n1))
    fd->n1 = get_formal_tmp_var (fd->n1, &fd->pre);

  fd->n2 = fold_convert (type, fd->n2);
  if (!is_gimple_val (fd->n2))
    fd->n2 = get_formal_tmp_var (fd->n2, &fd->pre);

  fd->step = fold_convert (type, fd->step);
  if (!is_gimple_val (fd->step))
    fd->step = get_formal_tmp_var (fd->step, &fd->pre);

  fd->chunk_size = fold_convert (utype, fd->chunk_size);
  if (!is_gimple_val (fd->chunk_size))
    fd->chunk_size = get_formal_tmp_var (fd->chunk_size, &fd->pre);

  t = build_int_cst (type, (fd->cond_code == LT_EXPR ? -1 : 1));
  t = fold_build2 (PLUS_EXPR, type, fd->step, t);
  t = fold_build2 (PLUS_EXPR, type, t, fd->n2);
  t = fold_build2 (MINUS_EXPR, type, t, fd->n1);
  t = fold_build2 (TRUNC_DIV_EXPR, type, t, fd->step);
  t = fold_convert (utype, t);
  if (is_gimple_val (t))
    n = t;
  else
    n = get_formal_tmp_var (t, &fd->pre);

  t = build_int_cst (utype, 0);
  trip = get_initialized_tmp_var (t, &fd->pre, NULL);

  t = build1 (LABEL_EXPR, void_type_node, l0);
  gimplify_and_add (t, &fd->pre);

  t = build2 (MULT_EXPR, utype, trip, nthreads);
  t = build2 (PLUS_EXPR, utype, t, threadid);
  t = build2 (MULT_EXPR, utype, t, fd->chunk_size);
  s0 = get_formal_tmp_var (t, &fd->pre);

  t = build2 (PLUS_EXPR, utype, s0, fd->chunk_size);
  t = build2 (MIN_EXPR, utype, t, n);
  e0 = get_formal_tmp_var (t, &fd->pre);

  t = build2 (LT_EXPR, boolean_type_node, s0, n);
  t = build3 (COND_EXPR, void_type_node, t,
              build_and_jump (&l1), build_and_jump (&l4));
  gimplify_and_add (t, &fd->pre);

  t = build1 (LABEL_EXPR, void_type_node, l1);
  gimplify_and_add (t, &fd->pre);

  t = fold_convert (type, s0);
  t = build2 (MULT_EXPR, type, t, fd->step);
  t = build2 (PLUS_EXPR, type, t, fd->n1);
  t = build2 (MODIFY_EXPR, void_type_node, fd->v, t);
  gimplify_and_add (t, &fd->pre);

  t = fold_convert (type, e0);
  t = build2 (MULT_EXPR, type, t, fd->step);
  t = build2 (PLUS_EXPR, type, t, fd->n1);
  e = get_formal_tmp_var (t, &fd->pre);

  t = build1 (LABEL_EXPR, void_type_node, l2);
  gimplify_and_add (t, &fd->pre);

  append_to_statement_list (OMP_FOR_BODY (fd->for_stmt), &fd->pre);

  t = build2 (PLUS_EXPR, type, fd->v, fd->step);
  t = build2 (MODIFY_EXPR, void_type_node, fd->v, t);
  gimplify_and_add (t, &fd->pre);

  t = build2 (fd->cond_code, boolean_type_node, fd->v, e);
  t = build3 (COND_EXPR, void_type_node, t,
              build_and_jump (&l2), build_and_jump (&l3));
  gimplify_and_add (t, &fd->pre);

  t = build1 (LABEL_EXPR, void_type_node, l3);
  gimplify_and_add (t, &fd->pre);

  t = build_int_cst (utype, 1);
  t = build2 (PLUS_EXPR, utype, trip, t);
  t = build2 (MODIFY_EXPR, void_type_node, trip, t);
  gimplify_and_add (t, &fd->pre);

  t = build1 (GOTO_EXPR, void_type_node, l0);
  gimplify_and_add (t, &fd->pre);

  t = build1 (LABEL_EXPR, void_type_node, l4);
  gimplify_and_add (t, &fd->pre);

  t = build_int_cst (utype, 0);
  t = build2 (EQ_EXPR, boolean_type_node, trip, t);
  t = build3 (COND_EXPR, void_type_node, t, build_and_jump (&l5), NULL);
  gimplify_and_add (t, &fd->pre);

  expand_omp_for_lastprivate (fd);

  t = build1 (LABEL_EXPR, void_type_node, l5);
  gimplify_and_add (t, &fd->pre);
}
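
/* Worked example of the trip/chunk arithmetic above: n = 10, CHUNK = 2
   and nthreads = 2.  On trip 0 thread 0 takes s0 = 0 and thread 1
   takes s0 = 2; on trip 1 they take 4 and 6; on trip 2 thread 0 takes
   [8,10) while thread 1 computes s0 = 10, fails the s0 < n test and
   exits to L4.  The trip counter is per-thread, so the "trip == 0"
   test at L4 lets a thread that never won a chunk skip the
   lastprivate code.  */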

/* A subroutine of expand_omp_for.  Expand the logic of the loop itself.  */

static tree
expand_omp_for_1 (tree *stmt_p, omp_context *ctx)
{
  struct expand_omp_for_data fd;
  tree dlist;

  extract_omp_for_data (*stmt_p, ctx, &fd);

  expand_rec_input_clauses (OMP_FOR_CLAUSES (fd.for_stmt),
                            &fd.pre, &dlist, ctx);

  expand_omp (&OMP_FOR_PRE_BODY (fd.for_stmt), ctx);
  append_to_statement_list (OMP_FOR_PRE_BODY (fd.for_stmt), &fd.pre);

  if (fd.sched_kind == OMP_CLAUSE_SCHEDULE_STATIC && !fd.have_ordered)
    {
      if (fd.chunk_size == NULL)
        expand_omp_for_static_nochunk (&fd);
      else
        expand_omp_for_static_chunk (&fd);
    }
  else
    {
      int fn_index;

      fn_index = fd.sched_kind + fd.have_ordered * 4;

      expand_omp_for_generic (&fd, BUILT_IN_GOMP_LOOP_STATIC_START + fn_index,
                              BUILT_IN_GOMP_LOOP_STATIC_NEXT + fn_index);
    }

  expand_reduction_clauses (OMP_FOR_CLAUSES (fd.for_stmt), &fd.pre, ctx);
  append_to_statement_list (dlist, &fd.pre);

  /* If this parallel loop was part of a combined parallel loop
     directive, inform the parent parallel what flavour of
     GOMP_parallel_loop_XXX_start to use.  */
  if (is_in_combined_parallel_ctx (ctx))
    {
      int start_ix = BUILT_IN_GOMP_PARALLEL_LOOP_STATIC_START + fd.sched_kind;
      ctx->outer->parallel_start_ix = start_ix;
    }
  else if (!fd.have_nowait)
    build_omp_barrier (&fd.pre);

  return fd.pre;
}
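
/* The fn_index arithmetic above leans on the layout of the builtin
   enum: the four GOMP_loop_*_START entries (static, dynamic, guided,
   runtime) are declared consecutively, followed by the four ordered
   variants, and likewise for *_NEXT.  So, for instance, a
   schedule(guided) ordered loop selects

       BUILT_IN_GOMP_LOOP_STATIC_START + OMP_CLAUSE_SCHEDULE_GUIDED + 4
         == BUILT_IN_GOMP_LOOP_ORDERED_GUIDED_START

   (assuming that declaration order; the offsets come straight from
   fn_index = fd.sched_kind + fd.have_ordered * 4 above).  */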

/* Expand code for an OpenMP loop directive.  */

static void
expand_omp_for (tree *stmt_p, omp_context *ctx)
{
  tree bind, block, stmt_list;

  push_gimplify_context ();

  expand_omp (&OMP_FOR_BODY (*stmt_p), ctx);

  stmt_list = expand_omp_for_1 (stmt_p, ctx);
  block = make_node (BLOCK);
  bind = build3 (BIND_EXPR, void_type_node, NULL, stmt_list, block);
  maybe_catch_exception (&BIND_EXPR_BODY (bind));
  *stmt_p = bind;

  pop_gimplify_context (bind);
  BIND_EXPR_VARS (bind) = chainon (BIND_EXPR_VARS (bind), ctx->block_vars);
  BLOCK_VARS (block) = BIND_EXPR_VARS (bind);
}

/* Expand code for an OpenMP sections directive.  In pseudo code, we generate

       firstprivate;
       v = GOMP_sections_start (n);
   L0:
       switch (v)
         {
         case 0:
           goto L2;
         case 1:
           section 1;
           goto L1;
         case 2:
           ...
         case n:
           ...
           lastprivate;
         default:
           abort ();
         }
   L1:
       v = GOMP_sections_next ();
       goto L0;
   L2:
       reduction;

   If this is a combined parallel sections directive, skip the call to
   GOMP_sections_start and emit the call to GOMP_sections_next right
   before the switch().  */

static void
expand_omp_sections (tree *stmt_p, omp_context *ctx)
{
  tree sec_stmt, label_vec, bind, block, stmt_list, l0, l1, l2, t, u, v;
  tree_stmt_iterator tsi;
  tree dlist;
  unsigned i, len;
  bool in_combined_parallel = is_in_combined_parallel_ctx (ctx);

  sec_stmt = *stmt_p;
  stmt_list = NULL;

  push_gimplify_context ();

  expand_rec_input_clauses (OMP_SECTIONS_CLAUSES (sec_stmt),
                            &stmt_list, &dlist, ctx);

  tsi = tsi_start (OMP_SECTIONS_BODY (sec_stmt));
  for (len = 0; !tsi_end_p (tsi); len++, tsi_next (&tsi))
    continue;

  l0 = create_artificial_label ();
  l1 = create_artificial_label ();
  l2 = create_artificial_label ();
  v = create_tmp_var (unsigned_type_node, ".section");
  label_vec = make_tree_vec (len + 2);

  t = build_int_cst (unsigned_type_node, len);
  t = tree_cons (NULL, t, NULL);

  if (in_combined_parallel)
    {
      /* Nothing to do.  Just inform our parent of the additional
         arguments to invoke GOMP_parallel_sections_start.  */
      ctx->outer->parallel_start_ix = BUILT_IN_GOMP_PARALLEL_SECTIONS_START;
      ctx->outer->parallel_start_additional_args = t;
    }
  else
    {
      u = built_in_decls[BUILT_IN_GOMP_SECTIONS_START];
      t = build_function_call_expr (u, t);
      t = build2 (MODIFY_EXPR, void_type_node, v, t);
      gimplify_and_add (t, &stmt_list);
    }

  t = build1 (LABEL_EXPR, void_type_node, l0);
  gimplify_and_add (t, &stmt_list);

  if (in_combined_parallel)
    {
      /* Combined parallel sections need the call to GOMP_sections_next
         before the switch().  */
      t = built_in_decls[BUILT_IN_GOMP_SECTIONS_NEXT];
      t = build_function_call_expr (t, NULL);
      t = build2 (MODIFY_EXPR, void_type_node, v, t);
      gimplify_and_add (t, &stmt_list);
    }

  t = build3 (SWITCH_EXPR, void_type_node, v, NULL, label_vec);
  gimplify_and_add (t, &stmt_list);

  t = build3 (CASE_LABEL_EXPR, void_type_node,
              build_int_cst (unsigned_type_node, 0), NULL, l2);
  TREE_VEC_ELT (label_vec, 0) = t;

  tsi = tsi_start (OMP_SECTIONS_BODY (sec_stmt));
  for (i = 0; i < len; i++, tsi_next (&tsi))
    {
      omp_context *sctx;

      t = create_artificial_label ();
      u = build_int_cst (unsigned_type_node, i + 1);
      u = build3 (CASE_LABEL_EXPR, void_type_node, u, NULL, t);
      TREE_VEC_ELT (label_vec, i + 1) = u;
      t = build1 (LABEL_EXPR, void_type_node, t);
      gimplify_and_add (t, &stmt_list);

      t = tsi_stmt (tsi);
      sctx = maybe_lookup_ctx (t);
      gcc_assert (sctx);
      expand_omp (&OMP_SECTION_BODY (t), sctx);
      append_to_statement_list (OMP_SECTION_BODY (t), &stmt_list);

      if (i == len - 1)
        expand_lastprivate_clauses (OMP_SECTIONS_CLAUSES (sec_stmt),
                                    NULL, &stmt_list, ctx);

      t = build1 (GOTO_EXPR, void_type_node, l1);
      gimplify_and_add (t, &stmt_list);
    }

  t = create_artificial_label ();
  u = build3 (CASE_LABEL_EXPR, void_type_node, NULL, NULL, t);
  TREE_VEC_ELT (label_vec, len + 1) = u;
  t = build1 (LABEL_EXPR, void_type_node, t);
  gimplify_and_add (t, &stmt_list);

  t = built_in_decls[BUILT_IN_TRAP];
  t = build_function_call_expr (t, NULL);
  gimplify_and_add (t, &stmt_list);

  t = build1 (LABEL_EXPR, void_type_node, l1);
  gimplify_and_add (t, &stmt_list);

  if (!in_combined_parallel)
    {
      t = built_in_decls[BUILT_IN_GOMP_SECTIONS_NEXT];
      t = build_function_call_expr (t, NULL);
      t = build2 (MODIFY_EXPR, void_type_node, v, t);
      gimplify_and_add (t, &stmt_list);
    }

  t = build1 (GOTO_EXPR, void_type_node, l0);
  gimplify_and_add (t, &stmt_list);

  t = build1 (LABEL_EXPR, void_type_node, l2);
  gimplify_and_add (t, &stmt_list);

  expand_reduction_clauses (OMP_SECTIONS_CLAUSES (sec_stmt), &stmt_list, ctx);
  append_to_statement_list (dlist, &stmt_list);

  /* Unless there's a nowait clause, add a barrier afterward.  */
  if (!find_omp_clause (OMP_SECTIONS_CLAUSES (sec_stmt), OMP_CLAUSE_NOWAIT))
    build_omp_barrier (&stmt_list);

  block = make_node (BLOCK);
  bind = build3 (BIND_EXPR, void_type_node, NULL, stmt_list, block);
  maybe_catch_exception (&BIND_EXPR_BODY (bind));
  *stmt_p = bind;

  pop_gimplify_context (bind);
  BIND_EXPR_VARS (bind) = chainon (BIND_EXPR_VARS (bind), ctx->block_vars);
  BLOCK_VARS (block) = BIND_EXPR_VARS (bind);
}
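
/* For illustration, a two-section construct

       #pragma omp sections
       {
       #pragma omp section
         foo ();
       #pragma omp section
         bar ();
       }

   lowers, outside a combined parallel, roughly to

       unsigned v = GOMP_sections_start (2);
       for (;;)
         switch (v)
           {
           case 0: goto done;
           case 1: foo (); v = GOMP_sections_next (); break;
           case 2: bar (); v = GOMP_sections_next (); break;
           default: __builtin_trap ();
           }
     done:
       ...reductions, then a barrier unless nowait was given...

   where GOMP_sections_start (2) hands each thread a section number,
   or 0 once none are left.  */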


/* A subroutine of expand_omp_single.  Expand the simple form of
   an OMP_SINGLE, without a copyprivate clause:

       if (GOMP_single_start ())
         BODY;
       [ GOMP_barrier (); ]    -> unless 'nowait' is present.
*/

static void
expand_omp_single_simple (tree single_stmt, tree *pre_p)
{
  tree t;

  t = built_in_decls[BUILT_IN_GOMP_SINGLE_START];
  t = build_function_call_expr (t, NULL);
  t = build3 (COND_EXPR, void_type_node, t,
              OMP_SINGLE_BODY (single_stmt), NULL);
  gimplify_and_add (t, pre_p);

  if (!find_omp_clause (OMP_SINGLE_CLAUSES (single_stmt), OMP_CLAUSE_NOWAIT))
    build_omp_barrier (pre_p);
}
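
/* I.e., modulo gimplification,

       #pragma omp single
         body ();

   becomes

       if (GOMP_single_start ())
         body ();
       GOMP_barrier ();

   GOMP_single_start returns true in exactly one of the threads that
   reach it, so BODY runs once.  */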

/* A subroutine of expand_omp_single.  Expand an OMP_SINGLE that has
   a copyprivate clause:

       #pragma omp single copyprivate (a, b, c)

   Create a new structure to hold copies of 'a', 'b' and 'c' and emit:

       {
         if ((copyout_p = GOMP_single_copy_start ()) == NULL)
           {
             BODY;
             copyout.a = a;
             copyout.b = b;
             copyout.c = c;
             GOMP_single_copy_end (&copyout);
           }
         else
           {
             a = copyout_p->a;
             b = copyout_p->b;
             c = copyout_p->c;
           }
         GOMP_barrier ();
       }
*/

static void
expand_omp_single_copy (tree single_stmt, tree *pre_p, omp_context *ctx)
{
  tree ptr_type, t, args, l0, l1, l2, copyin_seq;

  ctx->sender_decl = create_tmp_var (ctx->record_type, ".omp_copy_o");

  ptr_type = build_pointer_type (ctx->record_type);
  ctx->receiver_decl = create_tmp_var (ptr_type, ".omp_copy_i");

  l0 = create_artificial_label ();
  l1 = create_artificial_label ();
  l2 = create_artificial_label ();

  t = built_in_decls[BUILT_IN_GOMP_SINGLE_COPY_START];
  t = build_function_call_expr (t, NULL);
  t = fold_convert (ptr_type, t);
  t = build2 (MODIFY_EXPR, void_type_node, ctx->receiver_decl, t);
  gimplify_and_add (t, pre_p);

  t = build2 (EQ_EXPR, boolean_type_node, ctx->receiver_decl,
              build_int_cst (ptr_type, 0));
  t = build3 (COND_EXPR, void_type_node, t,
              build_and_jump (&l0), build_and_jump (&l1));
  gimplify_and_add (t, pre_p);

  t = build1 (LABEL_EXPR, void_type_node, l0);
  gimplify_and_add (t, pre_p);

  append_to_statement_list (OMP_SINGLE_BODY (single_stmt), pre_p);

  copyin_seq = NULL;
  expand_copyprivate_clauses (OMP_SINGLE_CLAUSES (single_stmt), pre_p,
                              &copyin_seq, ctx);

  t = build_fold_addr_expr (ctx->sender_decl);
  args = tree_cons (NULL, t, NULL);
  t = built_in_decls[BUILT_IN_GOMP_SINGLE_COPY_END];
  t = build_function_call_expr (t, args);
  gimplify_and_add (t, pre_p);

  t = build_and_jump (&l2);
  gimplify_and_add (t, pre_p);

  t = build1 (LABEL_EXPR, void_type_node, l1);
  gimplify_and_add (t, pre_p);

  append_to_statement_list (copyin_seq, pre_p);

  t = build1 (LABEL_EXPR, void_type_node, l2);
  gimplify_and_add (t, pre_p);

  build_omp_barrier (pre_p);
}

/* Expand code for an OpenMP single directive.  */

static void
expand_omp_single (tree *stmt_p, omp_context *ctx)
{
  tree bind, block, single_stmt = *stmt_p, dlist;

  push_gimplify_context ();

  block = make_node (BLOCK);
  bind = build3 (BIND_EXPR, void_type_node, NULL, NULL, block);
  *stmt_p = bind;

  expand_rec_input_clauses (OMP_SINGLE_CLAUSES (single_stmt),
                            &BIND_EXPR_BODY (bind), &dlist, ctx);

  expand_omp (&OMP_SINGLE_BODY (single_stmt), ctx);

  if (ctx->record_type)
    expand_omp_single_copy (single_stmt, &BIND_EXPR_BODY (bind), ctx);
  else
    expand_omp_single_simple (single_stmt, &BIND_EXPR_BODY (bind));

  append_to_statement_list (dlist, &BIND_EXPR_BODY (bind));

  maybe_catch_exception (&BIND_EXPR_BODY (bind));
  pop_gimplify_context (bind);
  BIND_EXPR_VARS (bind) = chainon (BIND_EXPR_VARS (bind), ctx->block_vars);
  BLOCK_VARS (block) = BIND_EXPR_VARS (bind);
}

/* Expand code for an OpenMP master directive.  */

static void
expand_omp_master (tree *stmt_p, omp_context *ctx)
{
  tree bind, block, stmt = *stmt_p, lab = NULL, x;

  push_gimplify_context ();

  block = make_node (BLOCK);
  bind = build3 (BIND_EXPR, void_type_node, NULL, NULL, block);
  *stmt_p = bind;

  x = built_in_decls[BUILT_IN_OMP_GET_THREAD_NUM];
  x = build_function_call_expr (x, NULL);
  x = build2 (EQ_EXPR, boolean_type_node, x, integer_zero_node);
  x = build3 (COND_EXPR, void_type_node, x, NULL, build_and_jump (&lab));
  gimplify_and_add (x, &BIND_EXPR_BODY (bind));

  expand_omp (&OMP_MASTER_BODY (stmt), ctx);
  append_to_statement_list (OMP_MASTER_BODY (stmt), &BIND_EXPR_BODY (bind));

  x = build1 (LABEL_EXPR, void_type_node, lab);
  gimplify_and_add (x, &BIND_EXPR_BODY (bind));

  maybe_catch_exception (&BIND_EXPR_BODY (bind));
  pop_gimplify_context (bind);
  BIND_EXPR_VARS (bind) = chainon (BIND_EXPR_VARS (bind), ctx->block_vars);
  BLOCK_VARS (block) = BIND_EXPR_VARS (bind);
}
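
/* I.e. the master construct reduces to

       if (omp_get_thread_num () == 0)
         body ();

   Note there is no implied barrier, and OMP_MASTER carries no
   clauses, so no data environment needs to be set up here.  */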

/* Expand code for an OpenMP ordered directive.  */

static void
expand_omp_ordered (tree *stmt_p, omp_context *ctx)
{
  tree bind, block, stmt = *stmt_p, x;

  push_gimplify_context ();

  block = make_node (BLOCK);
  bind = build3 (BIND_EXPR, void_type_node, NULL, NULL, block);
  *stmt_p = bind;

  x = built_in_decls[BUILT_IN_GOMP_ORDERED_START];
  x = build_function_call_expr (x, NULL);
  gimplify_and_add (x, &BIND_EXPR_BODY (bind));

  expand_omp (&OMP_ORDERED_BODY (stmt), ctx);
  append_to_statement_list (OMP_ORDERED_BODY (stmt), &BIND_EXPR_BODY (bind));

  x = built_in_decls[BUILT_IN_GOMP_ORDERED_END];
  x = build_function_call_expr (x, NULL);
  gimplify_and_add (x, &BIND_EXPR_BODY (bind));

  maybe_catch_exception (&BIND_EXPR_BODY (bind));
  pop_gimplify_context (bind);
  BIND_EXPR_VARS (bind) = chainon (BIND_EXPR_VARS (bind), ctx->block_vars);
  BLOCK_VARS (block) = BIND_EXPR_VARS (bind);
}
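
/* I.e. the body is simply bracketed as

       GOMP_ordered_start ();
       body ();
       GOMP_ordered_end ();

   the ordering itself (waiting for the previous iteration's turn) is
   implemented inside libgomp, using the state set up by the enclosing
   ordered loop schedule.  */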

/* Expand code for an OpenMP critical directive.  This is a relatively
   simple substitution of a couple of function calls, but the NAMED
   case requires that the languages coordinate a symbol name.  It is
   therefore best handled here in common code.  */

static GTY((param1_is (tree), param2_is (tree)))
  splay_tree critical_name_mutexes;

static void
expand_omp_critical (tree *stmt_p, omp_context *ctx)
{
  tree bind, block, stmt = *stmt_p;
  tree lock, unlock, name;

  name = OMP_CRITICAL_NAME (stmt);
  if (name)
    {
      tree decl, args;
      splay_tree_node n;

      if (!critical_name_mutexes)
        critical_name_mutexes
          = splay_tree_new_ggc (splay_tree_compare_pointers);

      n = splay_tree_lookup (critical_name_mutexes, (splay_tree_key) name);
      if (n == NULL)
        {
          char *new_str;

          decl = create_tmp_var_raw (ptr_type_node, NULL);

          new_str = ACONCAT ((".gomp_critical_user_",
                              IDENTIFIER_POINTER (name), NULL));
          DECL_NAME (decl) = get_identifier (new_str);
          TREE_PUBLIC (decl) = 1;
          TREE_STATIC (decl) = 1;
          DECL_COMMON (decl) = 1;
          DECL_ARTIFICIAL (decl) = 1;
          DECL_IGNORED_P (decl) = 1;
          cgraph_varpool_finalize_decl (decl);

          splay_tree_insert (critical_name_mutexes, (splay_tree_key) name,
                             (splay_tree_value) decl);
        }
      else
        decl = (tree) n->value;

      args = tree_cons (NULL, build_fold_addr_expr (decl), NULL);
      lock = built_in_decls[BUILT_IN_GOMP_CRITICAL_NAME_START];
      lock = build_function_call_expr (lock, args);

      args = tree_cons (NULL, build_fold_addr_expr (decl), NULL);
      unlock = built_in_decls[BUILT_IN_GOMP_CRITICAL_NAME_END];
      unlock = build_function_call_expr (unlock, args);
    }
  else
    {
      lock = built_in_decls[BUILT_IN_GOMP_CRITICAL_START];
      lock = build_function_call_expr (lock, NULL);

      unlock = built_in_decls[BUILT_IN_GOMP_CRITICAL_END];
      unlock = build_function_call_expr (unlock, NULL);
    }

  push_gimplify_context ();

  block = make_node (BLOCK);
  bind = build3 (BIND_EXPR, void_type_node, NULL, NULL, block);
  *stmt_p = bind;

  gimplify_and_add (lock, &BIND_EXPR_BODY (bind));

  expand_omp (&OMP_CRITICAL_BODY (stmt), ctx);
  maybe_catch_exception (&OMP_CRITICAL_BODY (stmt));
  append_to_statement_list (OMP_CRITICAL_BODY (stmt), &BIND_EXPR_BODY (bind));

  gimplify_and_add (unlock, &BIND_EXPR_BODY (bind));

  pop_gimplify_context (bind);
  BIND_EXPR_VARS (bind) = chainon (BIND_EXPR_VARS (bind), ctx->block_vars);
  BLOCK_VARS (block) = BIND_EXPR_VARS (bind);
}
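
/* For illustration, "#pragma omp critical (foo)" is bracketed as

       GOMP_critical_name_start (&gomp_critical_user_foo);
       body ();
       GOMP_critical_name_end (&gomp_critical_user_foo);

   where gomp_critical_user_foo stands for the pointer-sized,
   TREE_PUBLIC, DECL_COMMON variable built above (its real name
   carries a leading dot), so every translation unit naming the same
   critical section shares one lock.  The unnamed form instead calls
   GOMP_critical_start and GOMP_critical_end, which use a single lock
   internal to libgomp.  */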

/* Pass *TP back through the gimplifier within the context determined by WI.
   This handles replacement of DECL_VALUE_EXPR, as well as adjusting the
   flags on ADDR_EXPR.  */

static void
expand_regimplify (tree *tp, struct walk_stmt_info *wi)
{
  enum gimplify_status gs;
  tree pre = NULL;

  if (wi->is_lhs)
    gs = gimplify_expr (tp, &pre, NULL, is_gimple_lvalue, fb_lvalue);
  else if (wi->val_only)
    gs = gimplify_expr (tp, &pre, NULL, is_gimple_val, fb_rvalue);
  else
    gs = gimplify_expr (tp, &pre, NULL, is_gimple_formal_tmp_var, fb_rvalue);
  gcc_assert (gs == GS_ALL_DONE);

  if (pre)
    tsi_link_before (&wi->tsi, pre, TSI_SAME_STMT);
}

static tree
expand_omp_1 (tree *tp, int *walk_subtrees, void *data)
{
  struct walk_stmt_info *wi = data;
  omp_context *ctx = wi->info;
  tree t = *tp;

  *walk_subtrees = 0;
  switch (TREE_CODE (*tp))
    {
    case OMP_PARALLEL:
      ctx = maybe_lookup_ctx (t);
      if (!ctx->is_nested)
        expand_omp_parallel (tp, ctx);
      break;

    case OMP_FOR:
      ctx = maybe_lookup_ctx (t);
      gcc_assert (ctx);
      expand_omp_for (tp, ctx);
      break;

    case OMP_SECTIONS:
      ctx = maybe_lookup_ctx (t);
      gcc_assert (ctx);
      expand_omp_sections (tp, ctx);
      break;

    case OMP_SINGLE:
      ctx = maybe_lookup_ctx (t);
      gcc_assert (ctx);
      expand_omp_single (tp, ctx);
      break;

    case OMP_MASTER:
      ctx = maybe_lookup_ctx (t);
      gcc_assert (ctx);
      expand_omp_master (tp, ctx);
      break;

    case OMP_ORDERED:
      ctx = maybe_lookup_ctx (t);
      gcc_assert (ctx);
      expand_omp_ordered (tp, ctx);
      break;

    case OMP_CRITICAL:
      ctx = maybe_lookup_ctx (t);
      gcc_assert (ctx);
      expand_omp_critical (tp, ctx);
      break;

    case VAR_DECL:
      if (ctx && DECL_HAS_VALUE_EXPR_P (t))
        expand_regimplify (tp, wi);
      break;

    case ADDR_EXPR:
      if (ctx)
        expand_regimplify (tp, wi);
      break;

    case ARRAY_REF:
    case ARRAY_RANGE_REF:
    case REALPART_EXPR:
    case IMAGPART_EXPR:
    case COMPONENT_REF:
    case VIEW_CONVERT_EXPR:
      if (ctx)
        expand_regimplify (tp, wi);
      break;

    case INDIRECT_REF:
      if (ctx)
        {
          wi->is_lhs = false;
          wi->val_only = true;
          expand_regimplify (&TREE_OPERAND (t, 0), wi);
        }
      break;

    default:
      if (!TYPE_P (t) && !DECL_P (t))
        *walk_subtrees = 1;
      break;
    }

  return NULL_TREE;
}

static void
expand_omp (tree *stmt_p, omp_context *ctx)
{
  struct walk_stmt_info wi;

  memset (&wi, 0, sizeof (wi));
  wi.callback = expand_omp_1;
  wi.info = ctx;
  wi.val_only = true;
  wi.want_locations = true;

  walk_stmts (&wi, stmt_p);
}
\f
/* Main entry point.  */

static void
execute_lower_omp (void)
{
  all_contexts = splay_tree_new (splay_tree_compare_pointers, 0,
                                 delete_omp_context);

  scan_omp (&DECL_SAVED_TREE (current_function_decl), NULL);
  gcc_assert (parallel_nesting_level == 0);

  if (all_contexts->root)
    expand_omp (&DECL_SAVED_TREE (current_function_decl), NULL);

  splay_tree_delete (all_contexts);
  all_contexts = NULL;
}

static bool
gate_lower_omp (void)
{
  return flag_openmp != 0;
}

struct tree_opt_pass pass_lower_omp =
{
  "omplower",                           /* name */
  gate_lower_omp,                       /* gate */
  execute_lower_omp,                    /* execute */
  NULL,                                 /* sub */
  NULL,                                 /* next */
  0,                                    /* static_pass_number */
  0,                                    /* tv_id */
  PROP_gimple_any,                      /* properties_required */
  PROP_gimple_lomp,                     /* properties_provided */
  0,                                    /* properties_destroyed */
  0,                                    /* todo_flags_start */
  TODO_dump_func,                       /* todo_flags_finish */
  0                                     /* letter */
};

\f
/* The following is a utility to diagnose OpenMP structured block
   violations.  It is not part of the "omplower" pass, as that is
   invoked too late.  It should be invoked by the respective front
   ends after gimplification.  */

static splay_tree all_labels;

/* Check for mismatched contexts and generate an error if needed.  Return
   true if an error is detected.  */

static bool
diagnose_sb_0 (tree *stmt_p, tree branch_ctx, tree label_ctx)
{
  bool exit_p = true;

  if ((label_ctx ? TREE_VALUE (label_ctx) : NULL) == branch_ctx)
    return false;

  /* Try to avoid confusing the user by producing an error message
     with the correct "exit" or "enter" verbiage.  We prefer "exit"
     unless we can show that LABEL_CTX is nested within BRANCH_CTX.  */
  if (branch_ctx == NULL)
    exit_p = false;
  else
    {
      while (label_ctx)
        {
          if (TREE_VALUE (label_ctx) == branch_ctx)
            {
              exit_p = false;
              break;
            }
          label_ctx = TREE_CHAIN (label_ctx);
        }
    }

  if (exit_p)
    error ("invalid exit from OpenMP structured block");
  else
    error ("invalid entry to OpenMP structured block");

  *stmt_p = build_empty_stmt ();
  return true;
}
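
/* For example, both gotos in

       goto inside;                  // invalid entry
       #pragma omp parallel
         {
         inside:
           ...
           goto outside;             // invalid exit
         }
     outside:
       ...

   are diagnosed: pass 1 below records that "inside" belongs to the
   parallel's context while "outside" belongs to the null outer
   context, and pass 2 compares each branch against its target label,
   picking the "entry" or "exit" wording with the test above.  */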

/* Pass 1: Create a minimal tree of OpenMP structured blocks, and record
   where in the tree each label is found.  */

static tree
diagnose_sb_1 (tree *tp, int *walk_subtrees, void *data)
{
  struct walk_stmt_info *wi = data;
  tree context = (tree) wi->info;
  tree inner_context;
  tree t = *tp;

  *walk_subtrees = 0;
  switch (TREE_CODE (t))
    {
    case OMP_PARALLEL:
    case OMP_SECTIONS:
    case OMP_SINGLE:
      walk_tree (&OMP_CLAUSES (t), diagnose_sb_1, wi, NULL);
      /* FALLTHRU */
    case OMP_SECTION:
    case OMP_MASTER:
    case OMP_ORDERED:
    case OMP_CRITICAL:
      /* The minimal context here is just a tree of statements.  */
      inner_context = tree_cons (NULL, t, context);
      wi->info = inner_context;
      walk_stmts (wi, &OMP_BODY (t));
      wi->info = context;
      break;

    case OMP_FOR:
      walk_tree (&OMP_FOR_CLAUSES (t), diagnose_sb_1, wi, NULL);
      inner_context = tree_cons (NULL, t, context);
      wi->info = inner_context;
      walk_tree (&OMP_FOR_INIT (t), diagnose_sb_1, wi, NULL);
      walk_tree (&OMP_FOR_COND (t), diagnose_sb_1, wi, NULL);
      walk_tree (&OMP_FOR_INCR (t), diagnose_sb_1, wi, NULL);
      walk_stmts (wi, &OMP_FOR_PRE_BODY (t));
      walk_stmts (wi, &OMP_FOR_BODY (t));
      wi->info = context;
      break;

    case LABEL_EXPR:
      splay_tree_insert (all_labels, (splay_tree_key) LABEL_EXPR_LABEL (t),
                         (splay_tree_value) context);
      break;

    default:
      break;
    }

  return NULL_TREE;
}

/* Pass 2: Check each branch and see if its context differs from that of
   the destination label's context.  */

static tree
diagnose_sb_2 (tree *tp, int *walk_subtrees, void *data)
{
  struct walk_stmt_info *wi = data;
  tree context = (tree) wi->info;
  splay_tree_node n;
  tree t = *tp;

  *walk_subtrees = 0;
  switch (TREE_CODE (t))
    {
    case OMP_PARALLEL:
    case OMP_SECTIONS:
    case OMP_SINGLE:
      walk_tree (&OMP_CLAUSES (t), diagnose_sb_2, wi, NULL);
      /* FALLTHRU */
    case OMP_SECTION:
    case OMP_MASTER:
    case OMP_ORDERED:
    case OMP_CRITICAL:
      wi->info = t;
      walk_stmts (wi, &OMP_BODY (t));
      wi->info = context;
      break;

    case OMP_FOR:
      walk_tree (&OMP_FOR_CLAUSES (t), diagnose_sb_2, wi, NULL);
      wi->info = t;
      walk_tree (&OMP_FOR_INIT (t), diagnose_sb_2, wi, NULL);
      walk_tree (&OMP_FOR_COND (t), diagnose_sb_2, wi, NULL);
      walk_tree (&OMP_FOR_INCR (t), diagnose_sb_2, wi, NULL);
      walk_stmts (wi, &OMP_FOR_PRE_BODY (t));
      walk_stmts (wi, &OMP_FOR_BODY (t));
      wi->info = context;
      break;

    case GOTO_EXPR:
      {
        tree lab = GOTO_DESTINATION (t);
        if (TREE_CODE (lab) != LABEL_DECL)
          break;

        n = splay_tree_lookup (all_labels, (splay_tree_key) lab);
        diagnose_sb_0 (tp, context, n ? (tree) n->value : NULL_TREE);
      }
      break;

    case SWITCH_EXPR:
      {
        tree vec = SWITCH_LABELS (t);
        int i, len = TREE_VEC_LENGTH (vec);
        for (i = 0; i < len; ++i)
          {
            tree lab = CASE_LABEL (TREE_VEC_ELT (vec, i));
            n = splay_tree_lookup (all_labels, (splay_tree_key) lab);
            if (diagnose_sb_0 (tp, context, (tree) n->value))
              break;
          }
      }
      break;

    case RETURN_EXPR:
      diagnose_sb_0 (tp, context, NULL_TREE);
      break;

    default:
      break;
    }

  return NULL_TREE;
}

void
diagnose_omp_structured_block_errors (tree fndecl)
{
  tree save_current = current_function_decl;
  struct walk_stmt_info wi;

  current_function_decl = fndecl;

  all_labels = splay_tree_new (splay_tree_compare_pointers, 0, 0);

  memset (&wi, 0, sizeof (wi));
  wi.callback = diagnose_sb_1;
  walk_stmts (&wi, &DECL_SAVED_TREE (fndecl));

  memset (&wi, 0, sizeof (wi));
  wi.callback = diagnose_sb_2;
  wi.want_locations = true;
  wi.want_return_expr = true;
  walk_stmts (&wi, &DECL_SAVED_TREE (fndecl));

  splay_tree_delete (all_labels);
  all_labels = NULL;

  current_function_decl = save_current;
}

#include "gt-omp-low.h"