C_OMP_DIR_INFORMATIONAL, false },
{ "scan", nullptr, nullptr, PRAGMA_OMP_SCAN,
C_OMP_DIR_CONSTRUCT, true },
- /* { "scope", nullptr, nullptr, PRAGMA_OMP_SCOPE,
- C_OMP_DIR_CONSTRUCT, false }, */
+ { "scope", nullptr, nullptr, PRAGMA_OMP_SCOPE,
+ C_OMP_DIR_CONSTRUCT, false },
{ "section", nullptr, nullptr, PRAGMA_OMP_SECTION,
C_OMP_DIR_CONSTRUCT, false },
{ "sections", nullptr, nullptr, PRAGMA_OMP_SECTIONS,
{ "end", PRAGMA_OMP_END_DECLARE_TARGET },
{ "flush", PRAGMA_OMP_FLUSH },
{ "requires", PRAGMA_OMP_REQUIRES },
+ { "scope", PRAGMA_OMP_SCOPE },
{ "section", PRAGMA_OMP_SECTION },
{ "sections", PRAGMA_OMP_SECTIONS },
{ "single", PRAGMA_OMP_SINGLE },
PRAGMA_OMP_PARALLEL,
PRAGMA_OMP_REQUIRES,
PRAGMA_OMP_SCAN,
+ PRAGMA_OMP_SCOPE,
PRAGMA_OMP_SECTION,
PRAGMA_OMP_SECTIONS,
PRAGMA_OMP_SIMD,
return add_stmt (stmt);
}
+/* OpenMP 5.1:
+   # pragma omp scope scope-clause[optseq] new-line
+     structured-block
+
+   LOC is the location of the #pragma.  */
+
+#define OMP_SCOPE_CLAUSE_MASK \
+ ( (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_PRIVATE) \
+ | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_REDUCTION) \
+ | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NOWAIT))
+
+static tree
+c_parser_omp_scope (location_t loc, c_parser *parser, bool *if_p)
+{
+  tree stmt = make_node (OMP_SCOPE);
+  SET_EXPR_LOCATION (stmt, loc);
+  TREE_TYPE (stmt) = void_type_node;
+
+  /* Parse the clauses permitted by OMP_SCOPE_CLAUSE_MASK, then the
+     structured block that forms the body of the construct.  */
+  OMP_SCOPE_CLAUSES (stmt)
+    = c_parser_omp_all_clauses (parser, OMP_SCOPE_CLAUSE_MASK,
+				"#pragma omp scope");
+  OMP_SCOPE_BODY (stmt) = c_parser_omp_structured_block (parser, if_p);
+
+  return add_stmt (stmt);
+}
+
/* OpenMP 3.0:
# pragma omp task task-clause[optseq] new-line
strcpy (p_name, "#pragma omp");
stmt = c_parser_omp_parallel (loc, parser, p_name, mask, NULL, if_p);
break;
+ case PRAGMA_OMP_SCOPE:
+ stmt = c_parser_omp_scope (loc, parser, if_p);
+ break;
case PRAGMA_OMP_SECTIONS:
strcpy (p_name, "#pragma omp");
stmt = c_parser_omp_sections (loc, parser, p_name, mask, NULL);
return add_stmt (stmt);
}
+/* OpenMP 5.1:
+ # pragma omp scope scope-clause[optseq] new-line
+ structured-block */
+
+#define OMP_SCOPE_CLAUSE_MASK \
+ ( (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_PRIVATE) \
+ | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_REDUCTION) \
+ | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NOWAIT))
+
+static tree
+cp_parser_omp_scope (cp_parser *parser, cp_token *pragma_tok, bool *if_p)
+{
+  tree stmt = make_node (OMP_SCOPE);
+  TREE_TYPE (stmt) = void_type_node;
+  SET_EXPR_LOCATION (stmt, pragma_tok->location);
+
+  /* Parse the clauses permitted by OMP_SCOPE_CLAUSE_MASK, then the
+     structured block that forms the body of the construct.  */
+  OMP_SCOPE_CLAUSES (stmt)
+    = cp_parser_omp_all_clauses (parser, OMP_SCOPE_CLAUSE_MASK,
+				 "#pragma omp scope", pragma_tok);
+  OMP_SCOPE_BODY (stmt) = cp_parser_omp_structured_block (parser, if_p);
+
+  return add_stmt (stmt);
+}
+
/* OpenMP 3.0:
# pragma omp task task-clause[optseq] new-line
structured-block */
stmt = cp_parser_omp_parallel (parser, pragma_tok, p_name, mask, NULL,
if_p);
break;
+ case PRAGMA_OMP_SCOPE:
+ stmt = cp_parser_omp_scope (parser, pragma_tok, if_p);
+ break;
case PRAGMA_OMP_SECTIONS:
strcpy (p_name, "#pragma omp");
stmt = cp_parser_omp_sections (parser, pragma_tok, p_name, mask, NULL);
case PRAGMA_OMP_MASKED:
case PRAGMA_OMP_MASTER:
case PRAGMA_OMP_PARALLEL:
+ case PRAGMA_OMP_SCOPE:
case PRAGMA_OMP_SECTIONS:
case PRAGMA_OMP_SIMD:
case PRAGMA_OMP_SINGLE:
omp_parallel_combined_clauses = NULL;
/* FALLTHRU */
case OMP_SINGLE:
+ case OMP_SCOPE:
case OMP_TEAMS:
case OMP_CRITICAL:
case OMP_TASKGROUP:
case GIMPLE_LABEL:
case GIMPLE_EH_MUST_NOT_THROW:
case GIMPLE_OMP_FOR:
+ case GIMPLE_OMP_SCOPE:
case GIMPLE_OMP_SECTIONS:
case GIMPLE_OMP_SECTIONS_SWITCH:
case GIMPLE_OMP_SECTION:
}
}
+/* Dump a GIMPLE_OMP_SCOPE tuple on the pretty_printer BUFFER. */
+
+static void
+dump_gimple_omp_scope (pretty_printer *buffer, const gimple *gs,
+		       int spc, dump_flags_t flags)
+{
+  if (flags & TDF_RAW)
+    {
+      /* Raw form: tuple dump with explicit BODY and CLAUSES operands.  */
+      dump_gimple_fmt (buffer, spc, flags, "%G <%+BODY <%S>%nCLAUSES <", gs,
+		       gimple_omp_body (gs));
+      dump_omp_clauses (buffer, gimple_omp_scope_clauses (gs), spc, flags);
+      dump_gimple_fmt (buffer, spc, flags, " >");
+    }
+  else
+    {
+      /* Pretty form: the pragma line, then the braced body if non-empty.  */
+      pp_string (buffer, "#pragma omp scope");
+      dump_omp_clauses (buffer, gimple_omp_scope_clauses (gs), spc, flags);
+      if (!gimple_seq_empty_p (gimple_omp_body (gs)))
+	{
+	  newline_and_indent (buffer, spc + 2);
+	  pp_left_brace (buffer);
+	  pp_newline (buffer);
+	  dump_gimple_seq (buffer, gimple_omp_body (gs), spc + 4, flags);
+	  newline_and_indent (buffer, spc + 2);
+	  pp_right_brace (buffer);
+	}
+    }
+}
+
/* Dump a GIMPLE_OMP_TARGET tuple on the pretty_printer BUFFER. */
static void
dump_gimple_omp_masked (buffer, gs, spc, flags);
break;
+ case GIMPLE_OMP_SCOPE:
+ dump_gimple_omp_scope (buffer, gs, spc, flags);
+ break;
+
case GIMPLE_OMP_MASTER:
case GIMPLE_OMP_SECTION:
dump_gimple_omp_block (buffer, gs, spc, flags);
case GIMPLE_OMP_SECTION:
case GIMPLE_OMP_PARALLEL:
case GIMPLE_OMP_TASK:
+ case GIMPLE_OMP_SCOPE:
case GIMPLE_OMP_SECTIONS:
case GIMPLE_OMP_SINGLE:
case GIMPLE_OMP_TARGET:
}
+/* Build a GIMPLE_OMP_SCOPE statement.
+
+   BODY is the sequence of statements that all threads in the team
+   will execute.
+   CLAUSES are any of the OMP scope construct's clauses: private, reduction,
+   nowait.  */
+
+gimple *
+gimple_build_omp_scope (gimple_seq body, tree clauses)
+{
+  /* No extra operand slots are needed: the clauses live in the
+     GSS_OMP_SINGLE_LAYOUT clause field (see gimple_omp_scope_set_clauses).  */
+  gimple *p = gimple_alloc (GIMPLE_OMP_SCOPE, 0);
+  gimple_omp_scope_set_clauses (p, clauses);
+  if (body)
+    gimple_omp_set_body (p, body);
+
+  return p;
+}
+
+
/* Build a GIMPLE_OMP_TARGET statement.
BODY is the sequence of statements that will be executed.
}
goto copy_omp_body;
+ case GIMPLE_OMP_SCOPE:
+ t = unshare_expr (gimple_omp_scope_clauses (stmt));
+ gimple_omp_scope_set_clauses (copy, t);
+ goto copy_omp_body;
+
case GIMPLE_OMP_TARGET:
{
gomp_target *omp_target_stmt = as_a <gomp_target *> (stmt);
CLAUSES is an OMP_CLAUSE chain holding the associated clauses. */
DEFGSCODE(GIMPLE_OMP_SCAN, "gimple_omp_scan", GSS_OMP_SINGLE_LAYOUT)
+/* GIMPLE_OMP_SCOPE <BODY, CLAUSES> represents #pragma omp scope
+   BODY is the sequence of statements inside the scope construct.
+   CLAUSES is an OMP_CLAUSE chain holding the associated clauses.  */
+DEFGSCODE(GIMPLE_OMP_SCOPE, "gimple_omp_scope", GSS_OMP_SINGLE_LAYOUT)
+
/* OMP_SECTION <BODY> represents #pragma omp section.
BODY is the sequence of statements in the section body. */
DEFGSCODE(GIMPLE_OMP_SECTION, "gimple_omp_section", GSS_OMP)
};
/* GIMPLE_OMP_SINGLE, GIMPLE_OMP_ORDERED, GIMPLE_OMP_TASKGROUP,
- GIMPLE_OMP_SCAN. */
+ GIMPLE_OMP_SCAN, GIMPLE_OMP_MASKED, GIMPLE_OMP_SCOPE. */
struct GTY((tag("GSS_OMP_SINGLE_LAYOUT")))
gimple_statement_omp_single_layout : public gimple_statement_omp
gomp_task *gimple_build_omp_task (gimple_seq, tree, tree, tree, tree,
tree, tree);
gimple *gimple_build_omp_section (gimple_seq);
+gimple *gimple_build_omp_scope (gimple_seq, tree);
gimple *gimple_build_omp_master (gimple_seq);
gimple *gimple_build_omp_masked (gimple_seq, tree);
gimple *gimple_build_omp_taskgroup (gimple_seq, tree);
case GIMPLE_OMP_SECTION:
case GIMPLE_OMP_PARALLEL:
case GIMPLE_OMP_TASK:
+ case GIMPLE_OMP_SCOPE:
case GIMPLE_OMP_SECTIONS:
case GIMPLE_OMP_SINGLE:
case GIMPLE_OMP_TARGET:
}
-/* Return the clauses associated with OMP_MASTER statement GS. */
+/* Return the clauses associated with OMP_MASKED statement GS. */
static inline tree
gimple_omp_masked_clauses (const gimple *gs)
}
+/* Return the clauses associated with OMP_SCOPE statement GS. */
+
+static inline tree
+gimple_omp_scope_clauses (const gimple *gs)
+{
+  GIMPLE_CHECK (gs, GIMPLE_OMP_SCOPE);
+  /* GIMPLE_OMP_SCOPE uses the GSS_OMP_SINGLE_LAYOUT storage layout.  */
+  return
+    static_cast <const gimple_statement_omp_single_layout *> (gs)->clauses;
+}
+
+
+/* Return a pointer to the clauses associated with OMP scope statement
+ GS. */
+
+static inline tree *
+gimple_omp_scope_clauses_ptr (gimple *gs)
+{
+  GIMPLE_CHECK (gs, GIMPLE_OMP_SCOPE);
+  /* GIMPLE_OMP_SCOPE uses the GSS_OMP_SINGLE_LAYOUT storage layout.  */
+  return &static_cast <gimple_statement_omp_single_layout *> (gs)->clauses;
+}
+
+
+/* Set CLAUSES to be the clauses associated with OMP scope statement
+ GS. */
+
+static inline void
+gimple_omp_scope_set_clauses (gimple *gs, tree clauses)
+{
+  GIMPLE_CHECK (gs, GIMPLE_OMP_SCOPE);
+  /* GIMPLE_OMP_SCOPE uses the GSS_OMP_SINGLE_LAYOUT storage layout.  */
+  static_cast <gimple_statement_omp_single_layout *> (gs)->clauses
+    = clauses;
+}
+
+
/* Return the kind of the OMP_FOR statemement G. */
static inline int
case GIMPLE_OMP_SINGLE: \
case GIMPLE_OMP_TARGET: \
case GIMPLE_OMP_TEAMS: \
+ case GIMPLE_OMP_SCOPE: \
case GIMPLE_OMP_SECTION: \
case GIMPLE_OMP_MASTER: \
case GIMPLE_OMP_MASKED: \
case OMP_LOOP:
case OACC_LOOP:
case OMP_SCAN:
+ case OMP_SCOPE:
case OMP_SECTIONS:
case OMP_SECTION:
case OMP_SINGLE:
case OMP_CLAUSE_REDUCTION:
if (OMP_CLAUSE_REDUCTION_TASK (c))
{
- if (region_type == ORT_WORKSHARE)
+ if (region_type == ORT_WORKSHARE || code == OMP_SCOPE)
{
if (nowait == -1)
nowait = omp_find_clause (*list_p,
{
error_at (OMP_CLAUSE_LOCATION (c),
"invalid %<task%> reduction modifier on construct "
- "other than %<parallel%>, %qs or %<sections%>",
- lang_GNU_Fortran () ? "do" : "for");
+ "other than %<parallel%>, %qs, %<sections%> or "
+ "%<scope%>", lang_GNU_Fortran () ? "do" : "for");
OMP_CLAUSE_REDUCTION_TASK (c) = 0;
}
}
"%qs construct", "taskloop");
OMP_CLAUSE_REDUCTION_INSCAN (c) = 0;
break;
+ case OMP_SCOPE:
+ error_at (OMP_CLAUSE_LOCATION (c),
+ "%<inscan%> %<reduction%> clause on "
+ "%qs construct", "scope");
+ OMP_CLAUSE_REDUCTION_INSCAN (c) = 0;
+ break;
default:
break;
}
case GIMPLE_OMP_TASK:
case GIMPLE_OMP_SECTIONS:
case GIMPLE_OMP_SINGLE:
+ case GIMPLE_OMP_SCOPE:
case GIMPLE_OMP_TARGET:
case GIMPLE_OMP_TEAMS:
case GIMPLE_OMP_CRITICAL:
case OMP_SINGLE:
ort = ORT_WORKSHARE;
break;
+ case OMP_SCOPE:
+ ort = ORT_TASKGROUP;
+ break;
case OMP_TARGET:
ort = OMP_TARGET_COMBINED (expr) ? ORT_COMBINED_TARGET : ORT_TARGET;
break;
case OMP_SINGLE:
stmt = gimple_build_omp_single (body, OMP_CLAUSES (expr));
break;
+ case OMP_SCOPE:
+ stmt = gimple_build_omp_scope (body, OMP_CLAUSES (expr));
+ break;
case OMP_TARGET:
stmt = gimple_build_omp_target (body, GF_OMP_TARGET_KIND_REGION,
OMP_CLAUSES (expr));
case OACC_KERNELS:
case OACC_PARALLEL:
case OACC_SERIAL:
+ case OMP_SCOPE:
case OMP_SECTIONS:
case OMP_SINGLE:
case OMP_TARGET:
&& code != OMP_SCAN
&& code != OMP_SECTIONS
&& code != OMP_SECTION
- && code != OMP_SINGLE);
+ && code != OMP_SINGLE
+ && code != OMP_SCOPE);
}
#endif
BT_FN_PTR, ATTR_NOTHROW_LEAF_LIST)
DEF_GOMP_BUILTIN (BUILT_IN_GOMP_SINGLE_COPY_END, "GOMP_single_copy_end",
BT_FN_VOID_PTR, ATTR_NOTHROW_LEAF_LIST)
+DEF_GOMP_BUILTIN (BUILT_IN_GOMP_SCOPE_START, "GOMP_scope_start",
+ BT_FN_VOID_PTR, ATTR_NOTHROW_LEAF_LIST)
DEF_GOMP_BUILTIN (BUILT_IN_GOMP_OFFLOAD_REGISTER, "GOMP_offload_register_ver",
BT_FN_VOID_UINT_PTR_INT_PTR, ATTR_NOTHROW_LIST)
DEF_GOMP_BUILTIN (BUILT_IN_GOMP_OFFLOAD_UNREGISTER,
set_immediate_dominator (CDI_DOMINATORS, default_bb, l0_bb);
}
-/* Expand code for an OpenMP single directive. We've already expanded
+/* Expand code for an OpenMP single or scope directive. We've already expanded
much of the code, here we simply place the GOMP_barrier call. */
static void
exit_bb = region->exit;
si = gsi_last_nondebug_bb (entry_bb);
- gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SINGLE);
+ gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SINGLE
+ || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SCOPE);
gsi_remove (&si, true);
single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
break;
case GIMPLE_OMP_SINGLE:
+ case GIMPLE_OMP_SCOPE:
expand_omp_single (region);
break;
case GIMPLE_OMP_TEAMS:
case GIMPLE_OMP_MASTER:
case GIMPLE_OMP_MASKED:
+ case GIMPLE_OMP_SCOPE:
case GIMPLE_OMP_TASKGROUP:
case GIMPLE_OMP_CRITICAL:
case GIMPLE_OMP_SECTION:
{
tree x;
omp_context *outer = ctx->outer;
- while (outer && gimple_code (outer->stmt) == GIMPLE_OMP_TASKGROUP)
- outer = outer->outer;
+ for (; outer; outer = outer->outer)
+ {
+ if (gimple_code (outer->stmt) == GIMPLE_OMP_TASKGROUP)
+ continue;
+ if (gimple_code (outer->stmt) == GIMPLE_OMP_SCOPE
+ && !maybe_lookup_decl (var, outer))
+ continue;
+ break;
+ }
if (is_global_var (maybe_lookup_decl_in_outer_ctx (var, ctx)))
x = var;
break;
}
break;
+ case GIMPLE_OMP_SCOPE:
+ for (; ctx != NULL; ctx = ctx->outer)
+ switch (gimple_code (ctx->stmt))
+ {
+ case GIMPLE_OMP_FOR:
+ if (gimple_omp_for_kind (ctx->stmt) != GF_OMP_FOR_KIND_FOR
+ && gimple_omp_for_kind (ctx->stmt) != GF_OMP_FOR_KIND_TASKLOOP)
+ break;
+ /* FALLTHRU */
+ case GIMPLE_OMP_SECTIONS:
+ case GIMPLE_OMP_SINGLE:
+ case GIMPLE_OMP_TASK:
+ case GIMPLE_OMP_CRITICAL:
+ case GIMPLE_OMP_ORDERED:
+ case GIMPLE_OMP_MASTER:
+ case GIMPLE_OMP_MASKED:
+ error_at (gimple_location (stmt),
+ "%<scope%> region may not be closely nested inside "
+ "of work-sharing, %<loop%>, explicit %<task%>, "
+ "%<taskloop%>, %<critical%>, %<ordered%>, %<master%>, "
+ "or %<masked%> region");
+ return false;
+ case GIMPLE_OMP_PARALLEL:
+ case GIMPLE_OMP_TEAMS:
+ return true;
+ case GIMPLE_OMP_TARGET:
+ if (gimple_omp_target_kind (ctx->stmt)
+ == GF_OMP_TARGET_KIND_REGION)
+ return true;
+ break;
+ default:
+ break;
+ }
+ break;
case GIMPLE_OMP_TASK:
for (c = gimple_omp_task_clauses (stmt); c; c = OMP_CLAUSE_CHAIN (c))
if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_DEPEND
scan_omp_for (as_a <gomp_for *> (stmt), ctx);
break;
+ case GIMPLE_OMP_SCOPE:
+ ctx = new_omp_context (stmt, ctx);
+ scan_sharing_clauses (gimple_omp_scope_clauses (stmt), ctx);
+ scan_omp (gimple_omp_body_ptr (stmt), ctx);
+ break;
+
case GIMPLE_OMP_SECTIONS:
scan_omp_sections (as_a <gomp_sections *> (stmt), ctx);
break;
gimple_seq_add_stmt (body, g);
gimple_seq_add_stmt (body, gimple_build_label (fallthru_label));
}
- else if (gimple_code (outer->stmt) != GIMPLE_OMP_TASKGROUP)
+ else if (gimple_code (outer->stmt) != GIMPLE_OMP_TASKGROUP
+ && gimple_code (outer->stmt) != GIMPLE_OMP_SCOPE)
return;
}
}
+/* Lower code for an OMP scope directive. */
+
+static void
+lower_omp_scope (gimple_stmt_iterator *gsi_p, omp_context *ctx)
+{
+  tree block;
+  gimple *scope_stmt = gsi_stmt (*gsi_p);
+  gbind *bind;
+  gimple_seq bind_body, bind_body_tail = NULL, dlist;
+  gimple_seq tred_dlist = NULL;
+
+  push_gimplify_context ();
+
+  /* Replace the GIMPLE_OMP_SCOPE statement with a GIMPLE_BIND into which
+     the lowered sequence is accumulated.  */
+  block = make_node (BLOCK);
+  bind = gimple_build_bind (NULL, NULL, block);
+  gsi_replace (gsi_p, bind, true);
+  bind_body = NULL;
+  dlist = NULL;
+
+  /* If there are reductions with the task modifier, prepend an
+     OMP_CLAUSE__REDUCTEMP_ clause carrying the bookkeeping pointer and
+     emit a call to GOMP_scope_start to register them.  */
+  tree rclauses
+    = omp_task_reductions_find_first (gimple_omp_scope_clauses (scope_stmt),
+				      OMP_SCOPE, OMP_CLAUSE_REDUCTION);
+  if (rclauses)
+    {
+      tree type = build_pointer_type (pointer_sized_int_node);
+      tree temp = create_tmp_var (type);
+      tree c = build_omp_clause (UNKNOWN_LOCATION, OMP_CLAUSE__REDUCTEMP_);
+      OMP_CLAUSE_DECL (c) = temp;
+      OMP_CLAUSE_CHAIN (c) = gimple_omp_scope_clauses (scope_stmt);
+      gimple_omp_scope_set_clauses (scope_stmt, c);
+      lower_omp_task_reductions (ctx, OMP_SCOPE,
+				 gimple_omp_scope_clauses (scope_stmt),
+				 &bind_body, &tred_dlist);
+      rclauses = c;
+      tree fndecl = builtin_decl_explicit (BUILT_IN_GOMP_SCOPE_START);
+      gimple *stmt = gimple_build_call (fndecl, 1, temp);
+      gimple_seq_add_stmt (&bind_body, stmt);
+    }
+
+  /* Set up privatized copies (private clause), then lower the body.  */
+  lower_rec_input_clauses (gimple_omp_scope_clauses (scope_stmt),
+			   &bind_body, &dlist, ctx, NULL);
+  lower_omp (gimple_omp_body_ptr (scope_stmt), ctx);
+
+  gimple_seq_add_stmt (&bind_body, scope_stmt);
+
+  gimple_seq_add_seq (&bind_body, gimple_omp_body (scope_stmt));
+
+  gimple_omp_set_body (scope_stmt, NULL);
+
+  /* Non-task reduction combining code, if any, is wrapped in a
+     GOMP_atomic_start / GOMP_atomic_end pair.  */
+  gimple_seq clist = NULL;
+  lower_reduction_clauses (gimple_omp_scope_clauses (scope_stmt),
+			   &bind_body, &clist, ctx);
+  if (clist)
+    {
+      tree fndecl = builtin_decl_explicit (BUILT_IN_GOMP_ATOMIC_START);
+      gcall *g = gimple_build_call (fndecl, 0);
+      gimple_seq_add_stmt (&bind_body, g);
+      gimple_seq_add_seq (&bind_body, clist);
+      fndecl = builtin_decl_explicit (BUILT_IN_GOMP_ATOMIC_END);
+      g = gimple_build_call (fndecl, 0);
+      gimple_seq_add_stmt (&bind_body, g);
+    }
+
+  gimple_seq_add_seq (&bind_body, dlist);
+
+  bind_body = maybe_catch_exception (bind_body);
+
+  /* The GIMPLE_OMP_RETURN records whether an implicit barrier is needed
+     at the end of the region (suppressed by a nowait clause).  */
+  bool nowait = omp_find_clause (gimple_omp_scope_clauses (scope_stmt),
+				 OMP_CLAUSE_NOWAIT) != NULL_TREE;
+  gimple *g = gimple_build_omp_return (nowait);
+  gimple_seq_add_stmt (&bind_body_tail, g);
+  gimple_seq_add_seq (&bind_body_tail, tred_dlist);
+  maybe_add_implicit_barrier_cancel (ctx, g, &bind_body_tail);
+  if (ctx->record_type)
+    {
+      /* Clobber the sender record once the region is over.  */
+      gimple_stmt_iterator gsi = gsi_start (bind_body_tail);
+      tree clobber = build_clobber (ctx->record_type);
+      gsi_insert_after (&gsi, gimple_build_assign (ctx->sender_decl,
+						   clobber), GSI_SAME_STMT);
+    }
+  gimple_seq_add_seq (&bind_body, bind_body_tail);
+
+  gimple_bind_set_body (bind, bind_body);
+
+  pop_gimplify_context (bind);
+
+  gimple_bind_append_vars (bind, ctx->block_vars);
+  BLOCK_VARS (block) = ctx->block_vars;
+  if (BLOCK_VARS (block))
+    TREE_USED (block) = 1;
+}
/* Expand code for an OpenMP master or masked directive. */
static void
clauses = omp_task_reductions_find_first (clauses, code, ccode);
if (clauses == NULL_TREE)
return;
- if (code == OMP_FOR || code == OMP_SECTIONS)
+ if (code == OMP_FOR || code == OMP_SECTIONS || code == OMP_SCOPE)
{
for (omp_context *outer = ctx->outer; outer; outer = outer->outer)
if (gimple_code (outer->stmt) == GIMPLE_OMP_PARALLEL
cancellable = error_mark_node;
break;
}
- else if (gimple_code (outer->stmt) != GIMPLE_OMP_TASKGROUP)
+ else if (gimple_code (outer->stmt) != GIMPLE_OMP_TASKGROUP
+ && gimple_code (outer->stmt) != GIMPLE_OMP_SCOPE)
break;
}
tree record_type = lang_hooks.types.make_type (RECORD_TYPE);
tree lab2 = create_artificial_label (UNKNOWN_LOCATION);
tree lab3 = NULL_TREE, lab7 = NULL_TREE;
gimple *g;
- if (code == OMP_FOR || code == OMP_SECTIONS)
+ if (code == OMP_FOR || code == OMP_SECTIONS || code == OMP_SCOPE)
{
- /* For worksharing constructs, only perform it in the master thread,
- with the exception of cancelled implicit barriers - then only handle
- the current thread. */
+ /* For worksharing constructs or scope, only perform it in the master
+ thread, with the exception of cancelled implicit barriers - then only
+ handle the current thread. */
tree lab4 = create_artificial_label (UNKNOWN_LOCATION);
t = builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM);
tree thr_num = create_tmp_var (integer_type_node);
lab3 = create_artificial_label (UNKNOWN_LOCATION);
if (code == OMP_FOR)
c = gimple_omp_for_clauses (ctx->stmt);
- else /* if (code == OMP_SECTIONS) */
+ else if (code == OMP_SECTIONS)
c = gimple_omp_sections_clauses (ctx->stmt);
+ else /* if (code == OMP_SCOPE) */
+ c = gimple_omp_scope_clauses (ctx->stmt);
c = OMP_CLAUSE_DECL (omp_find_clause (c, OMP_CLAUSE__REDUCTEMP_));
cancellable = c;
g = gimple_build_cond (NE_EXPR, c, build_zero_cst (TREE_TYPE (c)),
tree bfield = DECL_CHAIN (field);
tree cond;
- if (code == OMP_PARALLEL || code == OMP_FOR || code == OMP_SECTIONS)
- /* In parallel or worksharing all threads unconditionally
+ if (code == OMP_PARALLEL
+ || code == OMP_FOR
+ || code == OMP_SECTIONS
+ || code == OMP_SCOPE)
+ /* In parallel, worksharing or scope all threads unconditionally
initialize all their task reduction private variables. */
cond = boolean_true_node;
else if (TREE_TYPE (ptr) == ptr_type_node)
c = gimple_omp_for_clauses (ctx->stmt);
else if (code == OMP_SECTIONS)
c = gimple_omp_sections_clauses (ctx->stmt);
+ else if (code == OMP_SCOPE)
+ c = gimple_omp_scope_clauses (ctx->stmt);
else
c = gimple_omp_taskreg_clauses (ctx->stmt);
c = omp_find_clause (c, OMP_CLAUSE__REDUCTEMP_);
g = gimple_build_cond (NE_EXPR, idx, num_thr_sz, lab1, lab2);
gimple_seq_add_stmt (end, g);
gimple_seq_add_stmt (end, gimple_build_label (lab2));
- if (code == OMP_FOR || code == OMP_SECTIONS)
+ if (code == OMP_FOR || code == OMP_SECTIONS || code == OMP_SCOPE)
{
enum built_in_function bfn
= BUILT_IN_GOMP_WORKSHARE_TASK_REDUCTION_UNREGISTER;
ctx->cancel_label = create_artificial_label (UNKNOWN_LOCATION);
lower_omp_sections (gsi_p, ctx);
break;
+ case GIMPLE_OMP_SCOPE:
+ ctx = maybe_lookup_ctx (stmt);
+ gcc_assert (ctx);
+ lower_omp_scope (gsi_p, ctx);
+ break;
case GIMPLE_OMP_SINGLE:
ctx = maybe_lookup_ctx (stmt);
gcc_assert (ctx);
if (gimple_code (up->stmt) == GIMPLE_OMP_ORDERED
|| gimple_code (up->stmt) == GIMPLE_OMP_CRITICAL
|| gimple_code (up->stmt) == GIMPLE_OMP_TASKGROUP
+ || gimple_code (up->stmt) == GIMPLE_OMP_SCOPE
|| gimple_code (up->stmt) == GIMPLE_OMP_SECTION
|| gimple_code (up->stmt) == GIMPLE_OMP_SCAN
|| (gimple_code (up->stmt) == GIMPLE_OMP_TARGET
case GIMPLE_OMP_PARALLEL:
case GIMPLE_OMP_TASK:
+ case GIMPLE_OMP_SCOPE:
case GIMPLE_OMP_SECTIONS:
case GIMPLE_OMP_SINGLE:
case GIMPLE_OMP_SECTION:
case GIMPLE_OMP_PARALLEL:
case GIMPLE_OMP_TASK:
+ case GIMPLE_OMP_SCOPE:
case GIMPLE_OMP_SECTIONS:
case GIMPLE_OMP_SINGLE:
case GIMPLE_OMP_SECTION:
#pragma omp cancellation point sections /* { dg-error "not closely nested inside" } */
#pragma omp cancellation point taskgroup /* { dg-error "not closely nested inside" } */
}
+ #pragma omp masked
+ {
+ #pragma omp cancel parallel /* { dg-error "not closely nested inside" } */
+ #pragma omp cancel for /* { dg-error "not closely nested inside" } */
+ #pragma omp cancel sections /* { dg-error "not closely nested inside" } */
+ #pragma omp cancel taskgroup /* { dg-error "not closely nested inside" } */
+ #pragma omp cancellation point parallel /* { dg-error "not closely nested inside" } */
+ #pragma omp cancellation point for /* { dg-error "not closely nested inside" } */
+ #pragma omp cancellation point sections /* { dg-error "not closely nested inside" } */
+ #pragma omp cancellation point taskgroup /* { dg-error "not closely nested inside" } */
+ }
+ #pragma omp scope
+ {
+ #pragma omp cancel parallel /* { dg-error "not closely nested inside" } */
+ #pragma omp cancel for /* { dg-error "not closely nested inside" } */
+ #pragma omp cancel sections /* { dg-error "not closely nested inside" } */
+ #pragma omp cancel taskgroup /* { dg-error "not closely nested inside" } */
+ #pragma omp cancellation point parallel /* { dg-error "not closely nested inside" } */
+ #pragma omp cancellation point for /* { dg-error "not closely nested inside" } */
+ #pragma omp cancellation point sections /* { dg-error "not closely nested inside" } */
+ #pragma omp cancellation point taskgroup /* { dg-error "not closely nested inside" } */
+ }
#pragma omp single
{
#pragma omp cancel parallel /* { dg-error "not closely nested inside" } */
i = p[0]++;
#pragma omp masked filter (0) filter (0) /* { dg-error "too many 'filter' clauses" } */
f0 ();
+ #pragma omp scope nowait nowait /* { dg-error "too many 'nowait' clauses" } */
+ ;
}
#pragma omp declare simd simdlen (4) simdlen (4) /* { dg-error "too many 'simdlen' clauses" } */
v = a[i]; /* { dg-error "OpenMP constructs other than 'parallel', 'loop' or 'simd' may not be nested inside a 'loop' region" "" { target c } } */
}
#pragma omp loop
+ for (i = 0; i < 64; i++)
+ {
+ #pragma omp master /* { dg-error "OpenMP constructs other than 'parallel', 'loop' or 'simd' may not be nested inside a 'loop' region" } */
+ foo ();
+ }
+ #pragma omp loop
+ for (i = 0; i < 64; i++)
+ {
+ #pragma omp masked /* { dg-error "OpenMP constructs other than 'parallel', 'loop' or 'simd' may not be nested inside a 'loop' region" } */
+ foo ();
+ }
+ #pragma omp loop
+ for (i = 0; i < 64; i++)
+ {
+ #pragma omp scope /* { dg-error "OpenMP constructs other than 'parallel', 'loop' or 'simd' may not be nested inside a 'loop' region" } */
+ foo ();
+ }
+ #pragma omp loop
for (i = 0; i < 64; i++)
a[i] += omp_get_thread_num (); /* { dg-error "OpenMP runtime API call '\[^\n\r]*omp_get_thread_num\[^\n\r]*' in a region with 'order\\(concurrent\\)' clause" } */
#pragma omp loop
#pragma omp barrier /* { dg-error "region may not be closely nested inside of" } */
#pragma omp master /* { dg-error "region may not be closely nested inside of" } */
;
+ #pragma omp masked /* { dg-error "region may not be closely nested inside of" } */
+ ;
+ #pragma omp scope /* { dg-error "region may not be closely nested inside of" } */
+ ;
#pragma omp ordered /* { dg-error "region may not be closely nested inside of" } */
;
#pragma omp ordered threads /* { dg-error "region may not be closely nested inside of" } */
#pragma omp barrier
#pragma omp master
;
+ #pragma omp masked
+ ;
+ #pragma omp scope
+ ;
#pragma omp ordered /* { dg-error ".ordered. region must be closely nested inside a loop region with an .ordered. clause" } */
;
#pragma omp ordered threads /* { dg-error ".ordered. region must be closely nested inside a loop region with an .ordered. clause" } */
#pragma omp barrier
#pragma omp master
;
+ #pragma omp masked
+ ;
+ #pragma omp scope
+ ;
#pragma omp ordered /* { dg-error ".ordered. region must be closely nested inside a loop region with an .ordered. clause" } */
;
#pragma omp ordered threads /* { dg-error ".ordered. region must be closely nested inside a loop region with an .ordered. clause" } */
#pragma omp section
foo (-3);
}
- #pragma omp simd reduction (task, +: v) /* { dg-error "invalid 'task' reduction modifier on construct other than 'parallel', 'for' or 'sections'" } */
+ #pragma omp scope reduction (task, +: v) nowait /* { dg-error "'task' reduction modifier on a construct with a 'nowait' clause" } */
+ foo (-4);
+ #pragma omp simd reduction (task, +: v) /* { dg-error "invalid 'task' reduction modifier on construct other than 'parallel', 'for', 'sections' or 'scope'" } */
for (i = 0; i < 64; i++)
v++;
#pragma omp for simd reduction (task, +: v) /* { dg-error "invalid 'task' reduction modifier on construct combined with 'simd'" } */
#pragma omp teams distribute parallel for simd reduction (task, +: v) /* { dg-error "invalid 'task' reduction modifier on construct combined with 'simd'" } */
for (i = 0; i < 64; i++)
v++;
- #pragma omp taskloop reduction (task, +: v) /* { dg-error "invalid 'task' reduction modifier on construct other than 'parallel', 'for' or 'sections'" } */
+ #pragma omp taskloop reduction (task, +: v) /* { dg-error "invalid 'task' reduction modifier on construct other than 'parallel', 'for', 'sections' or 'scope'" } */
for (i = 0; i < 64; i++)
foo (i);
#pragma omp taskloop simd reduction (task, +: v) /* { dg-error "invalid 'task' reduction modifier on construct combined with 'simd'" } */
for (i = 0; i < 64; i++)
v++;
- #pragma omp teams reduction (task, +: v) /* { dg-error "invalid 'task' reduction modifier on construct other than 'parallel', 'for' or 'sections'" } */
+ #pragma omp teams reduction (task, +: v) /* { dg-error "invalid 'task' reduction modifier on construct other than 'parallel', 'for', 'sections' or 'scope'" } */
foo (i);
#pragma omp teams distribute reduction (task, +: v) /* { dg-error "invalid 'task' reduction modifier on construct not combined with 'parallel', 'for' or 'sections'" } */
for (i = 0; i < 64; i++)
#pragma omp section
;
}
+ #pragma omp scope reduction (inscan, +: a) /* { dg-error "'inscan' 'reduction' clause on 'scope' construct" } */
+ ;
#pragma omp target parallel for reduction (inscan, +: a) map (c[:64], d[:64]) /* { dg-error "'inscan' 'reduction' clause on construct other than 'for', 'simd', 'for simd', 'parallel for', 'parallel for simd'" } */
for (i = 0; i < 64; i++)
{
--- /dev/null
+int r, r2, r3;
+
+void
+foo (void)
+{
+  int i = 0, j = 0, k = 0;
+  /* Plain scope with privatization, reduction and nowait.  */
+  #pragma omp scope private (i) reduction (+:r) nowait
+  {
+    i = 1;
+    r++;
+  }
+  /* Immediately nested scopes, each with a task reduction modifier.  */
+  #pragma omp scope private (i) reduction (task, +:r)
+  #pragma omp scope private (j) reduction (task, +:r2)
+  #pragma omp scope private (k) reduction (task, +:r3)
+  {
+    i = 1;
+    j = 2;
+    k = 3;
+    r++;
+    r2++;
+    r3++;
+  }
+  /* Nested scopes inside a parallel region, with a single inside.  */
+  #pragma omp parallel
+  {
+    #pragma omp scope reduction (+:r) private (i) nowait
+    {
+      #pragma omp scope reduction (+:r2) private (j) nowait
+      {
+	#pragma omp single
+	{
+	  i = 1;
+	  j = 2;
+	  r++;
+	  r2++;
+	}
+      }
+    }
+  }
+}
--- /dev/null
+int r, r2, r3 = 1;
+int bar (void);
+
+void
+foo (void)
+{
+  int i = 0, j = 0, k = 0;
+  /* Scope reductions combined with cancellation of the enclosing
+     parallel region.  */
+  #pragma omp parallel
+  {
+    if (bar ())
+      {
+	#pragma omp cancel parallel
+      }
+    #pragma omp scope reduction (+:r) private (i)
+    {
+      #pragma omp scope reduction (+:r2) private (j)
+      {
+	#pragma omp single nowait
+	{
+	  i = 1;
+	  j = 2;
+	  r++;
+	  r2++;
+	}
+      }
+    }
+  }
+  /* Same, with task reduction modifiers on the nested scopes.  */
+  #pragma omp parallel
+  {
+    if (bar ())
+      {
+	#pragma omp cancel parallel
+      }
+    #pragma omp scope reduction (task, +:r) private (i)
+    #pragma omp scope reduction (task, *:r3)
+    {
+      r++;
+      r3++;
+    }
+  }
+}
[[omp::directive (cancellation point parallel)]];
}
}
+ [[omp::directive (scope private (p) reduction(+:r) nowait)]]
+ ;
+ [[omp::directive (scope private (p) reduction(task, +:r))]]
+ ;
extern int t2;
[[omp::directive (threadprivate (t2))]];
extern int t2;
[[omp::directive (cancellation point, parallel)]];
}
}
+ [[omp::directive (scope, private (p), reduction(+:r), nowait)]]
+ ;
+ [[using omp:directive (scope, private (p), reduction(task, +:r))]]
+ ;
extern int t2;
[[omp::directive (threadprivate (t2))]];
extern int t2;
#pragma omp masked /* { dg-error "may not be closely nested" } */
;
#pragma omp barrier /* { dg-error "may not be closely nested" } */
+ #pragma omp scope /* { dg-error "may not be closely nested" } */
+ ;
}
#pragma omp sections
{
;
}
#pragma omp sections
+ {
+ #pragma omp scope /* { dg-error "may not be closely nested" } */
+ ;
+ }
+ #pragma omp sections
{
#pragma omp section
;
#pragma omp masked /* { dg-error "may not be closely nested" } */
;
}
+ #pragma omp sections
+ {
+ #pragma omp section
+ #pragma omp scope /* { dg-error "may not be closely nested" } */
+ ;
+ }
#pragma omp single
{
#pragma omp for /* { dg-error "may not be closely nested" } */
#pragma omp masked /* { dg-error "may not be closely nested" } */
;
#pragma omp barrier /* { dg-error "may not be closely nested" } */
+ #pragma omp scope /* { dg-error "may not be closely nested" } */
+ ;
}
#pragma omp master
{
#pragma omp master
;
#pragma omp barrier /* { dg-error "may not be closely nested" } */
+ #pragma omp scope /* { dg-error "may not be closely nested" } */
+ ;
}
#pragma omp masked filter (1)
{
#pragma omp master
;
#pragma omp barrier /* { dg-error "may not be closely nested" } */
+ #pragma omp scope /* { dg-error "may not be closely nested" } */
+ ;
}
#pragma omp task
{
#pragma omp masked /* { dg-error "may not be closely nested" } */
;
#pragma omp barrier /* { dg-error "may not be closely nested" } */
+ #pragma omp scope /* { dg-error "may not be closely nested" } */
+ ;
}
#pragma omp parallel
{
#pragma omp masked
;
#pragma omp barrier
+ #pragma omp scope
+ ;
+ #pragma omp scope
+ {
+ #pragma omp scope
+ ;
+ }
+ }
+ #pragma omp scope
+ {
+ #pragma omp for
+ for (j = 0; j < 3; j++)
+ ;
+ #pragma omp sections
+ {
+ ;
+ #pragma omp section
+ ;
+ }
+ #pragma omp single
+ ;
+ #pragma omp master
+ ;
+ #pragma omp masked
+ ;
+ #pragma omp barrier
+ #pragma omp scope
+ ;
+ #pragma omp scope
+ {
+ #pragma omp scope
+ ;
+ }
}
}
#pragma omp masked
;
#pragma omp barrier /* { dg-error "may not be closely nested" } */
+ #pragma omp scope /* { dg-error "may not be closely nested" } */
+ ;
}
}
{
#pragma omp ordered /* { dg-error "may not be closely nested" } */
;
+ #pragma omp scope /* { dg-error "may not be closely nested" } */
+ ;
}
}
{
#pragma omp ordered /* { dg-error "may not be closely nested" } */
;
+ #pragma omp scope /* { dg-error "may not be closely nested" } */
+ ;
}
}
a = a + 1
end do
-!$omp simd reduction(task,+:a) ! { dg-error "invalid 'task' reduction modifier on construct other than 'parallel', 'do' or 'sections'" }
+!$omp simd reduction(task,+:a) ! { dg-error "invalid 'task' reduction modifier on construct other than 'parallel', 'do', 'sections' or 'scope'" }
do i=1,10
a = a + 1
end do
integer :: a, b, i
a = 0
-!$omp simd reduction(task,+:a) ! { dg-error "invalid 'task' reduction modifier on construct other than 'parallel', 'do' or 'sections'" }
+!$omp simd reduction(task,+:a) ! { dg-error "invalid 'task' reduction modifier on construct other than 'parallel', 'do', 'sections' or 'scope'" }
do i=1,10
a = a + 1
end do
(s1, gimple_omp_masked_clauses (stmt));
break;
+ case GIMPLE_OMP_SCOPE:
+ s1 = remap_gimple_seq (gimple_omp_body (stmt), id);
+ copy = gimple_build_omp_scope
+ (s1, gimple_omp_scope_clauses (stmt));
+ break;
+
case GIMPLE_OMP_TASKGROUP:
s1 = remap_gimple_seq (gimple_omp_body (stmt), id);
copy = gimple_build_omp_taskgroup
case GIMPLE_OMP_CRITICAL:
case GIMPLE_OMP_MASTER:
case GIMPLE_OMP_MASKED:
+ case GIMPLE_OMP_SCOPE:
case GIMPLE_OMP_TASKGROUP:
case GIMPLE_OMP_ORDERED:
case GIMPLE_OMP_SCAN:
info->suppress_expansion = save_suppress;
break;
+ case GIMPLE_OMP_SCOPE:
+ save_suppress = info->suppress_expansion;
+ convert_nonlocal_omp_clauses (gimple_omp_scope_clauses_ptr (stmt), wi);
+ walk_body (convert_nonlocal_reference_stmt, convert_nonlocal_reference_op,
+ info, gimple_omp_body_ptr (stmt));
+ info->suppress_expansion = save_suppress;
+ break;
+
case GIMPLE_OMP_TASKGROUP:
save_suppress = info->suppress_expansion;
convert_nonlocal_omp_clauses (gimple_omp_taskgroup_clauses_ptr (stmt), wi);
info->suppress_expansion = save_suppress;
break;
+ case GIMPLE_OMP_SCOPE:
+ save_suppress = info->suppress_expansion;
+ convert_local_omp_clauses (gimple_omp_scope_clauses_ptr (stmt), wi);
+ walk_body (convert_local_reference_stmt, convert_local_reference_op,
+ info, gimple_omp_body_ptr (stmt));
+ info->suppress_expansion = save_suppress;
+ break;
+
case GIMPLE_OMP_TASKGROUP:
save_suppress = info->suppress_expansion;
convert_local_omp_clauses (gimple_omp_taskgroup_clauses_ptr (stmt), wi);
case GIMPLE_OMP_SECTIONS:
case GIMPLE_OMP_SECTION:
case GIMPLE_OMP_SINGLE:
+ case GIMPLE_OMP_SCOPE:
case GIMPLE_OMP_MASTER:
case GIMPLE_OMP_MASKED:
case GIMPLE_OMP_TASKGROUP:
dump_omp_clauses (pp, OMP_SINGLE_CLAUSES (node), spc, flags);
goto dump_omp_body;
+ case OMP_SCOPE:
+ pp_string (pp, "#pragma omp scope");
+ dump_omp_clauses (pp, OMP_SCOPE_CLAUSES (node), spc, flags);
+ goto dump_omp_body;
+
case OMP_CLAUSE:
/* If we come here, we're dumping something that's not an OMP construct,
for example, OMP clauses attached to a function's '__attribute__'.
Operand 1: OMP_SINGLE_CLAUSES: List of clauses. */
DEFTREECODE (OMP_SINGLE, "omp_single", tcc_statement, 2)
+/* OpenMP - #pragma omp scope
+   Operand 0: OMP_SCOPE_BODY: Scope body.
+ Operand 1: OMP_SCOPE_CLAUSES: List of clauses. */
+DEFTREECODE (OMP_SCOPE, "omp_scope", tcc_statement, 2)
+
/* OpenMP - #pragma omp taskgroup
Operand 0: OMP_TASKGROUP_BODY: Taskgroup body.
Operand 1: OMP_SINGLE_CLAUSES: List of clauses. */
#define OMP_SINGLE_BODY(NODE) TREE_OPERAND (OMP_SINGLE_CHECK (NODE), 0)
#define OMP_SINGLE_CLAUSES(NODE) TREE_OPERAND (OMP_SINGLE_CHECK (NODE), 1)
+#define OMP_SCOPE_BODY(NODE) TREE_OPERAND (OMP_SCOPE_CHECK (NODE), 0)
+#define OMP_SCOPE_CLAUSES(NODE) TREE_OPERAND (OMP_SCOPE_CHECK (NODE), 1)
+
#define OMP_MASTER_BODY(NODE) TREE_OPERAND (OMP_MASTER_CHECK (NODE), 0)
#define OMP_MASKED_BODY(NODE) TREE_OPERAND (OMP_MASKED_CHECK (NODE), 0)
libgomp_la_SOURCES = alloc.c atomic.c barrier.c critical.c env.c error.c \
icv.c icv-device.c iter.c iter_ull.c loop.c loop_ull.c ordered.c \
- parallel.c sections.c single.c task.c team.c work.c lock.c mutex.c \
- proc.c sem.c bar.c ptrlock.c time.c fortran.c affinity.c target.c \
- splay-tree.c libgomp-plugin.c oacc-parallel.c oacc-host.c oacc-init.c \
- oacc-mem.c oacc-async.c oacc-plugin.c oacc-cuda.c priority_queue.c \
- affinity-fmt.c teams.c allocator.c oacc-profiling.c oacc-target.c
+ parallel.c scope.c sections.c single.c task.c team.c work.c lock.c \
+ mutex.c proc.c sem.c bar.c ptrlock.c time.c fortran.c affinity.c \
+ target.c splay-tree.c libgomp-plugin.c oacc-parallel.c oacc-host.c \
+ oacc-init.c oacc-mem.c oacc-async.c oacc-plugin.c oacc-cuda.c \
+ priority_queue.c affinity-fmt.c teams.c allocator.c oacc-profiling.c \
+ oacc-target.c
include $(top_srcdir)/plugin/Makefrag.am
@USE_FORTRAN_TRUE@am__objects_1 = openacc.lo
am_libgomp_la_OBJECTS = alloc.lo atomic.lo barrier.lo critical.lo \
env.lo error.lo icv.lo icv-device.lo iter.lo iter_ull.lo \
- loop.lo loop_ull.lo ordered.lo parallel.lo sections.lo \
- single.lo task.lo team.lo work.lo lock.lo mutex.lo proc.lo \
- sem.lo bar.lo ptrlock.lo time.lo fortran.lo affinity.lo \
- target.lo splay-tree.lo libgomp-plugin.lo oacc-parallel.lo \
- oacc-host.lo oacc-init.lo oacc-mem.lo oacc-async.lo \
- oacc-plugin.lo oacc-cuda.lo priority_queue.lo affinity-fmt.lo \
- teams.lo allocator.lo oacc-profiling.lo oacc-target.lo \
- $(am__objects_1)
+ loop.lo loop_ull.lo ordered.lo parallel.lo scope.lo \
+ sections.lo single.lo task.lo team.lo work.lo lock.lo mutex.lo \
+ proc.lo sem.lo bar.lo ptrlock.lo time.lo fortran.lo \
+ affinity.lo target.lo splay-tree.lo libgomp-plugin.lo \
+ oacc-parallel.lo oacc-host.lo oacc-init.lo oacc-mem.lo \
+ oacc-async.lo oacc-plugin.lo oacc-cuda.lo priority_queue.lo \
+ affinity-fmt.lo teams.lo allocator.lo oacc-profiling.lo \
+ oacc-target.lo $(am__objects_1)
libgomp_la_OBJECTS = $(am_libgomp_la_OBJECTS)
AM_V_P = $(am__v_P_@AM_V@)
am__v_P_ = $(am__v_P_@AM_DEFAULT_V@)
libgomp_la_LINK = $(LINK) $(libgomp_la_LDFLAGS)
libgomp_la_SOURCES = alloc.c atomic.c barrier.c critical.c env.c \
error.c icv.c icv-device.c iter.c iter_ull.c loop.c loop_ull.c \
- ordered.c parallel.c sections.c single.c task.c team.c work.c \
- lock.c mutex.c proc.c sem.c bar.c ptrlock.c time.c fortran.c \
- affinity.c target.c splay-tree.c libgomp-plugin.c \
+ ordered.c parallel.c scope.c sections.c single.c task.c team.c \
+ work.c lock.c mutex.c proc.c sem.c bar.c ptrlock.c time.c \
+ fortran.c affinity.c target.c splay-tree.c libgomp-plugin.c \
oacc-parallel.c oacc-host.c oacc-init.c oacc-mem.c \
oacc-async.c oacc-plugin.c oacc-cuda.c priority_queue.c \
affinity-fmt.c teams.c allocator.c oacc-profiling.c \
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/priority_queue.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/proc.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/ptrlock.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/scope.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sections.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sem.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/single.Plo@am__quote@
GOMP_free;
} GOMP_5.0;
+GOMP_5.1 {
+ global:
+ GOMP_scope_start;
+} GOMP_5.0.1;
+
OACC_2.0 {
global:
acc_get_num_devices;
extern void *GOMP_single_copy_start (void);
extern void GOMP_single_copy_end (void *);
+/* scope.c */
+
+extern void GOMP_scope_start (uintptr_t *);
+
/* target.c */
extern void GOMP_target (int, void (*) (void *), const void *,
--- /dev/null
+/* Copyright (C) 2021 Free Software Foundation, Inc.
+ Contributed by Jakub Jelinek <jakub@redhat.com>.
+
+ This file is part of the GNU Offloading and Multi Processing Library
+ (libgomp).
+
+ Libgomp is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3, or (at your option)
+ any later version.
+
+ Libgomp is distributed in the hope that it will be useful, but WITHOUT ANY
+ WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ Under Section 7 of GPL version 3, you are granted additional
+ permissions described in the GCC Runtime Library Exception, version
+ 3.1, as published by the Free Software Foundation.
+
+ You should have received a copy of the GNU General Public License and
+ a copy of the GCC Runtime Library Exception along with this program;
+ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+ <http://www.gnu.org/licenses/>. */
+
+/* This file handles the SCOPE construct with task reductions. */
+
+#include "libgomp.h"
+#include <string.h>
+
+
+ialias_redirect (GOMP_taskgroup_reduction_register)
+
+/* This routine is called when first encountering a scope construct
+ with task reductions. While scope is not a work-sharing construct,
+ if it has task reductions on it, we treat it as one, but as if it is
+ nowait, so the work-sharing behavior is done solely to choose which
+ thread does the initial initialization of task reductions and which
+ threads follow. scope with task reductions must not be nowait,
+ but the barrier and GOMP_workshare_task_reduction_unregister are emitted
+ by the lowered code later. */
+
+void
+GOMP_scope_start (uintptr_t *reductions)
+{
+  struct gomp_thread *thr = gomp_thread ();
+
+  /* Start the implicit taskgroup that tracks the task reductions.  */
+  gomp_workshare_taskgroup_start ();
+  if (gomp_work_share_start (0))
+    {
+      /* First thread to reach the construct: register the reduction
+	 array and publish it on the work share so the other threads
+	 can find it.  */
+      GOMP_taskgroup_reduction_register (reductions);
+      thr->task->taskgroup->workshare = true;
+      thr->ts.work_share->task_reductions = reductions;
+      gomp_work_share_init_done ();
+    }
+  else
+    {
+      /* Remaining threads: hook their private reduction data up to
+	 the registration done by the first thread.  */
+      uintptr_t *first_reductions = thr->ts.work_share->task_reductions;
+      gomp_workshare_task_reduction_register (reductions,
+					      first_reductions);
+    }
+}
--- /dev/null
+#ifdef __cplusplus
+extern "C"
+#endif
+void abort ();
+
+int
+main ()
+{
+  int a[64] = {};
+  int r = 0, r2 = 0, i;
+  #pragma omp parallel
+  {
+    /* Nested nowait scopes wrapping a worksharing loop.  */
+    #pragma omp scope nowait
+    #pragma omp scope nowait
+    #pragma omp for
+    for (i = 0; i < 64; i++)
+      a[i] += 1;
+    /* scope with a reduction clause; the explicit barrier inside the
+       construct (and the one after it) provides the synchronization
+       that the two nowait clauses skip.  */
+    #pragma omp scope reduction(+: r) nowait
+    {
+      #pragma omp for nowait
+      for (i = 0; i < 64; i++)
+	{
+	  r += i;
+	  if (a[i] != 1)
+	    abort ();
+	}
+      #pragma omp barrier
+    }
+    #pragma omp barrier
+    if (r != 64 * 63 / 2)
+      abort ();
+    /* private and reduction clauses on nested scopes; the inner scope
+       has no nowait, so its closing barrier makes r2 visible.  */
+    #pragma omp scope nowait private (i)
+    #pragma omp scope reduction(+: r2)
+    {
+      #pragma omp for nowait
+      for (i = 0; i < 64; i++)
+	{
+	  r2 += 2 * i;
+	  a[i] += i;
+	}
+    }
+    if (r2 != 64 * 63)
+      abort ();
+    #pragma omp for nowait
+    for (i = 0; i < 64; i++)
+      if (a[i] != i + 1)
+	abort ();
+  }
+  return 0;
+}
--- /dev/null
+extern
+#ifdef __cplusplus
+"C"
+#endif
+void abort (void);
+int a, b[3] = { 1, 1, 1 };
+unsigned long int c[2] = { ~0UL, ~0UL };
+
+void
+bar (int i)
+{
+  /* Contribute to the task reductions of the enclosing scope
+     constructs in foo via in_reduction; c uses a zero-length-suffix
+     array section starting at element 1.  */
+  #pragma omp task in_reduction (*: b[:3]) in_reduction (&: c[1:]) \
+		   in_reduction (+: a)
+  {
+    a += 4;
+    b[1] *= 4;
+    c[1] &= ~(1UL << (i + 16));
+  }
+}
+
+void
+foo (int x)
+{
+  /* Three nested scope constructs, each with a task reduction on a
+     different variable and operator; the sections below contribute
+     both directly and through the explicit tasks spawned by bar.
+     The last section only runs when X is nonzero.  */
+  #pragma omp scope reduction (task, +: a)
+  {
+    #pragma omp scope reduction (task, *: b)
+    {
+      #pragma omp scope reduction (task, &: c[1:1])
+      {
+	#pragma omp barrier
+	#pragma omp sections
+	{
+	  {
+	    a++; b[0] *= 2; bar (2); b[2] *= 3; c[1] &= ~(1UL << 2);
+	  }
+	  #pragma omp section
+	  { b[0] *= 2; bar (4); b[2] *= 3; c[1] &= ~(1UL << 4); a++; }
+	  #pragma omp section
+	  { bar (6); b[2] *= 3; c[1] &= ~(1UL << 6); a++; b[0] *= 2; }
+	  #pragma omp section
+	  { b[2] *= 3; c[1] &= ~(1UL << 8); a++; b[0] *= 2; bar (8); }
+	  #pragma omp section
+	  { c[1] &= ~(1UL << 10); a++; b[0] *= 2; bar (10); b[2] *= 3; }
+	  #pragma omp section
+	  { a++; b[0] *= 2; b[2] *= 3; c[1] &= ~(1UL << 12); bar (12); }
+	  #pragma omp section
+	  if (x)
+	    {
+	      a++; b[0] *= 2; b[2] *= 3; bar (14); c[1] &= ~(1UL << 14);
+	    }
+	}
+      }
+    }
+  }
+}
+
+int
+main ()
+{
+  volatile int one = 1;
+  /* Outside a parallel region, with x == 0 (last section skipped):
+     six sections each do a++, b[0] *= 2, b[2] *= 3, one bar call
+     (a += 4, b[1] *= 4) and clear one bit pair in c[1].  */
+  foo (!one);
+  if (a != 30 || b[0] != 64 || b[1] != (1 << 12) || b[2] != 3 * 3 * 3 * 3 * 3 * 3
+      || c[0] != ~0UL || c[1] != ~0x15541554UL)
+    abort ();
+  a = 0;
+  b[0] = 1;
+  b[1] = 1;
+  b[2] = 1;
+  c[1] = ~0UL;
+  /* Inside a parallel region, with x nonzero the seventh section also
+     executes, so each total gains one more contribution.  */
+  #pragma omp parallel
+  foo (one);
+  if (a != 35 || b[0] != 128 || b[1] != (1 << 14) || b[2] != 3 * 3 * 3 * 3 * 3 * 3 * 3
+      || c[0] != ~0UL || c[1] != ~0x55545554UL)
+    abort ();
+  return 0;
+}