C_OMP_CLAUSE_SPLIT_COUNT,
C_OMP_CLAUSE_SPLIT_SECTIONS = C_OMP_CLAUSE_SPLIT_FOR,
C_OMP_CLAUSE_SPLIT_TASKLOOP = C_OMP_CLAUSE_SPLIT_FOR,
- C_OMP_CLAUSE_SPLIT_LOOP = C_OMP_CLAUSE_SPLIT_FOR
+ C_OMP_CLAUSE_SPLIT_LOOP = C_OMP_CLAUSE_SPLIT_FOR,
+ C_OMP_CLAUSE_SPLIT_MASKED = C_OMP_CLAUSE_SPLIT_DISTRIBUTE
};
enum c_omp_region_type
};
extern tree c_finish_omp_master (location_t, tree);
+extern tree c_finish_omp_masked (location_t, tree, tree);
extern tree c_finish_omp_taskgroup (location_t, tree, tree);
extern tree c_finish_omp_critical (location_t, tree, tree, tree);
extern tree c_finish_omp_ordered (location_t, tree, tree);
return t;
}
+/* Complete a #pragma omp masked construct. BODY is the structured-block
+   that follows the pragma, CLAUSES are the clauses of the construct
+   (its filter clause, if any) and LOC is the location of the #pragma. */
+
+tree
+c_finish_omp_masked (location_t loc, tree body, tree clauses)
+{
+ tree stmt = make_node (OMP_MASKED);
+ TREE_TYPE (stmt) = void_type_node;
+ OMP_MASKED_BODY (stmt) = body;
+ OMP_MASKED_CLAUSES (stmt) = clauses;
+ SET_EXPR_LOCATION (stmt, loc);
+ return add_stmt (stmt);
+}
+
/* Complete a #pragma omp taskgroup construct. BODY is the structured-block
that follows the pragma. LOC is the location of the #pragma. */
#pragma omp distribute parallel for simd
#pragma omp distribute simd
#pragma omp for simd
+ #pragma omp masked taskloop
+ #pragma omp masked taskloop simd
#pragma omp master taskloop
#pragma omp master taskloop simd
#pragma omp parallel for
#pragma omp parallel for simd
#pragma omp parallel loop
+ #pragma omp parallel masked
+ #pragma omp parallel masked taskloop
+ #pragma omp parallel masked taskloop simd
#pragma omp parallel master
#pragma omp parallel master taskloop
#pragma omp parallel master taskloop simd
case OMP_CLAUSE_BIND:
s = C_OMP_CLAUSE_SPLIT_LOOP;
break;
+ case OMP_CLAUSE_FILTER:
+ s = C_OMP_CLAUSE_SPLIT_MASKED;
+ break;
/* Duplicate this to all of taskloop, distribute, for, simd and
loop. */
case OMP_CLAUSE_COLLAPSE:
else
s = C_OMP_CLAUSE_SPLIT_DISTRIBUTE;
break;
- /* Private clause is supported on all constructs but master,
- it is enough to put it on the innermost one other than master. For
- #pragma omp {for,sections} put it on parallel though,
- as that's what we did for OpenMP 3.1. */
+ /* Private clause is supported on all constructs but master/masked,
+ it is enough to put it on the innermost one other than
+ master/masked. For #pragma omp {for,sections} put it on parallel
+ though, as that's what we did for OpenMP 3.1. */
case OMP_CLAUSE_PRIVATE:
switch (code)
{
case OMP_DISTRIBUTE: s = C_OMP_CLAUSE_SPLIT_DISTRIBUTE; break;
case OMP_TEAMS: s = C_OMP_CLAUSE_SPLIT_TEAMS; break;
case OMP_MASTER: s = C_OMP_CLAUSE_SPLIT_PARALLEL; break;
+ case OMP_MASKED: s = C_OMP_CLAUSE_SPLIT_PARALLEL; break;
case OMP_TASKLOOP: s = C_OMP_CLAUSE_SPLIT_TASKLOOP; break;
case OMP_LOOP: s = C_OMP_CLAUSE_SPLIT_LOOP; break;
default: gcc_unreachable ();
}
break;
/* Firstprivate clause is supported on all constructs but
- simd, master and loop. Put it on the outermost of those and
- duplicate on teams and parallel. */
+ simd, master, masked and loop. Put it on the outermost of those
+ and duplicate on teams and parallel. */
case OMP_CLAUSE_FIRSTPRIVATE:
if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_MAP))
!= 0)
else if ((mask & (OMP_CLAUSE_MASK_1
<< PRAGMA_OMP_CLAUSE_NOGROUP)) != 0)
/* This must be
- #pragma omp parallel master taskloop{, simd}. */
+ #pragma omp parallel mas{ked,ter} taskloop{, simd}. */
s = C_OMP_CLAUSE_SPLIT_TASKLOOP;
else
/* This must be
else if ((mask & (OMP_CLAUSE_MASK_1
<< PRAGMA_OMP_CLAUSE_NOGROUP)) != 0)
{
- /* This must be #pragma omp {,{,parallel }master }taskloop simd
+ /* This must be
+ #pragma omp {,{,parallel }mas{ked,ter} }taskloop simd
or
- #pragma omp {,parallel }master taskloop. */
+ #pragma omp {,parallel }mas{ked,ter} taskloop. */
gcc_assert (code == OMP_SIMD || code == OMP_TASKLOOP);
s = C_OMP_CLAUSE_SPLIT_TASKLOOP;
}
}
else if (code == OMP_SECTIONS
|| code == OMP_PARALLEL
- || code == OMP_MASTER)
+ || code == OMP_MASTER
+ || code == OMP_MASKED)
s = C_OMP_CLAUSE_SPLIT_PARALLEL;
else if (code == OMP_TASKLOOP)
s = C_OMP_CLAUSE_SPLIT_TASKLOOP;
gcc_assert (cclauses[C_OMP_CLAUSE_SPLIT_TARGET] == NULL_TREE);
if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_TEAMS)) == 0)
gcc_assert (cclauses[C_OMP_CLAUSE_SPLIT_TEAMS] == NULL_TREE);
- if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_DIST_SCHEDULE)) == 0)
+ if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_DIST_SCHEDULE)) == 0
+ && (mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_FILTER)) == 0)
gcc_assert (cclauses[C_OMP_CLAUSE_SPLIT_DISTRIBUTE] == NULL_TREE);
if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_THREADS)) == 0)
gcc_assert (cclauses[C_OMP_CLAUSE_SPLIT_PARALLEL] == NULL_TREE);
C_OMP_DIR_STANDALONE, false }, */
{ "loop", nullptr, nullptr, PRAGMA_OMP_LOOP,
C_OMP_DIR_CONSTRUCT, true },
- /* { "masked", nullptr, nullptr, PRAGMA_OMP_MASKED,
- C_OMP_DIR_CONSTRUCT, true }, */
+ { "masked", nullptr, nullptr, PRAGMA_OMP_MASKED,
+ C_OMP_DIR_CONSTRUCT, true },
{ "master", nullptr, nullptr, PRAGMA_OMP_MASTER,
C_OMP_DIR_CONSTRUCT, true },
/* { "metadirective", nullptr, nullptr, PRAGMA_OMP_METADIRECTIVE,
{ "distribute", PRAGMA_OMP_DISTRIBUTE },
{ "for", PRAGMA_OMP_FOR },
{ "loop", PRAGMA_OMP_LOOP },
+ { "masked", PRAGMA_OMP_MASKED },
{ "master", PRAGMA_OMP_MASTER },
{ "ordered", PRAGMA_OMP_ORDERED },
{ "parallel", PRAGMA_OMP_PARALLEL },
PRAGMA_OMP_FLUSH,
PRAGMA_OMP_FOR,
PRAGMA_OMP_LOOP,
+ PRAGMA_OMP_MASKED,
PRAGMA_OMP_MASTER,
PRAGMA_OMP_ORDERED,
PRAGMA_OMP_PARALLEL,
PRAGMA_OMP_CLAUSE_DEVICE,
PRAGMA_OMP_CLAUSE_DEVICE_TYPE,
PRAGMA_OMP_CLAUSE_DIST_SCHEDULE,
+ PRAGMA_OMP_CLAUSE_FILTER,
PRAGMA_OMP_CLAUSE_FINAL,
PRAGMA_OMP_CLAUSE_FIRSTPRIVATE,
PRAGMA_OMP_CLAUSE_FOR,
result = PRAGMA_OMP_CLAUSE_DIST_SCHEDULE;
break;
case 'f':
- if (!strcmp ("final", p))
+ if (!strcmp ("filter", p))
+ result = PRAGMA_OMP_CLAUSE_FILTER;
+ else if (!strcmp ("final", p))
result = PRAGMA_OMP_CLAUSE_FINAL;
else if (!strcmp ("finalize", p))
result = PRAGMA_OACC_CLAUSE_FINALIZE;
return list;
}
+/* OpenMP 5.1:
+   filter ( integer-expression ) */
+
+static tree
+c_parser_omp_clause_filter (c_parser *parser, tree list)
+{
+ /* Location of the clause keyword; used as the location of the
+    OMP_CLAUSE built below.  */
+ location_t hint_loc = c_parser_peek_token (parser)->location;
+ matching_parens parens;
+ if (parens.require_open (parser))
+ {
+ location_t expr_loc = c_parser_peek_token (parser)->location;
+ c_expr expr = c_parser_expr_no_commas (parser, NULL);
+ expr = convert_lvalue_to_rvalue (expr_loc, expr, false, true);
+ tree c, t = expr.value;
+ t = c_fully_fold (t, false, NULL);
+ /* The filter expression must be integral (thread number).  */
+ if (!INTEGRAL_TYPE_P (TREE_TYPE (t)))
+ {
+ c_parser_error (parser, "expected integer expression");
+ /* NOTE(review): this path returns before consuming the closing
+    paren; remaining tokens of the clause are left to the caller's
+    recovery.  */
+ return list;
+ }
+ parens.skip_until_found_close (parser);
+ check_no_duplicate_clause (list, OMP_CLAUSE_FILTER, "filter");
+
+ c = build_omp_clause (hint_loc, OMP_CLAUSE_FILTER);
+ OMP_CLAUSE_FILTER_EXPR (c) = t;
+ OMP_CLAUSE_CHAIN (c) = list;
+ list = c;
+ }
+
+ return list;
+}
+
/* OpenMP 4.5:
defaultmap ( tofrom : scalar )
clauses = c_parser_omp_clause_detach (parser, clauses);
c_name = "detach";
break;
+ case PRAGMA_OMP_CLAUSE_FILTER:
+ clauses = c_parser_omp_clause_filter (parser, clauses);
+ c_name = "filter";
+ break;
case PRAGMA_OMP_CLAUSE_FIRSTPRIVATE:
clauses = c_parser_omp_clause_firstprivate (parser, clauses);
c_name = "firstprivate";
if_p));
}
+/* OpenMP 5.1:
+   # pragma omp masked masked-clauses new-line
+     structured-block
+
+   LOC is the location of the #pragma token.
+*/
+
+#define OMP_MASKED_CLAUSE_MASK \
+ (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_FILTER)
+
+static tree
+c_parser_omp_masked (location_t loc, c_parser *parser,
+ char *p_name, omp_clause_mask mask, tree *cclauses,
+ bool *if_p)
+{
+ tree block, clauses, ret;
+
+ strcat (p_name, " masked");
+ mask |= OMP_MASKED_CLAUSE_MASK;
+
+ /* Check for the combined constructs
+    "masked taskloop" and "masked taskloop simd".  */
+ if (c_parser_next_token_is (parser, CPP_NAME))
+ {
+ const char *p = IDENTIFIER_POINTER (c_parser_peek_token (parser)->value);
+
+ if (strcmp (p, "taskloop") == 0)
+ {
+ tree cclauses_buf[C_OMP_CLAUSE_SPLIT_COUNT];
+ if (cclauses == NULL)
+ cclauses = cclauses_buf;
+
+ c_parser_consume_token (parser);
+ if (!flag_openmp) /* flag_openmp_simd */
+ return c_parser_omp_taskloop (loc, parser, p_name, mask, cclauses,
+ if_p);
+ block = c_begin_compound_stmt (true);
+ ret = c_parser_omp_taskloop (loc, parser, p_name, mask, cclauses,
+ if_p);
+ block = c_end_compound_stmt (loc, block, true);
+ if (ret == NULL_TREE)
+ return ret;
+ ret = c_finish_omp_masked (loc, block,
+ cclauses[C_OMP_CLAUSE_SPLIT_MASKED]);
+ /* Mark as a combined construct; the "parallel masked" caller
+    propagates this to OMP_PARALLEL_COMBINED.  */
+ OMP_MASKED_COMBINED (ret) = 1;
+ return ret;
+ }
+ }
+ if (!flag_openmp) /* flag_openmp_simd */
+ {
+ c_parser_skip_to_pragma_eol (parser, false);
+ return NULL_TREE;
+ }
+
+ clauses = c_parser_omp_all_clauses (parser, mask, p_name, cclauses == NULL);
+ if (cclauses)
+ {
+ /* Combined construct: distribute clauses to constituent constructs
+    and keep only the masked ones here.  */
+ omp_split_clauses (loc, OMP_MASKED, mask, clauses, cclauses);
+ clauses = cclauses[C_OMP_CLAUSE_SPLIT_MASKED];
+ }
+
+ return c_finish_omp_masked (loc, c_parser_omp_structured_block (parser,
+ if_p),
+ clauses);
+}
+
/* OpenMP 2.5:
# pragma omp ordered new-line
structured-block
else if (c_parser_next_token_is (parser, CPP_NAME))
{
const char *p = IDENTIFIER_POINTER (c_parser_peek_token (parser)->value);
- if (cclauses == NULL && strcmp (p, "master") == 0)
+ if (cclauses == NULL && strcmp (p, "masked") == 0)
+ {
+ tree cclauses_buf[C_OMP_CLAUSE_SPLIT_COUNT];
+ cclauses = cclauses_buf;
+
+ c_parser_consume_token (parser);
+ if (!flag_openmp) /* flag_openmp_simd */
+ return c_parser_omp_masked (loc, parser, p_name, mask, cclauses,
+ if_p);
+ block = c_begin_omp_parallel ();
+ tree ret = c_parser_omp_masked (loc, parser, p_name, mask, cclauses,
+ if_p);
+ stmt = c_finish_omp_parallel (loc,
+ cclauses[C_OMP_CLAUSE_SPLIT_PARALLEL],
+ block);
+ if (ret == NULL)
+ return ret;
+ /* masked does have just filter clause, but during gimplification
+ isn't represented by a gimplification omp context, so for
+ #pragma omp parallel masked don't set OMP_PARALLEL_COMBINED,
+ so that
+ #pragma omp parallel masked
+ #pragma omp taskloop simd lastprivate (x)
+ isn't confused with
+ #pragma omp parallel masked taskloop simd lastprivate (x) */
+ if (OMP_MASKED_COMBINED (ret))
+ OMP_PARALLEL_COMBINED (stmt) = 1;
+ return stmt;
+ }
+ else if (cclauses == NULL && strcmp (p, "master") == 0)
{
tree cclauses_buf[C_OMP_CLAUSE_SPLIT_COUNT];
cclauses = cclauses_buf;
strcpy (p_name, "#pragma omp");
stmt = c_parser_omp_loop (loc, parser, p_name, mask, NULL, if_p);
break;
+ case PRAGMA_OMP_MASKED:
+ strcpy (p_name, "#pragma omp");
+ stmt = c_parser_omp_masked (loc, parser, p_name, mask, NULL, if_p);
+ break;
case PRAGMA_OMP_MASTER:
strcpy (p_name, "#pragma omp");
stmt = c_parser_omp_master (loc, parser, p_name, mask, NULL, if_p);
case OMP_CLAUSE_THREADS:
case OMP_CLAUSE_SIMD:
case OMP_CLAUSE_HINT:
+ case OMP_CLAUSE_FILTER:
case OMP_CLAUSE_DEFAULTMAP:
case OMP_CLAUSE_BIND:
case OMP_CLAUSE_NUM_GANGS:
result = PRAGMA_OMP_CLAUSE_DIST_SCHEDULE;
break;
case 'f':
- if (!strcmp ("final", p))
+ if (!strcmp ("filter", p))
+ result = PRAGMA_OMP_CLAUSE_FILTER;
+ else if (!strcmp ("final", p))
result = PRAGMA_OMP_CLAUSE_FINAL;
else if (!strcmp ("finalize", p))
result = PRAGMA_OACC_CLAUSE_FINALIZE;
return c;
}
+/* OpenMP 5.1:
+   filter ( integer-expression ) */
+
+static tree
+cp_parser_omp_clause_filter (cp_parser *parser, tree list, location_t location)
+{
+ tree t, c;
+
+ matching_parens parens;
+ if (!parens.require_open (parser))
+ return list;
+
+ t = cp_parser_assignment_expression (parser);
+
+ /* On parse failure skip past the clause; semantic checking of the
+    expression (integrality) is done later in the OMP_CLAUSE_FILTER
+    case of finish_omp_clauses, since T may be type-dependent in a
+    template here.  */
+ if (t == error_mark_node
+ || !parens.require_close (parser))
+ cp_parser_skip_to_closing_parenthesis (parser, /*recovering=*/true,
+ /*or_comma=*/false,
+ /*consume_paren=*/true);
+ check_no_duplicate_clause (list, OMP_CLAUSE_FILTER, "filter", location);
+
+ c = build_omp_clause (location, OMP_CLAUSE_FILTER);
+ OMP_CLAUSE_FILTER_EXPR (c) = t;
+ OMP_CLAUSE_CHAIN (c) = list;
+
+ return c;
+}
+
/* OpenMP 4.5:
defaultmap ( tofrom : scalar )
token->location, false);
c_name = "default";
break;
+ case PRAGMA_OMP_CLAUSE_FILTER:
+ clauses = cp_parser_omp_clause_filter (parser, clauses,
+ token->location);
+ c_name = "filter";
+ break;
case PRAGMA_OMP_CLAUSE_FINAL:
clauses = cp_parser_omp_clause_final (parser, clauses, token->location);
c_name = "final";
cp_parser_omp_structured_block (parser, if_p));
}
+/* OpenMP 5.1:
+   # pragma omp masked masked-clauses new-line
+     structured-block */
+
+#define OMP_MASKED_CLAUSE_MASK \
+ (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_FILTER)
+
+static tree
+cp_parser_omp_masked (cp_parser *parser, cp_token *pragma_tok,
+ char *p_name, omp_clause_mask mask, tree *cclauses,
+ bool *if_p)
+{
+ tree clauses, sb, ret;
+ unsigned int save;
+ location_t loc = cp_lexer_peek_token (parser->lexer)->location;
+
+ strcat (p_name, " masked");
+ mask |= OMP_MASKED_CLAUSE_MASK;
+
+ /* Check for the combined constructs
+    "masked taskloop" and "masked taskloop simd".  */
+ if (cp_lexer_next_token_is (parser->lexer, CPP_NAME))
+ {
+ tree id = cp_lexer_peek_token (parser->lexer)->u.value;
+ const char *p = IDENTIFIER_POINTER (id);
+
+ if (strcmp (p, "taskloop") == 0)
+ {
+ tree cclauses_buf[C_OMP_CLAUSE_SPLIT_COUNT];
+ if (cclauses == NULL)
+ cclauses = cclauses_buf;
+
+ cp_lexer_consume_token (parser->lexer);
+ if (!flag_openmp) /* flag_openmp_simd */
+ return cp_parser_omp_taskloop (parser, pragma_tok, p_name, mask,
+ cclauses, if_p);
+ sb = begin_omp_structured_block ();
+ save = cp_parser_begin_omp_structured_block (parser);
+ ret = cp_parser_omp_taskloop (parser, pragma_tok, p_name, mask,
+ cclauses, if_p);
+ cp_parser_end_omp_structured_block (parser, save);
+ tree body = finish_omp_structured_block (sb);
+ if (ret == NULL)
+ return ret;
+ ret = c_finish_omp_masked (loc, body,
+ cclauses[C_OMP_CLAUSE_SPLIT_MASKED]);
+ /* Mark as a combined construct; the "parallel masked" caller
+    propagates this to OMP_PARALLEL_COMBINED.  */
+ OMP_MASKED_COMBINED (ret) = 1;
+ return ret;
+ }
+ }
+ if (!flag_openmp) /* flag_openmp_simd */
+ {
+ cp_parser_skip_to_pragma_eol (parser, pragma_tok);
+ return NULL_TREE;
+ }
+
+ clauses = cp_parser_omp_all_clauses (parser, mask, p_name, pragma_tok,
+ cclauses == NULL);
+ if (cclauses)
+ {
+ /* Combined construct: split clauses using the masked construct code,
+    matching the C front end, so e.g. OMP_CLAUSE_FILTER lands in
+    C_OMP_CLAUSE_SPLIT_MASKED.  */
+ cp_omp_split_clauses (loc, OMP_MASKED, mask, clauses, cclauses);
+ clauses = cclauses[C_OMP_CLAUSE_SPLIT_MASKED];
+ }
+
+ return c_finish_omp_masked (loc,
+ cp_parser_omp_structured_block (parser, if_p),
+ clauses);
+}
+
/* OpenMP 2.5:
# pragma omp ordered new-line
structured-block
{
tree id = cp_lexer_peek_token (parser->lexer)->u.value;
const char *p = IDENTIFIER_POINTER (id);
- if (cclauses == NULL && strcmp (p, "master") == 0)
+ if (cclauses == NULL && strcmp (p, "masked") == 0)
+ {
+ tree cclauses_buf[C_OMP_CLAUSE_SPLIT_COUNT];
+ cclauses = cclauses_buf;
+
+ cp_lexer_consume_token (parser->lexer);
+ if (!flag_openmp) /* flag_openmp_simd */
+ return cp_parser_omp_masked (parser, pragma_tok, p_name, mask,
+ cclauses, if_p);
+ block = begin_omp_parallel ();
+ save = cp_parser_begin_omp_structured_block (parser);
+ tree ret = cp_parser_omp_masked (parser, pragma_tok, p_name, mask,
+ cclauses, if_p);
+ cp_parser_end_omp_structured_block (parser, save);
+ stmt = finish_omp_parallel (cclauses[C_OMP_CLAUSE_SPLIT_PARALLEL],
+ block);
+ if (ret == NULL_TREE)
+ return ret;
+ /* masked does have just filter clause, but during gimplification
+ isn't represented by a gimplification omp context, so for
+ #pragma omp parallel masked don't set OMP_PARALLEL_COMBINED,
+ so that
+ #pragma omp parallel masked
+ #pragma omp taskloop simd lastprivate (x)
+ isn't confused with
+ #pragma omp parallel masked taskloop simd lastprivate (x) */
+ if (OMP_MASKED_COMBINED (ret))
+ OMP_PARALLEL_COMBINED (stmt) = 1;
+ return stmt;
+ }
+ else if (cclauses == NULL && strcmp (p, "master") == 0)
{
tree cclauses_buf[C_OMP_CLAUSE_SPLIT_COUNT];
cclauses = cclauses_buf;
stmt = cp_parser_omp_loop (parser, pragma_tok, p_name, mask, NULL,
if_p);
break;
+ case PRAGMA_OMP_MASKED:
+ strcpy (p_name, "#pragma omp");
+ stmt = cp_parser_omp_masked (parser, pragma_tok, p_name, mask, NULL,
+ if_p);
+ break;
case PRAGMA_OMP_MASTER:
strcpy (p_name, "#pragma omp");
stmt = cp_parser_omp_master (parser, pragma_tok, p_name, mask, NULL,
case PRAGMA_OMP_DISTRIBUTE:
case PRAGMA_OMP_FOR:
case PRAGMA_OMP_LOOP:
+ case PRAGMA_OMP_MASKED:
case PRAGMA_OMP_MASTER:
case PRAGMA_OMP_PARALLEL:
case PRAGMA_OMP_SECTIONS:
case OMP_CLAUSE_PRIORITY:
case OMP_CLAUSE_ORDERED:
case OMP_CLAUSE_HINT:
+ case OMP_CLAUSE_FILTER:
case OMP_CLAUSE_NUM_GANGS:
case OMP_CLAUSE_NUM_WORKERS:
case OMP_CLAUSE_VECTOR_LENGTH:
break;
case OMP_SECTIONS:
+ case OMP_MASKED:
omp_parallel_combined_clauses = NULL;
/* FALLTHRU */
case OMP_SINGLE:
}
break;
+ case OMP_CLAUSE_FILTER:
+ t = OMP_CLAUSE_FILTER_EXPR (c);
+ if (t == error_mark_node)
+ remove = true;
+ else if (!type_dependent_expression_p (t)
+ && !INTEGRAL_TYPE_P (TREE_TYPE (t)))
+ {
+ error_at (OMP_CLAUSE_LOCATION (c),
+ "%<filter%> expression must be integral");
+ remove = true;
+ }
+ else
+ {
+ t = mark_rvalue_use (t);
+ if (!processing_template_decl)
+ {
+ t = maybe_constant_value (t);
+ t = fold_build_cleanup_point_expr (TREE_TYPE (t), t);
+ }
+ OMP_CLAUSE_FILTER_EXPR (c) = t;
+ }
+ break;
+
case OMP_CLAUSE_IS_DEVICE_PTR:
case OMP_CLAUSE_USE_DEVICE_PTR:
field_ok = (ort & C_ORT_OMP_DECLARE_SIMD) == C_ORT_OMP;
case GIMPLE_OMP_SECTION:
case GIMPLE_OMP_SINGLE:
case GIMPLE_OMP_MASTER:
+ case GIMPLE_OMP_MASKED:
case GIMPLE_OMP_TASKGROUP:
case GIMPLE_OMP_ORDERED:
case GIMPLE_OMP_SCAN:
}
}
+/* Dump a GIMPLE_OMP_MASKED tuple on the pretty_printer BUFFER.
+   SPC is the current indentation and FLAGS the active dump flags. */
+
+static void
+dump_gimple_omp_masked (pretty_printer *buffer, const gimple *gs,
+ int spc, dump_flags_t flags)
+{
+ if (flags & TDF_RAW)
+ {
+ /* Raw tuple form: code, body and clause list.  */
+ dump_gimple_fmt (buffer, spc, flags, "%G <%+BODY <%S>%nCLAUSES <", gs,
+ gimple_omp_body (gs));
+ dump_omp_clauses (buffer, gimple_omp_masked_clauses (gs), spc, flags);
+ dump_gimple_fmt (buffer, spc, flags, " >");
+ }
+ else
+ {
+ /* C-like form: the pragma line followed by a braced body.  */
+ pp_string (buffer, "#pragma omp masked");
+ dump_omp_clauses (buffer, gimple_omp_masked_clauses (gs), spc, flags);
+ if (!gimple_seq_empty_p (gimple_omp_body (gs)))
+ {
+ newline_and_indent (buffer, spc + 2);
+ pp_left_brace (buffer);
+ pp_newline (buffer);
+ dump_gimple_seq (buffer, gimple_omp_body (gs), spc + 4, flags);
+ newline_and_indent (buffer, spc + 2);
+ pp_right_brace (buffer);
+ }
+ }
+}
+
/* Dump a GIMPLE_OMP_TARGET tuple on the pretty_printer BUFFER. */
static void
dump_gimple_omp_taskgroup (buffer, gs, spc, flags);
break;
+ case GIMPLE_OMP_MASKED:
+ dump_gimple_omp_masked (buffer, gs, spc, flags);
+ break;
+
case GIMPLE_OMP_MASTER:
case GIMPLE_OMP_SECTION:
dump_gimple_omp_block (buffer, gs, spc, flags);
/* FALL THROUGH. */
case GIMPLE_OMP_CRITICAL:
case GIMPLE_OMP_MASTER:
+ case GIMPLE_OMP_MASKED:
case GIMPLE_OMP_TASKGROUP:
case GIMPLE_OMP_ORDERED:
case GIMPLE_OMP_SCAN:
return p;
}
+/* Build a GIMPLE_OMP_MASKED statement.
+
+ BODY is the sequence of statements to be executed by the selected thread(s).
+ CLAUSES is the OMP_CLAUSE chain associated with the construct. */
+
+gimple *
+gimple_build_omp_masked (gimple_seq body, tree clauses)
+{
+ gimple *p = gimple_alloc (GIMPLE_OMP_MASKED, 0);
+ gimple_omp_masked_set_clauses (p, clauses);
+ if (body)
+ gimple_omp_set_body (p, body);
+
+ return p;
+}
+
/* Build a GIMPLE_OMP_TASKGROUP statement.
BODY is the sequence of statements to be executed by the taskgroup
gimple_omp_set_body (copy, new_seq);
break;
+ case GIMPLE_OMP_MASKED:
+ t = unshare_expr (gimple_omp_masked_clauses (stmt));
+ gimple_omp_masked_set_clauses (copy, t);
+ goto copy_omp_body;
+
case GIMPLE_TRANSACTION:
new_seq = gimple_seq_copy (gimple_transaction_body (
as_a <gtransaction *> (stmt)));
BODY is the sequence of statements to execute in the master section. */
DEFGSCODE(GIMPLE_OMP_MASTER, "gimple_omp_master", GSS_OMP)
+/* GIMPLE_OMP_MASKED <BODY, CLAUSES> represents #pragma omp masked.
+ BODY is the sequence of statements to execute in the masked section. */
+DEFGSCODE(GIMPLE_OMP_MASKED, "gimple_omp_masked", GSS_OMP_SINGLE_LAYOUT)
+
/* GIMPLE_OMP_TASKGROUP <BODY, CLAUSES> represents #pragma omp taskgroup.
BODY is the sequence of statements inside the taskgroup section.
CLAUSES is an OMP_CLAUSE chain holding the associated clauses. */
tree, tree);
gimple *gimple_build_omp_section (gimple_seq);
gimple *gimple_build_omp_master (gimple_seq);
+gimple *gimple_build_omp_masked (gimple_seq, tree);
gimple *gimple_build_omp_taskgroup (gimple_seq, tree);
gomp_continue *gimple_build_omp_continue (tree, tree);
gomp_ordered *gimple_build_omp_ordered (gimple_seq, tree);
case GIMPLE_TRY:
case GIMPLE_OMP_FOR:
case GIMPLE_OMP_MASTER:
+ case GIMPLE_OMP_MASKED:
case GIMPLE_OMP_TASKGROUP:
case GIMPLE_OMP_ORDERED:
case GIMPLE_OMP_SECTION:
}
+/* Return the clauses associated with OMP masked statement GS. */
+
+static inline tree
+gimple_omp_masked_clauses (const gimple *gs)
+{
+ GIMPLE_CHECK (gs, GIMPLE_OMP_MASKED);
+ return
+ static_cast <const gimple_statement_omp_single_layout *> (gs)->clauses;
+}
+
+
+/* Return a pointer to the clauses associated with OMP masked statement
+ GS. */
+
+static inline tree *
+gimple_omp_masked_clauses_ptr (gimple *gs)
+{
+ GIMPLE_CHECK (gs, GIMPLE_OMP_MASKED);
+ return &static_cast <gimple_statement_omp_single_layout *> (gs)->clauses;
+}
+
+
+/* Set CLAUSES to be the clauses associated with OMP masked statement
+ GS. */
+
+static inline void
+gimple_omp_masked_set_clauses (gimple *gs, tree clauses)
+{
+ GIMPLE_CHECK (gs, GIMPLE_OMP_MASKED);
+ static_cast <gimple_statement_omp_single_layout *> (gs)->clauses
+ = clauses;
+}
+
+
/* Return the kind of the OMP_FOR statemement G. */
static inline int
case GIMPLE_OMP_TEAMS: \
case GIMPLE_OMP_SECTION: \
case GIMPLE_OMP_MASTER: \
+ case GIMPLE_OMP_MASKED: \
case GIMPLE_OMP_TASKGROUP: \
case GIMPLE_OMP_ORDERED: \
case GIMPLE_OMP_CRITICAL: \
case OMP_SECTION:
case OMP_SINGLE:
case OMP_MASTER:
+ case OMP_MASKED:
case OMP_TASKGROUP:
case OMP_ORDERED:
case OMP_CRITICAL:
case OMP_CLAUSE_PRIORITY:
case OMP_CLAUSE_GRAINSIZE:
case OMP_CLAUSE_NUM_TASKS:
+ case OMP_CLAUSE_FILTER:
case OMP_CLAUSE_HINT:
case OMP_CLAUSE_ASYNC:
case OMP_CLAUSE_WAIT:
case OMP_CLAUSE_VECTOR_LENGTH:
case OMP_CLAUSE_WORKER:
case OMP_CLAUSE_VECTOR:
- if (gimplify_expr (&OMP_CLAUSE_OPERAND (c, 0), pre_p, NULL,
- is_gimple_val, fb_rvalue) == GS_ERROR)
- remove = true;
+ if (OMP_CLAUSE_OPERAND (c, 0)
+ && !is_gimple_min_invariant (OMP_CLAUSE_OPERAND (c, 0)))
+ {
+ if (error_operand_p (OMP_CLAUSE_OPERAND (c, 0)))
+ {
+ remove = true;
+ break;
+ }
+ /* All these clauses care about value, not a particular decl,
+ so try to force it into a SSA_NAME or fresh temporary. */
+ OMP_CLAUSE_OPERAND (c, 0)
+ = get_initialized_tmp_var (OMP_CLAUSE_OPERAND (c, 0),
+ pre_p, NULL, true);
+ }
break;
case OMP_CLAUSE_GANG:
case OMP_CLAUSE_NOGROUP:
case OMP_CLAUSE_THREADS:
case OMP_CLAUSE_SIMD:
+ case OMP_CLAUSE_FILTER:
case OMP_CLAUSE_HINT:
case OMP_CLAUSE_DEFAULTMAP:
case OMP_CLAUSE_ORDER:
case OMP_SECTION:
case OMP_MASTER:
+ case OMP_MASKED:
case OMP_ORDERED:
case OMP_CRITICAL:
case OMP_SCAN:
case OMP_ORDERED:
g = gimplify_omp_ordered (*expr_p, body);
break;
+ case OMP_MASKED:
+ gimplify_scan_omp_clauses (&OMP_MASKED_CLAUSES (*expr_p),
+ pre_p, ORT_WORKSHARE, OMP_MASKED);
+ gimplify_adjust_omp_clauses (pre_p, body,
+ &OMP_MASKED_CLAUSES (*expr_p),
+ OMP_MASKED);
+ g = gimple_build_omp_masked (body,
+ OMP_MASKED_CLAUSES (*expr_p));
+ break;
case OMP_CRITICAL:
gimplify_scan_omp_clauses (&OMP_CRITICAL_CLAUSES (*expr_p),
pre_p, ORT_WORKSHARE, OMP_CRITICAL);
&& code != OMP_FOR
&& code != OACC_LOOP
&& code != OMP_MASTER
+ && code != OMP_MASKED
&& code != OMP_TASKGROUP
&& code != OMP_ORDERED
&& code != OMP_PARALLEL
si = gsi_last_nondebug_bb (entry_bb);
gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SINGLE
|| gimple_code (gsi_stmt (si)) == GIMPLE_OMP_MASTER
+ || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_MASKED
|| gimple_code (gsi_stmt (si)) == GIMPLE_OMP_TASKGROUP
|| gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ORDERED
|| gimple_code (gsi_stmt (si)) == GIMPLE_OMP_CRITICAL
}
/* FALLTHRU */
case GIMPLE_OMP_MASTER:
+ case GIMPLE_OMP_MASKED:
case GIMPLE_OMP_TASKGROUP:
case GIMPLE_OMP_CRITICAL:
case GIMPLE_OMP_TEAMS:
case GIMPLE_OMP_SINGLE:
case GIMPLE_OMP_TEAMS:
case GIMPLE_OMP_MASTER:
+ case GIMPLE_OMP_MASKED:
case GIMPLE_OMP_TASKGROUP:
case GIMPLE_OMP_CRITICAL:
case GIMPLE_OMP_SECTION:
case OMP_CLAUSE_NUM_WORKERS:
case OMP_CLAUSE_VECTOR_LENGTH:
case OMP_CLAUSE_DETACH:
+ case OMP_CLAUSE_FILTER:
if (ctx->outer)
scan_omp_op (&OMP_CLAUSE_OPERAND (c, 0), ctx->outer);
break;
case OMP_CLAUSE__SIMT_:
case OMP_CLAUSE_IF_PRESENT:
case OMP_CLAUSE_FINALIZE:
+ case OMP_CLAUSE_FILTER:
case OMP_CLAUSE__CONDTEMP_:
break;
case GIMPLE_OMP_SINGLE:
case GIMPLE_OMP_ORDERED:
case GIMPLE_OMP_MASTER:
+ case GIMPLE_OMP_MASKED:
case GIMPLE_OMP_TASK:
case GIMPLE_OMP_CRITICAL:
if (is_gimple_call (stmt))
error_at (gimple_location (stmt),
"barrier region may not be closely nested inside "
"of work-sharing, %<loop%>, %<critical%>, "
- "%<ordered%>, %<master%>, explicit %<task%> or "
- "%<taskloop%> region");
+ "%<ordered%>, %<master%>, %<masked%>, explicit "
+ "%<task%> or %<taskloop%> region");
return false;
}
error_at (gimple_location (stmt),
"work-sharing region may not be closely nested inside "
"of work-sharing, %<loop%>, %<critical%>, %<ordered%>, "
- "%<master%>, explicit %<task%> or %<taskloop%> region");
+ "%<master%>, %<masked%>, explicit %<task%> or "
+ "%<taskloop%> region");
return false;
case GIMPLE_OMP_PARALLEL:
case GIMPLE_OMP_TEAMS:
}
break;
case GIMPLE_OMP_MASTER:
+ case GIMPLE_OMP_MASKED:
for (; ctx != NULL; ctx = ctx->outer)
switch (gimple_code (ctx->stmt))
{
case GIMPLE_OMP_SINGLE:
case GIMPLE_OMP_TASK:
error_at (gimple_location (stmt),
- "%<master%> region may not be closely nested inside "
+ "%qs region may not be closely nested inside "
"of work-sharing, %<loop%>, explicit %<task%> or "
- "%<taskloop%> region");
+ "%<taskloop%> region",
+ gimple_code (stmt) == GIMPLE_OMP_MASTER
+ ? "master" : "masked");
return false;
case GIMPLE_OMP_PARALLEL:
case GIMPLE_OMP_TEAMS:
scan_omp (gimple_omp_body_ptr (stmt), ctx);
break;
+ case GIMPLE_OMP_MASKED:
+ ctx = new_omp_context (stmt, ctx);
+ scan_sharing_clauses (gimple_omp_masked_clauses (stmt), ctx);
+ scan_omp (gimple_omp_body_ptr (stmt), ctx);
+ break;
+
case GIMPLE_OMP_TASKGROUP:
ctx = new_omp_context (stmt, ctx);
scan_sharing_clauses (gimple_omp_taskgroup_clauses (stmt), ctx);
}
-/* Expand code for an OpenMP master directive. */
+/* Expand code for an OpenMP master or masked directive. */
static void
lower_omp_master (gimple_stmt_iterator *gsi_p, omp_context *ctx)
gbind *bind;
location_t loc = gimple_location (stmt);
gimple_seq tseq;
+ tree filter = integer_zero_node;
push_gimplify_context ();
+ if (gimple_code (stmt) == GIMPLE_OMP_MASKED)
+ {
+ filter = omp_find_clause (gimple_omp_masked_clauses (stmt),
+ OMP_CLAUSE_FILTER);
+ if (filter)
+ filter = fold_convert (integer_type_node,
+ OMP_CLAUSE_FILTER_EXPR (filter));
+ else
+ filter = integer_zero_node;
+ }
block = make_node (BLOCK);
bind = gimple_build_bind (NULL, NULL, block);
gsi_replace (gsi_p, bind, true);
bfn_decl = builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM);
x = build_call_expr_loc (loc, bfn_decl, 0);
- x = build2 (EQ_EXPR, boolean_type_node, x, integer_zero_node);
+ x = build2 (EQ_EXPR, boolean_type_node, x, filter);
x = build3 (COND_EXPR, void_type_node, x, NULL, build_and_jump (&lab));
tseq = NULL;
gimplify_and_add (x, &tseq);
lower_omp_single (gsi_p, ctx);
break;
case GIMPLE_OMP_MASTER:
+ case GIMPLE_OMP_MASKED:
ctx = maybe_lookup_ctx (stmt);
gcc_assert (ctx);
lower_omp_master (gsi_p, ctx);
case GIMPLE_OMP_SINGLE:
case GIMPLE_OMP_SECTION:
case GIMPLE_OMP_MASTER:
+ case GIMPLE_OMP_MASKED:
case GIMPLE_OMP_ORDERED:
case GIMPLE_OMP_SCAN:
case GIMPLE_OMP_CRITICAL:
case GIMPLE_OMP_SINGLE:
case GIMPLE_OMP_SECTION:
case GIMPLE_OMP_MASTER:
+ case GIMPLE_OMP_MASKED:
case GIMPLE_OMP_ORDERED:
case GIMPLE_OMP_SCAN:
case GIMPLE_OMP_CRITICAL:
/* { dg-do compile } */
/* { dg-additional-options "-Wuninitialized" } */
-/* { dg-excess-errors "PR70392" { xfail c++ } } */
#include <stdbool.h>
#pragma acc parallel if(l) /* { dg-warning "is used uninitialized" } */
;
- #pragma acc parallel if(b) /* { dg-warning "is used uninitialized" "" { xfail c++ } } */
+ #pragma acc parallel if(b) /* { dg-warning "is used uninitialized" } */
;
#pragma acc kernels if(l2) /* { dg-warning "is used uninitialized" } */
;
- #pragma acc kernels if(b2) /* { dg-warning "is used uninitialized" "" { xfail c++ } } */
+ #pragma acc kernels if(b2) /* { dg-warning "is used uninitialized" } */
;
#pragma acc data if(l3) /* { dg-warning "is used uninitialized" } */
;
- #pragma acc data if(b3) /* { dg-warning "is used uninitialized" "" { xfail c++ } } */
+ #pragma acc data if(b3) /* { dg-warning "is used uninitialized" } */
;
#pragma acc update if(l4) self(i) /* { dg-warning "is used uninitialized" } */
;
- #pragma acc update if(b4) self(i2) /* { dg-warning "is used uninitialized" "" { xfail c++ } } */
+ #pragma acc update if(b4) self(i2) /* { dg-warning "is used uninitialized" } */
;
}
i = p[0]++;
#pragma omp atomic capture hint(0) hint (0) /* { dg-error "too many 'hint' clauses" } */
i = p[0]++;
-
+ #pragma omp masked filter (0) filter (0) /* { dg-error "too many 'filter' clauses" } */
+ f0 ();
}
#pragma omp declare simd simdlen (4) simdlen (4) /* { dg-error "too many 'simdlen' clauses" } */
private (p) firstprivate (f) if (parallel: i2) default(shared) shared(s) reduction(+:r) \
num_threads (nth) proc_bind(spread) copyin(t) allocate (f)
;
+ #pragma omp parallel masked \
+ private (p) firstprivate (f) if (parallel: i2) default(shared) shared(s) reduction(+:r) \
+ num_threads (nth) proc_bind(spread) copyin(t) allocate (f) filter (d)
+ ;
#pragma omp taskgroup task_reduction (+:r2) allocate (r2)
#pragma omp master taskloop \
private (p) firstprivate (f) lastprivate (l) shared (s) default(shared) grainsize (g) collapse(1) untied if(taskloop: i1) final(fi) mergeable priority (pp) \
for (int i = 0; i < 64; i++)
ll++;
#pragma omp taskgroup task_reduction (+:r2) allocate (r2)
+ #pragma omp masked taskloop \
+ private (p) firstprivate (f) lastprivate (l) shared (s) default(shared) grainsize (g) collapse(1) untied if(taskloop: i1) final(fi) mergeable priority (pp) \
+ reduction(default, +:r) in_reduction(+:r2) allocate (f) filter (d)
+ for (int i = 0; i < 64; i++)
+ ll++;
+ #pragma omp taskgroup task_reduction (+:r2) allocate (r2)
#pragma omp master taskloop simd \
private (p) firstprivate (f) lastprivate (l) shared (s) default(shared) grainsize (g) collapse(1) untied if(taskloop: i1) if(simd: i2) final(fi) mergeable priority (pp) \
safelen(8) simdlen(4) linear(ll: 1) aligned(q: 32) reduction(default, +:r) in_reduction(+:r2) nontemporal(ntm) \
order(concurrent) allocate (f)
for (int i = 0; i < 64; i++)
ll++;
+ #pragma omp taskgroup task_reduction (+:r2) allocate (r2)
+ #pragma omp masked taskloop simd \
+ private (p) firstprivate (f) lastprivate (l) shared (s) default(shared) grainsize (g) collapse(1) untied if(taskloop: i1) if(simd: i2) final(fi) mergeable priority (pp) \
+ safelen(8) simdlen(4) linear(ll: 1) aligned(q: 32) reduction(default, +:r) in_reduction(+:r2) nontemporal(ntm) \
+ order(concurrent) allocate (f) filter (d)
+ for (int i = 0; i < 64; i++)
+ ll++;
#pragma omp parallel master taskloop \
private (p) firstprivate (f) lastprivate (l) shared (s) default(shared) grainsize (g) collapse(1) untied if(taskloop: i1) final(fi) mergeable priority (pp) \
reduction(default, +:r) if (parallel: i2) num_threads (nth) proc_bind(spread) copyin(t) allocate (f)
for (int i = 0; i < 64; i++)
ll++;
+ #pragma omp parallel masked taskloop \
+ private (p) firstprivate (f) lastprivate (l) shared (s) default(shared) grainsize (g) collapse(1) untied if(taskloop: i1) final(fi) mergeable priority (pp) \
+ reduction(default, +:r) if (parallel: i2) num_threads (nth) proc_bind(spread) copyin(t) allocate (f) filter (d)
+ for (int i = 0; i < 64; i++)
+ ll++;
#pragma omp parallel master taskloop simd \
private (p) firstprivate (f) lastprivate (l) shared (s) default(shared) grainsize (g) collapse(1) untied if(taskloop: i1) if(simd: i2) final(fi) mergeable priority (pp) \
safelen(8) simdlen(4) linear(ll: 1) aligned(q: 32) reduction(default, +:r) nontemporal(ntm) if (parallel: i2) num_threads (nth) proc_bind(spread) copyin(t) \
order(concurrent) allocate (f)
for (int i = 0; i < 64; i++)
ll++;
+ #pragma omp parallel masked taskloop simd \
+ private (p) firstprivate (f) lastprivate (l) shared (s) default(shared) grainsize (g) collapse(1) untied if(taskloop: i1) if(simd: i2) final(fi) mergeable priority (pp) \
+ safelen(8) simdlen(4) linear(ll: 1) aligned(q: 32) reduction(default, +:r) nontemporal(ntm) if (parallel: i2) num_threads (nth) proc_bind(spread) copyin(t) \
+ order(concurrent) allocate (f) filter (d)
+ for (int i = 0; i < 64; i++)
+ ll++;
#pragma omp taskgroup task_reduction (+:r2) allocate (r2)
#pragma omp master taskloop \
private (p) firstprivate (f) lastprivate (l) shared (s) default(shared) num_tasks (nta) collapse(1) untied if(i1) final(fi) mergeable priority (pp) \
for (int i = 0; i < 64; i++)
ll++;
#pragma omp taskgroup task_reduction (+:r2) allocate (r2)
+ #pragma omp masked taskloop \
+ private (p) firstprivate (f) lastprivate (l) shared (s) default(shared) num_tasks (nta) collapse(1) untied if(i1) final(fi) mergeable priority (pp) \
+ reduction(default, +:r) in_reduction(+:r2) filter (d)
+ for (int i = 0; i < 64; i++)
+ ll++;
+ #pragma omp taskgroup task_reduction (+:r2) allocate (r2)
#pragma omp master taskloop simd \
private (p) firstprivate (f) lastprivate (l) shared (s) default(shared) num_tasks (nta) collapse(1) untied if(i1) final(fi) mergeable priority (pp) \
safelen(8) simdlen(4) linear(ll: 1) aligned(q: 32) reduction(default, +:r) in_reduction(+:r2) nontemporal(ntm) \
order(concurrent) allocate (f)
for (int i = 0; i < 64; i++)
ll++;
+ #pragma omp taskgroup task_reduction (+:r2) allocate (r2)
+ #pragma omp masked taskloop simd \
+ private (p) firstprivate (f) lastprivate (l) shared (s) default(shared) num_tasks (nta) collapse(1) untied if(i1) final(fi) mergeable priority (pp) \
+ safelen(8) simdlen(4) linear(ll: 1) aligned(q: 32) reduction(default, +:r) in_reduction(+:r2) nontemporal(ntm) \
+ order(concurrent) allocate (f) filter (d)
+ for (int i = 0; i < 64; i++)
+ ll++;
#pragma omp parallel master taskloop \
private (p) firstprivate (f) lastprivate (l) shared (s) default(shared) num_tasks (nta) collapse(1) untied if(i1) final(fi) mergeable priority (pp) \
reduction(default, +:r) num_threads (nth) proc_bind(spread) copyin(t) allocate (f)
for (int i = 0; i < 64; i++)
ll++;
+ #pragma omp parallel masked taskloop \
+ private (p) firstprivate (f) lastprivate (l) shared (s) default(shared) num_tasks (nta) collapse(1) untied if(i1) final(fi) mergeable priority (pp) \
+ reduction(default, +:r) num_threads (nth) proc_bind(spread) copyin(t) allocate (f) filter (d)
+ for (int i = 0; i < 64; i++)
+ ll++;
#pragma omp parallel master taskloop simd \
private (p) firstprivate (f) lastprivate (l) shared (s) default(shared) num_tasks (nta) collapse(1) untied if(i1) final(fi) mergeable priority (pp) \
safelen(8) simdlen(4) linear(ll: 1) aligned(q: 32) reduction(default, +:r) nontemporal(ntm) num_threads (nth) proc_bind(spread) copyin(t) \
order(concurrent) allocate (f)
for (int i = 0; i < 64; i++)
ll++;
+ #pragma omp parallel masked taskloop simd \
+ private (p) firstprivate (f) lastprivate (l) shared (s) default(shared) num_tasks (nta) collapse(1) untied if(i1) final(fi) mergeable priority (pp) \
+ safelen(8) simdlen(4) linear(ll: 1) aligned(q: 32) reduction(default, +:r) nontemporal(ntm) num_threads (nth) proc_bind(spread) copyin(t) \
+ order(concurrent) allocate (f) filter (d)
+ for (int i = 0; i < 64; i++)
+ ll++;
#pragma omp loop bind(thread) order(concurrent) \
private (p) lastprivate (l) collapse(1) reduction(+:r)
for (l = 0; l < 64; ++l)
;
#pragma omp critical (baz) hint (2, 3) /* { dg-error "expected" } */
;
+ #pragma omp masked filter (3, 4) /* { dg-error "expected" } */
+ ;
}
--- /dev/null
+void bar (void);
+
+void
+foo (int x, int *a)
+{
+ #pragma omp masked
+ bar ();
+ #pragma omp masked filter (0)
+ bar ();
+ #pragma omp masked filter (7)
+ bar ();
+ #pragma omp masked filter (x)
+ bar ();
+ #pragma omp masked taskloop simd filter (x) grainsize (12) simdlen (4)
+ for (int i = 0; i < 128; i++)
+ a[i] = i;
+ #pragma omp parallel masked filter (x) firstprivate (x)
+ bar ();
+ #pragma omp masked
+ #pragma omp masked filter (0)
+ #pragma omp masked filter (x)
+ ;
+}
--- /dev/null
+void bar (void);
+struct S { int s; };
+
+void
+foo (float f, struct S s)
+{
+ #pragma omp masked filter (0.0) /* { dg-error "integral|integer" } */
+ bar ();
+ #pragma omp masked filter (s) /* { dg-error "integral|integer" } */
+ bar ();
+}
--- /dev/null
+void bar (int *);
+
+void
+foo (int *a, int f)
+{
+ int i, j, k, u = 0, v = 0, w = 0, x = 0, y = 0, z = 0;
+ #pragma omp parallel masked default(none) private (k) filter (f) firstprivate (f)
+ bar (&k);
+ #pragma omp parallel masked default(none) private (k)
+ bar (&k);
+ #pragma omp parallel default(none) firstprivate(a, f) shared(x, y, z)
+ {
+ #pragma omp masked taskloop reduction (+:x) default(none) firstprivate(a) filter (f)
+ for (i = 0; i < 64; i++)
+ x += a[i];
+ #pragma omp masked taskloop simd reduction (+:y) default(none) firstprivate(a) private (i) filter (f)
+ for (i = 0; i < 64; i++)
+ y += a[i];
+ #pragma omp masked taskloop simd reduction (+:y) default(none) firstprivate(a) private (i)
+ for (i = 0; i < 64; i++)
+ y += a[i];
+ #pragma omp masked taskloop simd collapse(2) reduction (+:z) default(none) firstprivate(a) private (i, j) filter (f)
+ for (j = 0; j < 1; j++)
+ for (i = 0; i < 64; ++i)
+ z += a[i];
+ }
+ #pragma omp parallel masked taskloop reduction (+:u) default(none) firstprivate(a, f) filter (f)
+ for (i = 0; i < 64; i++)
+ u += a[i];
+ #pragma omp parallel masked taskloop simd reduction (+:v) default(none) firstprivate(a, f) filter (f)
+ for (i = 0; i < 64; i++)
+ v += a[i];
+ #pragma omp parallel masked taskloop simd collapse(2) reduction (+:w) default(none) firstprivate(a, f) filter (f)
+ for (j = 0; j < 1; j++)
+ for (i = 0; i < 64; ++i)
+ w += a[i];
+}
--- /dev/null
+void
+foo (int *a)
+{
+ int i, r = 0, s = 0;
+ #pragma omp taskgroup task_reduction(+:r)
+ #pragma omp parallel masked taskloop in_reduction(+:r) /* { dg-error "'in_reduction' is not valid for '#pragma omp parallel masked taskloop'" } */
+ for (i = 0; i < 64; i++)
+ r += a[i];
+ #pragma omp taskgroup task_reduction(+:s)
+ #pragma omp parallel masked taskloop simd in_reduction(+:s) /* { dg-error "'in_reduction' is not valid for '#pragma omp parallel masked taskloop simd'" } */
+ for (i = 0; i < 64; i++)
+ s += a[i];
+}
private (p) firstprivate (f) if (parallel: i2) default(shared) shared(s) reduction(+:r)
num_threads (nth) proc_bind(spread) copyin(t) allocate (f))]]
;
+ [[omp::directive (parallel masked
+ private (p) firstprivate (f) if (parallel: i2) default(shared) shared(s) reduction(+:r)
+ num_threads (nth) proc_bind(spread) copyin(t) allocate (f) filter (d))]]
+ ;
[[omp::directive (parallel
private (p) firstprivate (f) if (parallel: i2) default(shared) shared(s) reduction(+:r)
num_threads (nth) proc_bind(spread) copyin(t) allocate (f))]]
reduction(default, +:r) in_reduction(+:r2) allocate (f)))]]
for (int i = 0; i < 64; i++)
ll++;
+ [[omp::sequence (directive (taskgroup task_reduction (+:r2) allocate (r2)),
+ omp::directive (masked taskloop
+ private (p) firstprivate (f) lastprivate (l) shared (s) default(shared) grainsize (g) collapse(1) untied if(taskloop: i1) final(fi) mergeable priority (pp)
+ reduction(default, +:r) in_reduction(+:r2) allocate (f) filter (d)))]]
+ for (int i = 0; i < 64; i++)
+ ll++;
[[omp::directive (master)]];
+ [[omp::directive (masked)]];
+ [[omp::directive (masked filter (d))]];
[[omp::sequence (omp::directive (taskgroup task_reduction (+:r2) allocate (r2)),
directive (master taskloop simd
private (p) firstprivate (f) lastprivate (l) shared (s) default(shared) grainsize (g) collapse(1) untied if(taskloop: i1) if(simd: i2) final(fi) mergeable priority (pp)
order(concurrent) allocate (f)))]]
for (int i = 0; i < 64; i++)
ll++;
+ [[omp::sequence (omp::directive (taskgroup task_reduction (+:r2) allocate (r2)),
+ directive (masked taskloop simd
+ private (p) firstprivate (f) lastprivate (l) shared (s) default(shared) grainsize (g) collapse(1) untied if(taskloop: i1) if(simd: i2) final(fi) mergeable priority (pp)
+ safelen(8) simdlen(4) linear(ll: 1) aligned(q: 32) reduction(default, +:r) in_reduction(+:r2) nontemporal(ntm)
+ order(concurrent) allocate (f) filter (d)))]]
+ for (int i = 0; i < 64; i++)
+ ll++;
[[omp::directive (parallel master taskloop
private (p) firstprivate (f) lastprivate (l) shared (s) default(shared) grainsize (g) collapse(1) untied if(taskloop: i1) final(fi) mergeable priority (pp)
reduction(default, +:r) if (parallel: i2) num_threads (nth) proc_bind(spread) copyin(t) allocate (f))]]
for (int i = 0; i < 64; i++)
ll++;
+ [[omp::directive (parallel masked taskloop
+ private (p) firstprivate (f) lastprivate (l) shared (s) default(shared) grainsize (g) collapse(1) untied if(taskloop: i1) final(fi) mergeable priority (pp)
+ reduction(default, +:r) if (parallel: i2) num_threads (nth) proc_bind(spread) copyin(t) allocate (f) filter (d))]]
+ for (int i = 0; i < 64; i++)
+ ll++;
[[omp::directive (parallel master taskloop simd
private (p) firstprivate (f) lastprivate (l) shared (s) default(shared) grainsize (g) collapse(1) untied if(taskloop: i1) if(simd: i2) final(fi) mergeable priority (pp)
safelen(8) simdlen(4) linear(ll: 1) aligned(q: 32) reduction(default, +:r) nontemporal(ntm) if (parallel: i2) num_threads (nth) proc_bind(spread) copyin(t)
order(concurrent) allocate (f))]]
for (int i = 0; i < 64; i++)
ll++;
+ [[omp::directive (parallel masked taskloop simd
+ private (p) firstprivate (f) lastprivate (l) shared (s) default(shared) grainsize (g) collapse(1) untied if(taskloop: i1) if(simd: i2) final(fi) mergeable priority (pp)
+ safelen(8) simdlen(4) linear(ll: 1) aligned(q: 32) reduction(default, +:r) nontemporal(ntm) if (parallel: i2) num_threads (nth) proc_bind(spread) copyin(t)
+ order(concurrent) allocate (f) filter (d))]]
+ for (int i = 0; i < 64; i++)
+ ll++;
[[omp::sequence (directive (taskgroup task_reduction (+:r2) allocate (r2)),
directive (master taskloop
private (p) firstprivate (f) lastprivate (l) shared (s) default(shared) num_tasks (nta) collapse(1) untied if(i1) final(fi) mergeable priority (pp)
reduction(default, +:r) in_reduction(+:r2)))]]
for (int i = 0; i < 64; i++)
ll++;
+ [[omp::sequence (directive (taskgroup task_reduction (+:r2) allocate (r2)),
+ directive (masked taskloop
+ private (p) firstprivate (f) lastprivate (l) shared (s) default(shared) num_tasks (nta) collapse(1) untied if(i1) final(fi) mergeable priority (pp)
+ reduction(default, +:r) in_reduction(+:r2) filter (d)))]]
+ for (int i = 0; i < 64; i++)
+ ll++;
[[omp::sequence (omp::directive (taskgroup task_reduction (+:r2) allocate (r2)),
omp::directive (master taskloop simd
private (p) firstprivate (f) lastprivate (l) shared (s) default(shared) num_tasks (nta) collapse(1) untied if(i1) final(fi) mergeable priority (pp)
order(concurrent) allocate (f)))]]
for (int i = 0; i < 64; i++)
ll++;
+ [[omp::sequence (omp::directive (taskgroup task_reduction (+:r2) allocate (r2)),
+ omp::directive (masked taskloop simd
+ private (p) firstprivate (f) lastprivate (l) shared (s) default(shared) num_tasks (nta) collapse(1) untied if(i1) final(fi) mergeable priority (pp)
+ safelen(8) simdlen(4) linear(ll: 1) aligned(q: 32) reduction(default, +:r) in_reduction(+:r2) nontemporal(ntm)
+ order(concurrent) allocate (f) filter (d)))]]
+ for (int i = 0; i < 64; i++)
+ ll++;
[[omp::directive (parallel master taskloop
private (p) firstprivate (f) lastprivate (l) shared (s) default(shared) num_tasks (nta) collapse(1) untied if(i1) final(fi) mergeable priority (pp)
reduction(default, +:r) num_threads (nth) proc_bind(spread) copyin(t) allocate (f))]]
for (int i = 0; i < 64; i++)
ll++;
+ [[omp::directive (parallel masked taskloop
+ private (p) firstprivate (f) lastprivate (l) shared (s) default(shared) num_tasks (nta) collapse(1) untied if(i1) final(fi) mergeable priority (pp)
+ reduction(default, +:r) num_threads (nth) proc_bind(spread) copyin(t) allocate (f) filter (d))]]
+ for (int i = 0; i < 64; i++)
+ ll++;
[[omp::directive (parallel master taskloop simd
private (p) firstprivate (f) lastprivate (l) shared (s) default(shared) num_tasks (nta) collapse(1) untied if(i1) final(fi) mergeable priority (pp)
safelen(8) simdlen(4) linear(ll: 1) aligned(q: 32) reduction(default, +:r) nontemporal(ntm) num_threads (nth) proc_bind(spread) copyin(t)
order(concurrent) allocate (f))]]
for (int i = 0; i < 64; i++)
ll++;
+ [[omp::directive (parallel masked taskloop simd
+ private (p) firstprivate (f) lastprivate (l) shared (s) default(shared) num_tasks (nta) collapse(1) untied if(i1) final(fi) mergeable priority (pp)
+ safelen(8) simdlen(4) linear(ll: 1) aligned(q: 32) reduction(default, +:r) nontemporal(ntm) num_threads (nth) proc_bind(spread) copyin(t)
+ order(concurrent) allocate (f) filter (d))]]
+ for (int i = 0; i < 64; i++)
+ ll++;
[[omp::directive (loop bind(thread) order(concurrent)
private (p) lastprivate (l) collapse(1) reduction(+:r))]]
for (l = 0; l < 64; ++l)
private (p),firstprivate (f),if (parallel: i2),default(shared),shared(s),reduction(+:r),
num_threads (nth),proc_bind(spread),copyin(t),allocate (f))]]
;
+ [[omp::directive (parallel masked,
+ private (p),firstprivate (f),if (parallel: i2),default(shared),shared(s),reduction(+:r),
+ num_threads (nth),proc_bind(spread),copyin(t),allocate (f),filter(d))]]
+ ;
[[omp::directive (parallel,
private (p),firstprivate (f),if (parallel: i2),default(shared),shared(s),reduction(+:r),
num_threads (nth),proc_bind(spread),copyin(t),allocate (f))]]
reduction(default, +:r),in_reduction(+:r2),allocate (f)))]]
for (int i = 0; i < 64; i++)
ll++;
+ [[using omp:sequence (directive (taskgroup, task_reduction (+:r2),allocate (r2)),
+ omp::directive (masked taskloop,
+ private (p),firstprivate (f),lastprivate (l),shared (s),default(shared),grainsize (g),collapse(1),untied, if(taskloop: i1),final(fi),mergeable, priority (pp),
+ reduction(default, +:r),in_reduction(+:r2),allocate (f),filter(d)))]]
+ for (int i = 0; i < 64; i++)
+ ll++;
[[using omp:directive (master)]];
+ [[using omp:directive (masked)]];
+ [[using omp:directive (masked,filter(d))]];
[[omp::sequence (omp::directive (taskgroup task_reduction (+:r2),allocate (r2)),
directive (master taskloop simd,
private (p),firstprivate (f),lastprivate (l),shared (s),default(shared),grainsize (g),collapse(1),untied,if(taskloop: i1),if(simd: i2),final(fi),mergeable,priority (pp),
order(concurrent),allocate (f)))]]
for (int i = 0; i < 64; i++)
ll++;
+ [[omp::sequence (omp::directive (taskgroup task_reduction (+:r2),allocate (r2)),
+ directive (masked taskloop simd,
+ private (p),firstprivate (f),lastprivate (l),shared (s),default(shared),grainsize (g),collapse(1),untied,if(taskloop: i1),if(simd: i2),final(fi),mergeable,priority (pp),
+ safelen(8),simdlen(4),linear(ll: 1),aligned(q: 32),reduction(default, +:r),in_reduction(+:r2),nontemporal(ntm),
+ order(concurrent),allocate (f),filter(d)))]]
+ for (int i = 0; i < 64; i++)
+ ll++;
[[omp::directive (parallel master taskloop,
private (p),firstprivate (f),lastprivate (l),shared (s),default(shared),grainsize (g),collapse(1),untied,if(taskloop: i1),final(fi),mergeable,priority (pp),
reduction(default, +:r),if (parallel: i2),num_threads (nth),proc_bind(spread),copyin(t),allocate (f))]]
for (int i = 0; i < 64; i++)
ll++;
+ [[omp::directive (parallel masked taskloop,
+ private (p),firstprivate (f),lastprivate (l),shared (s),default(shared),grainsize (g),collapse(1),untied,if(taskloop: i1),final(fi),mergeable,priority (pp),
+ reduction(default, +:r),if (parallel: i2),num_threads (nth),proc_bind(spread),copyin(t),allocate (f),filter(d))]]
+ for (int i = 0; i < 64; i++)
+ ll++;
[[omp::directive (parallel master taskloop simd,
private (p),firstprivate (f),lastprivate (l),shared (s),default(shared),grainsize (g),collapse(1),untied,if(taskloop: i1),if(simd: i2),final(fi),mergeable,priority (pp),
safelen(8),simdlen(4),linear(ll: 1),aligned(q: 32),reduction(default, +:r),nontemporal(ntm),if (parallel: i2),num_threads (nth),proc_bind(spread),copyin(t),
order(concurrent),allocate (f))]]
for (int i = 0; i < 64; i++)
ll++;
+ [[omp::directive (parallel masked taskloop simd,
+ private (p),firstprivate (f),lastprivate (l),shared (s),default(shared),grainsize (g),collapse(1),untied,if(taskloop: i1),if(simd: i2),final(fi),mergeable,priority (pp),
+ safelen(8),simdlen(4),linear(ll: 1),aligned(q: 32),reduction(default, +:r),nontemporal(ntm),if (parallel: i2),num_threads (nth),proc_bind(spread),copyin(t),
+ order(concurrent),allocate (f),filter(d))]]
+ for (int i = 0; i < 64; i++)
+ ll++;
[[omp::sequence (directive (taskgroup,task_reduction (+:r2),allocate (r2)),
directive (master taskloop,
private (p),firstprivate (f),lastprivate (l),shared (s),default(shared),num_tasks (nta),collapse(1),untied,if(i1),final(fi),mergeable,priority (pp),
reduction(default, +:r),in_reduction(+:r2)))]]
for (int i = 0; i < 64; i++)
ll++;
+ [[omp::sequence (directive (taskgroup,task_reduction (+:r2),allocate (r2)),
+ directive (masked taskloop,
+ private (p),firstprivate (f),lastprivate (l),shared (s),default(shared),num_tasks (nta),collapse(1),untied,if(i1),final(fi),mergeable,priority (pp),
+ reduction(default, +:r),in_reduction(+:r2),filter(d)))]]
+ for (int i = 0; i < 64; i++)
+ ll++;
[[omp::sequence (omp::directive (taskgroup,task_reduction (+:r2),allocate (r2)),
omp::directive (master taskloop simd,
private (p),firstprivate (f),lastprivate (l),shared (s),default(shared),num_tasks (nta),collapse(1),untied,if(i1),final(fi),mergeable,priority (pp),
order(concurrent),allocate (f)))]]
for (int i = 0; i < 64; i++)
ll++;
+ [[omp::sequence (omp::directive (taskgroup,task_reduction (+:r2),allocate (r2)),
+ omp::directive (masked taskloop simd,
+ private (p),firstprivate (f),lastprivate (l),shared (s),default(shared),num_tasks (nta),collapse(1),untied,if(i1),final(fi),mergeable,priority (pp),
+ safelen(8),simdlen(4),linear(ll: 1),aligned(q: 32),reduction(default, +:r),in_reduction(+:r2),nontemporal(ntm),
+ order(concurrent),allocate (f),filter(d)))]]
+ for (int i = 0; i < 64; i++)
+ ll++;
[[omp::directive (parallel master taskloop,
private (p),firstprivate (f),lastprivate (l),shared (s),default(shared),num_tasks (nta),collapse(1),untied if(i1),final(fi),mergeable priority (pp),
reduction(default, +:r),num_threads (nth),proc_bind(spread),copyin(t),allocate (f))]]
for (int i = 0; i < 64; i++)
ll++;
+ [[omp::directive (parallel masked taskloop,
+ private (p),firstprivate (f),lastprivate (l),shared (s),default(shared),num_tasks (nta),collapse(1),untied if(i1),final(fi),mergeable priority (pp),
+ reduction(default, +:r),num_threads (nth),proc_bind(spread),copyin(t),allocate (f),filter(d))]]
+ for (int i = 0; i < 64; i++)
+ ll++;
[[omp::directive (parallel master taskloop simd,
private (p),firstprivate (f),lastprivate (l),shared (s),default(shared),num_tasks (nta),collapse(1),untied if(i1),final(fi),mergeable priority (pp),
safelen(8),simdlen(4),linear(ll: 1),aligned(q: 32),reduction(default, +:r),nontemporal(ntm),num_threads (nth),proc_bind(spread),copyin(t),
order(concurrent),allocate (f))]]
for (int i = 0; i < 64; i++)
ll++;
+ [[omp::directive (parallel masked taskloop simd,
+ private (p),firstprivate (f),lastprivate (l),shared (s),default(shared),num_tasks (nta),collapse(1),untied if(i1),final(fi),mergeable priority (pp),
+ safelen(8),simdlen(4),linear(ll: 1),aligned(q: 32),reduction(default, +:r),nontemporal(ntm),num_threads (nth),proc_bind(spread),copyin(t),
+ order(concurrent),allocate (f),filter(d))]]
+ for (int i = 0; i < 64; i++)
+ ll++;
[[omp::directive (loop, bind(thread),order(concurrent),
private (p),lastprivate (l),collapse(1),reduction(+:r))]]
for (l = 0; l < 64; ++l)
+// { dg-do compile }
+
+void foo()
+{
+ #pragma omp masked
+ {
+ goto bad1; // { dg-message "from here" }
+ }
+
+ #pragma omp masked filter(1)
+ {
+ bad1: // { dg-error "jump" }
+ // { dg-message "exits OpenMP" "" { target *-*-* } .-1 }
+ return; // { dg-error "invalid exit" }
+ }
+}
+
+// { dg-message "error: invalid branch to/from OpenMP structured block" "" { target *-*-* } 7 }
/* PR c++/24516 */
/* { dg-do compile } */
--- /dev/null
+// { dg-do compile }
+// { dg-options "-fopenmp -fdump-tree-gimple" }
+
+int i;
+
+template <typename T> void f1 (bool p, T t)
+{
+ if (p)
+ {
+ #pragma omp masked filter (t)
+ i++;
+ }
+}
+
+void f2 ()
+{
+ f1<int> (true, 0);
+ f1<long> (true, 0L);
+}
+
+// { dg-final { scan-tree-dump-times "#pragma omp masked" 2 "gimple" } }
}
#pragma omp single /* { dg-error "may not be closely nested" } */
;
- #pragma omp master /* { dg-error "may not be closely nested" } */
- ;
+ #pragma omp master /* { dg-error "may not be closely nested" } */
+ ;
+ #pragma omp masked /* { dg-error "may not be closely nested" } */
+ ;
#pragma omp barrier /* { dg-error "may not be closely nested" } */
}
#pragma omp sections
;
}
#pragma omp sections
+ {
+ #pragma omp masked /* { dg-error "may not be closely nested" } */
+ ;
+ }
+ #pragma omp sections
{
#pragma omp section
;
#pragma omp section
#pragma omp master /* { dg-error "may not be closely nested" } */
;
+ #pragma omp section
+ #pragma omp masked /* { dg-error "may not be closely nested" } */
+ ;
}
#pragma omp single
{
;
#pragma omp master /* { dg-error "may not be closely nested" } */
;
+ #pragma omp masked /* { dg-error "may not be closely nested" } */
+ ;
#pragma omp barrier /* { dg-error "may not be closely nested" } */
}
#pragma omp master
;
#pragma omp barrier /* { dg-error "may not be closely nested" } */
}
+ #pragma omp masked filter (1)
+ {
+ #pragma omp for /* { dg-error "may not be closely nested" } */
+ for (j = 0; j < 3; j++)
+ ;
+ #pragma omp sections /* { dg-error "may not be closely nested" } */
+ {
+ ;
+ #pragma omp section
+ ;
+ }
+ #pragma omp single /* { dg-error "may not be closely nested" } */
+ ;
+ #pragma omp master
+ ;
+ #pragma omp barrier /* { dg-error "may not be closely nested" } */
+ }
#pragma omp task
{
#pragma omp for /* { dg-error "may not be closely nested" } */
;
#pragma omp master /* { dg-error "may not be closely nested" } */
;
+ #pragma omp masked /* { dg-error "may not be closely nested" } */
+ ;
#pragma omp barrier /* { dg-error "may not be closely nested" } */
}
#pragma omp parallel
;
#pragma omp master
;
+ #pragma omp masked
+ ;
#pragma omp barrier
}
}
;
#pragma omp master
;
+ #pragma omp masked
+ ;
#pragma omp barrier /* { dg-error "may not be closely nested" } */
}
}
!$acc host_data use_device(p) if (p == 42)
! { dg-final { scan-tree-dump-times "(?n)D\\.\[0-9\]+ = \\*p == 42;$" 1 "original" } }
! { dg-final { scan-tree-dump-times "(?n)#pragma acc host_data use_device_ptr\\(p\\) if\\(D\\.\[0-9\]+\\)$" 1 "original" } }
- ! { dg-final { scan-tree-dump-times "(?n)#pragma omp target oacc_host_data use_device_ptr\\(p\\) if\\(D\\.\[0-9\]+\\)$" 1 "gimple" } }
+ ! { dg-final { scan-tree-dump-times "(?n)#pragma omp target oacc_host_data use_device_ptr\\(p\\) if\\((?:D\\.|_)\[0-9\]+\\)$" 1 "gimple" } }
!$acc end host_data
!$acc host_data use_device(p) if_present if (p == 43)
! { dg-final { scan-tree-dump-times "(?n)D\\.\[0-9\]+ = \\*p == 43;$" 1 "original" } }
! { dg-final { scan-tree-dump-times "(?n)#pragma acc host_data use_device_ptr\\(p\\) if\\(D\\.\[0-9\]+\\) if_present$" 1 "original" } }
- ! { dg-final { scan-tree-dump-times "(?n)#pragma omp target oacc_host_data use_device_ptr\\(if_present:p\\) if\\(D\\.\[0-9\]+\\) if_present$" 1 "gimple" } }
+ ! { dg-final { scan-tree-dump-times "(?n)#pragma omp target oacc_host_data use_device_ptr\\(if_present:p\\) if\\((?:D\\.|_)\[0-9\]+\\) if_present$" 1 "gimple" } }
!$acc end host_data
end program test
! { dg-final { scan-tree-dump-times "map\\(force_deviceptr:u\\)" 1 "original" } }
-! { dg-final { scan-tree-dump-times {(?n)#pragma omp target oacc_data_kernels if\(D\.[0-9]+\)$} 1 "omp_oacc_kernels_decompose" } }
-! { dg-final { scan-tree-dump-times {(?n)#pragma omp target oacc_parallel_kernels_gang_single num_gangs\(1\) if\(D\.[0-9]+\) async\(-1\)$} 1 "omp_oacc_kernels_decompose" } }
+! { dg-final { scan-tree-dump-times {(?n)#pragma omp target oacc_data_kernels if\((?:D\.|_)[0-9]+\)$} 1 "omp_oacc_kernels_decompose" } }
+! { dg-final { scan-tree-dump-times {(?n)#pragma omp target oacc_parallel_kernels_gang_single num_gangs\(1\) if\((?:D\.|_)[0-9]+\) async\(-1\)$} 1 "omp_oacc_kernels_decompose" } }
/* OpenMP clause: bind (binding). */
OMP_CLAUSE_BIND,
+ /* OpenMP clause: filter (integer-expression). */
+ OMP_CLAUSE_FILTER,
+
/* Internally used only clause, holding SIMD uid. */
OMP_CLAUSE__SIMDUID_,
copy = gimple_build_omp_master (s1);
break;
+ case GIMPLE_OMP_MASKED:
+ s1 = remap_gimple_seq (gimple_omp_body (stmt), id);
+ copy = gimple_build_omp_masked
+ (s1, gimple_omp_masked_clauses (stmt));
+ break;
+
case GIMPLE_OMP_TASKGROUP:
s1 = remap_gimple_seq (gimple_omp_body (stmt), id);
copy = gimple_build_omp_taskgroup
case GIMPLE_OMP_TASK:
case GIMPLE_OMP_CRITICAL:
case GIMPLE_OMP_MASTER:
+ case GIMPLE_OMP_MASKED:
case GIMPLE_OMP_TASKGROUP:
case GIMPLE_OMP_ORDERED:
case GIMPLE_OMP_SCAN:
case OMP_CLAUSE_GRAINSIZE:
case OMP_CLAUSE_NUM_TASKS:
case OMP_CLAUSE_HINT:
+ case OMP_CLAUSE_FILTER:
case OMP_CLAUSE_NUM_GANGS:
case OMP_CLAUSE_NUM_WORKERS:
case OMP_CLAUSE_VECTOR_LENGTH:
case GIMPLE_OMP_SECTION:
case GIMPLE_OMP_MASTER:
+ case GIMPLE_OMP_MASKED:
case GIMPLE_OMP_ORDERED:
case GIMPLE_OMP_SCAN:
walk_body (convert_nonlocal_reference_stmt, convert_nonlocal_reference_op,
case OMP_CLAUSE_GRAINSIZE:
case OMP_CLAUSE_NUM_TASKS:
case OMP_CLAUSE_HINT:
+ case OMP_CLAUSE_FILTER:
case OMP_CLAUSE_NUM_GANGS:
case OMP_CLAUSE_NUM_WORKERS:
case OMP_CLAUSE_VECTOR_LENGTH:
case GIMPLE_OMP_SECTION:
case GIMPLE_OMP_MASTER:
+ case GIMPLE_OMP_MASKED:
case GIMPLE_OMP_ORDERED:
case GIMPLE_OMP_SCAN:
walk_body (convert_local_reference_stmt, convert_local_reference_op,
case GIMPLE_OMP_SECTION:
case GIMPLE_OMP_SINGLE:
case GIMPLE_OMP_MASTER:
+ case GIMPLE_OMP_MASKED:
case GIMPLE_OMP_TASKGROUP:
case GIMPLE_OMP_ORDERED:
case GIMPLE_OMP_SCAN:
pp_right_paren (pp);
break;
+ case OMP_CLAUSE_FILTER:
+ pp_string (pp, "filter(");
+ dump_generic_node (pp, OMP_CLAUSE_FILTER_EXPR (clause),
+ spc, flags, false);
+ pp_right_paren (pp);
+ break;
+
case OMP_CLAUSE_DEFAULTMAP:
pp_string (pp, "defaultmap(");
switch (OMP_CLAUSE_DEFAULTMAP_BEHAVIOR (clause))
pp_string (pp, "#pragma omp master");
goto dump_omp_body;
+ case OMP_MASKED:
+ pp_string (pp, "#pragma omp masked");
+ dump_omp_clauses (pp, OMP_MASKED_CLAUSES (node), spc, flags);
+ goto dump_omp_body;
+
case OMP_TASKGROUP:
pp_string (pp, "#pragma omp taskgroup");
dump_omp_clauses (pp, OMP_TASKGROUP_CLAUSES (node), spc, flags);
0, /* OMP_CLAUSE_DEFAULTMAP */
0, /* OMP_CLAUSE_ORDER */
0, /* OMP_CLAUSE_BIND */
+ 1, /* OMP_CLAUSE_FILTER */
1, /* OMP_CLAUSE__SIMDUID_ */
0, /* OMP_CLAUSE__SIMT_ */
0, /* OMP_CLAUSE_INDEPENDENT */
"defaultmap",
"order",
"bind",
+ "filter",
"_simduid_",
"_simt_",
"independent",
case OMP_CLAUSE_GRAINSIZE:
case OMP_CLAUSE_NUM_TASKS:
case OMP_CLAUSE_HINT:
+ case OMP_CLAUSE_FILTER:
case OMP_CLAUSE_TO_DECLARE:
case OMP_CLAUSE_LINK:
case OMP_CLAUSE_DETACH:
Operand 1: OMP_SINGLE_CLAUSES: List of clauses. */
DEFTREECODE (OMP_TASKGROUP, "omp_taskgroup", tcc_statement, 2)
+/* OpenMP - #pragma omp masked
+ Operand 0: OMP_MASKED_BODY: Masked section body.
+ Operand 1: OMP_MASKED_CLAUSES: List of clauses. */
+DEFTREECODE (OMP_MASKED, "omp_masked", tcc_statement, 2)
+
/* OpenMP - #pragma omp scan
Operand 0: OMP_SCAN_BODY: Scan body.
Operand 1: OMP_SCAN_CLAUSES: List of clauses. */
#define OMP_MASTER_BODY(NODE) TREE_OPERAND (OMP_MASTER_CHECK (NODE), 0)
+#define OMP_MASKED_BODY(NODE) TREE_OPERAND (OMP_MASKED_CHECK (NODE), 0)
+#define OMP_MASKED_CLAUSES(NODE) TREE_OPERAND (OMP_MASKED_CHECK (NODE), 1)
+
#define OMP_TASKGROUP_BODY(NODE) TREE_OPERAND (OMP_TASKGROUP_CHECK (NODE), 0)
#define OMP_TASKGROUP_CLAUSES(NODE) \
TREE_OPERAND (OMP_TASKGROUP_CHECK (NODE), 1)
#define OMP_MASTER_COMBINED(NODE) \
(OMP_MASTER_CHECK (NODE)->base.private_flag)
+/* True on an OMP_MASKED statement if it represents an explicit
+ combined masked construct.  */
+#define OMP_MASKED_COMBINED(NODE) \
+ (OMP_MASKED_CHECK (NODE)->base.private_flag)
+
/* Memory order for OMP_ATOMIC*. */
#define OMP_ATOMIC_MEMORY_ORDER(NODE) \
(TREE_RANGE_CHECK (NODE, OMP_ATOMIC, \
OMP_CLAUSE_OPERAND (OMP_CLAUSE_SUBCODE_CHECK (NODE, OMP_CLAUSE_NUM_TASKS), 0)
#define OMP_CLAUSE_HINT_EXPR(NODE) \
OMP_CLAUSE_OPERAND (OMP_CLAUSE_SUBCODE_CHECK (NODE, OMP_CLAUSE_HINT), 0)
+#define OMP_CLAUSE_FILTER_EXPR(NODE) \
+ OMP_CLAUSE_OPERAND (OMP_CLAUSE_SUBCODE_CHECK (NODE, OMP_CLAUSE_FILTER), 0)
#define OMP_CLAUSE_GRAINSIZE_EXPR(NODE) \
OMP_CLAUSE_OPERAND (OMP_CLAUSE_SUBCODE_CHECK (NODE, OMP_CLAUSE_GRAINSIZE),0)
--- /dev/null
+#include <omp.h>
+#include <stdlib.h>
+
+void
+foo (int x, int *a)
+{
+ #pragma omp masked
+ {
+ if (omp_get_thread_num () != 0)
+ abort ();
+ a[128]++;
+ }
+ #pragma omp masked filter (0)
+ {
+ if (omp_get_thread_num () != 0)
+ abort ();
+ a[129]++;
+ }
+ #pragma omp masked filter (7)
+ {
+ if (omp_get_thread_num () != 7)
+ abort ();
+ a[130]++;
+ }
+ #pragma omp masked filter (x)
+ {
+ if (omp_get_thread_num () != x)
+ abort ();
+ a[131]++;
+ }
+ #pragma omp masked taskloop simd filter (x) grainsize (12) simdlen (4)
+ for (int i = 0; i < 128; i++)
+ a[i] += i;
+}
+
+int
+main ()
+{
+ int a[136] = {};
+ #pragma omp parallel num_threads (4)
+ foo (4, a);
+ for (int i = 0; i < 128; i++)
+ if (a[i])
+ abort ();
+ if (a[128] != 1 || a[129] != 1 || a[130] || a[131])
+ abort ();
+ #pragma omp parallel num_threads (4)
+ foo (3, a);
+ for (int i = 0; i < 128; i++)
+ if (a[i] != i)
+ abort ();
+ if (a[128] != 2 || a[129] != 2 || a[130] || a[131] != 1)
+ abort ();
+ #pragma omp parallel num_threads (8)
+ foo (8, a);
+ for (int i = 0; i < 128; i++)
+ if (a[i] != i)
+ abort ();
+ if (a[128] != 3 || a[129] != 3 || a[130] != 1 || a[131] != 1)
+ abort ();
+ #pragma omp parallel num_threads (8)
+ foo (6, a);
+ for (int i = 0; i < 128; i++)
+ if (a[i] != 2 * i)
+ abort ();
+ if (a[128] != 4 || a[129] != 4 || a[130] != 2 || a[131] != 2)
+ abort ();
+ for (int i = 0; i < 8; i++)
+ a[i] = 0;
+ /* The filter expression can evaluate to different values in different threads. */
+ #pragma omp parallel masked num_threads (8) filter (omp_get_thread_num () + 1)
+ a[omp_get_thread_num ()]++;
+ for (int i = 0; i < 8; i++)
+ if (a[i])
+ abort ();
+ /* And multiple threads can be filtered. */
+ #pragma omp parallel masked num_threads (8) filter (omp_get_thread_num () & ~1)
+ a[omp_get_thread_num ()]++;
+ for (int i = 0; i < 8; i++)
+ if (a[i] != !(i & 1))
+ abort ();
+ return 0;
+}