extern const char *c_omp_map_clause_name (tree, bool);
extern void c_omp_adjust_map_clauses (tree, bool);
+enum c_omp_directive_kind {
+ C_OMP_DIR_STANDALONE,
+ C_OMP_DIR_CONSTRUCT,
+ C_OMP_DIR_DECLARATIVE,
+ C_OMP_DIR_UTILITY,
+ C_OMP_DIR_INFORMATIONAL
+};
+
+struct c_omp_directive {
+ const char *first, *second, *third;
+ unsigned int id;
+ enum c_omp_directive_kind kind;
+ bool simd;
+};
+
+extern const struct c_omp_directive *c_omp_categorize_directive (const char *,
+ const char *,
+ const char *);
+
/* Return next tree in the chain for chain_next walking of tree nodes. */
static inline tree
c_tree_chain_next (tree t)
}
}
}
+
+static const struct c_omp_directive omp_directives[] = {
+ /* Keep this alphabetically sorted by the first word. Non-null second/third
+ if any should precede null ones. */
+ { "allocate", nullptr, nullptr, PRAGMA_OMP_ALLOCATE,
+ C_OMP_DIR_DECLARATIVE, false },
+ /* { "assume", nullptr, nullptr, PRAGMA_OMP_ASSUME,
+ C_OMP_DIR_INFORMATIONAL, false }, */
+ /* { "assumes", nullptr, nullptr, PRAGMA_OMP_ASSUMES,
+ C_OMP_DIR_INFORMATIONAL, false }, */
+ { "atomic", nullptr, nullptr, PRAGMA_OMP_ATOMIC,
+ C_OMP_DIR_CONSTRUCT, false },
+ { "barrier", nullptr, nullptr, PRAGMA_OMP_BARRIER,
+ C_OMP_DIR_STANDALONE, false },
+ /* { "begin", "assumes", nullptr, PRAGMA_OMP_BEGIN,
+ C_OMP_DIR_INFORMATIONAL, false }, */
+ /* { "begin", "declare", "target", PRAGMA_OMP_BEGIN,
+ C_OMP_DIR_DECLARATIVE, false }, */
+ /* { "begin", "declare", "variant", PRAGMA_OMP_BEGIN,
+ C_OMP_DIR_DECLARATIVE, false }, */
+ /* { "begin", "metadirective", nullptr, PRAGMA_OMP_BEGIN,
+ C_OMP_DIR_???, ??? }, */
+ { "cancel", nullptr, nullptr, PRAGMA_OMP_CANCEL,
+ C_OMP_DIR_STANDALONE, false },
+ { "cancellation", "point", nullptr, PRAGMA_OMP_CANCELLATION_POINT,
+ C_OMP_DIR_STANDALONE, false },
+ { "critical", nullptr, nullptr, PRAGMA_OMP_CRITICAL,
+ C_OMP_DIR_CONSTRUCT, false },
+ /* { "declare", "mapper", nullptr, PRAGMA_OMP_DECLARE,
+ C_OMP_DIR_DECLARATIVE, false }, */
+ { "declare", "reduction", nullptr, PRAGMA_OMP_DECLARE,
+ C_OMP_DIR_DECLARATIVE, true },
+ { "declare", "simd", nullptr, PRAGMA_OMP_DECLARE,
+ C_OMP_DIR_DECLARATIVE, true },
+ { "declare", "target", nullptr, PRAGMA_OMP_DECLARE,
+ C_OMP_DIR_DECLARATIVE, false },
+ { "declare", "variant", nullptr, PRAGMA_OMP_DECLARE,
+ C_OMP_DIR_DECLARATIVE, false },
+ { "depobj", nullptr, nullptr, PRAGMA_OMP_DEPOBJ,
+ C_OMP_DIR_STANDALONE, false },
+ /* { "dispatch", nullptr, nullptr, PRAGMA_OMP_DISPATCH,
+ C_OMP_DIR_CONSTRUCT, false }, */
+ { "distribute", nullptr, nullptr, PRAGMA_OMP_DISTRIBUTE,
+ C_OMP_DIR_CONSTRUCT, true },
+ /* { "end", "assumes", nullptr, PRAGMA_OMP_END,
+ C_OMP_DIR_INFORMATIONAL, false }, */
+ { "end", "declare", "target", PRAGMA_OMP_END_DECLARE_TARGET,
+ C_OMP_DIR_DECLARATIVE, false },
+ /* { "end", "declare", "variant", PRAGMA_OMP_END,
+ C_OMP_DIR_DECLARATIVE, false }, */
+ /* { "end", "metadirective", nullptr, PRAGMA_OMP_END,
+ C_OMP_DIR_???, ??? }, */
+ /* error with at(execution) is C_OMP_DIR_STANDALONE. */
+ /* { "error", nullptr, nullptr, PRAGMA_OMP_ERROR,
+ C_OMP_DIR_UTILITY, false }, */
+ { "flush", nullptr, nullptr, PRAGMA_OMP_FLUSH,
+ C_OMP_DIR_STANDALONE, false },
+ { "for", nullptr, nullptr, PRAGMA_OMP_FOR,
+ C_OMP_DIR_CONSTRUCT, true },
+ /* { "interop", nullptr, nullptr, PRAGMA_OMP_INTEROP,
+ C_OMP_DIR_STANDALONE, false }, */
+ { "loop", nullptr, nullptr, PRAGMA_OMP_LOOP,
+ C_OMP_DIR_CONSTRUCT, true },
+ /* { "masked", nullptr, nullptr, PRAGMA_OMP_MASKED,
+ C_OMP_DIR_CONSTRUCT, true }, */
+ { "master", nullptr, nullptr, PRAGMA_OMP_MASTER,
+ C_OMP_DIR_CONSTRUCT, true },
+ /* { "metadirective", nullptr, nullptr, PRAGMA_OMP_METADIRECTIVE,
+ C_OMP_DIR_???, ??? }, */
+ /* { "nothing", nullptr, nullptr, PRAGMA_OMP_NOTHING,
+ C_OMP_DIR_UTILITY, false }, */
+ /* ordered with depend clause is C_OMP_DIR_STANDALONE. */
+ { "ordered", nullptr, nullptr, PRAGMA_OMP_ORDERED,
+ C_OMP_DIR_CONSTRUCT, true },
+ { "parallel", nullptr, nullptr, PRAGMA_OMP_PARALLEL,
+ C_OMP_DIR_CONSTRUCT, true },
+ { "requires", nullptr, nullptr, PRAGMA_OMP_REQUIRES,
+ C_OMP_DIR_INFORMATIONAL, false },
+ { "scan", nullptr, nullptr, PRAGMA_OMP_SCAN,
+ C_OMP_DIR_CONSTRUCT, true },
+ /* { "scope", nullptr, nullptr, PRAGMA_OMP_SCOPE,
+ C_OMP_DIR_CONSTRUCT, false }, */
+ { "section", nullptr, nullptr, PRAGMA_OMP_SECTION,
+ C_OMP_DIR_CONSTRUCT, false },
+ { "sections", nullptr, nullptr, PRAGMA_OMP_SECTIONS,
+ C_OMP_DIR_CONSTRUCT, false },
+ { "simd", nullptr, nullptr, PRAGMA_OMP_SIMD,
+ C_OMP_DIR_CONSTRUCT, true },
+ { "single", nullptr, nullptr, PRAGMA_OMP_SINGLE,
+ C_OMP_DIR_CONSTRUCT, false },
+ { "target", "data", nullptr, PRAGMA_OMP_TARGET,
+ C_OMP_DIR_CONSTRUCT, false },
+ { "target", "enter", "data", PRAGMA_OMP_TARGET,
+ C_OMP_DIR_STANDALONE, false },
+ { "target", "exit", "data", PRAGMA_OMP_TARGET,
+ C_OMP_DIR_STANDALONE, false },
+ { "target", "update", nullptr, PRAGMA_OMP_TARGET,
+ C_OMP_DIR_STANDALONE, false },
+ { "target", nullptr, nullptr, PRAGMA_OMP_TARGET,
+ C_OMP_DIR_CONSTRUCT, true },
+ { "task", nullptr, nullptr, PRAGMA_OMP_TASK,
+ C_OMP_DIR_CONSTRUCT, false },
+ { "taskgroup", nullptr, nullptr, PRAGMA_OMP_TASKGROUP,
+ C_OMP_DIR_CONSTRUCT, false },
+ { "taskloop", nullptr, nullptr, PRAGMA_OMP_TASKLOOP,
+ C_OMP_DIR_CONSTRUCT, true },
+ { "taskwait", nullptr, nullptr, PRAGMA_OMP_TASKWAIT,
+ C_OMP_DIR_STANDALONE, false },
+ { "taskyield", nullptr, nullptr, PRAGMA_OMP_TASKYIELD,
+ C_OMP_DIR_STANDALONE, false },
+ /* { "tile", nullptr, nullptr, PRAGMA_OMP_TILE,
+ C_OMP_DIR_CONSTRUCT, false }, */
+ { "teams", nullptr, nullptr, PRAGMA_OMP_TEAMS,
+ C_OMP_DIR_CONSTRUCT, true },
+ { "threadprivate", nullptr, nullptr, PRAGMA_OMP_THREADPRIVATE,
+ C_OMP_DIR_DECLARATIVE, false }
+ /* { "unroll", nullptr, nullptr, PRAGMA_OMP_UNROLL,
+ C_OMP_DIR_CONSTRUCT, false }, */
+};
+
+/* Find (non-combined/composite) OpenMP directive (if any) which starts
+ with FIRST keyword and for multi-word directives has SECOND and
+ THIRD keyword after it. */
+
+const struct c_omp_directive *
+c_omp_categorize_directive (const char *first, const char *second,
+ const char *third)
+{
+ const size_t n_omp_directives = ARRAY_SIZE (omp_directives);
+ for (size_t i = 0; i < n_omp_directives; i++)
+ {
+ if ((unsigned char) omp_directives[i].first[0]
+ < (unsigned char) first[0])
+ continue;
+ if ((unsigned char) omp_directives[i].first[0]
+ > (unsigned char) first[0])
+ break;
+ if (strcmp (omp_directives[i].first, first))
+ continue;
+ if (!omp_directives[i].second)
+ return &omp_directives[i];
+ if (!second || strcmp (omp_directives[i].second, second))
+ continue;
+ if (!omp_directives[i].third)
+ return &omp_directives[i];
+ if (!third || strcmp (omp_directives[i].third, third))
+ continue;
+ return &omp_directives[i];
+ }
+ return NULL;
+}
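+
+/* For example, for the attribute argument "cancellation point parallel"
+   the parser passes FIRST "cancellation", SECOND "point" and THIRD
+   "parallel"; the { "cancellation", "point", nullptr, ... } entry matches
+   because a null second/third keyword in the table accepts whatever
+   follows, so the directive is categorized as PRAGMA_OMP_CANCELLATION_POINT
+   with kind C_OMP_DIR_STANDALONE.  If no entry matches FIRST, NULL is
+   returned.  */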
CPTI_HEAP_DELETED_IDENTIFIER,
CPTI_HEAP_VEC_UNINIT_IDENTIFIER,
CPTI_HEAP_VEC_IDENTIFIER,
+ CPTI_OMP_IDENTIFIER,
CPTI_LANG_NAME_C,
CPTI_LANG_NAME_CPLUSPLUS,
#define heap_deleted_identifier cp_global_trees[CPTI_HEAP_DELETED_IDENTIFIER]
#define heap_vec_uninit_identifier cp_global_trees[CPTI_HEAP_VEC_UNINIT_IDENTIFIER]
#define heap_vec_identifier cp_global_trees[CPTI_HEAP_VEC_IDENTIFIER]
+#define omp_identifier cp_global_trees[CPTI_OMP_IDENTIFIER]
#define lang_name_c cp_global_trees[CPTI_LANG_NAME_C]
#define lang_name_cplusplus cp_global_trees[CPTI_LANG_NAME_CPLUSPLUS]
{"heap deleted", &heap_deleted_identifier, cik_normal},
{"heap [] uninit", &heap_vec_uninit_identifier, cik_normal},
{"heap []", &heap_vec_identifier, cik_normal},
+ {"omp", &omp_identifier, cik_normal},
{NULL, NULL, cik_normal}
};
cp_check_const_attributes (attributes);
+ if ((flag_openmp || flag_openmp_simd) && attributes != error_mark_node)
+ {
+ bool diagnosed = false;
+ for (tree *pa = &attributes; *pa; )
+ {
+ if (get_attribute_namespace (*pa) == omp_identifier)
+ {
+ tree name = get_attribute_name (*pa);
+ if (is_attribute_p ("directive", name)
+ || is_attribute_p ("sequence", name))
+ {
+ if (!diagnosed)
+ {
+ error ("%<omp::%E%> not allowed to be specified in this "
+ "context", name);
+ diagnosed = true;
+ }
+ *pa = TREE_CHAIN (*pa);
+ continue;
+ }
+ }
+ pa = &TREE_CHAIN (*pa);
+ }
+ }
+
if (TREE_CODE (*decl) == TEMPLATE_DECL)
decl = &DECL_TEMPLATE_RESULT (*decl);
-
/* -*- C++ -*- Parser.
Copyright (C) 2000-2021 Free Software Foundation, Inc.
Written by Mark Mitchell <mark@codesourcery.com>.
/* Ensure that the pragma is not parsed again. */
cp_lexer_purge_tokens_after (parser->lexer, pragma_tok);
parser->lexer->in_pragma = false;
+ if (parser->lexer->in_omp_attribute_pragma
+ && cp_lexer_next_token_is (parser->lexer, CPP_EOF))
+ {
+ parser->lexer = parser->lexer->next;
+ /* Put the current source position back where it was before this
+ lexer was pushed. */
+ cp_lexer_set_source_position_from_token (parser->lexer->next_token);
+ }
}
}
parser->lexer->in_pragma = false;
if (!cp_parser_require (parser, CPP_PRAGMA_EOL, RT_PRAGMA_EOL))
cp_parser_skip_to_pragma_eol (parser, pragma_tok);
+ else if (parser->lexer->in_omp_attribute_pragma
+ && cp_lexer_next_token_is (parser->lexer, CPP_EOF))
+ {
+ parser->lexer = parser->lexer->next;
+ /* Put the current source position back where it was before this
+ lexer was pushed. */
+ cp_lexer_set_source_position_from_token (parser->lexer->next_token);
+ }
}
/* This is a simple wrapper around make_typename_type. When the id is
add_stmt (stmt);
}
+struct cp_omp_attribute_data
+{
+ cp_token_cache *tokens;
+ const c_omp_directive *dir;
+ c_omp_directive_kind kind;
+};
+
+/* Handle omp::directive and omp::sequence attributes in ATTRS
+ (if any) at the start of a statement. */
+
+static tree
+cp_parser_handle_statement_omp_attributes (cp_parser *parser, tree attrs)
+{
+ if (!flag_openmp && !flag_openmp_simd)
+ return attrs;
+
+ auto_vec<cp_omp_attribute_data, 16> vec;
+ int cnt = 0;
+ int tokens = 0;
+ for (tree *pa = &attrs; *pa; )
+ if (get_attribute_namespace (*pa) == omp_identifier
+ && is_attribute_p ("directive", get_attribute_name (*pa)))
+ {
+ cnt++;
+ for (tree a = TREE_VALUE (*pa); a; a = TREE_CHAIN (a))
+ {
+ tree d = TREE_VALUE (a);
+ gcc_assert (TREE_CODE (d) == DEFERRED_PARSE);
+ cp_token *first = DEFPARSE_TOKENS (d)->first;
+ cp_token *last = DEFPARSE_TOKENS (d)->last;
+ const char *directive[3] = {};
+ for (int i = 0; i < 3; i++)
+ {
+ tree id = NULL_TREE;
+ if (first + i == last)
+ break;
+ if (first[i].type == CPP_NAME)
+ id = first[i].u.value;
+ else if (first[i].type == CPP_KEYWORD)
+ id = ridpointers[(int) first[i].keyword];
+ else
+ break;
+ directive[i] = IDENTIFIER_POINTER (id);
+ }
+ const c_omp_directive *dir = NULL;
+ if (directive[0])
+ dir = c_omp_categorize_directive (directive[0], directive[1],
+ directive[2]);
+ if (dir == NULL)
+ {
+ error_at (first->location,
+ "unknown OpenMP directive name in %<omp::directive%>"
+ " attribute argument");
+ continue;
+ }
+ c_omp_directive_kind kind = dir->kind;
+ if (dir->id == PRAGMA_OMP_ORDERED)
+ {
+ /* ordered is C_OMP_DIR_CONSTRUCT only if it doesn't contain
+ depend clause. */
+ if (directive[1] && strcmp (directive[1], "depend") == 0)
+ kind = C_OMP_DIR_STANDALONE;
+ else if (first + 2 < last
+ && first[1].type == CPP_COMMA
+ && first[2].type == CPP_NAME
+ && strcmp (IDENTIFIER_POINTER (first[2].u.value),
+ "depend") == 0)
+ kind = C_OMP_DIR_STANDALONE;
+ }
+ /* else if (dir->id == PRAGMA_OMP_ERROR)
+ {
+ error with at(execution) clause is C_OMP_DIR_STANDALONE.
+ } */
+ cp_omp_attribute_data v = { DEFPARSE_TOKENS (d), dir, kind };
+ vec.safe_push (v);
+ if (flag_openmp || dir->simd)
+ tokens += (last - first) + 1;
+ }
+ cp_omp_attribute_data v = {};
+ vec.safe_push (v);
+ *pa = TREE_CHAIN (*pa);
+ }
+ else
+ pa = &TREE_CHAIN (*pa);
+
+ unsigned int i;
+ cp_omp_attribute_data *v;
+ cp_omp_attribute_data *construct_seen = nullptr;
+ cp_omp_attribute_data *standalone_seen = nullptr;
+ cp_omp_attribute_data *prev_standalone_seen = nullptr;
+ FOR_EACH_VEC_ELT (vec, i, v)
+ if (v->tokens)
+ {
+ if (v->kind == C_OMP_DIR_CONSTRUCT && !construct_seen)
+ construct_seen = v;
+ else if (v->kind == C_OMP_DIR_STANDALONE && !standalone_seen)
+ standalone_seen = v;
+ }
+ else
+ {
+ if (standalone_seen && !prev_standalone_seen)
+ {
+ prev_standalone_seen = standalone_seen;
+ standalone_seen = nullptr;
+ }
+ }
+
+ if (cnt > 1 && construct_seen)
+ {
+ error_at (construct_seen->tokens->first->location,
+ "OpenMP construct among %<omp::directive%> attributes"
+ " requires all %<omp::directive%> attributes on the"
+ " same statement to be in the same %<omp::sequence%>");
+ return attrs;
+ }
+ if (cnt > 1 && standalone_seen && prev_standalone_seen)
+ {
+ error_at (standalone_seen->tokens->first->location,
+ "multiple OpenMP standalone directives among"
+ " %<omp::directive%> attributes must be all within the"
+ " same %<omp::sequence%>");
+ return attrs;
+ }
+
+ if (prev_standalone_seen)
+ standalone_seen = prev_standalone_seen;
+ if (standalone_seen
+ && !cp_lexer_next_token_is (parser->lexer, CPP_SEMICOLON))
+ {
+ error_at (standalone_seen->tokens->first->location,
+ "standalone OpenMP directives in %<omp::directive%> attribute"
+ " can only appear on an empty statement");
+ return attrs;
+ }
+
+ if (!tokens)
+ return attrs;
+ tokens++;
+ cp_lexer *lexer = cp_lexer_alloc ();
+ lexer->debugging_p = parser->lexer->debugging_p;
+ vec_safe_reserve (lexer->buffer, tokens, true);
+ FOR_EACH_VEC_ELT (vec, i, v)
+ {
+ if (!v->tokens)
+ continue;
+ if (!flag_openmp && !v->dir->simd)
+ continue;
+ cp_token *first = v->tokens->first;
+ cp_token *last = v->tokens->last;
+ cp_token tok = {};
+ tok.type = CPP_PRAGMA;
+ tok.keyword = RID_MAX;
+ tok.u.value = build_int_cst (NULL, v->dir->id);
+ tok.location = first->location;
+ lexer->buffer->quick_push (tok);
+ while (++first < last)
+ lexer->buffer->quick_push (*first);
+ tok = {};
+ tok.type = CPP_PRAGMA_EOL;
+ tok.keyword = RID_MAX;
+ tok.location = last->location;
+ lexer->buffer->quick_push (tok);
+ }
+ cp_token tok = {};
+ tok.type = CPP_EOF;
+ tok.keyword = RID_MAX;
+ tok.location = lexer->buffer->last ().location;
+ lexer->buffer->quick_push (tok);
+ lexer->next = parser->lexer;
+ lexer->next_token = lexer->buffer->address ();
+ lexer->last_token = lexer->next_token
+ + lexer->buffer->length ()
+ - 1;
+ lexer->in_omp_attribute_pragma = true;
+ parser->lexer = lexer;
+ /* Move the current source position to that of the first token in the
+ new lexer. */
+ cp_lexer_set_source_position_from_token (lexer->next_token);
+ return attrs;
+}
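+
+/* For example, for [[omp::directive (barrier)]]; the deferred attribute
+   tokens are replayed on the temporary lexer built above: a CPP_PRAGMA
+   token carrying PRAGMA_OMP_BARRIER, a CPP_PRAGMA_EOL and a final CPP_EOF,
+   so the empty statement is then parsed the same way as
+   #pragma omp barrier would be.  The in_omp_attribute_pragma flag lets the
+   pragma parsing code pop this lexer again once it reaches the CPP_EOF.  */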
+
/* Parse a statement.
statement:
tree statement, std_attrs = NULL_TREE;
cp_token *token;
location_t statement_location, attrs_loc;
+ bool in_omp_attribute_pragma;
restart:
+ in_omp_attribute_pragma = parser->lexer->in_omp_attribute_pragma;
if (if_p != NULL)
*if_p = false;
/* There is no statement yet. */
std_attrs = NULL_TREE;
}
+ if (std_attrs && (flag_openmp || flag_openmp_simd))
+ std_attrs = cp_parser_handle_statement_omp_attributes (parser, std_attrs);
+
/* Peek at the next token. */
token = cp_lexer_peek_token (parser->lexer);
/* Remember the location of the first token in the statement. */
a statement all its own. */
else if (token->type == CPP_PRAGMA)
{
+ cp_lexer *lexer = parser->lexer;
+ bool do_restart = false;
/* Only certain OpenMP pragmas are attached to statements, and thus
are considered statements themselves. All others are not. In
the context of a compound, accept the pragma as a "statement" and
if (in_compound)
cp_parser_pragma (parser, pragma_compound, if_p);
else if (!cp_parser_pragma (parser, pragma_stmt, if_p))
+ do_restart = true;
+ if (lexer->in_omp_attribute_pragma && !in_omp_attribute_pragma)
+ {
+ gcc_assert (parser->lexer != lexer);
+ cp_lexer_destroy (lexer);
+ }
+ if (do_restart)
goto restart;
return;
}
return nreverse (attribute_list);
}
+/* Parse arguments of omp::directive attribute.
+
+ ( directive-name ,[opt] clause-list[opt] )
+
+ For directive just remember the first/last tokens for subsequent
+ parsing. */
+
+static void
+cp_parser_omp_directive_args (cp_parser *parser, tree attribute)
+{
+ cp_token *first = cp_lexer_peek_nth_token (parser->lexer, 2);
+ if (first->type == CPP_CLOSE_PAREN)
+ {
+ cp_lexer_consume_token (parser->lexer);
+ error_at (first->location, "expected OpenMP directive name");
+ cp_lexer_consume_token (parser->lexer);
+ TREE_VALUE (attribute) = NULL_TREE;
+ return;
+ }
+ for (size_t n = cp_parser_skip_balanced_tokens (parser, 1) - 2; n; --n)
+ cp_lexer_consume_token (parser->lexer);
+ cp_token *last = cp_lexer_peek_token (parser->lexer);
+ cp_lexer_consume_token (parser->lexer);
+ tree arg = make_node (DEFERRED_PARSE);
+ DEFPARSE_TOKENS (arg) = cp_token_cache_new (first, last);
+ DEFPARSE_INSTANTIATIONS (arg) = nullptr;
+ TREE_VALUE (attribute) = tree_cons (NULL_TREE, arg, TREE_VALUE (attribute));
+}
+
+/* Parse arguments of omp::sequence attribute.
+
+ ( omp::[opt] directive-attr [ , omp::[opt] directive-attr ]... ) */
+
+static void
+cp_parser_omp_sequence_args (cp_parser *parser, tree attribute)
+{
+ matching_parens parens;
+ parens.consume_open (parser);
+ do
+ {
+ cp_token *token = cp_lexer_peek_token (parser->lexer);
+ if (token->type == CPP_NAME
+ && token->u.value == omp_identifier
+ && cp_lexer_nth_token_is (parser->lexer, 2, CPP_SCOPE))
+ {
+ cp_lexer_consume_token (parser->lexer);
+ cp_lexer_consume_token (parser->lexer);
+ token = cp_lexer_peek_token (parser->lexer);
+ }
+ bool directive = false;
+ const char *p;
+ if (token->type != CPP_NAME)
+ p = "";
+ else
+ p = IDENTIFIER_POINTER (token->u.value);
+ if (strcmp (p, "directive") == 0)
+ directive = true;
+ else if (strcmp (p, "sequence") != 0)
+ {
+ error_at (token->location, "expected %<directive%> or %<sequence%>");
+ cp_parser_skip_to_closing_parenthesis (parser,
+ /*recovering=*/true,
+ /*or_comma=*/true,
+ /*consume_paren=*/false);
+ if (cp_lexer_next_token_is_not (parser->lexer, CPP_COMMA))
+ break;
+ cp_lexer_consume_token (parser->lexer);
+ }
+ cp_lexer_consume_token (parser->lexer);
+ if (cp_lexer_next_token_is_not (parser->lexer, CPP_OPEN_PAREN))
+ cp_parser_required_error (parser, RT_OPEN_PAREN, false,
+ UNKNOWN_LOCATION);
+ else if (directive)
+ cp_parser_omp_directive_args (parser, attribute);
+ else
+ cp_parser_omp_sequence_args (parser, attribute);
+ if (cp_lexer_next_token_is_not (parser->lexer, CPP_COMMA))
+ break;
+ cp_lexer_consume_token (parser->lexer);
+ }
+ while (1);
+ if (!parens.require_close (parser))
+ cp_parser_skip_to_closing_parenthesis (parser, true, false,
+ /*consume_paren=*/true);
+}
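+
+/* For example, [[omp::sequence (directive (parallel), omp::directive (for))]]
+   calls cp_parser_omp_directive_args once per element, so the attribute
+   accumulates one deferred-parse token range per directive; a nested
+   omp::sequence argument simply recurses into this function.  */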
+
/* Parse a standard C++11 attribute.
The returned representation is a TREE_LIST which TREE_PURPOSE is
/* Now parse the optional argument clause of the attribute. */
if (token->type != CPP_OPEN_PAREN)
- return attribute;
+ {
+ if ((flag_openmp || flag_openmp_simd)
+ && attr_ns == omp_identifier
+ && (is_attribute_p ("directive", attr_id)
+ || is_attribute_p ("sequence", attr_id)))
+ {
+ error_at (token->location, "%<omp::%E%> attribute requires argument",
+ attr_id);
+ return NULL_TREE;
+ }
+ return attribute;
+ }
{
vec<tree, va_gc> *vec;
if (as == NULL)
{
+ if ((flag_openmp || flag_openmp_simd) && attr_ns == omp_identifier)
+ {
+ if (is_attribute_p ("directive", attr_id))
+ {
+ cp_parser_omp_directive_args (parser, attribute);
+ return attribute;
+ }
+ else if (is_attribute_p ("sequence", attr_id))
+ {
+ TREE_VALUE (TREE_PURPOSE (attribute))
+ = get_identifier ("directive");
+ cp_parser_omp_sequence_args (parser, attribute);
+ TREE_VALUE (attribute) = nreverse (TREE_VALUE (attribute));
+ return attribute;
+ }
+ }
+
/* For unknown attributes, just skip balanced tokens instead of
trying to parse the arguments. */
for (size_t n = cp_parser_skip_balanced_tokens (parser, 1) - 1; n; --n)
if (nested && cp_lexer_next_token_is (parser->lexer, CPP_CLOSE_PAREN))
break;
- if (!first)
+ if (!first
+ /* OpenMP 5.1 allows optional comma in between directive-name and
+ clauses everywhere, but as we aren't done with OpenMP 5.0
+ implementation yet, let's allow it for now only in C++11
+ attributes. */
+ || (parser->lexer->in_omp_attribute_pragma && nested != 2))
{
if (cp_lexer_next_token_is (parser->lexer, CPP_COMMA))
cp_lexer_consume_token (parser->lexer);
location_t loc = pragma_tok->location;
tree nl = cp_parser_omp_var_list (parser, OMP_CLAUSE_ALLOCATE, NULL_TREE);
+ /* For now only in C++ attributes, do it always for OpenMP 5.1. */
+ if (parser->lexer->in_omp_attribute_pragma
+ && cp_lexer_next_token_is (parser->lexer, CPP_COMMA)
+ && cp_lexer_nth_token_is (parser->lexer, 2, CPP_NAME))
+ cp_lexer_consume_token (parser->lexer);
+
if (cp_lexer_next_token_is (parser->lexer, CPP_NAME))
{
matching_parens parens;
while (cp_lexer_next_token_is_not (parser->lexer, CPP_PRAGMA_EOL))
{
- if (!first
+ /* For now only in C++ attributes, do it always for OpenMP 5.1. */
+ if ((!first || parser->lexer->in_omp_attribute_pragma)
&& cp_lexer_next_token_is (parser->lexer, CPP_COMMA)
&& cp_lexer_nth_token_is (parser->lexer, 2, CPP_NAME))
cp_lexer_consume_token (parser->lexer);
tree clause = NULL_TREE;
enum omp_clause_depend_kind kind = OMP_CLAUSE_DEPEND_SOURCE;
location_t c_loc = cp_lexer_peek_token (parser->lexer)->location;
+ /* For now only in C++ attributes, do it always for OpenMP 5.1. */
+ if (parser->lexer->in_omp_attribute_pragma
+ && cp_lexer_next_token_is (parser->lexer, CPP_COMMA))
+ cp_lexer_consume_token (parser->lexer);
if (cp_lexer_next_token_is (parser->lexer, CPP_NAME))
{
tree id = cp_lexer_peek_token (parser->lexer)->u.value;
cp_parser_omp_flush (cp_parser *parser, cp_token *pragma_tok)
{
enum memmodel mo = MEMMODEL_LAST;
+ /* For now only in C++ attributes, do it always for OpenMP 5.1. */
+ if (parser->lexer->in_omp_attribute_pragma
+ && cp_lexer_next_token_is (parser->lexer, CPP_COMMA)
+ && cp_lexer_nth_token_is (parser->lexer, 2, CPP_NAME))
+ cp_lexer_consume_token (parser->lexer);
if (cp_lexer_next_token_is (parser->lexer, CPP_NAME))
{
tree id = cp_lexer_peek_token (parser->lexer)->u.value;
enum pragma_context context, bool *if_p)
{
location_t loc = pragma_tok->location;
+ int n = 1;
- if (cp_lexer_next_token_is (parser->lexer, CPP_NAME))
+ /* For now only in C++ attributes, do it always for OpenMP 5.1. */
+ if (parser->lexer->in_omp_attribute_pragma
+ && cp_lexer_next_token_is (parser->lexer, CPP_COMMA))
+ n = 2;
+
+ if (cp_lexer_nth_token_is (parser->lexer, n, CPP_NAME))
{
- tree id = cp_lexer_peek_token (parser->lexer)->u.value;
+ tree id = cp_lexer_peek_nth_token (parser->lexer, n)->u.value;
const char *p = IDENTIFIER_POINTER (id);
if (strcmp (p, "depend") == 0)
data.error_seen = false;
data.fndecl_seen = false;
data.variant_p = variant_p;
+ data.in_omp_attribute_pragma = parser->lexer->in_omp_attribute_pragma;
data.tokens = vNULL;
data.clauses = NULL_TREE;
/* It is safe to take the address of a local variable; it will only be
static tree
cp_finish_omp_declare_variant (cp_parser *parser, cp_token *pragma_tok,
- tree attrs)
+ tree attrs, bool in_omp_attribute_pragma)
{
matching_parens parens;
if (!parens.require_open (parser))
location_t finish_loc = get_finish (varid.get_location ());
location_t varid_loc = make_location (caret_loc, start_loc, finish_loc);
+ /* For now only in C++ attributes, do it always for OpenMP 5.1. */
+ if (in_omp_attribute_pragma
+ && cp_lexer_next_token_is (parser->lexer, CPP_COMMA)
+ && cp_lexer_nth_token_is (parser->lexer, 2, CPP_NAME))
+ cp_lexer_consume_token (parser->lexer);
+
const char *clause = "";
location_t match_loc = cp_lexer_peek_token (parser->lexer)->location;
if (cp_lexer_next_token_is (parser->lexer, CPP_NAME))
cp_lexer_consume_token (parser->lexer);
if (strcmp (kind, "simd") == 0)
{
+ /* For now only in C++ attributes, do it always for OpenMP 5.1. */
+ if (data->in_omp_attribute_pragma
+ && cp_lexer_next_token_is (parser->lexer, CPP_COMMA)
+ && cp_lexer_nth_token_is (parser->lexer, 2, CPP_NAME))
+ cp_lexer_consume_token (parser->lexer);
+
cl = cp_parser_omp_all_clauses (parser, OMP_DECLARE_SIMD_CLAUSE_MASK,
"#pragma omp declare simd",
pragma_tok);
else
{
gcc_assert (strcmp (kind, "variant") == 0);
- attrs = cp_finish_omp_declare_variant (parser, pragma_tok, attrs);
+ attrs
+ = cp_finish_omp_declare_variant (parser, pragma_tok, attrs,
+ data->in_omp_attribute_pragma);
}
cp_parser_pop_lexer (parser);
}
tree clauses = NULL_TREE;
int device_type = 0;
bool only_device_type = true;
- if (cp_lexer_next_token_is (parser->lexer, CPP_NAME))
+ if (cp_lexer_next_token_is (parser->lexer, CPP_NAME)
+ /* For now only in C++ attributes, do it always for OpenMP 5.1. */
+ || (parser->lexer->in_omp_attribute_pragma
+ && cp_lexer_next_token_is (parser->lexer, CPP_COMMA)
+ && cp_lexer_nth_token_is (parser->lexer, 2, CPP_NAME)))
clauses
= cp_parser_omp_all_clauses (parser, OMP_DECLARE_TARGET_CLAUSE_MASK,
"#pragma omp declare target", pragma_tok);
if (!cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN))
return false;
+ /* For now only in C++ attributes, do it always for OpenMP 5.1. */
+ if (parser->lexer->in_omp_attribute_pragma
+ && cp_lexer_next_token_is (parser->lexer, CPP_COMMA)
+ && cp_lexer_nth_token_is (parser->lexer, 2, CPP_NAME))
+ cp_lexer_consume_token (parser->lexer);
+
const char *p = "";
if (cp_lexer_next_token_is (parser->lexer, CPP_NAME))
{
location_t loc = pragma_tok->location;
while (cp_lexer_next_token_is_not (parser->lexer, CPP_PRAGMA_EOL))
{
- if (!first && cp_lexer_next_token_is (parser->lexer, CPP_COMMA))
+ /* For now only in C++ attributes, do it always for OpenMP 5.1. */
+ if ((!first || parser->lexer->in_omp_attribute_pragma)
+ && cp_lexer_next_token_is (parser->lexer, CPP_COMMA)
+ && cp_lexer_nth_token_is (parser->lexer, 2, CPP_NAME))
cp_lexer_consume_token (parser->lexer);
first = false;
/* True if we're in the context of parsing a pragma, and should not
increment past the end-of-line marker. */
bool in_pragma;
+
+ /* True if we're in the context of OpenMP directives written as C++11
+ attributes turned into a pragma. */
};
bool error_seen; /* Set if error has been reported. */
bool fndecl_seen; /* Set if one fn decl/definition has been seen already. */
bool variant_p; /* Set for #pragma omp declare variant. */
+ bool in_omp_attribute_pragma; /* True if declare simd/variant comes from
+ C++11 attribute rather than pragma. */
vec<cp_token_cache_ptr> tokens;
tree clauses;
};
--- /dev/null
+// { dg-do compile { target c++11 } }
+
+typedef enum omp_allocator_handle_t
+: __UINTPTR_TYPE__
+{
+ omp_null_allocator = 0,
+ omp_default_mem_alloc = 1,
+ omp_large_cap_mem_alloc = 2,
+ omp_const_mem_alloc = 3,
+ omp_high_bw_mem_alloc = 4,
+ omp_low_lat_mem_alloc = 5,
+ omp_cgroup_mem_alloc = 6,
+ omp_pteam_mem_alloc = 7,
+ omp_thread_mem_alloc = 8,
+ __omp_allocator_handle_t_max__ = __UINTPTR_MAX__
+} omp_allocator_handle_t;
+
+typedef enum omp_sync_hint_t {
+omp_sync_hint_none = 0x0,
+omp_lock_hint_none = omp_sync_hint_none,
+omp_sync_hint_uncontended = 0x1,
+omp_lock_hint_uncontended = omp_sync_hint_uncontended,
+omp_sync_hint_contended = 0x2,
+omp_lock_hint_contended = omp_sync_hint_contended,
+omp_sync_hint_nonspeculative = 0x4,
+omp_lock_hint_nonspeculative = omp_sync_hint_nonspeculative,
+omp_sync_hint_speculative = 0x8,
+omp_lock_hint_speculative = omp_sync_hint_speculative
+} omp_sync_hint_t;
+
+typedef struct __attribute__((__aligned__ (sizeof (void *)))) omp_depend_t {
+ char __omp_depend_t__[2 * sizeof (void *)];
+} omp_depend_t;
+
+int t;
+#pragma omp threadprivate (t)
+
+#pragma omp declare target
+int f, l, ll, r, r2;
+
+void
+foo (int d, int m, int i1, int i2, int p, int *idp, int s,
+ int nte, int tl, int nth, int g, int nta, int fi, int pp, int *q, int ntm)
+{
+ [[omp::directive (distribute parallel for
+ private (p) firstprivate (f) collapse(1) dist_schedule(static, 16)
+ if (parallel: i2) default(shared) shared(s) reduction(+:r) num_threads (nth) proc_bind(spread)
+ lastprivate (l) schedule(static, 4) order(concurrent) allocate (omp_default_mem_alloc:f))]]
+ for (int i = 0; i < 64; i++)
+ ll++;
+ [[omp::directive (distribute parallel for simd
+ private (p) firstprivate (f) collapse(1) dist_schedule(static, 16)
+ if (parallel: i2) if(simd: i1) default(shared) shared(s) reduction(+:r) num_threads (nth) proc_bind(spread)
+ lastprivate (l) schedule(static, 4) nontemporal(ntm)
+ safelen(8) simdlen(4) aligned(q: 32) order(concurrent) allocate (omp_default_mem_alloc:f))]]
+ for (int i = 0; i < 64; i++)
+ ll++;
+ [[omp::directive (distribute simd
+ private (p) firstprivate (f) collapse(1) dist_schedule(static, 16)
+ safelen(8) simdlen(4) aligned(q: 32) reduction(+:r) if(i1) nontemporal(ntm)
+ order(concurrent) allocate (omp_default_mem_alloc:f))]]
+ for (int i = 0; i < 64; i++)
+ ll++;
+ [[omp::directive (distribute
+ private (p) firstprivate (f) collapse(1) dist_schedule(static, 16)
+ allocate (omp_default_mem_alloc:f))]]
+ for (int i = 0; i < 64; i++)
+ ll++;
+}
+
+void
+qux (int p)
+{
+ [[omp::directive (loop bind(teams) order(concurrent)
+ private (p) lastprivate (l) collapse(1) reduction(+:r))]]
+ for (l = 0; l < 64; ++l)
+ ll++;
+}
+#pragma omp end declare target
+
+void
+baz (int d, int m, int i1, int i2, int p, int *idp, int s,
+ int nte, int tl, int nth, int g, int nta, int fi, int pp, int *q, int ntm)
+{
+ [[omp::directive (distribute parallel for
+ private (p) firstprivate (f) collapse(1) dist_schedule(static, 16)
+ if (parallel: i2) default(shared) shared(s) reduction(+:r) num_threads (nth) proc_bind(spread)
+ lastprivate (l) schedule(static, 4) copyin(t) order(concurrent) allocate (p))]]
+ for (int i = 0; i < 64; i++)
+ ll++;
+ [[omp::directive (distribute parallel for simd
+ private (p) firstprivate (f) collapse(1) dist_schedule(static, 16)
+ if (parallel: i2) if(simd: i1) default(shared) shared(s) reduction(+:r) num_threads (nth) proc_bind(spread)
+ lastprivate (l) schedule(static, 4) nontemporal(ntm)
+ safelen(8) simdlen(4) aligned(q: 32) copyin(t) order(concurrent) allocate (f))]]
+ for (int i = 0; i < 64; i++)
+ ll++;
+ [[omp::directive (distribute simd
+ private (p) firstprivate (f) collapse(1) dist_schedule(static, 16)
+ safelen(8) simdlen(4) aligned(q: 32) reduction(+:r) if(i1) nontemporal(ntm)
+ order(concurrent) allocate (f))]]
+ for (int i = 0; i < 64; i++)
+ ll++;
+ [[omp::directive (loop bind(parallel) order(concurrent)
+ private (p) lastprivate (l) collapse(1) reduction(+:r))]]
+ for (l = 0; l < 64; ++l)
+ ll++;
+}
+
+void
+bar (int d, int m, int i1, int i2, int i3, int p, int *idp, int s,
+ int nte, int tl, int nth, int g, int nta, int fi, int pp, int *q, int *dd, int ntm)
+{
+ [[omp::directive (for simd
+ private (p) firstprivate (f) lastprivate (l) linear (ll:1) reduction(+:r) schedule(static, 4) collapse(1) nowait
+ safelen(8) simdlen(4) aligned(q: 32) nontemporal(ntm) if(i1) order(concurrent) allocate (f))]]
+ for (int i = 0; i < 64; i++)
+ ll++;
+ [[omp::directive (for
+ private (p) firstprivate (f) lastprivate (l) linear (ll:1) reduction(+:r) schedule(static, 4) collapse(1) nowait
+ order(concurrent) allocate (f))]]
+ for (int i = 0; i < 64; i++)
+ ll++;
+ [[omp::directive (simd
+ private (p) lastprivate (l) linear (ll:1) reduction(+:r) collapse(1) safelen(8) simdlen(4) aligned(q: 32)
+ nontemporal(ntm) if(i1) order(concurrent))]]
+ for (int i = 0; i < 64; i++)
+ ll++;
+ [[omp::directive (parallel for
+ private (p) firstprivate (f) if (parallel: i2) default(shared) shared(s) copyin(t) reduction(+:r) num_threads (nth) proc_bind(spread)
+ lastprivate (l) linear (ll:1) ordered schedule(static, 4) collapse(1) allocate (f))]]
+ for (int i = 0; i < 64; i++)
+ ll++;
+ [[omp::directive (parallel for
+ private (p) firstprivate (f) if (parallel: i2) default(shared) shared(s) copyin(t) reduction(+:r) num_threads (nth) proc_bind(spread)
+ lastprivate (l) linear (ll:1) schedule(static, 4) collapse(1) order(concurrent) allocate (f))]]
+ for (int i = 0; i < 64; i++)
+ ll++;
+ [[omp::directive (parallel for simd
+ private (p) firstprivate (f) if (i2) default(shared) shared(s) copyin(t) reduction(+:r) num_threads (nth) proc_bind(spread)
+ lastprivate (l) linear (ll:1) schedule(static, 4) collapse(1)
+ safelen(8) simdlen(4) aligned(q: 32) nontemporal(ntm) order(concurrent) allocate (f))]]
+ for (int i = 0; i < 64; i++)
+ ll++;
+ [[omp::directive (parallel sections
+ private (p) firstprivate (f) if (parallel: i2) default(shared) shared(s) copyin(t) reduction(+:r) num_threads (nth) proc_bind(spread)
+ lastprivate (l) allocate (f))]]
+ {
+ #pragma omp section
+ {}
+ #pragma omp section
+ {}
+ }
+ [[omp::directive (sections private (p) firstprivate (f) reduction(+:r) lastprivate (l) allocate (f) nowait)]]
+ {
+ ;
+ #pragma omp section
+ ;
+ #pragma omp section
+ {}
+ }
+ [[omp::directive (barrier)]];
+ [[omp::sequence (omp::directive (single private (p) firstprivate (f) allocate (f) nowait))]]
+ ;
+ [[omp::sequence (directive (barrier))]];
+ [[omp::sequence (directive (parallel private (p)),
+ omp::directive (single copyprivate (p) firstprivate (f) allocate (f)))]]
+ p = 6;
+ [[omp::directive (target parallel
+ device(d) map (tofrom: m) if (target: i1) private (p) firstprivate (f) defaultmap(tofrom: scalar) is_device_ptr (idp)
+ if (parallel: i2) default(shared) shared(s) reduction(+:r) num_threads (nth) proc_bind(spread)
+ nowait depend(inout: dd[0]) allocate (omp_default_mem_alloc:f) in_reduction(+:r2))]]
+ ;
+ [[omp::directive (target parallel for
+ device(d) map (tofrom: m) if (target: i1) private (p) firstprivate (f) defaultmap(tofrom: scalar) is_device_ptr (idp)
+ if (parallel: i2) default(shared) shared(s) reduction(+:r) num_threads (nth) proc_bind(spread)
+ lastprivate (l) linear (ll:1) ordered schedule(static, 4) collapse(1) nowait depend(inout: dd[0])
+ allocate (omp_default_mem_alloc:f) in_reduction(+:r2))]]
+ for (int i = 0; i < 64; i++)
+ ll++;
+ [[omp::directive (target parallel for
+ device(d) map (tofrom: m) if (target: i1) private (p) firstprivate (f) defaultmap(tofrom: scalar) is_device_ptr (idp)
+ if (parallel: i2) default(shared) shared(s) reduction(+:r) num_threads (nth) proc_bind(spread)
+ lastprivate (l) linear (ll:1) schedule(static, 4) collapse(1) nowait depend(inout: dd[0]) order(concurrent)
+ allocate (omp_default_mem_alloc:f) in_reduction(+:r2))]]
+ for (int i = 0; i < 64; i++)
+ ll++;
+ [[omp::sequence (omp::directive (target parallel for simd
+ device(d) map (tofrom: m) if (target: i1) private (p) firstprivate (f) defaultmap(tofrom: scalar) is_device_ptr (idp)
+ if (parallel: i2) default(shared) shared(s) reduction(+:r) num_threads (nth) proc_bind(spread)
+ lastprivate (l) linear (ll:1) schedule(static, 4) collapse(1)
+ safelen(8) simdlen(4) aligned(q: 32) nowait depend(inout: dd[0]) nontemporal(ntm) if (simd: i3) order(concurrent)
+ allocate (omp_default_mem_alloc:f) in_reduction(+:r2)))]]
+ for (int i = 0; i < 64; i++)
+ ll++;
+ [[omp::sequence (directive (target teams
+ device(d) map (tofrom: m) if (target: i1) private (p) firstprivate (f) defaultmap(tofrom: scalar) is_device_ptr (idp)
+ shared(s) default(shared) reduction(+:r) num_teams(nte) thread_limit(tl) nowait depend(inout: dd[0])
+ allocate (omp_default_mem_alloc:f) in_reduction(+:r2)))]]
+ ;
+ [[omp::sequence (directive (target
+ device(d) map (tofrom: m) if (target: i1) private (p) firstprivate (f) defaultmap(tofrom: scalar) is_device_ptr (idp)
+ nowait depend(inout: dd[0]) allocate (omp_default_mem_alloc:f) in_reduction(+:r2)))]]
+ ;
+ [[omp::sequence (omp::directive (target teams distribute
+ device(d) map (tofrom: m) if (target: i1) private (p) firstprivate (f) defaultmap(tofrom: scalar) is_device_ptr (idp)
+ shared(s) default(shared) reduction(+:r) num_teams(nte) thread_limit(tl)
+ collapse(1) dist_schedule(static, 16) nowait depend(inout: dd[0]) allocate (omp_default_mem_alloc:f) in_reduction(+:r2)))]]
+ for (int i = 0; i < 64; i++)
+ ;
+ [[omp::directive (target teams distribute parallel for
+ device(d) map (tofrom: m) if (target: i1) private (p) firstprivate (f) defaultmap(tofrom: scalar) is_device_ptr (idp)
+ shared(s) default(shared) reduction(+:r) num_teams(nte) thread_limit(tl)
+ collapse(1) dist_schedule(static, 16)
+ if (parallel: i2) num_threads (nth) proc_bind(spread)
+ lastprivate (l) schedule(static, 4) nowait depend(inout: dd[0]) order(concurrent)
+ allocate (omp_default_mem_alloc:f) in_reduction(+:r2))]]
+ for (int i = 0; i < 64; i++)
+ ll++;
+ [[omp::directive (target teams distribute parallel for simd
+ device(d) map (tofrom: m) if (target: i1) private (p) firstprivate (f) defaultmap(tofrom: scalar) is_device_ptr (idp)
+ shared(s) default(shared) reduction(+:r) num_teams(nte) thread_limit(tl)
+ collapse(1) dist_schedule(static, 16)
+ if (parallel: i2) num_threads (nth) proc_bind(spread)
+ lastprivate (l) schedule(static, 4) order(concurrent)
+ safelen(8) simdlen(4) aligned(q: 32) nowait depend(inout: dd[0]) nontemporal(ntm) if (simd: i3)
+ allocate (omp_default_mem_alloc:f) in_reduction(+:r2))]]
+ for (int i = 0; i < 64; i++)
+ ll++;
+ [[omp::directive (target teams distribute simd
+ device(d) map (tofrom: m) if (i1) private (p) firstprivate (f) defaultmap(tofrom: scalar) is_device_ptr (idp)
+ shared(s) default(shared) reduction(+:r) num_teams(nte) thread_limit(tl)
+ collapse(1) dist_schedule(static, 16) order(concurrent)
+ safelen(8) simdlen(4) aligned(q: 32) nowait depend(inout: dd[0]) nontemporal(ntm)
+ allocate (omp_default_mem_alloc:f) in_reduction(+:r2))]]
+ for (int i = 0; i < 64; i++)
+ ll++;
+ [[omp::directive (target simd
+ device(d) map (tofrom: m) if (target: i1) private (p) firstprivate (f) defaultmap(tofrom: scalar) is_device_ptr (idp)
+ safelen(8) simdlen(4) lastprivate (l) linear(ll: 1) aligned(q: 32) reduction(+:r)
+ nowait depend(inout: dd[0]) nontemporal(ntm) if(simd:i3) order(concurrent)
+ allocate (omp_default_mem_alloc:f) in_reduction(+:r2))]]
+ for (int i = 0; i < 64; i++)
+ ll++;
+ [[omp::sequence (directive (taskgroup task_reduction(+:r2) allocate (r2)),
+ omp::directive (taskloop simd
+ private (p) firstprivate (f) lastprivate (l) shared (s) default(shared) grainsize (g) collapse(1) untied if(taskloop: i1) if(simd: i2) final(fi) mergeable priority (pp)
+ safelen(8) simdlen(4) linear(ll: 1) aligned(q: 32) reduction(default, +:r) in_reduction(+:r2) nontemporal(ntm)
+ order(concurrent) allocate (f)))]]
+ for (int i = 0; i < 64; i++)
+ ll++;
+ [[omp::sequence (omp::directive (taskgroup task_reduction(+:r) allocate (r)),
+ directive (taskloop simd
+ private (p) firstprivate (f) lastprivate (l) shared (s) default(shared) grainsize (g) collapse(1) untied if(i1) final(fi) mergeable nogroup priority (pp)
+ safelen(8) simdlen(4) linear(ll: 1) aligned(q: 32) in_reduction(+:r) nontemporal(ntm)
+ order(concurrent) allocate (f)))]]
+ for (int i = 0; i < 64; i++)
+ ll++;
+ [[omp::directive (taskwait)]];
+ [[omp::directive (taskloop simd
+ private (p) firstprivate (f) lastprivate (l) shared (s) default(shared) num_tasks (nta) collapse(1) if(taskloop: i1) final(fi) priority (pp)
+ safelen(8) simdlen(4) linear(ll: 1) aligned(q: 32) reduction(+:r) if (simd: i3) nontemporal(ntm)
+ order(concurrent) allocate (f))]]
+ for (int i = 0; i < 64; i++)
+ ll++;
+ [[omp::sequence (directive (taskgroup task_reduction(+:r2) allocate (r2)),
+ omp::directive (taskloop
+ private (p) firstprivate (f) lastprivate (l) shared (s) default(shared) grainsize (g) collapse(1) untied if(taskloop: i1) final(fi) mergeable priority (pp)
+ reduction(default, +:r) in_reduction(+:r2) allocate (f)))]]
+ for (int i = 0; i < 64; i++)
+ ll++;
+ [[omp::sequence (directive (taskgroup task_reduction(+:r2) allocate (r2)),
+ omp::directive (task
+ private (p) firstprivate (f) shared (s) default(shared) untied if(task: i1) final(fi) mergeable priority (pp)
+ in_reduction(+:r2) allocate (f)))]]
+ ;
+ [[omp::directive (taskyield)]];
+ [[omp::directive (target data if (target data: i1) device(d) map (tofrom: m) use_device_ptr (q) use_device_addr (p))]]
+ ;
+ [[omp::directive (target enter data if (target enter data: i1) device(d) map (to: m) depend(inout: dd[0]) nowait)]]
+ ;
+ [[omp::directive (target exit data if (target exit data: i1) device(d) map (from: m) depend(inout: dd[0]) nowait)]]
+ ;
+ [[omp::directive (target update if (target update: i1) device(d) to (m) depend(inout: dd[0]) nowait)]]
+ ;
+ [[omp::directive (target update if (target update: i1) device(d) from (m) depend(inout: dd[0]) nowait)]]
+ ;
+ [[omp::directive (taskwait)]];
+ [[omp::sequence (directive (target nowait depend(inout: dd[0]) in_reduction(+:r2)),
+ directive (teams distribute
+ private(p) firstprivate (f) shared(s) default(shared) reduction(+:r) num_teams(nte) thread_limit(tl)
+ collapse(1) dist_schedule(static, 16) allocate (omp_default_mem_alloc: f)))]]
+ for (int i = 0; i < 64; i++)
+ ;
+ [[omp::directive (teams
+ private(p) firstprivate (f) shared(s) default(shared) reduction(+:r) num_teams(nte) thread_limit(tl)
+ allocate (omp_default_mem_alloc: f))]]
+ ;
+ [[omp::sequence (omp::directive (target),
+ omp::directive (teams distribute parallel for
+ private(p) firstprivate (f) shared(s) default(shared) reduction(+:r) num_teams(nte) thread_limit(tl)
+ collapse(1) dist_schedule(static, 16)
+ if (parallel: i2) num_threads (nth) proc_bind(spread)
+ lastprivate (l) schedule(static, 4) order(concurrent) allocate (omp_default_mem_alloc: f)))]]
+ for (int i = 0; i < 64; i++)
+ ll++;
+ [[omp::sequence (directive (target),
+ directive (teams distribute parallel for simd
+ private(p) firstprivate (f) shared(s) default(shared) reduction(+:r) num_teams(nte) thread_limit(tl)
+ collapse(1) dist_schedule(static, 16)
+ if (parallel: i2) num_threads (nth) proc_bind(spread)
+ lastprivate (l) schedule(static, 4) order(concurrent)
+ safelen(8) simdlen(4) aligned(q: 32) if (simd: i3) nontemporal(ntm)
+ allocate (omp_default_mem_alloc: f)))]]
+ for (int i = 0; i < 64; i++)
+ ll++;
+ [[omp::sequence (directive (target),
+ directive (teams distribute simd
+ private(p) firstprivate (f) shared(s) default(shared) reduction(+:r) num_teams(nte) thread_limit(tl)
+ collapse(1) dist_schedule(static, 16) order(concurrent)
+ safelen(8) simdlen(4) aligned(q: 32) if(i3) nontemporal(ntm)
+ allocate (omp_default_mem_alloc: f)))]]
+ for (int i = 0; i < 64; i++)
+ ll++;
+ [[omp::directive (teams distribute parallel for
+ private(p) firstprivate (f) shared(s) default(shared) reduction(+:r) num_teams(nte) thread_limit(tl)
+ collapse(1) dist_schedule(static, 16) order(concurrent)
+ if (parallel: i2) num_threads (nth) proc_bind(spread)
+ lastprivate (l) schedule(static, 4) copyin(t) allocate (f))]]
+ for (int i = 0; i < 64; i++)
+ ll++;
+ [[omp::directive (teams distribute parallel for simd
+ private(p) firstprivate (f) shared(s) default(shared) reduction(+:r) num_teams(nte) thread_limit(tl)
+ collapse(1) dist_schedule(static, 16)
+ if (parallel: i2) num_threads (nth) proc_bind(spread)
+ lastprivate (l) schedule(static, 4) order(concurrent)
+ safelen(8) simdlen(4) aligned(q: 32) if (simd: i3) nontemporal(ntm) copyin(t)
+ allocate (f))]]
+ for (int i = 0; i < 64; i++)
+ ll++;
+ [[omp::directive (teams distribute simd
+ private(p) firstprivate (f) shared(s) default(shared) reduction(+:r) num_teams(nte) thread_limit(tl)
+ collapse(1) dist_schedule(static, 16) order(concurrent)
+ safelen(8) simdlen(4) aligned(q: 32) if(i3) nontemporal(ntm) allocate(f))]]
+ for (int i = 0; i < 64; i++)
+ ll++;
+ [[omp::directive (parallel master
+ private (p) firstprivate (f) if (parallel: i2) default(shared) shared(s) reduction(+:r)
+ num_threads (nth) proc_bind(spread) copyin(t) allocate (f))]]
+ ;
+ [[omp::directive (parallel
+ private (p) firstprivate (f) if (parallel: i2) default(shared) shared(s) reduction(+:r)
+ num_threads (nth) proc_bind(spread) copyin(t) allocate (f))]]
+ ;
+ [[omp::sequence (directive (taskgroup task_reduction (+:r2) allocate (r2)),
+ omp::directive (master taskloop
+ private (p) firstprivate (f) lastprivate (l) shared (s) default(shared) grainsize (g) collapse(1) untied if(taskloop: i1) final(fi) mergeable priority (pp)
+ reduction(default, +:r) in_reduction(+:r2) allocate (f)))]]
+ for (int i = 0; i < 64; i++)
+ ll++;
+ [[omp::directive (master)]];
+ [[omp::sequence (omp::directive (taskgroup task_reduction (+:r2) allocate (r2)),
+ directive (master taskloop simd
+ private (p) firstprivate (f) lastprivate (l) shared (s) default(shared) grainsize (g) collapse(1) untied if(taskloop: i1) if(simd: i2) final(fi) mergeable priority (pp)
+ safelen(8) simdlen(4) linear(ll: 1) aligned(q: 32) reduction(default, +:r) in_reduction(+:r2) nontemporal(ntm)
+ order(concurrent) allocate (f)))]]
+ for (int i = 0; i < 64; i++)
+ ll++;
+ [[omp::directive (parallel master taskloop
+ private (p) firstprivate (f) lastprivate (l) shared (s) default(shared) grainsize (g) collapse(1) untied if(taskloop: i1) final(fi) mergeable priority (pp)
+ reduction(default, +:r) if (parallel: i2) num_threads (nth) proc_bind(spread) copyin(t) allocate (f))]]
+ for (int i = 0; i < 64; i++)
+ ll++;
+ [[omp::directive (parallel master taskloop simd
+ private (p) firstprivate (f) lastprivate (l) shared (s) default(shared) grainsize (g) collapse(1) untied if(taskloop: i1) if(simd: i2) final(fi) mergeable priority (pp)
+ safelen(8) simdlen(4) linear(ll: 1) aligned(q: 32) reduction(default, +:r) nontemporal(ntm) if (parallel: i2) num_threads (nth) proc_bind(spread) copyin(t)
+ order(concurrent) allocate (f))]]
+ for (int i = 0; i < 64; i++)
+ ll++;
+ [[omp::sequence (directive (taskgroup task_reduction (+:r2) allocate (r2)),
+ directive (master taskloop
+ private (p) firstprivate (f) lastprivate (l) shared (s) default(shared) num_tasks (nta) collapse(1) untied if(i1) final(fi) mergeable priority (pp)
+ reduction(default, +:r) in_reduction(+:r2)))]]
+ for (int i = 0; i < 64; i++)
+ ll++;
+ [[omp::sequence (omp::directive (taskgroup task_reduction (+:r2) allocate (r2)),
+ omp::directive (master taskloop simd
+ private (p) firstprivate (f) lastprivate (l) shared (s) default(shared) num_tasks (nta) collapse(1) untied if(i1) final(fi) mergeable priority (pp)
+ safelen(8) simdlen(4) linear(ll: 1) aligned(q: 32) reduction(default, +:r) in_reduction(+:r2) nontemporal(ntm)
+ order(concurrent) allocate (f)))]]
+ for (int i = 0; i < 64; i++)
+ ll++;
+ [[omp::directive (parallel master taskloop
+ private (p) firstprivate (f) lastprivate (l) shared (s) default(shared) num_tasks (nta) collapse(1) untied if(i1) final(fi) mergeable priority (pp)
+ reduction(default, +:r) num_threads (nth) proc_bind(spread) copyin(t) allocate (f))]]
+ for (int i = 0; i < 64; i++)
+ ll++;
+ [[omp::directive (parallel master taskloop simd
+ private (p) firstprivate (f) lastprivate (l) shared (s) default(shared) num_tasks (nta) collapse(1) untied if(i1) final(fi) mergeable priority (pp)
+ safelen(8) simdlen(4) linear(ll: 1) aligned(q: 32) reduction(default, +:r) nontemporal(ntm) num_threads (nth) proc_bind(spread) copyin(t)
+ order(concurrent) allocate (f))]]
+ for (int i = 0; i < 64; i++)
+ ll++;
+ [[omp::directive (loop bind(thread) order(concurrent)
+ private (p) lastprivate (l) collapse(1) reduction(+:r))]]
+ for (l = 0; l < 64; ++l)
+ ll++;
+ [[omp::directive (parallel loop
+ private (p) firstprivate (f) if (parallel: i2) default(shared) shared(s) copyin(t) reduction(+:r) num_threads (nth) proc_bind(spread)
+ lastprivate (l) collapse(1) bind(parallel) order(concurrent) allocate (f))]]
+ for (l = 0; l < 64; l++)
+ ll++;
+ [[omp::directive (parallel loop
+ private (p) firstprivate (f) if (parallel: i2) default(shared) shared(s) copyin(t) reduction(+:r) num_threads (nth) proc_bind(spread)
+ lastprivate (l) collapse(1) allocate (f))]]
+ for (l = 0; l < 64; l++)
+ ll++;
+ [[omp::directive (teams loop
+ private(p) firstprivate (f) shared(s) default(shared) reduction(+:r) num_teams(nte) thread_limit(tl)
+ collapse(1) lastprivate (l) bind(teams) allocate (f))]]
+ for (l = 0; l < 64; ++l)
+ ;
+ [[omp::directive (teams loop
+ private(p) firstprivate (f) shared(s) default(shared) reduction(+:r) num_teams(nte) thread_limit(tl)
+ collapse(1) lastprivate (l) order(concurrent) allocate (f))]]
+ for (l = 0; l < 64; ++l)
+ ;
+ [[omp::directive (target parallel loop
+ device(d) map (tofrom: m) if (target: i1) private (p) firstprivate (f) defaultmap(tofrom: scalar) is_device_ptr (idp)
+ if (parallel: i2) default(shared) shared(s) reduction(+:r) num_threads (nth) proc_bind(spread)
+ nowait depend(inout: dd[0]) lastprivate (l) bind(parallel) order(concurrent) collapse(1)
+ allocate (omp_default_mem_alloc: f) in_reduction(+:r2))]]
+ for (l = 0; l < 64; ++l)
+ ;
+ [[omp::directive (target parallel loop
+ device(d) map (tofrom: m) if (target: i1) private (p) firstprivate (f) defaultmap(tofrom: scalar) is_device_ptr (idp)
+ if (parallel: i2) default(shared) shared(s) reduction(+:r) num_threads (nth) proc_bind(spread)
+ nowait depend(inout: dd[0]) lastprivate (l) order(concurrent) collapse(1)
+ allocate (omp_default_mem_alloc: f) in_reduction(+:r2))]]
+ for (l = 0; l < 64; ++l)
+ ;
+ [[omp::directive (target teams loop
+ device(d) map (tofrom: m) if (target: i1) private (p) firstprivate (f) defaultmap(tofrom: scalar) is_device_ptr (idp)
+ shared(s) default(shared) reduction(+:r) num_teams(nte) thread_limit(tl) nowait depend(inout: dd[0])
+ lastprivate (l) bind(teams) collapse(1)
+ allocate (omp_default_mem_alloc: f) in_reduction(+:r2))]]
+ for (l = 0; l < 64; ++l)
+ ;
+ [[omp::directive (target teams loop
+ device(d) map (tofrom: m) if (target: i1) private (p) firstprivate (f) defaultmap(tofrom: scalar) is_device_ptr (idp)
+ shared(s) default(shared) reduction(+:r) num_teams(nte) thread_limit(tl) nowait depend(inout: dd[0])
+ lastprivate (l) order(concurrent) collapse(1)
+ allocate (omp_default_mem_alloc: f) in_reduction(+:r2))]]
+ for (l = 0; l < 64; ++l)
+ ;
+ [[omp::directive (critical)]] {
+ }
+ [[omp::directive (critical (foobar) hint(omp_sync_hint_none))]]
+ ;
+ [[omp::directive (taskwait depend (inout: dd[0]))]]
+ ;
+ [[omp::directive (taskgroup task_reduction(+:r2) allocate (r2))]]
+ ;
+ [[omp::directive (atomic update seq_cst hint(omp_sync_hint_none))]]
+ p++;
+ [[omp::directive (atomic read hint(omp_sync_hint_none) relaxed)]]
+ f = p;
+ [[omp::directive (atomic write release hint(omp_sync_hint_none))]]
+ p = f;
+ [[omp::directive (flush)]]
+ ;
+ [[omp::directive (flush acq_rel)]]
+ ;
+ [[omp::directive (flush (p, f))]]
+ ;
+ [[omp::directive (simd
+ private (p) lastprivate (l) linear (ll:1) reduction(+:r) collapse(1) safelen(8) simdlen(4) aligned(q: 32)
+ nontemporal(ntm) if(i1))]]
+ for (int i = 0; i < 64; i++)
+ [[omp::directive (ordered simd)]]
+ ll++;
+ [[omp::directive (for
+ private (p) firstprivate (f) lastprivate (l) linear (ll:1) reduction(+:r) schedule(static, 4) collapse(1) nowait
+ ordered allocate (f))]]
+ for (int i = 0; i < 64; i++)
+ [[omp::directive (ordered threads)]]
+ ll++;
+ [[omp::directive(for ordered (1))]]
+ for (l = 0; l < 64; l++)
+ {
+ [[omp::directive(ordered depend (sink: l - 1))]];
+ [[omp::directive(ordered depend (source))]];
+ }
+ extern omp_depend_t depobj;
+ [[omp::directive (depobj(depobj) depend(in : dd[0]))]];
+ [[omp::directive (parallel)]] {
+ if (p) {
+ [[omp::directive (cancel parallel)]];
+ } else {
+ [[omp::directive (cancellation point parallel)]];
+ }
+ }
+ extern int t2;
+ [[omp::directive (threadprivate (t2))]]
+ extern int t2;
+ [[omp::directive (declare reduction (dr: int: omp_out += omp_in) initializer (omp_priv = 0))]]
+ ;
+}
+
+void corge1 ();
+
+void
+corge ()
+{
+ [[omp::directive (declare variant (corge1) match (construct={parallel,for}))]]
+ extern void corge2 ();
+ [[omp::sequence (directive (parallel), directive (for))]]
+ for (int i = 0; i < 5; i++)
+ corge2 ();
+ [[omp::directive (declare simd simdlen(4) linear(l) aligned(p:4) uniform(p) inbranch),
+ omp::directive (declare simd simdlen(8) notinbranch)]]
+ extern int corge3 (int l, int *p);
+ [[omp::directive (declare simd simdlen(4) linear(l) aligned(p:4) uniform(p) inbranch),
+ directive (declare simd simdlen(8) notinbranch)]]
+ extern int corge4 (int l, int *p);
+ [[omp::sequence (directive (declare simd simdlen(4) linear(l) aligned(p:4) uniform(p) inbranch),
+ omp::directive (declare simd simdlen(8) notinbranch))]]
+ extern int corge5 (int l, int *p);
+ [[omp::directive (declare target)]];
+ extern void corge6 ();
+ [[omp::directive (end declare target)]];
+}
+
+int
+garply (int a, int *c, int *d, int *e, int *f)
+{
+ int i;
+ [[omp::directive (simd reduction (inscan, +: a))]]
+ for (i = 0; i < 64; i++)
+ {
+ d[i] = a;
+ #pragma omp scan exclusive (a)
+ a += c[i];
+ }
+ [[omp::directive (simd reduction (inscan, +: a))]]
+ for (i = 0; i < 64; i++)
+ {
+ a += c[i];
+ #pragma omp scan inclusive (a)
+ d[i] = a;
+ }
+ return a;
+}
--- /dev/null
+// { dg-do compile { target c++17 } }
+
+typedef enum omp_allocator_handle_t
+: __UINTPTR_TYPE__
+{
+ omp_null_allocator = 0,
+ omp_default_mem_alloc = 1,
+ omp_large_cap_mem_alloc = 2,
+ omp_const_mem_alloc = 3,
+ omp_high_bw_mem_alloc = 4,
+ omp_low_lat_mem_alloc = 5,
+ omp_cgroup_mem_alloc = 6,
+ omp_pteam_mem_alloc = 7,
+ omp_thread_mem_alloc = 8,
+ __omp_allocator_handle_t_max__ = __UINTPTR_MAX__
+} omp_allocator_handle_t;
+
+typedef enum omp_sync_hint_t {
+omp_sync_hint_none = 0x0,
+omp_lock_hint_none = omp_sync_hint_none,
+omp_sync_hint_uncontended = 0x1,
+omp_lock_hint_uncontended = omp_sync_hint_uncontended,
+omp_sync_hint_contended = 0x2,
+omp_lock_hint_contended = omp_sync_hint_contended,
+omp_sync_hint_nonspeculative = 0x4,
+omp_lock_hint_nonspeculative = omp_sync_hint_nonspeculative,
+omp_sync_hint_speculative = 0x8,
+omp_lock_hint_speculative = omp_sync_hint_speculative
+} omp_sync_hint_t;
+
+typedef struct __attribute__((__aligned__ (sizeof (void *)))) omp_depend_t {
+ char __omp_depend_t__[2 * sizeof (void *)];
+} omp_depend_t;
+
+int t;
+#pragma omp threadprivate (t)
+
+#pragma omp declare target
+int f, l, ll, r, r2;
+
+void
+foo (int d, int m, int i1, int i2, int p, int *idp, int s,
+ int nte, int tl, int nth, int g, int nta, int fi, int pp, int *q, int ntm)
+{
+ [[omp::directive (distribute parallel for,
+ private (p),firstprivate (f),collapse(1),dist_schedule(static, 16),
+ if (parallel: i2),default(shared),shared(s),reduction(+:r),num_threads (nth),proc_bind(spread),
+ lastprivate (l),schedule(static, 4),order(concurrent),allocate (omp_default_mem_alloc:f))]]
+ for (int i = 0; i < 64; i++)
+ ll++;
+ [[omp::directive (distribute parallel for simd,
+ private (p),firstprivate (f),collapse(1),dist_schedule(static, 16),
+ if (parallel: i2),if(simd: i1),default(shared),shared(s),reduction(+:r),num_threads (nth),proc_bind(spread),
+ lastprivate (l),schedule(static, 4),nontemporal(ntm),
+ safelen(8),simdlen(4),aligned(q: 32),order(concurrent),allocate (omp_default_mem_alloc:f))]]
+ for (int i = 0; i < 64; i++)
+ ll++;
+ [[omp::directive (distribute simd,
+ private (p),firstprivate (f),collapse(1),dist_schedule(static, 16),
+ safelen(8),simdlen(4),aligned(q: 32),reduction(+:r),if(i1),nontemporal(ntm),
+ order(concurrent),allocate (omp_default_mem_alloc:f))]]
+ for (int i = 0; i < 64; i++)
+ ll++;
+ [[omp::directive (distribute,
+ private (p),firstprivate (f),collapse(1),dist_schedule(static, 16),
+ allocate (omp_default_mem_alloc:f))]]
+ for (int i = 0; i < 64; i++)
+ ll++;
+}
+
+void
+qux (int p)
+{
+ [[omp::directive (loop, bind(teams),order(concurrent),
+ private (p),lastprivate (l),collapse(1),reduction(+:r))]]
+ for (l = 0; l < 64; ++l)
+ ll++;
+}
+#pragma omp end declare target
+
+void
+baz (int d, int m, int i1, int i2, int p, int *idp, int s,
+ int nte, int tl, int nth, int g, int nta, int fi, int pp, int *q, int ntm)
+{
+ [[omp::directive (distribute parallel for,
+ private (p),firstprivate (f),collapse(1),dist_schedule(static, 16),
+ if (parallel: i2),default(shared),shared(s),reduction(+:r),num_threads (nth),proc_bind(spread),
+ lastprivate (l),schedule(static, 4),copyin(t),order(concurrent),allocate (p))]]
+ for (int i = 0; i < 64; i++)
+ ll++;
+ [[omp::directive (distribute parallel for simd,
+ private (p),firstprivate (f),collapse(1),dist_schedule(static, 16),
+ if (parallel: i2),if(simd: i1),default(shared),shared(s),reduction(+:r),num_threads (nth),proc_bind(spread),
+ lastprivate (l),schedule(static, 4),nontemporal(ntm),
+ safelen(8),simdlen(4),aligned(q: 32),copyin(t),order(concurrent),allocate (f))]]
+ for (int i = 0; i < 64; i++)
+ ll++;
+ [[omp::directive (distribute simd,
+ private (p),firstprivate (f),collapse(1),dist_schedule(static, 16),
+ safelen(8),simdlen(4),aligned(q: 32),reduction(+:r),if(i1),nontemporal(ntm),
+ order(concurrent),allocate (f))]]
+ for (int i = 0; i < 64; i++)
+ ll++;
+ [[omp::directive (loop, bind(parallel),order(concurrent),
+ private (p),lastprivate (l),collapse(1),reduction(+:r))]]
+ for (l = 0; l < 64; ++l)
+ ll++;
+}
+
+void
+bar (int d, int m, int i1, int i2, int i3, int p, int *idp, int s,
+ int nte, int tl, int nth, int g, int nta, int fi, int pp, int *q, int *dd, int ntm)
+{
+ [[omp::directive (for simd,
+ private (p),firstprivate (f),lastprivate (l),linear (ll:1),reduction(+:r),schedule(static, 4),collapse(1),nowait,
+ safelen(8),simdlen(4),aligned(q: 32),nontemporal(ntm),if(i1),order(concurrent),allocate (f))]]
+ for (int i = 0; i < 64; i++)
+ ll++;
+ [[omp::directive (for,
+ private (p),firstprivate (f),lastprivate (l),linear (ll:1),reduction(+:r),schedule(static, 4),collapse(1),nowait,
+ order(concurrent),allocate (f))]]
+ for (int i = 0; i < 64; i++)
+ ll++;
+ [[omp::directive (simd,
+ private (p),lastprivate (l),linear (ll:1),reduction(+:r),collapse(1),safelen(8),simdlen(4),aligned(q: 32),
+ nontemporal(ntm),if(i1),order(concurrent))]]
+ for (int i = 0; i < 64; i++)
+ ll++;
+ [[omp::directive (parallel for,
+ private (p),firstprivate (f),if (parallel: i2),default(shared),shared(s),copyin(t),reduction(+:r),num_threads (nth),proc_bind(spread),
+ lastprivate (l),linear (ll:1),ordered schedule(static, 4),collapse(1),allocate (f))]]
+ for (int i = 0; i < 64; i++)
+ ll++;
+ [[omp::directive (parallel for,
+ private (p),firstprivate (f),if (parallel: i2),default(shared),shared(s),copyin(t),reduction(+:r),num_threads (nth),proc_bind(spread),
+ lastprivate (l),linear (ll:1),schedule(static, 4),collapse(1),order(concurrent),allocate (f))]]
+ for (int i = 0; i < 64; i++)
+ ll++;
+ [[omp::directive (parallel for simd,
+ private (p),firstprivate (f),if (i2),default(shared),shared(s),copyin(t),reduction(+:r),num_threads (nth),proc_bind(spread),
+ lastprivate (l),linear (ll:1),schedule(static, 4),collapse(1),
+ safelen(8),simdlen(4),aligned(q: 32),nontemporal(ntm),order(concurrent),allocate (f))]]
+ for (int i = 0; i < 64; i++)
+ ll++;
+ [[omp::directive (parallel sections,
+ private (p),firstprivate (f),if (parallel: i2),default(shared),shared(s),copyin(t),reduction(+:r),num_threads (nth),proc_bind(spread),
+ lastprivate (l),allocate (f))]]
+ {
+ #pragma omp section
+ {}
+ #pragma omp section
+ {}
+ }
+ [[omp::directive (sections, private (p),firstprivate (f),reduction(+:r),lastprivate (l),allocate (f),nowait)]]
+ {
+ ;
+ #pragma omp section
+ ;
+ #pragma omp section
+ {}
+ }
+ [[omp::directive (barrier)]];
+ [[using omp:sequence (omp::directive (single, private (p),firstprivate (f),allocate (f),nowait))]]
+ ;
+ [[omp::sequence (directive (barrier))]];
+ [[using omp:sequence (directive (parallel, private (p)),
+ omp::directive (single, copyprivate (p),firstprivate (f),allocate (f)))]]
+ p = 6;
+ [[omp::directive (target parallel,
+ device(d),map (tofrom: m),if (target: i1),private (p),firstprivate (f),defaultmap(tofrom: scalar),is_device_ptr (idp),
+ if (parallel: i2),default(shared),shared(s),reduction(+:r),num_threads (nth),proc_bind(spread)
+ nowait depend(inout: dd[0]),allocate (omp_default_mem_alloc:f),in_reduction(+:r2))]]
+ ;
+ [[omp::directive (target parallel for,
+ device(d),map (tofrom: m),if (target: i1),private (p),firstprivate (f),defaultmap(tofrom: scalar),is_device_ptr (idp),
+ if (parallel: i2),default(shared),shared(s),reduction(+:r),num_threads (nth),proc_bind(spread),
+ lastprivate (l),linear (ll:1),ordered schedule(static, 4),collapse(1),nowait depend(inout: dd[0]),
+ allocate (omp_default_mem_alloc:f),in_reduction(+:r2))]]
+ for (int i = 0; i < 64; i++)
+ ll++;
+ [[using omp:directive (target parallel for,
+ device(d),map (tofrom: m),if (target: i1),private (p),firstprivate (f),defaultmap(tofrom: scalar),is_device_ptr (idp),
+ if (parallel: i2),default(shared),shared(s),reduction(+:r),num_threads (nth),proc_bind(spread),
+ lastprivate (l),linear (ll:1),schedule(static, 4),collapse(1),nowait depend(inout: dd[0]),order(concurrent),
+ allocate (omp_default_mem_alloc:f),in_reduction(+:r2))]]
+ for (int i = 0; i < 64; i++)
+ ll++;
+ [[omp::sequence (omp::directive (target parallel for simd,
+ device(d),map (tofrom: m),if (target: i1),private (p),firstprivate (f),defaultmap(tofrom: scalar),is_device_ptr (idp),
+ if (parallel: i2),default(shared),shared(s),reduction(+:r),num_threads (nth),proc_bind(spread),
+ lastprivate (l),linear (ll:1),schedule(static, 4),collapse(1),
+ safelen(8),simdlen(4),aligned(q: 32),nowait depend(inout: dd[0]),nontemporal(ntm),if (simd: i3),order(concurrent),
+ allocate (omp_default_mem_alloc:f),in_reduction(+:r2)))]]
+ for (int i = 0; i < 64; i++)
+ ll++;
+ [[using omp:sequence (directive (target teams,
+ device(d),map (tofrom: m),if (target: i1),private (p),firstprivate (f),defaultmap(tofrom: scalar),is_device_ptr (idp),
+ shared(s),default(shared),reduction(+:r),num_teams(nte),thread_limit(tl),nowait, depend(inout: dd[0]),
+ allocate (omp_default_mem_alloc:f) in_reduction(+:r2)))]]
+ ;
+ [[using omp:sequence (directive (target,
+ device(d),map (tofrom: m),if (target: i1),private (p),firstprivate (f),defaultmap(tofrom: scalar),is_device_ptr (idp),
+ nowait depend(inout: dd[0]),allocate (omp_default_mem_alloc:f),in_reduction(+:r2)))]]
+ ;
+ [[omp::sequence (omp::directive (target teams distribute,
+ device(d),map (tofrom: m),if (target: i1),private (p),firstprivate (f),defaultmap(tofrom: scalar),is_device_ptr (idp),
+ shared(s),default(shared),reduction(+:r),num_teams(nte),thread_limit(tl),
+ collapse(1),dist_schedule(static, 16),nowait depend(inout: dd[0]),allocate (omp_default_mem_alloc:f),in_reduction(+:r2)))]]
+ for (int i = 0; i < 64; i++)
+ ;
+ [[omp::directive (target teams distribute parallel for,
+ device(d),map (tofrom: m),if (target: i1),private (p),firstprivate (f),defaultmap(tofrom: scalar),is_device_ptr (idp),
+ shared(s),default(shared),reduction(+:r),num_teams(nte),thread_limit(tl),
+ collapse(1),dist_schedule(static, 16),
+ if (parallel: i2),num_threads (nth),proc_bind(spread),
+ lastprivate (l),schedule(static, 4),nowait depend(inout: dd[0]),order(concurrent),
+ allocate (omp_default_mem_alloc:f),in_reduction(+:r2))]]
+ for (int i = 0; i < 64; i++)
+ ll++;
+ [[omp::directive (target teams distribute parallel for simd,
+ device(d),map (tofrom: m),if (target: i1),private (p),firstprivate (f),defaultmap(tofrom: scalar),is_device_ptr (idp),
+ shared(s),default(shared),reduction(+:r),num_teams(nte),thread_limit(tl),
+ collapse(1),dist_schedule(static, 16),
+ if (parallel: i2),num_threads (nth),proc_bind(spread),
+ lastprivate (l),schedule(static, 4),order(concurrent),
+ safelen(8),simdlen(4),aligned(q: 32),nowait depend(inout: dd[0]),nontemporal(ntm),if (simd: i3),
+ allocate (omp_default_mem_alloc:f),in_reduction(+:r2))]]
+ for (int i = 0; i < 64; i++)
+ ll++;
+ [[omp::directive (target teams distribute simd,
+ device(d),map (tofrom: m),if (i1),private (p),firstprivate (f),defaultmap(tofrom: scalar),is_device_ptr (idp),
+ shared(s),default(shared),reduction(+:r),num_teams(nte),thread_limit(tl),
+ collapse(1),dist_schedule(static, 16),order(concurrent),
+ safelen(8),simdlen(4),aligned(q: 32),nowait depend(inout: dd[0]),nontemporal(ntm),
+ allocate (omp_default_mem_alloc:f),in_reduction(+:r2))]]
+ for (int i = 0; i < 64; i++)
+ ll++;
+ [[omp::directive (target simd,
+ device(d),map (tofrom: m),if (target: i1),private (p),firstprivate (f),defaultmap(tofrom: scalar),is_device_ptr (idp),
+ safelen(8),simdlen(4),lastprivate (l),linear(ll: 1),aligned(q: 32),reduction(+:r),
+ nowait depend(inout: dd[0]),nontemporal(ntm),if(simd:i3),order(concurrent),
+ allocate (omp_default_mem_alloc:f),in_reduction(+:r2))]]
+ for (int i = 0; i < 64; i++)
+ ll++;
+ [[omp::sequence (directive (taskgroup, task_reduction(+:r2), allocate (r2)),
+ omp::directive (taskloop simd,
+ private (p),firstprivate (f),lastprivate (l),shared (s),default(shared),grainsize (g),collapse(1),untied,if(taskloop: i1),if(simd: i2),final(fi),mergeable,priority (pp),
+ safelen(8),simdlen(4),linear(ll: 1),aligned(q: 32),reduction(default, +:r),in_reduction(+:r2),nontemporal(ntm),
+ order(concurrent),allocate (f)))]]
+ for (int i = 0; i < 64; i++)
+ ll++;
+ [[using omp:sequence (omp::directive (taskgroup, task_reduction(+:r), allocate (r)),
+ directive (taskloop simd,
+ private (p),firstprivate (f),lastprivate (l),shared (s),default(shared),grainsize (g),collapse(1),untied,if(i1),final(fi),mergeable,nogroup,priority (pp),
+ safelen(8),simdlen(4),linear(ll: 1),aligned(q: 32),in_reduction(+:r),nontemporal(ntm),
+ order(concurrent),allocate (f)))]]
+ for (int i = 0; i < 64; i++)
+ ll++;
+ [[omp::directive (taskwait)]];
+ [[omp::directive (taskloop simd,
+ private (p),firstprivate (f),lastprivate (l),shared (s),default(shared),num_tasks (nta),collapse(1),if(taskloop: i1),final(fi),priority (pp)
+ safelen(8),simdlen(4),linear(ll: 1),aligned(q: 32),reduction(+:r),if (simd: i3),nontemporal(ntm),
+ order(concurrent),allocate (f))]]
+ for (int i = 0; i < 64; i++)
+ ll++;
+ [[omp::sequence (directive (taskgroup, task_reduction(+:r2), allocate (r2)),
+ omp::directive (taskloop
+ private (p),firstprivate (f),lastprivate (l),shared (s),default(shared),grainsize (g),collapse(1),untied, if(taskloop: i1),final(fi),mergeable, priority (pp),
+ reduction(default, +:r),in_reduction(+:r2),allocate (f)))]]
+ for (int i = 0; i < 64; i++)
+ ll++;
+ [[omp::sequence (directive (taskgroup, task_reduction(+:r2),allocate (r2)),
+ omp::directive (task,
+ private (p),firstprivate (f),shared (s),default(shared),untied,if(task: i1),final(fi),mergeable,priority (pp),
+ in_reduction(+:r2),allocate (f)))]]
+ ;
+ [[omp::directive (taskyield)]];
+ [[omp::directive (target data, if (target data: i1),device(d),map (tofrom: m),use_device_ptr (q),use_device_addr (p))]]
+ ;
+ [[omp::directive (target enter data, if (target enter data: i1),device(d),map (to: m),depend(inout: dd[0]),nowait)]]
+ ;
+ [[omp::directive (target exit data, if (target exit data: i1),device(d),map (from: m),depend(inout: dd[0]),nowait)]]
+ ;
+ [[omp::directive (target update, if (target update: i1),device(d),to (m),depend(inout: dd[0]),nowait)]]
+ ;
+ [[omp::directive (target update, if (target update: i1),device(d),from (m),depend(inout: dd[0]),nowait)]]
+ ;
+ [[omp::directive (taskwait)]];
+ [[omp::sequence (directive (target, nowait,depend(inout: dd[0]),in_reduction(+:r2)),
+ directive (teams distribute,
+ private(p),firstprivate (f),shared(s),default(shared),reduction(+:r),num_teams(nte),thread_limit(tl),
+ collapse(1),dist_schedule(static, 16),allocate (omp_default_mem_alloc: f)))]]
+ for (int i = 0; i < 64; i++)
+ ;
+ [[omp::directive (teams,
+ private(p),firstprivate (f),shared(s),default(shared),reduction(+:r),num_teams(nte),thread_limit(tl),
+ allocate (omp_default_mem_alloc: f))]]
+ ;
+ [[omp::sequence (omp::directive (target),
+ omp::directive (teams distribute parallel for,
+ private(p),firstprivate (f),shared(s),default(shared),reduction(+:r),num_teams(nte),thread_limit(tl),
+ collapse(1),dist_schedule(static, 16),
+ if (parallel: i2),num_threads (nth),proc_bind(spread),
+ lastprivate (l),schedule(static, 4),order(concurrent),allocate (omp_default_mem_alloc: f)))]]
+ for (int i = 0; i < 64; i++)
+ ll++;
+ [[using omp:sequence (directive (target),
+ directive (teams distribute parallel for simd,
+ private(p),firstprivate (f),shared(s),default(shared),reduction(+:r),num_teams(nte),thread_limit(tl),
+ collapse(1),dist_schedule(static, 16),
+ if (parallel: i2),num_threads (nth),proc_bind(spread),
+ lastprivate (l),schedule(static, 4),order(concurrent),
+ safelen(8),simdlen(4),aligned(q: 32),if (simd: i3),nontemporal(ntm),
+ allocate (omp_default_mem_alloc: f)))]]
+ for (int i = 0; i < 64; i++)
+ ll++;
+ [[omp::sequence (directive (target),
+ directive (teams distribute simd,
+ private(p),firstprivate (f),shared(s),default(shared),reduction(+:r),num_teams(nte),thread_limit(tl),
+ collapse(1),dist_schedule(static, 16),order(concurrent),
+ safelen(8),simdlen(4),aligned(q: 32),if(i3),nontemporal(ntm),
+ allocate (omp_default_mem_alloc: f)))]]
+ for (int i = 0; i < 64; i++)
+ ll++;
+ [[omp::directive (teams distribute parallel for,
+ private(p),firstprivate (f),shared(s),default(shared),reduction(+:r),num_teams(nte),thread_limit(tl),
+ collapse(1),dist_schedule(static, 16),order(concurrent),
+ if (parallel: i2),num_threads (nth),proc_bind(spread),
+ lastprivate (l),schedule(static, 4),copyin(t),allocate (f))]]
+ for (int i = 0; i < 64; i++)
+ ll++;
+ [[omp::directive (teams distribute parallel for simd,
+ private(p),firstprivate (f),shared(s),default(shared),reduction(+:r),num_teams(nte),thread_limit(tl),
+ collapse(1),dist_schedule(static, 16),
+ if (parallel: i2),num_threads (nth),proc_bind(spread),
+ lastprivate (l),schedule(static, 4),order(concurrent),
+ safelen(8),simdlen(4),aligned(q: 32),if (simd: i3),nontemporal(ntm),copyin(t),
+ allocate (f))]]
+ for (int i = 0; i < 64; i++)
+ ll++;
+ [[omp::directive (teams distribute simd,
+ private(p),firstprivate (f),shared(s),default(shared),reduction(+:r),num_teams(nte),thread_limit(tl),
+ collapse(1),dist_schedule(static, 16),order(concurrent),
+ safelen(8),simdlen(4),aligned(q: 32),if(i3),nontemporal(ntm),allocate(f))]]
+ for (int i = 0; i < 64; i++)
+ ll++;
+ [[omp::directive (parallel master,
+ private (p),firstprivate (f),if (parallel: i2),default(shared),shared(s),reduction(+:r),
+ num_threads (nth),proc_bind(spread),copyin(t),allocate (f))]]
+ ;
+ [[omp::directive (parallel,
+ private (p),firstprivate (f),if (parallel: i2),default(shared),shared(s),reduction(+:r),
+ num_threads (nth),proc_bind(spread),copyin(t),allocate (f))]]
+ ;
+ [[using omp:sequence (directive (taskgroup, task_reduction (+:r2),allocate (r2)),
+ omp::directive (master taskloop,
+ private (p),firstprivate (f),lastprivate (l),shared (s),default(shared),grainsize (g),collapse(1),untied, if(taskloop: i1),final(fi),mergeable, priority (pp),
+ reduction(default, +:r),in_reduction(+:r2),allocate (f)))]]
+ for (int i = 0; i < 64; i++)
+ ll++;
+ [[using omp:directive (master)]];
+ [[omp::sequence (omp::directive (taskgroup task_reduction (+:r2),allocate (r2)),
+ directive (master taskloop simd,
+ private (p),firstprivate (f),lastprivate (l),shared (s),default(shared),grainsize (g),collapse(1),untied,if(taskloop: i1),if(simd: i2),final(fi),mergeable,priority (pp),
+ safelen(8),simdlen(4),linear(ll: 1),aligned(q: 32),reduction(default, +:r),in_reduction(+:r2),nontemporal(ntm),
+ order(concurrent),allocate (f)))]]
+ for (int i = 0; i < 64; i++)
+ ll++;
+ [[omp::directive (parallel master taskloop,
+ private (p),firstprivate (f),lastprivate (l),shared (s),default(shared),grainsize (g),collapse(1),untied,if(taskloop: i1),final(fi),mergeable,priority (pp),
+ reduction(default, +:r),if (parallel: i2),num_threads (nth),proc_bind(spread),copyin(t),allocate (f))]]
+ for (int i = 0; i < 64; i++)
+ ll++;
+ [[omp::directive (parallel master taskloop simd,
+ private (p),firstprivate (f),lastprivate (l),shared (s),default(shared),grainsize (g),collapse(1),untied,if(taskloop: i1),if(simd: i2),final(fi),mergeable,priority (pp),
+ safelen(8),simdlen(4),linear(ll: 1),aligned(q: 32),reduction(default, +:r),nontemporal(ntm),if (parallel: i2),num_threads (nth),proc_bind(spread),copyin(t),
+ order(concurrent),allocate (f))]]
+ for (int i = 0; i < 64; i++)
+ ll++;
+ [[omp::sequence (directive (taskgroup,task_reduction (+:r2),allocate (r2)),
+ directive (master taskloop,
+ private (p),firstprivate (f),lastprivate (l),shared (s),default(shared),num_tasks (nta),collapse(1),untied,if(i1),final(fi),mergeable,priority (pp),
+ reduction(default, +:r),in_reduction(+:r2)))]]
+ for (int i = 0; i < 64; i++)
+ ll++;
+ [[omp::sequence (omp::directive (taskgroup,task_reduction (+:r2),allocate (r2)),
+ omp::directive (master taskloop simd,
+ private (p),firstprivate (f),lastprivate (l),shared (s),default(shared),num_tasks (nta),collapse(1),untied,if(i1),final(fi),mergeable,priority (pp),
+ safelen(8),simdlen(4),linear(ll: 1),aligned(q: 32),reduction(default, +:r),in_reduction(+:r2),nontemporal(ntm),
+ order(concurrent),allocate (f)))]]
+ for (int i = 0; i < 64; i++)
+ ll++;
+ [[omp::directive (parallel master taskloop,
+ private (p),firstprivate (f),lastprivate (l),shared (s),default(shared),num_tasks (nta),collapse(1),untied if(i1),final(fi),mergeable priority (pp),
+ reduction(default, +:r),num_threads (nth),proc_bind(spread),copyin(t),allocate (f))]]
+ for (int i = 0; i < 64; i++)
+ ll++;
+ [[omp::directive (parallel master taskloop simd,
+ private (p),firstprivate (f),lastprivate (l),shared (s),default(shared),num_tasks (nta),collapse(1),untied if(i1),final(fi),mergeable priority (pp),
+ safelen(8),simdlen(4),linear(ll: 1),aligned(q: 32),reduction(default, +:r),nontemporal(ntm),num_threads (nth),proc_bind(spread),copyin(t),
+ order(concurrent),allocate (f))]]
+ for (int i = 0; i < 64; i++)
+ ll++;
+ [[omp::directive (loop, bind(thread),order(concurrent),
+ private (p),lastprivate (l),collapse(1),reduction(+:r))]]
+ for (l = 0; l < 64; ++l)
+ ll++;
+ [[omp::directive (parallel loop,
+ private (p),firstprivate (f),if (parallel: i2),default(shared),shared(s),copyin(t),reduction(+:r),num_threads (nth),proc_bind(spread),
+ lastprivate (l),collapse(1),bind(parallel),order(concurrent),allocate (f))]]
+ for (l = 0; l < 64; l++)
+ ll++;
+ [[omp::directive (parallel loop,
+ private (p),firstprivate (f),if (parallel: i2),default(shared),shared(s),copyin(t),reduction(+:r),num_threads (nth),proc_bind(spread),
+ lastprivate (l),collapse(1),allocate (f))]]
+ for (l = 0; l < 64; l++)
+ ll++;
+ [[omp::directive (teams loop,
+ private(p),firstprivate (f),shared(s),default(shared),reduction(+:r),num_teams(nte),thread_limit(tl),
+ collapse(1),lastprivate (l),bind(teams),allocate (f))]]
+ for (l = 0; l < 64; ++l)
+ ;
+ [[omp::directive (teams loop,
+ private(p),firstprivate (f),shared(s),default(shared),reduction(+:r),num_teams(nte),thread_limit(tl),
+ collapse(1),lastprivate (l),order(concurrent),allocate (f))]]
+ for (l = 0; l < 64; ++l)
+ ;
+ [[omp::directive (target parallel loop,
+ device(d),map (tofrom: m),if (target: i1),private (p),firstprivate (f),defaultmap(tofrom: scalar),is_device_ptr (idp),
+ if (parallel: i2),default(shared),shared(s),reduction(+:r),num_threads (nth),proc_bind(spread),
+ nowait depend(inout: dd[0]),lastprivate (l),bind(parallel),order(concurrent),collapse(1),
+ allocate (omp_default_mem_alloc: f),in_reduction(+:r2))]]
+ for (l = 0; l < 64; ++l)
+ ;
+ [[omp::directive (target parallel loop,
+ device(d),map (tofrom: m),if (target: i1),private (p),firstprivate (f),defaultmap(tofrom: scalar),is_device_ptr (idp),
+ if (parallel: i2),default(shared),shared(s),reduction(+:r),num_threads (nth),proc_bind(spread),
+ nowait depend(inout: dd[0]),lastprivate (l),order(concurrent),collapse(1),
+ allocate (omp_default_mem_alloc: f),in_reduction(+:r2))]]
+ for (l = 0; l < 64; ++l)
+ ;
+ [[omp::directive (target teams loop,
+ device(d),map (tofrom: m),if (target: i1),private (p),firstprivate (f),defaultmap(tofrom: scalar),is_device_ptr (idp),
+ shared(s),default(shared),reduction(+:r),num_teams(nte),thread_limit(tl),nowait,depend(inout: dd[0]),
+ lastprivate (l),bind(teams),collapse(1),
+ allocate (omp_default_mem_alloc: f),in_reduction(+:r2))]]
+ for (l = 0; l < 64; ++l)
+ ;
+ [[omp::directive (target teams loop,
+ device(d),map (tofrom: m),if (target: i1),private (p),firstprivate (f),defaultmap(tofrom: scalar),is_device_ptr (idp),
+ shared(s),default(shared),reduction(+:r),num_teams(nte),thread_limit(tl),nowait,depend(inout: dd[0]),
+ lastprivate (l),order(concurrent),collapse(1)
+ allocate (omp_default_mem_alloc: f),in_reduction(+:r2))]]
+ for (l = 0; l < 64; ++l)
+ ;
+ [[omp::directive (critical)]] {
+ }
+ [[omp::directive (critical (foobar),hint(omp_sync_hint_none))]]
+ ;
+ [[using omp:directive (taskwait, depend (inout: dd[0]))]]
+ ;
+ [[omp::directive (taskgroup, task_reduction(+:r2),allocate (r2))]]
+ ;
+ [[omp::directive (atomic, update,seq_cst,hint(omp_sync_hint_none))]]
+ p++;
+ [[omp::directive (atomic, read, hint(omp_sync_hint_none),relaxed)]]
+ f = p;
+ [[omp::directive (atomic,write, release hint(omp_sync_hint_none))]]
+ p = f;
+ [[omp::directive (flush)]]
+ ;
+ [[omp::directive (flush, acq_rel)]]
+ ;
+ [[omp::directive (flush (p, f))]]
+ ;
+ [[omp::directive (simd,
+ private (p),lastprivate (l),linear (ll:1),reduction(+:r),collapse(1),safelen(8),simdlen(4),aligned(q: 32),
+ nontemporal(ntm),if(i1))]]
+ for (int i = 0; i < 64; i++)
+ [[omp::directive (ordered, simd)]]
+ ll++;
+ [[omp::directive (for,
+ private (p),firstprivate (f),lastprivate (l),linear (ll:1),reduction(+:r),schedule(static, 4),collapse(1),nowait,
+ ordered, allocate (f))]]
+ for (int i = 0; i < 64; i++)
+ [[omp::directive (ordered, threads)]]
+ ll++;
+ [[omp::directive(for, ordered (1))]]
+ for (l = 0; l < 64; l++)
+ {
+ [[omp::directive(ordered, depend (sink: l - 1))]];
+ [[omp::directive(ordered, depend (source))]];
+ }
+ extern omp_depend_t depobj;
+ [[omp::directive (depobj(depobj),depend(in : dd[0]))]];
+ [[omp::directive (parallel)]] {
+ if (p) {
+ [[omp::directive (cancel, parallel)]];
+ } else {
+ [[omp::directive (cancellation point, parallel)]];
+ }
+ }
+ extern int t2;
+ [[omp::directive (threadprivate (t2))]]
+ extern int t2;
+ [[omp::directive (declare reduction (dr: int: omp_out += omp_in),initializer (omp_priv = 0))]]
+ ;
+}
+
+void corge1 ();
+
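+/* Declarative directives (declare variant, declare simd, declare target)
+   expressed as attributes on block-scope declarations.  */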
+void
+corge ()
+{
+ [[omp::directive (declare variant (corge1),match (construct={parallel,for}))]]
+ extern void corge2 ();
+ [[omp::sequence (directive (parallel), directive (for))]]
+ for (int i = 0; i < 5; i++)
+ corge2 ();
+ [[omp::directive (declare simd, simdlen(4),linear(l),aligned(p:4),uniform(p),inbranch),
+ omp::directive (declare simd,simdlen(8),notinbranch)]]
+ extern int corge3 (int l, int *p);
+ [[using omp:directive (declare simd, simdlen(4),linear(l),aligned(p:4),uniform(p),inbranch),
+ directive (declare simd, simdlen(8),notinbranch)]]
+ extern int corge4 (int l, int *p);
+ [[omp::sequence (directive (declare simd, simdlen(4),linear(l),aligned(p:4),uniform(p),inbranch),
+ omp::directive (declare simd, simdlen(8),notinbranch))]]
+ extern int corge5 (int l, int *p);
+ [[omp::directive (declare target)]];
+ extern void corge6 ();
+ [[omp::directive (end declare target)]];
+}
+
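+/* Scan reductions: the simd construct in attribute form combined with the
+   pragma form of the scan directive inside the loop body.  */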
+int
+garply (int a, int *c, int *d, int *e, int *f)
+{
+ int i;
+ [[omp::directive (simd, reduction (inscan, +: a))]]
+ for (i = 0; i < 64; i++)
+ {
+ d[i] = a;
+ #pragma omp scan exclusive (a)
+ a += c[i];
+ }
+ [[omp::directive (simd, reduction (inscan, +: a))]]
+ for (i = 0; i < 64; i++)
+ {
+ a += c[i];
+ #pragma omp scan inclusive (a)
+ d[i] = a;
+ }
+ return a;
+}
--- /dev/null
+// { dg-do compile { target c++11 } }
+
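+// Invalid or misplaced uses of the omp::directive and omp::sequence
+// attributes; each case carries its expected diagnostic in the accompanying
+// dg-error/dg-warning comment.
+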
+int i;
+int t1, t2, t3, t4, t5, t6, t7;
+
+void
+foo ()
+{
+ [[omp::directive]]; // { dg-error "'omp::directive' attribute requires argument" }
+ [[omp::directive ()]]; // { dg-error "expected OpenMP directive name" }
+ [[omp::directive (nonexistent foobar)]]; // { dg-error "unknown OpenMP directive name in 'omp::directive' attribute argument" }
+ [[omp::sequence]]; // { dg-error "'omp::sequence' attribute requires argument" }
+ [[omp::sequence()]]; // { dg-error "expected 'directive' or 'sequence'" }
+ [[omp::sequence(foobar())]]; // { dg-error "expected 'directive' or 'sequence'" }
+ [[omp::sequence(omp::foobar())]]; // { dg-error "expected 'directive' or 'sequence'" }
+ [[omp::sequence(directive(taskwait), foobar())]]; // { dg-error "expected 'directive' or 'sequence'" }
+ [[omp::sequence(omp::directive(taskwait), omp::foobar())]]; // { dg-error "expected 'directive' or 'sequence'" }
+ [[omp::sequence(directive(taskwait) foobar())]]; // { dg-error "expected '\\\)' before 'foobar'" }
+ [[omp::sequence(directive)]]; // { dg-error "expected '\\\(' before '\\\)' token" }
+ [[omp::sequence(omp::sequence)]]; // { dg-error "expected '\\\(' before '\\\)' token" }
+ [[omp::directive (parallel), omp::directive (single)]] // { dg-error "OpenMP construct among 'omp::directive' attributes requires all 'omp::directive' attributes on the same statement to be in the same 'omp::sequence'" }
+ ;
+ [[omp::directive (parallel)]] // { dg-error "OpenMP construct among 'omp::directive' attributes requires all 'omp::directive' attributes on the same statement to be in the same 'omp::sequence'" }
+ [[omp::directive (single)]]
+ ;
+ [[omp::directive (taskwait), omp::directive (taskyield)]] // { dg-error "multiple OpenMP standalone directives among 'omp::directive' attributes must be all within the same 'omp::sequence'" }
+ ;
+ [[omp::directive (taskwait)]]
+ [[omp::directive (taskyield)]] // { dg-error "multiple OpenMP standalone directives among 'omp::directive' attributes must be all within the same 'omp::sequence'" }
+ ;
+ [[omp::directive (flush)]] // { dg-error "standalone OpenMP directives in 'omp::directive' attribute can only appear on an empty statement" }
+ i++;
+ auto a = [] () [[omp::directive (threadprivate (t1))]] {}; // { dg-error "'omp::directive' not allowed to be specified in this context" }
+ int [[omp::directive (threadprivate (t2))]] b; // { dg-warning "attribute ignored" }
+ int *[[omp::directive (threadprivate (t3))]] c; // { dg-warning "'omp::directive' scoped attribute directive ignored" }
+ int &[[omp::directive (threadprivate (t4))]] d = b; // { dg-warning "'omp::directive' scoped attribute directive ignored" }
+ typedef int T [[omp::directive (threadprivate (t5))]]; // { dg-error "'omp::directive' not allowed to be specified in this context" }
+ int e[10] [[omp::directive (threadprivate (t6))]]; // { dg-error "'omp::directive' not allowed to be specified in this context" }
+ struct [[omp::directive (threadprivate (t7))]] S {}; // { dg-error "'omp::directive' not allowed to be specified in this context" }
+}