if (TARGET_XTHEADVECTOR && !check_type (return_type, argument_types))
return;
- b.add_overloaded_function (function_instance, *group.shape);
+ b.add_overloaded_function (function_instance, *group.shape,
+ group.required_extensions);
b.add_unique_function (function_instance, (*group.shape), return_type,
- argument_types);
+ argument_types, group.required_extensions);
}
/* Add a function instance for every operand && predicate && args
if (strstr (group.base_name, "w") && (sew == 8 || sew ==16))
return;
- b.add_overloaded_function (function_instance, *group.shape);
+ b.add_overloaded_function (function_instance, *group.shape,
+ group.required_extensions);
b.add_unique_function (function_instance, (*group.shape), return_type,
- argument_types);
+ argument_types, group.required_extensions);
}
/* th_loadstore_width_def class. */
argument_types.quick_push (arg_type);
b.add_unique_function (function_instance, (*group.shape), return_type,
- argument_types);
+ argument_types, group.required_extensions);
}
}
{
auto_vec<tree> argument_types;
b.add_unique_function (get_read_vl_instance (), (*group.shape),
- size_type_node, argument_types);
+ size_type_node, argument_types,
+ group.required_extensions);
}
char *get_name (function_builder &b, const function_instance &instance,
*group.shape, group.ops_infos.types[0],
group.preds[0], &group.ops_infos);
b.add_unique_function (function_instance, (*group.shape),
- long_unsigned_type_node, argument_types);
+ long_unsigned_type_node, argument_types,
+ group.required_extensions);
}
char *get_name (function_builder &b, const function_instance &instance,
/* Generate hash value based on the overload_name and the argument list passed
by the user when calling. */
hashval_t overloaded_hash (const vec<tree, va_gc> &);
+
+ /* The required extension for the registered function.  */
+ enum required_ext required;
};
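For context, here is a minimal standalone model of what recording the required extension per registered function buys (illustrative C++ only; the _model types, enum values and the extension_enabled stub are invented for this sketch and are not GCC's types): the expander can look up the stored requirement and name the missing extension in the diagnostic instead of issuing a generic "requires V" error.

/* Illustrative standalone model, not GCC code; all names are invented.  */
#include <cstdio>
#include <string>
#include <vector>

enum required_ext_model { VECTOR_EXT_M, ZVBB_EXT_M, ZVKSH_EXT_M };

struct registered_function_model
{
  std::string name;
  required_ext_model required;	/* Plays the role of the new member.  */
};

static const char *
isa_name (required_ext_model req)
{
  switch (req)
    {
    case VECTOR_EXT_M: return "v";
    case ZVBB_EXT_M: return "zvbb";
    case ZVKSH_EXT_M: return "zvksh";
    }
  return "?";
}

/* Stand-in for the TARGET_* macro checks.  */
static bool
extension_enabled (required_ext_model req)
{
  return req == VECTOR_EXT_M;
}

int
main ()
{
  std::vector<registered_function_model> fns
    = {{"__riscv_vsetvl_e8m4", VECTOR_EXT_M},
       {"__riscv_vsm3me_vv_u32m1", ZVKSH_EXT_M}};

  for (const registered_function_model &fn : fns)
    if (!extension_enabled (fn.required))
      std::printf ("built-in function '%s' requires the '%s' ISA extension\n",
                   fn.name.c_str (), isa_name (fn.required));
  return 0;
}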
/* Hash traits for registered_function. */
const char *name, tree fntype, tree attrs,
bool placeholder_p, const char *overload_name,
const vec<tree> &argument_types,
+ enum required_ext required,
bool overloaded_p = false)
{
unsigned int code = vec_safe_length (registered_functions);
rfn.overload_name = overload_name ? xstrdup (overload_name) : NULL;
rfn.argument_types = argument_types;
rfn.overloaded_p = overloaded_p;
+ rfn.required = required;
vec_safe_push (registered_functions, &rfn);
return rfn;
function_builder::add_unique_function (const function_instance &instance,
const function_shape *shape,
tree return_type,
- vec<tree> &argument_types)
+ vec<tree> &argument_types,
+ enum required_ext required)
{
/* Do not add this function if it is invalid. */
if (!check_required_extensions (instance))
tree attrs = get_attributes (instance);
registered_function &rfn
= add_function (instance, name, fntype, attrs, false, overload_name,
- argument_types.copy ());
+ argument_types.copy (), required);
/* Enter the function into the hash table. */
hashval_t hash = instance.hash ();
tree attrs = get_attributes (instance);
bool placeholder_p = !m_direct_overloads;
add_function (instance, overload_name, fntype, attrs, placeholder_p, NULL,
- vNULL);
+ vNULL, required);
/* Enter the function into the non-overloaded hash table. */
hash = rfn.overloaded_hash ();
/* Add overloaded function for gcc. */
void
function_builder::add_overloaded_function (const function_instance &instance,
- const function_shape *shape)
+ const function_shape *shape,
+ enum required_ext required)
{
if (!check_required_extensions (instance))
return;
for the overloaded function. */
tree fntype = build_function_type (void_type_node, void_list_node);
add_function (instance, name, fntype, NULL_TREE, m_direct_overloads, name,
- vNULL, true);
+ vNULL, required, true);
obstack_free (&m_string_obstack, name);
}
}
{
registered_function &rfn = *(*registered_functions)[code];
- if (!TARGET_VECTOR)
+ if (!required_extensions_specified (rfn.required))
{
error_at (EXPR_LOCATION (exp),
- "built-in function %qE requires the V ISA extension", exp);
+ "built-in function %qE requires the %qs ISA extension",
+ exp,
+ required_ext_to_isa_name (rfn.required));
return target;
}
ZVKSED_EXT, /* Crypto vector Zvksed sub-ext */
ZVKSH_EXT, /* Crypto vector Zvksh sub-ext */
XTHEADVECTOR_EXT, /* XTheadVector extension */
+ /* Please also update required_ext_to_isa_name and
+    required_extensions_specified below when adding or removing
+    enum values.  */
};
+static inline const char *
+required_ext_to_isa_name (enum required_ext required)
+{
+ switch (required)
+ {
+ case VECTOR_EXT:
+ return "v";
+ case ZVBB_EXT:
+ return "zvbb";
+ case ZVBB_OR_ZVKB_EXT:
+ return "zvbb or zvkb";
+ case ZVBC_EXT:
+ return "zvbc";
+ case ZVKG_EXT:
+ return "zvkg";
+ case ZVKNED_EXT:
+ return "zvkned";
+ case ZVKNHA_OR_ZVKNHB_EXT:
+ return "zvknha or zvknhb";
+ case ZVKNHB_EXT:
+ return "zvknhb";
+ case ZVKSED_EXT:
+ return "zvksed";
+ case ZVKSH_EXT:
+ return "zvksh";
+ case XTHEADVECTOR_EXT:
+ return "xthreadvector";
+ default:
+ gcc_unreachable ();
+ }
+
+ gcc_unreachable ();
+}
+
+static inline bool
+required_extensions_specified (enum required_ext required)
+{
+ switch (required)
+ {
+ case VECTOR_EXT:
+ return TARGET_VECTOR;
+ case ZVBB_EXT:
+ return TARGET_ZVBB;
+ case ZVBB_OR_ZVKB_EXT:
+ return TARGET_ZVBB || TARGET_ZVKB;
+ case ZVBC_EXT:
+ return TARGET_ZVBC;
+ case ZVKG_EXT:
+ return TARGET_ZVKG;
+ case ZVKNED_EXT:
+ return TARGET_ZVKNED;
+ case ZVKNHA_OR_ZVKNHB_EXT:
+ return TARGET_ZVKNHA || TARGET_ZVKNHB;
+ case ZVKNHB_EXT:
+ return TARGET_ZVKNHB;
+ case ZVKSED_EXT:
+ return TARGET_ZVKSED;
+ case ZVKSH_EXT:
+ return TARGET_ZVKSH;
+ case XTHEADVECTOR_EXT:
+ return TARGET_XTHEADVECTOR;
+ default:
+ gcc_unreachable ();
+ }
+
+ gcc_unreachable ();
+}
+
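As an aside on the sync-up comment above: one hypothetical way to make the enum, required_ext_to_isa_name and required_extensions_specified impossible to desynchronize is to generate all three from a single X-macro table. This is only a sketch of an alternative design, not what the patch does; the table is truncated to three entries and the have_* booleans stand in for the real TARGET_* macros.

/* Standalone sketch of an X-macro variant; not part of the patch.  */
#include <cstdio>

/* One row per extension: enum tag, user-visible ISA name, availability.  */
#define REQUIRED_EXT_TABLE(X)                   \
  X (VECTOR_EXT, "v", have_v)                   \
  X (ZVBB_EXT, "zvbb", have_zvbb)               \
  X (ZVKSH_EXT, "zvksh", have_zvksh)

/* Stand-ins for the TARGET_* macros.  */
static bool have_v = true;
static bool have_zvbb = false;
static bool have_zvksh = false;

enum required_ext
{
#define DEFINE_ENUM(TAG, NAME, AVAIL) TAG,
  REQUIRED_EXT_TABLE (DEFINE_ENUM)
#undef DEFINE_ENUM
};

static const char *
required_ext_to_isa_name (enum required_ext required)
{
  switch (required)
    {
#define NAME_CASE(TAG, NAME, AVAIL) case TAG: return NAME;
      REQUIRED_EXT_TABLE (NAME_CASE)
#undef NAME_CASE
    }
  return "?";
}

static bool
required_extensions_specified (enum required_ext required)
{
  switch (required)
    {
#define AVAIL_CASE(TAG, NAME, AVAIL) case TAG: return AVAIL;
      REQUIRED_EXT_TABLE (AVAIL_CASE)
#undef AVAIL_CASE
    }
  return false;
}

int
main ()
{
  enum required_ext req = ZVBB_EXT;
  if (!required_extensions_specified (req))
    std::printf ("requires the '%s' ISA extension\n",
                 required_ext_to_isa_name (req));
  return 0;
}

The explicit switches in the patch are arguably easier to read and to grep, so the duplication they carry is a reasonable trade-off; the sketch only shows how the "keep these in sync" obligation could be discharged mechanically.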
/* Enumerates the RVV operand types. */
enum operand_type_index
{
void allocate_argument_types (const function_instance &, vec<tree> &) const;
void apply_predication (const function_instance &, tree, vec<tree> &) const;
void add_unique_function (const function_instance &, const function_shape *,
- tree, vec<tree> &);
+ tree, vec<tree> &, enum required_ext);
void add_overloaded_function (const function_instance &,
- const function_shape *);
+ const function_shape *,
+ enum required_ext);
void register_function_group (const function_group_info &);
void append_name (const char *);
void append_base_name (const char *);
registered_function &add_function (const function_instance &, const char *,
tree, tree, bool, const char *,
- const vec<tree> &, bool);
+ const vec<tree> &, enum required_ext,
+ bool);
/* True if we should create a separate decl for each instance of an
overloaded function, instead of using function_builder. */
--- /dev/null
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gc -mabi=lp64d -O3" } */
+
+#include "riscv_vector.h"
+
+size_t
+test_3 (size_t vl)
+{
+ return __riscv_vsetvl_e8m4 (vl); /* { dg-error {built-in function '__riscv_vsetvl_e8m4\(vl\)' requires the 'v' ISA extension} } */
+}
--- /dev/null
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gc -mabi=lp64d -O3" } */
+
+#include "riscv_vector.h"
+
+vuint32m1_t
+__attribute__((target("arch=+v")))
+test_1 (vuint32m1_t op_1, vuint32m1_t op_2, size_t vl)
+{
+ return __riscv_vsm3me_vv_u32m1 (op_1, op_2, vl); /* { dg-error {built-in function '__riscv_vsm3me_vv_u32m1\(op_1, op_2, vl\)' requires the 'zvksh' ISA extension} } */
+}
--- /dev/null
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gc -mabi=lp64d -O3" } */
+
+#include "riscv_vector.h"
+
+vuint32m1_t
+__attribute__((target("arch=+v")))
+test_1 (vuint32m1_t op_1, vuint32m1_t op_2, size_t vl)
+{
+ return __riscv_vandn_vv_u32m1 (op_1, op_2, vl); /* { dg-error {built-in function '__riscv_vandn_vv_u32m1\(op_1, op_2, vl\)' requires the 'zvbb or zvkb' ISA extension} } */
+}
--- /dev/null
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gc -mabi=lp64d -O3" } */
+
+#include "riscv_vector.h"
+
+vuint32m1_t
+__attribute__((target("arch=+v")))
+test_1 (vuint32m1_t op_1, size_t vl)
+{
+ return __riscv_vclz_v_u32m1 (op_1, vl); /* { dg-error {built-in function '__riscv_vclz_v_u32m1\(op_1, vl\)' requires the 'zvbb' ISA extension} } */
+}
--- /dev/null
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gc -mabi=lp64d -O3" } */
+
+#include "riscv_vector.h"
+
+vuint64m1_t
+__attribute__((target("arch=+v")))
+test_1 (vuint64m1_t op_1, vuint64m1_t op_2, size_t vl)
+{
+ return __riscv_vclmul_vv_u64m1 (op_1, op_2, vl); /* { dg-error {built-in function '__riscv_vclmul_vv_u64m1\(op_1, op_2, vl\)' requires the 'zvbc' ISA extension} } */
+}
--- /dev/null
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gc -mabi=lp64d -O3" } */
+
+#include "riscv_vector.h"
+
+vuint32m1_t
+__attribute__((target("arch=+v")))
+test_1 (vuint32m1_t dest, vuint32m1_t op_1, vuint32m1_t op_2, size_t vl)
+{
+ return __riscv_vghsh_vv_u32m1 (dest, op_1, op_2, vl); /* { dg-error {built-in function '__riscv_vghsh_vv_u32m1\(dest, op_1, op_2, vl\)' requires the 'zvkg' ISA extension} } */
+}
--- /dev/null
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gc -mabi=lp64d -O3" } */
+
+#include "riscv_vector.h"
+
+vuint32m1_t
+__attribute__((target("arch=+v")))
+test_1 (vuint32m1_t dest, vuint32mf2_t op_1, size_t vl)
+{
+ return __riscv_vaesef_vs_u32mf2_u32m1 (dest, op_1, vl); /* { dg-error {built-in function '__riscv_vaesef_vs_u32mf2_u32m1\(dest, op_1, vl\)' requires the 'zvkned' ISA extension} } */
+}
--- /dev/null
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gc -mabi=lp64d -O3" } */
+
+#include "riscv_vector.h"
+
+vuint32m1_t
+__attribute__((target("arch=+v")))
+test_1 (vuint32m1_t dest, vuint32m1_t op_1, vuint32m1_t op_2, size_t vl)
+{
+ return __riscv_vsha2ms_vv_u32m1 (dest, op_1, op_2, vl); /* { dg-error {built-in function '__riscv_vsha2ms_vv_u32m1\(dest, op_1, op_2, vl\)' requires the 'zvknha or zvknhb' ISA extension} } */
+}
--- /dev/null
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gc -mabi=lp64d -O3" } */
+
+#include "riscv_vector.h"
+
+vuint64m1_t
+__attribute__((target("arch=+v")))
+test_1 (vuint64m1_t dest, vuint64m1_t op_1, vuint64m1_t op_2, size_t vl)
+{
+ return __riscv_vsha2ms_vv_u64m1 (dest, op_1, op_2, vl); /* { dg-error {built-in function '__riscv_vsha2ms_vv_u64m1\(dest, op_1, op_2, vl\)' requires the 'zvknhb' ISA extension} } */
+}
--- /dev/null
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gc -mabi=lp64d -O3" } */
+
+#include "riscv_vector.h"
+
+vuint32m1_t
+__attribute__((target("arch=+v")))
+test_1 (vuint32m1_t op_1, size_t vl)
+{
+ return __riscv_vsm4k_vi_u32m1 (op_1, 0, vl); /* { dg-error {built-in function '__riscv_vsm4k_vi_u32m1\(op_1, 0, vl\)' requires the 'zvksed' ISA extension} } */
+}
size_t test_1 (size_t vl)
{
- return __riscv_vsetvl_e8m4 (vl); /* { dg-error {built-in function '__riscv_vsetvl_e8m4\(vl\)' requires the V ISA extension} } */
+ return __riscv_vsetvl_e8m4 (vl); /* { dg-error {built-in function '__riscv_vsetvl_e8m4\(vl\)' requires the 'v' ISA extension} } */
}
size_t
test_3 (size_t vl)
{
- return __riscv_vsetvl_e8m4 (vl); /* { dg-error {built-in function '__riscv_vsetvl_e8m4\(vl\)' requires the V ISA extension} } */
+ return __riscv_vsetvl_e8m4 (vl); /* { dg-error {built-in function '__riscv_vsetvl_e8m4\(vl\)' requires the 'v' ISA extension} } */
}