(V16HI "v16hi") (V8HI "v8hi")
(V32QI "v32qi") (V16QI "v16qi")])
+;; Mapping of vector modes to a V*SImode of the same size
+(define_mode_attr ssedvecmode
+ [(V64QI "V16SI") (V32QI "V8SI") (V16QI "V4SI")])
+
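+;; The same mapping, in lower case, for building insn and gen function names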
+(define_mode_attr ssedvecmodelower
+ [(V64QI "v16si") (V32QI "v8si") (V16QI "v4si")])
+
;; Mapping of vector modes to a vector mode of double size
(define_mode_attr ssedoublevecmode
[(V64QI "V128QI") (V32HI "V64HI") (V16SI "V32SI") (V8DI "V16DI")
[(set_attr "prefix" "evex")
(set_attr "mode" "<sseinsnmode>")])
-(define_mode_attr VI1SI
- [(V64QI "V16SI") (V32QI "V8SI") (V16QI "V4SI")])
-
-(define_mode_attr vi1si
- [(V64QI "v16si") (V32QI "v8si") (V16QI "v4si")])
-
(define_expand "usdot_prod<mode>"
- [(match_operand:<VI1SI> 0 "register_operand")
+ [(match_operand:<ssedvecmode> 0 "register_operand")
(match_operand:VI1_AVX512VNNI 1 "register_operand")
(match_operand:VI1_AVX512VNNI 2 "register_operand")
- (match_operand:<VI1SI> 3 "register_operand")]
+ (match_operand:<ssedvecmode> 3 "register_operand")]
"(<MODE_SIZE> == 64
|| ((TARGET_AVX512VNNI && TARGET_AVX512VL)
|| TARGET_AVXVNNI))"
{
- operands[1] = lowpart_subreg (<VI1SI>mode,
+ operands[1] = lowpart_subreg (<ssedvecmode>mode,
force_reg (<MODE>mode, operands[1]),
<MODE>mode);
- operands[2] = lowpart_subreg (<VI1SI>mode,
+ operands[2] = lowpart_subreg (<ssedvecmode>mode,
force_reg (<MODE>mode, operands[2]),
<MODE>mode);
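+ /* The dot-product insn wants the accumulator in the destination
+    register, so initialize operand 0 from operand 3 first.  */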
emit_insn (gen_rtx_SET (operands[0], operands[3]));
- emit_insn (gen_vpdpbusd_<vi1si> (operands[0], operands[3],
+ emit_insn (gen_vpdpbusd_<ssedvecmodelower> (operands[0], operands[3],
operands[1], operands[2]));
DONE;
})
(UNSPEC_VPDPBSUD "bsud") (UNSPEC_VPDPBSUDS "bsuds")
(UNSPEC_VPDPBUUD "buud") (UNSPEC_VPDPBUUDS "buuds")])
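+;; Signed-by-signed QImode dot product accumulating into SImode,
+;; matching the AVX-VNNI-INT8 vpdpbssd instruction.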
+(define_expand "sdot_prod<mode>"
+ [(match_operand:<ssedvecmode> 0 "register_operand")
+ (match_operand:VI1 1 "register_operand")
+ (match_operand:VI1 2 "register_operand")
+ (match_operand:<ssedvecmode> 3 "register_operand")]
+ "TARGET_AVXVNNIINT8"
+{
+ operands[1] = lowpart_subreg (<ssedvecmode>mode,
+ force_reg (<MODE>mode, operands[1]),
+ <MODE>mode);
+ operands[2] = lowpart_subreg (<ssedvecmode>mode,
+ force_reg (<MODE>mode, operands[2]),
+ <MODE>mode);
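+ /* vpdpbssd wants the accumulator in the destination register,
+    so initialize operand 0 from operand 3 first.  */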
+ emit_insn (gen_rtx_SET (operands[0], operands[3]));
+ emit_insn (gen_vpdpbssd_<ssedvecmodelower> (operands[0], operands[3],
+ operands[1], operands[2]));
+ DONE;
+})
+
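+;; Unsigned-by-unsigned QImode dot product accumulating into SImode,
+;; matching the AVX-VNNI-INT8 vpdpbuud instruction.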
+(define_expand "udot_prod<mode>"
+ [(match_operand:<ssedvecmode> 0 "register_operand")
+ (match_operand:VI1 1 "register_operand")
+ (match_operand:VI1 2 "register_operand")
+ (match_operand:<ssedvecmode> 3 "register_operand")]
+ "TARGET_AVXVNNIINT8"
+{
+ operands[1] = lowpart_subreg (<ssedvecmode>mode,
+ force_reg (<MODE>mode, operands[1]),
+ <MODE>mode);
+ operands[2] = lowpart_subreg (<ssedvecmode>mode,
+ force_reg (<MODE>mode, operands[2]),
+ <MODE>mode);
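+ /* vpdpbuud wants the accumulator in the destination register,
+    so initialize operand 0 from operand 3 first.  */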
+ emit_insn (gen_rtx_SET (operands[0], operands[3]));
+ emit_insn (gen_vpdpbuud_<ssedvecmodelower> (operands[0], operands[3],
+ operands[1], operands[2]));
+ DONE;
+})
+
(define_insn "vpdp<vpdotprodtype>_<mode>"
[(set (match_operand:VI4_AVX 0 "register_operand" "=x")
(unspec:VI4_AVX
--- /dev/null
+/* { dg-do compile } */
+/* { dg-options "-mavxvnniint8 -O2" } */
+/* { dg-final { scan-assembler "vpdpbssd\t" } } */
+/* { dg-final { scan-assembler "vpdpbuud\t" } } */
+
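+/* The signed char loop below should be auto-vectorized with vpdpbssd.  */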
+int __attribute__((noinline, noclone, optimize("tree-vectorize")))
+sdot_prod_qi (char * restrict a, char * restrict b,
+ int c, int n)
+{
+ int i;
+ for (i = 0; i < n; i++)
+ {
+ c += ((int) a[i] * (int) b[i]);
+ }
+ return c;
+}
+
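+/* The unsigned char loop below should be auto-vectorized with vpdpbuud.  */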
+int __attribute__((noinline, noclone, optimize("tree-vectorize")))
+udot_prod_qi (unsigned char * restrict a, unsigned char * restrict b,
+ int c, int n)
+{
+ int i;
+ for (i = 0; i < n; i++)
+ {
+ c += ((int) a[i] * (int) b[i]);
+ }
+ return c;
+}
--- /dev/null
+/* { dg-do run } */
+/* { dg-options "-O2 -mavxvnniint8" } */
+/* { dg-require-effective-target avxvnniint8 } */
+
+#define AVXVNNIINT8
+#ifndef CHECK
+#define CHECK "avx-check.h"
+#endif
+
+#ifndef TEST
+#define TEST avx_test
+#endif
+
+#include CHECK
+#include "vnniint8-auto-vectorize-1.c"
+
+#define N 256
+char a_i8[N], b_i8[N];
+unsigned char c_u8[N], d_u8[N];
+int i8_exp, i8_ref;
+
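+/* Scalar reference implementations with vectorization disabled, used to
+   check the results of the vectorized kernels included above.  */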
+int __attribute__((noipa, optimize("no-tree-vectorize")))
+sdot_prod_qi_scalar (char * restrict a, char * restrict b,
+ int c, int n)
+{
+ int i;
+ for (i = 0; i < n; i++)
+ {
+ c += ((int) a[i] * (int) b[i]);
+ }
+ return c;
+}
+
+int __attribute__((noipa, optimize("no-tree-vectorize")))
+udot_prod_qi_scalar (unsigned char * restrict a, unsigned char * restrict b,
+ int c, int n)
+{
+ int i;
+ for (i = 0; i < n; i++)
+ {
+ c += ((int) a[i] * (int) b[i]);
+ }
+ return c;
+}
+
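+/* Fill the input arrays and seed both accumulators with the same value.  */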
+void init ()
+{
+ int i;
+
+ i8_exp = i8_ref = 127;
+
+ for (i = 0; i < N; i++)
+ {
+ a_i8[i] = (-i + 4) % 128;
+ b_i8[i] = (i + 1) % 128;
+ c_u8[i] = (i + 3) % 256;
+ d_u8[i] = (i + 5) % 256;
+ }
+}
+
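+/* Compare the auto-vectorized kernels against the scalar references.  */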
+void
+TEST (void)
+{
+ init ();
+ i8_exp = sdot_prod_qi (a_i8, b_i8, i8_exp, N);
+ i8_ref = sdot_prod_qi_scalar (a_i8, b_i8, i8_ref, N);
+ if (i8_exp != i8_ref)
+ abort ();
+
+ init ();
+ i8_exp = udot_prod_qi (c_u8, d_u8, i8_exp, N);
+ i8_ref = udot_prod_qi_scalar (c_u8, d_u8, i8_ref, N);
+ if (i8_exp != i8_ref)
+ abort ();
+}