2 Copyright 1988-2022 Free Software Foundation, Inc.
3 This is part of the GCC manual.
4 For copying conditions, see the copyright.rst file.
6 .. _powerpc-matrix-multiply-assist-built-in-functions:
8 PowerPC Matrix-Multiply Assist Built-in Functions
9 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
11 ISA 3.1 of the PowerPC added new Matrix-Multiply Assist (MMA) instructions.
12 GCC provides support for these instructions through the following built-in
13 functions which are enabled with the ``-mmma`` option.  The ``vec_t`` type
14 below is defined to be a normal vector unsigned char type.  The ``uint2``, ``uint4``
15 and ``uint8`` parameters are 2-bit, 4-bit and 8-bit unsigned integer constants
16 respectively.  The compiler will verify that they are constants and that
17 their values are within range.
19 The built-in functions supported are:
23 void __builtin_mma_xvi4ger8 (__vector_quad *, vec_t, vec_t);
24 void __builtin_mma_xvi8ger4 (__vector_quad *, vec_t, vec_t);
25 void __builtin_mma_xvi16ger2 (__vector_quad *, vec_t, vec_t);
26 void __builtin_mma_xvi16ger2s (__vector_quad *, vec_t, vec_t);
27 void __builtin_mma_xvf16ger2 (__vector_quad *, vec_t, vec_t);
28 void __builtin_mma_xvbf16ger2 (__vector_quad *, vec_t, vec_t);
29 void __builtin_mma_xvf32ger (__vector_quad *, vec_t, vec_t);
31 void __builtin_mma_xvi4ger8pp (__vector_quad *, vec_t, vec_t);
32 void __builtin_mma_xvi8ger4pp (__vector_quad *, vec_t, vec_t);
33 void __builtin_mma_xvi8ger4spp (__vector_quad *, vec_t, vec_t);
34 void __builtin_mma_xvi16ger2pp (__vector_quad *, vec_t, vec_t);
35 void __builtin_mma_xvi16ger2spp (__vector_quad *, vec_t, vec_t);
36 void __builtin_mma_xvf16ger2pp (__vector_quad *, vec_t, vec_t);
37 void __builtin_mma_xvf16ger2pn (__vector_quad *, vec_t, vec_t);
38 void __builtin_mma_xvf16ger2np (__vector_quad *, vec_t, vec_t);
39 void __builtin_mma_xvf16ger2nn (__vector_quad *, vec_t, vec_t);
40 void __builtin_mma_xvbf16ger2pp (__vector_quad *, vec_t, vec_t);
41 void __builtin_mma_xvbf16ger2pn (__vector_quad *, vec_t, vec_t);
42 void __builtin_mma_xvbf16ger2np (__vector_quad *, vec_t, vec_t);
43 void __builtin_mma_xvbf16ger2nn (__vector_quad *, vec_t, vec_t);
44 void __builtin_mma_xvf32gerpp (__vector_quad *, vec_t, vec_t);
45 void __builtin_mma_xvf32gerpn (__vector_quad *, vec_t, vec_t);
46 void __builtin_mma_xvf32gernp (__vector_quad *, vec_t, vec_t);
47 void __builtin_mma_xvf32gernn (__vector_quad *, vec_t, vec_t);
49 void __builtin_mma_pmxvi4ger8 (__vector_quad *, vec_t, vec_t, uint4, uint4, uint8);
50 void __builtin_mma_pmxvi4ger8pp (__vector_quad *, vec_t, vec_t, uint4, uint4, uint8);
52 void __builtin_mma_pmxvi8ger4 (__vector_quad *, vec_t, vec_t, uint4, uint4, uint4);
53 void __builtin_mma_pmxvi8ger4pp (__vector_quad *, vec_t, vec_t, uint4, uint4, uint4);
54 void __builtin_mma_pmxvi8ger4spp (__vector_quad *, vec_t, vec_t, uint4, uint4, uint4);
56 void __builtin_mma_pmxvi16ger2 (__vector_quad *, vec_t, vec_t, uint4, uint4, uint2);
57 void __builtin_mma_pmxvi16ger2s (__vector_quad *, vec_t, vec_t, uint4, uint4, uint2);
58 void __builtin_mma_pmxvf16ger2 (__vector_quad *, vec_t, vec_t, uint4, uint4, uint2);
59 void __builtin_mma_pmxvbf16ger2 (__vector_quad *, vec_t, vec_t, uint4, uint4, uint2);
61 void __builtin_mma_pmxvi16ger2pp (__vector_quad *, vec_t, vec_t, uint4, uint4, uint2);
62 void __builtin_mma_pmxvi16ger2spp (__vector_quad *, vec_t, vec_t, uint4, uint4, uint2);
63 void __builtin_mma_pmxvf16ger2pp (__vector_quad *, vec_t, vec_t, uint4, uint4, uint2);
64 void __builtin_mma_pmxvf16ger2pn (__vector_quad *, vec_t, vec_t, uint4, uint4, uint2);
65 void __builtin_mma_pmxvf16ger2np (__vector_quad *, vec_t, vec_t, uint4, uint4, uint2);
66 void __builtin_mma_pmxvf16ger2nn (__vector_quad *, vec_t, vec_t, uint4, uint4, uint2);
67 void __builtin_mma_pmxvbf16ger2pp (__vector_quad *, vec_t, vec_t, uint4, uint4, uint2);
68 void __builtin_mma_pmxvbf16ger2pn (__vector_quad *, vec_t, vec_t, uint4, uint4, uint2);
69 void __builtin_mma_pmxvbf16ger2np (__vector_quad *, vec_t, vec_t, uint4, uint4, uint2);
70 void __builtin_mma_pmxvbf16ger2nn (__vector_quad *, vec_t, vec_t, uint4, uint4, uint2);
72 void __builtin_mma_pmxvf32ger (__vector_quad *, vec_t, vec_t, uint4, uint4);
73 void __builtin_mma_pmxvf32gerpp (__vector_quad *, vec_t, vec_t, uint4, uint4);
74 void __builtin_mma_pmxvf32gerpn (__vector_quad *, vec_t, vec_t, uint4, uint4);
75 void __builtin_mma_pmxvf32gernp (__vector_quad *, vec_t, vec_t, uint4, uint4);
76 void __builtin_mma_pmxvf32gernn (__vector_quad *, vec_t, vec_t, uint4, uint4);
78 void __builtin_mma_xvf64ger (__vector_quad *, __vector_pair, vec_t);
79 void __builtin_mma_xvf64gerpp (__vector_quad *, __vector_pair, vec_t);
80 void __builtin_mma_xvf64gerpn (__vector_quad *, __vector_pair, vec_t);
81 void __builtin_mma_xvf64gernp (__vector_quad *, __vector_pair, vec_t);
82 void __builtin_mma_xvf64gernn (__vector_quad *, __vector_pair, vec_t);
84 void __builtin_mma_pmxvf64ger (__vector_quad *, __vector_pair, vec_t, uint4, uint2);
85 void __builtin_mma_pmxvf64gerpp (__vector_quad *, __vector_pair, vec_t, uint4, uint2);
86 void __builtin_mma_pmxvf64gerpn (__vector_quad *, __vector_pair, vec_t, uint4, uint2);
87 void __builtin_mma_pmxvf64gernp (__vector_quad *, __vector_pair, vec_t, uint4, uint2);
88 void __builtin_mma_pmxvf64gernn (__vector_quad *, __vector_pair, vec_t, uint4, uint2);
90 void __builtin_mma_xxmtacc (__vector_quad *);
91 void __builtin_mma_xxmfacc (__vector_quad *);
92 void __builtin_mma_xxsetaccz (__vector_quad *);
94 void __builtin_mma_build_acc (__vector_quad *, vec_t, vec_t, vec_t, vec_t);
95 void __builtin_mma_disassemble_acc (void *, __vector_quad *);
97 void __builtin_vsx_build_pair (__vector_pair *, vec_t, vec_t);
98 void __builtin_vsx_disassemble_pair (void *, __vector_pair *);
100 vec_t __builtin_vsx_xvcvspbf16 (vec_t);
101 vec_t __builtin_vsx_xvcvbf16spn (vec_t);
103 __vector_pair __builtin_vsx_lxvp (size_t, __vector_pair *);
104 void __builtin_vsx_stxvp (__vector_pair, size_t, __vector_pair *);