/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Shared glue code for 128bit block ciphers, AVX assembler macros
 *
 * Copyright © 2012-2013 Jussi Kivilinna <jussi.kivilinna@iki.fi>
 */

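/* Load eight consecutive 16-byte blocks from src into registers x0..x7. */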
#define load_8way(src, x0, x1, x2, x3, x4, x5, x6, x7) \
	vmovdqu (0*16)(src), x0; \
	vmovdqu (1*16)(src), x1; \
	vmovdqu (2*16)(src), x2; \
	vmovdqu (3*16)(src), x3; \
	vmovdqu (4*16)(src), x4; \
	vmovdqu (5*16)(src), x5; \
	vmovdqu (6*16)(src), x6; \
	vmovdqu (7*16)(src), x7;

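/* Store eight 16-byte blocks from registers x0..x7 to dst. */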
#define store_8way(dst, x0, x1, x2, x3, x4, x5, x6, x7) \
	vmovdqu x0, (0*16)(dst); \
	vmovdqu x1, (1*16)(dst); \
	vmovdqu x2, (2*16)(dst); \
	vmovdqu x3, (3*16)(dst); \
	vmovdqu x4, (4*16)(dst); \
	vmovdqu x5, (5*16)(dst); \
	vmovdqu x6, (6*16)(dst); \
	vmovdqu x7, (7*16)(dst);

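/*
 * CBC decryption output: xor blocks x1..x7 with the preceding ciphertext
 * blocks read from src, then store all eight blocks to dst.  x0 is stored
 * unmodified; the caller is expected to xor it with the IV.
 */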
#define store_cbc_8way(src, dst, x0, x1, x2, x3, x4, x5, x6, x7) \
	vpxor (0*16)(src), x1, x1; \
	vpxor (1*16)(src), x2, x2; \
	vpxor (2*16)(src), x3, x3; \
	vpxor (3*16)(src), x4, x4; \
	vpxor (4*16)(src), x5, x5; \
	vpxor (5*16)(src), x6, x6; \
	vpxor (6*16)(src), x7, x7; \
	store_8way(dst, x0, x1, x2, x3, x4, x5, x6, x7);

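/*
 * Increment the 128-bit little-endian counter in x by one.  minus_one must
 * hold -1 in the low qword and 0 in the high qword; the vpcmpeqq/vpslldq
 * pair builds a mask that propagates the carry out of the low qword into
 * the high qword.
 */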
#define inc_le128(x, minus_one, tmp) \
	vpcmpeqq minus_one, x, tmp; \
	vpsubq minus_one, x, x; \
	vpslldq $8, tmp, tmp; \
	vpsubq tmp, x, x;

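/*
 * CTR setup: load the 128-bit little-endian counter at (iv), expand it into
 * eight consecutive counter values, byte-swap each one with the bswap mask
 * into x0..x7 for encryption, and write the ninth counter value back to
 * (iv) for the next call.
 */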
#define load_ctr_8way(iv, bswap, x0, x1, x2, x3, x4, x5, x6, x7, t0, t1, t2) \
	vpcmpeqd t0, t0, t0; \
	vpsrldq $8, t0, t0; /* low: -1, high: 0 */ \
	vmovdqa bswap, t1; \
	\
	/* load IV and byteswap */ \
	vmovdqu (iv), x7; \
	vpshufb t1, x7, x0; \
	\
	/* construct IVs */ \
	inc_le128(x7, t0, t2); \
	vpshufb t1, x7, x1; \
	inc_le128(x7, t0, t2); \
	vpshufb t1, x7, x2; \
	inc_le128(x7, t0, t2); \
	vpshufb t1, x7, x3; \
	inc_le128(x7, t0, t2); \
	vpshufb t1, x7, x4; \
	inc_le128(x7, t0, t2); \
	vpshufb t1, x7, x5; \
	inc_le128(x7, t0, t2); \
	vpshufb t1, x7, x6; \
	inc_le128(x7, t0, t2); \
	vmovdqa x7, t2; \
	vpshufb t1, x7, x7; \
	inc_le128(t2, t0, t1); \
	vmovdqu t2, (iv);

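/* CTR finish: xor the encrypted counter blocks with src and store to dst. */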
#define store_ctr_8way(src, dst, x0, x1, x2, x3, x4, x5, x6, x7) \
	vpxor (0*16)(src), x0, x0; \
	vpxor (1*16)(src), x1, x1; \
	vpxor (2*16)(src), x2, x2; \
	vpxor (3*16)(src), x3, x3; \
	vpxor (4*16)(src), x4, x4; \
	vpxor (5*16)(src), x5, x5; \
	vpxor (6*16)(src), x6, x6; \
	vpxor (7*16)(src), x7, x7; \
	store_8way(dst, x0, x1, x2, x3, x4, x5, x6, x7);

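/*
 * Multiply the 128-bit value in iv by x in GF(2^128), using the
 * little-endian block order of XTS: double each qword, then use mask
 * (expected to be an xts_gf128mul_and_shl1_mask constant) to carry bit 63
 * into bit 64 and to fold bit 127 back in as the reduction polynomial.
 * tmp is clobbered.
 */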
#define gf128mul_x_ble(iv, mask, tmp) \
	vpsrad $31, iv, tmp; \
	vpaddq iv, iv, iv; \
	vpshufd $0x13, tmp, tmp; \
	vpand mask, tmp, tmp; \
	vpxor tmp, iv, iv;

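/*
 * XTS input step: load the tweak from (iv), generate eight consecutive
 * tweaks, xor them with the source blocks into x0..x7, stash the tweaks at
 * dst for the output step, and write the ninth tweak back to (iv).
 */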
#define load_xts_8way(iv, src, dst, x0, x1, x2, x3, x4, x5, x6, x7, tiv, t0, \
		      t1, xts_gf128mul_and_shl1_mask) \
	vmovdqa xts_gf128mul_and_shl1_mask, t0; \
	\
	/* load IV */ \
	vmovdqu (iv), tiv; \
	vpxor (0*16)(src), tiv, x0; \
	vmovdqu tiv, (0*16)(dst); \
	\
	/* construct and store IVs, also xor with source */ \
	gf128mul_x_ble(tiv, t0, t1); \
	vpxor (1*16)(src), tiv, x1; \
	vmovdqu tiv, (1*16)(dst); \
	\
	gf128mul_x_ble(tiv, t0, t1); \
	vpxor (2*16)(src), tiv, x2; \
	vmovdqu tiv, (2*16)(dst); \
	\
	gf128mul_x_ble(tiv, t0, t1); \
	vpxor (3*16)(src), tiv, x3; \
	vmovdqu tiv, (3*16)(dst); \
	\
	gf128mul_x_ble(tiv, t0, t1); \
	vpxor (4*16)(src), tiv, x4; \
	vmovdqu tiv, (4*16)(dst); \
	\
	gf128mul_x_ble(tiv, t0, t1); \
	vpxor (5*16)(src), tiv, x5; \
	vmovdqu tiv, (5*16)(dst); \
	\
	gf128mul_x_ble(tiv, t0, t1); \
	vpxor (6*16)(src), tiv, x6; \
	vmovdqu tiv, (6*16)(dst); \
	\
	gf128mul_x_ble(tiv, t0, t1); \
	vpxor (7*16)(src), tiv, x7; \
	vmovdqu tiv, (7*16)(dst); \
	\
	gf128mul_x_ble(tiv, t0, t1); \
	vmovdqu tiv, (iv);

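/*
 * XTS output step: xor the cipher output with the tweaks saved at dst by
 * load_xts_8way and store the final blocks over them.
 */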
#define store_xts_8way(dst, x0, x1, x2, x3, x4, x5, x6, x7) \
	vpxor (0*16)(dst), x0, x0; \
	vpxor (1*16)(dst), x1, x1; \
	vpxor (2*16)(dst), x2, x2; \
	vpxor (3*16)(dst), x3, x3; \
	vpxor (4*16)(dst), x4, x4; \
	vpxor (5*16)(dst), x5, x5; \
	vpxor (6*16)(dst), x6, x6; \
	vpxor (7*16)(dst), x7, x7; \
	store_8way(dst, x0, x1, x2, x3, x4, x5, x6, x7);