From ba6771c0a0bc2fac9d6a8759bab8493bd1cffe3b Mon Sep 17 00:00:00 2001
From: Eric Biggers <ebiggers@google.com>
Date: Thu, 31 Jan 2019 23:51:38 -0800
Subject: crypto: x86/aegis - fix handling chunked inputs and MAY_SLEEP

From: Eric Biggers <ebiggers@google.com>

commit ba6771c0a0bc2fac9d6a8759bab8493bd1cffe3b upstream.

The x86 AEGIS implementations all fail the improved AEAD tests because
they produce the wrong result with some data layouts. The issue is that
they assume that if the skcipher_walk API gives 'nbytes' not aligned to
the walksize (a.k.a. walk.stride), then it is the end of the data. In
fact, this can happen before the end.
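
To make the contract concrete, here is a sketch of the corrected pattern
(BLOCK_SIZE stands in for the per-variant AEGIS*_BLOCK_SIZE; the
ops->crypt_blocks() and ops->crypt_tail() callbacks are the driver's own):
a partial 'nbytes' is handed back to skcipher_walk_done(), which re-presents
it with the next chunk, so only a partial block left after the loop is
truly the end of the data:

	while (walk->nbytes >= BLOCK_SIZE) {
		/* Process every complete block in this chunk. */
		ops->crypt_blocks(state, round_down(walk->nbytes, BLOCK_SIZE),
				  walk->src.virt.addr, walk->dst.virt.addr);
		/* Hand any partial remainder back; the walk returns it
		 * again at the start of the next chunk. */
		skcipher_walk_done(walk, walk->nbytes % BLOCK_SIZE);
	}
	if (walk->nbytes) {
		/* Now a partial block really is the end of the data. */
		ops->crypt_tail(state, walk->nbytes, walk->src.virt.addr,
				walk->dst.virt.addr);
		skcipher_walk_done(walk, 0);
	}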

Also, when the CRYPTO_TFM_REQ_MAY_SLEEP flag is given, they can
incorrectly sleep in the skcipher_walk_*() functions while preemption
has been disabled by kernel_fpu_begin().
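
Sketched with the encrypt path (skcipher_walk_aead_encrypt() is what
ops->skcipher_walk_init points to for encryption), the fix maps the walk
before the FPU region and passes atomic=true, so none of the later walk
calls can sleep while preemption is off:

	struct skcipher_walk walk;

	/* May sleep or allocate here; preemption is still enabled.
	 * atomic=true makes the later walk calls non-sleeping. */
	skcipher_walk_aead_encrypt(&walk, req, true);

	kernel_fpu_begin();	/* disables preemption */
	/* ... consume the walk; skcipher_walk_done() stays atomic ... */
	kernel_fpu_end();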

Fix these bugs.

Fixes: 1d373d4e8e15 ("crypto: x86 - Add optimized AEGIS implementations")
Cc: <stable@vger.kernel.org> # v4.18+
Cc: Ondrej Mosnacek <omosnace@redhat.com>
Signed-off-by: Eric Biggers <ebiggers@google.com>
Reviewed-by: Ondrej Mosnacek <omosnace@redhat.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

---
 arch/x86/crypto/aegis128-aesni-glue.c  | 38 +++++++++++++--------------------
 arch/x86/crypto/aegis128l-aesni-glue.c | 38 +++++++++++++--------------------
 arch/x86/crypto/aegis256-aesni-glue.c  | 38 +++++++++++++--------------------
 3 files changed, 45 insertions(+), 69 deletions(-)

--- a/arch/x86/crypto/aegis128-aesni-glue.c
+++ b/arch/x86/crypto/aegis128-aesni-glue.c
@@ -119,31 +119,20 @@ static void crypto_aegis128_aesni_proces
 }
 
 static void crypto_aegis128_aesni_process_crypt(
-		struct aegis_state *state, struct aead_request *req,
+		struct aegis_state *state, struct skcipher_walk *walk,
 		const struct aegis_crypt_ops *ops)
 {
-	struct skcipher_walk walk;
-	u8 *src, *dst;
-	unsigned int chunksize, base;
-
-	ops->skcipher_walk_init(&walk, req, false);
-
-	while (walk.nbytes) {
-		src = walk.src.virt.addr;
-		dst = walk.dst.virt.addr;
-		chunksize = walk.nbytes;
-
-		ops->crypt_blocks(state, chunksize, src, dst);
-
-		base = chunksize & ~(AEGIS128_BLOCK_SIZE - 1);
-		src += base;
-		dst += base;
-		chunksize &= AEGIS128_BLOCK_SIZE - 1;
-
-		if (chunksize > 0)
-			ops->crypt_tail(state, chunksize, src, dst);
+	while (walk->nbytes >= AEGIS128_BLOCK_SIZE) {
+		ops->crypt_blocks(state,
+				  round_down(walk->nbytes, AEGIS128_BLOCK_SIZE),
+				  walk->src.virt.addr, walk->dst.virt.addr);
+		skcipher_walk_done(walk, walk->nbytes % AEGIS128_BLOCK_SIZE);
+	}
 
-		skcipher_walk_done(&walk, 0);
+	if (walk->nbytes) {
+		ops->crypt_tail(state, walk->nbytes, walk->src.virt.addr,
+				walk->dst.virt.addr);
+		skcipher_walk_done(walk, 0);
 	}
 }
 
@@ -186,13 +175,16 @@ static void crypto_aegis128_aesni_crypt(
 {
 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
 	struct aegis_ctx *ctx = crypto_aegis128_aesni_ctx(tfm);
+	struct skcipher_walk walk;
 	struct aegis_state state;
 
+	ops->skcipher_walk_init(&walk, req, true);
+
 	kernel_fpu_begin();
 
 	crypto_aegis128_aesni_init(&state, ctx->key.bytes, req->iv);
 	crypto_aegis128_aesni_process_ad(&state, req->src, req->assoclen);
-	crypto_aegis128_aesni_process_crypt(&state, req, ops);
+	crypto_aegis128_aesni_process_crypt(&state, &walk, ops);
 	crypto_aegis128_aesni_final(&state, tag_xor, req->assoclen, cryptlen);
 
 	kernel_fpu_end();
--- a/arch/x86/crypto/aegis128l-aesni-glue.c
+++ b/arch/x86/crypto/aegis128l-aesni-glue.c
@@ -119,31 +119,20 @@ static void crypto_aegis128l_aesni_proce
 }
 
 static void crypto_aegis128l_aesni_process_crypt(
-		struct aegis_state *state, struct aead_request *req,
+		struct aegis_state *state, struct skcipher_walk *walk,
 		const struct aegis_crypt_ops *ops)
 {
-	struct skcipher_walk walk;
-	u8 *src, *dst;
-	unsigned int chunksize, base;
-
-	ops->skcipher_walk_init(&walk, req, false);
-
-	while (walk.nbytes) {
-		src = walk.src.virt.addr;
-		dst = walk.dst.virt.addr;
-		chunksize = walk.nbytes;
-
-		ops->crypt_blocks(state, chunksize, src, dst);
-
-		base = chunksize & ~(AEGIS128L_BLOCK_SIZE - 1);
-		src += base;
-		dst += base;
-		chunksize &= AEGIS128L_BLOCK_SIZE - 1;
-
-		if (chunksize > 0)
-			ops->crypt_tail(state, chunksize, src, dst);
+	while (walk->nbytes >= AEGIS128L_BLOCK_SIZE) {
+		ops->crypt_blocks(state, round_down(walk->nbytes,
+						    AEGIS128L_BLOCK_SIZE),
+				  walk->src.virt.addr, walk->dst.virt.addr);
+		skcipher_walk_done(walk, walk->nbytes % AEGIS128L_BLOCK_SIZE);
+	}
 
-		skcipher_walk_done(&walk, 0);
+	if (walk->nbytes) {
+		ops->crypt_tail(state, walk->nbytes, walk->src.virt.addr,
+				walk->dst.virt.addr);
+		skcipher_walk_done(walk, 0);
 	}
 }
 
@@ -186,13 +175,16 @@ static void crypto_aegis128l_aesni_crypt
 {
 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
 	struct aegis_ctx *ctx = crypto_aegis128l_aesni_ctx(tfm);
+	struct skcipher_walk walk;
 	struct aegis_state state;
 
+	ops->skcipher_walk_init(&walk, req, true);
+
 	kernel_fpu_begin();
 
 	crypto_aegis128l_aesni_init(&state, ctx->key.bytes, req->iv);
 	crypto_aegis128l_aesni_process_ad(&state, req->src, req->assoclen);
-	crypto_aegis128l_aesni_process_crypt(&state, req, ops);
+	crypto_aegis128l_aesni_process_crypt(&state, &walk, ops);
 	crypto_aegis128l_aesni_final(&state, tag_xor, req->assoclen, cryptlen);
 
 	kernel_fpu_end();
--- a/arch/x86/crypto/aegis256-aesni-glue.c
+++ b/arch/x86/crypto/aegis256-aesni-glue.c
@@ -119,31 +119,20 @@ static void crypto_aegis256_aesni_proces
 }
 
 static void crypto_aegis256_aesni_process_crypt(
-		struct aegis_state *state, struct aead_request *req,
+		struct aegis_state *state, struct skcipher_walk *walk,
 		const struct aegis_crypt_ops *ops)
 {
-	struct skcipher_walk walk;
-	u8 *src, *dst;
-	unsigned int chunksize, base;
-
-	ops->skcipher_walk_init(&walk, req, false);
-
-	while (walk.nbytes) {
-		src = walk.src.virt.addr;
-		dst = walk.dst.virt.addr;
-		chunksize = walk.nbytes;
-
-		ops->crypt_blocks(state, chunksize, src, dst);
-
-		base = chunksize & ~(AEGIS256_BLOCK_SIZE - 1);
-		src += base;
-		dst += base;
-		chunksize &= AEGIS256_BLOCK_SIZE - 1;
-
-		if (chunksize > 0)
-			ops->crypt_tail(state, chunksize, src, dst);
+	while (walk->nbytes >= AEGIS256_BLOCK_SIZE) {
+		ops->crypt_blocks(state,
+				  round_down(walk->nbytes, AEGIS256_BLOCK_SIZE),
+				  walk->src.virt.addr, walk->dst.virt.addr);
+		skcipher_walk_done(walk, walk->nbytes % AEGIS256_BLOCK_SIZE);
+	}
 
-		skcipher_walk_done(&walk, 0);
+	if (walk->nbytes) {
+		ops->crypt_tail(state, walk->nbytes, walk->src.virt.addr,
+				walk->dst.virt.addr);
+		skcipher_walk_done(walk, 0);
 	}
 }
 
@@ -186,13 +175,16 @@ static void crypto_aegis256_aesni_crypt(
 {
 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
 	struct aegis_ctx *ctx = crypto_aegis256_aesni_ctx(tfm);
+	struct skcipher_walk walk;
 	struct aegis_state state;
 
+	ops->skcipher_walk_init(&walk, req, true);
+
 	kernel_fpu_begin();
 
 	crypto_aegis256_aesni_init(&state, ctx->key, req->iv);
 	crypto_aegis256_aesni_process_ad(&state, req->src, req->assoclen);
-	crypto_aegis256_aesni_process_crypt(&state, req, ops);
+	crypto_aegis256_aesni_process_crypt(&state, &walk, ops);
 	crypto_aegis256_aesni_final(&state, tag_xor, req->assoclen, cryptlen);
 
 	kernel_fpu_end();