From bef9f0ba300a55d79a69aa172156072182176515 Mon Sep 17 00:00:00 2001
From: Harald Freudenberger <freude@linux.ibm.com>
Date: Thu, 23 May 2019 16:18:25 +0200
Subject: s390/crypto: fix gcm-aes-s390 selftest failures

From: Harald Freudenberger <freude@linux.ibm.com>

commit bef9f0ba300a55d79a69aa172156072182176515 upstream.

The current kernel uses improved crypto selftests. These
tests showed that the current implementation of gcm-aes-s390
is not able to deal with chunks of output buffers which are
not a multiple of 16 bytes. This patch introduces a rework
of the gcm aes s390 scatter walk handling which now is able
to handle any input and output scatter list chunk sizes
correctly.

Code has been verified by the crypto selftests, the tcrypt
kernel module and additional tests run via the af_alg interface.

Cc: <stable@vger.kernel.org>
Reported-by: Julian Wiedmann <jwi@linux.ibm.com>
Reviewed-by: Patrick Steuer <steuer@linux.ibm.com>
Signed-off-by: Harald Freudenberger <freude@linux.ibm.com>
Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

---
 arch/s390/crypto/aes_s390.c | 148 +++++++++++++++++++++++++++++++-------------
 1 file changed, 107 insertions(+), 41 deletions(-)

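For context while reading the hunks below: the walk helpers operate on
struct gcm_sg_walk, which predates this patch in aes_s390.c and therefore
does not appear in the diff. From the v4.19-era source it looks like this
(reproduced here only as a reading aid, not part of the change):

	struct gcm_sg_walk {
		struct scatter_walk walk;
		unsigned int walk_bytes;
		u8 *walk_ptr;
		unsigned int walk_bytes_remain;
		u8 buf[AES_BLOCK_SIZE];
		unsigned int buf_bytes;
		u8 *ptr;
		unsigned int nbytes;
	};
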
--- a/arch/s390/crypto/aes_s390.c
+++ b/arch/s390/crypto/aes_s390.c
@@ -826,19 +826,45 @@ static int gcm_aes_setauthsize(struct cr
 	return 0;
 }
 
-static void gcm_sg_walk_start(struct gcm_sg_walk *gw, struct scatterlist *sg,
-			      unsigned int len)
+static void gcm_walk_start(struct gcm_sg_walk *gw, struct scatterlist *sg,
+			   unsigned int len)
 {
 	memset(gw, 0, sizeof(*gw));
 	gw->walk_bytes_remain = len;
 	scatterwalk_start(&gw->walk, sg);
 }
 
-static int gcm_sg_walk_go(struct gcm_sg_walk *gw, unsigned int minbytesneeded)
+static inline unsigned int _gcm_sg_clamp_and_map(struct gcm_sg_walk *gw)
+{
+	struct scatterlist *nextsg;
+
+	gw->walk_bytes = scatterwalk_clamp(&gw->walk, gw->walk_bytes_remain);
+	while (!gw->walk_bytes) {
+		nextsg = sg_next(gw->walk.sg);
+		if (!nextsg)
+			return 0;
+		scatterwalk_start(&gw->walk, nextsg);
+		gw->walk_bytes = scatterwalk_clamp(&gw->walk,
+						   gw->walk_bytes_remain);
+	}
+	gw->walk_ptr = scatterwalk_map(&gw->walk);
+	return gw->walk_bytes;
+}
+
+static inline void _gcm_sg_unmap_and_advance(struct gcm_sg_walk *gw,
+					     unsigned int nbytes)
+{
+	gw->walk_bytes_remain -= nbytes;
+	scatterwalk_unmap(&gw->walk);
+	scatterwalk_advance(&gw->walk, nbytes);
+	scatterwalk_done(&gw->walk, 0, gw->walk_bytes_remain);
+	gw->walk_ptr = NULL;
+}
+
+static int gcm_in_walk_go(struct gcm_sg_walk *gw, unsigned int minbytesneeded)
 {
 	int n;
 
-	/* minbytesneeded <= AES_BLOCK_SIZE */
 	if (gw->buf_bytes && gw->buf_bytes >= minbytesneeded) {
 		gw->ptr = gw->buf;
 		gw->nbytes = gw->buf_bytes;
@@ -851,13 +877,11 @@ static int gcm_sg_walk_go(struct gcm_sg_
 		goto out;
 	}
 
-	gw->walk_bytes = scatterwalk_clamp(&gw->walk, gw->walk_bytes_remain);
-	if (!gw->walk_bytes) {
-		scatterwalk_start(&gw->walk, sg_next(gw->walk.sg));
-		gw->walk_bytes = scatterwalk_clamp(&gw->walk,
-						   gw->walk_bytes_remain);
+	if (!_gcm_sg_clamp_and_map(gw)) {
+		gw->ptr = NULL;
+		gw->nbytes = 0;
+		goto out;
 	}
-	gw->walk_ptr = scatterwalk_map(&gw->walk);
 
 	if (!gw->buf_bytes && gw->walk_bytes >= minbytesneeded) {
 		gw->ptr = gw->walk_ptr;
@@ -869,51 +893,90 @@ static int gcm_sg_walk_go(struct gcm_sg_
 		n = min(gw->walk_bytes, AES_BLOCK_SIZE - gw->buf_bytes);
 		memcpy(gw->buf + gw->buf_bytes, gw->walk_ptr, n);
 		gw->buf_bytes += n;
-		gw->walk_bytes_remain -= n;
-		scatterwalk_unmap(&gw->walk);
-		scatterwalk_advance(&gw->walk, n);
-		scatterwalk_done(&gw->walk, 0, gw->walk_bytes_remain);
-
+		_gcm_sg_unmap_and_advance(gw, n);
 		if (gw->buf_bytes >= minbytesneeded) {
 			gw->ptr = gw->buf;
 			gw->nbytes = gw->buf_bytes;
 			goto out;
 		}
-
-		gw->walk_bytes = scatterwalk_clamp(&gw->walk,
-						   gw->walk_bytes_remain);
-		if (!gw->walk_bytes) {
-			scatterwalk_start(&gw->walk, sg_next(gw->walk.sg));
-			gw->walk_bytes = scatterwalk_clamp(&gw->walk,
-							   gw->walk_bytes_remain);
+		if (!_gcm_sg_clamp_and_map(gw)) {
+			gw->ptr = NULL;
+			gw->nbytes = 0;
+			goto out;
 		}
-		gw->walk_ptr = scatterwalk_map(&gw->walk);
 	}
 
 out:
 	return gw->nbytes;
 }
 
-static void gcm_sg_walk_done(struct gcm_sg_walk *gw, unsigned int bytesdone)
+static int gcm_out_walk_go(struct gcm_sg_walk *gw, unsigned int minbytesneeded)
 {
-	int n;
+	if (gw->walk_bytes_remain == 0) {
+		gw->ptr = NULL;
+		gw->nbytes = 0;
+		goto out;
+	}
+
+	if (!_gcm_sg_clamp_and_map(gw)) {
+		gw->ptr = NULL;
+		gw->nbytes = 0;
+		goto out;
+	}
 
+	if (gw->walk_bytes >= minbytesneeded) {
+		gw->ptr = gw->walk_ptr;
+		gw->nbytes = gw->walk_bytes;
+		goto out;
+	}
+
+	scatterwalk_unmap(&gw->walk);
+	gw->walk_ptr = NULL;
+
+	gw->ptr = gw->buf;
+	gw->nbytes = sizeof(gw->buf);
+
+out:
+	return gw->nbytes;
+}
+
+static int gcm_in_walk_done(struct gcm_sg_walk *gw, unsigned int bytesdone)
+{
 	if (gw->ptr == NULL)
-		return;
+		return 0;
 
 	if (gw->ptr == gw->buf) {
-		n = gw->buf_bytes - bytesdone;
+		int n = gw->buf_bytes - bytesdone;
 		if (n > 0) {
 			memmove(gw->buf, gw->buf + bytesdone, n);
-			gw->buf_bytes -= n;
+			gw->buf_bytes = n;
 		} else
 			gw->buf_bytes = 0;
-	} else {
-		gw->walk_bytes_remain -= bytesdone;
-		scatterwalk_unmap(&gw->walk);
-		scatterwalk_advance(&gw->walk, bytesdone);
-		scatterwalk_done(&gw->walk, 0, gw->walk_bytes_remain);
-	}
+	} else
+		_gcm_sg_unmap_and_advance(gw, bytesdone);
+
+	return bytesdone;
+}
+
+static int gcm_out_walk_done(struct gcm_sg_walk *gw, unsigned int bytesdone)
+{
+	int i, n;
+
+	if (gw->ptr == NULL)
+		return 0;
+
+	if (gw->ptr == gw->buf) {
+		for (i = 0; i < bytesdone; i += n) {
+			if (!_gcm_sg_clamp_and_map(gw))
+				return i;
+			n = min(gw->walk_bytes, bytesdone - i);
+			memcpy(gw->walk_ptr, gw->buf + i, n);
+			_gcm_sg_unmap_and_advance(gw, n);
+		}
+	} else
+		_gcm_sg_unmap_and_advance(gw, bytesdone);
+
+	return bytesdone;
 }
 
 static int gcm_aes_crypt(struct aead_request *req, unsigned int flags)
@@ -926,7 +989,7 @@ static int gcm_aes_crypt(struct aead_req
 	unsigned int pclen = req->cryptlen;
 	int ret = 0;
 
-	unsigned int len, in_bytes, out_bytes,
+	unsigned int n, len, in_bytes, out_bytes,
 		     min_bytes, bytes, aad_bytes, pc_bytes;
 	struct gcm_sg_walk gw_in, gw_out;
 	u8 tag[GHASH_DIGEST_SIZE];
@@ -963,14 +1026,14 @@ static int gcm_aes_crypt(struct aead_req
 	*(u32 *)(param.j0 + ivsize) = 1;
 	memcpy(param.k, ctx->key, ctx->key_len);
 
-	gcm_sg_walk_start(&gw_in, req->src, len);
-	gcm_sg_walk_start(&gw_out, req->dst, len);
+	gcm_walk_start(&gw_in, req->src, len);
+	gcm_walk_start(&gw_out, req->dst, len);
 
 	do {
 		min_bytes = min_t(unsigned int,
 				  aadlen > 0 ? aadlen : pclen, AES_BLOCK_SIZE);
-		in_bytes = gcm_sg_walk_go(&gw_in, min_bytes);
-		out_bytes = gcm_sg_walk_go(&gw_out, min_bytes);
+		in_bytes = gcm_in_walk_go(&gw_in, min_bytes);
+		out_bytes = gcm_out_walk_go(&gw_out, min_bytes);
 		bytes = min(in_bytes, out_bytes);
 
 		if (aadlen + pclen <= bytes) {
@@ -997,8 +1060,11 @@ static int gcm_aes_crypt(struct aead_req
 				  gw_in.ptr + aad_bytes, pc_bytes,
 				  gw_in.ptr, aad_bytes);
 
-		gcm_sg_walk_done(&gw_in, aad_bytes + pc_bytes);
-		gcm_sg_walk_done(&gw_out, aad_bytes + pc_bytes);
+		n = aad_bytes + pc_bytes;
+		if (gcm_in_walk_done(&gw_in, n) != n)
+			return -ENOMEM;
+		if (gcm_out_walk_done(&gw_out, n) != n)
+			return -ENOMEM;
 		aadlen -= aad_bytes;
 		pclen -= pc_bytes;
 	} while (aadlen + pclen > 0);
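
The commit message mentions additional testing via the af_alg interface.
Below is a minimal userspace sketch of such a test (an illustration, not
part of this patch or of the kernel selftests): it drives gcm(aes) through
AF_ALG, which on s390 with CPACF resolves to the gcm-aes-s390 driver fixed
here. It assumes a kernel with CONFIG_CRYPTO_USER_API_AEAD; error handling
is omitted for brevity.

	/* Encrypt one AES block with gcm(aes) via AF_ALG. */
	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>
	#include <sys/socket.h>
	#include <linux/if_alg.h>

	#ifndef SOL_ALG
	#define SOL_ALG 279
	#endif

	int main(void)
	{
		struct sockaddr_alg sa = {
			.salg_family = AF_ALG,
			.salg_type   = "aead",
			.salg_name   = "gcm(aes)",
		};
		unsigned char key[16] = { 0 };		/* demo key: all zero */
		unsigned char iv[12]  = { 0 };		/* 96-bit GCM nonce   */
		unsigned char pt[16]  = "fifteen bytes..";
		unsigned char out[sizeof(pt) + 16];	/* ciphertext || tag  */
		char cbuf[CMSG_SPACE(sizeof(__u32)) +
			  CMSG_SPACE(sizeof(struct af_alg_iv) + sizeof(iv))] = { 0 };
		struct iovec iov = { .iov_base = pt, .iov_len = sizeof(pt) };
		struct msghdr msg = {
			.msg_control = cbuf, .msg_controllen = sizeof(cbuf),
			.msg_iov = &iov, .msg_iovlen = 1,
		};
		struct cmsghdr *cmsg;
		struct af_alg_iv *aiv;
		int tfm, op;

		tfm = socket(AF_ALG, SOCK_SEQPACKET, 0);
		bind(tfm, (struct sockaddr *)&sa, sizeof(sa));
		setsockopt(tfm, SOL_ALG, ALG_SET_KEY, key, sizeof(key));
		/* 16-byte tag, cf. gcm_aes_setauthsize() in the driver */
		setsockopt(tfm, SOL_ALG, ALG_SET_AEAD_AUTHSIZE, NULL, 16);
		op = accept(tfm, NULL, NULL);

		/* control messages: operation, then the IV */
		cmsg = CMSG_FIRSTHDR(&msg);
		cmsg->cmsg_level = SOL_ALG;
		cmsg->cmsg_type = ALG_SET_OP;
		cmsg->cmsg_len = CMSG_LEN(sizeof(__u32));
		*(__u32 *)CMSG_DATA(cmsg) = ALG_OP_ENCRYPT;

		cmsg = CMSG_NXTHDR(&msg, cmsg);
		cmsg->cmsg_level = SOL_ALG;
		cmsg->cmsg_type = ALG_SET_IV;
		cmsg->cmsg_len = CMSG_LEN(sizeof(struct af_alg_iv) + sizeof(iv));
		aiv = (struct af_alg_iv *)CMSG_DATA(cmsg);
		aiv->ivlen = sizeof(iv);
		memcpy(aiv->iv, iv, sizeof(iv));

		sendmsg(op, &msg, 0);
		if (read(op, out, sizeof(out)) != (ssize_t)sizeof(out))
			return 1;
		printf("ok: %zu bytes of ciphertext+tag\n", sizeof(out));
		return 0;
	}

A reproducer for the bug itself would additionally have to present the
source and destination as scatter list chunks that are not multiples of
16 bytes, which is exactly the case the improved in-kernel selftests
exercise and the reworked walk handling above now covers.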