]> git.ipfire.org Git - thirdparty/kernel/stable.git/blame - crypto/algif_skcipher.c
crypto: caam - Remove unused dentry members
[thirdparty/kernel/stable.git] / crypto / algif_skcipher.c
CommitLineData
8ff59090
HX
1/*
2 * algif_skcipher: User-space interface for skcipher algorithms
3 *
4 * This file provides the user-space API for symmetric key ciphers.
5 *
6 * Copyright (c) 2010 Herbert Xu <herbert@gondor.apana.org.au>
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the Free
10 * Software Foundation; either version 2 of the License, or (at your option)
11 * any later version.
12 *
e870456d
SM
13 * The following concept of the memory management is used:
14 *
15 * The kernel maintains two SGLs, the TX SGL and the RX SGL. The TX SGL is
16 * filled by user space with the data submitted via sendpage/sendmsg. Filling
17 * up the TX SGL does not cause a crypto operation -- the data will only be
18 * tracked by the kernel. Upon receipt of one recvmsg call, the caller must
19 * provide a buffer which is tracked with the RX SGL.
20 *
21 * During the processing of the recvmsg operation, the cipher request is
22 * allocated and prepared. As part of the recvmsg operation, the processed
23 * TX buffers are extracted from the TX SGL into a separate SGL.
24 *
25 * After the completion of the crypto operation, the RX SGL and the cipher
26 * request is released. The extracted TX SGL parts are released together with
27 * the RX SGL release.
8ff59090
HX
28 */
29
30#include <crypto/scatterwalk.h>
31#include <crypto/skcipher.h>
32#include <crypto/if_alg.h>
33#include <linux/init.h>
34#include <linux/list.h>
35#include <linux/kernel.h>
174cd4b1 36#include <linux/sched/signal.h>
8ff59090
HX
37#include <linux/mm.h>
38#include <linux/module.h>
39#include <linux/net.h>
40#include <net/sock.h>
41
e870456d 42struct skcipher_tsgl {
8ff59090 43 struct list_head list;
8ff59090 44 int cur;
8ff59090
HX
45 struct scatterlist sg[0];
46};
47
e870456d
SM
48struct skcipher_rsgl {
49 struct af_alg_sgl sgl;
50 struct list_head list;
51 size_t sg_num_bytes;
52};
53
54struct skcipher_async_req {
55 struct kiocb *iocb;
56 struct sock *sk;
57
58 struct skcipher_rsgl first_sgl;
59 struct list_head rsgl_list;
60
61 struct scatterlist *tsgl;
62 unsigned int tsgl_entries;
63
64 unsigned int areqlen;
65 struct skcipher_request req;
66};
67
dd504589
HX
68struct skcipher_tfm {
69 struct crypto_skcipher *skcipher;
70 bool has_key;
71};
72
8ff59090 73struct skcipher_ctx {
e870456d 74 struct list_head tsgl_list;
8ff59090
HX
75
76 void *iv;
77
78 struct af_alg_completion completion;
79
652d5b8a 80 size_t used;
e870456d 81 size_t rcvused;
8ff59090 82
8ff59090
HX
83 bool more;
84 bool merge;
85 bool enc;
86
e870456d 87 unsigned int len;
a596999b
TS
88};
89
/*
 * Number of usable scatterlist entries per TX SGL node, sized so that a
 * node fits in 4096 bytes; one slot is reserved for chaining (-1).
 */
#define MAX_SGL_ENTS ((4096 - sizeof(struct skcipher_tsgl)) / \
		      sizeof(struct scatterlist) - 1)
92
e870456d 93static inline int skcipher_sndbuf(struct sock *sk)
a596999b 94{
e870456d
SM
95 struct alg_sock *ask = alg_sk(sk);
96 struct skcipher_ctx *ctx = ask->private;
a596999b 97
e870456d
SM
98 return max_t(int, max_t(int, sk->sk_sndbuf & PAGE_MASK, PAGE_SIZE) -
99 ctx->used, 0);
a596999b
TS
100}
101
e870456d 102static inline bool skcipher_writable(struct sock *sk)
a596999b 103{
e870456d 104 return PAGE_SIZE <= skcipher_sndbuf(sk);
a596999b
TS
105}
106
e870456d 107static inline int skcipher_rcvbuf(struct sock *sk)
8ff59090
HX
108{
109 struct alg_sock *ask = alg_sk(sk);
110 struct skcipher_ctx *ctx = ask->private;
111
e870456d
SM
112 return max_t(int, max_t(int, sk->sk_rcvbuf & PAGE_MASK, PAGE_SIZE) -
113 ctx->rcvused, 0);
0f6bb83c
HX
114}
115
e870456d 116static inline bool skcipher_readable(struct sock *sk)
0f6bb83c 117{
e870456d 118 return PAGE_SIZE <= skcipher_rcvbuf(sk);
8ff59090
HX
119}
120
e870456d 121static int skcipher_alloc_tsgl(struct sock *sk)
8ff59090
HX
122{
123 struct alg_sock *ask = alg_sk(sk);
124 struct skcipher_ctx *ctx = ask->private;
e870456d 125 struct skcipher_tsgl *sgl;
8ff59090
HX
126 struct scatterlist *sg = NULL;
127
e870456d
SM
128 sgl = list_entry(ctx->tsgl_list.prev, struct skcipher_tsgl, list);
129 if (!list_empty(&ctx->tsgl_list))
8ff59090
HX
130 sg = sgl->sg;
131
132 if (!sg || sgl->cur >= MAX_SGL_ENTS) {
133 sgl = sock_kmalloc(sk, sizeof(*sgl) +
134 sizeof(sgl->sg[0]) * (MAX_SGL_ENTS + 1),
135 GFP_KERNEL);
136 if (!sgl)
137 return -ENOMEM;
138
139 sg_init_table(sgl->sg, MAX_SGL_ENTS + 1);
140 sgl->cur = 0;
141
142 if (sg)
c56f6d12 143 sg_chain(sg, MAX_SGL_ENTS + 1, sgl->sg);
8ff59090 144
e870456d 145 list_add_tail(&sgl->list, &ctx->tsgl_list);
8ff59090
HX
146 }
147
148 return 0;
149}
150
e870456d
SM
151static unsigned int skcipher_count_tsgl(struct sock *sk, size_t bytes)
152{
153 struct alg_sock *ask = alg_sk(sk);
154 struct skcipher_ctx *ctx = ask->private;
155 struct skcipher_tsgl *sgl, *tmp;
156 unsigned int i;
157 unsigned int sgl_count = 0;
158
159 if (!bytes)
160 return 0;
161
162 list_for_each_entry_safe(sgl, tmp, &ctx->tsgl_list, list) {
163 struct scatterlist *sg = sgl->sg;
164
165 for (i = 0; i < sgl->cur; i++) {
166 sgl_count++;
167 if (sg[i].length >= bytes)
168 return sgl_count;
169
170 bytes -= sg[i].length;
171 }
172 }
173
174 return sgl_count;
175}
176
177static void skcipher_pull_tsgl(struct sock *sk, size_t used,
178 struct scatterlist *dst)
8ff59090
HX
179{
180 struct alg_sock *ask = alg_sk(sk);
181 struct skcipher_ctx *ctx = ask->private;
e870456d 182 struct skcipher_tsgl *sgl;
8ff59090 183 struct scatterlist *sg;
e870456d 184 unsigned int i;
8ff59090 185
e870456d
SM
186 while (!list_empty(&ctx->tsgl_list)) {
187 sgl = list_first_entry(&ctx->tsgl_list, struct skcipher_tsgl,
8ff59090
HX
188 list);
189 sg = sgl->sg;
190
191 for (i = 0; i < sgl->cur; i++) {
652d5b8a 192 size_t plen = min_t(size_t, used, sg[i].length);
e870456d 193 struct page *page = sg_page(sg + i);
8ff59090 194
e870456d 195 if (!page)
8ff59090
HX
196 continue;
197
e870456d
SM
198 /*
199 * Assumption: caller created skcipher_count_tsgl(len)
200 * SG entries in dst.
201 */
202 if (dst)
203 sg_set_page(dst + i, page, plen, sg[i].offset);
204
8ff59090
HX
205 sg[i].length -= plen;
206 sg[i].offset += plen;
207
208 used -= plen;
209 ctx->used -= plen;
210
211 if (sg[i].length)
212 return;
e870456d
SM
213
214 if (!dst)
215 put_page(page);
8ff59090
HX
216 sg_assign_page(sg + i, NULL);
217 }
218
219 list_del(&sgl->list);
e870456d
SM
220 sock_kfree_s(sk, sgl, sizeof(*sgl) + sizeof(sgl->sg[0]) *
221 (MAX_SGL_ENTS + 1));
8ff59090
HX
222 }
223
224 if (!ctx->used)
225 ctx->merge = 0;
226}
227
e870456d 228static void skcipher_free_areq_sgls(struct skcipher_async_req *areq)
8ff59090 229{
e870456d 230 struct sock *sk = areq->sk;
8ff59090
HX
231 struct alg_sock *ask = alg_sk(sk);
232 struct skcipher_ctx *ctx = ask->private;
e870456d
SM
233 struct skcipher_rsgl *rsgl, *tmp;
234 struct scatterlist *tsgl;
235 struct scatterlist *sg;
236 unsigned int i;
237
238 list_for_each_entry_safe(rsgl, tmp, &areq->rsgl_list, list) {
239 ctx->rcvused -= rsgl->sg_num_bytes;
240 af_alg_free_sg(&rsgl->sgl);
241 list_del(&rsgl->list);
242 if (rsgl != &areq->first_sgl)
243 sock_kfree_s(sk, rsgl, sizeof(*rsgl));
244 }
245
246 tsgl = areq->tsgl;
247 for_each_sg(tsgl, sg, areq->tsgl_entries, i) {
248 if (!sg_page(sg))
249 continue;
250 put_page(sg_page(sg));
251 }
8ff59090 252
e870456d
SM
253 if (areq->tsgl && areq->tsgl_entries)
254 sock_kfree_s(sk, tsgl, areq->tsgl_entries * sizeof(*tsgl));
8ff59090
HX
255}
256
257static int skcipher_wait_for_wmem(struct sock *sk, unsigned flags)
258{
d9dc8b0f 259 DEFINE_WAIT_FUNC(wait, woken_wake_function);
8ff59090 260 int err = -ERESTARTSYS;
d9dc8b0f 261 long timeout;
8ff59090
HX
262
263 if (flags & MSG_DONTWAIT)
264 return -EAGAIN;
265
9cd3e072 266 sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
8ff59090 267
d9dc8b0f 268 add_wait_queue(sk_sleep(sk), &wait);
8ff59090
HX
269 for (;;) {
270 if (signal_pending(current))
271 break;
8ff59090 272 timeout = MAX_SCHEDULE_TIMEOUT;
d9dc8b0f 273 if (sk_wait_event(sk, &timeout, skcipher_writable(sk), &wait)) {
8ff59090
HX
274 err = 0;
275 break;
276 }
277 }
d9dc8b0f 278 remove_wait_queue(sk_sleep(sk), &wait);
8ff59090
HX
279
280 return err;
281}
282
283static void skcipher_wmem_wakeup(struct sock *sk)
284{
285 struct socket_wq *wq;
286
287 if (!skcipher_writable(sk))
288 return;
289
290 rcu_read_lock();
291 wq = rcu_dereference(sk->sk_wq);
1ce0bf50 292 if (skwq_has_sleeper(wq))
8ff59090
HX
293 wake_up_interruptible_sync_poll(&wq->wait, POLLIN |
294 POLLRDNORM |
295 POLLRDBAND);
296 sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
297 rcu_read_unlock();
298}
299
300static int skcipher_wait_for_data(struct sock *sk, unsigned flags)
301{
d9dc8b0f 302 DEFINE_WAIT_FUNC(wait, woken_wake_function);
8ff59090
HX
303 struct alg_sock *ask = alg_sk(sk);
304 struct skcipher_ctx *ctx = ask->private;
305 long timeout;
8ff59090
HX
306 int err = -ERESTARTSYS;
307
308 if (flags & MSG_DONTWAIT) {
309 return -EAGAIN;
310 }
311
9cd3e072 312 sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
8ff59090 313
d9dc8b0f 314 add_wait_queue(sk_sleep(sk), &wait);
8ff59090
HX
315 for (;;) {
316 if (signal_pending(current))
317 break;
8ff59090 318 timeout = MAX_SCHEDULE_TIMEOUT;
d9dc8b0f 319 if (sk_wait_event(sk, &timeout, ctx->used, &wait)) {
8ff59090
HX
320 err = 0;
321 break;
322 }
323 }
d9dc8b0f 324 remove_wait_queue(sk_sleep(sk), &wait);
8ff59090 325
9cd3e072 326 sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
8ff59090
HX
327
328 return err;
329}
330
331static void skcipher_data_wakeup(struct sock *sk)
332{
333 struct alg_sock *ask = alg_sk(sk);
334 struct skcipher_ctx *ctx = ask->private;
335 struct socket_wq *wq;
336
337 if (!ctx->used)
338 return;
339
340 rcu_read_lock();
341 wq = rcu_dereference(sk->sk_wq);
1ce0bf50 342 if (skwq_has_sleeper(wq))
8ff59090
HX
343 wake_up_interruptible_sync_poll(&wq->wait, POLLOUT |
344 POLLRDNORM |
345 POLLRDBAND);
346 sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
347 rcu_read_unlock();
348}
349
1b784140
YX
350static int skcipher_sendmsg(struct socket *sock, struct msghdr *msg,
351 size_t size)
8ff59090
HX
352{
353 struct sock *sk = sock->sk;
354 struct alg_sock *ask = alg_sk(sk);
6454c2b8
HX
355 struct sock *psk = ask->parent;
356 struct alg_sock *pask = alg_sk(psk);
8ff59090 357 struct skcipher_ctx *ctx = ask->private;
6454c2b8
HX
358 struct skcipher_tfm *skc = pask->private;
359 struct crypto_skcipher *tfm = skc->skcipher;
0d96e4ba 360 unsigned ivsize = crypto_skcipher_ivsize(tfm);
e870456d 361 struct skcipher_tsgl *sgl;
8ff59090
HX
362 struct af_alg_control con = {};
363 long copied = 0;
364 bool enc = 0;
f26b7b80 365 bool init = 0;
8ff59090
HX
366 int err;
367 int i;
368
369 if (msg->msg_controllen) {
370 err = af_alg_cmsg_send(msg, &con);
371 if (err)
372 return err;
373
f26b7b80 374 init = 1;
8ff59090
HX
375 switch (con.op) {
376 case ALG_OP_ENCRYPT:
377 enc = 1;
378 break;
379 case ALG_OP_DECRYPT:
380 enc = 0;
381 break;
382 default:
383 return -EINVAL;
384 }
385
386 if (con.iv && con.iv->ivlen != ivsize)
387 return -EINVAL;
388 }
389
390 err = -EINVAL;
391
392 lock_sock(sk);
393 if (!ctx->more && ctx->used)
394 goto unlock;
395
f26b7b80 396 if (init) {
8ff59090
HX
397 ctx->enc = enc;
398 if (con.iv)
399 memcpy(ctx->iv, con.iv->iv, ivsize);
400 }
401
8ff59090
HX
402 while (size) {
403 struct scatterlist *sg;
404 unsigned long len = size;
652d5b8a 405 size_t plen;
8ff59090
HX
406
407 if (ctx->merge) {
e870456d
SM
408 sgl = list_entry(ctx->tsgl_list.prev,
409 struct skcipher_tsgl, list);
8ff59090
HX
410 sg = sgl->sg + sgl->cur - 1;
411 len = min_t(unsigned long, len,
412 PAGE_SIZE - sg->offset - sg->length);
413
6ce8e9ce
AV
414 err = memcpy_from_msg(page_address(sg_page(sg)) +
415 sg->offset + sg->length,
416 msg, len);
8ff59090
HX
417 if (err)
418 goto unlock;
419
420 sg->length += len;
421 ctx->merge = (sg->offset + sg->length) &
422 (PAGE_SIZE - 1);
423
424 ctx->used += len;
425 copied += len;
426 size -= len;
8ff59090
HX
427 continue;
428 }
429
0f6bb83c 430 if (!skcipher_writable(sk)) {
8ff59090
HX
431 err = skcipher_wait_for_wmem(sk, msg->msg_flags);
432 if (err)
433 goto unlock;
8ff59090
HX
434 }
435
0f6bb83c 436 len = min_t(unsigned long, len, skcipher_sndbuf(sk));
8ff59090 437
e870456d 438 err = skcipher_alloc_tsgl(sk);
8ff59090
HX
439 if (err)
440 goto unlock;
441
e870456d
SM
442 sgl = list_entry(ctx->tsgl_list.prev, struct skcipher_tsgl,
443 list);
8ff59090 444 sg = sgl->sg;
202736d9
HX
445 if (sgl->cur)
446 sg_unmark_end(sg + sgl->cur - 1);
8ff59090
HX
447 do {
448 i = sgl->cur;
652d5b8a 449 plen = min_t(size_t, len, PAGE_SIZE);
8ff59090
HX
450
451 sg_assign_page(sg + i, alloc_page(GFP_KERNEL));
452 err = -ENOMEM;
453 if (!sg_page(sg + i))
454 goto unlock;
455
6ce8e9ce
AV
456 err = memcpy_from_msg(page_address(sg_page(sg + i)),
457 msg, plen);
8ff59090
HX
458 if (err) {
459 __free_page(sg_page(sg + i));
460 sg_assign_page(sg + i, NULL);
461 goto unlock;
462 }
463
464 sg[i].length = plen;
465 len -= plen;
466 ctx->used += plen;
467 copied += plen;
468 size -= plen;
8ff59090
HX
469 sgl->cur++;
470 } while (len && sgl->cur < MAX_SGL_ENTS);
471
0f477b65
TS
472 if (!size)
473 sg_mark_end(sg + sgl->cur - 1);
474
8ff59090
HX
475 ctx->merge = plen & (PAGE_SIZE - 1);
476 }
477
478 err = 0;
479
480 ctx->more = msg->msg_flags & MSG_MORE;
8ff59090
HX
481
482unlock:
483 skcipher_data_wakeup(sk);
484 release_sock(sk);
485
486 return copied ?: err;
487}
488
489static ssize_t skcipher_sendpage(struct socket *sock, struct page *page,
490 int offset, size_t size, int flags)
491{
492 struct sock *sk = sock->sk;
493 struct alg_sock *ask = alg_sk(sk);
494 struct skcipher_ctx *ctx = ask->private;
e870456d 495 struct skcipher_tsgl *sgl;
8ff59090 496 int err = -EINVAL;
8ff59090 497
d3f7d56a
SL
498 if (flags & MSG_SENDPAGE_NOTLAST)
499 flags |= MSG_MORE;
500
8ff59090
HX
501 lock_sock(sk);
502 if (!ctx->more && ctx->used)
503 goto unlock;
504
505 if (!size)
506 goto done;
507
0f6bb83c 508 if (!skcipher_writable(sk)) {
8ff59090
HX
509 err = skcipher_wait_for_wmem(sk, flags);
510 if (err)
511 goto unlock;
8ff59090
HX
512 }
513
e870456d 514 err = skcipher_alloc_tsgl(sk);
8ff59090
HX
515 if (err)
516 goto unlock;
517
518 ctx->merge = 0;
e870456d 519 sgl = list_entry(ctx->tsgl_list.prev, struct skcipher_tsgl, list);
8ff59090 520
0f477b65
TS
521 if (sgl->cur)
522 sg_unmark_end(sgl->sg + sgl->cur - 1);
523
524 sg_mark_end(sgl->sg + sgl->cur);
8ff59090
HX
525 get_page(page);
526 sg_set_page(sgl->sg + sgl->cur, page, size, offset);
527 sgl->cur++;
528 ctx->used += size;
529
530done:
531 ctx->more = flags & MSG_MORE;
8ff59090
HX
532
533unlock:
534 skcipher_data_wakeup(sk);
535 release_sock(sk);
536
537 return err ?: size;
538}
539
e870456d 540static void skcipher_async_cb(struct crypto_async_request *req, int err)
a596999b 541{
e870456d
SM
542 struct skcipher_async_req *areq = req->data;
543 struct sock *sk = areq->sk;
544 struct kiocb *iocb = areq->iocb;
545 unsigned int resultlen;
a596999b 546
e870456d 547 lock_sock(sk);
a596999b 548
e870456d
SM
549 /* Buffer size written by crypto operation. */
550 resultlen = areq->req.cryptlen;
a596999b 551
e870456d
SM
552 skcipher_free_areq_sgls(areq);
553 sock_kfree_s(sk, areq, areq->areqlen);
554 __sock_put(sk);
555
556 iocb->ki_complete(iocb, err ? err : resultlen, 0);
557
558 release_sock(sk);
a596999b
TS
559}
560
e870456d
SM
561static int _skcipher_recvmsg(struct socket *sock, struct msghdr *msg,
562 size_t ignored, int flags)
a596999b
TS
563{
564 struct sock *sk = sock->sk;
565 struct alg_sock *ask = alg_sk(sk);
ec69bbfb
HX
566 struct sock *psk = ask->parent;
567 struct alg_sock *pask = alg_sk(psk);
a596999b 568 struct skcipher_ctx *ctx = ask->private;
ec69bbfb
HX
569 struct skcipher_tfm *skc = pask->private;
570 struct crypto_skcipher *tfm = skc->skcipher;
e870456d
SM
571 unsigned int bs = crypto_skcipher_blocksize(tfm);
572 unsigned int areqlen = sizeof(struct skcipher_async_req) +
573 crypto_skcipher_reqsize(tfm);
574 struct skcipher_async_req *areq;
575 struct skcipher_rsgl *last_rsgl = NULL;
576 int err = 0;
577 size_t len = 0;
ec69bbfb 578
e870456d
SM
579 /* Allocate cipher request for current operation. */
580 areq = sock_kmalloc(sk, areqlen, GFP_KERNEL);
581 if (unlikely(!areq))
582 return -ENOMEM;
583 areq->areqlen = areqlen;
584 areq->sk = sk;
585 INIT_LIST_HEAD(&areq->rsgl_list);
586 areq->tsgl = NULL;
587 areq->tsgl_entries = 0;
a596999b 588
e870456d
SM
589 /* convert iovecs of output buffers into RX SGL */
590 while (msg_data_left(msg)) {
591 struct skcipher_rsgl *rsgl;
592 size_t seglen;
593
594 /* limit the amount of readable buffers */
595 if (!skcipher_readable(sk))
596 break;
a596999b
TS
597
598 if (!ctx->used) {
599 err = skcipher_wait_for_data(sk, flags);
600 if (err)
601 goto free;
602 }
a596999b 603
e870456d 604 seglen = min_t(size_t, ctx->used, msg_data_left(msg));
a596999b 605
e870456d
SM
606 if (list_empty(&areq->rsgl_list)) {
607 rsgl = &areq->first_sgl;
a596999b 608 } else {
e870456d 609 rsgl = sock_kmalloc(sk, sizeof(*rsgl), GFP_KERNEL);
a596999b
TS
610 if (!rsgl) {
611 err = -ENOMEM;
612 goto free;
613 }
a596999b
TS
614 }
615
e870456d
SM
616 rsgl->sgl.npages = 0;
617 list_add_tail(&rsgl->list, &areq->rsgl_list);
618
619 /* make one iovec available as scatterlist */
620 err = af_alg_make_sg(&rsgl->sgl, &msg->msg_iter, seglen);
621 if (err < 0)
a596999b 622 goto free;
e870456d
SM
623
624 /* chain the new scatterlist with previous one */
a596999b
TS
625 if (last_rsgl)
626 af_alg_link_sg(&last_rsgl->sgl, &rsgl->sgl);
627
628 last_rsgl = rsgl;
e870456d
SM
629 len += err;
630 ctx->rcvused += err;
631 rsgl->sg_num_bytes = err;
632 iov_iter_advance(&msg->msg_iter, err);
a596999b
TS
633 }
634
e870456d
SM
635 /* Process only as much RX buffers for which we have TX data */
636 if (len > ctx->used)
637 len = ctx->used;
638
639 /*
640 * If more buffers are to be expected to be processed, process only
641 * full block size buffers.
642 */
643 if (ctx->more || len < ctx->used)
644 len -= len % bs;
645
646 /*
647 * Create a per request TX SGL for this request which tracks the
648 * SG entries from the global TX SGL.
649 */
650 areq->tsgl_entries = skcipher_count_tsgl(sk, len);
651 if (!areq->tsgl_entries)
652 areq->tsgl_entries = 1;
653 areq->tsgl = sock_kmalloc(sk, sizeof(*areq->tsgl) * areq->tsgl_entries,
654 GFP_KERNEL);
655 if (!areq->tsgl) {
656 err = -ENOMEM;
657 goto free;
658 }
659 sg_init_table(areq->tsgl, areq->tsgl_entries);
660 skcipher_pull_tsgl(sk, len, areq->tsgl);
661
662 /* Initialize the crypto operation */
663 skcipher_request_set_tfm(&areq->req, tfm);
664 skcipher_request_set_crypt(&areq->req, areq->tsgl,
665 areq->first_sgl.sgl.sg, len, ctx->iv);
666
667 if (msg->msg_iocb && !is_sync_kiocb(msg->msg_iocb)) {
668 /* AIO operation */
669 areq->iocb = msg->msg_iocb;
670 skcipher_request_set_callback(&areq->req,
671 CRYPTO_TFM_REQ_MAY_SLEEP,
672 skcipher_async_cb, areq);
673 err = ctx->enc ? crypto_skcipher_encrypt(&areq->req) :
674 crypto_skcipher_decrypt(&areq->req);
675 } else {
676 /* Synchronous operation */
677 skcipher_request_set_callback(&areq->req,
678 CRYPTO_TFM_REQ_MAY_SLEEP |
679 CRYPTO_TFM_REQ_MAY_BACKLOG,
680 af_alg_complete,
681 &ctx->completion);
682 err = af_alg_wait_for_completion(ctx->enc ?
683 crypto_skcipher_encrypt(&areq->req) :
684 crypto_skcipher_decrypt(&areq->req),
685 &ctx->completion);
686 }
033f46b3 687
e870456d 688 /* AIO operation in progress */
a596999b 689 if (err == -EINPROGRESS) {
e870456d
SM
690 sock_hold(sk);
691 return -EIOCBQUEUED;
a596999b 692 }
e870456d 693
a596999b 694free:
e870456d
SM
695 skcipher_free_areq_sgls(areq);
696 if (areq)
697 sock_kfree_s(sk, areq, areqlen);
698
699 return err ? err : len;
a596999b
TS
700}
701
e870456d
SM
702static int skcipher_recvmsg(struct socket *sock, struct msghdr *msg,
703 size_t ignored, int flags)
8ff59090
HX
704{
705 struct sock *sk = sock->sk;
e870456d 706 int ret = 0;
8ff59090
HX
707
708 lock_sock(sk);
01e97e65 709 while (msg_data_left(msg)) {
e870456d
SM
710 int err = _skcipher_recvmsg(sock, msg, ignored, flags);
711
712 /*
713 * This error covers -EIOCBQUEUED which implies that we can
714 * only handle one AIO request. If the caller wants to have
715 * multiple AIO requests in parallel, he must make multiple
716 * separate AIO calls.
5703c826
SM
717 *
718 * Also return the error if no data has been processed so far.
e870456d
SM
719 */
720 if (err <= 0) {
5703c826 721 if (err == -EIOCBQUEUED || !ret)
e870456d
SM
722 ret = err;
723 goto out;
1d10eb2f
AV
724 }
725
e870456d 726 ret += err;
8ff59090
HX
727 }
728
e870456d 729out:
8ff59090
HX
730 skcipher_wmem_wakeup(sk);
731 release_sock(sk);
e870456d 732 return ret;
a596999b 733}
8ff59090
HX
734
735static unsigned int skcipher_poll(struct file *file, struct socket *sock,
736 poll_table *wait)
737{
738 struct sock *sk = sock->sk;
739 struct alg_sock *ask = alg_sk(sk);
740 struct skcipher_ctx *ctx = ask->private;
741 unsigned int mask;
742
743 sock_poll_wait(file, sk_sleep(sk), wait);
744 mask = 0;
745
746 if (ctx->used)
747 mask |= POLLIN | POLLRDNORM;
748
749 if (skcipher_writable(sk))
750 mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
751
752 return mask;
753}
754
755static struct proto_ops algif_skcipher_ops = {
756 .family = PF_ALG,
757
758 .connect = sock_no_connect,
759 .socketpair = sock_no_socketpair,
760 .getname = sock_no_getname,
761 .ioctl = sock_no_ioctl,
762 .listen = sock_no_listen,
763 .shutdown = sock_no_shutdown,
764 .getsockopt = sock_no_getsockopt,
765 .mmap = sock_no_mmap,
766 .bind = sock_no_bind,
767 .accept = sock_no_accept,
768 .setsockopt = sock_no_setsockopt,
769
770 .release = af_alg_release,
771 .sendmsg = skcipher_sendmsg,
772 .sendpage = skcipher_sendpage,
773 .recvmsg = skcipher_recvmsg,
774 .poll = skcipher_poll,
775};
776
a0fa2d03
HX
777static int skcipher_check_key(struct socket *sock)
778{
1822793a 779 int err = 0;
a0fa2d03
HX
780 struct sock *psk;
781 struct alg_sock *pask;
782 struct skcipher_tfm *tfm;
783 struct sock *sk = sock->sk;
784 struct alg_sock *ask = alg_sk(sk);
785
1822793a 786 lock_sock(sk);
a0fa2d03 787 if (ask->refcnt)
1822793a 788 goto unlock_child;
a0fa2d03
HX
789
790 psk = ask->parent;
791 pask = alg_sk(ask->parent);
792 tfm = pask->private;
793
794 err = -ENOKEY;
1822793a 795 lock_sock_nested(psk, SINGLE_DEPTH_NESTING);
a0fa2d03
HX
796 if (!tfm->has_key)
797 goto unlock;
798
799 if (!pask->refcnt++)
800 sock_hold(psk);
801
802 ask->refcnt = 1;
803 sock_put(psk);
804
805 err = 0;
806
807unlock:
808 release_sock(psk);
1822793a
HX
809unlock_child:
810 release_sock(sk);
a0fa2d03
HX
811
812 return err;
813}
814
815static int skcipher_sendmsg_nokey(struct socket *sock, struct msghdr *msg,
816 size_t size)
817{
818 int err;
819
820 err = skcipher_check_key(sock);
821 if (err)
822 return err;
823
824 return skcipher_sendmsg(sock, msg, size);
825}
826
827static ssize_t skcipher_sendpage_nokey(struct socket *sock, struct page *page,
828 int offset, size_t size, int flags)
829{
830 int err;
831
832 err = skcipher_check_key(sock);
833 if (err)
834 return err;
835
836 return skcipher_sendpage(sock, page, offset, size, flags);
837}
838
839static int skcipher_recvmsg_nokey(struct socket *sock, struct msghdr *msg,
840 size_t ignored, int flags)
841{
842 int err;
843
844 err = skcipher_check_key(sock);
845 if (err)
846 return err;
847
848 return skcipher_recvmsg(sock, msg, ignored, flags);
849}
850
851static struct proto_ops algif_skcipher_ops_nokey = {
852 .family = PF_ALG,
853
854 .connect = sock_no_connect,
855 .socketpair = sock_no_socketpair,
856 .getname = sock_no_getname,
857 .ioctl = sock_no_ioctl,
858 .listen = sock_no_listen,
859 .shutdown = sock_no_shutdown,
860 .getsockopt = sock_no_getsockopt,
861 .mmap = sock_no_mmap,
862 .bind = sock_no_bind,
863 .accept = sock_no_accept,
864 .setsockopt = sock_no_setsockopt,
865
866 .release = af_alg_release,
867 .sendmsg = skcipher_sendmsg_nokey,
868 .sendpage = skcipher_sendpage_nokey,
869 .recvmsg = skcipher_recvmsg_nokey,
870 .poll = skcipher_poll,
871};
872
8ff59090
HX
873static void *skcipher_bind(const char *name, u32 type, u32 mask)
874{
dd504589
HX
875 struct skcipher_tfm *tfm;
876 struct crypto_skcipher *skcipher;
877
878 tfm = kzalloc(sizeof(*tfm), GFP_KERNEL);
879 if (!tfm)
880 return ERR_PTR(-ENOMEM);
881
882 skcipher = crypto_alloc_skcipher(name, type, mask);
883 if (IS_ERR(skcipher)) {
884 kfree(tfm);
885 return ERR_CAST(skcipher);
886 }
887
888 tfm->skcipher = skcipher;
889
890 return tfm;
8ff59090
HX
891}
892
893static void skcipher_release(void *private)
894{
dd504589
HX
895 struct skcipher_tfm *tfm = private;
896
897 crypto_free_skcipher(tfm->skcipher);
898 kfree(tfm);
8ff59090
HX
899}
900
901static int skcipher_setkey(void *private, const u8 *key, unsigned int keylen)
902{
dd504589
HX
903 struct skcipher_tfm *tfm = private;
904 int err;
905
906 err = crypto_skcipher_setkey(tfm->skcipher, key, keylen);
907 tfm->has_key = !err;
908
909 return err;
8ff59090
HX
910}
911
912static void skcipher_sock_destruct(struct sock *sk)
913{
914 struct alg_sock *ask = alg_sk(sk);
915 struct skcipher_ctx *ctx = ask->private;
e870456d
SM
916 struct sock *psk = ask->parent;
917 struct alg_sock *pask = alg_sk(psk);
918 struct skcipher_tfm *skc = pask->private;
919 struct crypto_skcipher *tfm = skc->skcipher;
a596999b 920
e870456d 921 skcipher_pull_tsgl(sk, ctx->used, NULL);
0d96e4ba 922 sock_kzfree_s(sk, ctx->iv, crypto_skcipher_ivsize(tfm));
8ff59090
HX
923 sock_kfree_s(sk, ctx, ctx->len);
924 af_alg_release_parent(sk);
925}
926
d7b65aee 927static int skcipher_accept_parent_nokey(void *private, struct sock *sk)
8ff59090
HX
928{
929 struct skcipher_ctx *ctx;
930 struct alg_sock *ask = alg_sk(sk);
dd504589
HX
931 struct skcipher_tfm *tfm = private;
932 struct crypto_skcipher *skcipher = tfm->skcipher;
e870456d 933 unsigned int len = sizeof(*ctx);
8ff59090
HX
934
935 ctx = sock_kmalloc(sk, len, GFP_KERNEL);
936 if (!ctx)
937 return -ENOMEM;
938
dd504589 939 ctx->iv = sock_kmalloc(sk, crypto_skcipher_ivsize(skcipher),
8ff59090
HX
940 GFP_KERNEL);
941 if (!ctx->iv) {
942 sock_kfree_s(sk, ctx, len);
943 return -ENOMEM;
944 }
945
dd504589 946 memset(ctx->iv, 0, crypto_skcipher_ivsize(skcipher));
8ff59090 947
e870456d 948 INIT_LIST_HEAD(&ctx->tsgl_list);
8ff59090
HX
949 ctx->len = len;
950 ctx->used = 0;
e870456d 951 ctx->rcvused = 0;
8ff59090
HX
952 ctx->more = 0;
953 ctx->merge = 0;
954 ctx->enc = 0;
955 af_alg_init_completion(&ctx->completion);
956
957 ask->private = ctx;
958
8ff59090
HX
959 sk->sk_destruct = skcipher_sock_destruct;
960
961 return 0;
962}
963
a0fa2d03
HX
964static int skcipher_accept_parent(void *private, struct sock *sk)
965{
966 struct skcipher_tfm *tfm = private;
967
6e8d8ecf 968 if (!tfm->has_key && crypto_skcipher_has_setkey(tfm->skcipher))
a0fa2d03
HX
969 return -ENOKEY;
970
d7b65aee 971 return skcipher_accept_parent_nokey(private, sk);
a0fa2d03
HX
972}
973
8ff59090
HX
974static const struct af_alg_type algif_type_skcipher = {
975 .bind = skcipher_bind,
976 .release = skcipher_release,
977 .setkey = skcipher_setkey,
978 .accept = skcipher_accept_parent,
a0fa2d03 979 .accept_nokey = skcipher_accept_parent_nokey,
8ff59090 980 .ops = &algif_skcipher_ops,
a0fa2d03 981 .ops_nokey = &algif_skcipher_ops_nokey,
8ff59090
HX
982 .name = "skcipher",
983 .owner = THIS_MODULE
984};
985
986static int __init algif_skcipher_init(void)
987{
988 return af_alg_register_type(&algif_type_skcipher);
989}
990
991static void __exit algif_skcipher_exit(void)
992{
993 int err = af_alg_unregister_type(&algif_type_skcipher);
994 BUG_ON(err);
995}
996
997module_init(algif_skcipher_init);
998module_exit(algif_skcipher_exit);
999MODULE_LICENSE("GPL");