/*
 * Copyright (c) 2016-2017, Mellanox Technologies. All rights reserved.
 * Copyright (c) 2016-2017, Dave Watson <davejwatson@fb.com>. All rights reserved.
 * Copyright (c) 2016-2017, Lance Chao <lancerchao@fb.com>. All rights reserved.
 * Copyright (c) 2016, Fridolin Pokorny <fridolin.pokorny@gmail.com>. All rights reserved.
 * Copyright (c) 2016, Nikos Mavrogiannopoulos <nmav@gnutls.org>. All rights reserved.
 * Copyright (c) 2018, Covalent IO, Inc. http://covalent.io
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/sched/signal.h>
#include <linux/module.h>
#include <crypto/aead.h>

#include <net/strparser.h>
#include <net/tls.h>
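/* Count how many scatterlist entries are needed to map a byte range of
 * an skb, covering the linear data, the page frags and any frag list.
 * This mirrors the counting walk of __skb_to_sgvec(), including its
 * recursion depth limit of 24 on nested frag lists.
 */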
static int __skb_nsg(struct sk_buff *skb, int offset, int len,
		     unsigned int recursion_level)
{
	int start = skb_headlen(skb);
	int i, chunk = start - offset;
	struct sk_buff *frag_iter;
	int elt = 0;

	if (unlikely(recursion_level >= 24))
		return -EMSGSIZE;

	if (chunk > 0) {
		if (chunk > len)
			chunk = len;
		elt++;
		len -= chunk;
		if (len == 0)
			return elt;
		offset += chunk;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int end;

		WARN_ON(start > offset + len);

		end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]);
		chunk = end - offset;
		if (chunk > 0) {
			if (chunk > len)
				chunk = len;
			elt++;
			len -= chunk;
			if (len == 0)
				return elt;
			offset += chunk;
		}
		start = end;
	}

	if (unlikely(skb_has_frag_list(skb))) {
		skb_walk_frags(skb, frag_iter) {
			int end, ret;

			WARN_ON(start > offset + len);

			end = start + frag_iter->len;
			chunk = end - offset;
			if (chunk > 0) {
				if (chunk > len)
					chunk = len;
				ret = __skb_nsg(frag_iter, offset - start,
						chunk, recursion_level + 1);
				if (unlikely(ret < 0))
					return ret;
				elt += ret;
				len -= chunk;
				if (len == 0)
					return elt;
				offset += chunk;
			}
			start = end;
		}
	}
	BUG_ON(len);
	return elt;
}
/* Return the number of scatterlist elements required to completely map the
 * skb, or -EMSGSIZE if the recursion depth is exceeded.
 */
static int skb_nsg(struct sk_buff *skb, int offset, int len)
{
	return __skb_nsg(skb, offset, len, 0);
}
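/* TLS 1.3 hides the real record type: the plaintext ends with a single
 * content-type byte followed by zero padding (RFC 8446).  Scan backwards
 * from the end of the record for the first non-zero byte; that byte is
 * the real content type and everything after it is padding.
 */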
static int padding_length(struct tls_sw_context_rx *ctx,
			  struct tls_prot_info *prot, struct sk_buff *skb)
{
	struct strp_msg *rxm = strp_msg(skb);
	int sub = 0;

	/* Determine zero-padding length */
	if (prot->version == TLS_1_3_VERSION) {
		char content_type = 0;
		int err;
		int back = 17;

		while (content_type == 0) {
			if (back > rxm->full_len - prot->prepend_size)
				return -EBADMSG;
			err = skb_copy_bits(skb,
					    rxm->offset + rxm->full_len - back,
					    &content_type, 1);
			if (err)
				return err;
			if (content_type)
				break;
			sub++;
			back++;
		}
		ctx->control = content_type;
	}
	return sub;
}
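/* Completion callback for async decryption, run from the crypto layer.
 * It strips padding and the record header from the strparser message,
 * releases the destination pages if the record was not decrypted in
 * place, and wakes up a reader waiting in async_wait once the last
 * pending request has completed.
 */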
static void tls_decrypt_done(struct crypto_async_request *req, int err)
{
	struct aead_request *aead_req = (struct aead_request *)req;
	struct scatterlist *sgout = aead_req->dst;
	struct scatterlist *sgin = aead_req->src;
	struct tls_sw_context_rx *ctx;
	struct tls_context *tls_ctx;
	struct tls_prot_info *prot;
	struct scatterlist *sg;
	struct sk_buff *skb;
	unsigned int pages;
	int pending;

	skb = (struct sk_buff *)req->data;
	tls_ctx = tls_get_ctx(skb->sk);
	ctx = tls_sw_ctx_rx(tls_ctx);
	prot = &tls_ctx->prot_info;

	/* Propagate if there was an err */
	if (err) {
		if (err == -EBADMSG)
			TLS_INC_STATS(sock_net(skb->sk),
				      LINUX_MIB_TLSDECRYPTERROR);
		ctx->async_wait.err = err;
		tls_err_abort(skb->sk, err);
	} else {
		struct strp_msg *rxm = strp_msg(skb);
		int pad;

		pad = padding_length(ctx, prot, skb);
		if (pad < 0) {
			ctx->async_wait.err = pad;
			tls_err_abort(skb->sk, pad);
		} else {
			rxm->full_len -= pad;
			rxm->offset += prot->prepend_size;
			rxm->full_len -= prot->overhead_size;
		}
	}

	/* After using skb->sk to propagate sk through crypto async callback
	 * we need to NULL it again.
	 */
	skb->sk = NULL;

	/* Free the destination pages if skb was not decrypted inplace */
	if (sgout != sgin) {
		/* Skip the first S/G entry as it points to AAD */
		for_each_sg(sg_next(sgout), sg, UINT_MAX, pages) {
			if (!sg)
				break;
			put_page(sg_page(sg));
		}
	}

	kfree(aead_req);

	pending = atomic_dec_return(&ctx->decrypt_pending);

	if (!pending && READ_ONCE(ctx->async_notify))
		complete(&ctx->async_wait.completion);
}
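/* Submit one AEAD decrypt request for a record.  In async mode the
 * request completes in tls_decrypt_done() and -EINPROGRESS is passed
 * back to the caller; otherwise the result is awaited synchronously
 * via crypto_wait_req().
 */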
static int tls_do_decryption(struct sock *sk,
			     struct sk_buff *skb,
			     struct scatterlist *sgin,
			     struct scatterlist *sgout,
			     char *iv_recv,
			     size_t data_len,
			     struct aead_request *aead_req,
			     bool async)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	int ret;

	aead_request_set_tfm(aead_req, ctx->aead_recv);
	aead_request_set_ad(aead_req, prot->aad_size);
	aead_request_set_crypt(aead_req, sgin, sgout,
			       data_len + prot->tag_size,
			       (u8 *)iv_recv);

	if (async) {
		/* Using skb->sk to push sk through to crypto async callback
		 * handler. This allows propagating errors up to the socket
		 * if needed. It _must_ be cleared in the async handler
		 * before consume_skb is called. We _know_ skb->sk is NULL
		 * because it is a clone from strparser.
		 */
		skb->sk = sk;
		aead_request_set_callback(aead_req,
					  CRYPTO_TFM_REQ_MAY_BACKLOG,
					  tls_decrypt_done, skb);
		atomic_inc(&ctx->decrypt_pending);
	} else {
		aead_request_set_callback(aead_req,
					  CRYPTO_TFM_REQ_MAY_BACKLOG,
					  crypto_req_done, &ctx->async_wait);
	}

	ret = crypto_aead_decrypt(aead_req);
	if (ret == -EINPROGRESS) {
		if (async)
			return ret;

		ret = crypto_wait_req(ret, &ctx->async_wait);
	}

	if (async)
		atomic_dec(&ctx->decrypt_pending);

	return ret;
}
static void tls_trim_both_msgs(struct sock *sk, int target_size)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct tls_rec *rec = ctx->open_rec;

	sk_msg_trim(sk, &rec->msg_plaintext, target_size);
	if (target_size > 0)
		target_size += prot->overhead_size;
	sk_msg_trim(sk, &rec->msg_encrypted, target_size);
}
static int tls_alloc_encrypted_msg(struct sock *sk, int len)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct tls_rec *rec = ctx->open_rec;
	struct sk_msg *msg_en = &rec->msg_encrypted;

	return sk_msg_alloc(sk, msg_en, len, 0);
}
static int tls_clone_plaintext_msg(struct sock *sk, int required)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct tls_rec *rec = ctx->open_rec;
	struct sk_msg *msg_pl = &rec->msg_plaintext;
	struct sk_msg *msg_en = &rec->msg_encrypted;
	int skip, len;

	/* We add page references worth len bytes from encrypted sg
	 * at the end of plaintext sg. It is guaranteed that msg_en
	 * has enough required room (ensured by caller).
	 */
	len = required - msg_pl->sg.size;

	/* Skip initial bytes in msg_en's data to be able to use
	 * same offset of both plain and encrypted data.
	 */
	skip = prot->prepend_size + msg_pl->sg.size;

	return sk_msg_clone(sk, msg_pl, msg_en, skip, len);
}
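/* Allocate a transmit record.  The AEAD request lives at the tail of
 * struct tls_rec, which is why crypto_aead_reqsize() is added to the
 * allocation.  The two-entry sg_aead_in/sg_aead_out tables carry the
 * AAD in slot 0; slot 1 is chained to the plaintext and ciphertext
 * scatterlists later, in tls_push_record().
 */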
static struct tls_rec *tls_get_rec(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct sk_msg *msg_pl, *msg_en;
	struct tls_rec *rec;
	int mem_size;

	mem_size = sizeof(struct tls_rec) + crypto_aead_reqsize(ctx->aead_send);

	rec = kzalloc(mem_size, sk->sk_allocation);
	if (!rec)
		return NULL;

	msg_pl = &rec->msg_plaintext;
	msg_en = &rec->msg_encrypted;

	sk_msg_init(msg_pl);
	sk_msg_init(msg_en);

	sg_init_table(rec->sg_aead_in, 2);
	sg_set_buf(&rec->sg_aead_in[0], rec->aad_space, prot->aad_size);
	sg_unmark_end(&rec->sg_aead_in[1]);

	sg_init_table(rec->sg_aead_out, 2);
	sg_set_buf(&rec->sg_aead_out[0], rec->aad_space, prot->aad_size);
	sg_unmark_end(&rec->sg_aead_out[1]);

	return rec;
}
static void tls_free_rec(struct sock *sk, struct tls_rec *rec)
{
	sk_msg_free(sk, &rec->msg_encrypted);
	sk_msg_free(sk, &rec->msg_plaintext);
	kfree(rec);
}
static void tls_free_open_rec(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct tls_rec *rec = ctx->open_rec;

	if (rec) {
		tls_free_rec(sk, rec);
		ctx->open_rec = NULL;
	}
}
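/* Transmit records from tx_list.  A partially sent record is always at
 * the head of the list and must be pushed out first; after that,
 * records go out strictly in order, and only once their encryption has
 * completed (tx_ready).
 */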
int tls_tx_records(struct sock *sk, int flags)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct tls_rec *rec, *tmp;
	struct sk_msg *msg_en;
	int tx_flags, rc = 0;

	if (tls_is_partially_sent_record(tls_ctx)) {
		rec = list_first_entry(&ctx->tx_list,
				       struct tls_rec, list);

		if (flags == -1)
			tx_flags = rec->tx_flags;
		else
			tx_flags = flags;

		rc = tls_push_partial_record(sk, tls_ctx, tx_flags);
		if (rc)
			goto tx_err;

		/* Full record has been transmitted.
		 * Remove the head of tx_list
		 */
		list_del(&rec->list);
		sk_msg_free(sk, &rec->msg_plaintext);
		kfree(rec);
	}

	/* Tx all ready records */
	list_for_each_entry_safe(rec, tmp, &ctx->tx_list, list) {
		if (READ_ONCE(rec->tx_ready)) {
			if (flags == -1)
				tx_flags = rec->tx_flags;
			else
				tx_flags = flags;

			msg_en = &rec->msg_encrypted;
			rc = tls_push_sg(sk, tls_ctx,
					 &msg_en->sg.data[msg_en->sg.curr],
					 0, tx_flags);
			if (rc)
				goto tx_err;

			list_del(&rec->list);
			sk_msg_free(sk, &rec->msg_plaintext);
			kfree(rec);
		} else {
			break;
		}
	}

tx_err:
	if (rc < 0 && rc != -EAGAIN)
		tls_err_abort(sk, EBADMSG);

	return rc;
}
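/* Completion callback for async encryption.  It undoes the header
 * offset adjustment tls_do_encryption() applied to the first
 * ciphertext entry, marks the record ready for transmission and, if
 * the record sits at the head of tx_list, schedules the tx work so
 * records still leave the socket in order.
 */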
static void tls_encrypt_done(struct crypto_async_request *req, int err)
{
	struct aead_request *aead_req = (struct aead_request *)req;
	struct sock *sk = req->data;
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct scatterlist *sge;
	struct sk_msg *msg_en;
	struct tls_rec *rec;
	bool ready = false;
	int pending;

	rec = container_of(aead_req, struct tls_rec, aead_req);
	msg_en = &rec->msg_encrypted;

	sge = sk_msg_elem(msg_en, msg_en->sg.curr);
	sge->offset -= prot->prepend_size;
	sge->length += prot->prepend_size;

	/* Check if error is previously set on socket */
	if (err || sk->sk_err) {
		rec = NULL;

		/* If err is already set on socket, return the same code */
		if (sk->sk_err) {
			ctx->async_wait.err = sk->sk_err;
		} else {
			ctx->async_wait.err = err;
			tls_err_abort(sk, err);
		}
	}

	if (rec) {
		struct tls_rec *first_rec;

		/* Mark the record as ready for transmission */
		smp_store_mb(rec->tx_ready, true);

		/* If received record is at head of tx_list, schedule tx */
		first_rec = list_first_entry(&ctx->tx_list,
					     struct tls_rec, list);
		if (rec == first_rec)
			ready = true;
	}

	pending = atomic_dec_return(&ctx->encrypt_pending);

	if (!pending && READ_ONCE(ctx->async_notify))
		complete(&ctx->async_wait.completion);

	if (!ready)
		return;

	/* Schedule the transmission */
	if (!test_and_set_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask))
		schedule_delayed_work(&ctx->tx_work.work, 1);
}
static int tls_do_encryption(struct sock *sk,
			     struct tls_context *tls_ctx,
			     struct tls_sw_context_tx *ctx,
			     struct aead_request *aead_req,
			     size_t data_len, u32 start)
{
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct tls_rec *rec = ctx->open_rec;
	struct sk_msg *msg_en = &rec->msg_encrypted;
	struct scatterlist *sge = sk_msg_elem(msg_en, start);
	int rc, iv_offset = 0;

	/* For CCM based ciphers, first byte of IV is a constant */
	if (prot->cipher_type == TLS_CIPHER_AES_CCM_128) {
		rec->iv_data[0] = TLS_AES_CCM_IV_B0_BYTE;
		iv_offset = 1;
	}

	memcpy(&rec->iv_data[iv_offset], tls_ctx->tx.iv,
	       prot->iv_size + prot->salt_size);

	xor_iv_with_seq(prot->version, rec->iv_data, tls_ctx->tx.rec_seq);

	sge->offset += prot->prepend_size;
	sge->length -= prot->prepend_size;

	msg_en->sg.curr = start;

	aead_request_set_tfm(aead_req, ctx->aead_send);
	aead_request_set_ad(aead_req, prot->aad_size);
	aead_request_set_crypt(aead_req, rec->sg_aead_in,
			       rec->sg_aead_out,
			       data_len, rec->iv_data);

	aead_request_set_callback(aead_req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				  tls_encrypt_done, sk);

	/* Add the record in tx_list */
	list_add_tail((struct list_head *)&rec->list, &ctx->tx_list);
	atomic_inc(&ctx->encrypt_pending);

	rc = crypto_aead_encrypt(aead_req);
	if (!rc || rc != -EINPROGRESS) {
		atomic_dec(&ctx->encrypt_pending);
		sge->offset -= prot->prepend_size;
		sge->length += prot->prepend_size;
	}

	if (!rc) {
		WRITE_ONCE(rec->tx_ready, true);
	} else if (rc != -EINPROGRESS) {
		list_del(&rec->list);
		return rc;
	}

	/* Unhook the record from context if encryption did not fail */
	ctx->open_rec = NULL;
	tls_advance_record_sn(sk, prot, &tls_ctx->tx);
	return rc;
}
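/* Split the open record at split_point, typically because a BPF
 * verdict applies only to the first apply_bytes of the plaintext.  The
 * tail scatterlist entries (and a partial page, if the boundary falls
 * inside an entry) are moved into a freshly allocated record.
 */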
static int tls_split_open_record(struct sock *sk, struct tls_rec *from,
				 struct tls_rec **to, struct sk_msg *msg_opl,
				 struct sk_msg *msg_oen, u32 split_point,
				 u32 tx_overhead_size, u32 *orig_end)
{
	u32 i, j, bytes = 0, apply = msg_opl->apply_bytes;
	struct scatterlist *sge, *osge, *nsge;
	u32 orig_size = msg_opl->sg.size;
	struct scatterlist tmp = { };
	struct sk_msg *msg_npl;
	struct tls_rec *new;
	int ret;

	new = tls_get_rec(sk);
	if (!new)
		return -ENOMEM;
	ret = sk_msg_alloc(sk, &new->msg_encrypted, msg_opl->sg.size +
			   tx_overhead_size, 0);
	if (ret < 0) {
		tls_free_rec(sk, new);
		return ret;
	}

	*orig_end = msg_opl->sg.end;
	i = msg_opl->sg.start;
	sge = sk_msg_elem(msg_opl, i);
	while (apply && sge->length) {
		if (sge->length > apply) {
			u32 len = sge->length - apply;

			get_page(sg_page(sge));
			sg_set_page(&tmp, sg_page(sge), len,
				    sge->offset + apply);
			sge->length = apply;
			bytes += apply;
			apply = 0;
		} else {
			apply -= sge->length;
			bytes += sge->length;
		}

		sk_msg_iter_var_next(i);
		if (i == msg_opl->sg.end)
			break;
		sge = sk_msg_elem(msg_opl, i);
	}

	msg_opl->sg.end = i;
	msg_opl->sg.curr = i;
	msg_opl->sg.copybreak = 0;
	msg_opl->apply_bytes = 0;
	msg_opl->sg.size = bytes;

	msg_npl = &new->msg_plaintext;
	msg_npl->apply_bytes = apply;
	msg_npl->sg.size = orig_size - bytes;

	j = msg_npl->sg.start;
	nsge = sk_msg_elem(msg_npl, j);
	if (tmp.length) {
		memcpy(nsge, &tmp, sizeof(*nsge));
		sk_msg_iter_var_next(j);
		nsge = sk_msg_elem(msg_npl, j);
	}

	osge = sk_msg_elem(msg_opl, i);
	while (osge->length) {
		memcpy(nsge, osge, sizeof(*nsge));
		sg_unmark_end(nsge);
		sk_msg_iter_var_next(i);
		sk_msg_iter_var_next(j);
		if (i == *orig_end)
			break;
		osge = sk_msg_elem(msg_opl, i);
		nsge = sk_msg_elem(msg_npl, j);
	}

	msg_npl->sg.end = j;
	msg_npl->sg.curr = j;
	msg_npl->sg.copybreak = 0;

	*to = new;
	return 0;
}
static void tls_merge_open_record(struct sock *sk, struct tls_rec *to,
				  struct tls_rec *from, u32 orig_end)
{
	struct sk_msg *msg_npl = &from->msg_plaintext;
	struct sk_msg *msg_opl = &to->msg_plaintext;
	struct scatterlist *osge, *nsge;
	u32 i, j;

	i = msg_opl->sg.end;
	sk_msg_iter_var_prev(i);
	j = msg_npl->sg.start;

	osge = sk_msg_elem(msg_opl, i);
	nsge = sk_msg_elem(msg_npl, j);

	if (sg_page(osge) == sg_page(nsge) &&
	    osge->offset + osge->length == nsge->offset) {
		osge->length += nsge->length;
		put_page(sg_page(nsge));
	}

	msg_opl->sg.end = orig_end;
	msg_opl->sg.curr = orig_end;
	msg_opl->sg.copybreak = 0;
	msg_opl->apply_bytes = msg_opl->sg.size + msg_npl->sg.size;
	msg_opl->sg.size += msg_npl->sg.size;

	sk_msg_free(sk, &to->msg_encrypted);
	sk_msg_xfer_full(&to->msg_encrypted, &from->msg_encrypted);

	kfree(from);
}
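/* Close the open record and feed it to the AEAD.  For TLS 1.3 the
 * content-type byte is chained to the end of the plaintext; the AAD
 * and record header are built here and the actual crypto is kicked off
 * in tls_do_encryption().
 */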
static int tls_push_record(struct sock *sk, int flags,
			   unsigned char record_type)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct tls_rec *rec = ctx->open_rec, *tmp = NULL;
	u32 i, split_point, uninitialized_var(orig_end);
	struct sk_msg *msg_pl, *msg_en;
	struct aead_request *req;
	bool split;
	int rc;

	if (!rec)
		return 0;

	msg_pl = &rec->msg_plaintext;
	msg_en = &rec->msg_encrypted;

	split_point = msg_pl->apply_bytes;
	split = split_point && split_point < msg_pl->sg.size;
	if (unlikely((!split &&
		      msg_pl->sg.size +
		      prot->overhead_size > msg_en->sg.size) ||
		     (split &&
		      split_point +
		      prot->overhead_size > msg_en->sg.size))) {
		split = true;
		split_point = msg_en->sg.size;
	}
	if (split) {
		rc = tls_split_open_record(sk, rec, &tmp, msg_pl, msg_en,
					   split_point, prot->overhead_size,
					   &orig_end);
		if (rc < 0)
			return rc;
		/* This can happen if above tls_split_open_record allocates
		 * a single large encryption buffer instead of two smaller
		 * ones. In this case adjust pointers and continue without
		 * split.
		 */
		if (!msg_pl->sg.size) {
			tls_merge_open_record(sk, rec, tmp, orig_end);
			msg_pl = &rec->msg_plaintext;
			msg_en = &rec->msg_encrypted;
			split = false;
		}
		sk_msg_trim(sk, msg_en, msg_pl->sg.size +
			    prot->overhead_size);
	}

	rec->tx_flags = flags;
	req = &rec->aead_req;

	i = msg_pl->sg.end;
	sk_msg_iter_var_prev(i);

	rec->content_type = record_type;
	if (prot->version == TLS_1_3_VERSION) {
		/* Add content type to end of message.  No padding added */
		sg_set_buf(&rec->sg_content_type, &rec->content_type, 1);
		sg_mark_end(&rec->sg_content_type);
		sg_chain(msg_pl->sg.data, msg_pl->sg.end + 1,
			 &rec->sg_content_type);
	} else {
		sg_mark_end(sk_msg_elem(msg_pl, i));
	}

	if (msg_pl->sg.end < msg_pl->sg.start) {
		sg_chain(&msg_pl->sg.data[msg_pl->sg.start],
			 MAX_SKB_FRAGS - msg_pl->sg.start + 1,
			 msg_pl->sg.data);
	}

	i = msg_pl->sg.start;
	sg_chain(rec->sg_aead_in, 2, &msg_pl->sg.data[i]);

	i = msg_en->sg.end;
	sk_msg_iter_var_prev(i);
	sg_mark_end(sk_msg_elem(msg_en, i));

	i = msg_en->sg.start;
	sg_chain(rec->sg_aead_out, 2, &msg_en->sg.data[i]);

	tls_make_aad(rec->aad_space, msg_pl->sg.size + prot->tail_size,
		     tls_ctx->tx.rec_seq, prot->rec_seq_size,
		     record_type, prot->version);

	tls_fill_prepend(tls_ctx,
			 page_address(sg_page(&msg_en->sg.data[i])) +
			 msg_en->sg.data[i].offset,
			 msg_pl->sg.size + prot->tail_size,
			 record_type, prot->version);

	tls_ctx->pending_open_record_frags = false;

	rc = tls_do_encryption(sk, tls_ctx, ctx, req,
			       msg_pl->sg.size + prot->tail_size, i);
	if (rc < 0) {
		if (rc != -EINPROGRESS) {
			tls_err_abort(sk, EBADMSG);
			if (split) {
				tls_ctx->pending_open_record_frags = true;
				tls_merge_open_record(sk, rec, tmp, orig_end);
			}
		}
		ctx->async_capable = 1;
		return rc;
	} else if (split) {
		msg_pl = &tmp->msg_plaintext;
		msg_en = &tmp->msg_encrypted;
		sk_msg_trim(sk, msg_en, msg_pl->sg.size + prot->overhead_size);
		tls_ctx->pending_open_record_frags = true;
		ctx->open_rec = tmp;
	}

	return tls_tx_records(sk, flags);
}
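/* Run the sockmap TX policy over the plaintext message.  __SK_PASS
 * encrypts and sends on this socket, __SK_REDIRECT hands the plaintext
 * to another socket via tcp_bpf_sendmsg_redir(), and __SK_DROP frees
 * the data and reports -EACCES.
 */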
static int bpf_exec_tx_verdict(struct sk_msg *msg, struct sock *sk,
			       bool full_record, u8 record_type,
			       size_t *copied, int flags)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct sk_msg msg_redir = { };
	struct sk_psock *psock;
	struct sock *sk_redir;
	struct tls_rec *rec;
	bool enospc, policy;
	int err = 0, send;
	u32 delta = 0;

	policy = !(flags & MSG_SENDPAGE_NOPOLICY);
	psock = sk_psock_get(sk);
	if (!psock || !policy) {
		err = tls_push_record(sk, flags, record_type);
		if (err && err != -EINPROGRESS) {
			*copied -= sk_msg_free(sk, msg);
			tls_free_open_rec(sk);
		}
		if (psock)
			sk_psock_put(sk, psock);
		return err;
	}
more_data:
	enospc = sk_msg_full(msg);
	if (psock->eval == __SK_NONE) {
		delta = msg->sg.size;
		psock->eval = sk_psock_msg_verdict(sk, psock, msg);
		delta -= msg->sg.size;
	}
	if (msg->cork_bytes && msg->cork_bytes > msg->sg.size &&
	    !enospc && !full_record) {
		err = -ENOSPC;
		goto out_err;
	}
	msg->cork_bytes = 0;
	send = msg->sg.size;
	if (msg->apply_bytes && msg->apply_bytes < send)
		send = msg->apply_bytes;

	switch (psock->eval) {
	case __SK_PASS:
		err = tls_push_record(sk, flags, record_type);
		if (err && err != -EINPROGRESS) {
			*copied -= sk_msg_free(sk, msg);
			tls_free_open_rec(sk);
			goto out_err;
		}
		break;
	case __SK_REDIRECT:
		sk_redir = psock->sk_redir;
		memcpy(&msg_redir, msg, sizeof(*msg));
		if (msg->apply_bytes < send)
			msg->apply_bytes = 0;
		else
			msg->apply_bytes -= send;
		sk_msg_return_zero(sk, msg, send);
		msg->sg.size -= send;
		err = tcp_bpf_sendmsg_redir(sk_redir, &msg_redir, send, flags);
		if (err < 0) {
			*copied -= sk_msg_free_nocharge(sk, &msg_redir);
			msg->sg.size = 0;
		}
		if (msg->sg.size == 0)
			tls_free_open_rec(sk);
		break;
	case __SK_DROP:
	default:
		sk_msg_free_partial(sk, msg, send);
		if (msg->apply_bytes < send)
			msg->apply_bytes = 0;
		else
			msg->apply_bytes -= send;
		if (msg->sg.size == 0)
			tls_free_open_rec(sk);
		*copied -= (send + delta);
		err = -EACCES;
	}

	if (likely(!err)) {
		bool reset_eval = !ctx->open_rec;

		rec = ctx->open_rec;
		if (rec) {
			msg = &rec->msg_plaintext;
			if (!msg->apply_bytes)
				reset_eval = true;
		}
		if (reset_eval) {
			psock->eval = __SK_NONE;
			if (psock->sk_redir) {
				sock_put(psock->sk_redir);
				psock->sk_redir = NULL;
			}
		}
		if (rec)
			goto more_data;
	}
out_err:
	sk_psock_put(sk, psock);
	return err;
}
static int tls_sw_push_pending_record(struct sock *sk, int flags)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct tls_rec *rec = ctx->open_rec;
	struct sk_msg *msg_pl;
	size_t copied;

	if (!rec)
		return 0;

	msg_pl = &rec->msg_plaintext;
	copied = msg_pl->sg.size;
	if (!copied)
		return 0;

	return bpf_exec_tx_verdict(msg_pl, sk, true, TLS_RECORD_TYPE_DATA,
				   &copied, flags);
}
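/* sendmsg() for a SW-crypto TLS socket.  Data is staged into the open
 * record's plaintext sk_msg (zero-copy from the iterator when the
 * conditions below allow it, copied otherwise) and a record is closed
 * and pushed whenever it is full (TLS_MAX_PAYLOAD_SIZE) or the message
 * ends without MSG_MORE.
 */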
int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
{
	long timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	bool async_capable = ctx->async_capable;
	unsigned char record_type = TLS_RECORD_TYPE_DATA;
	bool is_kvec = iov_iter_is_kvec(&msg->msg_iter);
	bool eor = !(msg->msg_flags & MSG_MORE);
	size_t try_to_copy, copied = 0;
	struct sk_msg *msg_pl, *msg_en;
	struct tls_rec *rec;
	int required_size;
	int num_async = 0;
	bool full_record;
	int record_room;
	int num_zc = 0;
	int orig_size;
	int ret = 0;

	if (msg->msg_flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL))
		return -EOPNOTSUPP;

	mutex_lock(&tls_ctx->tx_lock);
	lock_sock(sk);

	if (unlikely(msg->msg_controllen)) {
		ret = tls_proccess_cmsg(sk, msg, &record_type);
		if (ret) {
			if (ret == -EINPROGRESS)
				num_async++;
			else if (ret != -EAGAIN)
				goto send_end;
		}
	}

	while (msg_data_left(msg)) {
		if (sk->sk_err) {
			ret = -sk->sk_err;
			goto send_end;
		}

		if (ctx->open_rec)
			rec = ctx->open_rec;
		else
			rec = ctx->open_rec = tls_get_rec(sk);
		if (!rec) {
			ret = -ENOMEM;
			goto send_end;
		}

		msg_pl = &rec->msg_plaintext;
		msg_en = &rec->msg_encrypted;

		orig_size = msg_pl->sg.size;
		full_record = false;
		try_to_copy = msg_data_left(msg);
		record_room = TLS_MAX_PAYLOAD_SIZE - msg_pl->sg.size;
		if (try_to_copy >= record_room) {
			try_to_copy = record_room;
			full_record = true;
		}

		required_size = msg_pl->sg.size + try_to_copy +
				prot->overhead_size;

		if (!sk_stream_memory_free(sk))
			goto wait_for_sndbuf;

alloc_encrypted:
		ret = tls_alloc_encrypted_msg(sk, required_size);
		if (ret) {
			if (ret != -ENOSPC)
				goto wait_for_memory;

			/* Adjust try_to_copy according to the amount that was
			 * actually allocated. The difference is due
			 * to max sg elements limit
			 */
			try_to_copy -= required_size - msg_en->sg.size;
			full_record = true;
		}

		if (!is_kvec && (full_record || eor) && !async_capable) {
			u32 first = msg_pl->sg.end;

			ret = sk_msg_zerocopy_from_iter(sk, &msg->msg_iter,
							msg_pl, try_to_copy);
			if (ret)
				goto fallback_to_reg_send;

			num_zc++;
			copied += try_to_copy;

			sk_msg_sg_copy_set(msg_pl, first);
			ret = bpf_exec_tx_verdict(msg_pl, sk, full_record,
						  record_type, &copied,
						  msg->msg_flags);
			if (ret) {
				if (ret == -EINPROGRESS)
					num_async++;
				else if (ret == -ENOMEM)
					goto wait_for_memory;
				else if (ctx->open_rec && ret == -ENOSPC)
					goto rollback_iter;
				else if (ret != -EAGAIN)
					goto send_end;
			}
			continue;
rollback_iter:
			copied -= try_to_copy;
			sk_msg_sg_copy_clear(msg_pl, first);
			iov_iter_revert(&msg->msg_iter,
					msg_pl->sg.size - orig_size);
fallback_to_reg_send:
			sk_msg_trim(sk, msg_pl, orig_size);
		}

		required_size = msg_pl->sg.size + try_to_copy;

		ret = tls_clone_plaintext_msg(sk, required_size);
		if (ret) {
			if (ret != -ENOSPC)
				goto send_end;

			/* Adjust try_to_copy according to the amount that was
			 * actually allocated. The difference is due
			 * to max sg elements limit
			 */
			try_to_copy -= required_size - msg_pl->sg.size;
			full_record = true;
			sk_msg_trim(sk, msg_en,
				    msg_pl->sg.size + prot->overhead_size);
		}

		if (try_to_copy) {
			ret = sk_msg_memcopy_from_iter(sk, &msg->msg_iter,
						       msg_pl, try_to_copy);
			if (ret < 0)
				goto trim_sgl;
		}

		/* Open records defined only if successfully copied, otherwise
		 * we would trim the sg but not reset the open record frags.
		 */
		tls_ctx->pending_open_record_frags = true;
		copied += try_to_copy;
		if (full_record || eor) {
			ret = bpf_exec_tx_verdict(msg_pl, sk, full_record,
						  record_type, &copied,
						  msg->msg_flags);
			if (ret) {
				if (ret == -EINPROGRESS)
					num_async++;
				else if (ret == -ENOMEM)
					goto wait_for_memory;
				else if (ret != -EAGAIN) {
					if (ret == -ENOSPC)
						ret = 0;
					goto send_end;
				}
			}
		}

		continue;

wait_for_sndbuf:
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
wait_for_memory:
		ret = sk_stream_wait_memory(sk, &timeo);
		if (ret) {
trim_sgl:
			tls_trim_both_msgs(sk, orig_size);
			goto send_end;
		}

		if (ctx->open_rec && msg_en->sg.size < required_size)
			goto alloc_encrypted;
	}

	if (!num_async) {
		goto send_end;
	} else if (num_zc) {
		/* Wait for pending encryptions to get completed */
		smp_store_mb(ctx->async_notify, true);

		if (atomic_read(&ctx->encrypt_pending))
			crypto_wait_req(-EINPROGRESS, &ctx->async_wait);
		else
			reinit_completion(&ctx->async_wait.completion);

		WRITE_ONCE(ctx->async_notify, false);

		if (ctx->async_wait.err) {
			ret = ctx->async_wait.err;
			copied = 0;
		}
	}

	/* Transmit if any encryptions have completed */
	if (test_and_clear_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask)) {
		cancel_delayed_work(&ctx->tx_work.work);
		tls_tx_records(sk, msg->msg_flags);
	}

send_end:
	ret = sk_stream_error(sk, msg->msg_flags, ret);

	release_sock(sk);
	mutex_unlock(&tls_ctx->tx_lock);
	return copied ? copied : ret;
}
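/* Common worker for sendpage(): the page is attached to the open
 * record by reference via sk_msg_page_add(), so the payload is not
 * copied before encryption.
 */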
static int tls_sw_do_sendpage(struct sock *sk, struct page *page,
			      int offset, size_t size, int flags)
{
	long timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	unsigned char record_type = TLS_RECORD_TYPE_DATA;
	struct sk_msg *msg_pl;
	struct tls_rec *rec;
	int num_async = 0;
	size_t copied = 0;
	bool full_record;
	int record_room;
	int ret = 0;
	bool eor;

	eor = !(flags & (MSG_MORE | MSG_SENDPAGE_NOTLAST));
	sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);

	/* Call the sk_stream functions to manage the sndbuf mem. */
	while (size > 0) {
		size_t copy, required_size;

		if (sk->sk_err) {
			ret = -sk->sk_err;
			goto sendpage_end;
		}

		if (ctx->open_rec)
			rec = ctx->open_rec;
		else
			rec = ctx->open_rec = tls_get_rec(sk);
		if (!rec) {
			ret = -ENOMEM;
			goto sendpage_end;
		}

		msg_pl = &rec->msg_plaintext;

		full_record = false;
		record_room = TLS_MAX_PAYLOAD_SIZE - msg_pl->sg.size;
		copy = size;
		if (copy >= record_room) {
			copy = record_room;
			full_record = true;
		}

		required_size = msg_pl->sg.size + copy + prot->overhead_size;

		if (!sk_stream_memory_free(sk))
			goto wait_for_sndbuf;
alloc_payload:
		ret = tls_alloc_encrypted_msg(sk, required_size);
		if (ret) {
			if (ret != -ENOSPC)
				goto wait_for_memory;

			/* Adjust copy according to the amount that was
			 * actually allocated. The difference is due
			 * to max sg elements limit
			 */
			copy -= required_size - msg_pl->sg.size;
			full_record = true;
		}

		sk_msg_page_add(msg_pl, page, copy, offset);
		sk_mem_charge(sk, copy);

		offset += copy;
		size -= copy;
		copied += copy;

		tls_ctx->pending_open_record_frags = true;
		if (full_record || eor || sk_msg_full(msg_pl)) {
			ret = bpf_exec_tx_verdict(msg_pl, sk, full_record,
						  record_type, &copied, flags);
			if (ret) {
				if (ret == -EINPROGRESS)
					num_async++;
				else if (ret == -ENOMEM)
					goto wait_for_memory;
				else if (ret != -EAGAIN) {
					if (ret == -ENOSPC)
						ret = 0;
					goto sendpage_end;
				}
			}
		}
		continue;
wait_for_sndbuf:
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
wait_for_memory:
		ret = sk_stream_wait_memory(sk, &timeo);
		if (ret) {
			tls_trim_both_msgs(sk, msg_pl->sg.size);
			goto sendpage_end;
		}

		goto alloc_payload;
	}

	if (num_async) {
		/* Transmit if any encryptions have completed */
		if (test_and_clear_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask)) {
			cancel_delayed_work(&ctx->tx_work.work);
			tls_tx_records(sk, flags);
		}
	}
sendpage_end:
	ret = sk_stream_error(sk, flags, ret);
	return copied ? copied : ret;
}
int tls_sw_sendpage_locked(struct sock *sk, struct page *page,
			   int offset, size_t size, int flags)
{
	if (flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL |
		      MSG_SENDPAGE_NOTLAST | MSG_SENDPAGE_NOPOLICY |
		      MSG_NO_SHARED_FRAGS))
		return -EOPNOTSUPP;

	return tls_sw_do_sendpage(sk, page, offset, size, flags);
}
int tls_sw_sendpage(struct sock *sk, struct page *page,
		    int offset, size_t size, int flags)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	int ret;

	if (flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL |
		      MSG_SENDPAGE_NOTLAST | MSG_SENDPAGE_NOPOLICY))
		return -EOPNOTSUPP;

	mutex_lock(&tls_ctx->tx_lock);
	lock_sock(sk);
	ret = tls_sw_do_sendpage(sk, page, offset, size, flags);
	release_sock(sk);
	mutex_unlock(&tls_ctx->tx_lock);
	return ret;
}
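/* Wait until the strparser has queued a complete record (ctx->recv_pkt)
 * or, for sockmap users, the psock ingress queue is non-empty.  Returns
 * NULL on error, shutdown or timeout, with *err set where applicable.
 */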
static struct sk_buff *tls_wait_data(struct sock *sk, struct sk_psock *psock,
				     int flags, long timeo, int *err)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	struct sk_buff *skb;
	DEFINE_WAIT_FUNC(wait, woken_wake_function);

	while (!(skb = ctx->recv_pkt) && sk_psock_queue_empty(psock)) {
		if (sk->sk_err) {
			*err = sock_error(sk);
			return NULL;
		}

		if (sk->sk_shutdown & RCV_SHUTDOWN)
			return NULL;

		if (sock_flag(sk, SOCK_DONE))
			return NULL;

		if ((flags & MSG_DONTWAIT) || !timeo) {
			*err = -EAGAIN;
			return NULL;
		}

		add_wait_queue(sk_sleep(sk), &wait);
		sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
		sk_wait_event(sk, &timeo,
			      ctx->recv_pkt != skb ||
			      !sk_psock_queue_empty(psock),
			      &wait);
		sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
		remove_wait_queue(sk_sleep(sk), &wait);

		/* Handle signals */
		if (signal_pending(current)) {
			*err = sock_intr_errno(timeo);
			return NULL;
		}
	}

	return skb;
}
static int tls_setup_from_iter(struct sock *sk, struct iov_iter *from,
			       int length, int *pages_used,
			       unsigned int *size_used,
			       struct scatterlist *to,
			       int to_max_pages)
{
	int rc = 0, i = 0, num_elem = *pages_used, maxpages;
	struct page *pages[MAX_SKB_FRAGS];
	unsigned int size = *size_used;
	ssize_t copied, use;
	size_t offset;

	while (length > 0) {
		i = 0;
		maxpages = to_max_pages - num_elem;
		if (maxpages == 0) {
			rc = -EFAULT;
			goto out;
		}
		copied = iov_iter_get_pages(from, pages,
					    length,
					    maxpages, &offset);
		if (copied <= 0) {
			rc = -EFAULT;
			goto out;
		}

		iov_iter_advance(from, copied);

		length -= copied;
		size += copied;
		while (copied) {
			use = min_t(int, copied, PAGE_SIZE - offset);

			sg_set_page(&to[num_elem],
				    pages[i], use, offset);
			sg_unmark_end(&to[num_elem]);
			/* We do not uncharge memory from this API */

			offset = 0;
			copied -= use;

			i++;
			num_elem++;
		}
	}
	/* Mark the end in the last sg entry if newly added */
	if (num_elem > *pages_used)
		sg_mark_end(&to[num_elem - 1]);
out:
	if (rc)
		iov_iter_revert(from, size - *size_used);
	*size_used = size;
	*pages_used = num_elem;

	return rc;
}
/* This function decrypts the input skb into either out_iov or out_sg,
 * or into the skb buffers themselves. The input parameter 'zc' indicates
 * if zero-copy mode needs to be tried or not. With zero-copy mode, either
 * out_iov or out_sg must be non-NULL. In case both out_iov and out_sg are
 * NULL, then the decryption happens inside skb buffers itself, i.e.
 * zero-copy gets disabled and 'zc' is updated.
 */
static int decrypt_internal(struct sock *sk, struct sk_buff *skb,
			    struct iov_iter *out_iov,
			    struct scatterlist *out_sg,
			    int *chunk, bool *zc, bool async)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct strp_msg *rxm = strp_msg(skb);
	int n_sgin, n_sgout, nsg, mem_size, aead_size, err, pages = 0;
	struct aead_request *aead_req;
	struct sk_buff *unused;
	u8 *aad, *iv, *mem = NULL;
	struct scatterlist *sgin = NULL;
	struct scatterlist *sgout = NULL;
	const int data_len = rxm->full_len - prot->overhead_size +
			     prot->tail_size;
	int iv_offset = 0;

	if (*zc && (out_iov || out_sg)) {
		if (out_iov)
			n_sgout = iov_iter_npages(out_iov, INT_MAX) + 1;
		else
			n_sgout = sg_nents(out_sg);
		n_sgin = skb_nsg(skb, rxm->offset + prot->prepend_size,
				 rxm->full_len - prot->prepend_size);
	} else {
		n_sgout = 0;
		*zc = false;
		n_sgin = skb_cow_data(skb, 0, &unused);
	}

	if (n_sgin < 1)
		return -EBADMSG;

	/* Increment to accommodate AAD */
	n_sgin = n_sgin + 1;

	nsg = n_sgin + n_sgout;

	aead_size = sizeof(*aead_req) + crypto_aead_reqsize(ctx->aead_recv);
	mem_size = aead_size + (nsg * sizeof(struct scatterlist));
	mem_size = mem_size + prot->aad_size;
	mem_size = mem_size + crypto_aead_ivsize(ctx->aead_recv);

	/* Allocate a single block of memory which contains
	 * aead_req || sgin[] || sgout[] || aad || iv.
	 * This order achieves correct alignment for aead_req, sgin, sgout.
	 */
	mem = kmalloc(mem_size, sk->sk_allocation);
	if (!mem)
		return -ENOMEM;

	/* Segment the allocated memory */
	aead_req = (struct aead_request *)mem;
	sgin = (struct scatterlist *)(mem + aead_size);
	sgout = sgin + n_sgin;
	aad = (u8 *)(sgout + n_sgout);
	iv = aad + prot->aad_size;

	/* For CCM based ciphers, first byte of nonce+iv is always '2' */
	if (prot->cipher_type == TLS_CIPHER_AES_CCM_128) {
		iv[0] = 2;
		iv_offset = 1;
	}

	/* Prepare IV */
	err = skb_copy_bits(skb, rxm->offset + TLS_HEADER_SIZE,
			    iv + iv_offset + prot->salt_size,
			    prot->iv_size);
	if (err < 0) {
		kfree(mem);
		return err;
	}
	if (prot->version == TLS_1_3_VERSION)
		memcpy(iv + iv_offset, tls_ctx->rx.iv,
		       crypto_aead_ivsize(ctx->aead_recv));
	else
		memcpy(iv + iv_offset, tls_ctx->rx.iv, prot->salt_size);

	xor_iv_with_seq(prot->version, iv, tls_ctx->rx.rec_seq);

	/* Prepare AAD */
	tls_make_aad(aad, rxm->full_len - prot->overhead_size +
		     prot->tail_size,
		     tls_ctx->rx.rec_seq, prot->rec_seq_size,
		     ctx->control, prot->version);

	/* Prepare sgin */
	sg_init_table(sgin, n_sgin);
	sg_set_buf(&sgin[0], aad, prot->aad_size);
	err = skb_to_sgvec(skb, &sgin[1],
			   rxm->offset + prot->prepend_size,
			   rxm->full_len - prot->prepend_size);
	if (err < 0) {
		kfree(mem);
		return err;
	}

	if (n_sgout) {
		if (out_iov) {
			sg_init_table(sgout, n_sgout);
			sg_set_buf(&sgout[0], aad, prot->aad_size);

			*chunk = 0;
			err = tls_setup_from_iter(sk, out_iov, data_len,
						  &pages, chunk, &sgout[1],
						  (n_sgout - 1));
			if (err < 0)
				goto fallback_to_reg_recv;
		} else if (out_sg) {
			memcpy(sgout, out_sg, n_sgout * sizeof(*sgout));
		} else {
			goto fallback_to_reg_recv;
		}
	} else {
fallback_to_reg_recv:
		sgout = sgin;
		pages = 0;
		*chunk = data_len;
		*zc = false;
	}

	/* Prepare and submit AEAD request */
	err = tls_do_decryption(sk, skb, sgin, sgout, iv,
				data_len, aead_req, async);
	if (err == -EINPROGRESS)
		return err;

	/* Release the pages in case iov was mapped to pages */
	for (; pages > 0; pages--)
		put_page(sg_page(&sgout[pages]));

	kfree(mem);
	return err;
}
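/* Decrypt a record unless it has already been handled, either by a
 * previous call or by TLS_HW offload (tls_device_decrypted()).  On
 * success the strparser message is advanced past the record header,
 * trimmed to the plaintext and the receive sequence number advances.
 */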
static int decrypt_skb_update(struct sock *sk, struct sk_buff *skb,
			      struct iov_iter *dest, int *chunk, bool *zc,
			      bool async)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct strp_msg *rxm = strp_msg(skb);
	int pad, err = 0;

	if (!ctx->decrypted) {
		if (tls_ctx->rx_conf == TLS_HW) {
			err = tls_device_decrypted(sk, tls_ctx, skb, rxm);
			if (err < 0)
				return err;
		}

		/* Still not decrypted after tls_device */
		if (!ctx->decrypted) {
			err = decrypt_internal(sk, skb, dest, NULL, chunk, zc,
					       async);
			if (err < 0) {
				if (err == -EINPROGRESS)
					tls_advance_record_sn(sk, prot,
							      &tls_ctx->rx);
				else if (err == -EBADMSG)
					TLS_INC_STATS(sock_net(sk),
						      LINUX_MIB_TLSDECRYPTERROR);
				return err;
			}
		} else {
			*zc = false;
		}

		pad = padding_length(ctx, prot, skb);
		if (pad < 0)
			return pad;

		rxm->full_len -= pad;
		rxm->offset += prot->prepend_size;
		rxm->full_len -= prot->overhead_size;
		tls_advance_record_sn(sk, prot, &tls_ctx->rx);
		ctx->decrypted = 1;
		ctx->saved_data_ready(sk);
	} else {
		*zc = false;
	}

	return err;
}
int decrypt_skb(struct sock *sk, struct sk_buff *skb,
		struct scatterlist *sgout)
{
	bool zc = true;
	int chunk;

	return decrypt_internal(sk, skb, NULL, sgout, &chunk, &zc, false);
}
static bool tls_sw_advance_skb(struct sock *sk, struct sk_buff *skb,
			       unsigned int len)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);

	if (skb) {
		struct strp_msg *rxm = strp_msg(skb);

		if (len < rxm->full_len) {
			rxm->offset += len;
			rxm->full_len -= len;
			return false;
		}
		consume_skb(skb);
	}

	/* Finished with message */
	ctx->recv_pkt = NULL;
	__strp_unpause(&ctx->strp);

	return true;
}
/* This function traverses the rx_list in the tls receive context and copies
 * the decrypted records into the buffer provided by the caller when zero
 * copy is not true. Further, the records are removed from the rx_list if it
 * is not a peek case and the record has been consumed completely.
 */
static int process_rx_list(struct tls_sw_context_rx *ctx,
			   struct msghdr *msg,
			   u8 *control,
			   bool *cmsg,
			   size_t skip,
			   size_t len,
			   bool zc,
			   bool is_peek)
{
	struct sk_buff *skb = skb_peek(&ctx->rx_list);
	u8 ctrl = *control;
	u8 msgc = *cmsg;
	struct tls_msg *tlm;
	ssize_t copied = 0;

	/* Set the record type in 'control' if caller didn't pass it */
	if (!ctrl && skb) {
		tlm = tls_msg(skb);
		ctrl = tlm->control;
	}

	while (skip && skb) {
		struct strp_msg *rxm = strp_msg(skb);

		tlm = tls_msg(skb);

		/* Cannot process a record of different type */
		if (ctrl != tlm->control)
			return 0;

		if (skip < rxm->full_len)
			break;

		skip = skip - rxm->full_len;
		skb = skb_peek_next(skb, &ctx->rx_list);
	}

	while (len && skb) {
		struct sk_buff *next_skb;
		struct strp_msg *rxm = strp_msg(skb);
		int chunk = min_t(unsigned int, rxm->full_len - skip, len);

		tlm = tls_msg(skb);

		/* Cannot process a record of different type */
		if (ctrl != tlm->control)
			return 0;

		/* Set record type if not already done. For a non-data record,
		 * do not proceed if record type could not be copied.
		 */
		if (!msgc) {
			int cerr = put_cmsg(msg, SOL_TLS, TLS_GET_RECORD_TYPE,
					    sizeof(ctrl), &ctrl);
			msgc = true;
			if (ctrl != TLS_RECORD_TYPE_DATA) {
				if (cerr || msg->msg_flags & MSG_CTRUNC)
					return -EIO;

				*cmsg = msgc;
			}
		}

		if (!zc || (rxm->full_len - skip) > len) {
			int err = skb_copy_datagram_msg(skb, rxm->offset + skip,
							msg, chunk);
			if (err < 0)
				return err;
		}

		len = len - chunk;
		copied = copied + chunk;

		/* Consume the data from record if it is non-peek case */
		if (!is_peek) {
			rxm->offset = rxm->offset + chunk;
			rxm->full_len = rxm->full_len - chunk;

			/* Return if there is unconsumed data in the record */
			if (rxm->full_len - skip)
				break;
		}

		/* The remaining skip-bytes must lie in 1st record in rx_list.
		 * So from the 2nd record, 'skip' should be 0.
		 */
		skip = 0;

		if (msg)
			msg->msg_flags |= MSG_EOR;

		next_skb = skb_peek_next(skb, &ctx->rx_list);

		if (!is_peek) {
			skb_unlink(skb, &ctx->rx_list);
			consume_skb(skb);
		}

		skb = next_skb;
	}

	*control = ctrl;
	return copied;
}
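/* recvmsg() for a SW-crypto TLS socket.  Records already decrypted and
 * parked on rx_list are drained first; further records are then pulled
 * from the strparser and decrypted, straight into the user iterator
 * when the zero-copy conditions below hold.  A change of record type
 * terminates the loop so each control record is delivered on its own.
 */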
int tls_sw_recvmsg(struct sock *sk,
		   struct msghdr *msg,
		   size_t len,
		   int nonblock,
		   int flags,
		   int *addr_len)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct sk_psock *psock;
	unsigned char control = 0;
	ssize_t decrypted = 0;
	struct strp_msg *rxm;
	struct tls_msg *tlm;
	struct sk_buff *skb;
	ssize_t copied = 0;
	bool cmsg = false;
	int target, err = 0;
	long timeo;
	bool is_kvec = iov_iter_is_kvec(&msg->msg_iter);
	bool is_peek = flags & MSG_PEEK;
	int num_async = 0;

	flags |= nonblock;

	if (unlikely(flags & MSG_ERRQUEUE))
		return sock_recv_errqueue(sk, msg, len, SOL_IP, IP_RECVERR);

	psock = sk_psock_get(sk);
	lock_sock(sk);

	/* Process pending decrypted records. It must be non-zero-copy */
	err = process_rx_list(ctx, msg, &control, &cmsg, 0, len, false,
			      is_peek);
	if (err < 0) {
		tls_err_abort(sk, err);
		goto end;
	} else {
		copied = err;
	}

	if (len <= copied)
		goto recv_end;

	target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
	len = len - copied;
	timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);

	while (len && (decrypted + copied < target || ctx->recv_pkt)) {
		bool retain_skb = false;
		bool async_capable;
		bool async = false;
		bool zc = false;
		int to_decrypt;
		int chunk = 0;

		skb = tls_wait_data(sk, psock, flags, timeo, &err);
		if (!skb) {
			if (psock) {
				int ret = __tcp_bpf_recvmsg(sk, psock,
							    msg, len, flags);

				if (ret > 0) {
					decrypted += ret;
					len -= ret;
					continue;
				}
			}
			goto recv_end;
		} else {
			tlm = tls_msg(skb);
			if (prot->version == TLS_1_3_VERSION)
				tlm->control = 0;
			else
				tlm->control = ctx->control;
		}

		rxm = strp_msg(skb);

		to_decrypt = rxm->full_len - prot->overhead_size;

		if (to_decrypt <= len && !is_kvec && !is_peek &&
		    ctx->control == TLS_RECORD_TYPE_DATA &&
		    prot->version != TLS_1_3_VERSION)
			zc = true;

		/* Do not use async mode if record is non-data */
		if (ctx->control == TLS_RECORD_TYPE_DATA)
			async_capable = ctx->async_capable;
		else
			async_capable = false;

		err = decrypt_skb_update(sk, skb, &msg->msg_iter,
					 &chunk, &zc, async_capable);
		if (err < 0 && err != -EINPROGRESS) {
			tls_err_abort(sk, EBADMSG);
			goto recv_end;
		}

		if (err == -EINPROGRESS) {
			async = true;
			num_async++;
		} else if (prot->version == TLS_1_3_VERSION) {
			tlm->control = ctx->control;
		}

		/* If the type of records being processed is not known yet,
		 * set it to record type just dequeued. If it is already known,
		 * but does not match the record type just dequeued, go to end.
		 * We always get record type here since for tls1.2, record type
		 * is known just after record is dequeued from stream parser.
		 * For tls1.3, we disable async.
		 */
		if (!control)
			control = tlm->control;
		else if (control != tlm->control)
			goto recv_end;

		if (!cmsg) {
			int cerr;

			cmsg = true;
			cerr = put_cmsg(msg, SOL_TLS, TLS_GET_RECORD_TYPE,
					sizeof(control), &control);
			if (control != TLS_RECORD_TYPE_DATA) {
				if (cerr || msg->msg_flags & MSG_CTRUNC) {
					err = -EIO;
					goto recv_end;
				}
			}
		}

		if (async)
			goto pick_next_record;

		if (!zc) {
			if (rxm->full_len > len) {
				retain_skb = true;
				chunk = len;
			} else {
				chunk = rxm->full_len;
			}

			err = skb_copy_datagram_msg(skb, rxm->offset,
						    msg, chunk);
			if (err < 0)
				goto recv_end;

			if (!is_peek) {
				rxm->offset = rxm->offset + chunk;
				rxm->full_len = rxm->full_len - chunk;
			}
		}

pick_next_record:
		if (chunk > len)
			chunk = len;

		decrypted += chunk;
		len -= chunk;

		/* For async or peek case, queue the current skb */
		if (async || is_peek || retain_skb) {
			skb_queue_tail(&ctx->rx_list, skb);
			skb = NULL;
		}

		if (tls_sw_advance_skb(sk, skb, chunk)) {
			/* Return full control message to
			 * userspace before trying to parse
			 * another message type
			 */
			msg->msg_flags |= MSG_EOR;
			if (ctx->control != TLS_RECORD_TYPE_DATA)
				goto recv_end;
		} else {
			break;
		}
	}

recv_end:
	if (num_async) {
		/* Wait for all previously submitted records to be decrypted */
		smp_store_mb(ctx->async_notify, true);
		if (atomic_read(&ctx->decrypt_pending)) {
			err = crypto_wait_req(-EINPROGRESS, &ctx->async_wait);
			if (err) {
				/* one of async decrypt failed */
				tls_err_abort(sk, err);
				copied = 0;
				decrypted = 0;
				goto end;
			}
		} else {
			reinit_completion(&ctx->async_wait.completion);
		}
		WRITE_ONCE(ctx->async_notify, false);

		/* Drain records from the rx_list & copy if required */
		if (is_peek || is_kvec)
			err = process_rx_list(ctx, msg, &control, &cmsg, copied,
					      decrypted, false, is_peek);
		else
			err = process_rx_list(ctx, msg, &control, &cmsg, 0,
					      decrypted, true, is_peek);
		if (err < 0) {
			tls_err_abort(sk, err);
			copied = 0;
			goto end;
		}
	}

	copied += decrypted;

end:
	release_sock(sk);
	if (psock)
		sk_psock_put(sk, psock);
	return copied ? : err;
}
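/* splice() can only move data records: there is no way to attach a
 * cmsg carrying the record type to a pipe, so control records fail
 * with an error instead.
 */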
ssize_t tls_sw_splice_read(struct socket *sock, loff_t *ppos,
			   struct pipe_inode_info *pipe,
			   size_t len, unsigned int flags)
{
	struct tls_context *tls_ctx = tls_get_ctx(sock->sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	struct strp_msg *rxm = NULL;
	struct sock *sk = sock->sk;
	struct sk_buff *skb;
	ssize_t copied = 0;
	int err = 0;
	long timeo;
	int chunk;
	bool zc = false;

	lock_sock(sk);

	timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);

	skb = tls_wait_data(sk, NULL, flags, timeo, &err);
	if (!skb)
		goto splice_read_end;

	if (!ctx->decrypted) {
		err = decrypt_skb_update(sk, skb, NULL, &chunk, &zc, false);

		/* splice does not support reading control messages */
		if (ctx->control != TLS_RECORD_TYPE_DATA) {
			err = -EINVAL;
			goto splice_read_end;
		}

		if (err < 0) {
			tls_err_abort(sk, EBADMSG);
			goto splice_read_end;
		}
		ctx->decrypted = 1;
	}
	rxm = strp_msg(skb);

	chunk = min_t(unsigned int, rxm->full_len, len);
	copied = skb_splice_bits(skb, sk, rxm->offset, pipe, chunk, flags);
	if (copied < 0)
		goto splice_read_end;

	if (likely(!(flags & MSG_PEEK)))
		tls_sw_advance_skb(sk, skb, copied);

splice_read_end:
	release_sock(sk);
	return copied ? : err;
}
bool tls_sw_stream_read(const struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	bool ingress_empty = true;
	struct sk_psock *psock;

	rcu_read_lock();
	psock = sk_psock(sk);
	if (psock)
		ingress_empty = list_empty(&psock->ingress_msg);
	rcu_read_unlock();

	return !ingress_empty || ctx->recv_pkt ||
		!skb_queue_empty(&ctx->rx_list);
}
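/* strparser callback: parse the 5-byte TLS record header,
 *	opaque type (1) | legacy version (2) | length (2, big endian),
 * and return the full record length so that the strparser hands over
 * exactly one record per message.  For example, the header
 * 17 03 03 00 64 announces a 0x0064 = 100 byte ciphertext, so the
 * record is 105 bytes in total.
 */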
static int tls_read_size(struct strparser *strp, struct sk_buff *skb)
{
	struct tls_context *tls_ctx = tls_get_ctx(strp->sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	char header[TLS_HEADER_SIZE + MAX_IV_SIZE];
	struct strp_msg *rxm = strp_msg(skb);
	size_t cipher_overhead;
	size_t data_len = 0;
	int ret;

	/* Verify that we have a full TLS header, or wait for more data */
	if (rxm->offset + prot->prepend_size > skb->len)
		return 0;

	/* Sanity-check size of on-stack buffer. */
	if (WARN_ON(prot->prepend_size > sizeof(header))) {
		ret = -EINVAL;
		goto read_failure;
	}

	/* Linearize header to local buffer */
	ret = skb_copy_bits(skb, rxm->offset, header, prot->prepend_size);
	if (ret < 0)
		goto read_failure;

	ctx->control = header[0];

	data_len = ((header[4] & 0xFF) | (header[3] << 8));

	cipher_overhead = prot->tag_size;
	if (prot->version != TLS_1_3_VERSION)
		cipher_overhead += prot->iv_size;

	if (data_len > TLS_MAX_PAYLOAD_SIZE + cipher_overhead +
	    prot->tail_size) {
		ret = -EMSGSIZE;
		goto read_failure;
	}
	if (data_len < cipher_overhead) {
		ret = -EBADMSG;
		goto read_failure;
	}

	/* Note that both TLS1.3 and TLS1.2 use TLS_1_2 version here */
	if (header[1] != TLS_1_2_VERSION_MINOR ||
	    header[2] != TLS_1_2_VERSION_MAJOR) {
		ret = -EINVAL;
		goto read_failure;
	}

	tls_device_rx_resync_new_rec(strp->sk, data_len + TLS_HEADER_SIZE,
				     TCP_SKB_CB(skb)->seq + rxm->offset);
	return data_len + TLS_HEADER_SIZE;

read_failure:
	tls_err_abort(strp->sk, ret);

	return ret;
}
static void tls_queue(struct strparser *strp, struct sk_buff *skb)
{
	struct tls_context *tls_ctx = tls_get_ctx(strp->sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);

	ctx->decrypted = 0;

	ctx->recv_pkt = skb;
	strp_pause(strp);

	ctx->saved_data_ready(strp->sk);
}
static void tls_data_ready(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	struct sk_psock *psock;

	strp_data_ready(&ctx->strp);

	psock = sk_psock_get(sk);
	if (psock) {
		if (!list_empty(&psock->ingress_msg))
			ctx->saved_data_ready(sk);
		sk_psock_put(sk, psock);
	}
}
void tls_sw_cancel_work_tx(struct tls_context *tls_ctx)
{
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);

	set_bit(BIT_TX_CLOSING, &ctx->tx_bitmask);
	set_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask);
	cancel_delayed_work_sync(&ctx->tx_work.work);
}
void tls_sw_release_resources_tx(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct tls_rec *rec, *tmp;

	/* Wait for any pending async encryptions to complete */
	smp_store_mb(ctx->async_notify, true);
	if (atomic_read(&ctx->encrypt_pending))
		crypto_wait_req(-EINPROGRESS, &ctx->async_wait);

	tls_tx_records(sk, -1);

	/* Free up un-sent records in tx_list. First, free
	 * the partially sent record if any at head of tx_list.
	 */
	if (tls_ctx->partially_sent_record) {
		tls_free_partial_record(sk, tls_ctx);
		rec = list_first_entry(&ctx->tx_list,
				       struct tls_rec, list);
		list_del(&rec->list);
		sk_msg_free(sk, &rec->msg_plaintext);
		kfree(rec);
	}

	list_for_each_entry_safe(rec, tmp, &ctx->tx_list, list) {
		list_del(&rec->list);
		sk_msg_free(sk, &rec->msg_encrypted);
		sk_msg_free(sk, &rec->msg_plaintext);
		kfree(rec);
	}

	crypto_free_aead(ctx->aead_send);
	tls_free_open_rec(sk);
}
void tls_sw_free_ctx_tx(struct tls_context *tls_ctx)
{
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);

	kfree(ctx);
}
void tls_sw_release_resources_rx(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);

	kfree(tls_ctx->rx.rec_seq);
	kfree(tls_ctx->rx.iv);

	if (ctx->aead_recv) {
		kfree_skb(ctx->recv_pkt);
		ctx->recv_pkt = NULL;
		skb_queue_purge(&ctx->rx_list);
		crypto_free_aead(ctx->aead_recv);
		strp_stop(&ctx->strp);
		/* If tls_sw_strparser_arm() was not called (cleanup paths)
		 * we still want to strp_stop(), but sk->sk_data_ready was
		 * never swapped.
		 */
		if (ctx->saved_data_ready) {
			write_lock_bh(&sk->sk_callback_lock);
			sk->sk_data_ready = ctx->saved_data_ready;
			write_unlock_bh(&sk->sk_callback_lock);
		}
	}
}
void tls_sw_strparser_done(struct tls_context *tls_ctx)
{
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);

	strp_done(&ctx->strp);
}
void tls_sw_free_ctx_rx(struct tls_context *tls_ctx)
{
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);

	kfree(ctx);
}
void tls_sw_free_resources_rx(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);

	tls_sw_release_resources_rx(sk);
	tls_sw_free_ctx_rx(tls_ctx);
}
/* The work handler to transmit the encrypted records in tx_list */
static void tx_work_handler(struct work_struct *work)
{
	struct delayed_work *delayed_work = to_delayed_work(work);
	struct tx_work *tx_work = container_of(delayed_work,
					       struct tx_work, work);
	struct sock *sk = tx_work->sk;
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx;

	if (unlikely(!tls_ctx))
		return;

	ctx = tls_sw_ctx_tx(tls_ctx);
	if (test_bit(BIT_TX_CLOSING, &ctx->tx_bitmask))
		return;

	if (!test_and_clear_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask))
		return;
	mutex_lock(&tls_ctx->tx_lock);
	lock_sock(sk);
	tls_tx_records(sk, -1);
	release_sock(sk);
	mutex_unlock(&tls_ctx->tx_lock);
}
void tls_sw_write_space(struct sock *sk, struct tls_context *ctx)
{
	struct tls_sw_context_tx *tx_ctx = tls_sw_ctx_tx(ctx);

	/* Schedule the transmission if tx list is ready */
	if (is_tx_ready(tx_ctx) &&
	    !test_and_set_bit(BIT_TX_SCHEDULED, &tx_ctx->tx_bitmask))
		schedule_delayed_work(&tx_ctx->tx_work.work, 0);
}
void tls_sw_strparser_arm(struct sock *sk, struct tls_context *tls_ctx)
{
	struct tls_sw_context_rx *rx_ctx = tls_sw_ctx_rx(tls_ctx);

	write_lock_bh(&sk->sk_callback_lock);
	rx_ctx->saved_data_ready = sk->sk_data_ready;
	sk->sk_data_ready = tls_data_ready;
	write_unlock_bh(&sk->sk_callback_lock);

	strp_check_rcv(&rx_ctx->strp);
}
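/* Set up SW crypto for one direction (tx selects the transmit path).
 * Derives the per-protocol sizes from the chosen cipher, stores
 * salt||iv in cctx->iv, allocates and keys the AEAD transform, and on
 * the receive side initializes the strparser with tls_read_size() and
 * tls_queue() as callbacks.
 */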
int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct tls_crypto_info *crypto_info;
	struct tls12_crypto_info_aes_gcm_128 *gcm_128_info;
	struct tls12_crypto_info_aes_gcm_256 *gcm_256_info;
	struct tls12_crypto_info_aes_ccm_128 *ccm_128_info;
	struct tls_sw_context_tx *sw_ctx_tx = NULL;
	struct tls_sw_context_rx *sw_ctx_rx = NULL;
	struct cipher_context *cctx;
	struct crypto_aead **aead;
	struct strp_callbacks cb;
	u16 nonce_size, tag_size, iv_size, rec_seq_size, salt_size;
	struct crypto_tfm *tfm;
	char *iv, *rec_seq, *key, *salt, *cipher_name;
	size_t keysize;
	int rc = 0;

	if (!ctx) {
		rc = -EINVAL;
		goto out;
	}

	if (tx) {
		if (!ctx->priv_ctx_tx) {
			sw_ctx_tx = kzalloc(sizeof(*sw_ctx_tx), GFP_KERNEL);
			if (!sw_ctx_tx) {
				rc = -ENOMEM;
				goto out;
			}
			ctx->priv_ctx_tx = sw_ctx_tx;
		} else {
			sw_ctx_tx =
				(struct tls_sw_context_tx *)ctx->priv_ctx_tx;
		}
	} else {
		if (!ctx->priv_ctx_rx) {
			sw_ctx_rx = kzalloc(sizeof(*sw_ctx_rx), GFP_KERNEL);
			if (!sw_ctx_rx) {
				rc = -ENOMEM;
				goto out;
			}
			ctx->priv_ctx_rx = sw_ctx_rx;
		} else {
			sw_ctx_rx =
				(struct tls_sw_context_rx *)ctx->priv_ctx_rx;
		}
	}

	if (tx) {
		crypto_init_wait(&sw_ctx_tx->async_wait);
		crypto_info = &ctx->crypto_send.info;
		cctx = &ctx->tx;
		aead = &sw_ctx_tx->aead_send;
		INIT_LIST_HEAD(&sw_ctx_tx->tx_list);
		INIT_DELAYED_WORK(&sw_ctx_tx->tx_work.work, tx_work_handler);
		sw_ctx_tx->tx_work.sk = sk;
	} else {
		crypto_init_wait(&sw_ctx_rx->async_wait);
		crypto_info = &ctx->crypto_recv.info;
		cctx = &ctx->rx;
		skb_queue_head_init(&sw_ctx_rx->rx_list);
		aead = &sw_ctx_rx->aead_recv;
	}

	switch (crypto_info->cipher_type) {
	case TLS_CIPHER_AES_GCM_128: {
		nonce_size = TLS_CIPHER_AES_GCM_128_IV_SIZE;
		tag_size = TLS_CIPHER_AES_GCM_128_TAG_SIZE;
		iv_size = TLS_CIPHER_AES_GCM_128_IV_SIZE;
		iv = ((struct tls12_crypto_info_aes_gcm_128 *)crypto_info)->iv;
		rec_seq_size = TLS_CIPHER_AES_GCM_128_REC_SEQ_SIZE;
		rec_seq =
		 ((struct tls12_crypto_info_aes_gcm_128 *)crypto_info)->rec_seq;
		gcm_128_info =
			(struct tls12_crypto_info_aes_gcm_128 *)crypto_info;
		keysize = TLS_CIPHER_AES_GCM_128_KEY_SIZE;
		key = gcm_128_info->key;
		salt = gcm_128_info->salt;
		salt_size = TLS_CIPHER_AES_GCM_128_SALT_SIZE;
		cipher_name = "gcm(aes)";
		break;
	}
	case TLS_CIPHER_AES_GCM_256: {
		nonce_size = TLS_CIPHER_AES_GCM_256_IV_SIZE;
		tag_size = TLS_CIPHER_AES_GCM_256_TAG_SIZE;
		iv_size = TLS_CIPHER_AES_GCM_256_IV_SIZE;
		iv = ((struct tls12_crypto_info_aes_gcm_256 *)crypto_info)->iv;
		rec_seq_size = TLS_CIPHER_AES_GCM_256_REC_SEQ_SIZE;
		rec_seq =
		 ((struct tls12_crypto_info_aes_gcm_256 *)crypto_info)->rec_seq;
		gcm_256_info =
			(struct tls12_crypto_info_aes_gcm_256 *)crypto_info;
		keysize = TLS_CIPHER_AES_GCM_256_KEY_SIZE;
		key = gcm_256_info->key;
		salt = gcm_256_info->salt;
		salt_size = TLS_CIPHER_AES_GCM_256_SALT_SIZE;
		cipher_name = "gcm(aes)";
		break;
	}
	case TLS_CIPHER_AES_CCM_128: {
		nonce_size = TLS_CIPHER_AES_CCM_128_IV_SIZE;
		tag_size = TLS_CIPHER_AES_CCM_128_TAG_SIZE;
		iv_size = TLS_CIPHER_AES_CCM_128_IV_SIZE;
		iv = ((struct tls12_crypto_info_aes_ccm_128 *)crypto_info)->iv;
		rec_seq_size = TLS_CIPHER_AES_CCM_128_REC_SEQ_SIZE;
		rec_seq =
		 ((struct tls12_crypto_info_aes_ccm_128 *)crypto_info)->rec_seq;
		ccm_128_info =
			(struct tls12_crypto_info_aes_ccm_128 *)crypto_info;
		keysize = TLS_CIPHER_AES_CCM_128_KEY_SIZE;
		key = ccm_128_info->key;
		salt = ccm_128_info->salt;
		salt_size = TLS_CIPHER_AES_CCM_128_SALT_SIZE;
		cipher_name = "ccm(aes)";
		break;
	}
	default:
		rc = -EINVAL;
		goto free_priv;
	}

	/* Sanity-check the sizes for stack allocations. */
	if (iv_size > MAX_IV_SIZE || nonce_size > MAX_IV_SIZE ||
	    rec_seq_size > TLS_MAX_REC_SEQ_SIZE) {
		rc = -EINVAL;
		goto free_priv;
	}

	if (crypto_info->version == TLS_1_3_VERSION) {
		nonce_size = 0;
		prot->aad_size = TLS_HEADER_SIZE;
		prot->tail_size = 1;
	} else {
		prot->aad_size = TLS_AAD_SPACE_SIZE;
		prot->tail_size = 0;
	}

	prot->version = crypto_info->version;
	prot->cipher_type = crypto_info->cipher_type;
	prot->prepend_size = TLS_HEADER_SIZE + nonce_size;
	prot->tag_size = tag_size;
	prot->overhead_size = prot->prepend_size +
			      prot->tag_size + prot->tail_size;
	prot->iv_size = iv_size;
	prot->salt_size = salt_size;
	cctx->iv = kmalloc(iv_size + salt_size, GFP_KERNEL);
	if (!cctx->iv) {
		rc = -ENOMEM;
		goto free_priv;
	}
	/* Note: 128 & 256 bit salt are the same size */
	prot->rec_seq_size = rec_seq_size;
	memcpy(cctx->iv, salt, salt_size);
	memcpy(cctx->iv + salt_size, iv, iv_size);
	cctx->rec_seq = kmemdup(rec_seq, rec_seq_size, GFP_KERNEL);
	if (!cctx->rec_seq) {
		rc = -ENOMEM;
		goto free_iv;
	}

	if (!*aead) {
		*aead = crypto_alloc_aead(cipher_name, 0, 0);
		if (IS_ERR(*aead)) {
			rc = PTR_ERR(*aead);
			*aead = NULL;
			goto free_rec_seq;
		}
	}

	ctx->push_pending_record = tls_sw_push_pending_record;

	rc = crypto_aead_setkey(*aead, key, keysize);

	if (rc)
		goto free_aead;

	rc = crypto_aead_setauthsize(*aead, prot->tag_size);
	if (rc)
		goto free_aead;

	if (sw_ctx_rx) {
		tfm = crypto_aead_tfm(sw_ctx_rx->aead_recv);

		if (crypto_info->version == TLS_1_3_VERSION)
			sw_ctx_rx->async_capable = 0;
		else
			sw_ctx_rx->async_capable =
				!!(tfm->__crt_alg->cra_flags &
				   CRYPTO_ALG_ASYNC);

		/* Set up strparser */
		memset(&cb, 0, sizeof(cb));
		cb.rcv_msg = tls_queue;
		cb.parse_msg = tls_read_size;

		strp_init(&sw_ctx_rx->strp, sk, &cb);
	}

	goto out;

free_aead:
	crypto_free_aead(*aead);
	*aead = NULL;
free_rec_seq:
	kfree(cctx->rec_seq);
	cctx->rec_seq = NULL;
free_iv:
	kfree(cctx->iv);
	cctx->iv = NULL;
free_priv:
	if (tx) {
		kfree(ctx->priv_ctx_tx);
		ctx->priv_ctx_tx = NULL;
	} else {
		kfree(ctx->priv_ctx_rx);
		ctx->priv_ctx_rx = NULL;
	}
out:
	return rc;
}