/*
 * Copyright (c) 2016-2017, Mellanox Technologies. All rights reserved.
 * Copyright (c) 2016-2017, Dave Watson <davejwatson@fb.com>. All rights reserved.
 * Copyright (c) 2016-2017, Lance Chao <lancerchao@fb.com>. All rights reserved.
 * Copyright (c) 2016, Fridolin Pokorny <fridolin.pokorny@gmail.com>. All rights reserved.
 * Copyright (c) 2016, Nikos Mavrogiannopoulos <nmav@gnutls.org>. All rights reserved.
 * Copyright (c) 2018, Covalent IO, Inc. http://covalent.io
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/bug.h>
#include <linux/sched/signal.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/splice.h>
#include <crypto/aead.h>

#include <net/strparser.h>
#include <net/tls.h>
#include <trace/events/sock.h>

#include "tls.h"
struct tls_decrypt_arg {
	struct_group(inargs,
	bool zc;
	bool async;
	u8 tail;
	);
	struct sk_buff *skb;
};

struct tls_decrypt_ctx {
	struct sock *sk;
	u8 iv[MAX_IV_SIZE];
	u8 aad[TLS_MAX_AAD_SIZE];
	u8 tail;
	struct scatterlist sg[];
};
noinline void tls_err_abort(struct sock *sk, int err)
{
	WARN_ON_ONCE(err >= 0);
	/* sk->sk_err should contain a positive error code. */
	WRITE_ONCE(sk->sk_err, -err);
	/* Paired with smp_rmb() in tcp_poll() */
	smp_wmb();
	sk_error_report(sk);
}
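
/* Example (illustrative, follows from the WARN_ON_ONCE() above): callers
 * pass a negative errno, e.g.
 *
 *	tls_err_abort(sk, -EBADMSG);
 *
 * which leaves the positive EBADMSG in sk->sk_err.
 */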
static int __skb_nsg(struct sk_buff *skb, int offset, int len,
		     unsigned int recursion_level)
{
	int start = skb_headlen(skb);
	int i, chunk = start - offset;
	struct sk_buff *frag_iter;
	int elt = 0;

	if (unlikely(recursion_level >= 24))
		return -EMSGSIZE;

	if (chunk > 0) {
		if (chunk > len)
			chunk = len;
		elt++;
		len -= chunk;
		if (len == 0)
			return elt;
		offset += chunk;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int end;

		WARN_ON(start > offset + len);

		end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]);
		chunk = end - offset;
		if (chunk > 0) {
			if (chunk > len)
				chunk = len;
			elt++;
			len -= chunk;
			if (len == 0)
				return elt;
			offset += chunk;
		}
		start = end;
	}

	if (unlikely(skb_has_frag_list(skb))) {
		skb_walk_frags(skb, frag_iter) {
			int end, ret;

			WARN_ON(start > offset + len);

			end = start + frag_iter->len;
			chunk = end - offset;
			if (chunk > 0) {
				if (chunk > len)
					chunk = len;
				ret = __skb_nsg(frag_iter, offset - start, chunk,
						recursion_level + 1);
				if (unlikely(ret < 0))
					return ret;
				elt += ret;
				len -= chunk;
				if (len == 0)
					return elt;
				offset += chunk;
			}
			start = end;
		}
	}
	BUG_ON(len);
	return elt;
}
/* Return the number of scatterlist elements required to completely map the
 * skb, or -EMSGSIZE if the recursion depth is exceeded.
 */
static int skb_nsg(struct sk_buff *skb, int offset, int len)
{
	return __skb_nsg(skb, offset, len, 0);
}
static int tls_padding_length(struct tls_prot_info *prot, struct sk_buff *skb,
			      struct tls_decrypt_arg *darg)
{
	struct strp_msg *rxm = strp_msg(skb);
	struct tls_msg *tlm = tls_msg(skb);
	int sub = 0;

	/* Determine zero-padding length */
	if (prot->version == TLS_1_3_VERSION) {
		int offset = rxm->full_len - TLS_TAG_SIZE - 1;
		char content_type = darg->zc ? darg->tail : 0;
		int err;

		while (content_type == 0) {
			if (offset < prot->prepend_size)
				return -EBADMSG;
			err = skb_copy_bits(skb, rxm->offset + offset,
					    &content_type, 1);
			if (err)
				return err;
			if (content_type)
				break;
			sub++;
			offset--;
		}
		tlm->control = content_type;
	}
	return sub;
}
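
/* Illustrative sketch of the TLS 1.3 record body scanned above:
 *
 *	+-----------+--------------+--------------+-----+
 *	| plaintext | content type | zero padding | tag |
 *	+-----------+--------------+--------------+-----+
 *
 * The loop walks backwards from the byte just before the tag until it
 * finds the first non-zero byte, which is the real content type; the
 * number of zero bytes skipped is returned as the padding length.
 */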
static void tls_decrypt_done(void *data, int err)
{
	struct aead_request *aead_req = data;
	struct crypto_aead *aead = crypto_aead_reqtfm(aead_req);
	struct scatterlist *sgout = aead_req->dst;
	struct scatterlist *sgin = aead_req->src;
	struct tls_sw_context_rx *ctx;
	struct tls_decrypt_ctx *dctx;
	struct tls_context *tls_ctx;
	struct scatterlist *sg;
	unsigned int pages;
	struct sock *sk;
	int aead_size;

	aead_size = sizeof(*aead_req) + crypto_aead_reqsize(aead);
	aead_size = ALIGN(aead_size, __alignof__(*dctx));
	dctx = (void *)((u8 *)aead_req + aead_size);

	sk = dctx->sk;
	tls_ctx = tls_get_ctx(sk);
	ctx = tls_sw_ctx_rx(tls_ctx);

	/* Propagate if there was an err */
	if (err) {
		if (err == -EBADMSG)
			TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSDECRYPTERROR);
		ctx->async_wait.err = err;
		tls_err_abort(sk, err);
	}

	/* Free the destination pages if skb was not decrypted inplace */
	if (sgout != sgin) {
		/* Skip the first S/G entry as it points to AAD */
		for_each_sg(sg_next(sgout), sg, UINT_MAX, pages) {
			if (!sg)
				break;
			put_page(sg_page(sg));
		}
	}

	kfree(aead_req);

	spin_lock_bh(&ctx->decrypt_compl_lock);
	if (!atomic_dec_return(&ctx->decrypt_pending))
		complete(&ctx->async_wait.completion);
	spin_unlock_bh(&ctx->decrypt_compl_lock);
}
static int tls_do_decryption(struct sock *sk,
			     struct scatterlist *sgin,
			     struct scatterlist *sgout,
			     char *iv_recv,
			     size_t data_len,
			     struct aead_request *aead_req,
			     struct tls_decrypt_arg *darg)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	int ret;

	aead_request_set_tfm(aead_req, ctx->aead_recv);
	aead_request_set_ad(aead_req, prot->aad_size);
	aead_request_set_crypt(aead_req, sgin, sgout,
			       data_len + prot->tag_size,
			       (u8 *)iv_recv);

	if (darg->async) {
		aead_request_set_callback(aead_req,
					  CRYPTO_TFM_REQ_MAY_BACKLOG,
					  tls_decrypt_done, aead_req);
		atomic_inc(&ctx->decrypt_pending);
	} else {
		aead_request_set_callback(aead_req,
					  CRYPTO_TFM_REQ_MAY_BACKLOG,
					  crypto_req_done, &ctx->async_wait);
	}

	ret = crypto_aead_decrypt(aead_req);
	if (ret == -EINPROGRESS) {
		if (darg->async)
			return 0;

		ret = crypto_wait_req(ret, &ctx->async_wait);
	}
	darg->async = false;

	return ret;
}
static void tls_trim_both_msgs(struct sock *sk, int target_size)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct tls_rec *rec = ctx->open_rec;

	sk_msg_trim(sk, &rec->msg_plaintext, target_size);
	if (target_size > 0)
		target_size += prot->overhead_size;
	sk_msg_trim(sk, &rec->msg_encrypted, target_size);
}
static int tls_alloc_encrypted_msg(struct sock *sk, int len)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct tls_rec *rec = ctx->open_rec;
	struct sk_msg *msg_en = &rec->msg_encrypted;

	return sk_msg_alloc(sk, msg_en, len, 0);
}
static int tls_clone_plaintext_msg(struct sock *sk, int required)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct tls_rec *rec = ctx->open_rec;
	struct sk_msg *msg_pl = &rec->msg_plaintext;
	struct sk_msg *msg_en = &rec->msg_encrypted;
	int skip, len;

	/* We add page references worth len bytes from encrypted sg
	 * at the end of plaintext sg. It is guaranteed that msg_en
	 * has enough required room (ensured by caller).
	 */
	len = required - msg_pl->sg.size;

	/* Skip initial bytes in msg_en's data to be able to use
	 * same offset of both plain and encrypted data.
	 */
	skip = prot->prepend_size + msg_pl->sg.size;

	return sk_msg_clone(sk, msg_pl, msg_en, skip, len);
}
static struct tls_rec *tls_get_rec(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct sk_msg *msg_pl, *msg_en;
	struct tls_rec *rec;
	int mem_size;

	mem_size = sizeof(struct tls_rec) + crypto_aead_reqsize(ctx->aead_send);

	rec = kzalloc(mem_size, sk->sk_allocation);
	if (!rec)
		return NULL;

	msg_pl = &rec->msg_plaintext;
	msg_en = &rec->msg_encrypted;

	sk_msg_init(msg_pl);
	sk_msg_init(msg_en);

	sg_init_table(rec->sg_aead_in, 2);
	sg_set_buf(&rec->sg_aead_in[0], rec->aad_space, prot->aad_size);
	sg_unmark_end(&rec->sg_aead_in[1]);

	sg_init_table(rec->sg_aead_out, 2);
	sg_set_buf(&rec->sg_aead_out[0], rec->aad_space, prot->aad_size);
	sg_unmark_end(&rec->sg_aead_out[1]);

	return rec;
}
static void tls_free_rec(struct sock *sk, struct tls_rec *rec)
{
	sk_msg_free(sk, &rec->msg_encrypted);
	sk_msg_free(sk, &rec->msg_plaintext);
	kfree(rec);
}
static void tls_free_open_rec(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct tls_rec *rec = ctx->open_rec;

	if (rec) {
		tls_free_rec(sk, rec);
		ctx->open_rec = NULL;
	}
}
int tls_tx_records(struct sock *sk, int flags)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct tls_rec *rec, *tmp;
	struct sk_msg *msg_en;
	int tx_flags, rc = 0;

	if (tls_is_partially_sent_record(tls_ctx)) {
		rec = list_first_entry(&ctx->tx_list,
				       struct tls_rec, list);

		if (flags == -1)
			tx_flags = rec->tx_flags;
		else
			tx_flags = flags;

		rc = tls_push_partial_record(sk, tls_ctx, tx_flags);
		if (rc)
			goto tx_err;

		/* Full record has been transmitted.
		 * Remove the head of tx_list
		 */
		list_del(&rec->list);
		sk_msg_free(sk, &rec->msg_plaintext);
		kfree(rec);
	}

	/* Tx all ready records */
	list_for_each_entry_safe(rec, tmp, &ctx->tx_list, list) {
		if (READ_ONCE(rec->tx_ready)) {
			if (flags == -1)
				tx_flags = rec->tx_flags;
			else
				tx_flags = flags;

			msg_en = &rec->msg_encrypted;
			rc = tls_push_sg(sk, tls_ctx,
					 &msg_en->sg.data[msg_en->sg.curr],
					 0, tx_flags);
			if (rc)
				goto tx_err;

			list_del(&rec->list);
			sk_msg_free(sk, &rec->msg_plaintext);
			kfree(rec);
		} else {
			break;
		}
	}

tx_err:
	if (rc < 0 && rc != -EAGAIN)
		tls_err_abort(sk, -EBADMSG);

	return rc;
}
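
/* Illustrative usage: callers that have no flags of their own pass -1,
 * so each record is sent with the tx_flags it was queued with:
 *
 *	tls_tx_records(sk, -1);
 *
 * as done from tx_work_handler() and the tx release path below.
 */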
static void tls_encrypt_done(void *data, int err)
{
	struct tls_sw_context_tx *ctx;
	struct tls_context *tls_ctx;
	struct tls_prot_info *prot;
	struct tls_rec *rec = data;
	struct scatterlist *sge;
	struct sk_msg *msg_en;
	bool ready = false;
	struct sock *sk;
	int pending;

	msg_en = &rec->msg_encrypted;

	sk = rec->sk;
	tls_ctx = tls_get_ctx(sk);
	prot = &tls_ctx->prot_info;
	ctx = tls_sw_ctx_tx(tls_ctx);

	sge = sk_msg_elem(msg_en, msg_en->sg.curr);
	sge->offset -= prot->prepend_size;
	sge->length += prot->prepend_size;

	/* Check if error is previously set on socket */
	if (err || sk->sk_err) {
		rec = NULL;

		/* If err is already set on socket, return the same code */
		if (sk->sk_err) {
			ctx->async_wait.err = -sk->sk_err;
		} else {
			ctx->async_wait.err = err;
			tls_err_abort(sk, err);
		}
	}

	if (rec) {
		struct tls_rec *first_rec;

		/* Mark the record as ready for transmission */
		smp_store_mb(rec->tx_ready, true);

		/* If received record is at head of tx_list, schedule tx */
		first_rec = list_first_entry(&ctx->tx_list,
					     struct tls_rec, list);
		if (rec == first_rec)
			ready = true;
	}

	spin_lock_bh(&ctx->encrypt_compl_lock);
	pending = atomic_dec_return(&ctx->encrypt_pending);

	if (!pending && ctx->async_notify)
		complete(&ctx->async_wait.completion);
	spin_unlock_bh(&ctx->encrypt_compl_lock);

	if (!ready)
		return;

	/* Schedule the transmission */
	if (!test_and_set_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask))
		schedule_delayed_work(&ctx->tx_work.work, 1);
}
static int tls_do_encryption(struct sock *sk,
			     struct tls_context *tls_ctx,
			     struct tls_sw_context_tx *ctx,
			     struct aead_request *aead_req,
			     size_t data_len, u32 start)
{
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct tls_rec *rec = ctx->open_rec;
	struct sk_msg *msg_en = &rec->msg_encrypted;
	struct scatterlist *sge = sk_msg_elem(msg_en, start);
	int rc, iv_offset = 0;

	/* For CCM based ciphers, first byte of IV is a constant */
	switch (prot->cipher_type) {
	case TLS_CIPHER_AES_CCM_128:
		rec->iv_data[0] = TLS_AES_CCM_IV_B0_BYTE;
		iv_offset = 1;
		break;
	case TLS_CIPHER_SM4_CCM:
		rec->iv_data[0] = TLS_SM4_CCM_IV_B0_BYTE;
		iv_offset = 1;
		break;
	}

	memcpy(&rec->iv_data[iv_offset], tls_ctx->tx.iv,
	       prot->iv_size + prot->salt_size);

	tls_xor_iv_with_seq(prot, rec->iv_data + iv_offset,
			    tls_ctx->tx.rec_seq);

	sge->offset += prot->prepend_size;
	sge->length -= prot->prepend_size;

	msg_en->sg.curr = start;

	aead_request_set_tfm(aead_req, ctx->aead_send);
	aead_request_set_ad(aead_req, prot->aad_size);
	aead_request_set_crypt(aead_req, rec->sg_aead_in,
			       rec->sg_aead_out,
			       data_len, rec->iv_data);

	aead_request_set_callback(aead_req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				  tls_encrypt_done, rec);

	/* Add the record in tx_list */
	list_add_tail((struct list_head *)&rec->list, &ctx->tx_list);
	atomic_inc(&ctx->encrypt_pending);

	rc = crypto_aead_encrypt(aead_req);
	if (!rc || rc != -EINPROGRESS) {
		atomic_dec(&ctx->encrypt_pending);
		sge->offset -= prot->prepend_size;
		sge->length += prot->prepend_size;
	}

	if (!rc) {
		WRITE_ONCE(rec->tx_ready, true);
	} else if (rc != -EINPROGRESS) {
		list_del(&rec->list);
		return rc;
	}

	/* Unhook the record from context if encryption did not fail */
	ctx->open_rec = NULL;
	tls_advance_record_sn(sk, prot, &tls_ctx->tx);
	return rc;
}
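
/* Illustrative sketch of the per-record nonce derivation done above:
 *
 *	iv_data  = [B0 byte (CCM only)] || salt || iv
 *	iv_data ^= record sequence number	(tls_xor_iv_with_seq())
 *
 * so every record is encrypted under a unique nonce.
 */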
static int tls_split_open_record(struct sock *sk, struct tls_rec *from,
				 struct tls_rec **to, struct sk_msg *msg_opl,
				 struct sk_msg *msg_oen, u32 split_point,
				 u32 tx_overhead_size, u32 *orig_end)
{
	u32 i, j, bytes = 0, apply = msg_opl->apply_bytes;
	struct scatterlist *sge, *osge, *nsge;
	u32 orig_size = msg_opl->sg.size;
	struct scatterlist tmp = { };
	struct sk_msg *msg_npl;
	struct tls_rec *new;
	int ret = 0;

	new = tls_get_rec(sk);
	if (!new)
		return -ENOMEM;
	ret = sk_msg_alloc(sk, &new->msg_encrypted, msg_opl->sg.size +
			   tx_overhead_size, 0);
	if (ret < 0) {
		tls_free_rec(sk, new);
		return ret;
	}

	*orig_end = msg_opl->sg.end;
	i = msg_opl->sg.start;
	sge = sk_msg_elem(msg_opl, i);
	while (apply && sge->length) {
		if (sge->length > apply) {
			u32 len = sge->length - apply;

			get_page(sg_page(sge));
			sg_set_page(&tmp, sg_page(sge), len,
				    sge->offset + apply);
			sge->length = apply;
			bytes += apply;
			apply = 0;
		} else {
			apply -= sge->length;
			bytes += sge->length;
		}

		sk_msg_iter_var_next(i);
		if (i == msg_opl->sg.end)
			break;
		sge = sk_msg_elem(msg_opl, i);
	}

	msg_opl->sg.end = i;
	msg_opl->sg.curr = i;
	msg_opl->sg.copybreak = 0;
	msg_opl->apply_bytes = 0;
	msg_opl->sg.size = bytes;

	msg_npl = &new->msg_plaintext;
	msg_npl->apply_bytes = apply;
	msg_npl->sg.size = orig_size - bytes;

	j = msg_npl->sg.start;
	nsge = sk_msg_elem(msg_npl, j);
	if (tmp.length) {
		memcpy(nsge, &tmp, sizeof(*nsge));
		sk_msg_iter_var_next(j);
		nsge = sk_msg_elem(msg_npl, j);
	}

	osge = sk_msg_elem(msg_opl, i);
	while (osge->length) {
		memcpy(nsge, osge, sizeof(*nsge));
		sg_unmark_end(nsge);
		sk_msg_iter_var_next(i);
		sk_msg_iter_var_next(j);
		if (i == *orig_end)
			break;
		osge = sk_msg_elem(msg_opl, i);
		nsge = sk_msg_elem(msg_npl, j);
	}

	msg_npl->sg.end = j;
	msg_npl->sg.curr = j;
	msg_npl->sg.copybreak = 0;

	*to = new;
	return 0;
}
static void tls_merge_open_record(struct sock *sk, struct tls_rec *to,
				  struct tls_rec *from, u32 orig_end)
{
	struct sk_msg *msg_npl = &from->msg_plaintext;
	struct sk_msg *msg_opl = &to->msg_plaintext;
	struct scatterlist *osge, *nsge;
	u32 i, j;

	i = msg_opl->sg.end;
	sk_msg_iter_var_prev(i);
	j = msg_npl->sg.start;

	osge = sk_msg_elem(msg_opl, i);
	nsge = sk_msg_elem(msg_npl, j);

	if (sg_page(osge) == sg_page(nsge) &&
	    osge->offset + osge->length == nsge->offset) {
		osge->length += nsge->length;
		put_page(sg_page(nsge));
	}

	msg_opl->sg.end = orig_end;
	msg_opl->sg.curr = orig_end;
	msg_opl->sg.copybreak = 0;
	msg_opl->apply_bytes = msg_opl->sg.size + msg_npl->sg.size;
	msg_opl->sg.size += msg_npl->sg.size;

	sk_msg_free(sk, &to->msg_encrypted);
	sk_msg_xfer_full(&to->msg_encrypted, &from->msg_encrypted);

	kfree(from);
}
static int tls_push_record(struct sock *sk, int flags,
			   unsigned char record_type)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct tls_rec *rec = ctx->open_rec, *tmp = NULL;
	u32 i, split_point, orig_end;
	struct sk_msg *msg_pl, *msg_en;
	struct aead_request *req;
	bool split;
	int rc;

	if (!rec)
		return 0;

	msg_pl = &rec->msg_plaintext;
	msg_en = &rec->msg_encrypted;

	split_point = msg_pl->apply_bytes;
	split = split_point && split_point < msg_pl->sg.size;
	if (unlikely((!split &&
		      msg_pl->sg.size +
		      prot->overhead_size > msg_en->sg.size) ||
		     (split &&
		      split_point +
		      prot->overhead_size > msg_en->sg.size))) {
		split = true;
		split_point = msg_en->sg.size;
	}
	if (split) {
		rc = tls_split_open_record(sk, rec, &tmp, msg_pl, msg_en,
					   split_point, prot->overhead_size,
					   &orig_end);
		if (rc < 0)
			return rc;
		/* This can happen if above tls_split_open_record allocates
		 * a single large encryption buffer instead of two smaller
		 * ones. In this case adjust pointers and continue without
		 * split.
		 */
		if (!msg_pl->sg.size) {
			tls_merge_open_record(sk, rec, tmp, orig_end);
			msg_pl = &rec->msg_plaintext;
			msg_en = &rec->msg_encrypted;
			split = false;
		}
		sk_msg_trim(sk, msg_en, msg_pl->sg.size +
			    prot->overhead_size);
	}

	rec->tx_flags = flags;
	req = &rec->aead_req;

	i = msg_pl->sg.end;
	sk_msg_iter_var_prev(i);

	rec->content_type = record_type;
	if (prot->version == TLS_1_3_VERSION) {
		/* Add content type to end of message.  No padding added */
		sg_set_buf(&rec->sg_content_type, &rec->content_type, 1);
		sg_mark_end(&rec->sg_content_type);
		sg_chain(msg_pl->sg.data, msg_pl->sg.end + 1,
			 &rec->sg_content_type);
	} else {
		sg_mark_end(sk_msg_elem(msg_pl, i));
	}

	if (msg_pl->sg.end < msg_pl->sg.start) {
		sg_chain(&msg_pl->sg.data[msg_pl->sg.start],
			 MAX_SKB_FRAGS - msg_pl->sg.start + 1,
			 msg_pl->sg.data);
	}

	i = msg_pl->sg.start;
	sg_chain(rec->sg_aead_in, 2, &msg_pl->sg.data[i]);

	i = msg_en->sg.end;
	sk_msg_iter_var_prev(i);
	sg_mark_end(sk_msg_elem(msg_en, i));

	i = msg_en->sg.start;
	sg_chain(rec->sg_aead_out, 2, &msg_en->sg.data[i]);

	tls_make_aad(rec->aad_space, msg_pl->sg.size + prot->tail_size,
		     tls_ctx->tx.rec_seq, record_type, prot);

	tls_fill_prepend(tls_ctx,
			 page_address(sg_page(&msg_en->sg.data[i])) +
			 msg_en->sg.data[i].offset,
			 msg_pl->sg.size + prot->tail_size,
			 record_type);

	tls_ctx->pending_open_record_frags = false;

	rc = tls_do_encryption(sk, tls_ctx, ctx, req,
			       msg_pl->sg.size + prot->tail_size, i);
	if (rc < 0) {
		if (rc != -EINPROGRESS) {
			tls_err_abort(sk, -EBADMSG);
			if (split) {
				tls_ctx->pending_open_record_frags = true;
				tls_merge_open_record(sk, rec, tmp, orig_end);
			}
		}
		ctx->async_capable = 1;
		return rc;
	} else if (split) {
		msg_pl = &tmp->msg_plaintext;
		msg_en = &tmp->msg_encrypted;
		sk_msg_trim(sk, msg_en, msg_pl->sg.size + prot->overhead_size);
		tls_ctx->pending_open_record_frags = true;
		ctx->open_rec = tmp;
	}

	return tls_tx_records(sk, flags);
}
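
/* Illustrative sketch of the record assembled by tls_push_record():
 *
 *	sg_aead_in:  aad_space -> plaintext [-> content type (TLS 1.3)]
 *	sg_aead_out: aad_space -> header || ciphertext || tag
 *
 * The first scatterlist entry always carries the AAD; the payload is
 * chained in with sg_chain() as shown above.
 */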
static int bpf_exec_tx_verdict(struct sk_msg *msg, struct sock *sk,
			       bool full_record, u8 record_type,
			       ssize_t *copied, int flags)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct sk_msg msg_redir = { };
	struct sk_psock *psock;
	struct sock *sk_redir;
	struct tls_rec *rec;
	bool enospc, policy, redir_ingress;
	int err = 0, send;
	u32 delta = 0;

	policy = !(flags & MSG_SENDPAGE_NOPOLICY);
	psock = sk_psock_get(sk);
	if (!psock || !policy) {
		err = tls_push_record(sk, flags, record_type);
		if (err && err != -EINPROGRESS && sk->sk_err == EBADMSG) {
			*copied -= sk_msg_free(sk, msg);
			tls_free_open_rec(sk);
			err = -sk->sk_err;
		}
		if (psock)
			sk_psock_put(sk, psock);
		return err;
	}
more_data:
	enospc = sk_msg_full(msg);
	if (psock->eval == __SK_NONE) {
		delta = msg->sg.size;
		psock->eval = sk_psock_msg_verdict(sk, psock, msg);
		delta -= msg->sg.size;
	}
	if (msg->cork_bytes && msg->cork_bytes > msg->sg.size &&
	    !enospc && !full_record) {
		err = -ENOSPC;
		goto out_err;
	}
	msg->cork_bytes = 0;
	send = msg->sg.size;
	if (msg->apply_bytes && msg->apply_bytes < send)
		send = msg->apply_bytes;

	switch (psock->eval) {
	case __SK_PASS:
		err = tls_push_record(sk, flags, record_type);
		if (err && err != -EINPROGRESS && sk->sk_err == EBADMSG) {
			*copied -= sk_msg_free(sk, msg);
			tls_free_open_rec(sk);
			err = -sk->sk_err;
			goto out_err;
		}
		break;
	case __SK_REDIRECT:
		redir_ingress = psock->redir_ingress;
		sk_redir = psock->sk_redir;
		memcpy(&msg_redir, msg, sizeof(*msg));
		if (msg->apply_bytes < send)
			msg->apply_bytes = 0;
		else
			msg->apply_bytes -= send;
		sk_msg_return_zero(sk, msg, send);
		msg->sg.size -= send;
		release_sock(sk);
		err = tcp_bpf_sendmsg_redir(sk_redir, redir_ingress,
					    &msg_redir, send, flags);
		lock_sock(sk);
		if (err < 0) {
			*copied -= sk_msg_free_nocharge(sk, &msg_redir);
			msg->sg.size = 0;
		}
		if (msg->sg.size == 0)
			tls_free_open_rec(sk);
		break;
	case __SK_DROP:
	default:
		sk_msg_free_partial(sk, msg, send);
		if (msg->apply_bytes < send)
			msg->apply_bytes = 0;
		else
			msg->apply_bytes -= send;
		if (msg->sg.size == 0)
			tls_free_open_rec(sk);
		*copied -= (send + delta);
		err = -EACCES;
	}

	if (likely(!err)) {
		bool reset_eval = !ctx->open_rec;

		rec = ctx->open_rec;
		if (rec) {
			msg = &rec->msg_plaintext;
			if (!msg->apply_bytes)
				reset_eval = true;
		}
		if (reset_eval) {
			psock->eval = __SK_NONE;
			if (psock->sk_redir) {
				sock_put(psock->sk_redir);
				psock->sk_redir = NULL;
			}
		}
		if (rec)
			goto more_data;
	}
 out_err:
	sk_psock_put(sk, psock);
	return err;
}
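
/* Illustrative summary of the verdicts handled above:
 *	__SK_PASS     - encrypt and transmit on this socket
 *	__SK_REDIRECT - hand the plaintext to another socket via
 *			tcp_bpf_sendmsg_redir()
 *	__SK_DROP     - free the data and fail the send with -EACCES
 */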
static int tls_sw_push_pending_record(struct sock *sk, int flags)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct tls_rec *rec = ctx->open_rec;
	struct sk_msg *msg_pl;
	size_t copied;

	if (!rec)
		return 0;

	msg_pl = &rec->msg_plaintext;
	copied = msg_pl->sg.size;
	if (!copied)
		return 0;

	return bpf_exec_tx_verdict(msg_pl, sk, true, TLS_RECORD_TYPE_DATA,
				   &copied, flags);
}
static int tls_sw_sendmsg_splice(struct sock *sk, struct msghdr *msg,
				 struct sk_msg *msg_pl, size_t try_to_copy,
				 ssize_t *copied)
{
	struct page *page = NULL, **pages = &page;

	do {
		ssize_t part;
		size_t off;

		part = iov_iter_extract_pages(&msg->msg_iter, &pages,
					      try_to_copy, 1, 0, &off);
		if (part <= 0)
			return part ?: -EIO;

		if (WARN_ON_ONCE(!sendpage_ok(page))) {
			iov_iter_revert(&msg->msg_iter, part);
			return -EIO;
		}

		sk_msg_page_add(msg_pl, page, part, off);
		sk_mem_charge(sk, part);
		*copied += part;
		try_to_copy -= part;
	} while (try_to_copy && !sk_msg_full(msg_pl));

	return 0;
}
static int tls_sw_sendmsg_locked(struct sock *sk, struct msghdr *msg,
				 size_t size)
{
	long timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	bool async_capable = ctx->async_capable;
	unsigned char record_type = TLS_RECORD_TYPE_DATA;
	bool is_kvec = iov_iter_is_kvec(&msg->msg_iter);
	bool eor = !(msg->msg_flags & MSG_MORE);
	size_t try_to_copy;
	ssize_t copied = 0;
	struct sk_msg *msg_pl, *msg_en;
	struct tls_rec *rec;
	int required_size;
	int num_async = 0;
	bool full_record;
	int record_room;
	int num_zc = 0;
	int orig_size;
	int ret = 0;
	int pending;

	if (!eor && (msg->msg_flags & MSG_EOR))
		return -EINVAL;

	if (unlikely(msg->msg_controllen)) {
		ret = tls_process_cmsg(sk, msg, &record_type);
		if (ret) {
			if (ret == -EINPROGRESS)
				num_async++;
			else if (ret != -EAGAIN)
				goto send_end;
		}
	}

	while (msg_data_left(msg)) {
		if (sk->sk_err) {
			ret = -sk->sk_err;
			goto send_end;
		}

		if (ctx->open_rec)
			rec = ctx->open_rec;
		else
			rec = ctx->open_rec = tls_get_rec(sk);
		if (!rec) {
			ret = -ENOMEM;
			goto send_end;
		}

		msg_pl = &rec->msg_plaintext;
		msg_en = &rec->msg_encrypted;

		orig_size = msg_pl->sg.size;
		full_record = false;
		try_to_copy = msg_data_left(msg);
		record_room = TLS_MAX_PAYLOAD_SIZE - msg_pl->sg.size;
		if (try_to_copy >= record_room) {
			try_to_copy = record_room;
			full_record = true;
		}

		required_size = msg_pl->sg.size + try_to_copy +
				prot->overhead_size;

		if (!sk_stream_memory_free(sk))
			goto wait_for_sndbuf;

alloc_encrypted:
		ret = tls_alloc_encrypted_msg(sk, required_size);
		if (ret) {
			if (ret != -ENOSPC)
				goto wait_for_memory;

			/* Adjust try_to_copy according to the amount that was
			 * actually allocated. The difference is due
			 * to max sg elements limit
			 */
			try_to_copy -= required_size - msg_en->sg.size;
			full_record = true;
		}

		if (try_to_copy && (msg->msg_flags & MSG_SPLICE_PAGES)) {
			ret = tls_sw_sendmsg_splice(sk, msg, msg_pl,
						    try_to_copy, &copied);
			if (ret < 0)
				goto send_end;
			tls_ctx->pending_open_record_frags = true;
			if (full_record || eor || sk_msg_full(msg_pl))
				goto copied;
			continue;
		}

		if (!is_kvec && (full_record || eor) && !async_capable) {
			u32 first = msg_pl->sg.end;

			ret = sk_msg_zerocopy_from_iter(sk, &msg->msg_iter,
							msg_pl, try_to_copy);
			if (ret)
				goto fallback_to_reg_send;

			num_zc++;
			copied += try_to_copy;

			sk_msg_sg_copy_set(msg_pl, first);
			ret = bpf_exec_tx_verdict(msg_pl, sk, full_record,
						  record_type, &copied,
						  msg->msg_flags);
			if (ret) {
				if (ret == -EINPROGRESS)
					num_async++;
				else if (ret == -ENOMEM)
					goto wait_for_memory;
				else if (ctx->open_rec && ret == -ENOSPC)
					goto rollback_iter;
				else if (ret != -EAGAIN)
					goto send_end;
			}
			continue;
rollback_iter:
			copied -= try_to_copy;
			sk_msg_sg_copy_clear(msg_pl, first);
			iov_iter_revert(&msg->msg_iter,
					msg_pl->sg.size - orig_size);
fallback_to_reg_send:
			sk_msg_trim(sk, msg_pl, orig_size);
		}

		required_size = msg_pl->sg.size + try_to_copy;

		ret = tls_clone_plaintext_msg(sk, required_size);
		if (ret) {
			if (ret != -ENOSPC)
				goto send_end;

			/* Adjust try_to_copy according to the amount that was
			 * actually allocated. The difference is due
			 * to max sg elements limit
			 */
			try_to_copy -= required_size - msg_pl->sg.size;
			full_record = true;
			sk_msg_trim(sk, msg_en,
				    msg_pl->sg.size + prot->overhead_size);
		}

		if (try_to_copy) {
			ret = sk_msg_memcopy_from_iter(sk, &msg->msg_iter,
						       msg_pl, try_to_copy);
			if (ret < 0)
				goto trim_sgl;
		}

		/* Open records defined only if successfully copied, otherwise
		 * we would trim the sg but not reset the open record frags.
		 */
		tls_ctx->pending_open_record_frags = true;
		copied += try_to_copy;
copied:
		if (full_record || eor) {
			ret = bpf_exec_tx_verdict(msg_pl, sk, full_record,
						  record_type, &copied,
						  msg->msg_flags);
			if (ret) {
				if (ret == -EINPROGRESS)
					num_async++;
				else if (ret == -ENOMEM)
					goto wait_for_memory;
				else if (ret != -EAGAIN) {
					if (ret == -ENOSPC)
						ret = 0;
					goto send_end;
				}
			}
		}

		continue;

wait_for_sndbuf:
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
wait_for_memory:
		ret = sk_stream_wait_memory(sk, &timeo);
		if (ret) {
trim_sgl:
			tls_trim_both_msgs(sk, orig_size);
			goto send_end;
		}

		if (ctx->open_rec && msg_en->sg.size < required_size)
			goto alloc_encrypted;
	}

	if (!num_async) {
		goto send_end;
	} else if (num_zc) {
		/* Wait for pending encryptions to get completed */
		spin_lock_bh(&ctx->encrypt_compl_lock);
		ctx->async_notify = true;

		pending = atomic_read(&ctx->encrypt_pending);
		spin_unlock_bh(&ctx->encrypt_compl_lock);
		if (pending)
			crypto_wait_req(-EINPROGRESS, &ctx->async_wait);
		else
			reinit_completion(&ctx->async_wait.completion);

		/* There can be no concurrent accesses, since we have no
		 * pending encrypt operations
		 */
		WRITE_ONCE(ctx->async_notify, false);

		if (ctx->async_wait.err) {
			ret = ctx->async_wait.err;
			copied = 0;
		}
	}

	/* Transmit if any encryptions have completed */
	if (test_and_clear_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask)) {
		cancel_delayed_work(&ctx->tx_work.work);
		tls_tx_records(sk, msg->msg_flags);
	}

send_end:
	ret = sk_stream_error(sk, msg->msg_flags, ret);
	return copied > 0 ? copied : ret;
}
int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	int ret;

	if (msg->msg_flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL |
			       MSG_CMSG_COMPAT | MSG_SPLICE_PAGES | MSG_EOR |
			       MSG_SENDPAGE_NOPOLICY))
		return -EOPNOTSUPP;

	ret = mutex_lock_interruptible(&tls_ctx->tx_lock);
	if (ret)
		return ret;
	lock_sock(sk);
	ret = tls_sw_sendmsg_locked(sk, msg, size);
	release_sock(sk);
	mutex_unlock(&tls_ctx->tx_lock);

	return ret;
}
/*
 * Handle unexpected EOF during splice without SPLICE_F_MORE set.
 */
void tls_sw_splice_eof(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct tls_rec *rec;
	struct sk_msg *msg_pl;
	ssize_t copied = 0;
	bool retrying = false;
	int ret = 0;
	int pending;

	if (!ctx->open_rec)
		return;

	mutex_lock(&tls_ctx->tx_lock);
	lock_sock(sk);

retry:
	/* same checks as in tls_sw_push_pending_record() */
	rec = ctx->open_rec;
	if (!rec)
		goto unlock;

	msg_pl = &rec->msg_plaintext;
	if (msg_pl->sg.size == 0)
		goto unlock;

	/* Check the BPF advisor and perform transmission. */
	ret = bpf_exec_tx_verdict(msg_pl, sk, false, TLS_RECORD_TYPE_DATA,
				  &copied, 0);
	switch (ret) {
	case 0:
	case -EAGAIN:
		if (retrying)
			goto unlock;
		retrying = true;
		goto retry;
	case -EINPROGRESS:
		break;
	default:
		goto unlock;
	}

	/* Wait for pending encryptions to get completed */
	spin_lock_bh(&ctx->encrypt_compl_lock);
	ctx->async_notify = true;

	pending = atomic_read(&ctx->encrypt_pending);
	spin_unlock_bh(&ctx->encrypt_compl_lock);
	if (pending)
		crypto_wait_req(-EINPROGRESS, &ctx->async_wait);
	else
		reinit_completion(&ctx->async_wait.completion);

	/* There can be no concurrent accesses, since we have no pending
	 * encrypt operations
	 */
	WRITE_ONCE(ctx->async_notify, false);

	if (ctx->async_wait.err)
		goto unlock;

	/* Transmit if any encryptions have completed */
	if (test_and_clear_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask)) {
		cancel_delayed_work(&ctx->tx_work.work);
		tls_tx_records(sk, 0);
	}

unlock:
	release_sock(sk);
	mutex_unlock(&tls_ctx->tx_lock);
}
static int
tls_rx_rec_wait(struct sock *sk, struct sk_psock *psock, bool nonblock,
		bool released)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	DEFINE_WAIT_FUNC(wait, woken_wake_function);
	int ret = 0;
	long timeo;

	timeo = sock_rcvtimeo(sk, nonblock);

	while (!tls_strp_msg_ready(ctx)) {
		if (!sk_psock_queue_empty(psock))
			return 0;

		if (sk->sk_err)
			return sock_error(sk);

		if (ret < 0)
			return ret;

		if (!skb_queue_empty(&sk->sk_receive_queue)) {
			tls_strp_check_rcv(&ctx->strp);
			if (tls_strp_msg_ready(ctx))
				break;
		}

		if (sk->sk_shutdown & RCV_SHUTDOWN)
			return 0;

		if (sock_flag(sk, SOCK_DONE))
			return 0;

		if (!timeo)
			return -EAGAIN;

		released = true;
		add_wait_queue(sk_sleep(sk), &wait);
		sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
		ret = sk_wait_event(sk, &timeo,
				    tls_strp_msg_ready(ctx) ||
				    !sk_psock_queue_empty(psock),
				    &wait);
		sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
		remove_wait_queue(sk_sleep(sk), &wait);

		/* Handle signals */
		if (signal_pending(current))
			return sock_intr_errno(timeo);
	}

	tls_strp_msg_load(&ctx->strp, released);

	return 1;
}
static int tls_setup_from_iter(struct iov_iter *from,
			       int length, int *pages_used,
			       struct scatterlist *to,
			       int to_max_pages)
{
	int rc = 0, i = 0, num_elem = *pages_used, maxpages;
	struct page *pages[MAX_SKB_FRAGS];
	unsigned int size = 0;
	ssize_t copied, use;
	size_t offset;

	while (length > 0) {
		i = 0;
		maxpages = to_max_pages - num_elem;
		if (maxpages == 0) {
			rc = -EFAULT;
			goto out;
		}
		copied = iov_iter_get_pages2(from, pages,
					     length,
					     maxpages, &offset);
		if (copied <= 0) {
			rc = -EFAULT;
			goto out;
		}

		length -= copied;
		size += copied;
		while (copied) {
			use = min_t(int, copied, PAGE_SIZE - offset);

			sg_set_page(&to[num_elem],
				    pages[i], use, offset);
			sg_unmark_end(&to[num_elem]);
			/* We do not uncharge memory from this API */

			offset = 0;
			copied -= use;

			i++;
			num_elem++;
		}
	}
	/* Mark the end in the last sg entry if newly added */
	if (num_elem > *pages_used)
		sg_mark_end(&to[num_elem - 1]);
out:
	if (rc)
		iov_iter_revert(from, size);
	*pages_used = num_elem;

	return rc;
}
static struct sk_buff *
tls_alloc_clrtxt_skb(struct sock *sk, struct sk_buff *skb,
		     unsigned int full_len)
{
	struct strp_msg *clr_rxm;
	struct sk_buff *clr_skb;
	int err;

	clr_skb = alloc_skb_with_frags(0, full_len, TLS_PAGE_ORDER,
				       &err, sk->sk_allocation);
	if (!clr_skb)
		return NULL;

	skb_copy_header(clr_skb, skb);
	clr_skb->len = full_len;
	clr_skb->data_len = full_len;

	clr_rxm = strp_msg(clr_skb);
	clr_rxm->offset = 0;

	return clr_skb;
}
/* Decrypt handlers
 *
 * tls_decrypt_sw() and tls_decrypt_device() are decrypt handlers.
 * They must transform the darg in/out arguments as follows:
 *       |          Input            |         Output
 * -------------------------------------------------------------------
 *    zc | Zero-copy decrypt allowed | Zero-copy performed
 * async | Async decrypt allowed     | Async crypto used / in progress
 *   skb |            *              | Output skb
 *
 * If ZC decryption was performed darg.skb will point to the input skb.
 */

/* This function decrypts the input skb into either out_iov or in out_sg
 * or in skb buffers itself. The input parameter 'darg->zc' indicates if
 * zero-copy mode needs to be tried or not. With zero-copy mode, either
 * out_iov or out_sg must be non-NULL. In case both out_iov and out_sg are
 * NULL, then the decryption happens inside skb buffers itself, i.e.
 * zero-copy gets disabled and 'darg->zc' is updated.
 */
static int tls_decrypt_sg(struct sock *sk, struct iov_iter *out_iov,
			  struct scatterlist *out_sg,
			  struct tls_decrypt_arg *darg)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	int n_sgin, n_sgout, aead_size, err, pages = 0;
	struct sk_buff *skb = tls_strp_msg(ctx);
	const struct strp_msg *rxm = strp_msg(skb);
	const struct tls_msg *tlm = tls_msg(skb);
	struct aead_request *aead_req;
	struct scatterlist *sgin = NULL;
	struct scatterlist *sgout = NULL;
	const int data_len = rxm->full_len - prot->overhead_size;
	int tail_pages = !!prot->tail_size;
	struct tls_decrypt_ctx *dctx;
	struct sk_buff *clear_skb;
	int iv_offset = 0;
	u8 *mem;

	n_sgin = skb_nsg(skb, rxm->offset + prot->prepend_size,
			 rxm->full_len - prot->prepend_size);
	if (n_sgin < 1)
		return n_sgin ?: -EBADMSG;

	if (darg->zc && (out_iov || out_sg)) {
		clear_skb = NULL;

		if (out_iov)
			n_sgout = 1 + tail_pages +
				iov_iter_npages_cap(out_iov, INT_MAX, data_len);
		else
			n_sgout = sg_nents(out_sg);
	} else {
		darg->zc = false;

		clear_skb = tls_alloc_clrtxt_skb(sk, skb, rxm->full_len);
		if (!clear_skb)
			return -ENOMEM;

		n_sgout = 1 + skb_shinfo(clear_skb)->nr_frags;
	}

	/* Increment to accommodate AAD */
	n_sgin = n_sgin + 1;

	/* Allocate a single block of memory which contains
	 * aead_req || tls_decrypt_ctx.
	 * Both structs are variable length.
	 */
	aead_size = sizeof(*aead_req) + crypto_aead_reqsize(ctx->aead_recv);
	aead_size = ALIGN(aead_size, __alignof__(*dctx));
	mem = kmalloc(aead_size + struct_size(dctx, sg, n_sgin + n_sgout),
		      sk->sk_allocation);
	if (!mem) {
		err = -ENOMEM;
		goto exit_free_skb;
	}

	/* Segment the allocated memory */
	aead_req = (struct aead_request *)mem;
	dctx = (struct tls_decrypt_ctx *)(mem + aead_size);
	dctx->sk = sk;
	sgin = &dctx->sg[0];
	sgout = &dctx->sg[n_sgin];

	/* For CCM based ciphers, first byte of nonce+iv is a constant */
	switch (prot->cipher_type) {
	case TLS_CIPHER_AES_CCM_128:
		dctx->iv[0] = TLS_AES_CCM_IV_B0_BYTE;
		iv_offset = 1;
		break;
	case TLS_CIPHER_SM4_CCM:
		dctx->iv[0] = TLS_SM4_CCM_IV_B0_BYTE;
		iv_offset = 1;
		break;
	}

	/* Prepare IV */
	if (prot->version == TLS_1_3_VERSION ||
	    prot->cipher_type == TLS_CIPHER_CHACHA20_POLY1305) {
		memcpy(&dctx->iv[iv_offset], tls_ctx->rx.iv,
		       prot->iv_size + prot->salt_size);
	} else {
		err = skb_copy_bits(skb, rxm->offset + TLS_HEADER_SIZE,
				    &dctx->iv[iv_offset] + prot->salt_size,
				    prot->iv_size);
		if (err < 0)
			goto exit_free;
		memcpy(&dctx->iv[iv_offset], tls_ctx->rx.iv, prot->salt_size);
	}
	tls_xor_iv_with_seq(prot, &dctx->iv[iv_offset], tls_ctx->rx.rec_seq);

	/* Prepare AAD */
	tls_make_aad(dctx->aad, rxm->full_len - prot->overhead_size +
		     prot->tail_size,
		     tls_ctx->rx.rec_seq, tlm->control, prot);

	/* Prepare sgin */
	sg_init_table(sgin, n_sgin);
	sg_set_buf(&sgin[0], dctx->aad, prot->aad_size);
	err = skb_to_sgvec(skb, &sgin[1],
			   rxm->offset + prot->prepend_size,
			   rxm->full_len - prot->prepend_size);
	if (err < 0)
		goto exit_free;

	if (clear_skb) {
		sg_init_table(sgout, n_sgout);
		sg_set_buf(&sgout[0], dctx->aad, prot->aad_size);

		err = skb_to_sgvec(clear_skb, &sgout[1], prot->prepend_size,
				   data_len + prot->tail_size);
		if (err < 0)
			goto exit_free;
	} else if (out_iov) {
		sg_init_table(sgout, n_sgout);
		sg_set_buf(&sgout[0], dctx->aad, prot->aad_size);

		err = tls_setup_from_iter(out_iov, data_len, &pages, &sgout[1],
					  (n_sgout - 1 - tail_pages));
		if (err < 0)
			goto exit_free_pages;

		if (prot->tail_size) {
			sg_unmark_end(&sgout[pages]);
			sg_set_buf(&sgout[pages + 1], &dctx->tail,
				   prot->tail_size);
			sg_mark_end(&sgout[pages + 1]);
		}
	} else if (out_sg) {
		memcpy(sgout, out_sg, n_sgout * sizeof(*sgout));
	}

	/* Prepare and submit AEAD request */
	err = tls_do_decryption(sk, sgin, sgout, dctx->iv,
				data_len + prot->tail_size, aead_req, darg);
	if (err)
		goto exit_free_pages;

	darg->skb = clear_skb ?: tls_strp_msg(ctx);
	clear_skb = NULL;

	if (unlikely(darg->async)) {
		err = tls_strp_msg_hold(&ctx->strp, &ctx->async_hold);
		if (err)
			__skb_queue_tail(&ctx->async_hold, darg->skb);
		return err;
	}

	if (prot->tail_size)
		darg->tail = dctx->tail;

exit_free_pages:
	/* Release the pages in case iov was mapped to pages */
	for (; pages > 0; pages--)
		put_page(sg_page(&sgout[pages]));
exit_free:
	kfree(mem);
exit_free_skb:
	consume_skb(clear_skb);
	return err;
}
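
/* Illustrative layout of the single allocation made in tls_decrypt_sg()
 * (sizes are runtime dependent):
 *
 *	+----------+-----------------+----------------------------+
 *	| aead_req | tls_decrypt_ctx | sg[n_sgin] ... sg[n_sgout] |
 *	+----------+-----------------+----------------------------+
 */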
static int
tls_decrypt_sw(struct sock *sk, struct tls_context *tls_ctx,
	       struct msghdr *msg, struct tls_decrypt_arg *darg)
{
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct strp_msg *rxm;
	int pad, err;

	err = tls_decrypt_sg(sk, &msg->msg_iter, NULL, darg);
	if (err < 0) {
		if (err == -EBADMSG)
			TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSDECRYPTERROR);
		return err;
	}
	/* keep going even for ->async, the code below is TLS 1.3 */

	/* If opportunistic TLS 1.3 ZC failed retry without ZC */
	if (unlikely(darg->zc && prot->version == TLS_1_3_VERSION &&
		     darg->tail != TLS_RECORD_TYPE_DATA)) {
		darg->zc = false;
		if (!darg->tail)
			TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSRXNOPADVIOL);
		TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSDECRYPTRETRY);
		return tls_decrypt_sw(sk, tls_ctx, msg, darg);
	}

	pad = tls_padding_length(prot, darg->skb, darg);
	if (pad < 0) {
		if (darg->skb != tls_strp_msg(ctx))
			consume_skb(darg->skb);
		return pad;
	}

	rxm = strp_msg(darg->skb);
	rxm->full_len -= pad;

	return 0;
}
static int
tls_decrypt_device(struct sock *sk, struct msghdr *msg,
		   struct tls_context *tls_ctx, struct tls_decrypt_arg *darg)
{
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct strp_msg *rxm;
	int pad, err;

	if (tls_ctx->rx_conf != TLS_HW)
		return 0;

	err = tls_device_decrypted(sk, tls_ctx);
	if (err <= 0)
		return err;

	pad = tls_padding_length(prot, tls_strp_msg(ctx), darg);
	if (pad < 0)
		return pad;

	darg->async = false;
	darg->skb = tls_strp_msg(ctx);
	/* ->zc downgrade check, in case TLS 1.3 gets here */
	darg->zc &= !(prot->version == TLS_1_3_VERSION &&
		      tls_msg(darg->skb)->control != TLS_RECORD_TYPE_DATA);

	rxm = strp_msg(darg->skb);
	rxm->full_len -= pad;

	if (!darg->zc) {
		/* Non-ZC case needs a real skb */
		darg->skb = tls_strp_msg_detach(ctx);
		if (!darg->skb)
			return -ENOMEM;
	} else {
		unsigned int off, len;

		/* In ZC case nobody cares about the output skb.
		 * Just copy the data here. Note the skb is not fully trimmed.
		 */
		off = rxm->offset + prot->prepend_size;
		len = rxm->full_len - prot->overhead_size;

		err = skb_copy_datagram_msg(darg->skb, off, msg, len);
		if (err)
			return err;
	}
	return 1;
}
static int tls_rx_one_record(struct sock *sk, struct msghdr *msg,
			     struct tls_decrypt_arg *darg)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct strp_msg *rxm;
	int err;

	err = tls_decrypt_device(sk, msg, tls_ctx, darg);
	if (!err)
		err = tls_decrypt_sw(sk, tls_ctx, msg, darg);
	if (err < 0)
		return err;

	rxm = strp_msg(darg->skb);
	rxm->offset += prot->prepend_size;
	rxm->full_len -= prot->overhead_size;
	tls_advance_record_sn(sk, prot, &tls_ctx->rx);

	return 0;
}
int decrypt_skb(struct sock *sk, struct scatterlist *sgout)
{
	struct tls_decrypt_arg darg = { .zc = true, };

	return tls_decrypt_sg(sk, NULL, sgout, &darg);
}
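
/* Illustrative usage (assumed caller, e.g. a device-offload re-encrypt
 * path): decrypt the current record straight into a caller-provided
 * scatterlist, with zero-copy forced on:
 *
 *	err = decrypt_skb(sk, sgout);
 */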
static int tls_record_content_type(struct msghdr *msg, struct tls_msg *tlm,
				   u8 *control)
{
	int err;

	if (!*control) {
		*control = tlm->control;
		if (!*control)
			return -EBADMSG;

		err = put_cmsg(msg, SOL_TLS, TLS_GET_RECORD_TYPE,
			       sizeof(*control), control);
		if (*control != TLS_RECORD_TYPE_DATA) {
			if (err || msg->msg_flags & MSG_CTRUNC)
				return -EIO;
		}
	} else if (*control != tlm->control) {
		return 0;
	}

	return 1;
}
static void tls_rx_rec_done(struct tls_sw_context_rx *ctx)
{
	tls_strp_msg_done(&ctx->strp);
}
/* This function traverses the rx_list in the TLS receive context and
 * copies the decrypted records into the buffer provided by the caller
 * when zero copy is not used. Further, records are removed from the
 * rx_list when this is not a peek case and the record has been consumed
 * completely.
 */
static int process_rx_list(struct tls_sw_context_rx *ctx,
			   struct msghdr *msg,
			   u8 *control,
			   size_t skip,
			   size_t len,
			   bool is_peek)
{
	struct sk_buff *skb = skb_peek(&ctx->rx_list);
	struct tls_msg *tlm;
	ssize_t copied = 0;
	int err;

	while (skip && skb) {
		struct strp_msg *rxm = strp_msg(skb);
		tlm = tls_msg(skb);

		err = tls_record_content_type(msg, tlm, control);
		if (err <= 0)
			goto out;

		if (skip < rxm->full_len)
			break;

		skip = skip - rxm->full_len;
		skb = skb_peek_next(skb, &ctx->rx_list);
	}

	while (len && skb) {
		struct sk_buff *next_skb;
		struct strp_msg *rxm = strp_msg(skb);
		int chunk = min_t(unsigned int, rxm->full_len - skip, len);

		tlm = tls_msg(skb);

		err = tls_record_content_type(msg, tlm, control);
		if (err <= 0)
			goto out;

		err = skb_copy_datagram_msg(skb, rxm->offset + skip,
					    msg, chunk);
		if (err < 0)
			goto out;

		len = len - chunk;
		copied = copied + chunk;

		/* Consume the data from record if it is non-peek case */
		if (!is_peek) {
			rxm->offset = rxm->offset + chunk;
			rxm->full_len = rxm->full_len - chunk;

			/* Return if there is unconsumed data in the record */
			if (rxm->full_len - skip)
				break;
		}

		/* The remaining skip-bytes must lie in 1st record in rx_list.
		 * So from the 2nd record, 'skip' should be 0.
		 */
		skip = 0;

		if (msg)
			msg->msg_flags |= MSG_EOR;

		next_skb = skb_peek_next(skb, &ctx->rx_list);

		if (!is_peek) {
			__skb_unlink(skb, &ctx->rx_list);
			consume_skb(skb);
		}

		skb = next_skb;
	}
	err = 0;

out:
	return copied ? : err;
}
static bool
tls_read_flush_backlog(struct sock *sk, struct tls_prot_info *prot,
		       size_t len_left, size_t decrypted, ssize_t done,
		       size_t *flushed_at)
{
	size_t max_rec;

	if (len_left <= decrypted)
		return false;

	max_rec = prot->overhead_size - prot->tail_size + TLS_MAX_PAYLOAD_SIZE;
	if (done - *flushed_at < SZ_128K && tcp_inq(sk) > max_rec)
		return false;

	*flushed_at = done;
	return sk_flush_backlog(sk);
}
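
/* Illustrative arithmetic: with 16kB TLS records, the backlog is
 * flushed at most once per 128kB of copied data, and only while the TCP
 * receive queue still holds more than one full record (max_rec).
 */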
static int tls_rx_reader_acquire(struct sock *sk, struct tls_sw_context_rx *ctx,
				 bool nonblock)
{
	long timeo;
	int ret;

	timeo = sock_rcvtimeo(sk, nonblock);

	while (unlikely(ctx->reader_present)) {
		DEFINE_WAIT_FUNC(wait, woken_wake_function);

		ctx->reader_contended = 1;

		add_wait_queue(&ctx->wq, &wait);
		ret = sk_wait_event(sk, &timeo,
				    !READ_ONCE(ctx->reader_present), &wait);
		remove_wait_queue(&ctx->wq, &wait);

		if (timeo <= 0)
			return -EAGAIN;
		if (signal_pending(current))
			return sock_intr_errno(timeo);
		if (ret < 0)
			return ret;
	}

	WRITE_ONCE(ctx->reader_present, 1);

	return 0;
}
static int tls_rx_reader_lock(struct sock *sk, struct tls_sw_context_rx *ctx,
			      bool nonblock)
{
	int err;

	lock_sock(sk);
	err = tls_rx_reader_acquire(sk, ctx, nonblock);
	if (err)
		release_sock(sk);
	return err;
}
static void tls_rx_reader_release(struct sock *sk, struct tls_sw_context_rx *ctx)
{
	if (unlikely(ctx->reader_contended)) {
		if (wq_has_sleeper(&ctx->wq))
			wake_up(&ctx->wq);
		else
			ctx->reader_contended = 0;

		WARN_ON_ONCE(!ctx->reader_present);
	}

	WRITE_ONCE(ctx->reader_present, 0);
}
static void tls_rx_reader_unlock(struct sock *sk, struct tls_sw_context_rx *ctx)
{
	tls_rx_reader_release(sk, ctx);
	release_sock(sk);
}
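
/* Illustrative usage of the reader lock pair above:
 *
 *	err = tls_rx_reader_lock(sk, ctx, nonblock);
 *	if (err < 0)
 *		return err;
 *	...single-reader section...
 *	tls_rx_reader_unlock(sk, ctx);
 */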
int tls_sw_recvmsg(struct sock *sk,
		   struct msghdr *msg,
		   size_t len,
		   int flags,
		   int *addr_len)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	ssize_t decrypted = 0, async_copy_bytes = 0;
	struct sk_psock *psock;
	unsigned char control = 0;
	size_t flushed_at = 0;
	struct strp_msg *rxm;
	struct tls_msg *tlm;
	ssize_t copied = 0;
	bool async = false;
	int target, err;
	bool is_kvec = iov_iter_is_kvec(&msg->msg_iter);
	bool is_peek = flags & MSG_PEEK;
	bool released = true;
	bool bpf_strp_enabled;
	bool zc_capable;

	if (unlikely(flags & MSG_ERRQUEUE))
		return sock_recv_errqueue(sk, msg, len, SOL_IP, IP_RECVERR);

	psock = sk_psock_get(sk);
	err = tls_rx_reader_lock(sk, ctx, flags & MSG_DONTWAIT);
	if (err < 0)
		return err;
	bpf_strp_enabled = sk_psock_strp_enabled(psock);

	/* If crypto failed the connection is broken */
	err = ctx->async_wait.err;
	if (err)
		goto end;

	/* Process pending decrypted records. It must be non-zero-copy */
	err = process_rx_list(ctx, msg, &control, 0, len, is_peek);
	if (err < 0)
		goto end;

	copied = err;
	if (len <= copied)
		goto end;

	target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
	len = len - copied;

	zc_capable = !bpf_strp_enabled && !is_kvec && !is_peek &&
		     ctx->zc_capable;
	decrypted = 0;
	while (len && (decrypted + copied < target || tls_strp_msg_ready(ctx))) {
		struct tls_decrypt_arg darg;
		int to_decrypt, chunk;

		err = tls_rx_rec_wait(sk, psock, flags & MSG_DONTWAIT,
				      released);
		if (err <= 0) {
			if (psock) {
				chunk = sk_msg_recvmsg(sk, psock, msg, len,
						       flags);
				if (chunk > 0) {
					decrypted += chunk;
					len -= chunk;
					continue;
				}
			}
			goto recv_end;
		}

		memset(&darg.inargs, 0, sizeof(darg.inargs));

		rxm = strp_msg(tls_strp_msg(ctx));
		tlm = tls_msg(tls_strp_msg(ctx));

		to_decrypt = rxm->full_len - prot->overhead_size;

		if (zc_capable && to_decrypt <= len &&
		    tlm->control == TLS_RECORD_TYPE_DATA)
			darg.zc = true;

		/* Do not use async mode if record is non-data */
		if (tlm->control == TLS_RECORD_TYPE_DATA && !bpf_strp_enabled)
			darg.async = ctx->async_capable;
		else
			darg.async = false;

		err = tls_rx_one_record(sk, msg, &darg);
		if (err < 0) {
			tls_err_abort(sk, -EBADMSG);
			goto recv_end;
		}

		async |= darg.async;

		/* If the type of records being processed is not known yet,
		 * set it to record type just dequeued. If it is already known,
		 * but does not match the record type just dequeued, go to end.
		 * We always get record type here since for tls1.2, record type
		 * is known just after record is dequeued from stream parser.
		 * For tls1.3, we disable async.
		 */
		err = tls_record_content_type(msg, tls_msg(darg.skb), &control);
		if (err <= 0) {
			DEBUG_NET_WARN_ON_ONCE(darg.zc);
			tls_rx_rec_done(ctx);
put_on_rx_list_err:
			__skb_queue_tail(&ctx->rx_list, darg.skb);
			goto recv_end;
		}

		/* periodically flush backlog, and feed strparser */
		released = tls_read_flush_backlog(sk, prot, len, to_decrypt,
						  decrypted + copied,
						  &flushed_at);

		/* TLS 1.3 may have updated the length by more than overhead */
		rxm = strp_msg(darg.skb);
		chunk = rxm->full_len;
		tls_rx_rec_done(ctx);

		if (!darg.zc) {
			bool partially_consumed = chunk > len;
			struct sk_buff *skb = darg.skb;

			DEBUG_NET_WARN_ON_ONCE(darg.skb == ctx->strp.anchor);

			if (async) {
				/* TLS 1.2-only, to_decrypt must be text len */
				chunk = min_t(int, to_decrypt, len);
				async_copy_bytes += chunk;
put_on_rx_list:
				decrypted += chunk;
				len -= chunk;
				__skb_queue_tail(&ctx->rx_list, skb);
				continue;
			}

			if (bpf_strp_enabled) {
				released = true;
				err = sk_psock_tls_strp_read(psock, skb);
				if (err != __SK_PASS) {
					rxm->offset = rxm->offset + rxm->full_len;
					rxm->full_len = 0;
					if (err == __SK_DROP)
						consume_skb(skb);
					continue;
				}
			}

			if (partially_consumed)
				chunk = len;

			err = skb_copy_datagram_msg(skb, rxm->offset,
						    msg, chunk);
			if (err < 0)
				goto put_on_rx_list_err;

			if (is_peek)
				goto put_on_rx_list;

			if (partially_consumed) {
				rxm->offset += chunk;
				rxm->full_len -= chunk;
				goto put_on_rx_list;
			}

			consume_skb(skb);
		}

		decrypted += chunk;
		len -= chunk;

		/* Return full control message to userspace before trying
		 * to parse another message type
		 */
		msg->msg_flags |= MSG_EOR;
		if (control != TLS_RECORD_TYPE_DATA)
			break;
	}

recv_end:
	if (async) {
		int ret, pending;

		/* Wait for all previously submitted records to be decrypted */
		spin_lock_bh(&ctx->decrypt_compl_lock);
		reinit_completion(&ctx->async_wait.completion);
		pending = atomic_read(&ctx->decrypt_pending);
		spin_unlock_bh(&ctx->decrypt_compl_lock);
		ret = 0;
		if (pending)
			ret = crypto_wait_req(-EINPROGRESS, &ctx->async_wait);
		__skb_queue_purge(&ctx->async_hold);

		if (ret) {
			if (err >= 0 || err == -EINPROGRESS)
				err = ret;
			decrypted = 0;
			goto end;
		}

		/* Drain records from the rx_list & copy if required */
		if (is_peek || is_kvec)
			err = process_rx_list(ctx, msg, &control, copied,
					      decrypted, is_peek);
		else
			err = process_rx_list(ctx, msg, &control, 0,
					      async_copy_bytes, is_peek);
		decrypted += max(err, 0);
	}

	copied += decrypted;

end:
	tls_rx_reader_unlock(sk, ctx);
	if (psock)
		sk_psock_put(sk, psock);
	return copied ? : err;
}
ssize_t tls_sw_splice_read(struct socket *sock, loff_t *ppos,
			   struct pipe_inode_info *pipe,
			   size_t len, unsigned int flags)
{
	struct tls_context *tls_ctx = tls_get_ctx(sock->sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	struct strp_msg *rxm = NULL;
	struct sock *sk = sock->sk;
	struct tls_msg *tlm;
	struct sk_buff *skb;
	ssize_t copied = 0;
	int chunk;
	int err;

	err = tls_rx_reader_lock(sk, ctx, flags & SPLICE_F_NONBLOCK);
	if (err < 0)
		return err;

	if (!skb_queue_empty(&ctx->rx_list)) {
		skb = __skb_dequeue(&ctx->rx_list);
	} else {
		struct tls_decrypt_arg darg;

		err = tls_rx_rec_wait(sk, NULL, flags & SPLICE_F_NONBLOCK,
				      true);
		if (err <= 0)
			goto splice_read_end;

		memset(&darg.inargs, 0, sizeof(darg.inargs));

		err = tls_rx_one_record(sk, NULL, &darg);
		if (err < 0) {
			tls_err_abort(sk, -EBADMSG);
			goto splice_read_end;
		}

		tls_rx_rec_done(ctx);
		skb = darg.skb;
	}

	rxm = strp_msg(skb);
	tlm = tls_msg(skb);

	/* splice does not support reading control messages */
	if (tlm->control != TLS_RECORD_TYPE_DATA) {
		err = -EINVAL;
		goto splice_requeue;
	}

	chunk = min_t(unsigned int, rxm->full_len, len);
	copied = skb_splice_bits(skb, sk, rxm->offset, pipe, chunk, flags);
	if (copied < 0)
		goto splice_requeue;

	if (chunk < rxm->full_len) {
		rxm->offset += len;
		rxm->full_len -= len;
		goto splice_requeue;
	}

	consume_skb(skb);

splice_read_end:
	tls_rx_reader_unlock(sk, ctx);
	return copied ? : err;

splice_requeue:
	__skb_queue_head(&ctx->rx_list, skb);
	goto splice_read_end;
}
int tls_sw_read_sock(struct sock *sk, read_descriptor_t *desc,
		     sk_read_actor_t read_actor)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct strp_msg *rxm = NULL;
	struct sk_buff *skb = NULL;
	struct sk_psock *psock;
	size_t flushed_at = 0;
	bool released = true;
	struct tls_msg *tlm;
	ssize_t copied = 0;
	ssize_t decrypted;
	int err, used;

	psock = sk_psock_get(sk);
	if (psock) {
		sk_psock_put(sk, psock);
		return -EINVAL;
	}
	err = tls_rx_reader_acquire(sk, ctx, true);
	if (err < 0)
		return err;

	/* If crypto failed the connection is broken */
	err = ctx->async_wait.err;
	if (err)
		goto read_sock_end;

	decrypted = 0;
	do {
		if (!skb_queue_empty(&ctx->rx_list)) {
			skb = __skb_dequeue(&ctx->rx_list);
			rxm = strp_msg(skb);
			tlm = tls_msg(skb);
		} else {
			struct tls_decrypt_arg darg;

			err = tls_rx_rec_wait(sk, NULL, true, released);
			if (err <= 0)
				goto read_sock_end;

			memset(&darg.inargs, 0, sizeof(darg.inargs));

			err = tls_rx_one_record(sk, NULL, &darg);
			if (err < 0) {
				tls_err_abort(sk, -EBADMSG);
				goto read_sock_end;
			}

			released = tls_read_flush_backlog(sk, prot, INT_MAX,
							  0, decrypted,
							  &flushed_at);
			skb = darg.skb;
			rxm = strp_msg(skb);
			tlm = tls_msg(skb);
			decrypted += rxm->full_len;

			tls_rx_rec_done(ctx);
		}

		/* read_sock does not support reading control messages */
		if (tlm->control != TLS_RECORD_TYPE_DATA) {
			err = -EINVAL;
			goto read_sock_requeue;
		}

		used = read_actor(desc, skb, rxm->offset, rxm->full_len);
		if (used <= 0) {
			if (!copied)
				err = used;
			goto read_sock_requeue;
		}
		copied += used;
		if (used < rxm->full_len) {
			rxm->offset += used;
			rxm->full_len -= used;
			if (!desc->count)
				goto read_sock_requeue;
		} else {
			consume_skb(skb);
			if (!desc->count)
				skb = NULL;
		}
	} while (skb);

read_sock_end:
	tls_rx_reader_release(sk, ctx);
	return copied ? : err;

read_sock_requeue:
	__skb_queue_head(&ctx->rx_list, skb);
	goto read_sock_end;
}
bool tls_sw_sock_is_readable(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	bool ingress_empty = true;
	struct sk_psock *psock;

	rcu_read_lock();
	psock = sk_psock(sk);
	if (psock)
		ingress_empty = list_empty(&psock->ingress_msg);
	rcu_read_unlock();

	return !ingress_empty || tls_strp_msg_ready(ctx) ||
		!skb_queue_empty(&ctx->rx_list);
}
int tls_rx_msg_size(struct tls_strparser *strp, struct sk_buff *skb)
{
	struct tls_context *tls_ctx = tls_get_ctx(strp->sk);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	char header[TLS_HEADER_SIZE + MAX_IV_SIZE];
	size_t cipher_overhead;
	size_t data_len = 0;
	int ret;

	/* Verify that we have a full TLS header, or wait for more data */
	if (strp->stm.offset + prot->prepend_size > skb->len)
		return 0;

	/* Sanity-check size of on-stack buffer. */
	if (WARN_ON(prot->prepend_size > sizeof(header))) {
		ret = -EINVAL;
		goto read_failure;
	}

	/* Linearize header to local buffer */
	ret = skb_copy_bits(skb, strp->stm.offset, header, prot->prepend_size);
	if (ret < 0)
		goto read_failure;

	strp->mark = header[0];

	data_len = ((header[4] & 0xFF) | (header[3] << 8));

	cipher_overhead = prot->tag_size;
	if (prot->version != TLS_1_3_VERSION &&
	    prot->cipher_type != TLS_CIPHER_CHACHA20_POLY1305)
		cipher_overhead += prot->iv_size;

	if (data_len > TLS_MAX_PAYLOAD_SIZE + cipher_overhead +
	    prot->tail_size) {
		ret = -EMSGSIZE;
		goto read_failure;
	}
	if (data_len < cipher_overhead) {
		ret = -EBADMSG;
		goto read_failure;
	}

	/* Note that both TLS1.3 and TLS1.2 use TLS_1_2 version here */
	if (header[1] != TLS_1_2_VERSION_MINOR ||
	    header[2] != TLS_1_2_VERSION_MAJOR) {
		ret = -EINVAL;
		goto read_failure;
	}

	tls_device_rx_resync_new_rec(strp->sk, data_len + TLS_HEADER_SIZE,
				     TCP_SKB_CB(skb)->seq + strp->stm.offset);
	return data_len + TLS_HEADER_SIZE;

read_failure:
	tls_err_abort(strp->sk, ret);

	return ret;
}
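
/* Illustrative sketch of the 5 byte record header parsed above:
 *
 *	header[0]	content type
 *	header[1-2]	legacy version (always TLS 1.2 on the wire)
 *	header[3-4]	payload length, big endian
 *
 * so data_len = (header[3] << 8) | header[4].
 */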
void tls_rx_msg_ready(struct tls_strparser *strp)
{
	struct tls_sw_context_rx *ctx;

	ctx = container_of(strp, struct tls_sw_context_rx, strp);
	ctx->saved_data_ready(strp->sk);
}
static void tls_data_ready(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	struct sk_psock *psock;
	gfp_t alloc_save;

	trace_sk_data_ready(sk);

	alloc_save = sk->sk_allocation;
	sk->sk_allocation = GFP_ATOMIC;
	tls_strp_data_ready(&ctx->strp);
	sk->sk_allocation = alloc_save;

	psock = sk_psock_get(sk);
	if (psock) {
		if (!list_empty(&psock->ingress_msg))
			ctx->saved_data_ready(sk);
		sk_psock_put(sk, psock);
	}
}
void tls_sw_cancel_work_tx(struct tls_context *tls_ctx)
{
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);

	set_bit(BIT_TX_CLOSING, &ctx->tx_bitmask);
	set_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask);
	cancel_delayed_work_sync(&ctx->tx_work.work);
}
void tls_sw_release_resources_tx(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct tls_rec *rec, *tmp;
	int pending;

	/* Wait for any pending async encryptions to complete */
	spin_lock_bh(&ctx->encrypt_compl_lock);
	ctx->async_notify = true;
	pending = atomic_read(&ctx->encrypt_pending);
	spin_unlock_bh(&ctx->encrypt_compl_lock);

	if (pending)
		crypto_wait_req(-EINPROGRESS, &ctx->async_wait);

	tls_tx_records(sk, -1);

	/* Free up un-sent records in tx_list. First, free
	 * the partially sent record if any at head of tx_list.
	 */
	if (tls_ctx->partially_sent_record) {
		tls_free_partial_record(sk, tls_ctx);
		rec = list_first_entry(&ctx->tx_list,
				       struct tls_rec, list);
		list_del(&rec->list);
		sk_msg_free(sk, &rec->msg_plaintext);
		kfree(rec);
	}

	list_for_each_entry_safe(rec, tmp, &ctx->tx_list, list) {
		list_del(&rec->list);
		sk_msg_free(sk, &rec->msg_encrypted);
		sk_msg_free(sk, &rec->msg_plaintext);
		kfree(rec);
	}

	crypto_free_aead(ctx->aead_send);
	tls_free_open_rec(sk);
}
void tls_sw_free_ctx_tx(struct tls_context *tls_ctx)
{
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);

	kfree(ctx);
}
void tls_sw_release_resources_rx(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);

	kfree(tls_ctx->rx.rec_seq);
	kfree(tls_ctx->rx.iv);

	if (ctx->aead_recv) {
		__skb_queue_purge(&ctx->rx_list);
		crypto_free_aead(ctx->aead_recv);
		tls_strp_stop(&ctx->strp);
		/* If tls_sw_strparser_arm() was not called (cleanup paths)
		 * we still want to tls_strp_stop(), but sk->sk_data_ready was
		 * never swapped.
		 */
		if (ctx->saved_data_ready) {
			write_lock_bh(&sk->sk_callback_lock);
			sk->sk_data_ready = ctx->saved_data_ready;
			write_unlock_bh(&sk->sk_callback_lock);
		}
	}
}
void tls_sw_strparser_done(struct tls_context *tls_ctx)
{
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);

	tls_strp_done(&ctx->strp);
}
void tls_sw_free_ctx_rx(struct tls_context *tls_ctx)
{
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);

	kfree(ctx);
}
void tls_sw_free_resources_rx(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);

	tls_sw_release_resources_rx(sk);
	tls_sw_free_ctx_rx(tls_ctx);
}
/* The work handler to transmit the encrypted records in tx_list */
static void tx_work_handler(struct work_struct *work)
{
	struct delayed_work *delayed_work = to_delayed_work(work);
	struct tx_work *tx_work = container_of(delayed_work,
					       struct tx_work, work);
	struct sock *sk = tx_work->sk;
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx;

	if (unlikely(!tls_ctx))
		return;

	ctx = tls_sw_ctx_tx(tls_ctx);
	if (test_bit(BIT_TX_CLOSING, &ctx->tx_bitmask))
		return;

	if (!test_and_clear_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask))
		return;

	if (mutex_trylock(&tls_ctx->tx_lock)) {
		lock_sock(sk);
		tls_tx_records(sk, -1);
		release_sock(sk);
		mutex_unlock(&tls_ctx->tx_lock);
	} else if (!test_and_set_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask)) {
		/* Someone is holding the tx_lock, they will likely run Tx
		 * and cancel the work on their way out of the lock section.
		 * Schedule a long delay just in case.
		 */
		schedule_delayed_work(&ctx->tx_work.work, msecs_to_jiffies(10));
	}
}
static bool tls_is_tx_ready(struct tls_sw_context_tx *ctx)
{
	struct tls_rec *rec;

	rec = list_first_entry_or_null(&ctx->tx_list, struct tls_rec, list);
	if (!rec)
		return false;

	return READ_ONCE(rec->tx_ready);
}
void tls_sw_write_space(struct sock *sk, struct tls_context *ctx)
{
	struct tls_sw_context_tx *tx_ctx = tls_sw_ctx_tx(ctx);

	/* Schedule the transmission if tx list is ready */
	if (tls_is_tx_ready(tx_ctx) &&
	    !test_and_set_bit(BIT_TX_SCHEDULED, &tx_ctx->tx_bitmask))
		schedule_delayed_work(&tx_ctx->tx_work.work, 0);
}
void tls_sw_strparser_arm(struct sock *sk, struct tls_context *tls_ctx)
{
	struct tls_sw_context_rx *rx_ctx = tls_sw_ctx_rx(tls_ctx);

	write_lock_bh(&sk->sk_callback_lock);
	rx_ctx->saved_data_ready = sk->sk_data_ready;
	sk->sk_data_ready = tls_data_ready;
	write_unlock_bh(&sk->sk_callback_lock);
}
void tls_update_rx_zc_capable(struct tls_context *tls_ctx)
{
	struct tls_sw_context_rx *rx_ctx = tls_sw_ctx_rx(tls_ctx);

	rx_ctx->zc_capable = tls_ctx->rx_no_pad ||
		tls_ctx->prot_info.version != TLS_1_3_VERSION;
}
int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct tls_crypto_info *crypto_info;
	struct tls_sw_context_tx *sw_ctx_tx = NULL;
	struct tls_sw_context_rx *sw_ctx_rx = NULL;
	struct cipher_context *cctx;
	struct crypto_aead **aead;
	struct crypto_tfm *tfm;
	char *iv, *rec_seq, *key, *salt;
	const struct tls_cipher_desc *cipher_desc;
	u16 nonce_size;
	int rc = 0;

	if (!ctx) {
		rc = -EINVAL;
		goto out;
	}

	if (tx) {
		if (!ctx->priv_ctx_tx) {
			sw_ctx_tx = kzalloc(sizeof(*sw_ctx_tx), GFP_KERNEL);
			if (!sw_ctx_tx) {
				rc = -ENOMEM;
				goto out;
			}
			ctx->priv_ctx_tx = sw_ctx_tx;
		} else {
			sw_ctx_tx =
				(struct tls_sw_context_tx *)ctx->priv_ctx_tx;
		}
	} else {
		if (!ctx->priv_ctx_rx) {
			sw_ctx_rx = kzalloc(sizeof(*sw_ctx_rx), GFP_KERNEL);
			if (!sw_ctx_rx) {
				rc = -ENOMEM;
				goto out;
			}
			ctx->priv_ctx_rx = sw_ctx_rx;
		} else {
			sw_ctx_rx =
				(struct tls_sw_context_rx *)ctx->priv_ctx_rx;
		}
	}

	if (tx) {
		crypto_init_wait(&sw_ctx_tx->async_wait);
		spin_lock_init(&sw_ctx_tx->encrypt_compl_lock);
		crypto_info = &ctx->crypto_send.info;
		cctx = &ctx->tx;
		aead = &sw_ctx_tx->aead_send;
		INIT_LIST_HEAD(&sw_ctx_tx->tx_list);
		INIT_DELAYED_WORK(&sw_ctx_tx->tx_work.work, tx_work_handler);
		sw_ctx_tx->tx_work.sk = sk;
	} else {
		crypto_init_wait(&sw_ctx_rx->async_wait);
		spin_lock_init(&sw_ctx_rx->decrypt_compl_lock);
		init_waitqueue_head(&sw_ctx_rx->wq);
		crypto_info = &ctx->crypto_recv.info;
		cctx = &ctx->rx;
		skb_queue_head_init(&sw_ctx_rx->rx_list);
		skb_queue_head_init(&sw_ctx_rx->async_hold);
		aead = &sw_ctx_rx->aead_recv;
	}

	cipher_desc = get_cipher_desc(crypto_info->cipher_type);
	if (!cipher_desc) {
		rc = -EINVAL;
		goto free_priv;
	}

	nonce_size = cipher_desc->nonce;

	iv = crypto_info_iv(crypto_info, cipher_desc);
	key = crypto_info_key(crypto_info, cipher_desc);
	salt = crypto_info_salt(crypto_info, cipher_desc);
	rec_seq = crypto_info_rec_seq(crypto_info, cipher_desc);

	if (crypto_info->version == TLS_1_3_VERSION) {
		nonce_size = 0;
		prot->aad_size = TLS_HEADER_SIZE;
		prot->tail_size = 1;
	} else {
		prot->aad_size = TLS_AAD_SPACE_SIZE;
		prot->tail_size = 0;
	}

	/* Sanity-check the sizes for stack allocations. */
	if (nonce_size > MAX_IV_SIZE || prot->aad_size > TLS_MAX_AAD_SIZE) {
		rc = -EINVAL;
		goto free_priv;
	}

	prot->version = crypto_info->version;
	prot->cipher_type = crypto_info->cipher_type;
	prot->prepend_size = TLS_HEADER_SIZE + nonce_size;
	prot->tag_size = cipher_desc->tag;
	prot->overhead_size = prot->prepend_size +
			      prot->tag_size + prot->tail_size;
	prot->iv_size = cipher_desc->iv;
	prot->salt_size = cipher_desc->salt;
	cctx->iv = kmalloc(cipher_desc->iv + cipher_desc->salt, GFP_KERNEL);
	if (!cctx->iv) {
		rc = -ENOMEM;
		goto free_priv;
	}
	/* Note: 128 & 256 bit salt are the same size */
	prot->rec_seq_size = cipher_desc->rec_seq;
	memcpy(cctx->iv, salt, cipher_desc->salt);
	memcpy(cctx->iv + cipher_desc->salt, iv, cipher_desc->iv);

	cctx->rec_seq = kmemdup(rec_seq, cipher_desc->rec_seq, GFP_KERNEL);
	if (!cctx->rec_seq) {
		rc = -ENOMEM;
		goto free_iv;
	}

	if (!*aead) {
		*aead = crypto_alloc_aead(cipher_desc->cipher_name, 0, 0);
		if (IS_ERR(*aead)) {
			rc = PTR_ERR(*aead);
			*aead = NULL;
			goto free_rec_seq;
		}
	}

	ctx->push_pending_record = tls_sw_push_pending_record;

	rc = crypto_aead_setkey(*aead, key, cipher_desc->key);
	if (rc)
		goto free_aead;

	rc = crypto_aead_setauthsize(*aead, prot->tag_size);
	if (rc)
		goto free_aead;

	if (sw_ctx_rx) {
		tfm = crypto_aead_tfm(sw_ctx_rx->aead_recv);

		tls_update_rx_zc_capable(ctx);
		sw_ctx_rx->async_capable =
			crypto_info->version != TLS_1_3_VERSION &&
			!!(tfm->__crt_alg->cra_flags & CRYPTO_ALG_ASYNC);

		rc = tls_strp_init(&sw_ctx_rx->strp, sk);
		if (rc)
			goto free_aead;
	}

	goto out;

free_aead:
	crypto_free_aead(*aead);
	*aead = NULL;
free_rec_seq:
	kfree(cctx->rec_seq);
	cctx->rec_seq = NULL;
free_iv:
	kfree(cctx->iv);
	cctx->iv = NULL;
free_priv:
	if (tx) {
		kfree(ctx->priv_ctx_tx);
		ctx->priv_ctx_tx = NULL;
	} else {
		kfree(ctx->priv_ctx_rx);
		ctx->priv_ctx_rx = NULL;
	}
out:
	return rc;
}
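
/* Illustrative arithmetic (AES-GCM-128 with TLS 1.2) for the sizes set
 * up in tls_set_sw_offload():
 *
 *	prepend_size  = TLS_HEADER_SIZE (5) + nonce (8)    = 13
 *	overhead_size = prepend_size + tag (16) + tail (0) = 29
 */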