/* Copyright (c) 2018, Mellanox Technologies All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <crypto/aead.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <net/dst.h>
#include <net/inet_connection_sock.h>
#include <net/tcp.h>
#include <net/tls.h>

#include "tls.h"
#include "trace.h"

/* device_offload_lock is used to synchronize tls_dev_add
 * against NETDEV_DOWN notifications.
 */
static DECLARE_RWSEM(device_offload_lock);

static struct workqueue_struct *destruct_wq __read_mostly;

static LIST_HEAD(tls_device_list);
static LIST_HEAD(tls_device_down_list);
static DEFINE_SPINLOCK(tls_device_lock);

static struct page *dummy_page;

static void tls_device_free_ctx(struct tls_context *ctx)
{
	if (ctx->tx_conf == TLS_HW)
		kfree(tls_offload_ctx_tx(ctx));

	if (ctx->rx_conf == TLS_HW)
		kfree(tls_offload_ctx_rx(ctx));

	tls_ctx_free(NULL, ctx);
}

static void tls_device_tx_del_task(struct work_struct *work)
{
	struct tls_offload_context_tx *offload_ctx =
		container_of(work, struct tls_offload_context_tx, destruct_work);
	struct tls_context *ctx = offload_ctx->ctx;
	struct net_device *netdev;

	/* Safe, because this is the destroy flow, refcount is 0, so
	 * tls_device_down can't store this field in parallel.
	 */
	netdev = rcu_dereference_protected(ctx->netdev,
					   !refcount_read(&ctx->refcount));

	netdev->tlsdev_ops->tls_dev_del(netdev, ctx, TLS_OFFLOAD_CTX_DIR_TX);
	dev_put(netdev);
	tls_device_free_ctx(ctx);
}

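/* Context teardown in outline: the final refcount drop unlinks the ctx from
 * the global lists under tls_device_lock.  If TX offload was installed and a
 * netdev is still attached, tls_dev_del() may sleep, so the driver-side
 * cleanup is deferred to destruct_wq (tls_device_tx_del_task() above);
 * otherwise the context is freed synchronously.
 */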
static void tls_device_queue_ctx_destruction(struct tls_context *ctx)
{
	struct net_device *netdev;
	unsigned long flags;
	bool async_cleanup;

	spin_lock_irqsave(&tls_device_lock, flags);
	if (unlikely(!refcount_dec_and_test(&ctx->refcount))) {
		spin_unlock_irqrestore(&tls_device_lock, flags);
		return;
	}

	list_del(&ctx->list); /* Remove from tls_device_list / tls_device_down_list */

	/* Safe, because this is the destroy flow, refcount is 0, so
	 * tls_device_down can't store this field in parallel.
	 */
	netdev = rcu_dereference_protected(ctx->netdev,
					   !refcount_read(&ctx->refcount));

	async_cleanup = netdev && ctx->tx_conf == TLS_HW;
	if (async_cleanup) {
		struct tls_offload_context_tx *offload_ctx = tls_offload_ctx_tx(ctx);

		/* queue_work inside the spinlock
		 * to make sure tls_device_down waits for that work.
		 */
		queue_work(destruct_wq, &offload_ctx->destruct_work);
	}
	spin_unlock_irqrestore(&tls_device_lock, flags);

	if (!async_cleanup)
		tls_device_free_ctx(ctx);
}

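/* Resolve the device that will actually transmit for this (already
 * connected) socket: netdev_sk_get_lowest_dev() descends below the route's
 * device (e.g. through a bond or VLAN stack), since that lower device is
 * where the TLS offload state has to be installed.
 */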
/* We assume that the socket is already connected */
static struct net_device *get_netdev_for_sock(struct sock *sk)
{
	struct dst_entry *dst = sk_dst_get(sk);
	struct net_device *netdev = NULL;

	if (likely(dst)) {
		netdev = netdev_sk_get_lowest_dev(dst->dev, sk);
		dev_hold(netdev);
	}

	dst_release(dst);

	return netdev;
}

static void destroy_record(struct tls_record_info *record)
{
	int i;

	for (i = 0; i < record->num_frags; i++)
		__skb_frag_unref(&record->frags[i], false);
	kfree(record);
}

static void delete_all_records(struct tls_offload_context_tx *offload_ctx)
{
	struct tls_record_info *info, *temp;

	list_for_each_entry_safe(info, temp, &offload_ctx->records_list, list) {
		list_del(&info->list);
		destroy_record(info);
	}

	offload_ctx->retransmit_hint = NULL;
}

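/* clean_acked_data hook, invoked from TCP as cumulative ACKs advance.
 * Records whose end_seq has been fully acknowledged can never be
 * retransmitted, so they are unlinked and freed, and unacked_record_sn is
 * advanced by the number of records dropped.
 */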
static void tls_icsk_clean_acked(struct sock *sk, u32 acked_seq)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_record_info *info, *temp;
	struct tls_offload_context_tx *ctx;
	u64 deleted_records = 0;
	unsigned long flags;

	if (!tls_ctx)
		return;

	ctx = tls_offload_ctx_tx(tls_ctx);

	spin_lock_irqsave(&ctx->lock, flags);
	info = ctx->retransmit_hint;
	if (info && !before(acked_seq, info->end_seq))
		ctx->retransmit_hint = NULL;

	list_for_each_entry_safe(info, temp, &ctx->records_list, list) {
		if (before(acked_seq, info->end_seq))
			break;
		list_del(&info->list);

		destroy_record(info);
		deleted_records++;
	}

	ctx->unacked_record_sn += deleted_records;
	spin_unlock_irqrestore(&ctx->lock, flags);
}

/* At this point, there should be no references on this
 * socket and no in-flight SKBs associated with this
 * socket, so it is safe to free all the resources.
 */
void tls_device_sk_destruct(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_offload_context_tx *ctx = tls_offload_ctx_tx(tls_ctx);

	tls_ctx->sk_destruct(sk);

	if (tls_ctx->tx_conf == TLS_HW) {
		if (ctx->open_record)
			destroy_record(ctx->open_record);
		delete_all_records(ctx);
		crypto_free_aead(ctx->aead_send);
		clean_acked_data_disable(inet_csk(sk));
	}

	tls_device_queue_ctx_destruction(tls_ctx);
}
EXPORT_SYMBOL_GPL(tls_device_sk_destruct);

void tls_device_free_resources_tx(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);

	tls_free_partial_record(sk, tls_ctx);
}

void tls_offload_tx_resync_request(struct sock *sk, u32 got_seq, u32 exp_seq)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);

	trace_tls_device_tx_resync_req(sk, got_seq, exp_seq);
	WARN_ON(test_and_set_bit(TLS_TX_SYNC_SCHED, &tls_ctx->flags));
}
EXPORT_SYMBOL_GPL(tls_offload_tx_resync_request);

static void tls_device_resync_tx(struct sock *sk, struct tls_context *tls_ctx,
				 u32 seq)
{
	struct net_device *netdev;
	struct sk_buff *skb;
	int err = 0;
	u8 *rcd_sn;

	skb = tcp_write_queue_tail(sk);
	if (skb)
		TCP_SKB_CB(skb)->eor = 1;

	rcd_sn = tls_ctx->tx.rec_seq;

	trace_tls_device_tx_resync_send(sk, seq, rcd_sn);
	down_read(&device_offload_lock);
	netdev = rcu_dereference_protected(tls_ctx->netdev,
					   lockdep_is_held(&device_offload_lock));
	if (netdev)
		err = netdev->tlsdev_ops->tls_dev_resync(netdev, sk, seq,
							 rcd_sn,
							 TLS_OFFLOAD_CTX_DIR_TX);
	up_read(&device_offload_lock);
	if (err)
		return;

	clear_bit_unlock(TLS_TX_SYNC_SCHED, &tls_ctx->flags);
}

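/* Append @size bytes starting at pfrag->offset to the open record.  If the
 * new data is physically contiguous with the record's last fragment it is
 * coalesced into that fragment; otherwise a new fragment is started and a
 * page reference is taken.
 */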
static void tls_append_frag(struct tls_record_info *record,
			    struct page_frag *pfrag,
			    int size)
{
	skb_frag_t *frag;

	frag = &record->frags[record->num_frags - 1];
	if (skb_frag_page(frag) == pfrag->page &&
	    skb_frag_off(frag) + skb_frag_size(frag) == pfrag->offset) {
		skb_frag_size_add(frag, size);
	} else {
		++frag;
		skb_frag_fill_page_desc(frag, pfrag->page, pfrag->offset,
					size);
		++record->num_frags;
		get_page(pfrag->page);
	}

	pfrag->offset += size;
	record->len += size;
}

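/* Close the open record: fix up end_seq from the current TCP write sequence,
 * publish the record on records_list for tls_get_record(), mirror its
 * fragments into the sg_tx_data scatterlist, and hand the whole record to
 * tls_push_sg() for transmission as one unit.
 */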
static int tls_push_record(struct sock *sk,
			   struct tls_context *ctx,
			   struct tls_offload_context_tx *offload_ctx,
			   struct tls_record_info *record,
			   int flags)
{
	struct tls_prot_info *prot = &ctx->prot_info;
	struct tcp_sock *tp = tcp_sk(sk);
	skb_frag_t *frag;
	int i;

	record->end_seq = tp->write_seq + record->len;
	list_add_tail_rcu(&record->list, &offload_ctx->records_list);
	offload_ctx->open_record = NULL;

	if (test_bit(TLS_TX_SYNC_SCHED, &ctx->flags))
		tls_device_resync_tx(sk, ctx, tp->write_seq);

	tls_advance_record_sn(sk, prot, &ctx->tx);

	for (i = 0; i < record->num_frags; i++) {
		frag = &record->frags[i];
		sg_unmark_end(&offload_ctx->sg_tx_data[i]);
		sg_set_page(&offload_ctx->sg_tx_data[i], skb_frag_page(frag),
			    skb_frag_size(frag), skb_frag_off(frag));
		sk_mem_charge(sk, skb_frag_size(frag));
		get_page(skb_frag_page(frag));
	}
	sg_mark_end(&offload_ctx->sg_tx_data[record->num_frags - 1]);

	/* all ready, send */
	return tls_push_sg(sk, ctx, offload_ctx->sg_tx_data, 0, flags);
}

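/* Finalize the on-the-wire layout of a record before it is pushed:
 *
 *   frags[0]                                    frags[n]
 *   +----------------------+---------- ... ----+-----------------+
 *   | TLS header (prepend) | payload           | tag placeholder |
 *   +----------------------+---------- ... ----+-----------------+
 *
 * The device computes the real authentication tag, so only placeholder
 * bytes are appended here; the header is filled in last.
 */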
static void tls_device_record_close(struct sock *sk,
				    struct tls_context *ctx,
				    struct tls_record_info *record,
				    struct page_frag *pfrag,
				    unsigned char record_type)
{
	struct tls_prot_info *prot = &ctx->prot_info;
	struct page_frag dummy_tag_frag;

	/* append tag
	 * device will fill in the tag, we just need to append a placeholder
	 * use socket memory to improve coalescing (re-using a single buffer
	 * increases frag count)
	 * if we can't allocate memory now use the dummy page
	 */
	if (unlikely(pfrag->size - pfrag->offset < prot->tag_size) &&
	    !skb_page_frag_refill(prot->tag_size, pfrag, sk->sk_allocation)) {
		dummy_tag_frag.page = dummy_page;
		dummy_tag_frag.offset = 0;
		pfrag = &dummy_tag_frag;
	}
	tls_append_frag(record, pfrag, prot->tag_size);

	/* fill prepend */
	tls_fill_prepend(ctx, skb_frag_address(&record->frags[0]),
			 record->len - prot->overhead_size,
			 record_type);
}

static int tls_create_new_record(struct tls_offload_context_tx *offload_ctx,
				 struct page_frag *pfrag,
				 size_t prepend_size)
{
	struct tls_record_info *record;
	skb_frag_t *frag;

	record = kmalloc(sizeof(*record), GFP_KERNEL);
	if (!record)
		return -ENOMEM;

	frag = &record->frags[0];
	skb_frag_fill_page_desc(frag, pfrag->page, pfrag->offset,
				prepend_size);

	get_page(pfrag->page);
	pfrag->offset += prepend_size;

	record->num_frags = 1;
	record->len = prepend_size;
	offload_ctx->open_record = record;
	return 0;
}

static int tls_do_allocation(struct sock *sk,
			     struct tls_offload_context_tx *offload_ctx,
			     struct page_frag *pfrag,
			     size_t prepend_size)
{
	int ret;

	if (!offload_ctx->open_record) {
		if (unlikely(!skb_page_frag_refill(prepend_size, pfrag,
						   sk->sk_allocation))) {
			READ_ONCE(sk->sk_prot)->enter_memory_pressure(sk);
			sk_stream_moderate_sndbuf(sk);
			return -ENOMEM;
		}

		ret = tls_create_new_record(offload_ctx, pfrag, prepend_size);
		if (ret)
			return ret;

		if (pfrag->size > pfrag->offset)
			return 0;
	}

	if (!sk_page_frag_refill(sk, pfrag))
		return -ENOMEM;

	return 0;
}

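/* Copy @bytes from the iterator into @addr while bypassing the CPU cache for
 * the bulk of the data: a short prefix is copied normally to reach
 * cache-line alignment, the cache-line-multiple middle uses
 * copy_from_iter_nocache(), and any remaining tail falls back to a regular
 * copy.
 */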
static int tls_device_copy_data(void *addr, size_t bytes, struct iov_iter *i)
{
	size_t pre_copy, nocache;

	pre_copy = ~((unsigned long)addr - 1) & (SMP_CACHE_BYTES - 1);
	if (pre_copy) {
		pre_copy = min(pre_copy, bytes);
		if (copy_from_iter(addr, pre_copy, i) != pre_copy)
			return -EFAULT;
		bytes -= pre_copy;
		addr += pre_copy;
	}

	nocache = round_down(bytes, SMP_CACHE_BYTES);
	if (copy_from_iter_nocache(addr, nocache, i) != nocache)
		return -EFAULT;
	bytes -= nocache;
	addr += nocache;

	if (bytes && copy_from_iter(addr, bytes, i) != bytes)
		return -EFAULT;

	return 0;
}

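/* Main TX path.  Data from @iter is packed into the currently open record;
 * a record is closed and pushed once the caller is done (and MSG_MORE is not
 * set), the record reaches its maximum payload size, or it runs out of
 * fragment slots.  Returns the number of bytes consumed, or a negative error
 * if nothing was consumed.
 */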
static int tls_push_data(struct sock *sk,
			 struct iov_iter *iter,
			 size_t size, int flags,
			 unsigned char record_type)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct tls_offload_context_tx *ctx = tls_offload_ctx_tx(tls_ctx);
	struct tls_record_info *record;
	int tls_push_record_flags;
	struct page_frag *pfrag;
	size_t orig_size = size;
	u32 max_open_record_len;
	bool more = false;
	bool done = false;
	int copy, rc = 0;
	long timeo;

	if (flags &
	    ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL |
	      MSG_SPLICE_PAGES | MSG_EOR))
		return -EOPNOTSUPP;

	if ((flags & (MSG_MORE | MSG_EOR)) == (MSG_MORE | MSG_EOR))
		return -EINVAL;

	if (unlikely(sk->sk_err))
		return -sk->sk_err;

	flags |= MSG_SENDPAGE_DECRYPTED;
	tls_push_record_flags = flags | MSG_MORE;

	timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
	if (tls_is_partially_sent_record(tls_ctx)) {
		rc = tls_push_partial_record(sk, tls_ctx, flags);
		if (rc < 0)
			return rc;
	}

	pfrag = sk_page_frag(sk);

	/* TLS_HEADER_SIZE is not counted as part of the TLS record, and
	 * we need to leave room for an authentication tag.
	 */
	max_open_record_len = TLS_MAX_PAYLOAD_SIZE +
			      prot->prepend_size;
	do {
		rc = tls_do_allocation(sk, ctx, pfrag, prot->prepend_size);
		if (unlikely(rc)) {
			rc = sk_stream_wait_memory(sk, &timeo);
			if (!rc)
				continue;

			record = ctx->open_record;
			if (!record)
				break;
handle_error:
			if (record_type != TLS_RECORD_TYPE_DATA) {
				/* avoid sending partial
				 * record with type !=
				 * application_data
				 */
				size = orig_size;
				destroy_record(record);
				ctx->open_record = NULL;
			} else if (record->len > prot->prepend_size) {
				goto last_record;
			}

			break;
		}

		record = ctx->open_record;

		copy = min_t(size_t, size, max_open_record_len - record->len);
		if (copy && (flags & MSG_SPLICE_PAGES)) {
			struct page_frag zc_pfrag;
			struct page **pages = &zc_pfrag.page;
			size_t off;

			rc = iov_iter_extract_pages(iter, &pages,
						    copy, 1, 0, &off);
			if (rc <= 0) {
				if (rc == 0)
					rc = -EIO;
				goto handle_error;
			}
			copy = rc;

			if (WARN_ON_ONCE(!sendpage_ok(zc_pfrag.page))) {
				iov_iter_revert(iter, copy);
				rc = -EIO;
				goto handle_error;
			}

			zc_pfrag.offset = off;
			zc_pfrag.size = copy;
			tls_append_frag(record, &zc_pfrag, copy);
		} else if (copy) {
			copy = min_t(size_t, copy, pfrag->size - pfrag->offset);

			rc = tls_device_copy_data(page_address(pfrag->page) +
						  pfrag->offset, copy,
						  iter);
			if (rc)
				goto handle_error;
			tls_append_frag(record, pfrag, copy);
		}

		size -= copy;
		if (!size) {
last_record:
			tls_push_record_flags = flags;
			if (flags & MSG_MORE) {
				more = true;
				break;
			}

			done = true;
		}

		if (done || record->len >= max_open_record_len ||
		    (record->num_frags >= MAX_SKB_FRAGS - 1)) {
			tls_device_record_close(sk, tls_ctx, record,
						pfrag, record_type);

			rc = tls_push_record(sk,
					     tls_ctx,
					     ctx,
					     record,
					     tls_push_record_flags);
			if (rc < 0)
				break;
		}
	} while (!done);

	tls_ctx->pending_open_record_frags = more;

	if (orig_size - size > 0)
		rc = orig_size - size;

	return rc;
}

int tls_device_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
{
	unsigned char record_type = TLS_RECORD_TYPE_DATA;
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	int rc;

	if (!tls_ctx->zerocopy_sendfile)
		msg->msg_flags &= ~MSG_SPLICE_PAGES;

	mutex_lock(&tls_ctx->tx_lock);
	lock_sock(sk);

	if (unlikely(msg->msg_controllen)) {
		rc = tls_process_cmsg(sk, msg, &record_type);
		if (rc)
			goto out;
	}

	rc = tls_push_data(sk, &msg->msg_iter, size, msg->msg_flags,
			   record_type);

out:
	release_sock(sk);
	mutex_unlock(&tls_ctx->tx_lock);
	return rc;
}

void tls_device_splice_eof(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct iov_iter iter = {};

	if (!tls_is_partially_sent_record(tls_ctx))
		return;

	mutex_lock(&tls_ctx->tx_lock);
	lock_sock(sk);

	if (tls_is_partially_sent_record(tls_ctx)) {
		iov_iter_bvec(&iter, ITER_SOURCE, NULL, 0, 0);
		tls_push_data(sk, &iter, 0, 0, TLS_RECORD_TYPE_DATA);
	}

	release_sock(sk);
	mutex_unlock(&tls_ctx->tx_lock);
}

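/* Map a TCP sequence number (e.g. from a segment being retransmitted) back
 * to the TLS record that covers it, reporting that record's sequence number
 * via @p_record_sn.  retransmit_hint caches the last match so that bursts of
 * retransmissions do not rescan the whole list.
 */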
struct tls_record_info *tls_get_record(struct tls_offload_context_tx *context,
				       u32 seq, u64 *p_record_sn)
{
	u64 record_sn = context->hint_record_sn;
	struct tls_record_info *info, *last;

	info = context->retransmit_hint;
	if (!info ||
	    before(seq, info->end_seq - info->len)) {
		/* if retransmit_hint is irrelevant start
		 * from the beginning of the list
		 */
		info = list_first_entry_or_null(&context->records_list,
						struct tls_record_info, list);
		if (!info)
			return NULL;
		/* send the start_marker record if seq number is before the
		 * tls offload start marker sequence number. This record is
		 * required to handle TCP packets which are before TLS offload
		 * started.
		 * And if it's not start marker, look if this seq number
		 * belongs to the list.
		 */
		if (likely(!tls_record_is_start_marker(info))) {
			/* we have the first record, get the last record to see
			 * if this seq number belongs to the list.
			 */
			last = list_last_entry(&context->records_list,
					       struct tls_record_info, list);

			if (!between(seq, tls_record_start_seq(info),
				     last->end_seq))
				return NULL;
		}
		record_sn = context->unacked_record_sn;
	}

	/* We just need the _rcu for the READ_ONCE() */
	rcu_read_lock();
	list_for_each_entry_from_rcu(info, &context->records_list, list) {
		if (before(seq, info->end_seq)) {
			if (!context->retransmit_hint ||
			    after(info->end_seq,
				  context->retransmit_hint->end_seq)) {
				context->hint_record_sn = record_sn;
				context->retransmit_hint = info;
			}
			*p_record_sn = record_sn;
			goto exit_rcu_unlock;
		}
		record_sn++;
	}
	info = NULL;

exit_rcu_unlock:
	rcu_read_unlock();
	return info;
}
EXPORT_SYMBOL(tls_get_record);

static int tls_device_push_pending_record(struct sock *sk, int flags)
{
	struct iov_iter iter;

	iov_iter_kvec(&iter, ITER_SOURCE, NULL, 0, 0);
	return tls_push_data(sk, &iter, 0, flags, TLS_RECORD_TYPE_DATA);
}

void tls_device_write_space(struct sock *sk, struct tls_context *ctx)
{
	if (tls_is_partially_sent_record(ctx)) {
		gfp_t sk_allocation = sk->sk_allocation;

		WARN_ON_ONCE(sk->sk_write_pending);

		sk->sk_allocation = GFP_ATOMIC;
		tls_push_partial_record(sk, ctx,
					MSG_DONTWAIT | MSG_NOSIGNAL |
					MSG_SENDPAGE_DECRYPTED);
		sk->sk_allocation = sk_allocation;
	}
}

static void tls_device_resync_rx(struct tls_context *tls_ctx,
				 struct sock *sk, u32 seq, u8 *rcd_sn)
{
	struct tls_offload_context_rx *rx_ctx = tls_offload_ctx_rx(tls_ctx);
	struct net_device *netdev;

	trace_tls_device_rx_resync_send(sk, seq, rcd_sn, rx_ctx->resync_type);
	rcu_read_lock();
	netdev = rcu_dereference(tls_ctx->netdev);
	if (netdev)
		netdev->tlsdev_ops->tls_dev_resync(netdev, sk, seq, rcd_sn,
						   TLS_OFFLOAD_CTX_DIR_RX);
	rcu_read_unlock();
	TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSRXDEVICERESYNC);
}

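/* Async resync (TLS_OFFLOAD_SYNC_TYPE_DRIVER_REQ_ASYNC): while the driver's
 * request is marked RESYNC_REQ_ASYNC, record header sequence numbers are
 * only logged and rcd_delta counts the records seen in the meantime.  Once
 * the driver posts the real (synchronous) request, the log is searched for a
 * match and the record-sequence offset is reported back via @rcd_delta.
 */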
static bool
tls_device_rx_resync_async(struct tls_offload_resync_async *resync_async,
			   s64 resync_req, u32 *seq, u16 *rcd_delta)
{
	u32 is_async = resync_req & RESYNC_REQ_ASYNC;
	u32 req_seq = resync_req >> 32;
	u32 req_end = req_seq + ((resync_req >> 16) & 0xffff);
	u16 i;

	*rcd_delta = 0;

	if (is_async) {
		/* shouldn't get to wraparound:
		 * too long in async stage, something bad happened
		 */
		if (WARN_ON_ONCE(resync_async->rcd_delta == USHRT_MAX))
			return false;

		/* asynchronous stage: log all headers seq such that
		 * req_seq <= seq <= end_seq, and wait for real resync request
		 */
		if (before(*seq, req_seq))
			return false;
		if (!after(*seq, req_end) &&
		    resync_async->loglen < TLS_DEVICE_RESYNC_ASYNC_LOGMAX)
			resync_async->log[resync_async->loglen++] = *seq;

		resync_async->rcd_delta++;

		return false;
	}

	/* synchronous stage: check against the logged entries and
	 * proceed to check the next entries if no match was found
	 */
	for (i = 0; i < resync_async->loglen; i++)
		if (req_seq == resync_async->log[i] &&
		    atomic64_try_cmpxchg(&resync_async->req, &resync_req, 0)) {
			*rcd_delta = resync_async->rcd_delta - i;
			*seq = req_seq;
			resync_async->loglen = 0;
			resync_async->rcd_delta = 0;
			return true;
		}

	resync_async->loglen = 0;
	resync_async->rcd_delta = 0;

	if (req_seq == *seq &&
	    atomic64_try_cmpxchg(&resync_async->req,
				 &resync_req, 0))
		return true;

	return false;
}

void tls_device_rx_resync_new_rec(struct sock *sk, u32 rcd_len, u32 seq)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_offload_context_rx *rx_ctx;
	u8 rcd_sn[TLS_MAX_REC_SEQ_SIZE];
	u32 sock_data, is_req_pending;
	struct tls_prot_info *prot;
	s64 resync_req;
	u16 rcd_delta;
	u32 req_seq;

	if (tls_ctx->rx_conf != TLS_HW)
		return;
	if (unlikely(test_bit(TLS_RX_DEV_DEGRADED, &tls_ctx->flags)))
		return;

	prot = &tls_ctx->prot_info;
	rx_ctx = tls_offload_ctx_rx(tls_ctx);
	memcpy(rcd_sn, tls_ctx->rx.rec_seq, prot->rec_seq_size);

	switch (rx_ctx->resync_type) {
	case TLS_OFFLOAD_SYNC_TYPE_DRIVER_REQ:
		resync_req = atomic64_read(&rx_ctx->resync_req);
		req_seq = resync_req >> 32;
		seq += TLS_HEADER_SIZE - 1;
		is_req_pending = resync_req;

		if (likely(!is_req_pending) || req_seq != seq ||
		    !atomic64_try_cmpxchg(&rx_ctx->resync_req, &resync_req, 0))
			return;
		break;
	case TLS_OFFLOAD_SYNC_TYPE_CORE_NEXT_HINT:
		if (likely(!rx_ctx->resync_nh_do_now))
			return;

		/* head of next rec is already in, note that the sock_inq will
		 * include the currently parsed message when called from parser
		 */
		sock_data = tcp_inq(sk);
		if (sock_data > rcd_len) {
			trace_tls_device_rx_resync_nh_delay(sk, sock_data,
							    rcd_len);
			return;
		}

		rx_ctx->resync_nh_do_now = 0;
		seq += rcd_len;
		tls_bigint_increment(rcd_sn, prot->rec_seq_size);
		break;
	case TLS_OFFLOAD_SYNC_TYPE_DRIVER_REQ_ASYNC:
		resync_req = atomic64_read(&rx_ctx->resync_async->req);
		is_req_pending = resync_req;
		if (likely(!is_req_pending))
			return;

		if (!tls_device_rx_resync_async(rx_ctx->resync_async,
						resync_req, &seq, &rcd_delta))
			return;
		tls_bigint_subtract(rcd_sn, rcd_delta);
		break;
	}

	tls_device_resync_rx(tls_ctx, sk, seq, rcd_sn);
}

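/* Core-driven resync for TLS_OFFLOAD_SYNC_TYPE_CORE_NEXT_HINT devices.
 * After enough consecutive decryption failures (the threshold grows
 * exponentially up to TLS_DEVICE_RESYNC_NH_MAX_IVAL), either mark
 * resync_nh_do_now so the parser resyncs on the next record header already
 * queued in the socket, or, if that header has not arrived yet, request a
 * resync right away at copied_seq with rec_seq + 1.
 */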
static void tls_device_core_ctrl_rx_resync(struct tls_context *tls_ctx,
					   struct tls_offload_context_rx *ctx,
					   struct sock *sk, struct sk_buff *skb)
{
	struct strp_msg *rxm;

	/* device will request resyncs by itself based on stream scan */
	if (ctx->resync_type != TLS_OFFLOAD_SYNC_TYPE_CORE_NEXT_HINT)
		return;
	/* already scheduled */
	if (ctx->resync_nh_do_now)
		return;
	/* seen decrypted fragments since last fully-failed record */
	if (ctx->resync_nh_reset) {
		ctx->resync_nh_reset = 0;
		ctx->resync_nh.decrypted_failed = 1;
		ctx->resync_nh.decrypted_tgt = TLS_DEVICE_RESYNC_NH_START_IVAL;
		return;
	}

	if (++ctx->resync_nh.decrypted_failed <= ctx->resync_nh.decrypted_tgt)
		return;

	/* doing resync, bump the next target in case it fails */
	if (ctx->resync_nh.decrypted_tgt < TLS_DEVICE_RESYNC_NH_MAX_IVAL)
		ctx->resync_nh.decrypted_tgt *= 2;
	else
		ctx->resync_nh.decrypted_tgt += TLS_DEVICE_RESYNC_NH_MAX_IVAL;

	rxm = strp_msg(skb);

	/* head of next rec is already in, parser will sync for us */
	if (tcp_inq(sk) > rxm->full_len) {
		trace_tls_device_rx_resync_nh_schedule(sk);
		ctx->resync_nh_do_now = 1;
	} else {
		struct tls_prot_info *prot = &tls_ctx->prot_info;
		u8 rcd_sn[TLS_MAX_REC_SEQ_SIZE];

		memcpy(rcd_sn, tls_ctx->rx.rec_seq, prot->rec_seq_size);
		tls_bigint_increment(rcd_sn, prot->rec_seq_size);

		tls_device_resync_rx(tls_ctx, sk, tcp_sk(sk)->copied_seq,
				     rcd_sn);
	}
}

static int
tls_device_reencrypt(struct sock *sk, struct tls_context *tls_ctx)
{
	struct tls_sw_context_rx *sw_ctx = tls_sw_ctx_rx(tls_ctx);
	const struct tls_cipher_desc *cipher_desc;
	int err, offset, copy, data_len, pos;
	struct sk_buff *skb, *skb_iter;
	struct scatterlist sg[1];
	struct strp_msg *rxm;
	char *orig_buf, *buf;

	cipher_desc = get_cipher_desc(tls_ctx->crypto_recv.info.cipher_type);
	DEBUG_NET_WARN_ON_ONCE(!cipher_desc || !cipher_desc->offloadable);

	rxm = strp_msg(tls_strp_msg(sw_ctx));
	orig_buf = kmalloc(rxm->full_len + TLS_HEADER_SIZE + cipher_desc->iv,
			   sk->sk_allocation);
	if (!orig_buf)
		return -ENOMEM;
	buf = orig_buf;

	err = tls_strp_msg_cow(sw_ctx);
	if (unlikely(err))
		goto free_buf;

	skb = tls_strp_msg(sw_ctx);
	rxm = strp_msg(skb);
	offset = rxm->offset;

	sg_init_table(sg, 1);
	sg_set_buf(&sg[0], buf,
		   rxm->full_len + TLS_HEADER_SIZE + cipher_desc->iv);
	err = skb_copy_bits(skb, offset, buf, TLS_HEADER_SIZE + cipher_desc->iv);
	if (err)
		goto free_buf;

	/* We are interested only in the decrypted data not the auth */
	err = decrypt_skb(sk, sg);
	if (err != -EBADMSG)
		goto free_buf;
	else
		err = 0;

	data_len = rxm->full_len - cipher_desc->tag;

	if (skb_pagelen(skb) > offset) {
		copy = min_t(int, skb_pagelen(skb) - offset, data_len);

		if (skb->decrypted) {
			err = skb_store_bits(skb, offset, buf, copy);
			if (err)
				goto free_buf;
		}

		offset += copy;
		buf += copy;
	}

	pos = skb_pagelen(skb);
	skb_walk_frags(skb, skb_iter) {
		int frag_pos;

		/* Practically all frags must belong to msg if reencrypt
		 * is needed with current strparser and coalescing logic,
		 * but strparser may "get optimized", so let's be safe.
		 */
		if (pos + skb_iter->len <= offset)
			goto done_with_frag;
		if (pos >= data_len + rxm->offset)
			break;

		frag_pos = offset - pos;
		copy = min_t(int, skb_iter->len - frag_pos,
			     data_len + rxm->offset - offset);

		if (skb_iter->decrypted) {
			err = skb_store_bits(skb_iter, frag_pos, buf, copy);
			if (err)
				goto free_buf;
		}

		offset += copy;
		buf += copy;
done_with_frag:
		pos += skb_iter->len;
	}

free_buf:
	kfree(orig_buf);
	return err;
}

int tls_device_decrypted(struct sock *sk, struct tls_context *tls_ctx)
{
	struct tls_offload_context_rx *ctx = tls_offload_ctx_rx(tls_ctx);
	struct tls_sw_context_rx *sw_ctx = tls_sw_ctx_rx(tls_ctx);
	struct sk_buff *skb = tls_strp_msg(sw_ctx);
	struct strp_msg *rxm = strp_msg(skb);
	int is_decrypted, is_encrypted;

	if (!tls_strp_msg_mixed_decrypted(sw_ctx)) {
		is_decrypted = skb->decrypted;
		is_encrypted = !is_decrypted;
	} else {
		is_decrypted = 0;
		is_encrypted = 0;
	}

	trace_tls_device_decrypted(sk, tcp_sk(sk)->copied_seq - rxm->full_len,
				   tls_ctx->rx.rec_seq, rxm->full_len,
				   is_encrypted, is_decrypted);

	if (unlikely(test_bit(TLS_RX_DEV_DEGRADED, &tls_ctx->flags))) {
		if (likely(is_encrypted || is_decrypted))
			return is_decrypted;

		/* After tls_device_down disables the offload, the next SKB will
		 * likely have initial fragments decrypted, and final ones not
		 * decrypted. We need to reencrypt that single SKB.
		 */
		return tls_device_reencrypt(sk, tls_ctx);
	}

	/* Return immediately if the record is either entirely plaintext or
	 * entirely ciphertext. Otherwise handle reencrypt partially decrypted
	 * record.
	 */
	if (is_decrypted) {
		ctx->resync_nh_reset = 1;
		return is_decrypted;
	}
	if (is_encrypted) {
		tls_device_core_ctrl_rx_resync(tls_ctx, ctx, sk, skb);
		return 0;
	}

	ctx->resync_nh_reset = 1;
	return tls_device_reencrypt(sk, tls_ctx);
}

static void tls_device_attach(struct tls_context *ctx, struct sock *sk,
			      struct net_device *netdev)
{
	if (sk->sk_destruct != tls_device_sk_destruct) {
		refcount_set(&ctx->refcount, 1);
		dev_hold(netdev);
		RCU_INIT_POINTER(ctx->netdev, netdev);
		spin_lock_irq(&tls_device_lock);
		list_add_tail(&ctx->list, &tls_device_list);
		spin_unlock_irq(&tls_device_lock);

		ctx->sk_destruct = sk->sk_destruct;
		smp_store_release(&sk->sk_destruct, tls_device_sk_destruct);
	}
}

static struct tls_offload_context_tx *alloc_offload_ctx_tx(struct tls_context *ctx)
{
	struct tls_offload_context_tx *offload_ctx;
	__be64 rcd_sn;

	offload_ctx = kzalloc(sizeof(*offload_ctx), GFP_KERNEL);
	if (!offload_ctx)
		return NULL;

	INIT_WORK(&offload_ctx->destruct_work, tls_device_tx_del_task);
	INIT_LIST_HEAD(&offload_ctx->records_list);
	spin_lock_init(&offload_ctx->lock);
	sg_init_table(offload_ctx->sg_tx_data,
		      ARRAY_SIZE(offload_ctx->sg_tx_data));

	/* start at rec_seq - 1 to account for the start marker record */
	memcpy(&rcd_sn, ctx->tx.rec_seq, sizeof(rcd_sn));
	offload_ctx->unacked_record_sn = be64_to_cpu(rcd_sn) - 1;

	offload_ctx->ctx = ctx;

	return offload_ctx;
}

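/* Install TX offload on a connected TLS 1.2 socket.  In outline: resolve the
 * netdev, validate its features and the cipher, seed ctx->tx IV/rec_seq,
 * insert a zero-length "start marker" record at the current write_seq,
 * register the clean_acked_data hook, then call tls_dev_add() under
 * device_offload_lock and switch sk_validate_xmit_skb to the offload-aware
 * handler.
 */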
int tls_set_device_offload(struct sock *sk)
{
	struct tls_record_info *start_marker_record;
	struct tls_offload_context_tx *offload_ctx;
	const struct tls_cipher_desc *cipher_desc;
	struct tls_crypto_info *crypto_info;
	struct tls_prot_info *prot;
	struct net_device *netdev;
	struct tls_context *ctx;
	struct sk_buff *skb;
	char *iv, *rec_seq;
	int rc;

	ctx = tls_get_ctx(sk);
	prot = &ctx->prot_info;

	if (ctx->priv_ctx_tx)
		return -EEXIST;

	netdev = get_netdev_for_sock(sk);
	if (!netdev) {
		pr_err_ratelimited("%s: netdev not found\n", __func__);
		return -EINVAL;
	}

	if (!(netdev->features & NETIF_F_HW_TLS_TX)) {
		rc = -EOPNOTSUPP;
		goto release_netdev;
	}

	crypto_info = &ctx->crypto_send.info;
	if (crypto_info->version != TLS_1_2_VERSION) {
		rc = -EOPNOTSUPP;
		goto release_netdev;
	}

	cipher_desc = get_cipher_desc(crypto_info->cipher_type);
	if (!cipher_desc || !cipher_desc->offloadable) {
		rc = -EINVAL;
		goto release_netdev;
	}

	rc = init_prot_info(prot, crypto_info, cipher_desc);
	if (rc)
		goto release_netdev;

	iv = crypto_info_iv(crypto_info, cipher_desc);
	rec_seq = crypto_info_rec_seq(crypto_info, cipher_desc);

	memcpy(ctx->tx.iv + cipher_desc->salt, iv, cipher_desc->iv);
	memcpy(ctx->tx.rec_seq, rec_seq, cipher_desc->rec_seq);

	start_marker_record = kmalloc(sizeof(*start_marker_record), GFP_KERNEL);
	if (!start_marker_record) {
		rc = -ENOMEM;
		goto release_netdev;
	}

	offload_ctx = alloc_offload_ctx_tx(ctx);
	if (!offload_ctx) {
		rc = -ENOMEM;
		goto free_marker_record;
	}

	rc = tls_sw_fallback_init(sk, offload_ctx, crypto_info);
	if (rc)
		goto free_offload_ctx;

	start_marker_record->end_seq = tcp_sk(sk)->write_seq;
	start_marker_record->len = 0;
	start_marker_record->num_frags = 0;
	list_add_tail(&start_marker_record->list, &offload_ctx->records_list);

	clean_acked_data_enable(inet_csk(sk), &tls_icsk_clean_acked);
	ctx->push_pending_record = tls_device_push_pending_record;

	/* TLS offload is greatly simplified if we don't send
	 * SKBs where only part of the payload needs to be encrypted.
	 * So mark the last skb in the write queue as end of record.
	 */
	skb = tcp_write_queue_tail(sk);
	if (skb)
		TCP_SKB_CB(skb)->eor = 1;

	/* Avoid offloading if the device is down
	 * We don't want to offload new flows after
	 * the NETDEV_DOWN event
	 *
	 * device_offload_lock is taken in tls_device's NETDEV_DOWN
	 * handler thus protecting from the device going down before
	 * ctx was added to tls_device_list.
	 */
	down_read(&device_offload_lock);
	if (!(netdev->flags & IFF_UP)) {
		rc = -EINVAL;
		goto release_lock;
	}

	ctx->priv_ctx_tx = offload_ctx;
	rc = netdev->tlsdev_ops->tls_dev_add(netdev, sk, TLS_OFFLOAD_CTX_DIR_TX,
					     &ctx->crypto_send.info,
					     tcp_sk(sk)->write_seq);
	trace_tls_device_offload_set(sk, TLS_OFFLOAD_CTX_DIR_TX,
				     tcp_sk(sk)->write_seq, rec_seq, rc);
	if (rc)
		goto release_lock;

	tls_device_attach(ctx, sk, netdev);
	up_read(&device_offload_lock);

	/* following this assignment tls_is_skb_tx_device_offloaded
	 * will return true and the context might be accessed
	 * by the netdev's xmit function.
	 */
	smp_store_release(&sk->sk_validate_xmit_skb, tls_validate_xmit_skb);
	dev_put(netdev);

	return 0;

release_lock:
	up_read(&device_offload_lock);
	clean_acked_data_disable(inet_csk(sk));
	crypto_free_aead(offload_ctx->aead_send);
free_offload_ctx:
	kfree(offload_ctx);
	ctx->priv_ctx_tx = NULL;
free_marker_record:
	kfree(start_marker_record);
release_netdev:
	dev_put(netdev);
	return rc;
}

int tls_set_device_offload_rx(struct sock *sk, struct tls_context *ctx)
{
	struct tls12_crypto_info_aes_gcm_128 *info;
	struct tls_offload_context_rx *context;
	struct net_device *netdev;
	int rc = 0;

	if (ctx->crypto_recv.info.version != TLS_1_2_VERSION)
		return -EOPNOTSUPP;

	netdev = get_netdev_for_sock(sk);
	if (!netdev) {
		pr_err_ratelimited("%s: netdev not found\n", __func__);
		return -EINVAL;
	}

	if (!(netdev->features & NETIF_F_HW_TLS_RX)) {
		rc = -EOPNOTSUPP;
		goto release_netdev;
	}

	/* Avoid offloading if the device is down
	 * We don't want to offload new flows after
	 * the NETDEV_DOWN event
	 *
	 * device_offload_lock is taken in tls_device's NETDEV_DOWN
	 * handler thus protecting from the device going down before
	 * ctx was added to tls_device_list.
	 */
	down_read(&device_offload_lock);
	if (!(netdev->flags & IFF_UP)) {
		rc = -EINVAL;
		goto release_lock;
	}

	context = kzalloc(sizeof(*context), GFP_KERNEL);
	if (!context) {
		rc = -ENOMEM;
		goto release_lock;
	}
	context->resync_nh_reset = 1;

	ctx->priv_ctx_rx = context;
	rc = tls_set_sw_offload(sk, 0);
	if (rc)
		goto release_ctx;

	rc = netdev->tlsdev_ops->tls_dev_add(netdev, sk, TLS_OFFLOAD_CTX_DIR_RX,
					     &ctx->crypto_recv.info,
					     tcp_sk(sk)->copied_seq);
	info = (void *)&ctx->crypto_recv.info;
	trace_tls_device_offload_set(sk, TLS_OFFLOAD_CTX_DIR_RX,
				     tcp_sk(sk)->copied_seq, info->rec_seq, rc);
	if (rc)
		goto free_sw_resources;

	tls_device_attach(ctx, sk, netdev);
	up_read(&device_offload_lock);

	dev_put(netdev);

	return 0;

free_sw_resources:
	up_read(&device_offload_lock);
	tls_sw_free_resources_rx(sk);
	down_read(&device_offload_lock);
release_ctx:
	ctx->priv_ctx_rx = NULL;
release_lock:
	up_read(&device_offload_lock);
release_netdev:
	dev_put(netdev);
	return rc;
}

void tls_device_offload_cleanup_rx(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct net_device *netdev;

	down_read(&device_offload_lock);
	netdev = rcu_dereference_protected(tls_ctx->netdev,
					   lockdep_is_held(&device_offload_lock));
	if (!netdev)
		goto out;

	netdev->tlsdev_ops->tls_dev_del(netdev, tls_ctx,
					TLS_OFFLOAD_CTX_DIR_RX);

	if (tls_ctx->tx_conf != TLS_HW) {
		dev_put(netdev);
		rcu_assign_pointer(tls_ctx->netdev, NULL);
	} else {
		set_bit(TLS_RX_DEV_CLOSED, &tls_ctx->flags);
	}
out:
	up_read(&device_offload_lock);
	tls_sw_release_resources_rx(sk);
}

static int tls_device_down(struct net_device *netdev)
{
	struct tls_context *ctx, *tmp;
	unsigned long flags;
	LIST_HEAD(list);

	/* Request a write lock to block new offload attempts */
	down_write(&device_offload_lock);

	spin_lock_irqsave(&tls_device_lock, flags);
	list_for_each_entry_safe(ctx, tmp, &tls_device_list, list) {
		struct net_device *ctx_netdev =
			rcu_dereference_protected(ctx->netdev,
						  lockdep_is_held(&device_offload_lock));

		if (ctx_netdev != netdev ||
		    !refcount_inc_not_zero(&ctx->refcount))
			continue;

		list_move(&ctx->list, &list);
	}
	spin_unlock_irqrestore(&tls_device_lock, flags);

	list_for_each_entry_safe(ctx, tmp, &list, list) {
		/* Stop offloaded TX and switch to the fallback.
		 * tls_is_skb_tx_device_offloaded will return false.
		 */
		WRITE_ONCE(ctx->sk->sk_validate_xmit_skb, tls_validate_xmit_skb_sw);

		/* Stop the RX and TX resync.
		 * tls_dev_resync must not be called after tls_dev_del.
		 */
		rcu_assign_pointer(ctx->netdev, NULL);

		/* Start skipping the RX resync logic completely. */
		set_bit(TLS_RX_DEV_DEGRADED, &ctx->flags);

		/* Sync with inflight packets. After this point:
		 * TX: no non-encrypted packets will be passed to the driver.
		 * RX: resync requests from the driver will be ignored.
		 */
		synchronize_net();

		/* Release the offload context on the driver side. */
		if (ctx->tx_conf == TLS_HW)
			netdev->tlsdev_ops->tls_dev_del(netdev, ctx,
							TLS_OFFLOAD_CTX_DIR_TX);
		if (ctx->rx_conf == TLS_HW &&
		    !test_bit(TLS_RX_DEV_CLOSED, &ctx->flags))
			netdev->tlsdev_ops->tls_dev_del(netdev, ctx,
							TLS_OFFLOAD_CTX_DIR_RX);

		dev_put(netdev);

		/* Move the context to a separate list for two reasons:
		 * 1. When the context is deallocated, list_del is called.
		 * 2. It's no longer an offloaded context, so we don't want to
		 *    run offload-specific code on this context.
		 */
		spin_lock_irqsave(&tls_device_lock, flags);
		list_move_tail(&ctx->list, &tls_device_down_list);
		spin_unlock_irqrestore(&tls_device_lock, flags);

		/* Device contexts for RX and TX will be freed on sk_destruct
		 * by tls_device_free_ctx. rx_conf and tx_conf stay in TLS_HW.
		 * Now release the ref taken above.
		 */
		if (refcount_dec_and_test(&ctx->refcount)) {
			/* sk_destruct ran after tls_device_down took a ref, and
			 * it returned early. Complete the destruction here.
			 */
			list_del(&ctx->list);
			tls_device_free_ctx(ctx);
		}
	}

	up_write(&device_offload_lock);

	flush_workqueue(destruct_wq);

	return NOTIFY_DONE;
}

static int tls_dev_event(struct notifier_block *this, unsigned long event,
			 void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	if (!dev->tlsdev_ops &&
	    !(dev->features & (NETIF_F_HW_TLS_RX | NETIF_F_HW_TLS_TX)))
		return NOTIFY_DONE;

	switch (event) {
	case NETDEV_REGISTER:
	case NETDEV_FEAT_CHANGE:
		if (netif_is_bond_master(dev))
			return NOTIFY_DONE;
		if ((dev->features & NETIF_F_HW_TLS_RX) &&
		    !dev->tlsdev_ops->tls_dev_resync)
			return NOTIFY_BAD;

		if (dev->tlsdev_ops &&
		    dev->tlsdev_ops->tls_dev_add &&
		    dev->tlsdev_ops->tls_dev_del)
			return NOTIFY_DONE;
		else
			return NOTIFY_BAD;
	case NETDEV_DOWN:
		return tls_device_down(dev);
	}
	return NOTIFY_DONE;
}

static struct notifier_block tls_dev_notifier = {
	.notifier_call = tls_dev_event,
};

int __init tls_device_init(void)
{
	int err;

	dummy_page = alloc_page(GFP_KERNEL);
	if (!dummy_page)
		return -ENOMEM;

	destruct_wq = alloc_workqueue("ktls_device_destruct", 0, 0);
	if (!destruct_wq) {
		err = -ENOMEM;
		goto err_free_dummy;
	}

	err = register_netdevice_notifier(&tls_dev_notifier);
	if (err)
		goto err_destroy_wq;

	return 0;

err_destroy_wq:
	destroy_workqueue(destruct_wq);
err_free_dummy:
	put_page(dummy_page);
	return err;
}

void __exit tls_device_cleanup(void)
{
	unregister_netdevice_notifier(&tls_dev_notifier);
	destroy_workqueue(destruct_wq);
	clean_acked_data_flush();
	put_page(dummy_page);
}