net: tls: Cancel RX async resync request on rcd_delta overflow
Author:     Shahar Shitrit <shshitrit@nvidia.com>
AuthorDate: Sun, 26 Oct 2025 20:03:02 +0000 (22:03 +0200)
Commit:     Greg Kroah-Hartman <gregkh@linuxfoundation.org>
CommitDate: Mon, 1 Dec 2025 10:46:04 +0000 (11:46 +0100)
[ Upstream commit c15d5c62ab313c19121f10e25d4fec852bd1c40c ]

When a netdev issues an RX async resync request for a TLS connection,
the TLS module handles it by logging record headers and attempting to
match them to the tcp_sn provided by the device. If a match is found,
the TLS module approves the tcp_sn for resynchronization.

While waiting for a device response, the TLS module also increments
rcd_delta each time a new TLS record is received, tracking the distance
from the original resync request.
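
As a rough illustration of that bookkeeping (a simplified sketch, not
the in-tree tls_device_rx_resync_async() code: the record-header log
and the RESYNC_REQ flag encoding are left out, and only the req and
rcd_delta fields of struct tls_offload_resync_async are assumed):

    #include <linux/kernel.h>
    #include <net/tls.h>

    /* Illustrative only: account for one more TLS record received while
     * an async resync request is still outstanding. rcd_delta reaching
     * USHRT_MAX is the wraparound condition that the WARN() discussed
     * below guards against.
     */
    static bool
    tls_resync_async_track_record(struct tls_offload_resync_async *resync_async)
    {
            /* no request pending, nothing to track */
            if (!atomic64_read(&resync_async->req))
                    return false;

            /* shouldn't get to wraparound: too long in the async stage */
            if (WARN_ON_ONCE(resync_async->rcd_delta == USHRT_MAX))
                    return false;

            /* one more record seen since the device asked for resync */
            resync_async->rcd_delta++;
            return true;
    }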

However, if the device response is delayed or fails (e.g. due to an
unstable connection causing the device to lose tracking, hardware
errors, resource exhaustion, etc.), the TLS module keeps logging and
incrementing, which can lead to a WARN() when rcd_delta exceeds its
threshold.

To address this, introduce tls_offload_rx_resync_async_request_cancel()
to explicitly cancel resync requests when a device response failure is
detected. Also call this helper as a final safeguard when rcd_delta
crosses its threshold, as reaching this point implies that an earlier
cancellation did not occur.
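
A rough driver-side sketch of such a cancellation call site follows.
The error-path function name and the way the response failure is
detected are hypothetical; only the new helper, the existing
tls_get_ctx()/tls_offload_ctx_rx() accessors and the resync_async
pointer in struct tls_offload_context_rx are assumed:

    #include <net/tls.h>

    /* Hypothetical driver error path: the device signalled that it will
     * not (or can no longer) answer the outstanding resync request.
     */
    static void drv_ktls_rx_resync_failed(struct sock *sk)
    {
            struct tls_context *tls_ctx = tls_get_ctx(sk);
            struct tls_offload_context_rx *rx_ctx =
                    tls_offload_ctx_rx(tls_ctx);

            /* drop the request so the TLS core stops logging record
             * headers and incrementing rcd_delta for it
             */
            tls_offload_rx_resync_async_request_cancel(rx_ctx->resync_async);
    }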

Signed-off-by: Shahar Shitrit <shshitrit@nvidia.com>
Reviewed-by: Sabrina Dubroca <sd@queasysnail.net>
Signed-off-by: Tariq Toukan <tariqt@nvidia.com>
Link: https://patch.msgid.link/1761508983-937977-3-git-send-email-tariqt@nvidia.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Signed-off-by: Sasha Levin <sashal@kernel.org>
include/net/tls.h
net/tls/tls_device.c

diff --git a/include/net/tls.h b/include/net/tls.h
index b90f3b675c3c438a09983d6d770040a19a2d1497..c7bcdb3afad756418093d79fb871cf49fc535b8f 100644
--- a/include/net/tls.h
+++ b/include/net/tls.h
@@ -467,6 +467,12 @@ tls_offload_rx_resync_async_request_end(struct tls_offload_resync_async *resync_
        atomic64_set(&resync_async->req, ((u64)ntohl(seq) << 32) | RESYNC_REQ);
 }
 
+static inline void
+tls_offload_rx_resync_async_request_cancel(struct tls_offload_resync_async *resync_async)
+{
+       atomic64_set(&resync_async->req, 0);
+}
+
 static inline void
 tls_offload_rx_resync_set_type(struct sock *sk, enum tls_offload_sync_type type)
 {
diff --git a/net/tls/tls_device.c b/net/tls/tls_device.c
index a82fdcf199690fae5dc63cb100fa5e43c04271dd..bb14d9b467f28b20e1206c61d76d4ea87ca732f5 100644
--- a/net/tls/tls_device.c
+++ b/net/tls/tls_device.c
@@ -723,8 +723,10 @@ tls_device_rx_resync_async(struct tls_offload_resync_async *resync_async,
                /* shouldn't get to wraparound:
                 * too long in async stage, something bad happened
                 */
-               if (WARN_ON_ONCE(resync_async->rcd_delta == USHRT_MAX))
+               if (WARN_ON_ONCE(resync_async->rcd_delta == USHRT_MAX)) {
+                       tls_offload_rx_resync_async_request_cancel(resync_async);
                        return false;
+               }
 
                /* asynchronous stage: log all headers seq such that
                 * req_seq <= seq <= end_seq, and wait for real resync request