net: tls: Cancel RX async resync request on rcd_delta overflow
author		Shahar Shitrit <shshitrit@nvidia.com>
		Sun, 26 Oct 2025 20:03:02 +0000 (22:03 +0200)
committer	Greg Kroah-Hartman <gregkh@linuxfoundation.org>
		Mon, 1 Dec 2025 10:43:36 +0000 (11:43 +0100)
[ Upstream commit c15d5c62ab313c19121f10e25d4fec852bd1c40c ]

When a netdev issues a RX async resync request for a TLS connection,
the TLS module handles it by logging record headers and attempting to
match them to the tcp_sn provided by the device. If a match is found,
the TLS module approves the tcp_sn for resynchronization.

While waiting for a device response, the TLS module also increments
rcd_delta each time a new TLS record is received, tracking the distance
from the original resync request.
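
To make that bookkeeping concrete, below is a minimal, self-contained
sketch of the log-and-count idea described above. It is illustrative
only: the structure, constant and function names are simplified
stand-ins, not the actual fields of struct tls_offload_resync_async or
the kernel's log-size bound.

#include <stdbool.h>

/* Simplified model of the async resync bookkeeping (illustrative only). */
#define RESYNC_LOG_MAX 16	/* stand-in for the kernel's log bound */

struct resync_async_model {
	unsigned int log[RESYNC_LOG_MAX];	/* record-start TCP seqs seen */
	unsigned short loglen;
	unsigned short rcd_delta;	/* records seen since the request */
};

/* Called for each new TLS record while the device response is pending. */
static void resync_log_record(struct resync_async_model *a, unsigned int rcd_seq)
{
	if (a->loglen < RESYNC_LOG_MAX)
		a->log[a->loglen++] = rcd_seq;
	a->rcd_delta++;		/* distance from the original request grows */
}

/* Called when the device answers with a tcp_sn: approve it if it was logged. */
static bool resync_match_device_sn(struct resync_async_model *a, unsigned int tcp_sn)
{
	unsigned short i;

	for (i = 0; i < a->loglen; i++)
		if (a->log[i] == tcp_sn)
			return true;	/* resynchronize at tcp_sn */
	return false;
}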

However, if the device response is delayed or fails (e.g. due to an
unstable connection causing the device to lose tracking, hardware
errors, resource exhaustion, etc.), the TLS module keeps logging and
incrementing, which can lead to a WARN() once rcd_delta exceeds the
threshold.

To address this, introduce tls_offload_rx_resync_async_request_cancel()
to explicitly cancel resync requests when a device response failure is
detected. Call this helper also as a final safeguard when rcd_delta
crosses its threshold, as reaching this point implies that earlier
cancellation did not occur.
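
For illustration, a driver-side caller (not part of this patch) might
use the new helper roughly as follows; the mydrv_* names and the layout
of the driver's private RX state are hypothetical:

/* Hypothetical driver error path: the device reported that it cannot
 * complete the pending RX resync request, so drop the request instead
 * of letting the TLS module keep logging records against it.
 */
static void mydrv_ktls_rx_resync_failed(struct mydrv_tls_rx *priv_rx)
{
	struct tls_offload_resync_async *resync_async = priv_rx->resync_async;

	/* New helper introduced by this patch */
	tls_offload_rx_resync_async_request_cancel(resync_async);
}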

Signed-off-by: Shahar Shitrit <shshitrit@nvidia.com>
Reviewed-by: Sabrina Dubroca <sd@queasysnail.net>
Signed-off-by: Tariq Toukan <tariqt@nvidia.com>
Link: https://patch.msgid.link/1761508983-937977-3-git-send-email-tariqt@nvidia.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Signed-off-by: Sasha Levin <sashal@kernel.org>
include/net/tls.h
net/tls/tls_device.c

diff --git a/include/net/tls.h b/include/net/tls.h
index 181173e62a0687f7591eccd8beca3a909b73968c..3f4235cc0207c4f512bbde75a79c0b67449751bd 100644
--- a/include/net/tls.h
+++ b/include/net/tls.h
@@ -464,6 +464,12 @@ tls_offload_rx_resync_async_request_end(struct tls_offload_resync_async *resync_
        atomic64_set(&resync_async->req, ((u64)ntohl(seq) << 32) | RESYNC_REQ);
 }
 
+static inline void
+tls_offload_rx_resync_async_request_cancel(struct tls_offload_resync_async *resync_async)
+{
+       atomic64_set(&resync_async->req, 0);
+}
+
 static inline void
 tls_offload_rx_resync_set_type(struct sock *sk, enum tls_offload_sync_type type)
 {
diff --git a/net/tls/tls_device.c b/net/tls/tls_device.c
index dc063c2c7950edc28961d877712827bc56bb58e6..0af7b3c529678f11aca3bf590345105864b38a6a 100644
--- a/net/tls/tls_device.c
+++ b/net/tls/tls_device.c
@@ -721,8 +721,10 @@ tls_device_rx_resync_async(struct tls_offload_resync_async *resync_async,
                /* shouldn't get to wraparound:
                 * too long in async stage, something bad happened
                 */
-               if (WARN_ON_ONCE(resync_async->rcd_delta == USHRT_MAX))
+               if (WARN_ON_ONCE(resync_async->rcd_delta == USHRT_MAX)) {
+                       tls_offload_rx_resync_async_request_cancel(resync_async);
                        return false;
+               }
 
                /* asynchronous stage: log all headers seq such that
                 * req_seq <= seq <= end_seq, and wait for real resync request
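
As a closing note on the new helper: request_end() in the first hunk
encodes a pending request into the atomic64 req field as the requested
TCP sequence in the upper 32 bits OR'ed with a request flag, so
cancelling is a single atomic store of 0 that clears both the sequence
and the pending-request indication at once:

	/* pending request (tls_offload_rx_resync_async_request_end) */
	atomic64_set(&resync_async->req, ((u64)ntohl(seq) << 32) | RESYNC_REQ);

	/* cancelled / nothing pending (new helper in this patch) */
	atomic64_set(&resync_async->req, 0);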