net/tls: replace the sleeping lock around RX resync with a bit lock
author    Jakub Kicinski <jakub.kicinski@netronome.com>
          Tue, 4 Jun 2019 19:00:12 +0000 (12:00 -0700)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
          Tue, 11 Jun 2019 10:20:49 +0000 (12:20 +0200)
[ Upstream commit e52972c11d6b1262964db96d65934196db621685 ]

Commit 38030d7cb779 ("net/tls: avoid NULL-deref on resync during device removal")
tried to fix a potential NULL-dereference by taking the
context rwsem.  Unfortunately, the RX resync may get called
from soft IRQ context, so we can't use the rwsem to protect
against the device disappearing.  Because we are guaranteed
there can be only one resync at a time (it's called from the
strparser), use a bit to indicate a resync is busy and make
device removal wait for the bit to get cleared.

Note that struct tls_context already contains a leftover
"flags" field which we can reuse for this bit.

Fixes: 4799ac81e52a ("tls: Add rx inline crypto offload")
Signed-off-by: Jakub Kicinski <jakub.kicinski@netronome.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
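
As a quick illustration of the locking scheme (not part of the patch), the
sketch below is a minimal userspace analogue of the bit lock introduced
here, using C11 atomics in place of the kernel's test_and_set_bit() /
clear_bit_unlock() / test_bit(); every name in it is made up for the
example.

    /*
     * Minimal userspace analogue of the bit lock in this patch, using
     * C11 atomics instead of the kernel bitops.  All names illustrative.
     */
    #include <stdatomic.h>
    #include <stdio.h>
    #include <unistd.h>

    #define RX_SYNC_RUNNING 0               /* bit number, like TLS_RX_SYNC_RUNNING */

    static _Atomic unsigned long flags;     /* like tls_ctx->flags */
    static int dummy_dev;
    static void *_Atomic netdev = &dummy_dev;   /* like tls_ctx->netdev */

    /* Resync side: must not sleep, so it only trylocks the bit. */
    static void resync_rx(void)
    {
        /* test_and_set_bit(): atomic RMW that returns the old bit value */
        if (atomic_fetch_or(&flags, 1UL << RX_SYNC_RUNNING) &
            (1UL << RX_SYNC_RUNNING))
            return;                         /* a resync is already in flight */

        void *dev = atomic_load(&netdev);   /* READ_ONCE(ctx->netdev) */
        if (dev)
            puts("calling the driver's resync op");

        /* clear_bit_unlock(): drop the bit with release semantics */
        atomic_fetch_and_explicit(&flags, ~(1UL << RX_SYNC_RUNNING),
                                  memory_order_release);
    }

    /* Removal side: may sleep, so it just waits for the bit to clear. */
    static void device_down(void)
    {
        atomic_store(&netdev, NULL);        /* WRITE_ONCE(ctx->netdev, NULL) */
        while (atomic_load(&flags) & (1UL << RX_SYNC_RUNNING))
            usleep(100);                    /* cf. usleep_range(10, 200) */
        puts("no resync in flight; safe to release the device");
    }

    int main(void)
    {
        resync_rx();
        device_down();
        return 0;
    }

The point of the scheme is that the resync side never blocks: it either
wins the bit and runs, or bails out.  Only the removal side, which is
allowed to sleep, ever waits.
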
include/net/tls.h
net/tls/tls_device.c

diff --git a/include/net/tls.h b/include/net/tls.h
index c423b7d0b6abc065352da8fc57a5f5d9614eea66..95411057589146c810f56b65c3c2d148c672055c 100644
--- a/include/net/tls.h
+++ b/include/net/tls.h
@@ -161,6 +161,10 @@ enum {
        TLS_PENDING_CLOSED_RECORD
 };
 
+enum tls_context_flags {
+       TLS_RX_SYNC_RUNNING = 0,
+};
+
 struct cipher_context {
        u16 prepend_size;
        u16 tag_size;
diff --git a/net/tls/tls_device.c b/net/tls/tls_device.c
index 8035bf495eb2c21320dc98b7fd2b0919559e7eef..ead29c2aefa762b8aff13eaab72b55c2671e276e 100644
--- a/net/tls/tls_device.c
+++ b/net/tls/tls_device.c
@@ -545,10 +545,22 @@ static int tls_device_push_pending_record(struct sock *sk, int flags)
        return tls_push_data(sk, &msg_iter, 0, flags, TLS_RECORD_TYPE_DATA);
 }
 
+static void tls_device_resync_rx(struct tls_context *tls_ctx,
+                                struct sock *sk, u32 seq, u64 rcd_sn)
+{
+       struct net_device *netdev;
+
+       if (WARN_ON(test_and_set_bit(TLS_RX_SYNC_RUNNING, &tls_ctx->flags)))
+               return;
+       netdev = READ_ONCE(tls_ctx->netdev);
+       if (netdev)
+               netdev->tlsdev_ops->tls_dev_resync_rx(netdev, sk, seq, rcd_sn);
+       clear_bit_unlock(TLS_RX_SYNC_RUNNING, &tls_ctx->flags);
+}
+
 void handle_device_resync(struct sock *sk, u32 seq, u64 rcd_sn)
 {
        struct tls_context *tls_ctx = tls_get_ctx(sk);
-       struct net_device *netdev = tls_ctx->netdev;
        struct tls_offload_context_rx *rx_ctx;
        u32 is_req_pending;
        s64 resync_req;
@@ -563,10 +575,10 @@ void handle_device_resync(struct sock *sk, u32 seq, u64 rcd_sn)
        is_req_pending = resync_req;
 
        if (unlikely(is_req_pending) && req_seq == seq &&
-           atomic64_try_cmpxchg(&rx_ctx->resync_req, &resync_req, 0))
-               netdev->tlsdev_ops->tls_dev_resync_rx(netdev, sk,
-                                                     seq + TLS_HEADER_SIZE - 1,
-                                                     rcd_sn);
+           atomic64_try_cmpxchg(&rx_ctx->resync_req, &resync_req, 0)) {
+               seq += TLS_HEADER_SIZE - 1;
+               tls_device_resync_rx(tls_ctx, sk, seq, rcd_sn);
+       }
 }
 
 static int tls_device_reencrypt(struct sock *sk, struct sk_buff *skb)
@@ -954,7 +966,10 @@ static int tls_device_down(struct net_device *netdev)
                if (ctx->rx_conf == TLS_HW)
                        netdev->tlsdev_ops->tls_dev_del(netdev, ctx,
                                                        TLS_OFFLOAD_CTX_DIR_RX);
-               ctx->netdev = NULL;
+               WRITE_ONCE(ctx->netdev, NULL);
+               smp_mb__before_atomic(); /* pairs with test_and_set_bit() */
+               while (test_bit(TLS_RX_SYNC_RUNNING, &ctx->flags))
+                       usleep_range(10, 200);
                dev_put(netdev);
                list_del_init(&ctx->list);
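
A note on ordering in the tls_device_down() hunk above: test_and_set_bit()
is a value-returning atomic and so implies a full memory barrier on the
resync side, and the smp_mb__before_atomic() is there, per its comment, to
pair with it.  The intent is that a concurrent resync either observes the
NULL netdev pointer or keeps TLS_RX_SYNC_RUNNING set until the wait loop
has seen it, so dev_put() cannot release the device under a running resync.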