net: tls: factor out tls_*crypt_async_wait()
author Jakub Kicinski <kuba@kernel.org>
Wed, 7 Feb 2024 01:18:18 +0000 (17:18 -0800)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Sat, 25 May 2024 14:20:17 +0000 (16:20 +0200)
commit c57ca512f3b68ddcd62bda9cc24a8f5584ab01b1 upstream.

Factor out waiting for async encrypt and decrypt to finish.
There are already multiple copies and a subsequent fix will
need more. No functional changes.

Note that crypto_wait_req() returns wait->err.
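
For reference, crypto_wait_req() is roughly the helper below (a simplified
sketch of the version in include/linux/crypto.h, trimmed for illustration).
It sleeps on wait->completion and then reports wait->err, which is why the
new tls_*crypt_async_wait() wrappers can return ctx->async_wait.err directly
whether or not they actually had to wait:

static inline int crypto_wait_req(int err, struct crypto_wait *wait)
{
        switch (err) {
        case -EINPROGRESS:
        case -EBUSY:
                /* async request still in flight: sleep until the
                 * completion callback fires, then pick up its status
                 */
                wait_for_completion(&wait->completion);
                reinit_completion(&wait->completion);
                err = wait->err;
                break;
        }

        return err;
}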

Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Reviewed-by: Simon Horman <horms@kernel.org>
Reviewed-by: Sabrina Dubroca <sd@queasysnail.net>
Signed-off-by: David S. Miller <davem@davemloft.net>
Stable-dep-of: aec7961916f3 ("tls: fix race between async notify and socket close")
[v5.15: removed the changes in tls_sw_splice_eof and adjusted the factoring out
of the async decrypt wait in tls_sw_recvmsg]
Cc: <stable@vger.kernel.org> # 5.15
Signed-off-by: Shaoying Xu <shaoyi@amazon.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
net/tls/tls_sw.c

index 40d1f205c92f0eadac0e725cb9af84eb1e98e331..614cb30dae134c8ca023ca4d93f9eccb66b8f9b8 100644
--- a/net/tls/tls_sw.c
+++ b/net/tls/tls_sw.c
@@ -226,6 +226,20 @@ static void tls_decrypt_done(struct crypto_async_request *req, int err)
        spin_unlock_bh(&ctx->decrypt_compl_lock);
 }
 
+static int tls_decrypt_async_wait(struct tls_sw_context_rx *ctx)
+{
+       int pending;
+
+       spin_lock_bh(&ctx->decrypt_compl_lock);
+       reinit_completion(&ctx->async_wait.completion);
+       pending = atomic_read(&ctx->decrypt_pending);
+       spin_unlock_bh(&ctx->decrypt_compl_lock);
+       if (pending)
+               crypto_wait_req(-EINPROGRESS, &ctx->async_wait);
+
+       return ctx->async_wait.err;
+}
+
 static int tls_do_decryption(struct sock *sk,
                             struct sk_buff *skb,
                             struct scatterlist *sgin,
@@ -496,6 +510,28 @@ static void tls_encrypt_done(struct crypto_async_request *req, int err)
                schedule_delayed_work(&ctx->tx_work.work, 1);
 }
 
+static int tls_encrypt_async_wait(struct tls_sw_context_tx *ctx)
+{
+       int pending;
+
+       spin_lock_bh(&ctx->encrypt_compl_lock);
+       ctx->async_notify = true;
+
+       pending = atomic_read(&ctx->encrypt_pending);
+       spin_unlock_bh(&ctx->encrypt_compl_lock);
+       if (pending)
+               crypto_wait_req(-EINPROGRESS, &ctx->async_wait);
+       else
+               reinit_completion(&ctx->async_wait.completion);
+
+       /* There can be no concurrent accesses, since we have no
+        * pending encrypt operations
+        */
+       WRITE_ONCE(ctx->async_notify, false);
+
+       return ctx->async_wait.err;
+}
+
 static int tls_do_encryption(struct sock *sk,
                             struct tls_context *tls_ctx,
                             struct tls_sw_context_tx *ctx,
@@ -946,7 +982,6 @@ int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
        int num_zc = 0;
        int orig_size;
        int ret = 0;
-       int pending;
 
        if (msg->msg_flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL |
                               MSG_CMSG_COMPAT))
@@ -1115,24 +1150,12 @@ trim_sgl:
        if (!num_async) {
                goto send_end;
        } else if (num_zc) {
-               /* Wait for pending encryptions to get completed */
-               spin_lock_bh(&ctx->encrypt_compl_lock);
-               ctx->async_notify = true;
-
-               pending = atomic_read(&ctx->encrypt_pending);
-               spin_unlock_bh(&ctx->encrypt_compl_lock);
-               if (pending)
-                       crypto_wait_req(-EINPROGRESS, &ctx->async_wait);
-               else
-                       reinit_completion(&ctx->async_wait.completion);
-
-               /* There can be no concurrent accesses, since we have no
-                * pending encrypt operations
-                */
-               WRITE_ONCE(ctx->async_notify, false);
+               int err;
 
-               if (ctx->async_wait.err) {
-                       ret = ctx->async_wait.err;
+               /* Wait for pending encryptions to get completed */
+               err = tls_encrypt_async_wait(ctx);
+               if (err) {
+                       ret = err;
                        copied = 0;
                }
        }
@@ -1910,22 +1933,14 @@ pick_next_record:
 
 recv_end:
        if (async) {
-               int pending;
-
                /* Wait for all previously submitted records to be decrypted */
-               spin_lock_bh(&ctx->decrypt_compl_lock);
-               reinit_completion(&ctx->async_wait.completion);
-               pending = atomic_read(&ctx->decrypt_pending);
-               spin_unlock_bh(&ctx->decrypt_compl_lock);
-               if (pending) {
-                       err = crypto_wait_req(-EINPROGRESS, &ctx->async_wait);
-                       if (err) {
-                               /* one of async decrypt failed */
-                               tls_err_abort(sk, err);
-                               copied = 0;
-                               decrypted = 0;
-                               goto end;
-                       }
+               err = tls_decrypt_async_wait(ctx);
+               if (err) {
+                       /* one of async decrypt failed */
+                       tls_err_abort(sk, err);
+                       copied = 0;
+                       decrypted = 0;
+                       goto end;
                }
 
                /* Drain records from the rx_list & copy if required */
@@ -2144,16 +2159,9 @@ void tls_sw_release_resources_tx(struct sock *sk)
        struct tls_context *tls_ctx = tls_get_ctx(sk);
        struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
        struct tls_rec *rec, *tmp;
-       int pending;
 
        /* Wait for any pending async encryptions to complete */
-       spin_lock_bh(&ctx->encrypt_compl_lock);
-       ctx->async_notify = true;
-       pending = atomic_read(&ctx->encrypt_pending);
-       spin_unlock_bh(&ctx->encrypt_compl_lock);
-
-       if (pending)
-               crypto_wait_req(-EINPROGRESS, &ctx->async_wait);
+       tls_encrypt_async_wait(ctx);
 
        tls_tx_records(sk, -1);