dm-crypt: don't update io->sector after kcryptd_crypt_write_io_submit()
author    Hou Tao <houtao1@huawei.com>
Mon, 20 Jan 2025 08:29:49 +0000 (16:29 +0800)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Fri, 21 Feb 2025 12:49:44 +0000 (13:49 +0100)
commit 9fdbbdbbc92b1474a87b89f8b964892a63734492 upstream.

The updates of io->sector are leftovers from the time when dm-crypt
allocated pages for partial write requests. However, since commit
cf2f1abfbd0db ("dm crypt: don't allocate pages for a partial request"),
partial requests no longer exist.

After the introduction of the write request rb-tree, these updates of
io->sector may interfere with the insertion procedure, because the
->sector of a write request that has already been added to the rb-tree
may be changed while a new write request is being inserted.
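
For reference, kcryptd_crypt_write_io_submit() queues each io on the
cc->write_tree rb-tree keyed by io->sector, and the write thread later
dispatches the queued bios in sector order. The following simplified
sketch of that insertion (condensed from the in-tree code; the helper
name write_tree_insert() is invented for this sketch, and locking plus
the write-thread wakeup are omitted) shows why the key must stay
stable once a node is linked:

  /*
   * Condensed from kcryptd_crypt_write_io_submit(): the write path
   * links each io into cc->write_tree, ordered by io->sector.
   */
  static void write_tree_insert(struct crypt_config *cc,
                                struct dm_crypt_io *io)
  {
          struct rb_node **rbp = &cc->write_tree.rb_node;
          struct rb_node *parent = NULL;
          sector_t sector = io->sector;

          while (*rbp) {
                  parent = *rbp;
                  /* crypt_io_from_node() is container_of() back to the io */
                  if (sector < crypt_io_from_node(parent)->sector)
                          rbp = &(*rbp)->rb_left;
                  else
                          rbp = &(*rbp)->rb_right;
          }
          rb_link_node(&io->rb_node, parent, rbp);
          rb_insert_color(&io->rb_node, &cc->write_tree);
  }

If io->sector is rewritten after rb_insert_color(), as the removed code
did, later insertions that compare against the stale node can walk down
the wrong subtree, and the write thread no longer dispatches writes in
sector order.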

Fix it by removing these buggy updates of io->sector. Considering that
these updates only affect the write request rb-tree, the commit that
introduced the write request rb-tree is used as the Fixes tag.

Fixes: b3c5fd305249 ("dm crypt: sort writes")
Cc: stable@vger.kernel.org
Signed-off-by: Hou Tao <houtao1@huawei.com>
Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 25e51dc6e55985705c258a334ba9dee087c797d4..0213d1807ddc5c41be5438471934c74e4737dd46 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -2029,7 +2029,6 @@ static void kcryptd_crypt_write_continue(struct work_struct *work)
        struct crypt_config *cc = io->cc;
        struct convert_context *ctx = &io->ctx;
        int crypt_finished;
-       sector_t sector = io->sector;
        blk_status_t r;
 
        wait_for_completion(&ctx->restart);
@@ -2046,10 +2045,8 @@ static void kcryptd_crypt_write_continue(struct work_struct *work)
        }
 
        /* Encryption was already finished, submit io now */
-       if (crypt_finished) {
+       if (crypt_finished)
                kcryptd_crypt_write_io_submit(io, 0);
-               io->sector = sector;
-       }
 
        crypt_dec_pending(io);
 }
@@ -2060,14 +2057,13 @@ static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
        struct convert_context *ctx = &io->ctx;
        struct bio *clone;
        int crypt_finished;
-       sector_t sector = io->sector;
        blk_status_t r;
 
        /*
         * Prevent io from disappearing until this function completes.
         */
        crypt_inc_pending(io);
-       crypt_convert_init(cc, ctx, NULL, io->base_bio, sector);
+       crypt_convert_init(cc, ctx, NULL, io->base_bio, io->sector);
 
        clone = crypt_alloc_buffer(io, io->base_bio->bi_iter.bi_size);
        if (unlikely(!clone)) {
@@ -2084,8 +2080,6 @@ static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
                io->ctx.iter_in = clone->bi_iter;
        }
 
-       sector += bio_sectors(clone);
-
        crypt_inc_pending(io);
        r = crypt_convert(cc, ctx,
                          test_bit(DM_CRYPT_NO_WRITE_WORKQUEUE, &cc->flags), true);
@@ -2109,10 +2103,8 @@ static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
        }
 
        /* Encryption was already finished, submit io now */
-       if (crypt_finished) {
+       if (crypt_finished)
                kcryptd_crypt_write_io_submit(io, 0);
-               io->sector = sector;
-       }
 
 dec:
        crypt_dec_pending(io);