git.ipfire.org Git - thirdparty/linux.git/commitdiff
block: Ensure start sector is aligned for stacking atomic writes
authorJohn Garry <john.g.garry@oracle.com>
Thu, 9 Jan 2025 11:39:59 +0000 (11:39 +0000)
committerJens Axboe <axboe@kernel.dk>
Wed, 15 Jan 2025 16:47:43 +0000 (09:47 -0700)
For stacking atomic writes, ensure that the start sector is aligned with
the device atomic write unit min and any boundary. Otherwise, we may
permit misaligned atomic writes.

Rework bdev_can_atomic_write() into a common helper to reuse the
alignment check. Also use atomic_write_hw_unit_min there, which is more
appropriate than atomic_write_unit_min.

Fixes: d7f36dc446e89 ("block: Support atomic writes limits for stacked devices")
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: John Garry <john.g.garry@oracle.com>
Reviewed-by: Martin K. Petersen <martin.petersen@oracle.com>
Link: https://lore.kernel.org/r/20250109114000.2299896-2-john.g.garry@oracle.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
block/blk-settings.c
include/linux/blkdev.h

index 6c96a73261d1b690ecb06aad41236dbc2423381d..c2b99262db26cfd86c4ece182c1a9a9edcf513c3 100644 (file)
@@ -609,7 +609,7 @@ static bool blk_stack_atomic_writes_head(struct queue_limits *t,
 }
 
 static void blk_stack_atomic_writes_limits(struct queue_limits *t,
-                               struct queue_limits *b)
+                               struct queue_limits *b, sector_t start)
 {
        if (!(t->features & BLK_FEAT_ATOMIC_WRITES_STACKED))
                goto unsupported;
@@ -617,6 +617,9 @@ static void blk_stack_atomic_writes_limits(struct queue_limits *t,
        if (!b->atomic_write_unit_min)
                goto unsupported;
 
+       if (!blk_atomic_write_start_sect_aligned(start, b))
+               goto unsupported;
+
        /*
         * If atomic_write_hw_max is set, we have already stacked 1x bottom
         * device, so check for compliance.
@@ -799,7 +802,7 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
                t->zone_write_granularity = 0;
                t->max_zone_append_sectors = 0;
        }
-       blk_stack_atomic_writes_limits(t, b);
+       blk_stack_atomic_writes_limits(t, b, start);
 
        return ret;
 }
index 13d353351c37d52582eb421b8414e6e0efaed780..7ac153e4423a6fff5aa63ced47808674c51355cd 100644 (file)
@@ -1706,6 +1706,15 @@ struct io_comp_batch {
        void (*complete)(struct io_comp_batch *);
 };
 
+static inline bool blk_atomic_write_start_sect_aligned(sector_t sector,
+                                               struct queue_limits *limits)
+{
+       unsigned int alignment = max(limits->atomic_write_hw_unit_min,
+                               limits->atomic_write_hw_boundary);
+
+       return IS_ALIGNED(sector, alignment >> SECTOR_SHIFT);
+}
+
 static inline bool bdev_can_atomic_write(struct block_device *bdev)
 {
        struct request_queue *bd_queue = bdev->bd_queue;
@@ -1714,15 +1723,9 @@ static inline bool bdev_can_atomic_write(struct block_device *bdev)
        if (!limits->atomic_write_unit_min)
                return false;
 
-       if (bdev_is_partition(bdev)) {
-               sector_t bd_start_sect = bdev->bd_start_sect;
-               unsigned int alignment =
-                       max(limits->atomic_write_unit_min,
-                           limits->atomic_write_hw_boundary);
-
-               if (!IS_ALIGNED(bd_start_sect, alignment >> SECTOR_SHIFT))
-                       return false;
-       }
+       if (bdev_is_partition(bdev))
+               return blk_atomic_write_start_sect_aligned(bdev->bd_start_sect,
+                                                       limits);
 
        return true;
 }