block: make BLK_DEF_MAX_SECTORS unsigned
author     Keith Busch <kbusch@kernel.org>
           Thu, 5 Jan 2023 20:51:45 +0000 (12:51 -0800)
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>
           Thu, 25 Jan 2024 22:52:40 +0000 (14:52 -0800)
[ Upstream commit 0a26f327e46c203229e72c823dfec71a2b405ec5 ]

This is used as an unsigned value, so define it that way to avoid
having to cast it.

Suggested-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Keith Busch <kbusch@kernel.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Bart Van Assche <bvanassche@acm.org>
Reviewed-by: Martin K. Petersen <martin.petersen@oracle.com>
Link: https://lore.kernel.org/r/20230105205146.3610282-2-kbusch@meta.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Stable-dep-of: 9a9525de8654 ("null_blk: don't cap max_hw_sectors to BLK_DEF_MAX_SECTORS")
Signed-off-by: Sasha Levin <sashal@kernel.org>
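
For illustration only, a minimal userspace sketch of why the change removes the min_t() casts. This is not the kernel's actual min()/min_t() implementation; typecheck_min() is a made-up stand-in that mimics the kernel min() macro's requirement that both arguments have the same type. With the old enum constant (type int) the comparison against an unsigned int value would trip that check, which is why min_t(unsigned int, ...) was needed; with 2560u both sides are unsigned int and plain min() suffices.

/* Userspace sketch only; typecheck_min() is a stand-in for the kernel's
 * type-checked min(), not the real macro. Build with: gcc -o demo demo.c */
#include <stdio.h>

#define typecheck_min(x, y) ({					\
	typeof(x) _x = (x);					\
	typeof(y) _y = (y);					\
	(void)(&_x == &_y); /* compiler warns if x and y differ in type */ \
	_x < _y ? _x : _y; })

#define BLK_DEF_MAX_SECTORS	2560u	/* unsigned int, as in this patch */

int main(void)
{
	unsigned int max_sectors = 65535;

	/* With the old enum constant (type int) this comparison would mix
	 * int and unsigned int and trigger the type-check warning. */
	unsigned int capped = typecheck_min(max_sectors, BLK_DEF_MAX_SECTORS);

	printf("capped = %u\n", capped);	/* prints 2560 */
	return 0;
}
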
block/blk-settings.c
drivers/block/null_blk/main.c
include/linux/blkdev.h

diff --git a/block/blk-settings.c b/block/blk-settings.c
index 73a80895e3ae18bcaf399fcae70af42b8c327218..959b5c1e6d3b7eae60842f45b14dcc089e04a953 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -136,7 +136,7 @@ void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_hw_secto
        limits->max_hw_sectors = max_hw_sectors;
 
        max_sectors = min_not_zero(max_hw_sectors, limits->max_dev_sectors);
-       max_sectors = min_t(unsigned int, max_sectors, BLK_DEF_MAX_SECTORS);
+       max_sectors = min(max_sectors, BLK_DEF_MAX_SECTORS);
        max_sectors = round_down(max_sectors,
                                 limits->logical_block_size >> SECTOR_SHIFT);
        limits->max_sectors = max_sectors;
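
As a rough userspace sketch of the clamping above (assumed values, not kernel code): blk_queue_max_hw_sectors() takes the smaller non-zero of the hardware and device limits, caps the result at BLK_DEF_MAX_SECTORS, and rounds down to a multiple of the logical block size expressed in sectors. min_not_zero() and round_down_mult() below are simplified stand-ins for the kernel helpers, and the limit values are made up.

/* Userspace sketch of the clamping sequence; helpers and values are
 * simplified stand-ins, not the kernel implementations. */
#include <stdio.h>

#define SECTOR_SHIFT		9
#define BLK_DEF_MAX_SECTORS	2560u

static unsigned int min_not_zero(unsigned int a, unsigned int b)
{
	if (!a)
		return b;
	if (!b)
		return a;
	return a < b ? a : b;
}

static unsigned int round_down_mult(unsigned int v, unsigned int m)
{
	return v - (v % m);	/* the kernel's round_down() assumes m is a power of two */
}

int main(void)
{
	unsigned int max_hw_sectors = 65535;	/* assumed driver limit */
	unsigned int max_dev_sectors = 0;	/* assumed: no device limit set */
	unsigned int logical_block_size = 4096;	/* assumed: 4 KiB logical blocks */
	unsigned int max_sectors;

	max_sectors = min_not_zero(max_hw_sectors, max_dev_sectors);
	if (max_sectors > BLK_DEF_MAX_SECTORS)
		max_sectors = BLK_DEF_MAX_SECTORS;
	max_sectors = round_down_mult(max_sectors,
				      logical_block_size >> SECTOR_SHIFT);

	printf("max_sectors = %u\n", max_sectors);	/* prints 2560 */
	return 0;
}
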
diff --git a/drivers/block/null_blk/main.c b/drivers/block/null_blk/main.c
index 686ec6bcdef3d59e6405b0f683dfb8b31d12d7ed..4a867233b14abf82c90359ea779e07123c87dd2e 100644
--- a/drivers/block/null_blk/main.c
+++ b/drivers/block/null_blk/main.c
@@ -1902,8 +1902,7 @@ static int null_add_dev(struct nullb_device *dev)
        blk_queue_physical_block_size(nullb->q, dev->blocksize);
        if (!dev->max_sectors)
                dev->max_sectors = queue_max_hw_sectors(nullb->q);
-       dev->max_sectors = min_t(unsigned int, dev->max_sectors,
-                                BLK_DEF_MAX_SECTORS);
+       dev->max_sectors = min(dev->max_sectors, BLK_DEF_MAX_SECTORS);
        blk_queue_max_hw_sectors(nullb->q, dev->max_sectors);
 
        if (dev->virt_boundary)
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 67344dfe07a7cf9ecab3ca42a0eb4da267697384..905844172cfd8f422e2748d8bbd16f131b8861ae 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -1334,11 +1334,12 @@ static inline bool bdev_is_partition(struct block_device *bdev)
 enum blk_default_limits {
        BLK_MAX_SEGMENTS        = 128,
        BLK_SAFE_MAX_SECTORS    = 255,
-       BLK_DEF_MAX_SECTORS     = 2560,
        BLK_MAX_SEGMENT_SIZE    = 65536,
        BLK_SEG_BOUNDARY_MASK   = 0xFFFFFFFFUL,
 };
 
+#define BLK_DEF_MAX_SECTORS 2560u
+
 static inline unsigned long queue_segment_boundary(const struct request_queue *q)
 {
        return q->limits.seg_boundary_mask;
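
The reason the constant moves out of the enum: an enumeration constant whose value fits in int has type int, while the literal 2560u has type unsigned int. A small C11 demonstration (not kernel code; OLD_DEF below is a made-up name for the previous enum-based constant):

/* C11 sketch (gcc -std=c11); OLD_DEF is a made-up name used only to show
 * the type difference between an enumerator and the 2560u literal. */
#include <stdio.h>

enum { OLD_DEF = 2560 };		/* enumeration constant: type int */
#define BLK_DEF_MAX_SECTORS 2560u	/* literal: type unsigned int */

#define TYPE_OF(x) _Generic((x),		\
	int: "int",				\
	unsigned int: "unsigned int",		\
	default: "other")

int main(void)
{
	printf("enum constant : %s\n", TYPE_OF(OLD_DEF));		/* int */
	printf("2560u #define : %s\n", TYPE_OF(BLK_DEF_MAX_SECTORS));	/* unsigned int */
	return 0;
}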