From ee81212f74a57c5d2b56cf504f40d528dac6faaf Mon Sep 17 00:00:00 2001
From: Luke Wang
Date: Wed, 4 Feb 2026 11:40:02 +0800
Subject: [PATCH] block: decouple secure erase size limit from discard size limit

Secure erase should use max_secure_erase_sectors instead of being
limited by max_discard_sectors. Separate the handling of
REQ_OP_SECURE_ERASE from REQ_OP_DISCARD so that each operation uses
its own size limit.

Signed-off-by: Luke Wang
Reviewed-by: Ulf Hansson
Signed-off-by: Jens Axboe
---
 block/blk-merge.c | 21 +++++++++++++++++----
 block/blk.h       |  6 +++++-
 2 files changed, 22 insertions(+), 5 deletions(-)

diff --git a/block/blk-merge.c b/block/blk-merge.c
index b82c6d3046583..c18bc440d6474 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -158,8 +158,9 @@ static struct bio *bio_submit_split(struct bio *bio, int split_sectors)
 	return bio;
 }
 
-struct bio *bio_split_discard(struct bio *bio, const struct queue_limits *lim,
-		unsigned *nsegs)
+static struct bio *__bio_split_discard(struct bio *bio,
+		const struct queue_limits *lim, unsigned *nsegs,
+		unsigned int max_sectors)
 {
 	unsigned int max_discard_sectors, granularity;
 	sector_t tmp;
@@ -169,8 +170,7 @@ struct bio *bio_split_discard(struct bio *bio, const struct queue_limits *lim,
 
 	granularity = max(lim->discard_granularity >> 9, 1U);
 
-	max_discard_sectors =
-		min(lim->max_discard_sectors, bio_allowed_max_sectors(lim));
+	max_discard_sectors = min(max_sectors, bio_allowed_max_sectors(lim));
 	max_discard_sectors -= max_discard_sectors % granularity;
 	if (unlikely(!max_discard_sectors))
 		return bio;
@@ -194,6 +194,19 @@ struct bio *bio_split_discard(struct bio *bio, const struct queue_limits *lim,
 	return bio_submit_split(bio, split_sectors);
 }
 
+struct bio *bio_split_discard(struct bio *bio, const struct queue_limits *lim,
+		unsigned *nsegs)
+{
+	unsigned int max_sectors;
+
+	if (bio_op(bio) == REQ_OP_SECURE_ERASE)
+		max_sectors = lim->max_secure_erase_sectors;
+	else
+		max_sectors = lim->max_discard_sectors;
+
+	return __bio_split_discard(bio, lim, nsegs, max_sectors);
+}
+
 static inline unsigned int blk_boundary_sectors(const struct queue_limits *lim,
 		bool is_atomic)
 {
diff --git a/block/blk.h b/block/blk.h
index 980eef1f56903..401d19ed08a60 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -208,10 +208,14 @@ static inline unsigned int blk_queue_get_max_sectors(struct request *rq)
 	struct request_queue *q = rq->q;
 	enum req_op op = req_op(rq);
 
-	if (unlikely(op == REQ_OP_DISCARD || op == REQ_OP_SECURE_ERASE))
+	if (unlikely(op == REQ_OP_DISCARD))
 		return min(q->limits.max_discard_sectors,
 			UINT_MAX >> SECTOR_SHIFT);
 
+	if (unlikely(op == REQ_OP_SECURE_ERASE))
+		return min(q->limits.max_secure_erase_sectors,
+			UINT_MAX >> SECTOR_SHIFT);
+
 	if (unlikely(op == REQ_OP_WRITE_ZEROES))
 		return q->limits.max_write_zeroes_sectors;
 
-- 
2.47.3
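
Note for reviewers (illustration only, not part of the patch): the new split
path only diverges from the discard path when a driver advertises a secure
erase limit that differs from its discard limit. The sketch below shows how a
hypothetical driver might set the two queue_limits fields independently. It
assumes the atomic queue_limits update helpers available in recent kernels
(queue_limits_start_update()/queue_limits_commit_update()); the function name
my_dev_set_erase_limits() and the limit values are made up for illustration.

#include <linux/blkdev.h>

/*
 * Hypothetical driver setup: advertise independent discard and secure
 * erase limits so that bio_split_discard() above picks
 * max_secure_erase_sectors for REQ_OP_SECURE_ERASE bios instead of
 * clamping them to max_discard_sectors.
 */
static int my_dev_set_erase_limits(struct request_queue *q)
{
	struct queue_limits lim = queue_limits_start_update(q);

	lim.discard_granularity = 4096;		/* bytes */
	lim.max_discard_sectors = 2048;		/* 1 MiB in 512-byte sectors */
	lim.max_secure_erase_sectors = 8192;	/* 4 MiB in 512-byte sectors */

	/* Commit the updated limits atomically; returns 0 on success. */
	return queue_limits_commit_update(q, &lim);
}

With limits like these, a 4 MiB secure erase was previously split into 1 MiB
chunks because the discard limit applied to both operations; with this patch
it is split against max_secure_erase_sectors instead, while discards keep
using max_discard_sectors.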