return bio;
}
-struct bio *bio_split_discard(struct bio *bio, const struct queue_limits *lim,
- unsigned *nsegs)
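+/*
+ * Shared splitting logic for discard-style requests: the caller passes in
+ * the queue limit (discard or secure erase) that applies to this bio.
+ */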
+static struct bio *__bio_split_discard(struct bio *bio,
+ const struct queue_limits *lim, unsigned *nsegs,
+ unsigned int max_sectors)
{
unsigned int max_discard_sectors, granularity;
sector_t tmp;
granularity = max(lim->discard_granularity >> 9, 1U);
- max_discard_sectors =
- min(lim->max_discard_sectors, bio_allowed_max_sectors(lim));
+ max_discard_sectors = min(max_sectors, bio_allowed_max_sectors(lim));
max_discard_sectors -= max_discard_sectors % granularity;
if (unlikely(!max_discard_sectors))
return bio;
return bio_submit_split(bio, split_sectors);
}
+struct bio *bio_split_discard(struct bio *bio, const struct queue_limits *lim,
+ unsigned *nsegs)
+{
+ unsigned int max_sectors;
+
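+	/*
+	 * Secure erase has its own queue limit; plain discards remain capped
+	 * by max_discard_sectors.
+	 */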
+ if (bio_op(bio) == REQ_OP_SECURE_ERASE)
+ max_sectors = lim->max_secure_erase_sectors;
+ else
+ max_sectors = lim->max_discard_sectors;
+
+ return __bio_split_discard(bio, lim, nsegs, max_sectors);
+}
+
static inline unsigned int blk_boundary_sectors(const struct queue_limits *lim,
bool is_atomic)
{
@@ ... @@ static inline unsigned int blk_queue_get_max_sectors(struct request *rq)
	struct request_queue *q = rq->q;
enum req_op op = req_op(rq);
- if (unlikely(op == REQ_OP_DISCARD || op == REQ_OP_SECURE_ERASE))
+ if (unlikely(op == REQ_OP_DISCARD))
return min(q->limits.max_discard_sectors,
UINT_MAX >> SECTOR_SHIFT);
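+	/* Secure erase is capped by its own limit, not max_discard_sectors. */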
+ if (unlikely(op == REQ_OP_SECURE_ERASE))
+ return min(q->limits.max_secure_erase_sectors,
+ UINT_MAX >> SECTOR_SHIFT);
+
if (unlikely(op == REQ_OP_WRITE_ZEROES))
return q->limits.max_write_zeroes_sectors;