{
if (bio->bi_vcnt >= bio->bi_max_vecs)
return true;
- if (bio->bi_iter.bi_size > UINT_MAX - len)
+ if (bio->bi_iter.bi_size > BIO_MAX_SIZE - len)
return true;
return false;
}
{
if (WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED)))
return 0;
- if (bio->bi_iter.bi_size > UINT_MAX - len)
+ if (bio->bi_iter.bi_size > BIO_MAX_SIZE - len)
return 0;
if (bio->bi_vcnt > 0) {
{
unsigned long nr = off / PAGE_SIZE;
- WARN_ON_ONCE(len > UINT_MAX);
+ WARN_ON_ONCE(len > BIO_MAX_SIZE);
__bio_add_page(bio, folio_page(folio, nr), len, off % PAGE_SIZE);
}
EXPORT_SYMBOL_GPL(bio_add_folio_nofail);
{
unsigned long nr = off / PAGE_SIZE;
- if (len > UINT_MAX)
+ if (len > BIO_MAX_SIZE)
return false;
return bio_add_page(bio, folio_page(folio, nr), len, off % PAGE_SIZE) > 0;
}
extraction_flags |= ITER_ALLOW_P2PDMA;
size = iov_iter_extract_pages(iter, &pages,
- UINT_MAX - bio->bi_iter.bi_size,
+ BIO_MAX_SIZE - bio->bi_iter.bi_size,
nr_pages, extraction_flags, &offset);
if (unlikely(size <= 0))
return size ? size : -EFAULT;
* Align the bio size to the discard granularity to make splitting the bio
* at discard granularity boundaries easier in the driver if needed.
*/
- return round_down(UINT_MAX, discard_granularity) >> SECTOR_SHIFT;
+ return round_down(BIO_MAX_SIZE, discard_granularity) >> SECTOR_SHIFT;
}
struct bio *blk_alloc_discard_bio(struct block_device *bdev,
{
sector_t bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
- return min(bdev_write_zeroes_sectors(bdev),
- (UINT_MAX >> SECTOR_SHIFT) & ~bs_mask);
+ return min(bdev_write_zeroes_sectors(bdev), BIO_MAX_SECTORS & ~bs_mask);
}
/*
int ret = 0;
/* make sure that "len << SECTOR_SHIFT" doesn't overflow */
- if (max_sectors > UINT_MAX >> SECTOR_SHIFT)
- max_sectors = UINT_MAX >> SECTOR_SHIFT;
+ if (max_sectors > BIO_MAX_SECTORS)
+ max_sectors = BIO_MAX_SECTORS;
max_sectors &= ~bs_mask;
if (max_sectors == 0)
}
/*
- * The max size one bio can handle is UINT_MAX becasue bvec_iter.bi_size
- * is defined as 'unsigned int', meantime it has to be aligned to with the
+ * The maximum size of a bio has to be aligned down to the
* logical block size, which is the minimum accepted unit by hardware.
*/
static unsigned int bio_allowed_max_sectors(const struct queue_limits *lim)
{
- return round_down(UINT_MAX, lim->logical_block_size) >> SECTOR_SHIFT;
+ return round_down(BIO_MAX_SIZE, lim->logical_block_size) >>
+ SECTOR_SHIFT;
}
/*
rq_for_each_bvec(bv, rq, iter)
bvec_split_segs(&rq->q->limits, &bv, &nr_phys_segs, &bytes,
- UINT_MAX, UINT_MAX);
+ UINT_MAX, BIO_MAX_SIZE);
return nr_phys_segs;
}
};
#define BIO_RESET_BYTES offsetof(struct bio, bi_max_vecs)
-#define BIO_MAX_SECTORS (UINT_MAX >> SECTOR_SHIFT)
+#define BIO_MAX_SIZE UINT_MAX /* max value of bi_iter.bi_size */
+#define BIO_MAX_SECTORS (BIO_MAX_SIZE >> SECTOR_SHIFT)
static inline struct bio_vec *bio_inline_vecs(struct bio *bio)
{