git.ipfire.org Git - thirdparty/kernel/linux.git/blobdiff - block/blk-lib.c
Merge tag 'for-6.10/block-20240511' of git://git.kernel.dk/linux
[thirdparty/kernel/linux.git] / block / blk-lib.c
index a6954eafb8c8af324971bd2d293fdceb2d481303..442da9dad04213d8014164784204d552fab04feb 100644 (file)
@@ -35,51 +35,39 @@ static sector_t bio_discard_limit(struct block_device *bdev, sector_t sector)
        return round_down(UINT_MAX, discard_granularity) >> SECTOR_SHIFT;
 }
 
-int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
-               sector_t nr_sects, gfp_t gfp_mask, struct bio **biop)
+struct bio *blk_alloc_discard_bio(struct block_device *bdev,
+               sector_t *sector, sector_t *nr_sects, gfp_t gfp_mask)
 {
-       struct bio *bio = *biop;
-       sector_t bs_mask;
-
-       if (bdev_read_only(bdev))
-               return -EPERM;
-       if (!bdev_max_discard_sectors(bdev))
-               return -EOPNOTSUPP;
-
-       /* In case the discard granularity isn't set by buggy device driver */
-       if (WARN_ON_ONCE(!bdev_discard_granularity(bdev))) {
-               pr_err_ratelimited("%pg: Error: discard_granularity is 0.\n",
-                                  bdev);
-               return -EOPNOTSUPP;
-       }
-
-       bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
-       if ((sector | nr_sects) & bs_mask)
-               return -EINVAL;
+       sector_t bio_sects = min(*nr_sects, bio_discard_limit(bdev, *sector));
+       struct bio *bio;
 
-       if (!nr_sects)
-               return -EINVAL;
+       if (!bio_sects)
+               return NULL;
 
-       while (nr_sects) {
-               sector_t req_sects =
-                       min(nr_sects, bio_discard_limit(bdev, sector));
+       bio = bio_alloc(bdev, 0, REQ_OP_DISCARD, gfp_mask);
+       if (!bio)
+               return NULL;
+       bio->bi_iter.bi_sector = *sector;
+       bio->bi_iter.bi_size = bio_sects << SECTOR_SHIFT;
+       *sector += bio_sects;
+       *nr_sects -= bio_sects;
+       /*
+        * We can loop for a long time in here if someone does full device
+        * discards (like mkfs).  Be nice and allow us to schedule out to avoid
+        * softlocking if preempt is disabled.
+        */
+       cond_resched();
+       return bio;
+}
 
-               bio = blk_next_bio(bio, bdev, 0, REQ_OP_DISCARD, gfp_mask);
-               bio->bi_iter.bi_sector = sector;
-               bio->bi_iter.bi_size = req_sects << 9;
-               sector += req_sects;
-               nr_sects -= req_sects;
-
-               /*
-                * We can loop for a long time in here, if someone does
-                * full device discards (like mkfs). Be nice and allow
-                * us to schedule out to avoid softlocking if preempt
-                * is disabled.
-                */
-               cond_resched();
-       }
+int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
+               sector_t nr_sects, gfp_t gfp_mask, struct bio **biop)
+{
+       struct bio *bio;
 
-       *biop = bio;
+       while ((bio = blk_alloc_discard_bio(bdev, &sector, &nr_sects,
+                       gfp_mask)))
+               *biop = bio_chain_and_submit(*biop, bio);
        return 0;
 }
 EXPORT_SYMBOL(__blkdev_issue_discard);