 }
 #ifdef CONFIG_BLK_DEV_ZONED
-static inline bool dm_zone_bio_needs_split(struct mapped_device *md,
-					    struct bio *bio)
+static inline bool dm_zone_bio_needs_split(struct bio *bio)
 {
 	/*
 	 * Special case the zone operations that cannot or should not be split.

[...]

 	}
 	/*
-	 * Mapped devices that require zone append emulation will use the block
-	 * layer zone write plugging. In such case, we must split any large BIO
-	 * to the mapped device limits to avoid potential deadlocks with queue
-	 * freeze operations.
+	 * When mapped devices use the block layer zone write plugging, we must
+	 * split any large BIO to the mapped device limits to not submit BIOs
+	 * that span zone boundaries and to avoid potential deadlocks with
+	 * queue freeze operations.
 	 */
-	if (!dm_emulate_zone_append(md))
-		return false;
 	return bio_needs_zone_write_plugging(bio) || bio_straddles_zones(bio);
 }

[...]

 }
 #else
-static inline bool dm_zone_bio_needs_split(struct mapped_device *md,
-					    struct bio *bio)
+static inline bool dm_zone_bio_needs_split(struct bio *bio)
 {
 	return false;
 }

[...]

 	is_abnormal = is_abnormal_io(bio);
 	if (static_branch_unlikely(&zoned_enabled)) {
-		need_split = is_abnormal || dm_zone_bio_needs_split(md, bio);
+		need_split = is_abnormal || dm_zone_bio_needs_split(bio);
 	} else {
 		need_split = is_abnormal;
 	}
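
To make the effect of the change concrete, below is a minimal standalone userspace sketch of the split decision as it looks after the patch. The struct fake_bio, the OP_* values, the zone size, and the two predicate stubs are illustrative assumptions, not the kernel implementations (the real bio_needs_zone_write_plugging() and bio_straddles_zones() are block layer helpers, and the elided "special case" switch covers the zone management operations); only the final return expression mirrors the patched helper.

/*
 * Standalone sketch (not kernel code): models the split decision made by the
 * patched dm_zone_bio_needs_split(). struct fake_bio, ZONE_SECTORS and the
 * two predicate stubs are simplified stand-ins chosen for illustration; the
 * real bio_needs_zone_write_plugging() and bio_straddles_zones() are block
 * layer helpers with more involved criteria.
 */
#include <stdbool.h>
#include <stdio.h>

#define ZONE_SECTORS 524288ULL	/* assumed 256 MiB zones with 512 B sectors */

enum fake_op { OP_READ, OP_WRITE, OP_ZONE_APPEND, OP_ZONE_RESET };

struct fake_bio {
	enum fake_op op;
	unsigned long long sector;	/* first sector targeted */
	unsigned int nr_sectors;	/* I/O size in sectors */
};

/* Stand-in: assume write-type operations go through zone write plugging. */
static bool bio_needs_zone_write_plugging(const struct fake_bio *bio)
{
	return bio->op == OP_WRITE || bio->op == OP_ZONE_APPEND;
}

/* True if the BIO crosses a zone boundary. */
static bool bio_straddles_zones(const struct fake_bio *bio)
{
	unsigned long long first = bio->sector / ZONE_SECTORS;
	unsigned long long last =
		(bio->sector + bio->nr_sectors - 1) / ZONE_SECTORS;

	return bio->nr_sectors && first != last;
}

/*
 * Mirrors the patched helper: no struct mapped_device argument and no
 * dm_emulate_zone_append() gate. The simplified check below stands in for
 * the elided "special case" switch on zone management operations.
 */
static bool dm_zone_bio_needs_split(const struct fake_bio *bio)
{
	if (bio->op == OP_ZONE_APPEND || bio->op == OP_ZONE_RESET)
		return false;

	return bio_needs_zone_write_plugging(bio) || bio_straddles_zones(bio);
}

int main(void)
{
	struct fake_bio small_read = { OP_READ, 0, 8 };
	struct fake_bio straddling_read = { OP_READ, ZONE_SECTORS - 8, 16 };
	struct fake_bio zone_write = { OP_WRITE, ZONE_SECTORS, 256 };

	printf("small read      -> split? %d\n", dm_zone_bio_needs_split(&small_read));
	printf("straddling read -> split? %d\n", dm_zone_bio_needs_split(&straddling_read));
	printf("zoned write     -> split? %d\n", dm_zone_bio_needs_split(&zone_write));
	return 0;
}

The point the sketch captures is that the decision now depends only on the BIO itself: with the dm_emulate_zone_append() gate removed, any zoned mapped device relying on block layer zone write plugging has its large or zone-straddling BIOs split to the device limits, which is why the struct mapped_device argument could be dropped at the call site as well.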