u16 q_id, u16 tag, struct ublk_io *io, size_t offset);
static inline unsigned int ublk_req_build_flags(struct request *req);
-static void ublk_partition_scan_work(struct work_struct *work)
-{
-        struct ublk_device *ub =
-                container_of(work, struct ublk_device, partition_scan_work);
-
-        if (WARN_ON_ONCE(!test_and_clear_bit(GD_SUPPRESS_PART_SCAN,
-                                             &ub->ub_disk->state)))
-                return;
-
-        mutex_lock(&ub->ub_disk->open_mutex);
-        bdev_disk_changed(ub->ub_disk, false);
-        mutex_unlock(&ub->ub_disk->open_mutex);
-}
-
static inline struct ublksrv_io_desc *
ublk_get_iod(const struct ublk_queue *ubq, unsigned tag)
{
        put_device(disk_to_dev(disk));
}
+static void ublk_partition_scan_work(struct work_struct *work)
+{
+        struct ublk_device *ub =
+                container_of(work, struct ublk_device, partition_scan_work);
+        /* Hold disk reference to prevent UAF during concurrent teardown */
+        struct gendisk *disk = ublk_get_disk(ub);
+
+        if (!disk)
+                return;
+
+        if (WARN_ON_ONCE(!test_and_clear_bit(GD_SUPPRESS_PART_SCAN,
+                                             &disk->state)))
+                goto out;
+
+        mutex_lock(&disk->open_mutex);
+        bdev_disk_changed(disk, false);
+        mutex_unlock(&disk->open_mutex);
+out:
+        ublk_put_disk(disk);
+}
+
/*
 * Use this function to ensure that ->canceling is consistently set for
 * the device and all queues. Do not set these flags directly.
 */
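
For reference, a minimal sketch of the reference-counting helpers the reworked
worker relies on; only the tail of ublk_put_disk() is visible in the context
above. The ub->lock spinlock and the teardown path clearing ub->ub_disk under
it are assumptions beyond what this hunk shows:

/*
 * Sketch of the assumed helpers: ublk_get_disk() takes a device reference
 * on the gendisk if the ublk device still owns one, so the worker can
 * safely touch disk->state and disk->open_mutex even if teardown runs
 * concurrently. ub->lock guarding ub->ub_disk is an assumption here.
 */
static struct gendisk *ublk_get_disk(struct ublk_device *ub)
{
        struct gendisk *disk;

        spin_lock(&ub->lock);
        disk = ub->ub_disk;
        if (disk)
                get_device(disk_to_dev(disk));
        spin_unlock(&ub->lock);

        return disk;
}

static void ublk_put_disk(struct gendisk *disk)
{
        if (disk)
                put_device(disk_to_dev(disk));
}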
        mutex_lock(&ub->mutex);
        ublk_stop_dev_unlocked(ub);
        mutex_unlock(&ub->mutex);
-        flush_work(&ub->partition_scan_work);
+        cancel_work_sync(&ub->partition_scan_work);
        ublk_cancel_dev(ub);
}
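
A hypothetical sketch of the queueing site that the WARN_ON_ONCE() in the
worker pairs with: the initial partition scan is deferred by setting
GD_SUPPRESS_PART_SCAN before add_disk(), then handed to the worker once the
device can service I/O. The function name and its placement are assumptions;
only the gendisk flag and the workqueue calls are standard kernel API:

/* Hypothetical queueing site (not from this diff): defer the initial
 * partition scan past add_disk(), then let the worker perform it. */
static int ublk_start_disk(struct ublk_device *ub)
{
        int ret;

        set_bit(GD_SUPPRESS_PART_SCAN, &ub->ub_disk->state);
        ret = add_disk(ub->ub_disk);
        if (ret)
                return ret;

        /* run the deferred scan once the daemon can handle I/O */
        schedule_work(&ub->partition_scan_work);
        return 0;
}

On flush_work() vs cancel_work_sync(): flush_work() waits for a queued work
item to run to completion, so a scan queued just before stop would still
execute; cancel_work_sync() drops a still-pending item and waits for any
in-flight one. With the worker now holding its own disk reference, either
call is UAF-safe, but cancelling avoids running a pointless partition scan
against a device that is being stopped.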