if (!cache->features.discard_passdown) {
/* No passdown is done so we set our own virtual limits */
- limits->max_discard_sectors = min_t(sector_t, cache->discard_block_size * 1024,
- cache->origin_sectors);
+ limits->max_hw_discard_sectors = min_t(sector_t, cache->discard_block_size * 1024,
+ cache->origin_sectors);
limits->discard_granularity = cache->discard_block_size << SECTOR_SHIFT;
return;
}
/*
 * cache_iterate_devices() is stacking both origin and fast device limits
* but discards aren't passed to fast device, so inherit origin's limits.
*/
- limits->max_discard_sectors = origin_limits->max_discard_sectors;
limits->max_hw_discard_sectors = origin_limits->max_hw_discard_sectors;
limits->discard_granularity = origin_limits->discard_granularity;
limits->discard_alignment = origin_limits->discard_alignment;
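
This first hunk is dm-cache's cache_io_hints(). The rename is not cosmetic: since the block layer gained a separate user-settable discard cap, the effective max_discard_sectors is derived from the hardware limit rather than assigned directly, so a target writing max_discard_sectors would see its value recomputed. A minimal sketch of that derivation, assuming the max_user_discard_sectors limit added earlier in the same series (the helper name is ours, not the kernel's):

#include <linux/blkdev.h>
#include <linux/minmax.h>

/* Illustrative only: roughly what the block layer computes, not its exact code. */
static unsigned int effective_max_discard(const struct queue_limits *lim)
{
	/* max_user_discard_sectors defaults to UINT_MAX, i.e. "no cap" */
	return min(lim->max_hw_discard_sectors, lim->max_user_discard_sectors);
}
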
if (!test_bit(DM_CLONE_DISCARD_PASSDOWN, &clone->flags)) {
/* No passdown is done so we set our own virtual limits */
limits->discard_granularity = clone->region_size << SECTOR_SHIFT;
- limits->max_discard_sectors = round_down(UINT_MAX >> SECTOR_SHIFT, clone->region_size);
+ limits->max_hw_discard_sectors = round_down(UINT_MAX >> SECTOR_SHIFT,
+ clone->region_size);
return;
}
/*
 * clone_iterate_devices() is stacking both the source and destination
 * device limits but discards aren't passed to the source device, so
* inherit destination's limits.
*/
- limits->max_discard_sectors = dest_limits->max_discard_sectors;
limits->max_hw_discard_sectors = dest_limits->max_hw_discard_sectors;
limits->discard_granularity = dest_limits->discard_granularity;
limits->discard_alignment = dest_limits->discard_alignment;
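
The dm-clone hunk, in set_discard_limits(), follows the same pattern. The "inherit destination's limits" assignments work because device-mapper computes a table's limits in two passes: it first stacks the limits of every underlying device, then gives the target's io_hints hook the final word. A condensed sketch of that ordering, simplified from dm-table.c's dm_calculate_queue_limits() (not verbatim):

#include <linux/device-mapper.h>

/* dm_set_device_limits() is dm-table.c's per-device stacking callback. */
static void sketch_calculate_limits(struct dm_target *ti,
				    struct queue_limits *limits)
{
	/* pass 1: merge limits from every device the target maps to */
	if (ti->type->iterate_devices)
		ti->type->iterate_devices(ti, dm_set_device_limits, limits);

	/* pass 2: let the target override what stacking produced */
	if (ti->type->io_hints)
		ti->type->io_hints(ti, limits);
}
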
limits->physical_block_size = ic->sectors_per_block << SECTOR_SHIFT;
blk_limits_io_min(limits, ic->sectors_per_block << SECTOR_SHIFT);
limits->dma_alignment = limits->logical_block_size - 1;
+ limits->discard_granularity = ic->sectors_per_block << SECTOR_SHIFT;
}
limits->max_integrity_segments = USHRT_MAX;
}
if (!bdev_max_discard_sectors(lc->dev->bdev)) {
lc->device_supports_discard = false;
limits->discard_granularity = lc->sectorsize;
- limits->max_discard_sectors = (UINT_MAX >> SECTOR_SHIFT);
+ limits->max_hw_discard_sectors = (UINT_MAX >> SECTOR_SHIFT);
}
limits->logical_block_size = bdev_logical_block_size(lc->dev->bdev);
limits->physical_block_size = bdev_physical_block_size(lc->dev->bdev);
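
In dm-log-writes, the surrounding probe is unchanged: bdev_max_discard_sectors() returns 0 for a device that advertises no discard support, which is what flips device_supports_discard and substitutes the target's own virtual limits. The test in isolation (hypothetical helper name):

#include <linux/blkdev.h>

static bool sketch_device_supports_discard(struct block_device *bdev)
{
	/* a zero stacked discard limit means "discard not supported" */
	return bdev_max_discard_sectors(bdev) != 0;
}
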
/* All discards are split on chunk_size boundary */
limits->discard_granularity = snap->store->chunk_size;
- limits->max_discard_sectors = snap->store->chunk_size;
+ limits->max_hw_discard_sectors = snap->store->chunk_size;
up_read(&_origins_lock);
}
static void io_err_io_hints(struct dm_target *ti, struct queue_limits *limits)
{
- limits->max_discard_sectors = UINT_MAX;
limits->max_hw_discard_sectors = UINT_MAX;
limits->discard_granularity = 512;
}
if (pt->adjusted_pf.discard_enabled) {
disable_discard_passdown_if_not_supported(pt);
if (!pt->adjusted_pf.discard_passdown)
- limits->max_discard_sectors = 0;
+ limits->max_hw_discard_sectors = 0;
/*
 * The pool uses the same discard limits as the underlying data
 * device. DM core has already set this up.
 */
if (pool->pf.discard_enabled) {
limits->discard_granularity = pool->sectors_per_block << SECTOR_SHIFT;
- limits->max_discard_sectors = pool->sectors_per_block * BIO_PRISON_MAX_RANGE;
+ limits->max_hw_discard_sectors = pool->sectors_per_block * BIO_PRISON_MAX_RANGE;
}
}
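
The two dm-thin hooks split the work: pool_io_hints() zeroes max_hw_discard_sectors so the pool device stops advertising discards when passdown is disabled, while thin_io_hints() sizes thin-device discards in whole pool blocks. A sketch with the thin limits pulled into a helper (assumes dm-thin's internal BIO_PRISON_MAX_RANGE constant; the helper itself is ours):

#include <linux/blkdev.h>
#include "dm-bio-prison-v1.h"	/* BIO_PRISON_MAX_RANGE */

static void sketch_thin_discard_limits(struct queue_limits *limits,
				       sector_t sectors_per_block)
{
	/* granularity is in bytes; the cap below is in 512-byte sectors */
	limits->discard_granularity = sectors_per_block << SECTOR_SHIFT;
	limits->max_hw_discard_sectors = sectors_per_block * BIO_PRISON_MAX_RANGE;
}
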
static void zero_io_hints(struct dm_target *ti, struct queue_limits *limits)
{
- limits->max_discard_sectors = UINT_MAX;
limits->max_hw_discard_sectors = UINT_MAX;
limits->discard_granularity = 512;
}
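
zero_io_hints() above and io_err_io_hints() earlier set identical "virtual" limits: both targets terminate bios themselves, so no hardware constraint applies and any discard up to UINT_MAX sectors is acceptable at the minimal 512-byte granularity. A hypothetical shared helper makes the pattern explicit:

#include <linux/blkdev.h>

static void sketch_unbounded_discard_limits(struct queue_limits *limits)
{
	limits->max_hw_discard_sectors = UINT_MAX;	/* no device to limit us */
	limits->discard_granularity = 512;		/* one sector, in bytes */
}
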
limits->discard_alignment = 0;
limits->discard_granularity = DMZ_BLOCK_SIZE;
- limits->max_discard_sectors = chunk_sectors;
limits->max_hw_discard_sectors = chunk_sectors;
limits->max_write_zeroes_sectors = chunk_sectors;
struct queue_limits *limits = dm_get_queue_limits(md);
/* device doesn't really support DISCARD, disable it */
- limits->max_discard_sectors = 0;
+ limits->max_hw_discard_sectors = 0;
}
void disable_write_zeroes(struct mapped_device *md)
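
Finally, disable_discard() in dm.c is the runtime escape hatch: clearing the hardware limit stops any further discards from being issued once one has failed. Condensed from dm.c's bio completion path, clone_endio() (simplified, not verbatim):

#include <linux/blkdev.h>
#include <linux/device-mapper.h>

static void sketch_bio_error_fallback(struct mapped_device *md,
				      struct bio *bio, blk_status_t error)
{
	/* a discard reached a device that cannot do it: stop advertising discard */
	if (error == BLK_STS_TARGET && bio_op(bio) == REQ_OP_DISCARD &&
	    !bdev_max_discard_sectors(bio->bi_bdev))
		disable_discard(md);
}

disable_write_zeroes(), whose signature closes this excerpt, applies the same treatment to failed write-zeroes requests.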