return true;
}
-static void nvme_config_discard(struct nvme_ns *ns, struct queue_limits *lim)
-{
- struct nvme_ctrl *ctrl = ns->ctrl;
-
- if (ctrl->dmrsl && ctrl->dmrsl <= nvme_sect_to_lba(ns->head, UINT_MAX))
- lim->max_hw_discard_sectors =
- nvme_lba_to_sect(ns->head, ctrl->dmrsl);
- else if (ctrl->oncs & NVME_CTRL_ONCS_DSM)
- lim->max_hw_discard_sectors = UINT_MAX;
- else
- lim->max_hw_discard_sectors = 0;
-
- lim->discard_granularity = lim->logical_block_size;
-
- if (ctrl->dmrl)
- lim->max_discard_segments = ctrl->dmrl;
- else
- lim->max_discard_segments = NVME_DSM_MAX_RANGES;
-}
-
static bool nvme_ns_ids_equal(struct nvme_ns_ids *a, struct nvme_ns_ids *b)
{
return uuid_equal(&a->uuid, &b->uuid) &&
static bool nvme_update_disk_info(struct nvme_ns *ns, struct nvme_id_ns *id,
		struct queue_limits *lim)
{
struct nvme_ns_head *head = ns->head;
+ struct nvme_ctrl *ctrl = ns->ctrl;
u32 bs = 1U << head->lba_shift;
u32 atomic_bs, phys_bs, io_opt = 0;
bool valid = true;
lim->physical_block_size = min(phys_bs, atomic_bs);
lim->io_min = phys_bs;
lim->io_opt = io_opt;
- if ((ns->ctrl->quirks & NVME_QUIRK_DEALLOCATE_ZEROES) &&
- (ns->ctrl->oncs & NVME_CTRL_ONCS_DSM))
+ if ((ctrl->quirks & NVME_QUIRK_DEALLOCATE_ZEROES) &&
+ (ctrl->oncs & NVME_CTRL_ONCS_DSM))
lim->max_write_zeroes_sectors = UINT_MAX;
else
- lim->max_write_zeroes_sectors = ns->ctrl->max_zeroes_sectors;
+ lim->max_write_zeroes_sectors = ctrl->max_zeroes_sectors;
+
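+ /*
+  * DMRSL is a count of logical blocks; honor it only when the
+  * equivalent number of 512-byte sectors fits in the 32-bit
+  * max_hw_discard_sectors limit.
+  */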
+ if (ctrl->dmrsl && ctrl->dmrsl <= nvme_sect_to_lba(head, UINT_MAX))
+ lim->max_hw_discard_sectors =
+ nvme_lba_to_sect(head, ctrl->dmrsl);
+ else if (ctrl->oncs & NVME_CTRL_ONCS_DSM)
+ lim->max_hw_discard_sectors = UINT_MAX;
+ else
+ lim->max_hw_discard_sectors = 0;
+
+ lim->discard_granularity = lim->logical_block_size;
+
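+ /* DMRL caps the number of ranges in a single Dataset Management command. */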
+ if (ctrl->dmrl)
+ lim->max_discard_segments = ctrl->dmrl;
+ else
+ lim->max_discard_segments = NVME_DSM_MAX_RANGES;
return valid;
}
if (!nvme_update_disk_info(ns, id, &lim))
capacity = 0;
- nvme_config_discard(ns, &lim);
if (IS_ENABLED(CONFIG_BLK_DEV_ZONED) &&
ns->head->ids.csi == NVME_CSI_ZNS)
nvme_update_zone_info(ns, &lim, &zi);
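
The dmrsl guard in the moved hunk is an overflow check: the controller reports
DMRSL in logical blocks, while max_hw_discard_sectors counts 512-byte sectors
in a 32-bit field, so a large DMRSL converted blindly could wrap. Below is a
minimal stand-alone sketch of the same clamp; lba_to_sect()/sect_to_lba() are
stand-ins for the driver's nvme_lba_to_sect()/nvme_sect_to_lba() helpers, the
4KiB logical block size and DMRSL value are hypothetical, and DSM support is
assumed so the fallback is UINT32_MAX rather than 0:

#include <stdint.h>
#include <stdio.h>

#define SECTOR_SHIFT	9

/* Stand-in for nvme_lba_to_sect(): logical blocks -> 512-byte sectors. */
static uint64_t lba_to_sect(uint64_t lba, unsigned int lba_shift)
{
	return lba << (lba_shift - SECTOR_SHIFT);
}

/* Stand-in for nvme_sect_to_lba(): 512-byte sectors -> logical blocks. */
static uint64_t sect_to_lba(uint64_t sect, unsigned int lba_shift)
{
	return sect >> (lba_shift - SECTOR_SHIFT);
}

int main(void)
{
	unsigned int lba_shift = 12;	/* hypothetical 4KiB logical blocks */
	uint64_t dmrsl = 0x30000000;	/* hypothetical DMRSL, in logical blocks */
	uint32_t max_hw_discard_sectors;

	/* Same shape as the guard in the patch: honor DMRSL only if the
	 * converted sector count still fits in the 32-bit queue limit. */
	if (dmrsl && dmrsl <= sect_to_lba(UINT32_MAX, lba_shift))
		max_hw_discard_sectors = lba_to_sect(dmrsl, lba_shift);
	else
		max_hw_discard_sectors = UINT32_MAX;	/* assumes DSM support */

	printf("max_hw_discard_sectors = %u\n", max_hw_discard_sectors);
	return 0;
}

With these example values the converted limit would exceed 32 bits, so the
sketch falls back to UINT32_MAX, mirroring the ONCS_DSM branch in the patch.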