nvme: split nvme_update_zone_info
Author:     Christoph Hellwig <hch@lst.de>
AuthorDate: Tue, 2 Apr 2024 14:47:54 +0000 (16:47 +0200)
Commit:     Keith Busch <kbusch@kernel.org>
CommitDate: Tue, 2 Apr 2024 15:21:33 +0000 (08:21 -0700)
nvme_update_zone_info does (admin queue) I/O to the device and can fail.
We currently fail to abort the queue limits update if that happens, and we
should really avoid doing I/O while the queue is frozen as much as possible
anyway.

Split the logic into one helper that queries the zone information, which
can be called while the queue is unfrozen, and another that applies it to
the queue limits.

Fixes: 9b130d681443 ("nvme: use the atomic queue limits update API")
Reported-by: Kanchan Joshi <joshi.k@samsung.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Kanchan Joshi <joshi.k@samsung.com>
Signed-off-by: Keith Busch <kbusch@kernel.org>
drivers/nvme/host/core.c
drivers/nvme/host/nvme.h
drivers/nvme/host/zns.c
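For context, a condensed sketch of the resulting flow in nvme_update_ns_info_block(), pieced together from the diff below (error handling and unrelated limit updates elided):

	struct nvme_zone_info zi = {};

	/* Do the admin-queue I/O while the I/O queue is still unfrozen. */
	if (IS_ENABLED(CONFIG_BLK_DEV_ZONED) &&
	    ns->head->ids.csi == NVME_CSI_ZNS) {
		ret = nvme_query_zone_info(ns, lbaf, &zi);
		if (ret < 0)
			goto out;
	}

	/* Freeze the queue and apply the cached values; this step cannot fail. */
	blk_mq_freeze_queue(ns->disk->queue);
	...
	if (IS_ENABLED(CONFIG_BLK_DEV_ZONED) &&
	    ns->head->ids.csi == NVME_CSI_ZNS)
		nvme_update_zone_info(ns, &lim, &zi);
	ret = queue_limits_commit_update(ns->disk->queue, &lim);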

diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 0cf46068f1d014fe6a4e5078ca0900d64268ec46..504dc352c458dda0c00688d49e0c4e2dba6186df 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -2076,6 +2076,7 @@ static int nvme_update_ns_info_block(struct nvme_ns *ns,
        bool vwc = ns->ctrl->vwc & NVME_CTRL_VWC_PRESENT;
        struct queue_limits lim;
        struct nvme_id_ns_nvm *nvm = NULL;
+       struct nvme_zone_info zi = {};
        struct nvme_id_ns *id;
        sector_t capacity;
        unsigned lbaf;
@@ -2091,6 +2092,7 @@ static int nvme_update_ns_info_block(struct nvme_ns *ns,
                ret = -ENODEV;
                goto out;
        }
+       lbaf = nvme_lbaf_index(id->flbas);
 
        if (ns->ctrl->ctratt & NVME_CTRL_ATTR_ELBAS) {
                ret = nvme_identify_ns_nvm(ns->ctrl, info->nsid, &nvm);
@@ -2098,8 +2100,14 @@ static int nvme_update_ns_info_block(struct nvme_ns *ns,
                        goto out;
        }
 
+       if (IS_ENABLED(CONFIG_BLK_DEV_ZONED) &&
+           ns->head->ids.csi == NVME_CSI_ZNS) {
+               ret = nvme_query_zone_info(ns, lbaf, &zi);
+               if (ret < 0)
+                       goto out;
+       }
+
        blk_mq_freeze_queue(ns->disk->queue);
-       lbaf = nvme_lbaf_index(id->flbas);
        ns->head->lba_shift = id->lbaf[lbaf].ds;
        ns->head->nuse = le64_to_cpu(id->nuse);
        capacity = nvme_lba_to_sect(ns->head, le64_to_cpu(id->nsze));
@@ -2112,13 +2120,8 @@ static int nvme_update_ns_info_block(struct nvme_ns *ns,
                capacity = 0;
        nvme_config_discard(ns, &lim);
        if (IS_ENABLED(CONFIG_BLK_DEV_ZONED) &&
-           ns->head->ids.csi == NVME_CSI_ZNS) {
-               ret = nvme_update_zone_info(ns, lbaf, &lim);
-               if (ret) {
-                       blk_mq_unfreeze_queue(ns->disk->queue);
-                       goto out;
-               }
-       }
+           ns->head->ids.csi == NVME_CSI_ZNS)
+               nvme_update_zone_info(ns, &lim, &zi);
        ret = queue_limits_commit_update(ns->disk->queue, &lim);
        if (ret) {
                blk_mq_unfreeze_queue(ns->disk->queue);
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index 24193fcb8bd584de277606d57738c1aea5a9cb49..d0ed64dc7380e51577bc6ece92db1d0a273905a7 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -1036,10 +1036,18 @@ static inline bool nvme_disk_is_ns_head(struct gendisk *disk)
 }
 #endif /* CONFIG_NVME_MULTIPATH */
 
+struct nvme_zone_info {
+       u64 zone_size;
+       unsigned int max_open_zones;
+       unsigned int max_active_zones;
+};
+
 int nvme_ns_report_zones(struct nvme_ns *ns, sector_t sector,
                unsigned int nr_zones, report_zones_cb cb, void *data);
-int nvme_update_zone_info(struct nvme_ns *ns, unsigned lbaf,
-               struct queue_limits *lim);
+int nvme_query_zone_info(struct nvme_ns *ns, unsigned lbaf,
+               struct nvme_zone_info *zi);
+void nvme_update_zone_info(struct nvme_ns *ns, struct queue_limits *lim,
+               struct nvme_zone_info *zi);
 #ifdef CONFIG_BLK_DEV_ZONED
 blk_status_t nvme_setup_zone_mgmt_send(struct nvme_ns *ns, struct request *req,
                                       struct nvme_command *cmnd,
diff --git a/drivers/nvme/host/zns.c b/drivers/nvme/host/zns.c
index 722384bcc765cda778972c8a86345eaaf18a7353..77aa0f440a6d2a5a538ad06e9cae9ae97c409aef 100644
--- a/drivers/nvme/host/zns.c
+++ b/drivers/nvme/host/zns.c
@@ -35,8 +35,8 @@ static int nvme_set_max_append(struct nvme_ctrl *ctrl)
        return 0;
 }
 
-int nvme_update_zone_info(struct nvme_ns *ns, unsigned lbaf,
-               struct queue_limits *lim)
+int nvme_query_zone_info(struct nvme_ns *ns, unsigned lbaf,
+               struct nvme_zone_info *zi)
 {
        struct nvme_effects_log *log = ns->head->effects;
        struct nvme_command c = { };
@@ -89,27 +89,34 @@ int nvme_update_zone_info(struct nvme_ns *ns, unsigned lbaf,
                goto free_data;
        }
 
-       ns->head->zsze =
-               nvme_lba_to_sect(ns->head, le64_to_cpu(id->lbafe[lbaf].zsze));
-       if (!is_power_of_2(ns->head->zsze)) {
+       zi->zone_size = le64_to_cpu(id->lbafe[lbaf].zsze);
+       if (!is_power_of_2(zi->zone_size)) {
                dev_warn(ns->ctrl->device,
-                       "invalid zone size:%llu for namespace:%u\n",
-                       ns->head->zsze, ns->head->ns_id);
+                       "invalid zone size: %llu for namespace: %u\n",
+                       zi->zone_size, ns->head->ns_id);
                status = -ENODEV;
                goto free_data;
        }
+       zi->max_open_zones = le32_to_cpu(id->mor) + 1;
+       zi->max_active_zones = le32_to_cpu(id->mar) + 1;
 
-       blk_queue_flag_set(QUEUE_FLAG_ZONE_RESETALL, ns->queue);
-       lim->zoned = 1;
-       lim->max_open_zones = le32_to_cpu(id->mor) + 1;
-       lim->max_active_zones = le32_to_cpu(id->mar) + 1;
-       lim->chunk_sectors = ns->head->zsze;
-       lim->max_zone_append_sectors = ns->ctrl->max_zone_append;
 free_data:
        kfree(id);
        return status;
 }
 
+void nvme_update_zone_info(struct nvme_ns *ns, struct queue_limits *lim,
+               struct nvme_zone_info *zi)
+{
+       lim->zoned = 1;
+       lim->max_open_zones = zi->max_open_zones;
+       lim->max_active_zones = zi->max_active_zones;
+       lim->max_zone_append_sectors = ns->ctrl->max_zone_append;
+       lim->chunk_sectors = ns->head->zsze =
+               nvme_lba_to_sect(ns->head, zi->zone_size);
+       blk_queue_flag_set(QUEUE_FLAG_ZONE_RESETALL, ns->queue);
+}
+
 static void *nvme_zns_alloc_report_buffer(struct nvme_ns *ns,
                                          unsigned int nr_zones, size_t *buflen)
 {