nvme-multipath: Add visibility for queue-depth io-policy
author     Nilay Shroff <nilay@linux.ibm.com>
           Sun, 12 Jan 2025 12:41:46 +0000 (18:11 +0530)
committer  Keith Busch <kbusch@kernel.org>
           Thu, 20 Mar 2025 23:53:55 +0000 (16:53 -0700)
This patch adds nvme native multipath visibility for the queue-depth
io-policy. It adds a new attribute file named "queue_depth" under each
namespace device path node, which prints the number of active/in-flight
I/O requests currently queued for the given path.

For instance, if we have a shared namespace accessible from two different
controllers/paths, then listing the head block node of the shared namespace
shows the following output:

$ ls -l /sys/block/nvme1n1/multipath/
nvme1c1n1 -> ../../../../../pci052e:78/052e:78:00.0/nvme/nvme1/nvme1c1n1
nvme1c3n1 -> ../../../../../pci058e:78/058e:78:00.0/nvme/nvme3/nvme1c3n1

In the above example, nvme1n1 is the head gendisk node created for a shared
namespace, and the namespace is accessible through the nvme1c1n1 and
nvme1c3n1 paths. For the queue-depth io-policy we can then refer to the
"queue_depth" attribute file created under each namespace path:

$ cat /sys/block/nvme1n1/multipath/nvme1c1n1/queue_depth
518

$ cat /sys/block/nvme1n1/multipath/nvme1c3n1/queue_depth
504

From the above output, we can infer that the I/O workload targeted at nvme1n1
uses the two paths nvme1c1n1 and nvme1c3n1, and that their current queue
depths are 518 and 504 respectively. Reading the "queue_depth" file when the
configured io-policy is anything other than queue-depth shows no output.
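
The io-policy itself is selected per subsystem through the existing
"iopolicy" attribute (subsys_attr_iopolicy in the diff below), so a full
check could look like the following sketch; the subsystem instance name
nvme-subsys1 is only an example and will differ per system:

$ echo "queue-depth" > /sys/class/nvme-subsystem/nvme-subsys1/iopolicy
$ cat /sys/class/nvme-subsystem/nvme-subsys1/iopolicy
queue-depth
$ cat /sys/block/nvme1n1/multipath/nvme1c1n1/queue_depth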

Reviewed-by: Sagi Grimberg <sagi@grimberg.me>
Reviewed-by: Hannes Reinecke <hare@suse.de>
Signed-off-by: Nilay Shroff <nilay@linux.ibm.com>
Signed-off-by: Keith Busch <kbusch@kernel.org>
drivers/nvme/host/multipath.c
drivers/nvme/host/nvme.h
drivers/nvme/host/sysfs.c

diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
index b47e01942ca4a659dd6bc0e59ef05a1f6fba02cf..6b12ca80aa27305a759990348c4c107655664d94 100644
--- a/drivers/nvme/host/multipath.c
+++ b/drivers/nvme/host/multipath.c
@@ -976,6 +976,18 @@ static ssize_t ana_state_show(struct device *dev, struct device_attribute *attr,
 }
 DEVICE_ATTR_RO(ana_state);
 
+static ssize_t queue_depth_show(struct device *dev,
+               struct device_attribute *attr, char *buf)
+{
+       struct nvme_ns *ns = nvme_get_ns_from_dev(dev);
+
+       if (ns->head->subsys->iopolicy != NVME_IOPOLICY_QD)
+               return 0;
+
+       return sysfs_emit(buf, "%d\n", atomic_read(&ns->ctrl->nr_active));
+}
+DEVICE_ATTR_RO(queue_depth);
+
 static ssize_t numa_nodes_show(struct device *dev, struct device_attribute *attr,
                char *buf)
 {
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index ff4bd0c2e401e71241c8487795272728b5ad2dae..51e07864212710d8d0abca707b6afa89757094c1 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -984,6 +984,7 @@ static inline void nvme_trace_bio_complete(struct request *req)
 extern bool multipath;
 extern struct device_attribute dev_attr_ana_grpid;
 extern struct device_attribute dev_attr_ana_state;
+extern struct device_attribute dev_attr_queue_depth;
 extern struct device_attribute dev_attr_numa_nodes;
 extern struct device_attribute subsys_attr_iopolicy;
 
diff --git a/drivers/nvme/host/sysfs.c b/drivers/nvme/host/sysfs.c
index 96eaecda0ab915fac7d530409460547420afbf1b..6d31226f7a4f8419fd69a2857952e4085dc27aa2 100644
--- a/drivers/nvme/host/sysfs.c
+++ b/drivers/nvme/host/sysfs.c
@@ -258,6 +258,7 @@ static struct attribute *nvme_ns_attrs[] = {
 #ifdef CONFIG_NVME_MULTIPATH
        &dev_attr_ana_grpid.attr,
        &dev_attr_ana_state.attr,
+       &dev_attr_queue_depth.attr,
        &dev_attr_numa_nodes.attr,
 #endif
        &dev_attr_io_passthru_err_log_enabled.attr,
@@ -291,7 +292,7 @@ static umode_t nvme_ns_attrs_are_visible(struct kobject *kobj,
                if (!nvme_ctrl_use_ana(nvme_get_ns_from_dev(dev)->ctrl))
                        return 0;
        }
-       if (a == &dev_attr_numa_nodes.attr) {
+       if (a == &dev_attr_queue_depth.attr || a == &dev_attr_numa_nodes.attr) {
                if (nvme_disk_is_ns_head(dev_to_disk(dev)))
                        return 0;
        }