From e4fcf07cca6a3b6c4be00df16f08be894325eaa3 Mon Sep 17 00:00:00 2001
From: Solganik Alexander <sashas@lightbitslabs.com>
Date: Sun, 30 Oct 2016 10:35:15 +0200
Subject: nvmet: Fix possible infinite loop triggered on hot namespace removal

From: Solganik Alexander <sashas@lightbitslabs.com>

commit e4fcf07cca6a3b6c4be00df16f08be894325eaa3 upstream.

When removing a namespace we delete it from the subsystem namespaces
list with list_del_init, which also lets us know whether it is enabled
or not.

The problem is that list_del_init reinitializes the entry's next
pointer and does not respect the RCU list traversal we do on the I/O
path to locate a namespace. Instead we need to use list_del_rcu, which
may run concurrently with the _rcu list-traversal primitives (it keeps
the next pointer intact) and guarantees forward progress for a
concurrent nvmet_find_namespace.

With that change we can no longer rely on ns->dev_link to tell whether
the namespace is enabled, so add an enabled indicator to nvmet_ns for
that.

Signed-off-by: Sagi Grimberg <sagi@grimberg.me>
Signed-off-by: Solganik Alexander <sashas@lightbitslabs.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

---
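[ Note added for this write-up, not part of the upstream changelog: a
  minimal, self-contained kernel-style sketch of the RCU list rule the
  fix relies on. The demo_* names are invented for illustration only;
  just the primitives (list_for_each_entry_rcu, list_del_rcu,
  list_del_init, synchronize_rcu) are real kernel APIs. ]

#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/types.h>

struct demo_ns {
	u32			nsid;
	struct list_head	link;	/* entry on demo_list */
};

static LIST_HEAD(demo_list);
static DEFINE_MUTEX(demo_lock);

/* Lockless reader, analogous to the _rcu traversal on the I/O path. */
static bool demo_ns_exists(u32 nsid)
{
	struct demo_ns *ns;
	bool found = false;

	rcu_read_lock();
	list_for_each_entry_rcu(ns, &demo_list, link) {
		if (ns->nsid == nsid) {
			found = true;
			break;
		}
	}
	rcu_read_unlock();
	return found;
}

/* Writer side: may run concurrently with demo_ns_exists(). */
static void demo_ns_remove(struct demo_ns *ns)
{
	mutex_lock(&demo_lock);
	/*
	 * list_del_rcu() unlinks the entry but leaves ns->link.next
	 * pointing into the list, so a reader currently standing on the
	 * entry still reaches the list head and terminates.
	 * list_del_init() would instead reinitialize the entry to point
	 * to itself, and a concurrent list_for_each_entry_rcu() walker
	 * could spin on it forever -- the infinite loop in the subject.
	 */
	list_del_rcu(&ns->link);
	mutex_unlock(&demo_lock);

	/* Wait for in-flight readers before freeing the entry. */
	synchronize_rcu();
	kfree(ns);
}

[ End of note. ]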
 drivers/nvme/target/configfs.c |    6 +++---
 drivers/nvme/target/core.c     |   14 ++++++++------
 drivers/nvme/target/nvmet.h    |    6 +-----
 3 files changed, 12 insertions(+), 14 deletions(-)

--- a/drivers/nvme/target/configfs.c
+++ b/drivers/nvme/target/configfs.c
@@ -271,7 +271,7 @@ static ssize_t nvmet_ns_device_path_stor
 
 	mutex_lock(&subsys->lock);
 	ret = -EBUSY;
-	if (nvmet_ns_enabled(ns))
+	if (ns->enabled)
 		goto out_unlock;
 
 	kfree(ns->device_path);
@@ -307,7 +307,7 @@ static ssize_t nvmet_ns_device_nguid_sto
 	int ret = 0;
 
 	mutex_lock(&subsys->lock);
-	if (nvmet_ns_enabled(ns)) {
+	if (ns->enabled) {
 		ret = -EBUSY;
 		goto out_unlock;
 	}
@@ -339,7 +339,7 @@ CONFIGFS_ATTR(nvmet_ns_, device_nguid);
 
 static ssize_t nvmet_ns_enable_show(struct config_item *item, char *page)
 {
-	return sprintf(page, "%d\n", nvmet_ns_enabled(to_nvmet_ns(item)));
+	return sprintf(page, "%d\n", to_nvmet_ns(item)->enabled);
 }
 
 static ssize_t nvmet_ns_enable_store(struct config_item *item,
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -264,7 +264,7 @@ int nvmet_ns_enable(struct nvmet_ns *ns)
 	int ret = 0;
 
 	mutex_lock(&subsys->lock);
-	if (!list_empty(&ns->dev_link))
+	if (ns->enabled)
 		goto out_unlock;
 
 	ns->bdev = blkdev_get_by_path(ns->device_path, FMODE_READ | FMODE_WRITE,
@@ -309,6 +309,7 @@ int nvmet_ns_enable(struct nvmet_ns *ns)
 	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
 		nvmet_add_async_event(ctrl, NVME_AER_TYPE_NOTICE, 0, 0);
 
+	ns->enabled = true;
 	ret = 0;
 out_unlock:
 	mutex_unlock(&subsys->lock);
@@ -325,11 +326,11 @@ void nvmet_ns_disable(struct nvmet_ns *n
 	struct nvmet_ctrl *ctrl;
 
 	mutex_lock(&subsys->lock);
-	if (list_empty(&ns->dev_link)) {
-		mutex_unlock(&subsys->lock);
-		return;
-	}
-	list_del_init(&ns->dev_link);
+	if (!ns->enabled)
+		goto out_unlock;
+
+	ns->enabled = false;
+	list_del_rcu(&ns->dev_link);
 	mutex_unlock(&subsys->lock);
 
 	/*
@@ -351,6 +352,7 @@ void nvmet_ns_disable(struct nvmet_ns *n
 
 	if (ns->bdev)
 		blkdev_put(ns->bdev, FMODE_WRITE|FMODE_READ);
+out_unlock:
 	mutex_unlock(&subsys->lock);
 }
 
--- a/drivers/nvme/target/nvmet.h
+++ b/drivers/nvme/target/nvmet.h
@@ -47,6 +47,7 @@ struct nvmet_ns {
 	loff_t		size;
 	u8		nguid[16];
 
+	bool		enabled;
 	struct nvmet_subsys	*subsys;
 	const char	*device_path;
 
@@ -61,11 +62,6 @@ static inline struct nvmet_ns *to_nvmet_
 	return container_of(to_config_group(item), struct nvmet_ns, group);
 }
 
-static inline bool nvmet_ns_enabled(struct nvmet_ns *ns)
-{
-	return !list_empty_careful(&ns->dev_link);
-}
-
 struct nvmet_cq {
 	u16 qid;
 	u16 size;