From ae86cbfef3818300f1972e52f67a93211acb0e24 Mon Sep 17 00:00:00 2001
From: Dan Williams <dan.j.williams@intel.com>
Date: Sat, 24 Nov 2018 10:47:04 -0800
Subject: libnvdimm, pfn: Pad pfn namespaces relative to other regions

From: Dan Williams <dan.j.williams@intel.com>

commit ae86cbfef3818300f1972e52f67a93211acb0e24 upstream.

Commit cfe30b872058 "libnvdimm, pmem: adjust for section collisions with
'System RAM'" enabled Linux to workaround occasions where platform
firmware arranges for "System RAM" and "Persistent Memory" to collide
within a single section boundary. Unfortunately, as reported in this
issue [1], platform firmware can inflict the same collision between
persistent memory regions.

The approach of interrogating iomem_resource does not work in this
case because platform firmware may merge multiple regions into a single
iomem_resource range. Instead provide a method to interrogate regions
that share the same parent bus.

This is a stop-gap until the core-MM can grow support for hotplug on
sub-section boundaries.

[1]: https://github.com/pmem/ndctl/issues/76

Fixes: cfe30b872058 ("libnvdimm, pmem: adjust for section collisions with...")
Cc: <stable@vger.kernel.org>
Reported-by: Patrick Geary <patrickg@supermicro.com>
Tested-by: Patrick Geary <patrickg@supermicro.com>
Reviewed-by: Vishal Verma <vishal.l.verma@intel.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

---
 drivers/nvdimm/nd-core.h     |    2 +
 drivers/nvdimm/pfn_devs.c    |   64 ++++++++++++++++++++++++-------------------
 drivers/nvdimm/region_devs.c |   41 +++++++++++++++++++++++++++
 3 files changed, 80 insertions(+), 27 deletions(-)
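
The nd_region_conflict() helper added below boils down to a pair of
half-open interval checks against every sibling region on the same bus.
A minimal userspace sketch of that overlap test, assuming fixed-width
types and made-up example addresses (illustration only, not kernel code):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Hypothetical stand-in for the checks in region_conflict() below: a
 * candidate range [start, start + size) conflicts with a region when
 * either of its endpoints lands inside [region_start, region_end).
 */
static bool ranges_conflict(uint64_t start, uint64_t size,
                            uint64_t region_start, uint64_t region_size)
{
        uint64_t res_end = start + size;
        uint64_t region_end = region_start + region_size;

        if (start >= region_start && start < region_end)
                return true;    /* candidate start falls inside the region */
        if (res_end > region_start && res_end <= region_end)
                return true;    /* candidate end falls inside the region */
        return false;
}

int main(void)
{
        /* A 2 MiB probe whose tail crosses into a neighbouring 128 MiB region. */
        uint64_t neighbour_start = 0x240000000ULL;      /* assumed address */
        uint64_t neighbour_size = 128ULL << 20;

        printf("%d\n", ranges_conflict(0x23ff00000ULL, 2ULL << 20,
                                       neighbour_start, neighbour_size));  /* prints 1 */
        return 0;
}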

--- a/drivers/nvdimm/nd-core.h
+++ b/drivers/nvdimm/nd-core.h
@@ -112,6 +112,8 @@ resource_size_t nd_pmem_available_dpa(st
                 struct nd_mapping *nd_mapping, resource_size_t *overlap);
 resource_size_t nd_blk_available_dpa(struct nd_region *nd_region);
 resource_size_t nd_region_available_dpa(struct nd_region *nd_region);
+int nd_region_conflict(struct nd_region *nd_region, resource_size_t start,
+                resource_size_t size);
 resource_size_t nvdimm_allocated_dpa(struct nvdimm_drvdata *ndd,
                 struct nd_label_id *label_id);
 int alias_dpa_busy(struct device *dev, void *data);
--- a/drivers/nvdimm/pfn_devs.c
+++ b/drivers/nvdimm/pfn_devs.c
@@ -590,14 +590,47 @@ static u64 phys_pmem_align_down(struct n
                         ALIGN_DOWN(phys, nd_pfn->align));
 }
 
+/*
+ * Check if pmem collides with 'System RAM', or other regions when
+ * section aligned. Trim it accordingly.
+ */
+static void trim_pfn_device(struct nd_pfn *nd_pfn, u32 *start_pad, u32 *end_trunc)
+{
+        struct nd_namespace_common *ndns = nd_pfn->ndns;
+        struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
+        struct nd_region *nd_region = to_nd_region(nd_pfn->dev.parent);
+        const resource_size_t start = nsio->res.start;
+        const resource_size_t end = start + resource_size(&nsio->res);
+        resource_size_t adjust, size;
+
+        *start_pad = 0;
+        *end_trunc = 0;
+
+        adjust = start - PHYS_SECTION_ALIGN_DOWN(start);
+        size = resource_size(&nsio->res) + adjust;
+        if (region_intersects(start - adjust, size, IORESOURCE_SYSTEM_RAM,
+                                IORES_DESC_NONE) == REGION_MIXED
+                        || nd_region_conflict(nd_region, start - adjust, size))
+                *start_pad = PHYS_SECTION_ALIGN_UP(start) - start;
+
+        /* Now check that end of the range does not collide. */
+        adjust = PHYS_SECTION_ALIGN_UP(end) - end;
+        size = resource_size(&nsio->res) + adjust;
+        if (region_intersects(start, size, IORESOURCE_SYSTEM_RAM,
+                                IORES_DESC_NONE) == REGION_MIXED
+                        || !IS_ALIGNED(end, nd_pfn->align)
+                        || nd_region_conflict(nd_region, start, size + adjust))
+                *end_trunc = end - phys_pmem_align_down(nd_pfn, end);
+}
+
 static int nd_pfn_init(struct nd_pfn *nd_pfn)
 {
         u32 dax_label_reserve = is_nd_dax(&nd_pfn->dev) ? SZ_128K : 0;
         struct nd_namespace_common *ndns = nd_pfn->ndns;
-        u32 start_pad = 0, end_trunc = 0;
+        struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
         resource_size_t start, size;
-        struct nd_namespace_io *nsio;
         struct nd_region *nd_region;
+        u32 start_pad, end_trunc;
         struct nd_pfn_sb *pfn_sb;
         unsigned long npfns;
         phys_addr_t offset;
@@ -629,30 +662,7 @@ static int nd_pfn_init(struct nd_pfn *nd
 
         memset(pfn_sb, 0, sizeof(*pfn_sb));
 
-        /*
-         * Check if pmem collides with 'System RAM' when section aligned and
-         * trim it accordingly
-         */
-        nsio = to_nd_namespace_io(&ndns->dev);
-        start = PHYS_SECTION_ALIGN_DOWN(nsio->res.start);
-        size = resource_size(&nsio->res);
-        if (region_intersects(start, size, IORESOURCE_SYSTEM_RAM,
-                        IORES_DESC_NONE) == REGION_MIXED) {
-                start = nsio->res.start;
-                start_pad = PHYS_SECTION_ALIGN_UP(start) - start;
-        }
-
-        start = nsio->res.start;
-        size = PHYS_SECTION_ALIGN_UP(start + size) - start;
-        if (region_intersects(start, size, IORESOURCE_SYSTEM_RAM,
-                        IORES_DESC_NONE) == REGION_MIXED
-                        || !IS_ALIGNED(start + resource_size(&nsio->res),
-                                nd_pfn->align)) {
-                size = resource_size(&nsio->res);
-                end_trunc = start + size - phys_pmem_align_down(nd_pfn,
-                                start + size);
-        }
-
+        trim_pfn_device(nd_pfn, &start_pad, &end_trunc);
         if (start_pad + end_trunc)
                 dev_info(&nd_pfn->dev, "%s alignment collision, truncate %d bytes\n",
                                 dev_name(&ndns->dev), start_pad + end_trunc);
@@ -663,7 +673,7 @@ static int nd_pfn_init(struct nd_pfn *nd
          * implementation will limit the pfns advertised through
          * ->direct_access() to those that are included in the memmap.
          */
-        start += start_pad;
+        start = nsio->res.start + start_pad;
         size = resource_size(&nsio->res);
         npfns = PFN_SECTION_ALIGN_UP((size - start_pad - end_trunc - SZ_8K)
                         / PAGE_SIZE);
--- a/drivers/nvdimm/region_devs.c
+++ b/drivers/nvdimm/region_devs.c
@@ -1184,6 +1184,47 @@ int nvdimm_has_cache(struct nd_region *n
 }
 EXPORT_SYMBOL_GPL(nvdimm_has_cache);
 
+struct conflict_context {
+        struct nd_region *nd_region;
+        resource_size_t start, size;
+};
+
+static int region_conflict(struct device *dev, void *data)
+{
+        struct nd_region *nd_region;
+        struct conflict_context *ctx = data;
+        resource_size_t res_end, region_end, region_start;
+
+        if (!is_memory(dev))
+                return 0;
+
+        nd_region = to_nd_region(dev);
+        if (nd_region == ctx->nd_region)
+                return 0;
+
+        res_end = ctx->start + ctx->size;
+        region_start = nd_region->ndr_start;
+        region_end = region_start + nd_region->ndr_size;
+        if (ctx->start >= region_start && ctx->start < region_end)
+                return -EBUSY;
+        if (res_end > region_start && res_end <= region_end)
+                return -EBUSY;
+        return 0;
+}
+
+int nd_region_conflict(struct nd_region *nd_region, resource_size_t start,
+                resource_size_t size)
+{
+        struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(&nd_region->dev);
+        struct conflict_context ctx = {
+                .nd_region = nd_region,
+                .start = start,
+                .size = size,
+        };
+
+        return device_for_each_child(&nvdimm_bus->dev, &ctx, region_conflict);
+}
+
 void __exit nd_region_devs_exit(void)
 {
         ida_destroy(&region_ida);
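
The trim_pfn_device() arithmetic above reduces to rounding: when the
section containing the namespace start also holds "System RAM" or another
region, the start is padded up to the next section boundary, and the end
is truncated down analogously. A standalone sketch of that rounding,
assuming the common x86_64 section size of 128 MiB and a made-up namespace
range (the kernel derives these values from PHYS_SECTION_ALIGN_UP/DOWN and
additionally honours nd_pfn->align via phys_pmem_align_down()):

#include <stdint.h>
#include <stdio.h>

/* Assumed section size: SECTION_SIZE_BITS is 27 on x86_64, i.e. 128 MiB. */
#define SECTION_SIZE    (1ULL << 27)

static uint64_t align_down(uint64_t x, uint64_t a) { return x & ~(a - 1); }
static uint64_t align_up(uint64_t x, uint64_t a) { return align_down(x + a - 1, a); }

int main(void)
{
        /* Illustrative namespace that starts and ends off a section boundary. */
        uint64_t start = 0x240004000ULL;
        uint64_t size = (512ULL << 20) - 0x8000;
        uint64_t end = start + size;

        /* Pad the start up and truncate the end down to section boundaries. */
        uint32_t start_pad = (uint32_t)(align_up(start, SECTION_SIZE) - start);
        uint32_t end_trunc = (uint32_t)(end - align_down(end, SECTION_SIZE));

        printf("start_pad = %u bytes, end_trunc = %u bytes\n", start_pad, end_trunc);
        return 0;
}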