From fa7d2e639cd90442d868dfc6ca1d4cc9d8bf206e Mon Sep 17 00:00:00 2001
From: Dan Williams <dan.j.williams@intel.com>
Date: Thu, 24 Jan 2019 17:33:06 -0800
Subject: libnvdimm/pmem: Honor force_raw for legacy pmem regions

From: Dan Williams <dan.j.williams@intel.com>

commit fa7d2e639cd90442d868dfc6ca1d4cc9d8bf206e upstream.

For recovery, where non-dax access is needed to a given physical address
range, and testing, allow the 'force_raw' attribute to override the
default establishment of a dev_pagemap.

Otherwise, without this capability, it is possible to end up with a
namespace that cannot be activated due to a corrupted info-block, and
one that cannot be repaired due to a section collision.
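
For reference, force_raw is exposed as a writable sysfs attribute on the
namespace device. A minimal recovery sketch, assuming the affected
namespace is named namespace0.0 (substitute the real device; the repair
step itself depends on the nature of the corruption):

  # detach the namespace, force raw (non-dax) access, re-attach
  ndctl disable-namespace namespace0.0
  echo 1 > /sys/bus/nd/devices/namespace0.0/force_raw
  ndctl enable-namespace namespace0.0
  # the namespace now attaches as a plain /dev/pmemX block device with
  # no dev_pagemap, so the corrupted info-block can be inspected or
  # rewritten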

Cc: <stable@vger.kernel.org>
Fixes: 004f1afbe199 ("libnvdimm, pmem: direct map legacy pmem by default")
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

---
 drivers/nvdimm/namespace_devs.c |    4 ++++
 1 file changed, 4 insertions(+)

--- a/drivers/nvdimm/namespace_devs.c
+++ b/drivers/nvdimm/namespace_devs.c
@@ -138,6 +138,7 @@ bool nd_is_uuid_unique(struct device *de
 bool pmem_should_map_pages(struct device *dev)
 {
 	struct nd_region *nd_region = to_nd_region(dev->parent);
+	struct nd_namespace_common *ndns = to_ndns(dev);
 	struct nd_namespace_io *nsio;
 
 	if (!IS_ENABLED(CONFIG_ZONE_DEVICE))
@@ -149,6 +150,9 @@ bool pmem_should_map_pages(struct device
 	if (is_nd_pfn(dev) || is_nd_btt(dev))
 		return false;
 
+	if (ndns->force_raw)
+		return false;
+
 	nsio = to_nd_namespace_io(dev);
 	if (region_intersects(nsio->res.start, resource_size(&nsio->res),
 				IORESOURCE_SYSTEM_RAM,