5c17fab8 (Greg Kroah-Hartman)
1 | From 07a57a338adb6ec9e766d6a6790f76527f45ceb5 Mon Sep 17 00:00:00 2001 |
2 | From: Oscar Salvador <osalvador@suse.de> | |
3 | Date: Sun, 7 Apr 2024 15:05:37 +0200 | |
4 | Subject: mm,swapops: update check in is_pfn_swap_entry for hwpoison entries | |
5 | ||
6 | From: Oscar Salvador <osalvador@suse.de> | |
7 | ||
8 | commit 07a57a338adb6ec9e766d6a6790f76527f45ceb5 upstream. | |
9 | ||
10 | Tony reported that the Machine check recovery was broken in v6.9-rc1, as | |
11 | he was hitting a VM_BUG_ON when injecting uncorrectable memory errors to | |
12 | DRAM. | |
13 | ||
14 | After some more digging and debugging on his side, he realized that this | |
15 | went back to v6.1, with the introduction of 'commit 0d206b5d2e0d | |
16 | ("mm/swap: add swp_offset_pfn() to fetch PFN from swap entry")'. That | |
17 | commit, among other things, introduced swp_offset_pfn(), replacing | |
18 | hwpoison_entry_to_pfn() in its favour. | |
19 | ||
20 | The patch also introduced a VM_BUG_ON() check for is_pfn_swap_entry(), but | |
21 | is_pfn_swap_entry() never got updated to cover hwpoison entries, which | |
22 | means that we would hit the VM_BUG_ON whenever we would call | |
23 | swp_offset_pfn() for such entries on environments with CONFIG_DEBUG_VM | |
24 | set. Fix this by updating the check to cover hwpoison entries as well, | |
25 | and update the comment while we are at it. |  |
26 | ||
27 | Link: https://lkml.kernel.org/r/20240407130537.16977-1-osalvador@suse.de | |
28 | Fixes: 0d206b5d2e0d ("mm/swap: add swp_offset_pfn() to fetch PFN from swap entry") | |
29 | Signed-off-by: Oscar Salvador <osalvador@suse.de> | |
30 | Reported-by: Tony Luck <tony.luck@intel.com> | |
31 | Closes: https://lore.kernel.org/all/Zg8kLSl2yAlA3o5D@agluck-desk3/ | |
32 | Tested-by: Tony Luck <tony.luck@intel.com> | |
33 | Reviewed-by: Peter Xu <peterx@redhat.com> | |
34 | Reviewed-by: David Hildenbrand <david@redhat.com> | |
35 | Acked-by: Miaohe Lin <linmiaohe@huawei.com> | |
36 | Cc: <stable@vger.kernel.org> [6.1.x] | |
37 | Signed-off-by: Andrew Morton <akpm@linux-foundation.org> | |
38 | Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org> | |
39 | --- | |
40 | include/linux/swapops.h | 65 ++++++++++++++++++++++++------------------------ | |
41 | 1 file changed, 33 insertions(+), 32 deletions(-) | |
42 | ||
43 | --- a/include/linux/swapops.h | |
44 | +++ b/include/linux/swapops.h | |
45 | @@ -390,6 +390,35 @@ static inline bool is_migration_entry_di | |
46 | } | |
47 | #endif /* CONFIG_MIGRATION */ | |
48 | ||
49 | +#ifdef CONFIG_MEMORY_FAILURE | |
50 | + | |
51 | +/* | |
52 | + * Support for hardware poisoned pages | |
53 | + */ | |
54 | +static inline swp_entry_t make_hwpoison_entry(struct page *page) | |
55 | +{ | |
56 | + BUG_ON(!PageLocked(page)); | |
57 | + return swp_entry(SWP_HWPOISON, page_to_pfn(page)); | |
58 | +} | |
59 | + | |
60 | +static inline int is_hwpoison_entry(swp_entry_t entry) | |
61 | +{ | |
62 | + return swp_type(entry) == SWP_HWPOISON; | |
63 | +} | |
64 | + | |
65 | +#else | |
66 | + | |
67 | +static inline swp_entry_t make_hwpoison_entry(struct page *page) | |
68 | +{ | |
69 | + return swp_entry(0, 0); | |
70 | +} | |
71 | + | |
72 | +static inline int is_hwpoison_entry(swp_entry_t swp) | |
73 | +{ | |
74 | + return 0; | |
75 | +} | |
76 | +#endif | |
77 | + | |
78 | typedef unsigned long pte_marker; | |
79 | ||
80 | #define PTE_MARKER_UFFD_WP BIT(0) | |
81 | @@ -470,8 +499,9 @@ static inline struct page *pfn_swap_entr | |
82 | ||
83 | /* | |
84 | * A pfn swap entry is a special type of swap entry that always has a pfn stored | |
85 | - * in the swap offset. They are used to represent unaddressable device memory | |
86 | - * and to restrict access to a page undergoing migration. | |
87 | + * in the swap offset. They can either be used to represent unaddressable device | |
88 | + * memory, to restrict access to a page undergoing migration or to represent a | |
89 | + * pfn which has been hwpoisoned and unmapped. | |
90 | */ | |
91 | static inline bool is_pfn_swap_entry(swp_entry_t entry) | |
92 | { | |
93 | @@ -479,7 +509,7 @@ static inline bool is_pfn_swap_entry(swp | |
94 | BUILD_BUG_ON(SWP_TYPE_SHIFT < SWP_PFN_BITS); | |
95 | ||
96 | return is_migration_entry(entry) || is_device_private_entry(entry) || | |
97 | - is_device_exclusive_entry(entry); | |
98 | + is_device_exclusive_entry(entry) || is_hwpoison_entry(entry); | |
99 | } | |
100 | ||
101 | struct page_vma_mapped_walk; | |
102 | @@ -548,35 +578,6 @@ static inline int is_pmd_migration_entry | |
103 | } | |
104 | #endif /* CONFIG_ARCH_ENABLE_THP_MIGRATION */ | |
105 | ||
106 | -#ifdef CONFIG_MEMORY_FAILURE | |
107 | - | |
108 | -/* | |
109 | - * Support for hardware poisoned pages | |
110 | - */ | |
111 | -static inline swp_entry_t make_hwpoison_entry(struct page *page) | |
112 | -{ | |
113 | - BUG_ON(!PageLocked(page)); | |
114 | - return swp_entry(SWP_HWPOISON, page_to_pfn(page)); | |
115 | -} | |
116 | - | |
117 | -static inline int is_hwpoison_entry(swp_entry_t entry) | |
118 | -{ | |
119 | - return swp_type(entry) == SWP_HWPOISON; | |
120 | -} | |
121 | - | |
122 | -#else | |
123 | - | |
124 | -static inline swp_entry_t make_hwpoison_entry(struct page *page) | |
125 | -{ | |
126 | - return swp_entry(0, 0); | |
127 | -} | |
128 | - | |
129 | -static inline int is_hwpoison_entry(swp_entry_t swp) | |
130 | -{ | |
131 | - return 0; | |
132 | -} | |
133 | -#endif | |
134 | - | |
135 | static inline int non_swap_entry(swp_entry_t entry) | |
136 | { | |
137 | return swp_type(entry) >= MAX_SWAPFILES; |