]>
Commit | Line | Data |
---|---|---|
9b68cb68 GKH |
1 | From 785a19f9d1dd8a4ab2d0633be4656653bd3de1fc Mon Sep 17 00:00:00 2001 |
2 | From: Chintan Pandya <cpandya@codeaurora.org> | |
3 | Date: Wed, 27 Jun 2018 08:13:47 -0600 | |
4 | Subject: ioremap: Update pgtable free interfaces with addr | |
5 | ||
6 | From: Chintan Pandya <cpandya@codeaurora.org> | |
7 | ||
8 | commit 785a19f9d1dd8a4ab2d0633be4656653bd3de1fc upstream. | |
9 | ||
10 | The following kernel panic was observed on ARM64 platform due to a stale | |
11 | TLB entry. | |
12 | ||
13 | 1. ioremap with 4K size, a valid pte page table is set. | |
14 | 2. iounmap it, its pte entry is set to 0. | |
15 | 3. ioremap the same address with 2M size, update its pmd entry with | |
16 | a new value. | |
17 | 4. CPU may hit an exception because the old pmd entry is still in TLB, | |
18 | which leads to a kernel panic. | |
19 | ||
20 | Commit b6bdb7517c3d ("mm/vmalloc: add interfaces to free unmapped page | |
21 | table") has addressed this panic by falling to pte mappings in the above | |
22 | case on ARM64. | |
23 | ||
24 | To support pmd mappings in all cases, TLB purge needs to be performed | |
25 | in this case on ARM64. | |
26 | ||
27 | Add a new arg, 'addr', to pud_free_pmd_page() and pmd_free_pte_page() | |
28 | so that TLB purge can be added later in separate patches. | |
29 | ||
30 | [toshi.kani@hpe.com: merge changes, rewrite patch description] | |
31 | Fixes: 28ee90fe6048 ("x86/mm: implement free pmd/pte page interfaces") | |
32 | Signed-off-by: Chintan Pandya <cpandya@codeaurora.org> | |
33 | Signed-off-by: Toshi Kani <toshi.kani@hpe.com> | |
34 | Signed-off-by: Thomas Gleixner <tglx@linutronix.de> | |
35 | Cc: mhocko@suse.com | |
36 | Cc: akpm@linux-foundation.org | |
37 | Cc: hpa@zytor.com | |
38 | Cc: linux-mm@kvack.org | |
39 | Cc: linux-arm-kernel@lists.infradead.org | |
40 | Cc: Will Deacon <will.deacon@arm.com> | |
41 | Cc: Joerg Roedel <joro@8bytes.org> | |
42 | Cc: stable@vger.kernel.org | |
43 | Cc: Andrew Morton <akpm@linux-foundation.org> | |
44 | Cc: Michal Hocko <mhocko@suse.com> | |
45 | Cc: "H. Peter Anvin" <hpa@zytor.com> | |
46 | Cc: <stable@vger.kernel.org> | |
47 | Link: https://lkml.kernel.org/r/20180627141348.21777-3-toshi.kani@hpe.com | |
48 | Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org> | |
49 | ||
50 | --- | |
51 | arch/arm64/mm/mmu.c | 4 ++-- | |
52 | arch/x86/mm/pgtable.c | 12 +++++++----- | |
53 | include/asm-generic/pgtable.h | 8 ++++---- | |
54 | lib/ioremap.c | 4 ++-- | |
55 | 4 files changed, 15 insertions(+), 13 deletions(-) | |
56 | ||
57 | --- a/arch/arm64/mm/mmu.c | |
58 | +++ b/arch/arm64/mm/mmu.c | |
59 | @@ -938,12 +938,12 @@ int pmd_clear_huge(pmd_t *pmd) | |
60 | return 1; | |
61 | } | |
62 | ||
63 | -int pud_free_pmd_page(pud_t *pud) | |
64 | +int pud_free_pmd_page(pud_t *pud, unsigned long addr) | |
65 | { | |
66 | return pud_none(*pud); | |
67 | } | |
68 | ||
69 | -int pmd_free_pte_page(pmd_t *pmd) | |
70 | +int pmd_free_pte_page(pmd_t *pmd, unsigned long addr) | |
71 | { | |
72 | return pmd_none(*pmd); | |
73 | } | |
74 | --- a/arch/x86/mm/pgtable.c | |
75 | +++ b/arch/x86/mm/pgtable.c | |
76 | @@ -716,11 +716,12 @@ int pmd_clear_huge(pmd_t *pmd) | |
77 | /** | |
78 | * pud_free_pmd_page - Clear pud entry and free pmd page. | |
79 | * @pud: Pointer to a PUD. | |
80 | + * @addr: Virtual address associated with pud. | |
81 | * | |
82 | * Context: The pud range has been unmaped and TLB purged. | |
83 | * Return: 1 if clearing the entry succeeded. 0 otherwise. | |
84 | */ | |
85 | -int pud_free_pmd_page(pud_t *pud) | |
86 | +int pud_free_pmd_page(pud_t *pud, unsigned long addr) | |
87 | { | |
88 | pmd_t *pmd; | |
89 | int i; | |
90 | @@ -731,7 +732,7 @@ int pud_free_pmd_page(pud_t *pud) | |
91 | pmd = (pmd_t *)pud_page_vaddr(*pud); | |
92 | ||
93 | for (i = 0; i < PTRS_PER_PMD; i++) | |
94 | - if (!pmd_free_pte_page(&pmd[i])) | |
95 | + if (!pmd_free_pte_page(&pmd[i], addr + (i * PMD_SIZE))) | |
96 | return 0; | |
97 | ||
98 | pud_clear(pud); | |
99 | @@ -743,11 +744,12 @@ int pud_free_pmd_page(pud_t *pud) | |
100 | /** | |
101 | * pmd_free_pte_page - Clear pmd entry and free pte page. | |
102 | * @pmd: Pointer to a PMD. | |
103 | + * @addr: Virtual address associated with pmd. | |
104 | * | |
105 | * Context: The pmd range has been unmaped and TLB purged. | |
106 | * Return: 1 if clearing the entry succeeded. 0 otherwise. | |
107 | */ | |
108 | -int pmd_free_pte_page(pmd_t *pmd) | |
109 | +int pmd_free_pte_page(pmd_t *pmd, unsigned long addr) | |
110 | { | |
111 | pte_t *pte; | |
112 | ||
113 | @@ -763,7 +765,7 @@ int pmd_free_pte_page(pmd_t *pmd) | |
114 | ||
115 | #else /* !CONFIG_X86_64 */ | |
116 | ||
117 | -int pud_free_pmd_page(pud_t *pud) | |
118 | +int pud_free_pmd_page(pud_t *pud, unsigned long addr) | |
119 | { | |
120 | return pud_none(*pud); | |
121 | } | |
122 | @@ -772,7 +774,7 @@ int pud_free_pmd_page(pud_t *pud) | |
123 | * Disable free page handling on x86-PAE. This assures that ioremap() | |
124 | * does not update sync'd pmd entries. See vmalloc_sync_one(). | |
125 | */ | |
126 | -int pmd_free_pte_page(pmd_t *pmd) | |
127 | +int pmd_free_pte_page(pmd_t *pmd, unsigned long addr) | |
128 | { | |
129 | return pmd_none(*pmd); | |
130 | } | |
131 | --- a/include/asm-generic/pgtable.h | |
132 | +++ b/include/asm-generic/pgtable.h | |
133 | @@ -991,8 +991,8 @@ int pud_set_huge(pud_t *pud, phys_addr_t | |
134 | int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot); | |
135 | int pud_clear_huge(pud_t *pud); | |
136 | int pmd_clear_huge(pmd_t *pmd); | |
137 | -int pud_free_pmd_page(pud_t *pud); | |
138 | -int pmd_free_pte_page(pmd_t *pmd); | |
139 | +int pud_free_pmd_page(pud_t *pud, unsigned long addr); | |
140 | +int pmd_free_pte_page(pmd_t *pmd, unsigned long addr); | |
141 | #else /* !CONFIG_HAVE_ARCH_HUGE_VMAP */ | |
142 | static inline int p4d_set_huge(p4d_t *p4d, phys_addr_t addr, pgprot_t prot) | |
143 | { | |
144 | @@ -1018,11 +1018,11 @@ static inline int pmd_clear_huge(pmd_t * | |
145 | { | |
146 | return 0; | |
147 | } | |
148 | -static inline int pud_free_pmd_page(pud_t *pud) | |
149 | +static inline int pud_free_pmd_page(pud_t *pud, unsigned long addr) | |
150 | { | |
151 | return 0; | |
152 | } | |
153 | -static inline int pmd_free_pte_page(pmd_t *pmd) | |
154 | +static inline int pmd_free_pte_page(pmd_t *pmd, unsigned long addr) | |
155 | { | |
156 | return 0; | |
157 | } | |
158 | --- a/lib/ioremap.c | |
159 | +++ b/lib/ioremap.c | |
160 | @@ -92,7 +92,7 @@ static inline int ioremap_pmd_range(pud_ | |
161 | if (ioremap_pmd_enabled() && | |
162 | ((next - addr) == PMD_SIZE) && | |
163 | IS_ALIGNED(phys_addr + addr, PMD_SIZE) && | |
164 | - pmd_free_pte_page(pmd)) { | |
165 | + pmd_free_pte_page(pmd, addr)) { | |
166 | if (pmd_set_huge(pmd, phys_addr + addr, prot)) | |
167 | continue; | |
168 | } | |
169 | @@ -119,7 +119,7 @@ static inline int ioremap_pud_range(p4d_ | |
170 | if (ioremap_pud_enabled() && | |
171 | ((next - addr) == PUD_SIZE) && | |
172 | IS_ALIGNED(phys_addr + addr, PUD_SIZE) && | |
173 | - pud_free_pmd_page(pud)) { | |
174 | + pud_free_pmd_page(pud, addr)) { | |
175 | if (pud_set_huge(pud, phys_addr + addr, prot)) | |
176 | continue; | |
177 | } |