From foo@baz Tue Oct 28 11:19:22 CST 2014
From: "David S. Miller" <davem@davemloft.net>
Date: Fri, 26 Sep 2014 21:19:46 -0700
Subject: sparc64: Switch to 4-level page tables.

From: "David S. Miller" <davem@davemloft.net>

[ Upstream commit ac55c768143aa34cc3789c4820cbb0809a76fd9c ]

This has become necessary with chips that support more than 43-bits
of physical addressing.

Based almost entirely upon a patch by Bob Picco.

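[ Editor's note, not part of the upstream changelog: a minimal standalone
sketch of how the new level shifts compose, assuming sparc64's 8K base
pages (PAGE_SHIFT == 13) and 8-byte table entries; the SKETCH_* names
below are illustrative only and are not kernel symbols. ]

#define SKETCH_PAGE_SHIFT  13                                      /* 8K base pages */
#define SKETCH_LEVEL_BITS  (SKETCH_PAGE_SHIFT - 3)                 /* 1024 8-byte entries per level */
#define SKETCH_PMD_SHIFT   (SKETCH_PAGE_SHIFT + SKETCH_LEVEL_BITS) /* 23 */
#define SKETCH_PUD_SHIFT   (SKETCH_PMD_SHIFT + SKETCH_LEVEL_BITS)  /* 33 */
#define SKETCH_PGDIR_SHIFT (SKETCH_PUD_SHIFT + SKETCH_LEVEL_BITS)  /* 43 */

/* The old three-level walk topped out at 43 bits of virtual address;
 * adding the pud level extends coverage to 53 bits, which is why the
 * sanity check in pgtable_64.h changes from "!= 43" to "!= 53".
 */
_Static_assert(SKETCH_PGDIR_SHIFT + SKETCH_LEVEL_BITS == 53,
	       "four-level walk covers 53 bits of virtual address");
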
Signed-off-by: David S. Miller <davem@davemloft.net>
Acked-by: Bob Picco <bob.picco@oracle.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
---
 arch/sparc/include/asm/page_64.h | 6 +++++
 arch/sparc/include/asm/pgalloc_64.h | 28 ++++++++++++++++++++++++++-
 arch/sparc/include/asm/pgtable_64.h | 37 +++++++++++++++++++++++++++++++-----
 arch/sparc/include/asm/tsb.h | 10 +++++++++
 arch/sparc/kernel/smp_64.c | 7 ++++++
 arch/sparc/mm/init_64.c | 31 ++++++++++++++++++++++++++----
 6 files changed, 109 insertions(+), 10 deletions(-)

--- a/arch/sparc/include/asm/page_64.h
+++ b/arch/sparc/include/asm/page_64.h
@@ -57,18 +57,21 @@ void copy_user_page(void *to, void *from
 typedef struct { unsigned long pte; } pte_t;
 typedef struct { unsigned long iopte; } iopte_t;
 typedef struct { unsigned long pmd; } pmd_t;
+typedef struct { unsigned long pud; } pud_t;
 typedef struct { unsigned long pgd; } pgd_t;
 typedef struct { unsigned long pgprot; } pgprot_t;

 #define pte_val(x) ((x).pte)
 #define iopte_val(x) ((x).iopte)
 #define pmd_val(x) ((x).pmd)
+#define pud_val(x) ((x).pud)
 #define pgd_val(x) ((x).pgd)
 #define pgprot_val(x) ((x).pgprot)

 #define __pte(x) ((pte_t) { (x) } )
 #define __iopte(x) ((iopte_t) { (x) } )
 #define __pmd(x) ((pmd_t) { (x) } )
+#define __pud(x) ((pud_t) { (x) } )
 #define __pgd(x) ((pgd_t) { (x) } )
 #define __pgprot(x) ((pgprot_t) { (x) } )

@@ -77,18 +80,21 @@ typedef struct { unsigned long pgprot; }
 typedef unsigned long pte_t;
 typedef unsigned long iopte_t;
 typedef unsigned long pmd_t;
+typedef unsigned long pud_t;
 typedef unsigned long pgd_t;
 typedef unsigned long pgprot_t;

 #define pte_val(x) (x)
 #define iopte_val(x) (x)
 #define pmd_val(x) (x)
+#define pud_val(x) (x)
 #define pgd_val(x) (x)
 #define pgprot_val(x) (x)

 #define __pte(x) (x)
 #define __iopte(x) (x)
 #define __pmd(x) (x)
+#define __pud(x) (x)
 #define __pgd(x) (x)
 #define __pgprot(x) (x)

--- a/arch/sparc/include/asm/pgalloc_64.h
+++ b/arch/sparc/include/asm/pgalloc_64.h
@@ -15,6 +15,13 @@

 extern struct kmem_cache *pgtable_cache;

+static inline void __pgd_populate(pgd_t *pgd, pud_t *pud)
+{
+ pgd_set(pgd, pud);
+}
+
+#define pgd_populate(MM, PGD, PUD) __pgd_populate(PGD, PUD)
+
 static inline pgd_t *pgd_alloc(struct mm_struct *mm)
 {
 return kmem_cache_alloc(pgtable_cache, GFP_KERNEL);
@@ -25,7 +32,23 @@ static inline void pgd_free(struct mm_st
 kmem_cache_free(pgtable_cache, pgd);
 }

-#define pud_populate(MM, PUD, PMD) pud_set(PUD, PMD)
+static inline void __pud_populate(pud_t *pud, pmd_t *pmd)
+{
+ pud_set(pud, pmd);
+}
+
+#define pud_populate(MM, PUD, PMD) __pud_populate(PUD, PMD)
+
+static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
+{
+ return kmem_cache_alloc(pgtable_cache,
+ GFP_KERNEL|__GFP_REPEAT);
+}
+
+static inline void pud_free(struct mm_struct *mm, pud_t *pud)
+{
+ kmem_cache_free(pgtable_cache, pud);
+}

 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
 {
@@ -91,4 +114,7 @@ static inline void __pte_free_tlb(struct
 #define __pmd_free_tlb(tlb, pmd, addr) \
 pgtable_free_tlb(tlb, pmd, false)

+#define __pud_free_tlb(tlb, pud, addr) \
+ pgtable_free_tlb(tlb, pud, false)
+
 #endif /* _SPARC64_PGALLOC_H */
--- a/arch/sparc/include/asm/pgtable_64.h
+++ b/arch/sparc/include/asm/pgtable_64.h
@@ -20,8 +20,6 @@
 #include <asm/page.h>
 #include <asm/processor.h>

-#include <asm-generic/pgtable-nopud.h>
-
 /* The kernel image occupies 0x4000000 to 0x6000000 (4MB --> 96MB).
 * The page copy blockops can use 0x6000000 to 0x8000000.
 * The 8K TSB is mapped in the 0x8000000 to 0x8400000 range.
@@ -55,13 +53,21 @@
 #define PMD_MASK (~(PMD_SIZE-1))
 #define PMD_BITS (PAGE_SHIFT - 3)

-/* PGDIR_SHIFT determines what a third-level page table entry can map */
-#define PGDIR_SHIFT (PAGE_SHIFT + (PAGE_SHIFT-3) + PMD_BITS)
+/* PUD_SHIFT determines the size of the area a third-level page
+ * table can map
+ */
+#define PUD_SHIFT (PMD_SHIFT + PMD_BITS)
+#define PUD_SIZE (_AC(1,UL) << PUD_SHIFT)
+#define PUD_MASK (~(PUD_SIZE-1))
+#define PUD_BITS (PAGE_SHIFT - 3)
+
+/* PGDIR_SHIFT determines what a fourth-level page table entry can map */
+#define PGDIR_SHIFT (PUD_SHIFT + PUD_BITS)
 #define PGDIR_SIZE (_AC(1,UL) << PGDIR_SHIFT)
 #define PGDIR_MASK (~(PGDIR_SIZE-1))
 #define PGDIR_BITS (PAGE_SHIFT - 3)

-#if (PGDIR_SHIFT + PGDIR_BITS) != 43
+#if (PGDIR_SHIFT + PGDIR_BITS) != 53
 #error Page table parameters do not cover virtual address space properly.
 #endif

@@ -93,6 +99,7 @@ static inline bool kern_addr_valid(unsig
 /* Entries per page directory level. */
 #define PTRS_PER_PTE (1UL << (PAGE_SHIFT-3))
 #define PTRS_PER_PMD (1UL << PMD_BITS)
+#define PTRS_PER_PUD (1UL << PUD_BITS)
 #define PTRS_PER_PGD (1UL << PGDIR_BITS)

 /* Kernel has a separate 44bit address space. */
@@ -101,6 +108,9 @@ static inline bool kern_addr_valid(unsig
 #define pmd_ERROR(e) \
 pr_err("%s:%d: bad pmd %p(%016lx) seen at (%pS)\n", \
 __FILE__, __LINE__, &(e), pmd_val(e), __builtin_return_address(0))
+#define pud_ERROR(e) \
+ pr_err("%s:%d: bad pud %p(%016lx) seen at (%pS)\n", \
+ __FILE__, __LINE__, &(e), pud_val(e), __builtin_return_address(0))
 #define pgd_ERROR(e) \
 pr_err("%s:%d: bad pgd %p(%016lx) seen at (%pS)\n", \
 __FILE__, __LINE__, &(e), pgd_val(e), __builtin_return_address(0))
@@ -779,6 +789,11 @@ static inline int pmd_present(pmd_t pmd)
 #define pud_bad(pud) ((pud_val(pud) & ~PAGE_MASK) || \
 !__kern_addr_valid(pud_val(pud)))

+#define pgd_none(pgd) (!pgd_val(pgd))
+
+#define pgd_bad(pgd) ((pgd_val(pgd) & ~PAGE_MASK) || \
+ !__kern_addr_valid(pgd_val(pgd)))
+
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 void set_pmd_at(struct mm_struct *mm, unsigned long addr,
 pmd_t *pmdp, pmd_t pmd);
@@ -815,10 +830,17 @@ static inline unsigned long __pmd_page(p
 #define pmd_clear(pmdp) (pmd_val(*(pmdp)) = 0UL)
 #define pud_present(pud) (pud_val(pud) != 0U)
 #define pud_clear(pudp) (pud_val(*(pudp)) = 0UL)
+#define pgd_page_vaddr(pgd) \
+ ((unsigned long) __va(pgd_val(pgd)))
+#define pgd_present(pgd) (pgd_val(pgd) != 0U)
+#define pgd_clear(pgdp) (pgd_val(*(pgd)) = 0UL)

 /* Same in both SUN4V and SUN4U. */
 #define pte_none(pte) (!pte_val(pte))

+#define pgd_set(pgdp, pudp) \
+ (pgd_val(*(pgdp)) = (__pa((unsigned long) (pudp))))
+
 /* to find an entry in a page-table-directory. */
 #define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))
 #define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
@@ -826,6 +848,11 @@ static inline unsigned long __pmd_page(p
 /* to find an entry in a kernel page-table-directory */
 #define pgd_offset_k(address) pgd_offset(&init_mm, address)

+/* Find an entry in the third-level page table.. */
+#define pud_index(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD - 1))
+#define pud_offset(pgdp, address) \
+ ((pud_t *) pgd_page_vaddr(*(pgdp)) + pud_index(address))
+
 /* Find an entry in the second-level page table.. */
 #define pmd_offset(pudp, address) \
 ((pmd_t *) pud_page_vaddr(*(pudp)) + \
--- a/arch/sparc/include/asm/tsb.h
+++ b/arch/sparc/include/asm/tsb.h
@@ -145,6 +145,11 @@ extern struct tsb_phys_patch_entry __tsb
 andn REG2, 0x7, REG2; \
 ldx [REG1 + REG2], REG1; \
 brz,pn REG1, FAIL_LABEL; \
+ sllx VADDR, 64 - (PUD_SHIFT + PUD_BITS), REG2; \
+ srlx REG2, 64 - PAGE_SHIFT, REG2; \
+ andn REG2, 0x7, REG2; \
+ ldxa [REG1 + REG2] ASI_PHYS_USE_EC, REG1; \
+ brz,pn REG1, FAIL_LABEL; \
 sllx VADDR, 64 - (PMD_SHIFT + PMD_BITS), REG2; \
 srlx REG2, 64 - PAGE_SHIFT, REG2; \
 andn REG2, 0x7, REG2; \
@@ -198,6 +203,11 @@ extern struct tsb_phys_patch_entry __tsb
 andn REG2, 0x7, REG2; \
 ldxa [PHYS_PGD + REG2] ASI_PHYS_USE_EC, REG1; \
 brz,pn REG1, FAIL_LABEL; \
+ sllx VADDR, 64 - (PUD_SHIFT + PUD_BITS), REG2; \
+ srlx REG2, 64 - PAGE_SHIFT, REG2; \
+ andn REG2, 0x7, REG2; \
+ ldxa [REG1 + REG2] ASI_PHYS_USE_EC, REG1; \
+ brz,pn REG1, FAIL_LABEL; \
 sllx VADDR, 64 - (PMD_SHIFT + PMD_BITS), REG2; \
 srlx REG2, 64 - PAGE_SHIFT, REG2; \
 andn REG2, 0x7, REG2; \
--- a/arch/sparc/kernel/smp_64.c
+++ b/arch/sparc/kernel/smp_64.c
@@ -1467,6 +1467,13 @@ static void __init pcpu_populate_pte(uns
 pud_t *pud;
 pmd_t *pmd;

+ if (pgd_none(*pgd)) {
+ pud_t *new;
+
+ new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
+ pgd_populate(&init_mm, pgd, new);
+ }
+
 pud = pud_offset(pgd, addr);
 if (pud_none(*pud)) {
 pmd_t *new;
--- a/arch/sparc/mm/init_64.c
+++ b/arch/sparc/mm/init_64.c
@@ -1389,6 +1389,13 @@ static unsigned long __ref kernel_map_ra
 pmd_t *pmd;
 pte_t *pte;

+ if (pgd_none(*pgd)) {
+ pud_t *new;
+
+ new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
+ alloc_bytes += PAGE_SIZE;
+ pgd_populate(&init_mm, pgd, new);
+ }
 pud = pud_offset(pgd, vstart);
 if (pud_none(*pud)) {
 pmd_t *new;
@@ -1855,7 +1862,12 @@ static void __init sun4v_linear_pte_xor_
 /* paging_init() sets up the page tables */

 static unsigned long last_valid_pfn;
-pgd_t swapper_pg_dir[PTRS_PER_PGD];
+
+/* These must be page aligned in order to not trigger the
+ * alignment tests of pgd_bad() and pud_bad().
+ */
+pgd_t swapper_pg_dir[PTRS_PER_PGD] __attribute__ ((aligned (PAGE_SIZE)));
+static pud_t swapper_pud_dir[PTRS_PER_PUD] __attribute__ ((aligned (PAGE_SIZE)));

 static void sun4u_pgprot_init(void);
 static void sun4v_pgprot_init(void);
@@ -1864,6 +1876,8 @@ void __init paging_init(void)
 {
 unsigned long end_pfn, shift, phys_base;
 unsigned long real_end, i;
+ pud_t *pud;
+ pmd_t *pmd;
 int node;

 setup_page_offset();
@@ -1960,9 +1974,18 @@ void __init paging_init(void)

 memset(swapper_low_pmd_dir, 0, sizeof(swapper_low_pmd_dir));

- /* Now can init the kernel/bad page tables. */
- pud_set(pud_offset(&swapper_pg_dir[0], 0),
- swapper_low_pmd_dir + (shift / sizeof(pgd_t)));
+ /* The kernel page tables we publish into what the rest of the
+ * world sees must be adjusted so that they see the PAGE_OFFSET
+ * address of these in-kerenel data structures. However right
+ * here we must access them from the kernel image side, because
+ * the trap tables haven't been taken over and therefore we cannot
+ * take TLB misses in the PAGE_OFFSET linear mappings yet.
+ */
+ pud = swapper_pud_dir + (shift / sizeof(pud_t));
+ pgd_set(&swapper_pg_dir[0], pud);
+
+ pmd = swapper_low_pmd_dir + (shift / sizeof(pmd_t));
+ pud_set(&swapper_pud_dir[0], pmd);

 inherit_prom_mappings();
