Commit: 931f3697 (Greg Kroah-Hartman, stable queue)
1 | From foo@baz Tue Oct 28 11:19:22 CST 2014 |
2 | From: "David S. Miller" <davem@davemloft.net> | |
3 | Date: Sat, 27 Sep 2014 11:05:21 -0700 | |
4 | Subject: sparc64: Adjust vmalloc region size based upon available virtual address bits. | |
5 | ||
6 | From: "David S. Miller" <davem@davemloft.net> | |
7 | ||
8 | [ Upstream commit bb4e6e85daa52a9f6210fa06a5ec6269598a202b ] | |
9 | ||
10 | In order to accommodate embedded per-cpu allocation with large numbers | |
11 | of cpus and numa nodes, we have to use as much virtual address space | |
12 | as possible for the vmalloc region. Otherwise we can get things like: | |
13 | ||
14 | PERCPU: max_distance=0x380001c10000 too large for vmalloc space 0xff00000000 | |
15 | ||
16 | So, once we select a value for PAGE_OFFSET, derive the size of the | |
17 | vmalloc region based upon that. | |
18 | ||
19 | Signed-off-by: David S. Miller <davem@davemloft.net> | |
20 | Acked-by: Bob Picco <bob.picco@oracle.com> | |
21 | Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org> | |
22 | --- | |
23 | arch/sparc/include/asm/page_64.h | 1 - | |
24 | arch/sparc/include/asm/pgtable_64.h | 9 +++++---- | |
25 | arch/sparc/kernel/ktlb.S | 8 ++++---- | |
26 | arch/sparc/mm/init_64.c | 30 +++++++++++++++++++----------- | |
27 | 4 files changed, 28 insertions(+), 20 deletions(-) | |
28 | ||
29 | --- a/arch/sparc/include/asm/page_64.h | |
30 | +++ b/arch/sparc/include/asm/page_64.h | |
31 | @@ -117,7 +117,6 @@ extern unsigned long sparc64_va_hole_bot | |
32 | ||
33 | #include <asm-generic/memory_model.h> | |
34 | ||
35 | -#define PAGE_OFFSET_BY_BITS(X) (-(_AC(1,UL) << (X))) | |
36 | extern unsigned long PAGE_OFFSET; | |
37 | ||
38 | #endif /* !(__ASSEMBLY__) */ | |
39 | --- a/arch/sparc/include/asm/pgtable_64.h | |
40 | +++ b/arch/sparc/include/asm/pgtable_64.h | |
41 | @@ -40,10 +40,7 @@ | |
42 | #define LOW_OBP_ADDRESS _AC(0x00000000f0000000,UL) | |
43 | #define HI_OBP_ADDRESS _AC(0x0000000100000000,UL) | |
44 | #define VMALLOC_START _AC(0x0000000100000000,UL) | |
45 | -#define VMALLOC_END _AC(0x0000010000000000,UL) | |
46 | -#define VMEMMAP_BASE _AC(0x0000010000000000,UL) | |
47 | - | |
48 | -#define vmemmap ((struct page *)VMEMMAP_BASE) | |
49 | +#define VMEMMAP_BASE VMALLOC_END | |
50 | ||
51 | /* PMD_SHIFT determines the size of the area a second-level page | |
52 | * table can map | |
53 | @@ -81,6 +78,10 @@ | |
54 | ||
55 | #ifndef __ASSEMBLY__ | |
56 | ||
57 | +extern unsigned long VMALLOC_END; | |
58 | + | |
59 | +#define vmemmap ((struct page *)VMEMMAP_BASE) | |
60 | + | |
61 | #include <linux/sched.h> | |
62 | ||
63 | bool kern_addr_valid(unsigned long addr); | |
64 | --- a/arch/sparc/kernel/ktlb.S | |
65 | +++ b/arch/sparc/kernel/ktlb.S | |
66 | @@ -199,8 +199,8 @@ kvmap_dtlb_nonlinear: | |
67 | ||
68 | #ifdef CONFIG_SPARSEMEM_VMEMMAP | |
69 | /* Do not use the TSB for vmemmap. */ | |
70 | - mov (VMEMMAP_BASE >> 40), %g5 | |
71 | - sllx %g5, 40, %g5 | |
72 | + sethi %hi(VMEMMAP_BASE), %g5 | |
73 | + ldx [%g5 + %lo(VMEMMAP_BASE)], %g5 | |
74 | cmp %g4,%g5 | |
75 | bgeu,pn %xcc, kvmap_vmemmap | |
76 | nop | |
77 | @@ -212,8 +212,8 @@ kvmap_dtlb_tsbmiss: | |
78 | sethi %hi(MODULES_VADDR), %g5 | |
79 | cmp %g4, %g5 | |
80 | blu,pn %xcc, kvmap_dtlb_longpath | |
81 | - mov (VMALLOC_END >> 40), %g5 | |
82 | - sllx %g5, 40, %g5 | |
83 | + sethi %hi(VMALLOC_END), %g5 | |
84 | + ldx [%g5 + %lo(VMALLOC_END)], %g5 | |
85 | cmp %g4, %g5 | |
86 | bgeu,pn %xcc, kvmap_dtlb_longpath | |
87 | nop | |
88 | --- a/arch/sparc/mm/init_64.c | |
89 | +++ b/arch/sparc/mm/init_64.c | |
90 | @@ -1368,25 +1368,24 @@ static unsigned long max_phys_bits = 40; | |
91 | ||
92 | bool kern_addr_valid(unsigned long addr) | |
93 | { | |
94 | - unsigned long above = ((long)addr) >> max_phys_bits; | |
95 | pgd_t *pgd; | |
96 | pud_t *pud; | |
97 | pmd_t *pmd; | |
98 | pte_t *pte; | |
99 | ||
100 | - if (above != 0 && above != -1UL) | |
101 | - return false; | |
102 | - | |
103 | - if (addr >= (unsigned long) KERNBASE && | |
104 | - addr < (unsigned long)&_end) | |
105 | - return true; | |
106 | - | |
107 | - if (addr >= PAGE_OFFSET) { | |
108 | + if ((long)addr < 0L) { | |
109 | unsigned long pa = __pa(addr); | |
110 | ||
111 | + if ((addr >> max_phys_bits) != 0UL) | |
112 | + return false; | |
113 | + | |
114 | return pfn_valid(pa >> PAGE_SHIFT); | |
115 | } | |
116 | ||
117 | + if (addr >= (unsigned long) KERNBASE && | |
118 | + addr < (unsigned long)&_end) | |
119 | + return true; | |
120 | + | |
121 | pgd = pgd_offset_k(addr); | |
122 | if (pgd_none(*pgd)) | |
123 | return 0; | |
124 | @@ -1655,6 +1654,9 @@ unsigned long __init find_ecache_flush_s | |
125 | unsigned long PAGE_OFFSET; | |
126 | EXPORT_SYMBOL(PAGE_OFFSET); | |
127 | ||
128 | +unsigned long VMALLOC_END = 0x0000010000000000UL; | |
129 | +EXPORT_SYMBOL(VMALLOC_END); | |
130 | + | |
131 | unsigned long sparc64_va_hole_top = 0xfffff80000000000UL; | |
132 | unsigned long sparc64_va_hole_bottom = 0x0000080000000000UL; | |
133 | ||
134 | @@ -1711,10 +1713,16 @@ static void __init setup_page_offset(voi | |
135 | prom_halt(); | |
136 | } | |
137 | ||
138 | - PAGE_OFFSET = PAGE_OFFSET_BY_BITS(max_phys_bits); | |
139 | + PAGE_OFFSET = sparc64_va_hole_top; | |
140 | + VMALLOC_END = ((sparc64_va_hole_bottom >> 1) + | |
141 | + (sparc64_va_hole_bottom >> 2)); | |
142 | ||
143 | - pr_info("PAGE_OFFSET is 0x%016lx (max_phys_bits == %lu)\n", | |
144 | + pr_info("MM: PAGE_OFFSET is 0x%016lx (max_phys_bits == %lu)\n", | |
145 | PAGE_OFFSET, max_phys_bits); | |
146 | + pr_info("MM: VMALLOC [0x%016lx --> 0x%016lx]\n", | |
147 | + VMALLOC_START, VMALLOC_END); | |
148 | + pr_info("MM: VMEMMAP [0x%016lx --> 0x%016lx]\n", | |
149 | + VMEMMAP_BASE, VMEMMAP_BASE << 1); | |
150 | } | |
151 | ||
152 | static void __init tsb_phys_patch(void) |