// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * This file contains the routines for initializing the MMU
 * on the 8xx series of chips.
 *  -- christophe
 *
 * Derived from arch/powerpc/mm/40x_mmu.c:
 */

#include <linux/memblock.h>
#include <linux/mmu_context.h>
#include <asm/fixmap.h>
#include <asm/code-patching.h>

#include <mm/mmu_decl.h>

#define IMMR_SIZE (FIX_IMMR_SIZE << PAGE_SHIFT)

extern int __map_without_ltlbs;

static unsigned long block_mapped_ram;

/*
 * Return PA for this VA if it is in an area mapped with LTLBs or fixmap.
 * Otherwise, returns 0
 */
phys_addr_t v_block_mapped(unsigned long va)
{
	unsigned long p = PHYS_IMMR_BASE;

	if (va >= VIRT_IMMR_BASE && va < VIRT_IMMR_BASE + IMMR_SIZE)
		return p + va - VIRT_IMMR_BASE;
	if (__map_without_ltlbs)
		return 0;
	if (va >= PAGE_OFFSET && va < PAGE_OFFSET + block_mapped_ram)
		return __pa(va);
	return 0;
}

/*
 * Return VA for a given PA mapped with LTLBs or fixmap
 * Return 0 if not mapped
 */
unsigned long p_block_mapped(phys_addr_t pa)
{
	unsigned long p = PHYS_IMMR_BASE;

	if (pa >= p && pa < p + IMMR_SIZE)
		return VIRT_IMMR_BASE + pa - p;
	if (__map_without_ltlbs)
		return 0;
	if (pa < block_mapped_ram)
		return (unsigned long)__va(pa);
	return 0;
}

#define LARGE_PAGE_SIZE_8M (1 << 23)

/*
 * MMU_init_hw does the chip-specific initialization of the MMU hardware.
 */
void __init MMU_init_hw(void)
{
	/*
	 * Pin up to the first three 8 Mbyte pages of lowmem after the
	 * IMMR entry in the DTLB.
	 */
	if (IS_ENABLED(CONFIG_PIN_TLB_DATA)) {
		unsigned long ctr = mfspr(SPRN_MD_CTR) & 0xfe000000;
		unsigned long flags = 0xf0 | MD_SPS16K | _PAGE_SH | _PAGE_DIRTY;
		int i = IS_ENABLED(CONFIG_PIN_TLB_IMMR) ? 29 : 28;
		unsigned long addr = 0;
		unsigned long mem = total_lowmem;

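		/*
		 * Each pass selects one of the last DTLB slots via the index
		 * field of MD_CTR (entries 29..31, or 28..31 when the IMMR
		 * entry is not pinned) and loads an 8 Mbyte mapping of
		 * lowmem into it through MD_EPN, MD_TWC and MD_RPN.
		 */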
		for (; i < 32 && mem >= LARGE_PAGE_SIZE_8M; i++) {
			mtspr(SPRN_MD_CTR, ctr | (i << 8));
			mtspr(SPRN_MD_EPN, (unsigned long)__va(addr) | MD_EVALID);
			mtspr(SPRN_MD_TWC, MD_PS8MEG | MD_SVALID);
			mtspr(SPRN_MD_RPN, addr | flags | _PAGE_PRESENT);
			addr += LARGE_PAGE_SIZE_8M;
			mem -= LARGE_PAGE_SIZE_8M;
		}
	}
}

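/*
 * Map the IMMR area with small (page-granularity) no-cache, guarded
 * kernel pages.
 */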
static void __init mmu_mapin_immr(void)
{
	unsigned long p = PHYS_IMMR_BASE;
	unsigned long v = VIRT_IMMR_BASE;
	int offset;

	for (offset = 0; offset < IMMR_SIZE; offset += PAGE_SIZE)
		map_kernel_page(v + offset, p + offset, PAGE_KERNEL_NCG);
}

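/*
 * Patch the 16-bit immediate of the instruction at @site with the upper
 * half of the virtual address of @mapped, so the TLB miss handlers compare
 * against the new top of the block-mapped area.
 */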
static void mmu_patch_cmp_limit(s32 *site, unsigned long mapped)
{
	modify_instruction_site(site, 0xffff, (unsigned long)__va(mapped) >> 16);
}

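/*
 * Replace the 16-bit immediate of the addis instruction at @site with the
 * upper 16 bits of @simm.
 */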
static void mmu_patch_addis(s32 *site, long simm)
{
	unsigned int instr = *(unsigned int *)patch_site_addr(site);

	instr &= 0xffff0000;
	instr |= ((unsigned long)simm) >> 16;
	patch_instruction_site(site, instr);
}

static void mmu_mapin_ram_chunk(unsigned long offset, unsigned long top, pgprot_t prot)
{
	unsigned long s = offset;
	unsigned long v = PAGE_OFFSET + s;
	phys_addr_t p = memstart_addr + s;

	for (; s < top; s += PAGE_SIZE) {
		map_kernel_page(v, p, prot);
		v += PAGE_SIZE;
		p += PAGE_SIZE;
	}
}

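/*
 * Return the amount of low memory covered by block (8 Mbyte) mappings.
 * The compare limits of the TLB miss handlers are patched accordingly,
 * and the page tables are populated as well so that PTDUMP and the BDI
 * can see the mappings.
 */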
unsigned long __init mmu_mapin_ram(unsigned long base, unsigned long top)
{
	unsigned long mapped;

	if (__map_without_ltlbs) {
		mapped = 0;
		mmu_mapin_immr();
		if (!IS_ENABLED(CONFIG_PIN_TLB_IMMR))
			patch_instruction_site(&patch__dtlbmiss_immr_jmp, PPC_INST_NOP);
		if (!IS_ENABLED(CONFIG_PIN_TLB_TEXT))
			mmu_patch_cmp_limit(&patch__itlbmiss_linmem_top, 0);
	} else {
		unsigned long einittext8 = ALIGN(__pa(_einittext), SZ_8M);

		mapped = top & ~(LARGE_PAGE_SIZE_8M - 1);
		if (!IS_ENABLED(CONFIG_PIN_TLB_TEXT))
			mmu_patch_cmp_limit(&patch__itlbmiss_linmem_top, einittext8);

		/*
		 * Populate page tables to:
		 * - have them appear in /sys/kernel/debug/kernel_page_tables
		 * - allow the BDI to find the pages when they are not PINNED
		 */
		mmu_mapin_ram_chunk(0, einittext8, PAGE_KERNEL_X);
		mmu_mapin_ram_chunk(einittext8, mapped, PAGE_KERNEL);
		mmu_mapin_immr();
	}

	mmu_patch_cmp_limit(&patch__dtlbmiss_linmem_top, mapped);
	mmu_patch_cmp_limit(&patch__fixupdar_linmem_top, mapped);

	/*
	 * If the size of RAM is not an exact power of two, we may not have
	 * covered RAM in its entirety with 8 MiB pages. Consequently,
	 * restrict the top of the currently allocatable RAM so that calls
	 * to the memblock allocator (to allocate PTEs covering the "tail"
	 * with normal-sized pages, or for other reasons) do not attempt to
	 * allocate outside the allowed range.
	 */
	if (mapped)
		memblock_set_current_limit(mapped);

	block_mapped_ram = mapped;

	return mapped;
}

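/*
 * Stop mapping init text as executable: lower the instruction TLB miss
 * limit down to _etext and update the page tables accordingly.
 */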
void mmu_mark_initmem_nx(void)
{
	if (IS_ENABLED(CONFIG_STRICT_KERNEL_RWX) && CONFIG_ETEXT_SHIFT < 23)
		mmu_patch_addis(&patch__itlbmiss_linmem_top8,
				-((long)_etext & ~(LARGE_PAGE_SIZE_8M - 1)));
	if (!IS_ENABLED(CONFIG_PIN_TLB_TEXT)) {
		unsigned long einittext8 = ALIGN(__pa(_einittext), SZ_8M);
		unsigned long etext8 = ALIGN(__pa(_etext), SZ_8M);
		unsigned long etext = __pa(_etext);

		mmu_patch_cmp_limit(&patch__itlbmiss_linmem_top, __pa(_etext));

		/* Update page tables for PTDUMP and BDI */
		mmu_mapin_ram_chunk(0, einittext8, __pgprot(0));
		if (IS_ENABLED(CONFIG_STRICT_KERNEL_RWX)) {
			mmu_mapin_ram_chunk(0, etext, PAGE_KERNEL_TEXT);
			mmu_mapin_ram_chunk(etext, einittext8, PAGE_KERNEL);
		} else {
			mmu_mapin_ram_chunk(0, etext8, PAGE_KERNEL_TEXT);
			mmu_mapin_ram_chunk(etext8, einittext8, PAGE_KERNEL);
		}
	}
	_tlbil_all();
}

#ifdef CONFIG_STRICT_KERNEL_RWX
void mmu_mark_rodata_ro(void)
{
	unsigned long sinittext = __pa(_sinittext);
	unsigned long etext = __pa(_etext);

	if (CONFIG_DATA_SHIFT < 23)
		mmu_patch_addis(&patch__dtlbmiss_romem_top8,
				-__pa(((unsigned long)_sinittext) &
				      ~(LARGE_PAGE_SIZE_8M - 1)));
	mmu_patch_addis(&patch__dtlbmiss_romem_top, -__pa(_sinittext));

	_tlbil_all();

	/* Update page tables for PTDUMP and BDI */
	mmu_mapin_ram_chunk(0, sinittext, __pgprot(0));
	mmu_mapin_ram_chunk(0, etext, PAGE_KERNEL_ROX);
	mmu_mapin_ram_chunk(etext, sinittext, PAGE_KERNEL_RO);
}
#endif

void __init setup_initial_memory_limit(phys_addr_t first_memblock_base,
				       phys_addr_t first_memblock_size)
{
	/*
	 * We don't currently support the first MEMBLOCK not mapping
	 * physical address 0 on these processors.
	 */
	BUG_ON(first_memblock_base != 0);

	/* 8xx can only access 32MB at the moment */
	memblock_set_current_limit(min_t(u64, first_memblock_size, 0x02000000));
}

/*
 * Set up to use a given MMU context.
 * id is context number, pgd is PGD pointer.
 *
 * We place the physical address of the new task's page directory into
 * the MMU base register, and set the ASID compare register with the
 * new "context".
 */
void set_context(unsigned long id, pgd_t *pgd)
{
	s16 offset = (s16)(__pa(swapper_pg_dir));

	/*
	 * Context switch the PTE pointer for the Abatron BDI2000.
	 * The PGDIR is passed as second argument.
	 */
	if (IS_ENABLED(CONFIG_BDI_SWITCH))
		abatron_pteptrs[1] = pgd;

	/*
	 * Register M_TWB will contain the base address of the level 1 table
	 * minus the lower part of the kernel PGDIR base address, so that all
	 * accesses to the level 1 table are done relative to the lower part
	 * of the kernel PGDIR base address.
	 */
	mtspr(SPRN_M_TWB, __pa(pgd) - offset);

	/* Update context */
	mtspr(SPRN_M_CASID, id - 1);
	/* sync */
	mb();
}

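/*
 * Invalidate the whole instruction cache by writing the "invalidate all"
 * command to the IC_CST control register.
 */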
void flush_instruction_cache(void)
{
	isync();
	mtspr(SPRN_IC_CST, IDC_INVALL);
	isync();
}

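/*
 * Kernel Userspace Execution Prevention (KUEP) and Kernel Userspace Access
 * Protection (KUAP) are implemented through the MI_AP and MD_AP access
 * protection group registers: loading the KUEP/KUAP group layouts prevents
 * the kernel from executing or accessing user pages by default.
 */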
#ifdef CONFIG_PPC_KUEP
void __init setup_kuep(bool disabled)
{
	if (disabled)
		return;

	pr_info("Activating Kernel Userspace Execution Prevention\n");

	mtspr(SPRN_MI_AP, MI_APG_KUEP);
}
#endif

#ifdef CONFIG_PPC_KUAP
void __init setup_kuap(bool disabled)
{
	pr_info("Activating Kernel Userspace Access Protection\n");

	if (disabled)
		pr_warn("KUAP cannot be disabled yet on 8xx when compiled in\n");

	mtspr(SPRN_MD_AP, MD_APG_KUAP);
}
#endif