arch/m68k/mm/mcfmmu.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Based upon linux/arch/m68k/mm/sun3mmu.c
 * Based upon linux/arch/ppc/mm/mmu_context.c
 *
 * Implementations of mm routines specific to the Coldfire MMU.
 *
 * Copyright (c) 2008 Freescale Semiconductor, Inc.
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/bootmem.h>

#include <asm/setup.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/mmu_context.h>
#include <asm/mcf_pgalloc.h>
#include <asm/tlbflush.h>

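/*
 * KMAPAREA(x) is true when the address x lies inside the kernel
 * vmalloc/kmap window rather than in a user mapping.
 */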
#define KMAPAREA(x)	((x >= VMALLOC_START) && (x < KMAP_END))

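/*
 * Context-management state, following the arch/ppc mmu_context code:
 * next_mmu_context is the next context number to try handing out,
 * context_map is a bitmap of context numbers (ASIDs) in use,
 * nr_free_contexts counts how many remain, and context_mm[] maps each
 * context back to the mm_struct that owns it.  num_pages is the total
 * number of RAM pages, filled in by cf_bootmem_alloc().
 */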
mm_context_t next_mmu_context;
unsigned long context_map[LAST_CONTEXT / BITS_PER_LONG + 1];
atomic_t nr_free_contexts;
struct mm_struct *context_mm[LAST_CONTEXT+1];
unsigned long num_pages;

/*
 * ColdFire paging_init derived from sun3.
 */
void __init paging_init(void)
{
	pgd_t *pg_dir;
	pte_t *pg_table;
	unsigned long address, size;
	unsigned long next_pgtable, bootmem_end;
	unsigned long zones_size[MAX_NR_ZONES];
	enum zone_type zone;
	int i;

	empty_zero_page = (void *) alloc_bootmem_pages(PAGE_SIZE);
	memset((void *) empty_zero_page, 0, PAGE_SIZE);

	pg_dir = swapper_pg_dir;
	memset(swapper_pg_dir, 0, sizeof(swapper_pg_dir));

	size = num_pages * sizeof(pte_t);
	size = (size + PAGE_SIZE) & ~(PAGE_SIZE-1);
	next_pgtable = (unsigned long) alloc_bootmem_pages(size);

	bootmem_end = (next_pgtable + size + PAGE_SIZE) & PAGE_MASK;
	pg_dir += PAGE_OFFSET >> PGDIR_SHIFT;

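	/*
	 * Map all of RAM into kernel space: walk from PAGE_OFFSET up to
	 * high_memory, installing one page table per pgd slot and one pte
	 * per page; entries beyond high_memory are left invalid.
	 */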
	address = PAGE_OFFSET;
	while (address < (unsigned long)high_memory) {
		pg_table = (pte_t *) next_pgtable;
		next_pgtable += PTRS_PER_PTE * sizeof(pte_t);
		pgd_val(*pg_dir) = (unsigned long) pg_table;
		pg_dir++;

		/* now change pg_table to kernel virtual addresses */
		for (i = 0; i < PTRS_PER_PTE; ++i, ++pg_table) {
			pte_t pte = pfn_pte(virt_to_pfn(address), PAGE_INIT);
			if (address >= (unsigned long) high_memory)
				pte_val(pte) = 0;

			set_pte(pg_table, pte);
			address += PAGE_SIZE;
		}
	}

	current->mm = NULL;

	for (zone = 0; zone < MAX_NR_ZONES; zone++)
		zones_size[zone] = 0x0;
	zones_size[ZONE_DMA] = num_pages;
	free_area_init(zones_size);
}

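/*
 * Handle a ColdFire TLB miss: walk the page tables for the faulting
 * address and, if a valid pte exists, load it into the instruction or
 * data TLB by hand.  Returns 0 on success, or -1 to fall back to the
 * normal page fault path.
 */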
int cf_tlb_miss(struct pt_regs *regs, int write, int dtlb, int extension_word)
{
	unsigned long flags, mmuar, mmutr;
	struct mm_struct *mm;
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;
	int asid;

	local_irq_save(flags);

	mmuar = (dtlb) ? mmu_read(MMUAR) :
		regs->pc + (extension_word * sizeof(long));

	mm = (!user_mode(regs) && KMAPAREA(mmuar)) ? &init_mm : current->mm;
	if (!mm) {
		local_irq_restore(flags);
		return -1;
	}

	pgd = pgd_offset(mm, mmuar);
	if (pgd_none(*pgd))  {
		local_irq_restore(flags);
		return -1;
	}

	pmd = pmd_offset(pgd, mmuar);
	if (pmd_none(*pmd)) {
		local_irq_restore(flags);
		return -1;
	}

	pte = (KMAPAREA(mmuar)) ? pte_offset_kernel(pmd, mmuar)
				: pte_offset_map(pmd, mmuar);
	if (pte_none(*pte) || !pte_present(*pte)) {
		local_irq_restore(flags);
		return -1;
	}

	if (write) {
		if (!pte_write(*pte)) {
			local_irq_restore(flags);
			return -1;
		}
		set_pte(pte, pte_mkdirty(*pte));
	}

	set_pte(pte, pte_mkyoung(*pte));
	asid = mm->context & 0xff;
	if (!pte_dirty(*pte) && !KMAPAREA(mmuar))
		set_pte(pte, pte_wrprotect(*pte));

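	/*
	 * Build the TLB tag entry (virtual page number, ASID, valid bit)
	 * and data entry (physical page number, protection bits, 8K page
	 * size), then tell the MMU to load them into the chosen TLB.
	 */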
	mmutr = (mmuar & PAGE_MASK) | (asid << MMUTR_IDN) | MMUTR_V;
	if ((mmuar < TASK_UNMAPPED_BASE) || (mmuar >= TASK_SIZE))
		mmutr |= (pte->pte & CF_PAGE_MMUTR_MASK) >> CF_PAGE_MMUTR_SHIFT;
	mmu_write(MMUTR, mmutr);

	mmu_write(MMUDR, (pte_val(*pte) & PAGE_MASK) |
		((pte->pte) & CF_PAGE_MMUDR_MASK) | MMUDR_SZ_8KB | MMUDR_X);

	if (dtlb)
		mmu_write(MMUOR, MMUOR_ACC | MMUOR_UAA);
	else
		mmu_write(MMUOR, MMUOR_ITLB | MMUOR_ACC | MMUOR_UAA);

	local_irq_restore(flags);
	return 0;
}

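/*
 * Record the RAM layout (_rambase.._ramend) and set up the bootmem
 * allocator to cover all memory above the kernel image, ready for the
 * rest of mm initialization.
 */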
void __init cf_bootmem_alloc(void)
{
	unsigned long start_pfn;
	unsigned long memstart;

	/* _rambase and _ramend will be naturally page aligned */
	m68k_memory[0].addr = _rambase;
	m68k_memory[0].size = _ramend - _rambase;

	/* compute total pages in system */
	num_pages = PFN_DOWN(_ramend - _rambase);

	/* page numbers */
	memstart = PAGE_ALIGN(_ramstart);
	min_low_pfn = PFN_DOWN(_rambase);
	start_pfn = PFN_DOWN(memstart);
	max_pfn = max_low_pfn = PFN_DOWN(_ramend);
	high_memory = (void *)_ramend;

	m68k_virt_to_node_shift = fls(_ramend - _rambase - 1) - 6;
	module_fixup(NULL, __start_fixup, __stop_fixup);

	/* setup bootmem data */
	m68k_setup_node(0);
	memstart += init_bootmem_node(NODE_DATA(0), start_pfn,
		min_low_pfn, max_low_pfn);
	free_bootmem_node(NODE_DATA(0), memstart, _ramend - memstart);
}

/*
 * Initialize the context management stuff.
 * The following was taken from arch/ppc/mmu_context.c
 */
void __init mmu_context_init(void)
{
	/*
	 * Some processors have too few contexts to reserve one for
	 * init_mm, and require using context 0 for a normal task.
	 * Other processors reserve the use of context zero for the kernel.
	 * This code assumes FIRST_CONTEXT < 32.
	 */
	context_map[0] = (1 << FIRST_CONTEXT) - 1;
	next_mmu_context = FIRST_CONTEXT;
	atomic_set(&nr_free_contexts, LAST_CONTEXT - FIRST_CONTEXT + 1);
}

/*
 * Steal a context from a task that has one at the moment.
 * This is only used on 8xx and 4xx and we presently assume that
 * they don't do SMP.  If they do then this will have to check
 * whether the MM we steal is in use.
 * We also assume that this is only used on systems that don't
 * use an MMU hash table - this is true for 8xx and 4xx.
 * This isn't an LRU system, it just frees up each context in
 * turn (sort-of pseudo-random replacement :).  This would be the
 * place to implement an LRU scheme if anyone was motivated to do it.
 *  -- paulus
 */
void steal_context(void)
{
	struct mm_struct *mm;
	/*
	 * free up context `next_mmu_context'
	 * if we shouldn't free context 0, don't...
	 */
	if (next_mmu_context < FIRST_CONTEXT)
		next_mmu_context = FIRST_CONTEXT;
	mm = context_mm[next_mmu_context];
	flush_tlb_mm(mm);
	destroy_context(mm);
}