// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/arch/cris/arch-v10/mm/init.c
 */

#include <linux/mmzone.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/mm.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/types.h>
#include <asm/mmu.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#include <arch/svinto.h>

extern void tlb_init(void);

/*
 * The kernel is already mapped with a kernel segment at kseg_c so
 * we don't need to map it with a page table. However head.S also
 * temporarily mapped it at kseg_4 so we should set up the ksegs again,
 * clear the TLB and do some other paging setup stuff.
 */
/*
 * paging_init - set up the kernel segment mappings, TLB and MMU control
 * registers for Etrax-100, then hand the memory layout to the core MM.
 *
 * Called once at boot.  The MMU is already enabled by head.S; this
 * function re-establishes the KSEG mapping, clears the TLB, allocates
 * the empty zero page and calls free_area_init_node() with a single
 * (DMA-able) zone.
 */
void __init
paging_init(void)
{
	int i;
	unsigned long zones_size[MAX_NR_ZONES];

	printk("Setting up paging and the MMU.\n");

	/* clear out the init_mm.pgd that will contain the kernel's mappings */

	for(i = 0; i < PTRS_PER_PGD; i++)
		swapper_pg_dir[i] = __pgd(0);

	/* make sure the current pgd table points to something sane
	 * (even if it is most probably not used until the next
	 * switch_mm)
	 */

	per_cpu(current_pgd, smp_processor_id()) = init_mm.pgd;

	/* initialise the TLB (tlb.c) */

	tlb_init();

	/* see README.mm for details on the KSEG setup */

#ifdef CONFIG_CRIS_LOW_MAP
	/* Etrax-100 LX version 1 has a bug so that we cannot map anything
	 * across the 0x80000000 boundary, so we need to shrink the user-virtual
	 * area to 0x50000000 instead of 0xb0000000 and map things slightly
	 * different. The unused areas are marked as paged so that we can catch
	 * freak kernel accesses there.
	 *
	 * The ARTPEC chip is mapped at 0xa so we pass that segment straight
	 * through. We cannot vremap it because the vmalloc area is below 0x8
	 * and Juliette needs an uncached area above 0x8.
	 *
	 * Same thing with 0xc and 0x9, which is memory-mapped I/O on some boards.
	 * We map them straight over in LOW_MAP, but use vremap in LX version 2.
	 */

	/* cached alias of the boot ROM, used by flush_etrax_cacherange() below */
#define CACHED_BOOTROM (KSEG_F | 0x08000000UL)

	/* Each seg_x bit selects whether the 256 MB segment at virtual
	 * 0xX0000000 is a hard-wired kernel segment ("seg") or goes
	 * through the page tables ("page").
	 */
	*R_MMU_KSEG = ( IO_STATE(R_MMU_KSEG, seg_f, seg  ) | /* bootrom */
			IO_STATE(R_MMU_KSEG, seg_e, page ) |
			IO_STATE(R_MMU_KSEG, seg_d, page ) |
			IO_STATE(R_MMU_KSEG, seg_c, page ) |
			IO_STATE(R_MMU_KSEG, seg_b, seg  ) | /* kernel reg area */
			IO_STATE(R_MMU_KSEG, seg_a, page ) |
			IO_STATE(R_MMU_KSEG, seg_9, seg  ) | /* LED's on some boards */
			IO_STATE(R_MMU_KSEG, seg_8, seg  ) | /* CSE0/1, flash and I/O */
			IO_STATE(R_MMU_KSEG, seg_7, page ) | /* kernel vmalloc area */
			IO_STATE(R_MMU_KSEG, seg_6, seg  ) | /* kernel DRAM area */
			IO_STATE(R_MMU_KSEG, seg_5, seg  ) | /* cached flash */
			IO_STATE(R_MMU_KSEG, seg_4, page ) | /* user area */
			IO_STATE(R_MMU_KSEG, seg_3, page ) | /* user area */
			IO_STATE(R_MMU_KSEG, seg_2, page ) | /* user area */
			IO_STATE(R_MMU_KSEG, seg_1, page ) | /* user area */
			IO_STATE(R_MMU_KSEG, seg_0, page ) ); /* user area */

	/* base_x gives the top physical-address nibble each kernel
	 * segment is translated to (e.g. seg_f -> phys 0x3...).
	 */
	*R_MMU_KBASE_HI = ( IO_FIELD(R_MMU_KBASE_HI, base_f, 0x3 ) |
			    IO_FIELD(R_MMU_KBASE_HI, base_e, 0x0 ) |
			    IO_FIELD(R_MMU_KBASE_HI, base_d, 0x0 ) |
			    IO_FIELD(R_MMU_KBASE_HI, base_c, 0x0 ) |
			    IO_FIELD(R_MMU_KBASE_HI, base_b, 0xb ) |
			    IO_FIELD(R_MMU_KBASE_HI, base_a, 0x0 ) |
			    IO_FIELD(R_MMU_KBASE_HI, base_9, 0x9 ) |
			    IO_FIELD(R_MMU_KBASE_HI, base_8, 0x8 ) );

	*R_MMU_KBASE_LO = ( IO_FIELD(R_MMU_KBASE_LO, base_7, 0x0 ) |
			    IO_FIELD(R_MMU_KBASE_LO, base_6, 0x4 ) |
			    IO_FIELD(R_MMU_KBASE_LO, base_5, 0x0 ) |
			    IO_FIELD(R_MMU_KBASE_LO, base_4, 0x0 ) |
			    IO_FIELD(R_MMU_KBASE_LO, base_3, 0x0 ) |
			    IO_FIELD(R_MMU_KBASE_LO, base_2, 0x0 ) |
			    IO_FIELD(R_MMU_KBASE_LO, base_1, 0x0 ) |
			    IO_FIELD(R_MMU_KBASE_LO, base_0, 0x0 ) );
#else
	/* This code is for the corrected Etrax-100 LX version 2... */

	/* cached alias of the boot ROM, used by flush_etrax_cacherange() below */
#define CACHED_BOOTROM (KSEG_A | 0x08000000UL)

	*R_MMU_KSEG = ( IO_STATE(R_MMU_KSEG, seg_f, seg  ) | /* cached flash */
			IO_STATE(R_MMU_KSEG, seg_e, seg  ) | /* uncached flash */
			IO_STATE(R_MMU_KSEG, seg_d, page ) | /* vmalloc area */
			IO_STATE(R_MMU_KSEG, seg_c, seg  ) | /* kernel area */
			IO_STATE(R_MMU_KSEG, seg_b, seg  ) | /* kernel reg area */
			IO_STATE(R_MMU_KSEG, seg_a, seg  ) | /* bootrom */
			IO_STATE(R_MMU_KSEG, seg_9, page ) | /* user area */
			IO_STATE(R_MMU_KSEG, seg_8, page ) |
			IO_STATE(R_MMU_KSEG, seg_7, page ) |
			IO_STATE(R_MMU_KSEG, seg_6, page ) |
			IO_STATE(R_MMU_KSEG, seg_5, page ) |
			IO_STATE(R_MMU_KSEG, seg_4, page ) |
			IO_STATE(R_MMU_KSEG, seg_3, page ) |
			IO_STATE(R_MMU_KSEG, seg_2, page ) |
			IO_STATE(R_MMU_KSEG, seg_1, page ) |
			IO_STATE(R_MMU_KSEG, seg_0, page ) );

	*R_MMU_KBASE_HI = ( IO_FIELD(R_MMU_KBASE_HI, base_f, 0x0 ) |
			    IO_FIELD(R_MMU_KBASE_HI, base_e, 0x8 ) |
			    IO_FIELD(R_MMU_KBASE_HI, base_d, 0x0 ) |
			    IO_FIELD(R_MMU_KBASE_HI, base_c, 0x4 ) |
			    IO_FIELD(R_MMU_KBASE_HI, base_b, 0xb ) |
			    IO_FIELD(R_MMU_KBASE_HI, base_a, 0x3 ) |
			    IO_FIELD(R_MMU_KBASE_HI, base_9, 0x0 ) |
			    IO_FIELD(R_MMU_KBASE_HI, base_8, 0x0 ) );

	*R_MMU_KBASE_LO = ( IO_FIELD(R_MMU_KBASE_LO, base_7, 0x0 ) |
			    IO_FIELD(R_MMU_KBASE_LO, base_6, 0x0 ) |
			    IO_FIELD(R_MMU_KBASE_LO, base_5, 0x0 ) |
			    IO_FIELD(R_MMU_KBASE_LO, base_4, 0x0 ) |
			    IO_FIELD(R_MMU_KBASE_LO, base_3, 0x0 ) |
			    IO_FIELD(R_MMU_KBASE_LO, base_2, 0x0 ) |
			    IO_FIELD(R_MMU_KBASE_LO, base_1, 0x0 ) |
			    IO_FIELD(R_MMU_KBASE_LO, base_0, 0x0 ) );
#endif

	/* start out in kernel context (page_id 0) */
	*R_MMU_CONTEXT = ( IO_FIELD(R_MMU_CONTEXT, page_id, 0 ) );

	/* The MMU has been enabled ever since head.S but just to make
	 * it totally obvious we do it here as well.
	 */

	*R_MMU_CTRL = ( IO_STATE(R_MMU_CTRL, inv_excp, enable ) |
			IO_STATE(R_MMU_CTRL, acc_excp, enable ) |
			IO_STATE(R_MMU_CTRL, we_excp, enable ) );

	*R_MMU_ENABLE = IO_STATE(R_MMU_ENABLE, mmu_enable, enable);

	/*
	 * initialize the bad page table and bad page to point
	 * to a couple of allocated pages
	 */

	empty_zero_page = (unsigned long)alloc_bootmem_pages(PAGE_SIZE);
	memset((void *)empty_zero_page, 0, PAGE_SIZE);

	/* All pages are DMA'able in Etrax, so put all in the DMA'able zone */

	zones_size[0] = ((unsigned long)high_memory - PAGE_OFFSET) >> PAGE_SHIFT;

	for (i = 1; i < MAX_NR_ZONES; i++)
		zones_size[i] = 0;

	/* Use free_area_init_node instead of free_area_init, because the former
	 * is designed for systems where the DRAM starts at an address substantially
	 * higher than 0, like us (we start at PAGE_OFFSET). This saves space in the
	 * mem_map page array.
	 */

	free_area_init_node(0, zones_size, PAGE_OFFSET >> PAGE_SHIFT, 0);
}
180 | ||
/* Initialize remaps of some I/O-ports. It is important that this
 * is called before any driver is initialized.
 *
 * Always returns 0; registered as an initcall below.
 */

static int
__init init_ioremap(void)
{

	/* Give the external I/O-port addresses their values */

#ifdef CONFIG_CRIS_LOW_MAP
	/* Simply a linear map (see the KSEG map above in paging_init) */
	port_cse1_addr = (volatile unsigned long *)(MEM_CSE1_START |
						    MEM_NON_CACHEABLE);
	port_csp0_addr = (volatile unsigned long *)(MEM_CSP0_START |
						    MEM_NON_CACHEABLE);
	port_csp4_addr = (volatile unsigned long *)(MEM_CSP4_START |
						    MEM_NON_CACHEABLE);
#else
	/* Note that nothing blows up just because we do this remapping
	 * it's ok even if the ports are not used or connected
	 * to anything (or connected to a non-I/O thing) */
	port_cse1_addr = (volatile unsigned long *)
		ioremap((unsigned long)(MEM_CSE1_START | MEM_NON_CACHEABLE), 16);
	port_csp0_addr = (volatile unsigned long *)
		ioremap((unsigned long)(MEM_CSP0_START | MEM_NON_CACHEABLE), 16);
	port_csp4_addr = (volatile unsigned long *)
		ioremap((unsigned long)(MEM_CSP4_START | MEM_NON_CACHEABLE), 16);
	/* NOTE(review): ioremap return values are not checked here; a failed
	 * remap would leave a NULL port pointer for later users — presumably
	 * acceptable at early init, but worth confirming. */
#endif
	return 0;
}

__initcall(init_ioremap);
214 | ||
/* Helper function for the two below */

/*
 * flush_etrax_cacherange - force the cachelines covering [startadr,
 * startadr+length) out of the data cache by dummy-reading the cached
 * boot-ROM alias at the same cache-index offsets.
 *
 * @startadr: start of the range to flush (only its in-page offset is used)
 * @length:   number of bytes to flush, clamped to the 8 KB cache size
 */
static inline void
flush_etrax_cacherange(void *startadr, int length)
{
	/* CACHED_BOOTROM is mapped to the boot-rom area (cached) which
	 * we can use to get fast dummy-reads of cachelines
	 */

	/* ~PAGE_MASK keeps only the offset within the page; reading the
	 * boot-ROM alias at that same offset evicts the matching line —
	 * assumes a direct-mapped cache indexed by these low address bits
	 * (TODO confirm against the Etrax-100 cache spec).
	 */
	volatile short *flushadr = (volatile short *)(((unsigned long)startadr & ~PAGE_MASK) |
						      CACHED_BOOTROM);

	length = length > 8192 ? 8192 : length; /* No need to flush more than cache size */

	while(length > 0) {
		*flushadr; /* dummy read to flush */
		flushadr += (32/sizeof(short)); /* a cacheline is 32 bytes */
		length -= 32;
	}
}
235 | ||
236 | /* Due to a bug in Etrax100(LX) all versions, receiving DMA buffers | |
c23cf8ba | 237 | * will occasionally corrupt certain CPU writes if the DMA buffers |
1da177e4 LT |
238 | * happen to be hot in the cache. |
239 | * | |
240 | * As a workaround, we have to flush the relevant parts of the cache | |
241 | * before (re) inserting any receiving descriptor into the DMA HW. | |
242 | */ | |
243 | ||
244 | void | |
245 | prepare_rx_descriptor(struct etrax_dma_descr *desc) | |
246 | { | |
247 | flush_etrax_cacherange((void *)desc->buf, desc->sw_len ? desc->sw_len : 65536); | |
248 | } | |
249 | ||
/* Do the same thing but flush the entire cache */

void
flush_etrax_cache(void)
{
	/* Flushing the full 8192-byte cache image evicts every line,
	 * regardless of which addresses are currently cached.
	 */
	flush_etrax_cacherange((void *)0, 8192);
}