/* SPDX-License-Identifier: GPL-2.0 */
/* arch/sparc64/kernel/ktlb.S: Kernel mapping TLB miss handling.
 *
 * Copyright (C) 1995, 1997, 2005, 2008 David S. Miller <davem@davemloft.net>
 * Copyright (C) 1996 Eddie C. Dost (ecd@brainaid.de)
 * Copyright (C) 1996 Miguel de Icaza (miguel@nuclecu.unam.mx)
 * Copyright (C) 1996,98,99 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 */

#include <asm/head.h>
#include <asm/asi.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/tsb.h>

	.text
	.align	32

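	/* Kernel TLB misses are resolved in up to three steps: a TSB
	 * lookup, then a kernel page table walk, and finally (for
	 * firmware addresses) the OBP translation table.  Only the
	 * trap-time globals %g1-%g7 are usable as scratch here.
	 */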
kvmap_itlb:
	/* g6: TAG TARGET */
	mov		TLB_TAG_ACCESS, %g4
	ldxa		[%g4] ASI_IMMU, %g4

	/* The kernel executes in context zero, therefore we do not
	 * need to clear the context ID bits out of %g4 here.
	 */

	/* sun4v_itlb_miss branches here with the missing virtual
	 * address already loaded into %g4.
	 */
kvmap_itlb_4v:

	/* Catch kernel NULL pointer calls.  */
	sethi		%hi(PAGE_SIZE), %g5
	cmp		%g4, %g5
	blu,pn		%xcc, kvmap_itlb_longpath
	 nop

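	/* KERN_TSB_LOOKUP_TL1 (asm/tsb.h) hashes the vaddr in %g4
	 * into the kernel TSB and branches to kvmap_itlb_load on a
	 * tag match; on a miss it falls through to the range checks
	 * below.
	 */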
	KERN_TSB_LOOKUP_TL1(%g4, %g6, %g5, %g1, %g2, %g3, kvmap_itlb_load)

kvmap_itlb_tsb_miss:
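	/* Classify the faulting address: below LOW_OBP_ADDRESS it is
	 * a vmalloc/module mapping, within [LOW_OBP_ADDRESS, 4GB) it
	 * is an OpenBoot PROM translation.
	 */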
	sethi		%hi(LOW_OBP_ADDRESS), %g5
	cmp		%g4, %g5
	blu,pn		%xcc, kvmap_itlb_vmalloc_addr
	 mov		0x1, %g5
	sllx		%g5, 32, %g5
	cmp		%g4, %g5
	blu,pn		%xcc, kvmap_itlb_obp
	 nop

kvmap_itlb_vmalloc_addr:
	KERN_PGTABLE_WALK(%g4, %g5, %g2, kvmap_itlb_longpath)

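	/* TSB_LOCK_TAG marks the TSB entry busy so a concurrent
	 * update cannot tear it; TSB_WRITE then stores the new PTE
	 * followed by the real tag, making the entry visible whole.
	 */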
	TSB_LOCK_TAG(%g1, %g2, %g7)
	TSB_WRITE(%g1, %g5, %g6)

	/* fallthrough to TLB load */

kvmap_itlb_load:

661:	stxa		%g5, [%g0] ASI_ITLB_DATA_IN
	retry
	.section	.sun4v_2insn_patch, "ax"
	.word		661b
	nop
	nop
	.previous
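
	/* Each .sun4v_2insn_patch entry pairs the address marked 661
	 * above with two replacement instructions; boot code applies
	 * them when the kernel finds itself running on sun4v.
	 */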

	/* For sun4v the ASI_ITLB_DATA_IN store and the retry
	 * instruction get nop'd out and we get here to branch
	 * to the sun4v tlb load code.  The registers are set up
	 * as follows:
	 *
	 * %g4: vaddr
	 * %g5: PTE
	 * %g6: TAG
	 *
	 * The sun4v TLB load wants the PTE in %g3, so we fix that
	 * up here.
	 */
	ba,pt		%xcc, sun4v_itlb_load
	 mov		%g5, %g3

kvmap_itlb_longpath:

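	/* sun4u: the wrpr below flips %pstate from the MMU globals
	 * to the alternate globals that sparc64_realfault_common
	 * expects (wrpr XORs its operands).  On sun4v this pair is
	 * patched to SET_GL(1) instead.
	 */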
661:	rdpr		%pstate, %g5
	wrpr		%g5, PSTATE_AG | PSTATE_MG, %pstate
	.section	.sun4v_2insn_patch, "ax"
	.word		661b
	SET_GL(1)
	nop
	.previous

	rdpr		%tpc, %g5
	ba,pt		%xcc, sparc64_realfault_common
	 mov		FAULT_CODE_ITLB, %g4

kvmap_itlb_obp:
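	/* OBP_TRANS_LOOKUP scans the table of PROM translations
	 * captured from the firmware at boot (prom_trans[]), and
	 * branches to the longpath when the address is not covered.
	 */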
	OBP_TRANS_LOOKUP(%g4, %g5, %g2, %g3, kvmap_itlb_longpath)

	TSB_LOCK_TAG(%g1, %g2, %g7)

	TSB_WRITE(%g1, %g5, %g6)

	ba,pt		%xcc, kvmap_itlb_load
	 nop

kvmap_dtlb_obp:
	OBP_TRANS_LOOKUP(%g4, %g5, %g2, %g3, kvmap_dtlb_longpath)

	TSB_LOCK_TAG(%g1, %g2, %g7)

	TSB_WRITE(%g1, %g5, %g6)

	ba,pt		%xcc, kvmap_dtlb_load
	 nop

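	/* Early in boot the linear mapping has no TSB or page table
	 * entries yet, so the PTE is computed directly: XORing the
	 * vaddr with kern_linear_pte_xor yields a valid huge-page
	 * PTE, the physical offset and attribute bits being folded
	 * into that constant.
	 */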
kvmap_linear_early:
	sethi		%hi(kern_linear_pte_xor), %g7
	ldx		[%g7 + %lo(kern_linear_pte_xor)], %g2
	ba,pt		%xcc, kvmap_dtlb_tsb4m_load
	 xor		%g2, %g4, %g5

	.align		32
kvmap_dtlb_tsb4m_load:
	TSB_LOCK_TAG(%g1, %g2, %g7)
	TSB_WRITE(%g1, %g5, %g6)
	ba,pt		%xcc, kvmap_dtlb_load
	 nop

kvmap_dtlb:
	/* %g6: TAG TARGET */
	mov		TLB_TAG_ACCESS, %g4
	ldxa		[%g4] ASI_DMMU, %g4

	/* The kernel executes in context zero, therefore we do not
	 * need to clear the context ID bits out of %g4 here.
	 */

	/* sun4v_dtlb_miss branches here with the missing virtual
	 * address already loaded into %g4.
	 */
kvmap_dtlb_4v:
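	/* Linear (PAGE_OFFSET-based) kernel addresses have bit 63
	 * set, so a non-negative vaddr cannot be a linear mapping
	 * access.
	 */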
	brgez,pn	%g4, kvmap_dtlb_nonlinear
	 nop

#ifdef CONFIG_DEBUG_PAGEALLOC
	/* Index through the base page size TSB even for linear
	 * mappings when using page allocation debugging.
	 */
	KERN_TSB_LOOKUP_TL1(%g4, %g6, %g5, %g1, %g2, %g3, kvmap_dtlb_load)
#else
	/* Correct TAG_TARGET is already in %g6, check 4mb TSB.  */
	KERN_TSB4M_LOOKUP_TL1(%g6, %g5, %g1, %g2, %g3, kvmap_dtlb_load)
#endif
	/* Linear mapping TSB lookup failed.  Fall through to kernel
	 * page table based lookup.
	 */
	.globl		kvmap_linear_patch
kvmap_linear_patch:
	ba,a,pt		%xcc, kvmap_linear_early

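	/* The ba,a above is a patch site: once the kernel page tables
	 * fully cover the linear mapping (see
	 * kernel_physical_mapping_init() in arch/sparc/mm/init_64.c),
	 * it is overwritten with a nop so a TSB miss falls through to
	 * the page table walk at kvmap_dtlb_vmalloc_addr below.
	 */
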
kvmap_dtlb_vmalloc_addr:
	KERN_PGTABLE_WALK(%g4, %g5, %g2, kvmap_dtlb_longpath)

	TSB_LOCK_TAG(%g1, %g2, %g7)
	TSB_WRITE(%g1, %g5, %g6)

	/* fallthrough to TLB load */

kvmap_dtlb_load:

661:	stxa		%g5, [%g0] ASI_DTLB_DATA_IN	! Reload TLB
	retry
	.section	.sun4v_2insn_patch, "ax"
	.word		661b
	nop
	nop
	.previous

	/* For sun4v the ASI_DTLB_DATA_IN store and the retry
	 * instruction get nop'd out and we get here to branch
	 * to the sun4v tlb load code.  The registers are set up
	 * as follows:
	 *
	 * %g4: vaddr
	 * %g5: PTE
	 * %g6: TAG
	 *
	 * The sun4v TLB load wants the PTE in %g3, so we fix that
	 * up here.
	 */
	ba,pt		%xcc, sun4v_dtlb_load
	 mov		%g5, %g3

#ifdef CONFIG_SPARSEMEM_VMEMMAP
kvmap_vmemmap:
	KERN_PGTABLE_WALK(%g4, %g5, %g2, kvmap_dtlb_longpath)
	ba,a,pt		%xcc, kvmap_dtlb_load
#endif

kvmap_dtlb_nonlinear:
	/* Catch kernel NULL pointer derefs.  */
	sethi		%hi(PAGE_SIZE), %g5
	cmp		%g4, %g5
	bleu,pn		%xcc, kvmap_dtlb_longpath
	 nop

#ifdef CONFIG_SPARSEMEM_VMEMMAP
	/* Do not use the TSB for vmemmap.  */
	sethi		%hi(VMEMMAP_BASE), %g5
	ldx		[%g5 + %lo(VMEMMAP_BASE)], %g5
	cmp		%g4, %g5
	bgeu,pn		%xcc, kvmap_vmemmap
	 nop
#endif

	KERN_TSB_LOOKUP_TL1(%g4, %g6, %g5, %g1, %g2, %g3, kvmap_dtlb_load)

kvmap_dtlb_tsbmiss:
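	/* After a TSB miss, only [MODULES_VADDR, VMALLOC_END) can
	 * still be a legitimate kernel address; anything outside
	 * that window is a genuine fault.
	 */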
	sethi		%hi(MODULES_VADDR), %g5
	cmp		%g4, %g5
	blu,pn		%xcc, kvmap_dtlb_longpath
	 sethi		%hi(VMALLOC_END), %g5
	ldx		[%g5 + %lo(VMALLOC_END)], %g5
	cmp		%g4, %g5
	bgeu,pn		%xcc, kvmap_dtlb_longpath
	 nop

kvmap_check_obp:
	sethi		%hi(LOW_OBP_ADDRESS), %g5
	cmp		%g4, %g5
	blu,pn		%xcc, kvmap_dtlb_vmalloc_addr
	 mov		0x1, %g5
	sllx		%g5, 32, %g5
	cmp		%g4, %g5
	blu,pn		%xcc, kvmap_dtlb_obp
	 nop
	ba,pt		%xcc, kvmap_dtlb_vmalloc_addr
	 nop

kvmap_dtlb_longpath:

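	/* Same patching scheme as kvmap_itlb_longpath, except that
	 * the sun4v replacement also loads the per-cpu fault status
	 * area pointer from scratchpad register 0 for use below.
	 */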
661:	rdpr		%pstate, %g5
	wrpr		%g5, PSTATE_AG | PSTATE_MG, %pstate
	.section	.sun4v_2insn_patch, "ax"
	.word		661b
	SET_GL(1)
	ldxa		[%g0] ASI_SCRATCHPAD, %g5
	.previous

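	/* At TL1 the miss came from a normal access and becomes a
	 * regular fault; at deeper trap levels we interrupted a
	 * register window spill/fill and must detour through
	 * winfix_trampoline.
	 */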
	rdpr		%tl, %g3
	cmp		%g3, 1

661:	mov		TLB_TAG_ACCESS, %g4
	ldxa		[%g4] ASI_DMMU, %g5
	.section	.sun4v_2insn_patch, "ax"
	.word		661b
	ldx		[%g5 + HV_FAULT_D_ADDR_OFFSET], %g5
	nop
	.previous

	/* The kernel executes in context zero, therefore we do not
	 * need to clear the context ID bits out of %g5 here.
	 */

	be,pt		%xcc, sparc64_realfault_common
	 mov		FAULT_CODE_DTLB, %g4
	ba,pt		%xcc, winfix_trampoline
	 nop