/*
 * PARISC TLB and cache flushing support
 * Copyright (C) 2000-2001 Hewlett-Packard (John Marvin)
 * Copyright (C) 2001 Matthew Wilcox (willy at parisc-linux.org)
 * Copyright (C) 2002 Richard Hirst (rhirst with parisc-linux.org)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

/*
 * NOTE: fdc, fic, and pdc instructions that use base register modification
 *	 should only use index and base registers that are not shadowed,
 *	 so that the fast path emulation in the non access miss handler
 *	 can be used.
 */

#ifdef CONFIG_64BIT
	.level	2.0w
#else
	.level	2.0
#endif

#include <asm/psw.h>
#include <asm/assembly.h>
#include <asm/pgtable.h>
#include <asm/cache.h>
#include <asm/ldcw.h>
#include <linux/linkage.h>

	.text
	.align	128

ENTRY_CFI(flush_tlb_all_local)
	.proc
	.callinfo NO_CALLS
	.entry

	/*
	 * The pitlbe and pdtlbe instructions should only be used to
	 * flush the entire tlb. Also, there needs to be no intervening
	 * tlb operations, e.g. tlb misses, so the operation needs
	 * to happen in real mode with all interruptions disabled.
	 */

	/* pcxt_ssm_bug	- relied upon translation! PA 2.0 Arch. F-4 and F-5 */
	rsm		PSW_SM_I, %r19		/* save I-bit state */
	load32		PA(1f), %r1
	nop
	nop
	nop
	nop
	nop

	rsm		PSW_SM_Q, %r0		/* prep to load iia queue */
	mtctl		%r0, %cr17		/* Clear IIASQ tail */
	mtctl		%r0, %cr17		/* Clear IIASQ head */
	mtctl		%r1, %cr18		/* IIAOQ head */
	ldo		4(%r1), %r1
	mtctl		%r1, %cr18		/* IIAOQ tail */
	load32		REAL_MODE_PSW, %r1
	mtctl		%r1, %ipsw
	rfi
	nop

1:	load32		PA(cache_info), %r1

	/* Flush Instruction Tlb */

	LDREG		ITLB_SID_BASE(%r1), %r20
	LDREG		ITLB_SID_STRIDE(%r1), %r21
	LDREG		ITLB_SID_COUNT(%r1), %r22
	LDREG		ITLB_OFF_BASE(%r1), %arg0
	LDREG		ITLB_OFF_STRIDE(%r1), %arg1
	LDREG		ITLB_OFF_COUNT(%r1), %arg2
	LDREG		ITLB_LOOP(%r1), %arg3

	addib,COND(=)		-1, %arg3, fitoneloop	/* Preadjust and test */
	movb,<,n	%arg3, %r31, fitdone	/* If loop < 0, skip */
	copy		%arg0, %r28		/* Init base addr */

fitmanyloop:					/* Loop if LOOP >= 2 */
	mtsp		%r20, %sr1
	add		%r21, %r20, %r20	/* increment space */
	copy		%arg2, %r29		/* Init middle loop count */

fitmanymiddle:					/* Loop if LOOP >= 2 */
	addib,COND(>)		-1, %r31, fitmanymiddle	/* Adjusted inner loop decr */
	pitlbe		%r0(%sr1, %r28)
	pitlbe,m	%arg1(%sr1, %r28)	/* Last pitlbe and addr adjust */
	addib,COND(>)		-1, %r29, fitmanymiddle	/* Middle loop decr */
	copy		%arg3, %r31		/* Re-init inner loop count */

	movb,tr		%arg0, %r28, fitmanyloop /* Re-init base addr */
	addib,COND(<=),n	-1, %r22, fitdone	/* Outer loop count decr */

fitoneloop:					/* Loop if LOOP = 1 */
	mtsp		%r20, %sr1
	copy		%arg0, %r28		/* init base addr */
	copy		%arg2, %r29		/* init middle loop count */

fitonemiddle:					/* Loop if LOOP = 1 */
	addib,COND(>)		-1, %r29, fitonemiddle	/* Middle loop count decr */
	pitlbe,m	%arg1(%sr1, %r28)	/* pitlbe for one loop */

	addib,COND(>)		-1, %r22, fitoneloop	/* Outer loop count decr */
	add		%r21, %r20, %r20	/* increment space */

fitdone:

	/* Flush Data Tlb */

	LDREG		DTLB_SID_BASE(%r1), %r20
	LDREG		DTLB_SID_STRIDE(%r1), %r21
	LDREG		DTLB_SID_COUNT(%r1), %r22
	LDREG		DTLB_OFF_BASE(%r1), %arg0
	LDREG		DTLB_OFF_STRIDE(%r1), %arg1
	LDREG		DTLB_OFF_COUNT(%r1), %arg2
	LDREG		DTLB_LOOP(%r1), %arg3

	addib,COND(=)		-1, %arg3, fdtoneloop	/* Preadjust and test */
	movb,<,n	%arg3, %r31, fdtdone	/* If loop < 0, skip */
	copy		%arg0, %r28		/* Init base addr */

fdtmanyloop:					/* Loop if LOOP >= 2 */
	mtsp		%r20, %sr1
	add		%r21, %r20, %r20	/* increment space */
	copy		%arg2, %r29		/* Init middle loop count */

fdtmanymiddle:					/* Loop if LOOP >= 2 */
	addib,COND(>)		-1, %r31, fdtmanymiddle	/* Adjusted inner loop decr */
	pdtlbe		%r0(%sr1, %r28)
	pdtlbe,m	%arg1(%sr1, %r28)	/* Last pdtlbe and addr adjust */
	addib,COND(>)		-1, %r29, fdtmanymiddle	/* Middle loop decr */
	copy		%arg3, %r31		/* Re-init inner loop count */

	movb,tr		%arg0, %r28, fdtmanyloop /* Re-init base addr */
	addib,COND(<=),n	-1, %r22, fdtdone	/* Outer loop count decr */

fdtoneloop:					/* Loop if LOOP = 1 */
	mtsp		%r20, %sr1
	copy		%arg0, %r28		/* init base addr */
	copy		%arg2, %r29		/* init middle loop count */

fdtonemiddle:					/* Loop if LOOP = 1 */
	addib,COND(>)		-1, %r29, fdtonemiddle	/* Middle loop count decr */
	pdtlbe,m	%arg1(%sr1, %r28)	/* pdtlbe for one loop */

	addib,COND(>)		-1, %r22, fdtoneloop	/* Outer loop count decr */
	add		%r21, %r20, %r20	/* increment space */


fdtdone:
	/*
	 * Switch back to virtual mode
	 */
	/* pcxt_ssm_bug */
	rsm		PSW_SM_I, %r0
	load32		2f, %r1
	nop
	nop
	nop
	nop
	nop

	rsm		PSW_SM_Q, %r0		/* prep to load iia queue */
	mtctl		%r0, %cr17		/* Clear IIASQ tail */
	mtctl		%r0, %cr17		/* Clear IIASQ head */
	mtctl		%r1, %cr18		/* IIAOQ head */
	ldo		4(%r1), %r1
	mtctl		%r1, %cr18		/* IIAOQ tail */
	load32		KERNEL_PSW, %r1
	or		%r1, %r19, %r1	/* I-bit to state on entry */
	mtctl		%r1, %ipsw	/* restore I-bit (entire PSW) */
	rfi
	nop

2:	bv		%r0(%r2)
	nop

	.exit
	.procend
ENDPROC_CFI(flush_tlb_all_local)

	.import cache_info,data

ENTRY_CFI(flush_instruction_cache_local)
	.proc
	.callinfo NO_CALLS
	.entry

	load32		cache_info, %r1

	/* Flush Instruction Cache */

	LDREG		ICACHE_BASE(%r1), %arg0
	LDREG		ICACHE_STRIDE(%r1), %arg1
	LDREG		ICACHE_COUNT(%r1), %arg2
	LDREG		ICACHE_LOOP(%r1), %arg3
	rsm		PSW_SM_I, %r22		/* No mmgt ops during loop */
	mtsp		%r0, %sr1
	addib,COND(=)		-1, %arg3, fioneloop	/* Preadjust and test */
	movb,<,n	%arg3, %r31, fisync	/* If loop < 0, do sync */

fimanyloop:					/* Loop if LOOP >= 2 */
	addib,COND(>)		-1, %r31, fimanyloop	/* Adjusted inner loop decr */
	fice		%r0(%sr1, %arg0)
	fice,m		%arg1(%sr1, %arg0)	/* Last fice and addr adjust */
	movb,tr		%arg3, %r31, fimanyloop	/* Re-init inner loop count */
	addib,COND(<=),n	-1, %arg2, fisync	/* Outer loop decr */

fioneloop:					/* Loop if LOOP = 1 */
	/* Some implementations may flush with a single fice instruction */
	cmpib,COND(>>=),n	15, %arg2, fioneloop2

fioneloop1:
	fice,m		%arg1(%sr1, %arg0)
	fice,m		%arg1(%sr1, %arg0)
	fice,m		%arg1(%sr1, %arg0)
	fice,m		%arg1(%sr1, %arg0)
	fice,m		%arg1(%sr1, %arg0)
	fice,m		%arg1(%sr1, %arg0)
	fice,m		%arg1(%sr1, %arg0)
	fice,m		%arg1(%sr1, %arg0)
	fice,m		%arg1(%sr1, %arg0)
	fice,m		%arg1(%sr1, %arg0)
	fice,m		%arg1(%sr1, %arg0)
	fice,m		%arg1(%sr1, %arg0)
	fice,m		%arg1(%sr1, %arg0)
	fice,m		%arg1(%sr1, %arg0)
	fice,m		%arg1(%sr1, %arg0)
	addib,COND(>)	-16, %arg2, fioneloop1
	fice,m		%arg1(%sr1, %arg0)

	/* Check if done */
	cmpb,COND(=),n	%arg2, %r0, fisync	/* Predict branch taken */

fioneloop2:
	addib,COND(>)	-1, %arg2, fioneloop2	/* Outer loop count decr */
	fice,m		%arg1(%sr1, %arg0)	/* Fice for one loop */

fisync:
	sync
	mtsm		%r22			/* restore I-bit */
	bv		%r0(%r2)
	nop
	.exit

	.procend
ENDPROC_CFI(flush_instruction_cache_local)


	.import cache_info, data
ENTRY_CFI(flush_data_cache_local)
	.proc
	.callinfo NO_CALLS
	.entry

	load32		cache_info, %r1

	/* Flush Data Cache */

	LDREG		DCACHE_BASE(%r1), %arg0
	LDREG		DCACHE_STRIDE(%r1), %arg1
	LDREG		DCACHE_COUNT(%r1), %arg2
	LDREG		DCACHE_LOOP(%r1), %arg3
	rsm		PSW_SM_I, %r22		/* No mmgt ops during loop */
	mtsp		%r0, %sr1
	addib,COND(=)		-1, %arg3, fdoneloop	/* Preadjust and test */
	movb,<,n	%arg3, %r31, fdsync	/* If loop < 0, do sync */

fdmanyloop:					/* Loop if LOOP >= 2 */
	addib,COND(>)		-1, %r31, fdmanyloop	/* Adjusted inner loop decr */
	fdce		%r0(%sr1, %arg0)
	fdce,m		%arg1(%sr1, %arg0)	/* Last fdce and addr adjust */
	movb,tr		%arg3, %r31, fdmanyloop	/* Re-init inner loop count */
	addib,COND(<=),n	-1, %arg2, fdsync	/* Outer loop decr */

fdoneloop:					/* Loop if LOOP = 1 */
	/* Some implementations may flush with a single fdce instruction */
	cmpib,COND(>>=),n	15, %arg2, fdoneloop2

fdoneloop1:
	fdce,m		%arg1(%sr1, %arg0)
	fdce,m		%arg1(%sr1, %arg0)
	fdce,m		%arg1(%sr1, %arg0)
	fdce,m		%arg1(%sr1, %arg0)
	fdce,m		%arg1(%sr1, %arg0)
	fdce,m		%arg1(%sr1, %arg0)
	fdce,m		%arg1(%sr1, %arg0)
	fdce,m		%arg1(%sr1, %arg0)
	fdce,m		%arg1(%sr1, %arg0)
	fdce,m		%arg1(%sr1, %arg0)
	fdce,m		%arg1(%sr1, %arg0)
	fdce,m		%arg1(%sr1, %arg0)
	fdce,m		%arg1(%sr1, %arg0)
	fdce,m		%arg1(%sr1, %arg0)
	fdce,m		%arg1(%sr1, %arg0)
	addib,COND(>)	-16, %arg2, fdoneloop1
	fdce,m		%arg1(%sr1, %arg0)

	/* Check if done */
	cmpb,COND(=),n	%arg2, %r0, fdsync	/* Predict branch taken */

fdoneloop2:
	addib,COND(>)	-1, %arg2, fdoneloop2	/* Outer loop count decr */
	fdce,m		%arg1(%sr1, %arg0)	/* Fdce for one loop */

fdsync:
	syncdma
	sync
	mtsm		%r22			/* restore I-bit */
	bv		%r0(%r2)
	nop
	.exit

	.procend
ENDPROC_CFI(flush_data_cache_local)

	.align	16

/* Macros to serialize TLB purge operations on SMP.  */

	.macro	tlb_lock	la,flags,tmp
#ifdef CONFIG_SMP
#if __PA_LDCW_ALIGNMENT > 4
	load32		pa_tlb_lock + __PA_LDCW_ALIGNMENT-1, \la
	depi		0,31,__PA_LDCW_ALIGN_ORDER, \la
#else
	load32		pa_tlb_lock, \la
#endif
	rsm		PSW_SM_I,\flags
1:	LDCW		0(\la),\tmp
	cmpib,<>,n	0,\tmp,3f
2:	ldw		0(\la),\tmp
	cmpb,<>		%r0,\tmp,1b
	nop
	b,n		2b
3:
#endif
	.endm

	.macro	tlb_unlock	la,flags,tmp
#ifdef CONFIG_SMP
	ldi		1,\tmp
	stw		\tmp,0(\la)
	mtsm		\flags
#endif
	.endm
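
	/*
	 * A short note on the locking protocol above: LDCW atomically loads
	 * the lock word and zeroes it, so a non-zero value read back means
	 * the lock was free and has just been acquired (the cmpib falls
	 * through to 3f).  Otherwise we spin with plain ldw loads until the
	 * word becomes non-zero again and then retry the LDCW.  tlb_unlock
	 * releases the lock by storing 1 and restores the PSW I-bit saved
	 * in \flags.  The __PA_LDCW_ALIGNMENT fixup exists because ldcw
	 * requires a suitably (16-byte) aligned operand.
	 */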

/* Clear page using kernel mapping.  */

ENTRY_CFI(clear_page_asm)
	.proc
	.callinfo NO_CALLS
	.entry

#ifdef CONFIG_64BIT

	/* Unroll the loop.  */
	ldi		(PAGE_SIZE / 128), %r1

1:
	std		%r0, 0(%r26)
	std		%r0, 8(%r26)
	std		%r0, 16(%r26)
	std		%r0, 24(%r26)
	std		%r0, 32(%r26)
	std		%r0, 40(%r26)
	std		%r0, 48(%r26)
	std		%r0, 56(%r26)
	std		%r0, 64(%r26)
	std		%r0, 72(%r26)
	std		%r0, 80(%r26)
	std		%r0, 88(%r26)
	std		%r0, 96(%r26)
	std		%r0, 104(%r26)
	std		%r0, 112(%r26)
	std		%r0, 120(%r26)

	/* Note reverse branch hint for addib is taken.  */
	addib,COND(>),n	-1, %r1, 1b
	ldo		128(%r26), %r26

#else

	/*
	 * Note that until (if) we start saving the full 64-bit register
	 * values on interrupt, we can't use std on a 32 bit kernel.
	 */
	ldi		(PAGE_SIZE / 64), %r1

1:
	stw		%r0, 0(%r26)
	stw		%r0, 4(%r26)
	stw		%r0, 8(%r26)
	stw		%r0, 12(%r26)
	stw		%r0, 16(%r26)
	stw		%r0, 20(%r26)
	stw		%r0, 24(%r26)
	stw		%r0, 28(%r26)
	stw		%r0, 32(%r26)
	stw		%r0, 36(%r26)
	stw		%r0, 40(%r26)
	stw		%r0, 44(%r26)
	stw		%r0, 48(%r26)
	stw		%r0, 52(%r26)
	stw		%r0, 56(%r26)
	stw		%r0, 60(%r26)

	addib,COND(>),n	-1, %r1, 1b
	ldo		64(%r26), %r26
#endif
	bv		%r0(%r2)
	nop
	.exit

	.procend
ENDPROC_CFI(clear_page_asm)

/* Copy page using kernel mapping.  */

ENTRY_CFI(copy_page_asm)
	.proc
	.callinfo NO_CALLS
	.entry

#ifdef CONFIG_64BIT
	/* PA8x00 CPUs can consume 2 loads or 1 store per cycle.
	 * Unroll the loop by hand and arrange insn appropriately.
	 * Prefetch doesn't improve performance on rp3440.
	 * GCC probably can do this just as well...
	 */

	ldi		(PAGE_SIZE / 128), %r1

1:	ldd		0(%r25), %r19
	ldd		8(%r25), %r20

	ldd		16(%r25), %r21
	ldd		24(%r25), %r22
	std		%r19, 0(%r26)
	std		%r20, 8(%r26)

	ldd		32(%r25), %r19
	ldd		40(%r25), %r20
	std		%r21, 16(%r26)
	std		%r22, 24(%r26)

	ldd		48(%r25), %r21
	ldd		56(%r25), %r22
	std		%r19, 32(%r26)
	std		%r20, 40(%r26)

	ldd		64(%r25), %r19
	ldd		72(%r25), %r20
	std		%r21, 48(%r26)
	std		%r22, 56(%r26)

	ldd		80(%r25), %r21
	ldd		88(%r25), %r22
	std		%r19, 64(%r26)
	std		%r20, 72(%r26)

	ldd		96(%r25), %r19
	ldd		104(%r25), %r20
	std		%r21, 80(%r26)
	std		%r22, 88(%r26)

	ldd		112(%r25), %r21
	ldd		120(%r25), %r22
	ldo		128(%r25), %r25
	std		%r19, 96(%r26)
	std		%r20, 104(%r26)

	std		%r21, 112(%r26)
	std		%r22, 120(%r26)

	/* Note reverse branch hint for addib is taken.  */
	addib,COND(>),n	-1, %r1, 1b
	ldo		128(%r26), %r26

#else

	/*
	 * This loop is optimized for PCXL/PCXL2 ldw/ldw and stw/stw
	 * bundles (very restricted rules for bundling).
	 * Note that until (if) we start saving
	 * the full 64 bit register values on interrupt, we can't
	 * use ldd/std on a 32 bit kernel.
	 */
	ldw		0(%r25), %r19
	ldi		(PAGE_SIZE / 64), %r1

1:
	ldw		4(%r25), %r20
	ldw		8(%r25), %r21
	ldw		12(%r25), %r22
	stw		%r19, 0(%r26)
	stw		%r20, 4(%r26)
	stw		%r21, 8(%r26)
	stw		%r22, 12(%r26)
	ldw		16(%r25), %r19
	ldw		20(%r25), %r20
	ldw		24(%r25), %r21
	ldw		28(%r25), %r22
	stw		%r19, 16(%r26)
	stw		%r20, 20(%r26)
	stw		%r21, 24(%r26)
	stw		%r22, 28(%r26)
	ldw		32(%r25), %r19
	ldw		36(%r25), %r20
	ldw		40(%r25), %r21
	ldw		44(%r25), %r22
	stw		%r19, 32(%r26)
	stw		%r20, 36(%r26)
	stw		%r21, 40(%r26)
	stw		%r22, 44(%r26)
	ldw		48(%r25), %r19
	ldw		52(%r25), %r20
	ldw		56(%r25), %r21
	ldw		60(%r25), %r22
	stw		%r19, 48(%r26)
	stw		%r20, 52(%r26)
	ldo		64(%r25), %r25
	stw		%r21, 56(%r26)
	stw		%r22, 60(%r26)
	ldo		64(%r26), %r26
	addib,COND(>),n	-1, %r1, 1b
	ldw		0(%r25), %r19
#endif
	bv		%r0(%r2)
	nop
	.exit

	.procend
ENDPROC_CFI(copy_page_asm)

/*
 * NOTE: Code in clear_user_page has a hard coded dependency on the
 *	 maximum alias boundary being 4 Mb. We've been assured by the
 *	 parisc chip designers that there will not ever be a parisc
 *	 chip with a larger alias boundary (Never say never :-) ).
 *
 *	 Subtle: the dtlb miss handlers support the temp alias region by
 *	 "knowing" that if a dtlb miss happens within the temp alias
 *	 region it must have occurred while in clear_user_page. Since
 *	 this routine makes use of processor local translations, we
 *	 don't want to insert them into the kernel page table. Instead,
 *	 we load up some general registers (they need to be registers
 *	 which aren't shadowed) with the physical page numbers (preshifted
 *	 for tlb insertion) needed to insert the translations. When we
 *	 miss on the translation, the dtlb miss handler inserts the
 *	 translation into the tlb using these values:
 *
 *	 %r26 physical page (shifted for tlb insert) of "to" translation
 *	 %r23 physical page (shifted for tlb insert) of "from" translation
 */

	/* Drop prot bits and convert to page addr for iitlbt and idtlbt */
	#define PAGE_ADD_SHIFT	(PAGE_SHIFT-12)
	.macro	convert_phys_for_tlb_insert20	phys
	extrd,u		\phys, 56-PAGE_ADD_SHIFT, 32-PAGE_ADD_SHIFT, \phys
#if _PAGE_SIZE_ENCODING_DEFAULT
	depdi		_PAGE_SIZE_ENCODING_DEFAULT, 63, (63-58), \phys
#endif
	.endm
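
	/*
	 * Roughly speaking: the extrd,u shifts the physical address down so
	 * that the physical page number sits where iitlbt/idtlbt expect it
	 * (dropping the page-offset/protection bits), and the optional depdi
	 * merges the default page-size encoding into the low-order bits of
	 * the result.  The dtlb miss handler mentioned above consumes values
	 * prepared this way.
	 */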

	/*
	 * copy_user_page_asm() performs a page copy using mappings
	 * equivalent to the user page mappings.  It can be used to
	 * implement copy_user_page() but unfortunately both the `from'
	 * and `to' pages need to be flushed through mappings equivalent
	 * to the user mappings after the copy because the kernel accesses
	 * the `from' page through the kmap kernel mapping and the `to'
	 * page needs to be flushed since code can be copied.  As a
	 * result, this implementation is less efficient than the simpler
	 * copy using the kernel mapping.  It only needs the `from' page
	 * to be flushed via the user mapping.  The kunmap routines handle
	 * the flushes needed for the kernel mapping.
	 *
	 * I'm still keeping this around because it may be possible to
	 * use it if more information is passed into copy_user_page().
	 * Have to do some measurements to see if it is worthwhile to
	 * lobby for such a change.
	 *
	 */

ENTRY_CFI(copy_user_page_asm)
	.proc
	.callinfo NO_CALLS
	.entry

	/* Convert virtual `to' and `from' addresses to physical addresses.
	   Move `from' physical address to non shadowed register.  */
	ldil		L%(__PAGE_OFFSET), %r1
	sub		%r26, %r1, %r26
	sub		%r25, %r1, %r23

	ldil		L%(TMPALIAS_MAP_START), %r28
#ifdef CONFIG_64BIT
#if (TMPALIAS_MAP_START >= 0x80000000)
	depdi		0, 31,32, %r28		/* clear any sign extension */
#endif
	convert_phys_for_tlb_insert20 %r26	/* convert phys addr to tlb insert format */
	convert_phys_for_tlb_insert20 %r23	/* convert phys addr to tlb insert format */
	depd		%r24,63,22, %r28	/* Form aliased virtual address 'to' */
	depdi		0, 63,PAGE_SHIFT, %r28	/* Clear any offset bits */
	copy		%r28, %r29
	depdi		1, 41,1, %r29		/* Form aliased virtual address 'from' */
#else
	extrw,u		%r26, 24,25, %r26	/* convert phys addr to tlb insert format */
	extrw,u		%r23, 24,25, %r23	/* convert phys addr to tlb insert format */
	depw		%r24, 31,22, %r28	/* Form aliased virtual address 'to' */
	depwi		0, 31,PAGE_SHIFT, %r28	/* Clear any offset bits */
	copy		%r28, %r29
	depwi		1, 9,1, %r29		/* Form aliased virtual address 'from' */
#endif

	/* Purge any old translations */

#ifdef CONFIG_PA20
	pdtlb,l		%r0(%r28)
	pdtlb,l		%r0(%r29)
#else
	tlb_lock	%r20,%r21,%r22
	pdtlb		%r0(%r28)
	pdtlb		%r0(%r29)
	tlb_unlock	%r20,%r21,%r22
#endif

#ifdef CONFIG_64BIT
	/* PA8x00 CPUs can consume 2 loads or 1 store per cycle.
	 * Unroll the loop by hand and arrange insn appropriately.
	 * GCC probably can do this just as well.
	 */

	ldd		0(%r29), %r19
	ldi		(PAGE_SIZE / 128), %r1

1:	ldd		8(%r29), %r20

	ldd		16(%r29), %r21
	ldd		24(%r29), %r22
	std		%r19, 0(%r28)
	std		%r20, 8(%r28)

	ldd		32(%r29), %r19
	ldd		40(%r29), %r20
	std		%r21, 16(%r28)
	std		%r22, 24(%r28)

	ldd		48(%r29), %r21
	ldd		56(%r29), %r22
	std		%r19, 32(%r28)
	std		%r20, 40(%r28)

	ldd		64(%r29), %r19
	ldd		72(%r29), %r20
	std		%r21, 48(%r28)
	std		%r22, 56(%r28)

	ldd		80(%r29), %r21
	ldd		88(%r29), %r22
	std		%r19, 64(%r28)
	std		%r20, 72(%r28)

	ldd		96(%r29), %r19
	ldd		104(%r29), %r20
	std		%r21, 80(%r28)
	std		%r22, 88(%r28)

	ldd		112(%r29), %r21
	ldd		120(%r29), %r22
	std		%r19, 96(%r28)
	std		%r20, 104(%r28)

	ldo		128(%r29), %r29
	std		%r21, 112(%r28)
	std		%r22, 120(%r28)
	ldo		128(%r28), %r28

	/* conditional branches nullify on forward taken branch, and on
	 * non-taken backward branch. Note that .+4 is a backwards branch.
	 * The ldd should only get executed if the branch is taken.
	 */
	addib,COND(>),n	-1, %r1, 1b		/* bundle 10 */
	ldd		0(%r29), %r19		/* start next loads */

#else
	ldi		(PAGE_SIZE / 64), %r1

	/*
	 * This loop is optimized for PCXL/PCXL2 ldw/ldw and stw/stw
	 * bundles (very restricted rules for bundling). It probably
	 * does OK on PCXU and better, but we could do better with
	 * ldd/std instructions. Note that until (if) we start saving
	 * the full 64 bit register values on interrupt, we can't
	 * use ldd/std on a 32 bit kernel.
	 */

1:	ldw		0(%r29), %r19
	ldw		4(%r29), %r20
	ldw		8(%r29), %r21
	ldw		12(%r29), %r22
	stw		%r19, 0(%r28)
	stw		%r20, 4(%r28)
	stw		%r21, 8(%r28)
	stw		%r22, 12(%r28)
	ldw		16(%r29), %r19
	ldw		20(%r29), %r20
	ldw		24(%r29), %r21
	ldw		28(%r29), %r22
	stw		%r19, 16(%r28)
	stw		%r20, 20(%r28)
	stw		%r21, 24(%r28)
	stw		%r22, 28(%r28)
	ldw		32(%r29), %r19
	ldw		36(%r29), %r20
	ldw		40(%r29), %r21
	ldw		44(%r29), %r22
	stw		%r19, 32(%r28)
	stw		%r20, 36(%r28)
	stw		%r21, 40(%r28)
	stw		%r22, 44(%r28)
	ldw		48(%r29), %r19
	ldw		52(%r29), %r20
	ldw		56(%r29), %r21
	ldw		60(%r29), %r22
	stw		%r19, 48(%r28)
	stw		%r20, 52(%r28)
	stw		%r21, 56(%r28)
	stw		%r22, 60(%r28)
	ldo		64(%r28), %r28

	addib,COND(>)	-1, %r1, 1b
	ldo		64(%r29), %r29
#endif

	bv		%r0(%r2)
	nop
	.exit

	.procend
ENDPROC_CFI(copy_user_page_asm)

ENTRY_CFI(clear_user_page_asm)
	.proc
	.callinfo NO_CALLS
	.entry

	tophys_r1	%r26

	ldil		L%(TMPALIAS_MAP_START), %r28
#ifdef CONFIG_64BIT
#if (TMPALIAS_MAP_START >= 0x80000000)
	depdi		0, 31,32, %r28		/* clear any sign extension */
#endif
	convert_phys_for_tlb_insert20 %r26	/* convert phys addr to tlb insert format */
	depd		%r25, 63,22, %r28	/* Form aliased virtual address 'to' */
	depdi		0, 63,PAGE_SHIFT, %r28	/* Clear any offset bits */
#else
	extrw,u		%r26, 24,25, %r26	/* convert phys addr to tlb insert format */
	depw		%r25, 31,22, %r28	/* Form aliased virtual address 'to' */
	depwi		0, 31,PAGE_SHIFT, %r28	/* Clear any offset bits */
#endif

	/* Purge any old translation */

#ifdef CONFIG_PA20
	pdtlb,l		%r0(%r28)
#else
	tlb_lock	%r20,%r21,%r22
	pdtlb		%r0(%r28)
	tlb_unlock	%r20,%r21,%r22
#endif

#ifdef CONFIG_64BIT
	ldi		(PAGE_SIZE / 128), %r1

	/* PREFETCH (Write) has not (yet) been proven to help here */
	/* #define	PREFETCHW_OP	ldd	256(%0), %r0 */

1:	std		%r0, 0(%r28)
	std		%r0, 8(%r28)
	std		%r0, 16(%r28)
	std		%r0, 24(%r28)
	std		%r0, 32(%r28)
	std		%r0, 40(%r28)
	std		%r0, 48(%r28)
	std		%r0, 56(%r28)
	std		%r0, 64(%r28)
	std		%r0, 72(%r28)
	std		%r0, 80(%r28)
	std		%r0, 88(%r28)
	std		%r0, 96(%r28)
	std		%r0, 104(%r28)
	std		%r0, 112(%r28)
	std		%r0, 120(%r28)
	addib,COND(>)	-1, %r1, 1b
	ldo		128(%r28), %r28

#else	/* ! CONFIG_64BIT */
	ldi		(PAGE_SIZE / 64), %r1

1:	stw		%r0, 0(%r28)
	stw		%r0, 4(%r28)
	stw		%r0, 8(%r28)
	stw		%r0, 12(%r28)
	stw		%r0, 16(%r28)
	stw		%r0, 20(%r28)
	stw		%r0, 24(%r28)
	stw		%r0, 28(%r28)
	stw		%r0, 32(%r28)
	stw		%r0, 36(%r28)
	stw		%r0, 40(%r28)
	stw		%r0, 44(%r28)
	stw		%r0, 48(%r28)
	stw		%r0, 52(%r28)
	stw		%r0, 56(%r28)
	stw		%r0, 60(%r28)
	addib,COND(>)	-1, %r1, 1b
	ldo		64(%r28), %r28
#endif	/* CONFIG_64BIT */

	bv		%r0(%r2)
	nop
	.exit

	.procend
ENDPROC_CFI(clear_user_page_asm)

ENTRY_CFI(flush_dcache_page_asm)
	.proc
	.callinfo NO_CALLS
	.entry

	ldil		L%(TMPALIAS_MAP_START), %r28
#ifdef CONFIG_64BIT
#if (TMPALIAS_MAP_START >= 0x80000000)
	depdi		0, 31,32, %r28		/* clear any sign extension */
#endif
	convert_phys_for_tlb_insert20 %r26	/* convert phys addr to tlb insert format */
	depd		%r25, 63,22, %r28	/* Form aliased virtual address 'to' */
	depdi		0, 63,PAGE_SHIFT, %r28	/* Clear any offset bits */
#else
	extrw,u		%r26, 24,25, %r26	/* convert phys addr to tlb insert format */
	depw		%r25, 31,22, %r28	/* Form aliased virtual address 'to' */
	depwi		0, 31,PAGE_SHIFT, %r28	/* Clear any offset bits */
#endif

	/* Purge any old translation */

#ifdef CONFIG_PA20
	pdtlb,l		%r0(%r28)
#else
	tlb_lock	%r20,%r21,%r22
	pdtlb		%r0(%r28)
	tlb_unlock	%r20,%r21,%r22
#endif

	ldil		L%dcache_stride, %r1
	ldw		R%dcache_stride(%r1), r31

#ifdef CONFIG_64BIT
	depdi,z		1, 63-PAGE_SHIFT,1, %r25
#else
	depwi,z		1, 31-PAGE_SHIFT,1, %r25
#endif
	add		%r28, %r25, %r25
	sub		%r25, r31, %r25


1:	fdc,m		r31(%r28)
	fdc,m		r31(%r28)
	fdc,m		r31(%r28)
	fdc,m		r31(%r28)
	fdc,m		r31(%r28)
	fdc,m		r31(%r28)
	fdc,m		r31(%r28)
	fdc,m		r31(%r28)
	fdc,m		r31(%r28)
	fdc,m		r31(%r28)
	fdc,m		r31(%r28)
	fdc,m		r31(%r28)
	fdc,m		r31(%r28)
	fdc,m		r31(%r28)
	fdc,m		r31(%r28)
	cmpb,COND(<<)	%r28, %r25,1b
	fdc,m		r31(%r28)

	sync
	bv		%r0(%r2)
	nop
	.exit

	.procend
ENDPROC_CFI(flush_dcache_page_asm)

ENTRY_CFI(flush_icache_page_asm)
	.proc
	.callinfo NO_CALLS
	.entry

	ldil		L%(TMPALIAS_MAP_START), %r28
#ifdef CONFIG_64BIT
#if (TMPALIAS_MAP_START >= 0x80000000)
	depdi		0, 31,32, %r28		/* clear any sign extension */
#endif
	convert_phys_for_tlb_insert20 %r26	/* convert phys addr to tlb insert format */
	depd		%r25, 63,22, %r28	/* Form aliased virtual address 'to' */
	depdi		0, 63,PAGE_SHIFT, %r28	/* Clear any offset bits */
#else
	extrw,u		%r26, 24,25, %r26	/* convert phys addr to tlb insert format */
	depw		%r25, 31,22, %r28	/* Form aliased virtual address 'to' */
	depwi		0, 31,PAGE_SHIFT, %r28	/* Clear any offset bits */
#endif

	/* Purge any old translation.  Note that the FIC instruction
	 * may use either the instruction or data TLB.  Given that we
	 * have a flat address space, it's not clear which TLB will be
	 * used.  So, we purge both entries.  */

#ifdef CONFIG_PA20
	pdtlb,l		%r0(%r28)
	pitlb,l		%r0(%sr4,%r28)
#else
	tlb_lock	%r20,%r21,%r22
	pdtlb		%r0(%r28)
	pitlb		%r0(%sr4,%r28)
	tlb_unlock	%r20,%r21,%r22
#endif

	ldil		L%icache_stride, %r1
	ldw		R%icache_stride(%r1), %r31

#ifdef CONFIG_64BIT
	depdi,z		1, 63-PAGE_SHIFT,1, %r25
#else
	depwi,z		1, 31-PAGE_SHIFT,1, %r25
#endif
	add		%r28, %r25, %r25
	sub		%r25, %r31, %r25


	/* fic only has the type 26 form on PA1.1, requiring an
	 * explicit space specification, so use %sr4 */
1:	fic,m		%r31(%sr4,%r28)
	fic,m		%r31(%sr4,%r28)
	fic,m		%r31(%sr4,%r28)
	fic,m		%r31(%sr4,%r28)
	fic,m		%r31(%sr4,%r28)
	fic,m		%r31(%sr4,%r28)
	fic,m		%r31(%sr4,%r28)
	fic,m		%r31(%sr4,%r28)
	fic,m		%r31(%sr4,%r28)
	fic,m		%r31(%sr4,%r28)
	fic,m		%r31(%sr4,%r28)
	fic,m		%r31(%sr4,%r28)
	fic,m		%r31(%sr4,%r28)
	fic,m		%r31(%sr4,%r28)
	fic,m		%r31(%sr4,%r28)
	cmpb,COND(<<)	%r28, %r25,1b
	fic,m		%r31(%sr4,%r28)

	sync
	bv		%r0(%r2)
	nop
	.exit

	.procend
ENDPROC_CFI(flush_icache_page_asm)

ENTRY_CFI(flush_kernel_dcache_page_asm)
	.proc
	.callinfo NO_CALLS
	.entry

	ldil		L%dcache_stride, %r1
	ldw		R%dcache_stride(%r1), %r23

#ifdef CONFIG_64BIT
	depdi,z		1, 63-PAGE_SHIFT,1, %r25
#else
	depwi,z		1, 31-PAGE_SHIFT,1, %r25
#endif
	add		%r26, %r25, %r25
	sub		%r25, %r23, %r25


1:	fdc,m		%r23(%r26)
	fdc,m		%r23(%r26)
	fdc,m		%r23(%r26)
	fdc,m		%r23(%r26)
	fdc,m		%r23(%r26)
	fdc,m		%r23(%r26)
	fdc,m		%r23(%r26)
	fdc,m		%r23(%r26)
	fdc,m		%r23(%r26)
	fdc,m		%r23(%r26)
	fdc,m		%r23(%r26)
	fdc,m		%r23(%r26)
	fdc,m		%r23(%r26)
	fdc,m		%r23(%r26)
	fdc,m		%r23(%r26)
	cmpb,COND(<<)	%r26, %r25,1b
	fdc,m		%r23(%r26)

	sync
	bv		%r0(%r2)
	nop
	.exit

	.procend
ENDPROC_CFI(flush_kernel_dcache_page_asm)

ENTRY_CFI(purge_kernel_dcache_page_asm)
	.proc
	.callinfo NO_CALLS
	.entry

	ldil		L%dcache_stride, %r1
	ldw		R%dcache_stride(%r1), %r23

#ifdef CONFIG_64BIT
	depdi,z		1, 63-PAGE_SHIFT,1, %r25
#else
	depwi,z		1, 31-PAGE_SHIFT,1, %r25
#endif
	add		%r26, %r25, %r25
	sub		%r25, %r23, %r25

1:	pdc,m		%r23(%r26)
	pdc,m		%r23(%r26)
	pdc,m		%r23(%r26)
	pdc,m		%r23(%r26)
	pdc,m		%r23(%r26)
	pdc,m		%r23(%r26)
	pdc,m		%r23(%r26)
	pdc,m		%r23(%r26)
	pdc,m		%r23(%r26)
	pdc,m		%r23(%r26)
	pdc,m		%r23(%r26)
	pdc,m		%r23(%r26)
	pdc,m		%r23(%r26)
	pdc,m		%r23(%r26)
	pdc,m		%r23(%r26)
	cmpb,COND(<<)	%r26, %r25, 1b
	pdc,m		%r23(%r26)

	sync
	bv		%r0(%r2)
	nop
	.exit

	.procend
ENDPROC_CFI(purge_kernel_dcache_page_asm)

ENTRY_CFI(flush_user_dcache_range_asm)
	.proc
	.callinfo NO_CALLS
	.entry

	ldil		L%dcache_stride, %r1
	ldw		R%dcache_stride(%r1), %r23
	ldo		-1(%r23), %r21
	ANDCM		%r26, %r21, %r26

1:	cmpb,COND(<<),n	%r26, %r25, 1b
	fdc,m		%r23(%sr3, %r26)

	sync
	bv		%r0(%r2)
	nop
	.exit

	.procend
ENDPROC_CFI(flush_user_dcache_range_asm)

ENTRY_CFI(flush_kernel_dcache_range_asm)
	.proc
	.callinfo NO_CALLS
	.entry

	ldil		L%dcache_stride, %r1
	ldw		R%dcache_stride(%r1), %r23
	ldo		-1(%r23), %r21
	ANDCM		%r26, %r21, %r26

1:	cmpb,COND(<<),n	%r26, %r25,1b
	fdc,m		%r23(%r26)

	sync
	syncdma
	bv		%r0(%r2)
	nop
	.exit

	.procend
ENDPROC_CFI(flush_kernel_dcache_range_asm)

ENTRY_CFI(flush_user_icache_range_asm)
	.proc
	.callinfo NO_CALLS
	.entry

	ldil		L%icache_stride, %r1
	ldw		R%icache_stride(%r1), %r23
	ldo		-1(%r23), %r21
	ANDCM		%r26, %r21, %r26

1:	cmpb,COND(<<),n	%r26, %r25,1b
	fic,m		%r23(%sr3, %r26)

	sync
	bv		%r0(%r2)
	nop
	.exit

	.procend
ENDPROC_CFI(flush_user_icache_range_asm)

ENTRY_CFI(flush_kernel_icache_page)
	.proc
	.callinfo NO_CALLS
	.entry

	ldil		L%icache_stride, %r1
	ldw		R%icache_stride(%r1), %r23

#ifdef CONFIG_64BIT
	depdi,z		1, 63-PAGE_SHIFT,1, %r25
#else
	depwi,z		1, 31-PAGE_SHIFT,1, %r25
#endif
	add		%r26, %r25, %r25
	sub		%r25, %r23, %r25


1:	fic,m		%r23(%sr4, %r26)
	fic,m		%r23(%sr4, %r26)
	fic,m		%r23(%sr4, %r26)
	fic,m		%r23(%sr4, %r26)
	fic,m		%r23(%sr4, %r26)
	fic,m		%r23(%sr4, %r26)
	fic,m		%r23(%sr4, %r26)
	fic,m		%r23(%sr4, %r26)
	fic,m		%r23(%sr4, %r26)
	fic,m		%r23(%sr4, %r26)
	fic,m		%r23(%sr4, %r26)
	fic,m		%r23(%sr4, %r26)
	fic,m		%r23(%sr4, %r26)
	fic,m		%r23(%sr4, %r26)
	fic,m		%r23(%sr4, %r26)
	cmpb,COND(<<)	%r26, %r25, 1b
	fic,m		%r23(%sr4, %r26)

	sync
	bv		%r0(%r2)
	nop
	.exit

	.procend
ENDPROC_CFI(flush_kernel_icache_page)

ENTRY_CFI(flush_kernel_icache_range_asm)
	.proc
	.callinfo NO_CALLS
	.entry

	ldil		L%icache_stride, %r1
	ldw		R%icache_stride(%r1), %r23
	ldo		-1(%r23), %r21
	ANDCM		%r26, %r21, %r26

1:	cmpb,COND(<<),n	%r26, %r25, 1b
	fic,m		%r23(%sr4, %r26)

	sync
	bv		%r0(%r2)
	nop
	.exit
	.procend
ENDPROC_CFI(flush_kernel_icache_range_asm)

	/* align should cover use of rfi in disable_sr_hashing_asm and
	 * srdis_done.
	 */
	.align	256
ENTRY_CFI(disable_sr_hashing_asm)
	.proc
	.callinfo NO_CALLS
	.entry

	/*
	 * Switch to real mode
	 */
	/* pcxt_ssm_bug */
	rsm		PSW_SM_I, %r0
	load32		PA(1f), %r1
	nop
	nop
	nop
	nop
	nop

	rsm		PSW_SM_Q, %r0		/* prep to load iia queue */
	mtctl		%r0, %cr17		/* Clear IIASQ tail */
	mtctl		%r0, %cr17		/* Clear IIASQ head */
	mtctl		%r1, %cr18		/* IIAOQ head */
	ldo		4(%r1), %r1
	mtctl		%r1, %cr18		/* IIAOQ tail */
	load32		REAL_MODE_PSW, %r1
	mtctl		%r1, %ipsw
	rfi
	nop

1:	cmpib,=,n	SRHASH_PCXST, %r26,srdis_pcxs
	cmpib,=,n	SRHASH_PCXL, %r26,srdis_pcxl
	cmpib,=,n	SRHASH_PA20, %r26,srdis_pa20
	b,n		srdis_done

srdis_pcxs:

	/* Disable Space Register Hashing for PCXS,PCXT,PCXT' */

	.word		0x141c1a00		/* mfdiag %dr0, %r28 */
	.word		0x141c1a00		/* must issue twice */
	depwi		0,18,1, %r28		/* Clear DHE (dcache hash enable) */
	depwi		0,20,1, %r28		/* Clear IHE (icache hash enable) */
	.word		0x141c1600		/* mtdiag %r28, %dr0 */
	.word		0x141c1600		/* must issue twice */
	b,n		srdis_done

srdis_pcxl:

	/* Disable Space Register Hashing for PCXL */

	.word		0x141c0600		/* mfdiag %dr0, %r28 */
	depwi		0,28,2, %r28		/* Clear DHASH_EN & IHASH_EN */
	.word		0x141c0240		/* mtdiag %r28, %dr0 */
	b,n		srdis_done

srdis_pa20:

	/* Disable Space Register Hashing for PCXU,PCXU+,PCXW,PCXW+,PCXW2 */

	.word		0x144008bc		/* mfdiag %dr2, %r28 */
	depdi		0, 54,1, %r28		/* clear DIAG_SPHASH_ENAB (bit 54) */
	.word		0x145c1840		/* mtdiag %r28, %dr2 */


srdis_done:
	/* Switch back to virtual mode */
	rsm		PSW_SM_I, %r0		/* prep to load iia queue */
	load32		2f, %r1
	nop
	nop
	nop
	nop
	nop

	rsm		PSW_SM_Q, %r0		/* prep to load iia queue */
	mtctl		%r0, %cr17		/* Clear IIASQ tail */
	mtctl		%r0, %cr17		/* Clear IIASQ head */
	mtctl		%r1, %cr18		/* IIAOQ head */
	ldo		4(%r1), %r1
	mtctl		%r1, %cr18		/* IIAOQ tail */
	load32		KERNEL_PSW, %r1
	mtctl		%r1, %ipsw
	rfi
	nop

2:	bv		%r0(%r2)
	nop
	.exit

	.procend
ENDPROC_CFI(disable_sr_hashing_asm)

	.end