/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * PowerPC version
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 * Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
 * Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
 * Adapted for Power Macintosh by Paul Mackerras.
 * Low-level exception handlers and MMU support
 * rewritten by Paul Mackerras.
 * Copyright (C) 1996 Paul Mackerras.
 *
 * This file contains low-level assembler routines for managing
 * the PowerPC MMU hash table.  (PPC 8xx processors don't use a
 * hash table, so this file is not used on them.)
 */

#include <asm/reg.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/cputable.h>
#include <asm/ppc_asm.h>
#include <asm/thread_info.h>
#include <asm/asm-offsets.h>
#include <asm/export.h>
#include <asm/feature-fixups.h>
#include <asm/code-patching-asm.h>

#ifdef CONFIG_SMP
	.section .bss
	.align	2
mmu_hash_lock:
	.space	4
#endif /* CONFIG_SMP */

/*
 * Load a PTE into the hash table, if possible.
 * The address is in r4, and r3 contains an access flag:
 * _PAGE_RW (0x400) if a write.
 * r9 contains the SRR1 value, from which we use the MSR_PR bit.
 * SPRG_THREAD contains the physical address of the current task's thread.
 *
 * Returns to the caller if the access is illegal or there is no
 * mapping for the address.  Otherwise it places an appropriate PTE
 * in the hash table and returns from the exception.
 * Uses r0, r3 - r6, r8, r10, ctr, lr.
 */
	.text
_GLOBAL(hash_page)
#ifdef CONFIG_SMP
	lis	r8, (mmu_hash_lock - PAGE_OFFSET)@h
	ori	r8, r8, (mmu_hash_lock - PAGE_OFFSET)@l
	lis	r0,0x0fff
	b	10f
11:	lwz	r6,0(r8)
	cmpwi	0,r6,0
	bne	11b
10:	lwarx	r6,0,r8
	cmpwi	0,r6,0
	bne-	11b
	stwcx.	r0,0,r8
	bne-	10b
	isync
#endif
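	/*
	 * The lock sequence above is a test-and-test-and-set spinlock
	 * built from lwarx/stwcx. (load-and-reserve/store-conditional).
	 * As a rough C sketch (illustrative only, not the kernel's
	 * actual locking API; 0x0fff0000 is just this path's owner
	 * token):
	 *
	 *	while (cmpxchg(&mmu_hash_lock, 0, 0x0fff0000) != 0)
	 *		while (READ_ONCE(mmu_hash_lock) != 0)
	 *			;	// spin on plain loads first
	 *	isync();		// acquire barrier
	 */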
	/* Get PTE (linux-style) and check access */
	lis	r0,KERNELBASE@h		/* check if kernel address */
	cmplw	0,r4,r0
	ori	r3,r3,_PAGE_USER|_PAGE_PRESENT /* test low addresses as user */
	mfspr	r5, SPRN_SPRG_PGDIR	/* phys page-table root */
	blt+	112f			/* assume user more likely */
	lis	r5, (swapper_pg_dir - PAGE_OFFSET)@ha	/* if kernel address, use */
	addi	r5, r5, (swapper_pg_dir - PAGE_OFFSET)@l	/* kernel page table */
	rlwimi	r3,r9,32-12,29,29	/* MSR_PR -> _PAGE_USER */
112:
#ifndef CONFIG_PTE_64BIT
	rlwimi	r5,r4,12,20,29		/* insert top 10 bits of address */
	lwz	r8,0(r5)		/* get pmd entry */
	rlwinm.	r8,r8,0,0,19		/* extract address of pte page */
#else
	rlwinm	r8,r4,13,19,29		/* Compute pgdir/pmd offset */
	lwzx	r8,r8,r5		/* Get L1 entry */
	rlwinm.	r8,r8,0,0,20		/* extract pt base address */
#endif
#ifdef CONFIG_SMP
	beq-	hash_page_out		/* return if no mapping */
#else
	/* XXX it seems like the 601 will give a machine fault on the
	   rfi if its alignment is wrong (bottom 4 bits of address are
	   8 or 0xc) and we have had a not-taken conditional branch
	   to the address following the rfi. */
	beqlr-
#endif
#ifndef CONFIG_PTE_64BIT
	rlwimi	r8,r4,22,20,29		/* insert next 10 bits of address */
#else
	rlwimi	r8,r4,23,20,28		/* compute pte address */
#endif
	rlwinm	r0,r3,32-3,24,24	/* _PAGE_RW access -> _PAGE_DIRTY */
	ori	r0,r0,_PAGE_ACCESSED|_PAGE_HASHPTE

	/*
	 * Update the linux PTE atomically.  We do the lwarx up-front
	 * because almost always, there won't be a permission violation
	 * and there won't already be an HPTE, and thus we will have
	 * to update the PTE to set _PAGE_HASHPTE.  -- paulus.
	 *
	 * If PTE_64BIT is set, the low word is the flags word; use that
	 * word for locking since it contains all the interesting bits.
	 */
#if (PTE_FLAGS_OFFSET != 0)
	addi	r8,r8,PTE_FLAGS_OFFSET
#endif
retry:
	lwarx	r6,0,r8			/* get linux-style pte, flag word */
	andc.	r5,r3,r6		/* check access & ~permission */
#ifdef CONFIG_SMP
	bne-	hash_page_out		/* return if access not permitted */
#else
	bnelr-
#endif
	or	r5,r0,r6		/* set accessed/dirty bits */
#ifdef CONFIG_PTE_64BIT
#ifdef CONFIG_SMP
	subf	r10,r6,r8		/* create false data dependency */
	subi	r10,r10,PTE_FLAGS_OFFSET
	lwzx	r10,r6,r10		/* Get upper PTE word */
#else
	lwz	r10,-PTE_FLAGS_OFFSET(r8)
#endif /* CONFIG_SMP */
#endif /* CONFIG_PTE_64BIT */
	stwcx.	r5,0,r8			/* attempt to update PTE */
	bne-	retry			/* retry if someone got there first */

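	/*
	 * The retry loop above, as a minimal C sketch (illustrative;
	 * load_reserve/store_conditional are stand-ins for lwarx and
	 * stwcx., r3 holds the access mask, r0 the bits to set):
	 *
	 *	do {
	 *		old = load_reserve(ptep);
	 *		if (access & ~old)	// permission violation
	 *			return;		// back to the caller
	 *		new = old | _PAGE_ACCESSED | _PAGE_HASHPTE
	 *			  | (write ? _PAGE_DIRTY : 0);
	 *	} while (!store_conditional(ptep, new));
	 */
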
	mfsrin	r3,r4			/* get segment reg for segment */
#ifndef CONFIG_VMAP_STACK
	mfctr	r0
	stw	r0,_CTR(r11)
#endif
	bl	create_hpte		/* add the hash table entry */

#ifdef CONFIG_SMP
	eieio
	lis	r8, (mmu_hash_lock - PAGE_OFFSET)@ha
	li	r0,0
	stw	r0, (mmu_hash_lock - PAGE_OFFSET)@l(r8)
#endif

#ifdef CONFIG_VMAP_STACK
	b	fast_hash_page_return
#else
	/* Return from the exception */
	lwz	r5,_CTR(r11)
	mtctr	r5
	lwz	r0,GPR0(r11)
	lwz	r8,GPR8(r11)
	b	fast_exception_return
#endif

#ifdef CONFIG_SMP
hash_page_out:
	eieio
	lis	r8, (mmu_hash_lock - PAGE_OFFSET)@ha
	li	r0,0
	stw	r0, (mmu_hash_lock - PAGE_OFFSET)@l(r8)
	blr
#endif /* CONFIG_SMP */

/*
 * Add an entry for a particular page to the hash table.
 *
 * add_hash_page(unsigned context, unsigned long va, unsigned long pmdval)
 *
 * We assume any necessary modifications to the pte (e.g. setting
 * the accessed bit) have already been done and that there is actually
 * a hash table in use (i.e. we're not on a 603).
 */
_GLOBAL(add_hash_page)
	mflr	r0
	stw	r0,4(r1)

	/* Convert context and va to VSID */
	mulli	r3,r3,897*16		/* multiply context by context skew */
	rlwinm	r0,r4,4,28,31		/* get ESID (top 4 bits of va) */
	mulli	r0,r0,0x111		/* multiply by ESID skew */
	add	r3,r3,r0		/* note create_hpte trims to 24 bits */

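	/*
	 * The VSID computation above, as a C sketch with one worked
	 * value (the trim to 24 bits happens later, in create_hpte):
	 *
	 *	vsid = context * (897 * 16) + (va >> 28) * 0x111;
	 *
	 * e.g. context 1, va 0x10003000: vsid = 0x3810 + 0x111 = 0x3921.
	 */
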
#ifdef CONFIG_SMP
	lwz	r8,TASK_CPU(r2)		/* to go in mmu_hash_lock */
	oris	r8,r8,12
#endif /* CONFIG_SMP */

	/*
	 * We disable interrupts here, even on UP, because we don't
	 * want to race with hash_page, and because we want the
	 * _PAGE_HASHPTE bit to be a reliable indication of whether
	 * the HPTE exists (or at least whether one did once).
	 * We also turn off the MMU for data accesses so that we
	 * can't take a hash table miss (assuming the code is
	 * covered by a BAT).  -- paulus
	 */
	mfmsr	r9
	SYNC
	rlwinm	r0,r9,0,17,15		/* clear bit 16 (MSR_EE) */
	rlwinm	r0,r0,0,28,26		/* clear MSR_DR */
	mtmsr	r0
	SYNC_601
	isync

#ifdef CONFIG_SMP
	lis	r6, (mmu_hash_lock - PAGE_OFFSET)@ha
	addi	r6, r6, (mmu_hash_lock - PAGE_OFFSET)@l
10:	lwarx	r0,0,r6			/* take the mmu_hash_lock */
	cmpi	0,r0,0
	bne-	11f
	stwcx.	r8,0,r6
	beq+	12f
11:	lwz	r0,0(r6)
	cmpi	0,r0,0
	beq	10b
	b	11b
12:	isync
#endif

	/*
	 * Fetch the linux pte and test and set _PAGE_HASHPTE atomically.
	 * If _PAGE_HASHPTE was already set, we don't replace the existing
	 * HPTE, so we just unlock and return.
	 */
	mr	r8,r5
#ifndef CONFIG_PTE_64BIT
	rlwimi	r8,r4,22,20,29
#else
	rlwimi	r8,r4,23,20,28
	addi	r8,r8,PTE_FLAGS_OFFSET
#endif
1:	lwarx	r6,0,r8
	andi.	r0,r6,_PAGE_HASHPTE
	bne	9f			/* if HASHPTE already set, done */
#ifdef CONFIG_PTE_64BIT
#ifdef CONFIG_SMP
	subf	r10,r6,r8		/* create false data dependency */
	subi	r10,r10,PTE_FLAGS_OFFSET
	lwzx	r10,r6,r10		/* Get upper PTE word */
#else
	lwz	r10,-PTE_FLAGS_OFFSET(r8)
#endif /* CONFIG_SMP */
#endif /* CONFIG_PTE_64BIT */
	ori	r5,r6,_PAGE_HASHPTE
	stwcx.	r5,0,r8
	bne-	1b

	bl	create_hpte

9:
#ifdef CONFIG_SMP
	lis	r6, (mmu_hash_lock - PAGE_OFFSET)@ha
	addi	r6, r6, (mmu_hash_lock - PAGE_OFFSET)@l
	eieio
	li	r0,0
	stw	r0,0(r6)		/* clear mmu_hash_lock */
#endif

	/* reenable interrupts and DR */
	mtmsr	r9
	SYNC_601
	isync

	lwz	r0,4(r1)
	mtlr	r0
	blr

/*
 * This routine adds a hardware PTE to the hash table.
 * It is designed to be called with the MMU either on or off.
 * r3 contains the VSID, r4 contains the virtual address,
 * r5 contains the linux PTE, r6 contains the old value of the
 * linux PTE (before setting _PAGE_HASHPTE).  r10 contains the
 * upper half of the PTE if CONFIG_PTE_64BIT.
 * On SMP, the caller should have the mmu_hash_lock held.
 * We assume that the caller has (or will) set the _PAGE_HASHPTE
 * bit in the linux PTE in memory.  The value passed in r6 should
 * be the old linux PTE value; if it doesn't have _PAGE_HASHPTE set
 * this routine will skip the search for an existing HPTE.
 * This procedure modifies r0, r3 - r6, r8, cr0.
 * -- paulus.
 *
 * For speed, 4 of the instructions get patched once the size and
 * physical address of the hash table are known.  These definitions
 * of Hash_base and Hash_bits below are just an example.
 */
Hash_base = 0xc0180000
Hash_bits = 12				/* e.g. 256kB hash table */
Hash_msk = (((1 << Hash_bits) - 1) * 64)

/* defines for the PTE format for 32-bit PPCs */
#define HPTE_SIZE	8
#define PTEG_SIZE	64
#define LG_PTEG_SIZE	6
#define LDPTEu		lwzu
#define LDPTE		lwz
#define STPTE		stw
#define CMPPTE		cmpw
#define PTE_H		0x40
#define PTE_V		0x80000000
#define TST_V(r)	rlwinm. r,r,0,0,0
#define SET_V(r)	oris r,r,PTE_V@h
#define CLR_V(r,t)	rlwinm r,r,0,1,31

#define HASH_LEFT	31-(LG_PTEG_SIZE+Hash_bits-1)
#define HASH_RIGHT	31-LG_PTEG_SIZE

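/*
 * A C sketch of the PTEG addressing that the patched instructions
 * below implement (illustrative; Hash_base and Hash_bits above are
 * example values, the real ones are patched in at runtime):
 *
 *	hash      = (vsid ^ (va >> 12)) << LG_PTEG_SIZE;
 *	primary   = Hash_base + (hash & Hash_msk);
 *	secondary = Hash_base + (~hash & Hash_msk);	// searched with PTE_H
 */
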
_GLOBAL(create_hpte)
	/* Convert linux-style PTE (r5) to low word of PPC-style PTE (r8) */
	rlwinm	r8,r5,32-9,30,30	/* _PAGE_RW -> PP msb */
	rlwinm	r0,r5,32-6,30,30	/* _PAGE_DIRTY -> PP msb */
	and	r8,r8,r0		/* writable if _RW & _DIRTY */
	rlwimi	r5,r5,32-1,30,30	/* _PAGE_USER -> PP msb */
	rlwimi	r5,r5,32-2,31,31	/* _PAGE_USER -> PP lsb */
	ori	r8,r8,0xe04		/* clear out reserved bits */
	andc	r8,r5,r8		/* PP = user? (rw&dirty? 1: 3): 0 */
BEGIN_FTR_SECTION
	rlwinm	r8,r8,0,~_PAGE_COHERENT	/* clear M (coherence not required) */
END_FTR_SECTION_IFCLR(CPU_FTR_NEED_COHERENT)
#ifdef CONFIG_PTE_64BIT
	/* Put the XPN bits into the PTE */
	rlwimi	r8,r10,8,20,22
	rlwimi	r8,r10,2,29,29
#endif

	/* Construct the high word of the PPC-style PTE (r5) */
	rlwinm	r5,r3,7,1,24		/* put VSID in 0x7fffff80 bits */
	rlwimi	r5,r4,10,26,31		/* put in API (abbrev page index) */
	SET_V(r5)			/* set V (valid) bit */

	patch_site	0f, patch__hash_page_A0
	patch_site	1f, patch__hash_page_A1
	patch_site	2f, patch__hash_page_A2
	/* Get the address of the primary PTE group in the hash table (r3) */
0:	lis	r0, (Hash_base - PAGE_OFFSET)@h	/* base address of hash table */
1:	rlwimi	r0,r3,LG_PTEG_SIZE,HASH_LEFT,HASH_RIGHT    /* VSID -> hash */
2:	rlwinm	r3,r4,20+LG_PTEG_SIZE,HASH_LEFT,HASH_RIGHT /* PI -> hash */
	xor	r3,r3,r0		/* make primary hash */
	li	r0,8			/* PTEs/group */

	/*
	 * Test the _PAGE_HASHPTE bit in the old linux PTE, and skip the search
	 * if it is clear, meaning that the HPTE isn't there already...
	 */
	andi.	r6,r6,_PAGE_HASHPTE
	beq+	10f			/* no PTE: go look for an empty slot */
	tlbie	r4

	lis	r4, (htab_hash_searches - PAGE_OFFSET)@ha
	lwz	r6, (htab_hash_searches - PAGE_OFFSET)@l(r4)
	addi	r6,r6,1			/* count how many searches we do */
	stw	r6, (htab_hash_searches - PAGE_OFFSET)@l(r4)

	/* Search the primary PTEG for a PTE whose 1st (d)word matches r5 */
	mtctr	r0
	addi	r4,r3,-HPTE_SIZE
1:	LDPTEu	r6,HPTE_SIZE(r4)	/* get next PTE */
	CMPPTE	0,r6,r5
	bdnzf	2,1b			/* loop while ctr != 0 && !cr0.eq */
	beq+	found_slot

	patch_site	0f, patch__hash_page_B
	/* Search the secondary PTEG for a matching PTE */
	ori	r5,r5,PTE_H		/* set H (secondary hash) bit */
0:	xoris	r4,r3,Hash_msk>>16	/* compute secondary hash */
	xori	r4,r4,(-PTEG_SIZE & 0xffff)
	addi	r4,r4,-HPTE_SIZE
	mtctr	r0
2:	LDPTEu	r6,HPTE_SIZE(r4)
	CMPPTE	0,r6,r5
	bdnzf	2,2b
	beq+	found_slot
	xori	r5,r5,PTE_H		/* clear H bit again */

	/* Search the primary PTEG for an empty slot */
10:	mtctr	r0
	addi	r4,r3,-HPTE_SIZE	/* search primary PTEG */
1:	LDPTEu	r6,HPTE_SIZE(r4)	/* get next PTE */
	TST_V(r6)			/* test valid bit */
	bdnzf	2,1b			/* loop while ctr != 0 && !cr0.eq */
	beq+	found_empty

	/* update counter of times that the primary PTEG is full */
	lis	r4, (primary_pteg_full - PAGE_OFFSET)@ha
	lwz	r6, (primary_pteg_full - PAGE_OFFSET)@l(r4)
	addi	r6,r6,1
	stw	r6, (primary_pteg_full - PAGE_OFFSET)@l(r4)

	patch_site	0f, patch__hash_page_C
	/* Search the secondary PTEG for an empty slot */
	ori	r5,r5,PTE_H		/* set H (secondary hash) bit */
0:	xoris	r4,r3,Hash_msk>>16	/* compute secondary hash */
	xori	r4,r4,(-PTEG_SIZE & 0xffff)
	addi	r4,r4,-HPTE_SIZE
	mtctr	r0
2:	LDPTEu	r6,HPTE_SIZE(r4)
	TST_V(r6)
	bdnzf	2,2b
	beq+	found_empty
	xori	r5,r5,PTE_H		/* clear H bit again */

	/*
	 * Choose an arbitrary slot in the primary PTEG to overwrite.
	 * Since both the primary and secondary PTEGs are full, and we
	 * have no information that the PTEs in the primary PTEG are
	 * more important or useful than those in the secondary PTEG,
	 * and we know there is a definite (although small) speed
	 * advantage to putting the PTE in the primary PTEG, we always
	 * put the PTE in the primary PTEG.
	 *
	 * In addition, we skip any slot that is mapping kernel text in
	 * order to avoid a deadlock when not using BAT mappings if
	 * trying to hash in the kernel hash code itself after it has
	 * already taken the hash table lock.  This works in conjunction
	 * with pre-faulting of the kernel text.
	 *
	 * If the hash table bucket is full of kernel text entries, we'll
	 * lock up here, but that shouldn't happen.
	 */

1:	lis	r4, (next_slot - PAGE_OFFSET)@ha	/* get next evict slot */
	lwz	r6, (next_slot - PAGE_OFFSET)@l(r4)
	addi	r6,r6,HPTE_SIZE			/* search for candidate */
	andi.	r6,r6,7*HPTE_SIZE
	stw	r6,next_slot@l(r4)
	add	r4,r3,r6
	LDPTE	r0,HPTE_SIZE/2(r4)		/* get PTE second word */
	clrrwi	r0,r0,12
	lis	r6,etext@h
	ori	r6,r6,etext@l			/* get etext */
	tophys(r6,r6)
	cmpl	cr0,r0,r6			/* compare and try again */
	blt	1b

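	/*
	 * The next_slot arithmetic above, in C terms (illustrative):
	 *
	 *	next_slot = (next_slot + HPTE_SIZE) & (7 * HPTE_SIZE);
	 *
	 * i.e. a round-robin victim pointer over the 8 slots of the
	 * primary PTEG, advanced again whenever the chosen slot maps
	 * kernel text (the RPN-vs-etext compare above).
	 */
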
#ifndef CONFIG_SMP
	/* Store PTE in PTEG */
found_empty:
	STPTE	r5,0(r4)
found_slot:
	STPTE	r8,HPTE_SIZE/2(r4)

#else /* CONFIG_SMP */
/*
 * Between the tlbie above and updating the hash table entry below,
 * another CPU could read the hash table entry and put it in its TLB.
 * There are 3 cases:
 * 1. using an empty slot
 * 2. updating an earlier entry to change permissions (i.e. enable write)
 * 3. taking over the PTE for an unrelated address
 *
 * In each case it doesn't really matter if the other CPUs have the old
 * PTE in their TLB.  So we don't need to bother with another tlbie here,
 * which is convenient as we've overwritten the register that had the
 * address. :-)  The tlbie above is mainly to make sure that this CPU comes
 * and gets the new PTE from the hash table.
 *
 * We do however have to make sure that the PTE is never in an invalid
 * state with the V bit set.
 */
found_empty:
found_slot:
	CLR_V(r5,r0)		/* clear V (valid) bit in PTE */
	STPTE	r5,0(r4)
	sync
	TLBSYNC
	STPTE	r8,HPTE_SIZE/2(r4)	/* put in correct RPN, WIMG, PP bits */
	sync
	SET_V(r5)
	STPTE	r5,0(r4)	/* finally set V bit in PTE */
#endif /* CONFIG_SMP */

	sync		/* make sure pte updates get to memory */
	blr

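/*
 * The SMP store sequence above, as a C-level sketch (illustrative;
 * the barriers correspond to the sync/TLBSYNC instructions):
 *
 *	hpte[0] = pte_hi & ~PTE_V;	// install/update while invalid
 *	sync(); tlbsync();
 *	hpte[1] = pte_lo;		// RPN, WIMG, PP bits
 *	sync();
 *	hpte[0] = pte_hi | PTE_V;	// publish: set V bit last
 */
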
	.section .bss
	.align	2
next_slot:
	.space	4
primary_pteg_full:
	.space	4
htab_hash_searches:
	.space	4
	.previous

/*
 * Flush the entry for a particular page from the hash table.
 *
 * flush_hash_pages(unsigned context, unsigned long va, unsigned long pmdval,
 *		    int count)
 *
 * We assume that there is a hash table in use (Hash != 0).
 */
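/*
 * Rough control flow, as a C sketch (illustrative only; locking and
 * the MSR manipulation are elided):
 *
 *	for (; count > 0; count--, va += 0x1000, ptep++) {
 *		if (!(*ptep & _PAGE_HASHPTE))
 *			continue;
 *		// atomically clear _PAGE_HASHPTE in *ptep
 *		// find the HPTE for (vsid, va) in the primary or
 *		// secondary PTEG and zero its first (valid) word
 *		tlbie(va);
 *	}
 */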
_GLOBAL(flush_hash_pages)
	/*
	 * We disable interrupts here, even on UP, because we want
	 * the _PAGE_HASHPTE bit to be a reliable indication of
	 * whether the HPTE exists (or at least whether one did once).
	 * We also turn off the MMU for data accesses so that we
	 * can't take a hash table miss (assuming the code is
	 * covered by a BAT).  -- paulus
	 */
	mfmsr	r10
	SYNC
	rlwinm	r0,r10,0,17,15		/* clear bit 16 (MSR_EE) */
	rlwinm	r0,r0,0,28,26		/* clear MSR_DR */
	mtmsr	r0
	SYNC_601
	isync

	/* First find a PTE in the range that has _PAGE_HASHPTE set */
#ifndef CONFIG_PTE_64BIT
	rlwimi	r5,r4,22,20,29
#else
	rlwimi	r5,r4,23,20,28
#endif
1:	lwz	r0,PTE_FLAGS_OFFSET(r5)
	cmpwi	cr1,r6,1
	andi.	r0,r0,_PAGE_HASHPTE
	bne	2f
	ble	cr1,19f
	addi	r4,r4,0x1000
	addi	r5,r5,PTE_SIZE
	addi	r6,r6,-1
	b	1b

	/* Convert context and va to VSID */
2:	mulli	r3,r3,897*16		/* multiply context by context skew */
	rlwinm	r0,r4,4,28,31		/* get ESID (top 4 bits of va) */
	mulli	r0,r0,0x111		/* multiply by ESID skew */
	add	r3,r3,r0		/* note code below trims to 24 bits */

	/* Construct the high word of the PPC-style PTE (r11) */
	rlwinm	r11,r3,7,1,24		/* put VSID in 0x7fffff80 bits */
	rlwimi	r11,r4,10,26,31		/* put in API (abbrev page index) */
	SET_V(r11)			/* set V (valid) bit */

#ifdef CONFIG_SMP
	lis	r9, (mmu_hash_lock - PAGE_OFFSET)@ha
	addi	r9, r9, (mmu_hash_lock - PAGE_OFFSET)@l
	tophys	(r8, r2)
	lwz	r8, TASK_CPU(r8)
	oris	r8,r8,9
10:	lwarx	r0,0,r9
	cmpi	0,r0,0
	bne-	11f
	stwcx.	r8,0,r9
	beq+	12f
11:	lwz	r0,0(r9)
	cmpi	0,r0,0
	beq	10b
	b	11b
12:	isync
#endif

	/*
	 * Check the _PAGE_HASHPTE bit in the linux PTE.  If it is
	 * already clear, we're done (for this pte).  If not,
	 * clear it (atomically) and proceed.  -- paulus.
	 */
#if (PTE_FLAGS_OFFSET != 0)
	addi	r5,r5,PTE_FLAGS_OFFSET
#endif
33:	lwarx	r8,0,r5			/* fetch the pte flags word */
	andi.	r0,r8,_PAGE_HASHPTE
	beq	8f			/* done if HASHPTE is already clear */
	rlwinm	r8,r8,0,31,29		/* clear HASHPTE bit */
	stwcx.	r8,0,r5			/* update the pte */
	bne-	33b

	patch_site	0f, patch__flush_hash_A0
	patch_site	1f, patch__flush_hash_A1
	patch_site	2f, patch__flush_hash_A2
	/* Get the address of the primary PTE group in the hash table (r3) */
0:	lis	r8, (Hash_base - PAGE_OFFSET)@h	/* base address of hash table */
1:	rlwimi	r8,r3,LG_PTEG_SIZE,HASH_LEFT,HASH_RIGHT    /* VSID -> hash */
2:	rlwinm	r0,r4,20+LG_PTEG_SIZE,HASH_LEFT,HASH_RIGHT /* PI -> hash */
	xor	r8,r0,r8		/* make primary hash */

	/* Search the primary PTEG for a PTE whose 1st (d)word matches r5 */
	li	r0,8			/* PTEs/group */
	mtctr	r0
	addi	r12,r8,-HPTE_SIZE
1:	LDPTEu	r0,HPTE_SIZE(r12)	/* get next PTE */
	CMPPTE	0,r0,r11
	bdnzf	2,1b			/* loop while ctr != 0 && !cr0.eq */
	beq+	3f

	patch_site	0f, patch__flush_hash_B
	/* Search the secondary PTEG for a matching PTE */
	ori	r11,r11,PTE_H		/* set H (secondary hash) bit */
	li	r0,8			/* PTEs/group */
0:	xoris	r12,r8,Hash_msk>>16	/* compute secondary hash */
	xori	r12,r12,(-PTEG_SIZE & 0xffff)
	addi	r12,r12,-HPTE_SIZE
	mtctr	r0
2:	LDPTEu	r0,HPTE_SIZE(r12)
	CMPPTE	0,r0,r11
	bdnzf	2,2b
	xori	r11,r11,PTE_H		/* clear H again */
	bne-	4f			/* should rarely fail to find it */

3:	li	r0,0
	STPTE	r0,0(r12)		/* invalidate entry */
4:	sync
	tlbie	r4			/* in hw tlb too */
	sync

8:	ble	cr1,9f			/* if all ptes checked */
81:	addi	r6,r6,-1
	addi	r5,r5,PTE_SIZE
	addi	r4,r4,0x1000
	lwz	r0,0(r5)		/* check next pte */
	cmpwi	cr1,r6,1
	andi.	r0,r0,_PAGE_HASHPTE
	bne	33b
	bgt	cr1,81b

9:
#ifdef CONFIG_SMP
	TLBSYNC
	li	r0,0
	stw	r0,0(r9)		/* clear mmu_hash_lock */
#endif

19:	mtmsr	r10
	SYNC_601
	isync
	blr
EXPORT_SYMBOL(flush_hash_pages)

/*
 * Flush an entry from the TLB
 */
_GLOBAL(_tlbie)
#ifdef CONFIG_SMP
	lwz	r8,TASK_CPU(r2)
	oris	r8,r8,11
	mfmsr	r10
	SYNC
	rlwinm	r0,r10,0,17,15		/* clear bit 16 (MSR_EE) */
	rlwinm	r0,r0,0,28,26		/* clear DR */
	mtmsr	r0
	SYNC_601
	isync
	lis	r9,mmu_hash_lock@h
	ori	r9,r9,mmu_hash_lock@l
	tophys(r9,r9)
10:	lwarx	r7,0,r9
	cmpwi	0,r7,0
	bne-	10b
	stwcx.	r8,0,r9
	bne-	10b
	eieio
	tlbie	r3
	sync
	TLBSYNC
	li	r0,0
	stw	r0,0(r9)	/* clear mmu_hash_lock */
	mtmsr	r10
	SYNC_601
	isync
#else /* CONFIG_SMP */
	tlbie	r3
	sync
#endif /* CONFIG_SMP */
	blr

/*
 * Flush the entire TLB. 603/603e only
 */
_GLOBAL(_tlbia)
#if defined(CONFIG_SMP)
	lwz	r8,TASK_CPU(r2)
	oris	r8,r8,10
	mfmsr	r10
	SYNC
	rlwinm	r0,r10,0,17,15		/* clear bit 16 (MSR_EE) */
	rlwinm	r0,r0,0,28,26		/* clear DR */
	mtmsr	r0
	SYNC_601
	isync
	lis	r9,mmu_hash_lock@h
	ori	r9,r9,mmu_hash_lock@l
	tophys(r9,r9)
10:	lwarx	r7,0,r9
	cmpwi	0,r7,0
	bne-	10b
	stwcx.	r8,0,r9
	bne-	10b
#endif /* CONFIG_SMP */
	li	r5, 32
	lis	r4, KERNELBASE@h
	mtctr	r5
	sync
0:	tlbie	r4
	addi	r4, r4, 0x1000
	bdnz	0b
	sync
#ifdef CONFIG_SMP
	TLBSYNC
	li	r0,0
	stw	r0,0(r9)	/* clear mmu_hash_lock */
	mtmsr	r10
	SYNC_601
	isync
#endif /* CONFIG_SMP */
	blr
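
/*
 * Why 32 iterations above: the 603/603e TLBs are 64-entry, two-way
 * set-associative (32 sets, indexed by low EA bits), and tlbie
 * invalidates by set index, so 32 tlbie's at 4kB strides flush every
 * set.  (Background from the 603 documentation, not from this file.)
 */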