#include <linux/compiler.h>
#include <asm/addrspace.h>
+#include <asm/asm.h>
#include <asm/page.h>
#include <asm/pgtable-bits.h>
#endif
#if CONFIG_PGTABLE_LEVELS == 2
-#define PGDIR_SHIFT (PAGE_SHIFT + (PAGE_SHIFT - 3))
+#define PGDIR_SHIFT (PAGE_SHIFT + (PAGE_SHIFT - PTRLOG))
#elif CONFIG_PGTABLE_LEVELS == 3
-#define PMD_SHIFT (PAGE_SHIFT + (PAGE_SHIFT - 3))
+#define PMD_SHIFT (PAGE_SHIFT + (PAGE_SHIFT - PTRLOG))
#define PMD_SIZE (1UL << PMD_SHIFT)
#define PMD_MASK (~(PMD_SIZE-1))
-#define PGDIR_SHIFT (PMD_SHIFT + (PAGE_SHIFT - 3))
+#define PGDIR_SHIFT (PMD_SHIFT + (PAGE_SHIFT - PTRLOG))
#elif CONFIG_PGTABLE_LEVELS == 4
-#define PMD_SHIFT (PAGE_SHIFT + (PAGE_SHIFT - 3))
+#define PMD_SHIFT (PAGE_SHIFT + (PAGE_SHIFT - PTRLOG))
#define PMD_SIZE (1UL << PMD_SHIFT)
#define PMD_MASK (~(PMD_SIZE-1))
-#define PUD_SHIFT (PMD_SHIFT + (PAGE_SHIFT - 3))
+#define PUD_SHIFT (PMD_SHIFT + (PAGE_SHIFT - PTRLOG))
#define PUD_SIZE (1UL << PUD_SHIFT)
#define PUD_MASK (~(PUD_SIZE-1))
-#define PGDIR_SHIFT (PUD_SHIFT + (PAGE_SHIFT - 3))
+#define PGDIR_SHIFT (PUD_SHIFT + (PAGE_SHIFT - PTRLOG))
#endif
#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
#define PGDIR_MASK (~(PGDIR_SIZE-1))
-#define VA_BITS (PGDIR_SHIFT + (PAGE_SHIFT - 3))
+#ifdef CONFIG_32BIT
+#define VA_BITS 32
+#else
+#define VA_BITS (PGDIR_SHIFT + (PAGE_SHIFT - PTRLOG))
+#endif
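+
+/*
+ * A worked illustration of the arithmetic above (illustrative only;
+ * assumes PTRLOG from asm/asm.h is 3 for 64-bit and 2 for 32-bit
+ * pointers): with 16KB pages (PAGE_SHIFT == 14) and
+ * CONFIG_PGTABLE_LEVELS == 3 on 64-bit, each table holds 2^11 entries,
+ * so PMD_SHIFT = 25, PGDIR_SHIFT = 36 and VA_BITS = 47.  With 4KB pages
+ * and two levels on 32-bit, PGDIR_SHIFT = 22 and VA_BITS is fixed at 32.
+ */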
-#define PTRS_PER_PGD (PAGE_SIZE >> 3)
+#define PTRS_PER_PGD (PAGE_SIZE >> PTRLOG)
#if CONFIG_PGTABLE_LEVELS > 3
-#define PTRS_PER_PUD (PAGE_SIZE >> 3)
+#define PTRS_PER_PUD (PAGE_SIZE >> PTRLOG)
#endif
#if CONFIG_PGTABLE_LEVELS > 2
-#define PTRS_PER_PMD (PAGE_SIZE >> 3)
+#define PTRS_PER_PMD (PAGE_SIZE >> PTRLOG)
#endif
-#define PTRS_PER_PTE (PAGE_SIZE >> 3)
+#define PTRS_PER_PTE (PAGE_SIZE >> PTRLOG)
+#ifdef CONFIG_32BIT
+#define USER_PTRS_PER_PGD (TASK_SIZE / PGDIR_SIZE)
+#else
#define USER_PTRS_PER_PGD ((TASK_SIZE64 / PGDIR_SIZE)?(TASK_SIZE64 / PGDIR_SIZE):1)
+#endif
#ifndef __ASSEMBLER__
#define ZERO_PAGE(vaddr) virt_to_page(empty_zero_page)
-/*
- * TLB refill handlers may also map the vmalloc area into xkvrange.
- * Avoid the first couple of pages so NULL pointer dereferences will
- * still reliably trap.
- */
+#ifdef CONFIG_32BIT
+
+#define VMALLOC_START (vm_map_base + PCI_IOSIZE + (2 * PAGE_SIZE))
+#define VMALLOC_END (FIXADDR_START - (2 * PAGE_SIZE))
+
+#endif
+
+#ifdef CONFIG_64BIT
+
#define MODULES_VADDR (vm_map_base + PCI_IOSIZE + (2 * PAGE_SIZE))
#define MODULES_END (MODULES_VADDR + SZ_256M)
#define KFENCE_AREA_START (VMEMMAP_END + 1)
#define KFENCE_AREA_END (KFENCE_AREA_START + KFENCE_AREA_SIZE - 1)
+#endif
+
#define ptep_get(ptep) READ_ONCE(*(ptep))
#define pmdp_get(pmdp) READ_ONCE(*(pmdp))
* Encode/decode swap entries and swap PTEs. Swap PTEs are all PTEs that
* are !pte_none() && !pte_present().
*
- * Format of swap PTEs:
+ * Format of 32bit swap PTEs:
+ *
+ * 3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1
+ * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
+ * <------------ offset -------------> E <- type -> <-- zeroes -->
+ *
+ * E is the exclusive marker that is not stored in swap entries.
+ * The zero'ed bits include _PAGE_PRESENT.
+ *
+ * Format of 64bit swap PTEs:
*
* 6 6 6 6 5 5 5 5 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 3 3 3 3 3 3 3 3
* 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2
* <--------------------------- offset ---------------------------
*
* 3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1
* 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
* --------------> E <--- type ---> <---------- zeroes ---------->
*
* E is the exclusive marker that is not stored in swap entries.
* The zero'ed bits include _PAGE_PRESENT and _PAGE_PROTNONE.
*/
+
+#define __SWP_TYPE_BITS (IS_ENABLED(CONFIG_32BIT) ? 5 : 7)
+#define __SWP_TYPE_MASK ((1UL << __SWP_TYPE_BITS) - 1)
+#define __SWP_TYPE_SHIFT (IS_ENABLED(CONFIG_32BIT) ? 8 : 16)
+#define __SWP_OFFSET_SHIFT (__SWP_TYPE_BITS + __SWP_TYPE_SHIFT + 1)
+
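+/*
+ * Worked example of the encoding above (illustrative only): on 64-bit,
+ * the type occupies bits 22..16, the exclusive (E) marker bit 23 and the
+ * offset bits 63..24, so __swp_entry(2, 0x123) gives a pte_val of
+ * (0x123 << 24) | (2 << 16) == 0x123020000.  On 32-bit the type sits in
+ * bits 12..8, E in bit 13 and the offset starts at bit 14.  In both
+ * cases the low bits, including _PAGE_PRESENT, stay zero, so the entry
+ * is not pte_present().
+ */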
static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
-{ pte_t pte; pte_val(pte) = ((type & 0x7f) << 16) | (offset << 24); return pte; }
+{
+ pte_t pte;
+ pte_val(pte) = ((type & __SWP_TYPE_MASK) << __SWP_TYPE_SHIFT) | (offset << __SWP_OFFSET_SHIFT);
+ return pte;
+}
-#define __swp_type(x) (((x).val >> 16) & 0x7f)
-#define __swp_offset(x) ((x).val >> 24)
+#define __swp_type(x) (((x).val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK)
+#define __swp_offset(x) ((x).val >> __SWP_OFFSET_SHIFT)
#define __swp_entry(type, offset) ((swp_entry_t) { pte_val(mk_swap_pte((type), (offset))) })
-#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
+
#define __swp_entry_to_pte(x) __pte((x).val)
-#define __pmd_to_swp_entry(pmd) ((swp_entry_t) { pmd_val(pmd) })
#define __swp_entry_to_pmd(x) __pmd((x).val | _PAGE_HUGE)
+#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
+#define __pmd_to_swp_entry(pmd) ((swp_entry_t) { pmd_val(pmd) })
static inline bool pte_swp_exclusive(pte_t pte)
{
.align 5
SYM_FUNC_START(clear_page)
- lu12i.w t0, 1 << (PAGE_SHIFT - 12)
- add.d t0, t0, a0
+ lu12i.w t0, 1 << (PAGE_SHIFT - 12)
+ PTR_ADD t0, t0, a0
1:
- st.d zero, a0, 0
- st.d zero, a0, 8
- st.d zero, a0, 16
- st.d zero, a0, 24
- st.d zero, a0, 32
- st.d zero, a0, 40
- st.d zero, a0, 48
- st.d zero, a0, 56
- addi.d a0, a0, 128
- st.d zero, a0, -64
- st.d zero, a0, -56
- st.d zero, a0, -48
- st.d zero, a0, -40
- st.d zero, a0, -32
- st.d zero, a0, -24
- st.d zero, a0, -16
- st.d zero, a0, -8
- bne t0, a0, 1b
+ LONG_S zero, a0, (LONGSIZE * 0)
+ LONG_S zero, a0, (LONGSIZE * 1)
+ LONG_S zero, a0, (LONGSIZE * 2)
+ LONG_S zero, a0, (LONGSIZE * 3)
+ LONG_S zero, a0, (LONGSIZE * 4)
+ LONG_S zero, a0, (LONGSIZE * 5)
+ LONG_S zero, a0, (LONGSIZE * 6)
+ LONG_S zero, a0, (LONGSIZE * 7)
+ PTR_ADDI a0, a0, (LONGSIZE * 16)
+ LONG_S zero, a0, -(LONGSIZE * 8)
+ LONG_S zero, a0, -(LONGSIZE * 7)
+ LONG_S zero, a0, -(LONGSIZE * 6)
+ LONG_S zero, a0, -(LONGSIZE * 5)
+ LONG_S zero, a0, -(LONGSIZE * 4)
+ LONG_S zero, a0, -(LONGSIZE * 3)
+ LONG_S zero, a0, -(LONGSIZE * 2)
+ LONG_S zero, a0, -(LONGSIZE * 1)
+ bne t0, a0, 1b
- jr ra
+ jr ra
SYM_FUNC_END(clear_page)
EXPORT_SYMBOL(clear_page)
.align 5
SYM_FUNC_START(copy_page)
- lu12i.w t8, 1 << (PAGE_SHIFT - 12)
- add.d t8, t8, a0
+ lu12i.w t8, 1 << (PAGE_SHIFT - 12)
+ PTR_ADD t8, t8, a0
1:
- ld.d t0, a1, 0
- ld.d t1, a1, 8
- ld.d t2, a1, 16
- ld.d t3, a1, 24
- ld.d t4, a1, 32
- ld.d t5, a1, 40
- ld.d t6, a1, 48
- ld.d t7, a1, 56
+ LONG_L t0, a1, (LONGSIZE * 0)
+ LONG_L t1, a1, (LONGSIZE * 1)
+ LONG_L t2, a1, (LONGSIZE * 2)
+ LONG_L t3, a1, (LONGSIZE * 3)
+ LONG_L t4, a1, (LONGSIZE * 4)
+ LONG_L t5, a1, (LONGSIZE * 5)
+ LONG_L t6, a1, (LONGSIZE * 6)
+ LONG_L t7, a1, (LONGSIZE * 7)
- st.d t0, a0, 0
- st.d t1, a0, 8
- ld.d t0, a1, 64
- ld.d t1, a1, 72
- st.d t2, a0, 16
- st.d t3, a0, 24
- ld.d t2, a1, 80
- ld.d t3, a1, 88
- st.d t4, a0, 32
- st.d t5, a0, 40
- ld.d t4, a1, 96
- ld.d t5, a1, 104
- st.d t6, a0, 48
- st.d t7, a0, 56
- ld.d t6, a1, 112
- ld.d t7, a1, 120
- addi.d a0, a0, 128
- addi.d a1, a1, 128
+ LONG_S t0, a0, (LONGSIZE * 0)
+ LONG_S t1, a0, (LONGSIZE * 1)
+ LONG_L t0, a1, (LONGSIZE * 8)
+ LONG_L t1, a1, (LONGSIZE * 9)
+ LONG_S t2, a0, (LONGSIZE * 2)
+ LONG_S t3, a0, (LONGSIZE * 3)
+ LONG_L t2, a1, (LONGSIZE * 10)
+ LONG_L t3, a1, (LONGSIZE * 11)
+ LONG_S t4, a0, (LONGSIZE * 4)
+ LONG_S t5, a0, (LONGSIZE * 5)
+ LONG_L t4, a1, (LONGSIZE * 12)
+ LONG_L t5, a1, (LONGSIZE * 13)
+ LONG_S t6, a0, (LONGSIZE * 6)
+ LONG_S t7, a0, (LONGSIZE * 7)
+ LONG_L t6, a1, (LONGSIZE * 14)
+ LONG_L t7, a1, (LONGSIZE * 15)
+ PTR_ADDI a0, a0, (LONGSIZE * 16)
+ PTR_ADDI a1, a1, (LONGSIZE * 16)
- st.d t0, a0, -64
- st.d t1, a0, -56
- st.d t2, a0, -48
- st.d t3, a0, -40
- st.d t4, a0, -32
- st.d t5, a0, -24
- st.d t6, a0, -16
- st.d t7, a0, -8
+ LONG_S t0, a0, -(LONGSIZE * 8)
+ LONG_S t1, a0, -(LONGSIZE * 7)
+ LONG_S t2, a0, -(LONGSIZE * 6)
+ LONG_S t3, a0, -(LONGSIZE * 5)
+ LONG_S t4, a0, -(LONGSIZE * 4)
+ LONG_S t5, a0, -(LONGSIZE * 3)
+ LONG_S t6, a0, -(LONGSIZE * 2)
+ LONG_S t7, a0, -(LONGSIZE * 1)
- bne t8, a0, 1b
- jr ra
+ bne t8, a0, 1b
+ jr ra
SYM_FUNC_END(copy_page)
EXPORT_SYMBOL(copy_page)
#define INVTLB_ADDR_GFALSE_AND_ASID 5
-#define PTRS_PER_PGD_BITS (PAGE_SHIFT - 3)
-#define PTRS_PER_PUD_BITS (PAGE_SHIFT - 3)
-#define PTRS_PER_PMD_BITS (PAGE_SHIFT - 3)
-#define PTRS_PER_PTE_BITS (PAGE_SHIFT - 3)
+#define PTRS_PER_PGD_BITS (PAGE_SHIFT - PTRLOG)
+#define PTRS_PER_PUD_BITS (PAGE_SHIFT - PTRLOG)
+#define PTRS_PER_PMD_BITS (PAGE_SHIFT - PTRLOG)
+#define PTRS_PER_PTE_BITS (PAGE_SHIFT - PTRLOG)
+
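+/* ll/sc accessors matching the PTE width: 32-bit kernels use 32-bit PTEs. */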
+#ifdef CONFIG_32BIT
+#define PTE_LL ll.w
+#define PTE_SC sc.w
+#else
+#define PTE_LL ll.d
+#define PTE_SC sc.d
+#endif
.macro tlb_do_page_fault, write
SYM_CODE_START(tlb_do_page_fault_\write)
vmalloc_done_load:
/* Get PGD offset in bytes */
- bstrpick.d ra, t0, PTRS_PER_PGD_BITS + PGDIR_SHIFT - 1, PGDIR_SHIFT
- alsl.d t1, ra, t1, 3
+#ifdef CONFIG_32BIT
+ PTR_BSTRPICK ra, t0, 31, PGDIR_SHIFT
+#else
+ PTR_BSTRPICK ra, t0, PTRS_PER_PGD_BITS + PGDIR_SHIFT - 1, PGDIR_SHIFT
+#endif
+ PTR_ALSL t1, ra, t1, _PGD_T_LOG2
+
#if CONFIG_PGTABLE_LEVELS > 3
- ld.d t1, t1, 0
- bstrpick.d ra, t0, PTRS_PER_PUD_BITS + PUD_SHIFT - 1, PUD_SHIFT
- alsl.d t1, ra, t1, 3
+ PTR_L t1, t1, 0
+ PTR_BSTRPICK ra, t0, PTRS_PER_PUD_BITS + PUD_SHIFT - 1, PUD_SHIFT
+ PTR_ALSL t1, ra, t1, _PMD_T_LOG2
+
#endif
#if CONFIG_PGTABLE_LEVELS > 2
- ld.d t1, t1, 0
- bstrpick.d ra, t0, PTRS_PER_PMD_BITS + PMD_SHIFT - 1, PMD_SHIFT
- alsl.d t1, ra, t1, 3
+ PTR_L t1, t1, 0
+ PTR_BSTRPICK ra, t0, PTRS_PER_PMD_BITS + PMD_SHIFT - 1, PMD_SHIFT
+ PTR_ALSL t1, ra, t1, _PMD_T_LOG2
+
#endif
- ld.d ra, t1, 0
+ PTR_L ra, t1, 0
/*
* For huge tlb entries, pmde doesn't contain an address but
* instead contains the tlb pte. Check the PAGE_HUGE bit and
* see if we need to jump to huge tlb processing.
*/
- rotri.d ra, ra, _PAGE_HUGE_SHIFT + 1
+ PTR_ROTRI ra, ra, _PAGE_HUGE_SHIFT + 1
bltz ra, tlb_huge_update_load
- rotri.d ra, ra, 64 - (_PAGE_HUGE_SHIFT + 1)
- bstrpick.d t0, t0, PTRS_PER_PTE_BITS + PAGE_SHIFT - 1, PAGE_SHIFT
- alsl.d t1, t0, ra, _PTE_T_LOG2
+ PTR_ROTRI ra, ra, BITS_PER_LONG - (_PAGE_HUGE_SHIFT + 1)
+ PTR_BSTRPICK t0, t0, PTRS_PER_PTE_BITS + PAGE_SHIFT - 1, PAGE_SHIFT
+ PTR_ALSL t1, t0, ra, _PTE_T_LOG2
#ifdef CONFIG_SMP
smp_pgtable_change_load:
- ll.d t0, t1, 0
+ PTE_LL t0, t1, 0
#else
- ld.d t0, t1, 0
+ PTR_L t0, t1, 0
#endif
andi ra, t0, _PAGE_PRESENT
beqz ra, nopage_tlb_load
ori t0, t0, _PAGE_VALID
+
#ifdef CONFIG_SMP
- sc.d t0, t1, 0
+ PTE_SC t0, t1, 0
beqz t0, smp_pgtable_change_load
#else
- st.d t0, t1, 0
+ PTR_S t0, t1, 0
#endif
+
tlbsrch
- bstrins.d t1, zero, 3, 3
- ld.d t0, t1, 0
- ld.d t1, t1, 8
+ PTR_BSTRINS t1, zero, _PTE_T_LOG2, _PTE_T_LOG2
+ PTR_L t0, t1, 0
+ PTR_L t1, t1, _PTE_T_SIZE
csrwr t0, LOONGARCH_CSR_TLBELO0
csrwr t1, LOONGARCH_CSR_TLBELO1
tlbwr
csrrd ra, EXCEPTION_KS2
ertn
-#ifdef CONFIG_64BIT
vmalloc_load:
la_abs t1, swapper_pg_dir
b vmalloc_done_load
-#endif
/* This is the entry point of a huge page. */
tlb_huge_update_load:
#ifdef CONFIG_SMP
- ll.d ra, t1, 0
+ PTE_LL ra, t1, 0
#else
- rotri.d ra, ra, 64 - (_PAGE_HUGE_SHIFT + 1)
+ PTR_ROTRI ra, ra, BITS_PER_LONG - (_PAGE_HUGE_SHIFT + 1)
#endif
andi t0, ra, _PAGE_PRESENT
beqz t0, nopage_tlb_load
#ifdef CONFIG_SMP
ori t0, ra, _PAGE_VALID
- sc.d t0, t1, 0
+ PTE_SC t0, t1, 0
beqz t0, tlb_huge_update_load
ori t0, ra, _PAGE_VALID
#else
ori t0, ra, _PAGE_VALID
- st.d t0, t1, 0
+ PTR_S t0, t1, 0
#endif
csrrd ra, LOONGARCH_CSR_ASID
csrrd t1, LOONGARCH_CSR_BADV
xori t0, t0, _PAGE_HUGE
lu12i.w t1, _PAGE_HGLOBAL >> 12
and t1, t0, t1
- srli.d t1, t1, (_PAGE_HGLOBAL_SHIFT - _PAGE_GLOBAL_SHIFT)
+ PTR_SRLI t1, t1, (_PAGE_HGLOBAL_SHIFT - _PAGE_GLOBAL_SHIFT)
or t0, t0, t1
move ra, t0
csrwr ra, LOONGARCH_CSR_TLBELO0
/* Convert to entrylo1 */
- addi.d t1, zero, 1
- slli.d t1, t1, (HPAGE_SHIFT - 1)
- add.d t0, t0, t1
+ PTR_ADDI t1, zero, 1
+ PTR_SLLI t1, t1, (HPAGE_SHIFT - 1)
+ PTR_ADD t0, t0, t1
csrwr t0, LOONGARCH_CSR_TLBELO1
/* Set huge page tlb entry size */
- addu16i.d t0, zero, (CSR_TLBIDX_PS >> 16)
- addu16i.d t1, zero, (PS_HUGE_SIZE << (CSR_TLBIDX_PS_SHIFT - 16))
+ PTR_LI t0, (CSR_TLBIDX_PS >> 16) << 16
+ PTR_LI t1, (PS_HUGE_SIZE << (CSR_TLBIDX_PS_SHIFT))
csrxchg t1, t0, LOONGARCH_CSR_TLBIDX
tlbfill
- addu16i.d t0, zero, (CSR_TLBIDX_PS >> 16)
- addu16i.d t1, zero, (PS_DEFAULT_SIZE << (CSR_TLBIDX_PS_SHIFT - 16))
+ PTR_LI t0, (CSR_TLBIDX_PS >> 16) << 16
+ PTR_LI t1, (PS_DEFAULT_SIZE << (CSR_TLBIDX_PS_SHIFT))
csrxchg t1, t0, LOONGARCH_CSR_TLBIDX
csrrd t0, EXCEPTION_KS0
vmalloc_done_store:
/* Get PGD offset in bytes */
- bstrpick.d ra, t0, PTRS_PER_PGD_BITS + PGDIR_SHIFT - 1, PGDIR_SHIFT
- alsl.d t1, ra, t1, 3
+#ifdef CONFIG_32BIT
+ PTR_BSTRPICK ra, t0, 31, PGDIR_SHIFT
+#else
+ PTR_BSTRPICK ra, t0, PTRS_PER_PGD_BITS + PGDIR_SHIFT - 1, PGDIR_SHIFT
+#endif
+ PTR_ALSL t1, ra, t1, _PGD_T_LOG2
+
#if CONFIG_PGTABLE_LEVELS > 3
- ld.d t1, t1, 0
- bstrpick.d ra, t0, PTRS_PER_PUD_BITS + PUD_SHIFT - 1, PUD_SHIFT
- alsl.d t1, ra, t1, 3
+ PTR_L t1, t1, 0
+ PTR_BSTRPICK ra, t0, PTRS_PER_PUD_BITS + PUD_SHIFT - 1, PUD_SHIFT
+ PTR_ALSL t1, ra, t1, _PMD_T_LOG2
#endif
#if CONFIG_PGTABLE_LEVELS > 2
- ld.d t1, t1, 0
- bstrpick.d ra, t0, PTRS_PER_PMD_BITS + PMD_SHIFT - 1, PMD_SHIFT
- alsl.d t1, ra, t1, 3
+ PTR_L t1, t1, 0
+ PTR_BSTRPICK ra, t0, PTRS_PER_PMD_BITS + PMD_SHIFT - 1, PMD_SHIFT
+ PTR_ALSL t1, ra, t1, _PMD_T_LOG2
#endif
- ld.d ra, t1, 0
+ PTR_L ra, t1, 0
/*
* For huge tlb entries, pmde doesn't contain an address but
* instead contains the tlb pte. Check the PAGE_HUGE bit and
* see if we need to jump to huge tlb processing.
*/
- rotri.d ra, ra, _PAGE_HUGE_SHIFT + 1
+ PTR_ROTRI ra, ra, _PAGE_HUGE_SHIFT + 1
bltz ra, tlb_huge_update_store
- rotri.d ra, ra, 64 - (_PAGE_HUGE_SHIFT + 1)
- bstrpick.d t0, t0, PTRS_PER_PTE_BITS + PAGE_SHIFT - 1, PAGE_SHIFT
- alsl.d t1, t0, ra, _PTE_T_LOG2
+ PTR_ROTRI ra, ra, BITS_PER_LONG - (_PAGE_HUGE_SHIFT + 1)
+ PTR_BSTRPICK t0, t0, PTRS_PER_PTE_BITS + PAGE_SHIFT - 1, PAGE_SHIFT
+ PTR_ALSL t1, t0, ra, _PTE_T_LOG2
#ifdef CONFIG_SMP
smp_pgtable_change_store:
- ll.d t0, t1, 0
+ PTE_LL t0, t1, 0
#else
- ld.d t0, t1, 0
+ PTR_L t0, t1, 0
#endif
+
+#ifdef CONFIG_64BIT
andi ra, t0, _PAGE_PRESENT | _PAGE_WRITE
xori ra, ra, _PAGE_PRESENT | _PAGE_WRITE
+#else
+ PTR_LI ra, _PAGE_PRESENT | _PAGE_WRITE
+ and ra, ra, t0
+ nor ra, ra, zero
+#endif
bnez ra, nopage_tlb_store
+#ifdef CONFIG_64BIT
ori t0, t0, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
+#else
+ PTR_LI ra, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
+ or t0, ra, t0
+#endif
+
#ifdef CONFIG_SMP
- sc.d t0, t1, 0
+ PTE_SC t0, t1, 0
beqz t0, smp_pgtable_change_store
#else
- st.d t0, t1, 0
+ PTR_S t0, t1, 0
#endif
tlbsrch
- bstrins.d t1, zero, 3, 3
- ld.d t0, t1, 0
- ld.d t1, t1, 8
+ PTR_BSTRINS t1, zero, _PTE_T_LOG2, _PTE_T_LOG2
+ PTR_L t0, t1, 0
+ PTR_L t1, t1, _PTE_T_SIZE
csrwr t0, LOONGARCH_CSR_TLBELO0
csrwr t1, LOONGARCH_CSR_TLBELO1
tlbwr
csrrd ra, EXCEPTION_KS2
ertn
-#ifdef CONFIG_64BIT
vmalloc_store:
la_abs t1, swapper_pg_dir
b vmalloc_done_store
-#endif
/* This is the entry point of a huge page. */
tlb_huge_update_store:
#ifdef CONFIG_SMP
- ll.d ra, t1, 0
+ PTE_LL ra, t1, 0
#else
- rotri.d ra, ra, 64 - (_PAGE_HUGE_SHIFT + 1)
+ PTR_ROTRI ra, ra, BITS_PER_LONG - (_PAGE_HUGE_SHIFT + 1)
#endif
+
+#ifdef CONFIG_64BIT
andi t0, ra, _PAGE_PRESENT | _PAGE_WRITE
xori t0, t0, _PAGE_PRESENT | _PAGE_WRITE
+#else
+ PTR_LI t0, _PAGE_PRESENT | _PAGE_WRITE
+ and t0, t0, ra
+ nor t0, t0, zero
+#endif
+
bnez t0, nopage_tlb_store
#ifdef CONFIG_SMP
ori t0, ra, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
- sc.d t0, t1, 0
+ PTE_SC t0, t1, 0
beqz t0, tlb_huge_update_store
ori t0, ra, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
#else
+#ifdef CONFIG_64BIT
ori t0, ra, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
- st.d t0, t1, 0
+#else
+ PTR_LI t0, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
+ or t0, ra, t0
+#endif
+ PTR_S t0, t1, 0
#endif
csrrd ra, LOONGARCH_CSR_ASID
csrrd t1, LOONGARCH_CSR_BADV
xori t0, t0, _PAGE_HUGE
lu12i.w t1, _PAGE_HGLOBAL >> 12
and t1, t0, t1
- srli.d t1, t1, (_PAGE_HGLOBAL_SHIFT - _PAGE_GLOBAL_SHIFT)
+ PTR_SRLI t1, t1, (_PAGE_HGLOBAL_SHIFT - _PAGE_GLOBAL_SHIFT)
or t0, t0, t1
move ra, t0
csrwr ra, LOONGARCH_CSR_TLBELO0
/* Convert to entrylo1 */
- addi.d t1, zero, 1
- slli.d t1, t1, (HPAGE_SHIFT - 1)
- add.d t0, t0, t1
+ PTR_ADDI t1, zero, 1
+ PTR_SLLI t1, t1, (HPAGE_SHIFT - 1)
+ PTR_ADD t0, t0, t1
csrwr t0, LOONGARCH_CSR_TLBELO1
/* Set huge page tlb entry size */
- addu16i.d t0, zero, (CSR_TLBIDX_PS >> 16)
- addu16i.d t1, zero, (PS_HUGE_SIZE << (CSR_TLBIDX_PS_SHIFT - 16))
+ PTR_LI t0, (CSR_TLBIDX_PS >> 16) << 16
+ PTR_LI t1, (PS_HUGE_SIZE << (CSR_TLBIDX_PS_SHIFT))
csrxchg t1, t0, LOONGARCH_CSR_TLBIDX
tlbfill
/* Reset default page size */
- addu16i.d t0, zero, (CSR_TLBIDX_PS >> 16)
- addu16i.d t1, zero, (PS_DEFAULT_SIZE << (CSR_TLBIDX_PS_SHIFT - 16))
+ PTR_LI t0, (CSR_TLBIDX_PS >> 16) << 16
+ PTR_LI t1, (PS_DEFAULT_SIZE << (CSR_TLBIDX_PS_SHIFT))
csrxchg t1, t0, LOONGARCH_CSR_TLBIDX
csrrd t0, EXCEPTION_KS0
vmalloc_done_modify:
/* Get PGD offset in bytes */
- bstrpick.d ra, t0, PTRS_PER_PGD_BITS + PGDIR_SHIFT - 1, PGDIR_SHIFT
- alsl.d t1, ra, t1, 3
+#ifdef CONFIG_32BIT
+ PTR_BSTRPICK ra, t0, 31, PGDIR_SHIFT
+#else
+ PTR_BSTRPICK ra, t0, PTRS_PER_PGD_BITS + PGDIR_SHIFT - 1, PGDIR_SHIFT
+#endif
+ PTR_ALSL t1, ra, t1, _PGD_T_LOG2
+
#if CONFIG_PGTABLE_LEVELS > 3
- ld.d t1, t1, 0
- bstrpick.d ra, t0, PTRS_PER_PUD_BITS + PUD_SHIFT - 1, PUD_SHIFT
- alsl.d t1, ra, t1, 3
+ PTR_L t1, t1, 0
+ PTR_BSTRPICK ra, t0, PTRS_PER_PUD_BITS + PUD_SHIFT - 1, PUD_SHIFT
+ PTR_ALSL t1, ra, t1, _PMD_T_LOG2
#endif
#if CONFIG_PGTABLE_LEVELS > 2
- ld.d t1, t1, 0
- bstrpick.d ra, t0, PTRS_PER_PMD_BITS + PMD_SHIFT - 1, PMD_SHIFT
- alsl.d t1, ra, t1, 3
+ PTR_L t1, t1, 0
+ PTR_BSTRPICK ra, t0, PTRS_PER_PMD_BITS + PMD_SHIFT - 1, PMD_SHIFT
+ PTR_ALSL t1, ra, t1, _PMD_T_LOG2
#endif
- ld.d ra, t1, 0
+ PTR_L ra, t1, 0
/*
* For huge tlb entries, pmde doesn't contain an address but
* instead contains the tlb pte. Check the PAGE_HUGE bit and
* see if we need to jump to huge tlb processing.
*/
- rotri.d ra, ra, _PAGE_HUGE_SHIFT + 1
+ PTR_ROTRI ra, ra, _PAGE_HUGE_SHIFT + 1
bltz ra, tlb_huge_update_modify
- rotri.d ra, ra, 64 - (_PAGE_HUGE_SHIFT + 1)
- bstrpick.d t0, t0, PTRS_PER_PTE_BITS + PAGE_SHIFT - 1, PAGE_SHIFT
- alsl.d t1, t0, ra, _PTE_T_LOG2
+ PTR_ROTRI ra, ra, BITS_PER_LONG - (_PAGE_HUGE_SHIFT + 1)
+ PTR_BSTRPICK t0, t0, PTRS_PER_PTE_BITS + PAGE_SHIFT - 1, PAGE_SHIFT
+ PTR_ALSL t1, t0, ra, _PTE_T_LOG2
#ifdef CONFIG_SMP
smp_pgtable_change_modify:
- ll.d t0, t1, 0
+ PTE_LL t0, t1, 0
#else
- ld.d t0, t1, 0
+ PTR_L t0, t1, 0
#endif
+#ifdef CONFIG_64BIT
andi ra, t0, _PAGE_WRITE
+#else
+ PTR_LI ra, _PAGE_WRITE
+ and ra, t0, ra
+#endif
+
beqz ra, nopage_tlb_modify
+#ifdef CONFIG_64BIT
ori t0, t0, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
+#else
+ PTR_LI ra, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
+ or t0, ra, t0
+#endif
+
#ifdef CONFIG_SMP
- sc.d t0, t1, 0
+ PTE_SC t0, t1, 0
beqz t0, smp_pgtable_change_modify
#else
- st.d t0, t1, 0
+ PTR_S t0, t1, 0
#endif
tlbsrch
- bstrins.d t1, zero, 3, 3
- ld.d t0, t1, 0
- ld.d t1, t1, 8
+ PTR_BSTRINS t1, zero, _PTE_T_LOG2, _PTE_T_LOG2
+ PTR_L t0, t1, 0
+ PTR_L t1, t1, _PTE_T_SIZE
csrwr t0, LOONGARCH_CSR_TLBELO0
csrwr t1, LOONGARCH_CSR_TLBELO1
tlbwr
csrrd ra, EXCEPTION_KS2
ertn
-#ifdef CONFIG_64BIT
vmalloc_modify:
la_abs t1, swapper_pg_dir
b vmalloc_done_modify
-#endif
/* This is the entry point of a huge page. */
tlb_huge_update_modify:
#ifdef CONFIG_SMP
- ll.d ra, t1, 0
+ PTE_LL ra, t1, 0
#else
- rotri.d ra, ra, 64 - (_PAGE_HUGE_SHIFT + 1)
+ PTR_ROTRI ra, ra, BITS_PER_LONG - (_PAGE_HUGE_SHIFT + 1)
#endif
+
+#ifdef CONFIG_64BIT
andi t0, ra, _PAGE_WRITE
+#else
+ PTR_LI t0, _PAGE_WRITE
+ and t0, ra, t0
+#endif
+
beqz t0, nopage_tlb_modify
#ifdef CONFIG_SMP
ori t0, ra, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
- sc.d t0, t1, 0
+ PTE_SC t0, t1, 0
beqz t0, tlb_huge_update_modify
ori t0, ra, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
#else
+#ifdef CONFIG_64BIT
ori t0, ra, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
- st.d t0, t1, 0
+#else
+ PTR_LI t0, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
+ or t0, ra, t0
+#endif
+ PTR_S t0, t1, 0
#endif
csrrd ra, LOONGARCH_CSR_ASID
csrrd t1, LOONGARCH_CSR_BADV
xori t0, t0, _PAGE_HUGE
lu12i.w t1, _PAGE_HGLOBAL >> 12
and t1, t0, t1
- srli.d t1, t1, (_PAGE_HGLOBAL_SHIFT - _PAGE_GLOBAL_SHIFT)
+ PTR_SRLI t1, t1, (_PAGE_HGLOBAL_SHIFT - _PAGE_GLOBAL_SHIFT)
or t0, t0, t1
move ra, t0
csrwr ra, LOONGARCH_CSR_TLBELO0
/* Convert to entrylo1 */
- addi.d t1, zero, 1
- slli.d t1, t1, (HPAGE_SHIFT - 1)
- add.d t0, t0, t1
+ PTR_ADDI t1, zero, 1
+ PTR_SLLI t1, t1, (HPAGE_SHIFT - 1)
+ PTR_ADD t0, t0, t1
csrwr t0, LOONGARCH_CSR_TLBELO1
/* Set huge page tlb entry size */
- addu16i.d t0, zero, (CSR_TLBIDX_PS >> 16)
- addu16i.d t1, zero, (PS_HUGE_SIZE << (CSR_TLBIDX_PS_SHIFT - 16))
+ PTR_LI t0, (CSR_TLBIDX_PS >> 16) << 16
+ PTR_LI t1, (PS_HUGE_SIZE << (CSR_TLBIDX_PS_SHIFT))
csrxchg t1, t0, LOONGARCH_CSR_TLBIDX
tlbfill
/* Reset default page size */
- addu16i.d t0, zero, (CSR_TLBIDX_PS >> 16)
- addu16i.d t1, zero, (PS_DEFAULT_SIZE << (CSR_TLBIDX_PS_SHIFT - 16))
+ PTR_LI t0, (CSR_TLBIDX_PS >> 16) << 16
+ PTR_LI t1, (PS_DEFAULT_SIZE << (CSR_TLBIDX_PS_SHIFT))
csrxchg t1, t0, LOONGARCH_CSR_TLBIDX
csrrd t0, EXCEPTION_KS0
jr t0
SYM_CODE_END(handle_tlb_modify_ptw)
+#ifdef CONFIG_32BIT
+SYM_CODE_START(handle_tlb_refill)
+ UNWIND_HINT_UNDEFINED
+ csrwr t0, EXCEPTION_KS0
+ csrwr t1, EXCEPTION_KS1
+ csrwr ra, EXCEPTION_KS2
+ li.w ra, 0x1fffffff
+
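+	/* Locate the PGD entry for the faulting address */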
+ csrrd t0, LOONGARCH_CSR_PGD
+ csrrd t1, LOONGARCH_CSR_TLBRBADV
+ srli.w t1, t1, PGDIR_SHIFT
+ slli.w t1, t1, 0x2
+ add.w t0, t0, t1
+ and t0, t0, ra
+
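+	/* Load the page table pointer and locate its even/odd PTE pair */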
+ ld.w t0, t0, 0
+ csrrd t1, LOONGARCH_CSR_TLBRBADV
+ slli.w t1, t1, (32 - PGDIR_SHIFT)
+ srli.w t1, t1, (32 - PGDIR_SHIFT + PAGE_SHIFT + 1)
+ slli.w t1, t1, (0x2 + 1)
+ add.w t0, t0, t1
+ and t0, t0, ra
+
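+	/* Write the even and odd PTEs into TLBRELO0/TLBRELO1, then fill the TLB */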
+ ld.w t1, t0, 0x0
+ csrwr t1, LOONGARCH_CSR_TLBRELO0
+
+ ld.w t1, t0, 0x4
+ csrwr t1, LOONGARCH_CSR_TLBRELO1
+
+ tlbfill
+ csrrd t0, EXCEPTION_KS0
+ csrrd t1, EXCEPTION_KS1
+ csrrd ra, EXCEPTION_KS2
+ ertn
+SYM_CODE_END(handle_tlb_refill)
+#endif
+
+#ifdef CONFIG_64BIT
SYM_CODE_START(handle_tlb_refill)
UNWIND_HINT_UNDEFINED
csrwr t0, LOONGARCH_CSR_TLBRSAVE
csrrd t0, LOONGARCH_CSR_TLBRSAVE
ertn
SYM_CODE_END(handle_tlb_refill)
+#endif