riscv: pgtable: Use riscv_has_extension_unlikely
author    Vivian Wang <wangruikang@iscas.ac.cn>
          Tue, 18 Nov 2025 04:19:21 +0000 (21:19 -0700)
committer Paul Walmsley <pjw@kernel.org>
          Wed, 19 Nov 2025 16:19:27 +0000 (09:19 -0700)
Use riscv_has_extension_unlikely() to check for RISCV_ISA_EXT_SVVPTC,
replacing the use of asm goto with ALTERNATIVE.

The "unlikely" variant is used to match the behavior of the original
implementation using ALTERNATIVE("nop", "j %l[svvptc]", ...).

Note that this makes the check for RISCV_ISA_EXT_SVVPTC a runtime one if
RISCV_ALTERNATIVE=n, but the check should still be worthwhile given that
TLB flushes are relatively slow.

Signed-off-by: Vivian Wang <wangruikang@iscas.ac.cn>
Link: https://patch.msgid.link/20251020-riscv-altn-helper-wip-v4-1-ef941c87669a@iscas.ac.cn
Signed-off-by: Paul Walmsley <pjw@kernel.org>
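
For context on the RISCV_ALTERNATIVE=n note above, riscv_has_extension_unlikely()
roughly takes the shape sketched below (paraphrased from
arch/riscv/include/asm/cpufeature.h; not a verbatim copy). With
CONFIG_RISCV_ALTERNATIVE=y the branch is patched at boot via the same ALTERNATIVE
mechanism the old asm goto used; with CONFIG_RISCV_ALTERNATIVE=n it falls back to a
runtime ISA-extension lookup, which is the case the note above is about.

    /* Sketch only; the in-tree helper differs in detail (asserts, operand lists). */
    static __always_inline bool riscv_has_extension_unlikely(const unsigned long ext)
    {
            if (IS_ENABLED(CONFIG_RISCV_ALTERNATIVE)) {
                    /* Boot-time patched: "nop" when the extension is absent, "j l_yes" when present. */
                    asm goto(ALTERNATIVE("nop", "j %l[l_yes]", 0, %[ext], 1)
                             : : [ext] "i" (ext) : : l_yes);
            } else {
                    /* No alternatives: query the ISA extension bitmap at runtime. */
                    if (__riscv_isa_extension_available(NULL, ext))
                            goto l_yes;
            }

            return false;
    l_yes:
            return true;
    }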
arch/riscv/include/asm/pgtable.h
arch/riscv/mm/pgtable.c

diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h
index 5a08eb5fe99fc4ad25423000cabc95d9cbecb48b..45b2021eb2c1e3679bc7ed145ab63705b5a654f0 100644
--- a/arch/riscv/include/asm/pgtable.h
+++ b/arch/riscv/include/asm/pgtable.h
@@ -496,8 +496,13 @@ static inline void update_mmu_cache_range(struct vm_fault *vmf,
                struct vm_area_struct *vma, unsigned long address,
                pte_t *ptep, unsigned int nr)
 {
-       asm goto(ALTERNATIVE("nop", "j %l[svvptc]", 0, RISCV_ISA_EXT_SVVPTC, 1)
-                : : : : svvptc);
+       /*
+        * Svvptc guarantees that the new valid pte will be visible within
+        * a bounded timeframe, so when the uarch does not cache invalid
+        * entries, we don't have to do anything.
+        */
+       if (riscv_has_extension_unlikely(RISCV_ISA_EXT_SVVPTC))
+               return;
 
        /*
         * The kernel assumes that TLBs don't cache invalid entries, but
@@ -509,12 +514,6 @@ static inline void update_mmu_cache_range(struct vm_fault *vmf,
        while (nr--)
                local_flush_tlb_page(address + nr * PAGE_SIZE);
 
-svvptc:;
-       /*
-        * Svvptc guarantees that the new valid pte will be visible within
-        * a bounded timeframe, so when the uarch does not cache invalid
-        * entries, we don't have to do anything.
-        */
 }
 #define update_mmu_cache(vma, addr, ptep) \
        update_mmu_cache_range(NULL, vma, addr, ptep, 1)
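
Stitching the two pgtable.h hunks together, the function ends up reading roughly as
follows (reconstructed from the visible diff lines only; the context the diff omits
between the hunks is marked, not filled in):

    static inline void update_mmu_cache_range(struct vm_fault *vmf,
                    struct vm_area_struct *vma, unsigned long address,
                    pte_t *ptep, unsigned int nr)
    {
            /*
             * Svvptc guarantees that the new valid pte will be visible within
             * a bounded timeframe, so when the uarch does not cache invalid
             * entries, we don't have to do anything.
             */
            if (riscv_has_extension_unlikely(RISCV_ISA_EXT_SVVPTC))
                    return;

            /* ... comment and context elided by the diff ... */
            while (nr--)
                    local_flush_tlb_page(address + nr * PAGE_SIZE);
    }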
diff --git a/arch/riscv/mm/pgtable.c b/arch/riscv/mm/pgtable.c
index 8b6c0a112a8db4e91de54c3bd3bd527a605a6197..807c0a0de18275b1a33bcd41d7dce27ee7de0662 100644
--- a/arch/riscv/mm/pgtable.c
+++ b/arch/riscv/mm/pgtable.c
@@ -9,8 +9,16 @@ int ptep_set_access_flags(struct vm_area_struct *vma,
                          unsigned long address, pte_t *ptep,
                          pte_t entry, int dirty)
 {
-       asm goto(ALTERNATIVE("nop", "j %l[svvptc]", 0, RISCV_ISA_EXT_SVVPTC, 1)
-                : : : : svvptc);
+       if (riscv_has_extension_unlikely(RISCV_ISA_EXT_SVVPTC)) {
+               if (!pte_same(ptep_get(ptep), entry)) {
+                       __set_pte_at(vma->vm_mm, ptep, entry);
+                       /* Here only not svadu is impacted */
+                       flush_tlb_page(vma, address);
+                       return true;
+               }
+
+               return false;
+       }
 
        if (!pte_same(ptep_get(ptep), entry))
                __set_pte_at(vma->vm_mm, ptep, entry);
@@ -19,16 +27,6 @@ int ptep_set_access_flags(struct vm_area_struct *vma,
         * the case that the PTE changed and the spurious fault case.
         */
        return true;
-
-svvptc:
-       if (!pte_same(ptep_get(ptep), entry)) {
-               __set_pte_at(vma->vm_mm, ptep, entry);
-               /* Here only not svadu is impacted */
-               flush_tlb_page(vma, address);
-               return true;
-       }
-
-       return false;
 }
 
 int ptep_test_and_clear_young(struct vm_area_struct *vma,
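
For the arch/riscv/mm/pgtable.c hunk, the return value is what the generic fault path
keys off: returning false from the Svvptc branch tells mm code that the PTE did not
change, so the fault can be treated as spurious rather than forcing a flush, while
returning true after __set_pte_at() and flush_tlb_page() covers the case where the PTE
really did change. A paraphrased caller-side sketch (modeled on the handle_pte_fault()
flow in mm/memory.c; simplified, not verbatim, and details vary by kernel version):

    /* Illustrative only: how generic mm code reacts to the return value. */
    entry = pte_mkyoung(ptep_get(ptep));        /* plus pte_mkdirty() on write faults */
    if (ptep_set_access_flags(vma, address, ptep, entry, write_fault)) {
            /* The PTE changed: make the new translation visible. */
            update_mmu_cache_range(NULL, vma, address, ptep, 1);
    } else if (write_fault) {
            /* Nothing changed: spurious fault, only a targeted fixup flush is considered. */
            flush_tlb_fix_spurious_fault(vma, address, ptep);
    }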