static inline void update_mmu_cache_range(struct vm_fault *vmf,
		struct vm_area_struct *vma, unsigned long address,
		pte_t *ptep, unsigned int nr)
{
- asm goto(ALTERNATIVE("nop", "j %l[svvptc]", 0, RISCV_ISA_EXT_SVVPTC, 1)
- : : : : svvptc);
+ /*
+ * Svvptc guarantees that the new valid pte will be visible within
+ * a bounded timeframe, so when the uarch does not cache invalid
+ * entries, we don't have to do anything.
+ */
+ if (riscv_has_extension_unlikely(RISCV_ISA_EXT_SVVPTC))
+ return;
	/*
	 * The kernel assumes that TLBs don't cache invalid entries, but
	 * in RISC-V, SFENCE.VMA specifies an ordering constraint, not a
	 * cache flush; it is necessary even after writing invalid entries.
	 * Relying on flush_tlb_fix_spurious_fault would suffice, but
	 * the extra traps reduce performance. So, eagerly SFENCE.VMA.
	 */
	while (nr--)
		local_flush_tlb_page(address + nr * PAGE_SIZE);
-svvptc:;
- /*
- * Svvptc guarantees that the new valid pte will be visible within
- * a bounded timeframe, so when the uarch does not cache invalid
- * entries, we don't have to do anything.
- */
}
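
Reviewer note: riscv_has_extension_unlikely() compiles down to the same
ALTERNATIVE-patched branch that the open-coded asm goto used, so this
conversion should not change the generated fast path. A simplified sketch
of the helper, paraphrased from arch/riscv/include/asm/cpufeature.h (the
real helper also asserts the extension id and falls back to a runtime
ISA-bitmap lookup when CONFIG_RISCV_ALTERNATIVE is disabled):

static __always_inline bool riscv_has_extension_unlikely(const unsigned long ext)
{
	/* Patched at boot: stays a nop without the extension, becomes a jump with it */
	asm goto(ALTERNATIVE("nop", "j %l[l_yes]", 0, %[ext], 1)
		 : : [ext] "i" (ext) : : l_yes);

	return false;
l_yes:
	return true;
}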
#define update_mmu_cache(vma, addr, ptep) \
update_mmu_cache_range(NULL, vma, addr, ptep, 1)
int ptep_set_access_flags(struct vm_area_struct *vma,
			  unsigned long address, pte_t *ptep,
			  pte_t entry, int dirty)
{
- asm goto(ALTERNATIVE("nop", "j %l[svvptc]", 0, RISCV_ISA_EXT_SVVPTC, 1)
- : : : : svvptc);
+ if (riscv_has_extension_unlikely(RISCV_ISA_EXT_SVVPTC)) {
+ if (!pte_same(ptep_get(ptep), entry)) {
+ __set_pte_at(vma->vm_mm, ptep, entry);
+ /* In practice only uarchs without Svadu are impacted here */
+ flush_tlb_page(vma, address);
+ return true;
+ }
+
+ return false;
+ }
	if (!pte_same(ptep_get(ptep), entry))
		__set_pte_at(vma->vm_mm, ptep, entry);
	/*
	 * update_mmu_cache will unconditionally execute, handling both
	 * the case that the PTE changed and the spurious fault case.
	 */
	return true;
-
-svvptc:
- if (!pte_same(ptep_get(ptep), entry)) {
- __set_pte_at(vma->vm_mm, ptep, entry);
- /* Here only not svadu is impacted */
- flush_tlb_page(vma, address);
- return true;
- }
-
- return false;
}
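
Reviewer note: returning true unconditionally on the non-Svvptc path is
safe because of how the generic fault code consumes the result: a true
return makes it invoke update_mmu_cache(), which performs the eager
sfence.vma above. A simplified sketch of that caller, loosely paraphrased
from handle_pte_fault() in mm/memory.c (retry and locking details
omitted, names shortened):

	entry = pte_mkyoung(entry);
	if (ptep_set_access_flags(vma, address, ptep, entry, write)) {
		/* The arch accepted/updated the PTE: let it sync the MMU */
		update_mmu_cache(vma, address, ptep);
	} else if (write) {
		/* Spurious write fault, PTE unchanged: flush only if the arch needs it */
		flush_tlb_fix_spurious_fault(vma, address, ptep);
	}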
int ptep_test_and_clear_young(struct vm_area_struct *vma,