git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
arm64: Turn the MMU-on sequence into a macro
author: Marc Zyngier <maz@kernel.org>
Mon, 8 Feb 2021 09:57:12 +0000 (09:57 +0000)
committer: Will Deacon <will@kernel.org>
Mon, 8 Feb 2021 12:51:26 +0000 (12:51 +0000)
Turning the MMU on is a popular sport in the arm64 kernel, and
we do it more than once, or even twice. As we are about to add
even more, let's turn it into a macro.

No expected functional change.

Signed-off-by: Marc Zyngier <maz@kernel.org>
Acked-by: Catalin Marinas <catalin.marinas@arm.com>
Acked-by: David Brazdil <dbrazdil@google.com>
Link: https://lore.kernel.org/r/20210208095732.3267263-4-maz@kernel.org
Signed-off-by: Will Deacon <will@kernel.org>
arch/arm64/include/asm/assembler.h
arch/arm64/kernel/head.S
arch/arm64/mm/proc.S

index bf125c59111688840bf3f3e84fca12b74fb1098b..8cded93f99c38dff3d23a7f11ab22093655c406b 100644 (file)
@@ -675,6 +675,23 @@ USER(\label, ic    ivau, \tmp2)                    // invalidate I line PoU
        .endif
        .endm
 
+/*
+ * Set SCTLR_EL1 to the passed value, and invalidate the local icache
+ * in the process. This is called when setting the MMU on.
+ */
+.macro set_sctlr_el1, reg
+       msr     sctlr_el1, \reg
+       isb
+       /*
+        * Invalidate the local I-cache so that any instructions fetched
+        * speculatively from the PoC are discarded, since they may have
+        * been dynamically patched at the PoU.
+        */
+       ic      iallu
+       dsb     nsh
+       isb
+.endm
+
 /*
  * Check whether to yield to another runnable task from kernel mode NEON code
  * (which runs with preemption disabled).
index a0dc987724eda8e3d50f9fb1440625e3e9d881eb..28e9735302dffd6f24bf1efdf5a2946b97c81d75 100644 (file)
@@ -703,16 +703,9 @@ SYM_FUNC_START(__enable_mmu)
        offset_ttbr1 x1, x3
        msr     ttbr1_el1, x1                   // load TTBR1
        isb
-       msr     sctlr_el1, x0
-       isb
-       /*
-        * Invalidate the local I-cache so that any instructions fetched
-        * speculatively from the PoC are discarded, since they may have
-        * been dynamically patched at the PoU.
-        */
-       ic      iallu
-       dsb     nsh
-       isb
+
+       set_sctlr_el1   x0
+
        ret
 SYM_FUNC_END(__enable_mmu)
 
@@ -883,11 +876,7 @@ SYM_FUNC_START_LOCAL(__primary_switch)
        tlbi    vmalle1                         // Remove any stale TLB entries
        dsb     nsh
 
-       msr     sctlr_el1, x19                  // re-enable the MMU
-       isb
-       ic      iallu                           // flush instructions fetched
-       dsb     nsh                             // via old mapping
-       isb
+       set_sctlr_el1   x19                     // re-enable the MMU
 
        bl      __relocate_kernel
 #endif
index ece785477bdc5b7cc1439227baf0670b50f4884b..c967bfd30d2b545f9836dad23cec8a83bdad764d 100644 (file)
@@ -291,17 +291,7 @@ skip_pgd:
        /* We're done: fire up the MMU again */
        mrs     x17, sctlr_el1
        orr     x17, x17, #SCTLR_ELx_M
-       msr     sctlr_el1, x17
-       isb
-
-       /*
-        * Invalidate the local I-cache so that any instructions fetched
-        * speculatively from the PoC are discarded, since they may have
-        * been dynamically patched at the PoU.
-        */
-       ic      iallu
-       dsb     nsh
-       isb
+       set_sctlr_el1   x17
 
        /* Set the flag to zero to indicate that we're all done */
        str     wzr, [flag_ptr]