git.ipfire.org Git - thirdparty/linux.git/commitdiff
arm64: context switch POR_EL0 register
author: Joey Gouly <joey.gouly@arm.com>
  Thu, 22 Aug 2024 15:10:49 +0000 (16:10 +0100)
committer: Will Deacon <will@kernel.org>
  Wed, 4 Sep 2024 11:52:18 +0000 (12:52 +0100)
POR_EL0 is a register that can be modified by userspace directly,
so it must be context switched.

Signed-off-by: Joey Gouly <joey.gouly@arm.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Will Deacon <will@kernel.org>
Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
Link: https://lore.kernel.org/r/20240822151113.1479789-7-joey.gouly@arm.com
[will: Dropped unnecessary isb()s]
Signed-off-by: Will Deacon <will@kernel.org>
arch/arm64/include/asm/cpufeature.h
arch/arm64/include/asm/processor.h
arch/arm64/include/asm/sysreg.h
arch/arm64/kernel/process.c

index 5584342672715644e0d65c5997ddcf61369cd86c..3d261cc123c1e22ac7bc9cfcde463624c76b2084 100644 (file)
@@ -832,6 +832,12 @@ static inline bool system_supports_lpa2(void)
        return cpus_have_final_cap(ARM64_HAS_LPA2);
 }
 
+/*
+ * system_supports_poe() - check for the Stage-1 Permission Overlay Extension.
+ *
+ * True only when the kernel was built with CONFIG_ARM64_POE and the
+ * ARM64_HAS_S1POE capability was detected (patched in via the
+ * alternatives framework, biased towards "not present").
+ */
+static inline bool system_supports_poe(void)
+{
+       return IS_ENABLED(CONFIG_ARM64_POE) &&
+               alternative_has_cap_unlikely(ARM64_HAS_S1POE);
+}
+
 int do_emulate_mrs(struct pt_regs *regs, u32 sys_reg, u32 rt);
 bool try_emulate_mrs(struct pt_regs *regs, u32 isn);
 
index f77371232d8c6d542c7df057feea2e21752f34f2..e6376f97927321dff3a16c3af1fbfc9c9039444a 100644 (file)
@@ -184,6 +184,7 @@ struct thread_struct {
        u64                     sctlr_user;
        u64                     svcr;
        u64                     tpidr2_el0;
+       u64                     por_el0;
 };
 
 static inline unsigned int thread_get_vl(struct thread_struct *thread,
index 4a9ea103817e896f9c0f74d2f4285fb0915c8835..494e9efd856f738d86e550585b6b7edab2ddf579 100644 (file)
 #define POE_RXW                UL(0x7)
 #define POE_MASK       UL(0xf)
 
+/* Initial value for Permission Overlay Extension for EL0 */
+#define POR_EL0_INIT   POE_RXW
+
 #define ARM64_FEATURE_FIELD_BITS       4
 
 /* Defined for compatibility only, do not add new users. */
index 4ae31b7af6c31176c072cebb0c5b5269311bc3b1..f365b033a64958f8dab67b2f9b4aabe158f9c7cd 100644 (file)
@@ -271,12 +271,21 @@ static void flush_tagged_addr_state(void)
                clear_thread_flag(TIF_TAGGED_ADDR);
 }
 
+/*
+ * Reset the EL0 permission-overlay register to its initial value
+ * (POR_EL0_INIT). No-op on systems without POE support.
+ */
+static void flush_poe(void)
+{
+       if (!system_supports_poe())
+               return;
+
+       write_sysreg_s(POR_EL0_INIT, SYS_POR_EL0);
+}
+
 void flush_thread(void)
 {
        fpsimd_flush_thread();
        tls_thread_flush();
        flush_ptrace_hw_breakpoint(current);
        flush_tagged_addr_state();
+       /* Reset POR_EL0 to POR_EL0_INIT (no-op without POE support). */
+       flush_poe();
 }
 
 void arch_release_task_struct(struct task_struct *tsk)
@@ -371,6 +380,9 @@ int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
                if (system_supports_tpidr2())
                        p->thread.tpidr2_el0 = read_sysreg_s(SYS_TPIDR2_EL0);
 
+               if (system_supports_poe())
+                       p->thread.por_el0 = read_sysreg_s(SYS_POR_EL0);
+
                if (stack_start) {
                        if (is_compat_thread(task_thread_info(p)))
                                childregs->compat_sp = stack_start;
@@ -495,6 +507,17 @@ static void erratum_1418040_new_exec(void)
        preempt_enable();
 }
 
+/*
+ * Context-switch POR_EL0. The register is directly writable by
+ * userspace, so the outgoing task's saved copy may be stale: re-read
+ * the hardware value into current->thread.por_el0 before comparing,
+ * and only write the register when @next's value actually differs.
+ *
+ * NOTE(review): no isb() after the write — per the commit message the
+ * isb()s were dropped as unnecessary, presumably relying on the
+ * context synchronisation of the eventual exception return to EL0.
+ */
+static void permission_overlay_switch(struct task_struct *next)
+{
+       if (!system_supports_poe())
+               return;
+
+       current->thread.por_el0 = read_sysreg_s(SYS_POR_EL0);
+       if (current->thread.por_el0 != next->thread.por_el0) {
+               write_sysreg_s(next->thread.por_el0, SYS_POR_EL0);
+       }
+}
+
 /*
  * __switch_to() checks current->thread.sctlr_user as an optimisation. Therefore
  * this function must be called with preemption disabled and the update to
@@ -530,6 +553,7 @@ struct task_struct *__switch_to(struct task_struct *prev,
        ssbs_thread_switch(next);
        erratum_1418040_thread_switch(next);
        ptrauth_thread_switch_user(next);
+       permission_overlay_switch(next);
 
        /*
         * Complete any pending TLB or cache maintenance on this CPU in case