git.ipfire.org Git - thirdparty/linux.git/commitdiff
riscv: misaligned: request misaligned exception from SBI
author     Clément Léger <cleger@rivosinc.com>
           Fri, 23 May 2025 10:19:23 +0000 (12:19 +0200)
committer  Palmer Dabbelt <palmer@dabbelt.com>
           Wed, 4 Jun 2025 22:11:03 +0000 (15:11 -0700)
Now that the kernel can handle misaligned accesses in S-mode, request
misaligned access exception delegation from SBI. This uses the FWFT SBI
extension defined in SBI version 3.0.

Signed-off-by: Clément Léger <cleger@rivosinc.com>
Reviewed-by: Andrew Jones <ajones@ventanamicro.com>
Link: https://lore.kernel.org/r/20250523101932.1594077-7-cleger@rivosinc.com
Signed-off-by: Palmer Dabbelt <palmer@dabbelt.com>
arch/riscv/include/asm/cpufeature.h
arch/riscv/kernel/traps_misaligned.c
arch/riscv/kernel/unaligned_access_speed.c
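
For context before the diffs: the delegation request below boils down to a
single FWFT "set" call at the SBI boundary. A minimal sketch, assuming the
extension/function IDs from the SBI v3.0 specification (SBI_EXT_FWFT =
0x46574654, SBI_EXT_FWFT_SET = 0, SBI_FWFT_MISALIGNED_EXC_DELEG = 0) and the
kernel's existing sbi_ecall() helper; the sbi_fwft_set() wrapper used in this
patch does roughly this for the calling hart:

	/*
	 * Illustrative only, not part of this patch: ask the SBI
	 * firmware to delegate misaligned access exceptions to S-mode
	 * for the calling hart (value = 1, no lock flag).
	 */
	static int fwft_request_misaligned_deleg(void)
	{
		struct sbiret ret;

		ret = sbi_ecall(SBI_EXT_FWFT, SBI_EXT_FWFT_SET,
				SBI_FWFT_MISALIGNED_EXC_DELEG, 1, 0,
				0, 0, 0);

		return sbi_err_map_linux_errno(ret.error);
	}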

diff --git a/arch/riscv/include/asm/cpufeature.h b/arch/riscv/include/asm/cpufeature.h
index f56b409361fbe07e2a95094bfb3f1a4450b1e4d2..dbe5970d4fe6aa93b8ca9565b812caeb2ca1c708 100644
@@ -67,8 +67,9 @@ void __init riscv_user_isa_enable(void);
        _RISCV_ISA_EXT_DATA(_name, _id, _sub_exts, ARRAY_SIZE(_sub_exts), _validate)
 
 bool __init check_unaligned_access_emulated_all_cpus(void);
+void unaligned_access_init(void);
+int cpu_online_unaligned_access_init(unsigned int cpu);
 #if defined(CONFIG_RISCV_SCALAR_MISALIGNED)
-void check_unaligned_access_emulated(struct work_struct *work __always_unused);
 void unaligned_emulation_finish(void);
 bool unaligned_ctl_available(void);
 DECLARE_PER_CPU(long, misaligned_access_speed);
diff --git a/arch/riscv/kernel/traps_misaligned.c b/arch/riscv/kernel/traps_misaligned.c
index 77c788660223b3db8eb651f6511c813a7c879233..592b1a28e897c1bfdb00dc01952c149a1c49fd8e 100644
@@ -16,6 +16,7 @@
 #include <asm/entry-common.h>
 #include <asm/hwprobe.h>
 #include <asm/cpufeature.h>
+#include <asm/sbi.h>
 #include <asm/vector.h>
 
 #define INSN_MATCH_LB                  0x3
@@ -646,7 +647,7 @@ bool __init check_vector_unaligned_access_emulated_all_cpus(void)
 
 static bool unaligned_ctl __read_mostly;
 
-void check_unaligned_access_emulated(struct work_struct *work __always_unused)
+static void check_unaligned_access_emulated(struct work_struct *work __always_unused)
 {
        int cpu = smp_processor_id();
        long *mas_ptr = per_cpu_ptr(&misaligned_access_speed, cpu);
@@ -657,6 +658,13 @@ void check_unaligned_access_emulated(struct work_struct *work __always_unused)
        __asm__ __volatile__ (
                "       "REG_L" %[tmp], 1(%[ptr])\n"
                : [tmp] "=r" (tmp_val) : [ptr] "r" (&tmp_var) : "memory");
+}
+
+static int cpu_online_check_unaligned_access_emulated(unsigned int cpu)
+{
+       long *mas_ptr = per_cpu_ptr(&misaligned_access_speed, cpu);
+
+       check_unaligned_access_emulated(NULL);
 
        /*
         * If unaligned_ctl is already set, this means that we detected that all
@@ -665,9 +673,10 @@ void check_unaligned_access_emulated(struct work_struct *work __always_unused)
         */
        if (unlikely(unaligned_ctl && (*mas_ptr != RISCV_HWPROBE_MISALIGNED_SCALAR_EMULATED))) {
                pr_crit("CPU misaligned accesses non homogeneous (expected all emulated)\n");
-               while (true)
-                       cpu_relax();
+               return -EINVAL;
        }
+
+       return 0;
 }
 
 bool __init check_unaligned_access_emulated_all_cpus(void)
@@ -699,4 +708,60 @@ bool __init check_unaligned_access_emulated_all_cpus(void)
 {
        return false;
 }
+static int cpu_online_check_unaligned_access_emulated(unsigned int cpu)
+{
+       return 0;
+}
+#endif
+
+#ifdef CONFIG_RISCV_SBI
+
+static bool misaligned_traps_delegated;
+
+static int cpu_online_sbi_unaligned_setup(unsigned int cpu)
+{
+       if (sbi_fwft_set(SBI_FWFT_MISALIGNED_EXC_DELEG, 1, 0) &&
+           misaligned_traps_delegated) {
+               pr_crit("Misaligned trap delegation non homogeneous (expected delegated)");
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+void __init unaligned_access_init(void)
+{
+       int ret;
+
+       ret = sbi_fwft_set_online_cpus(SBI_FWFT_MISALIGNED_EXC_DELEG, 1, 0);
+       if (ret)
+               return;
+
+       misaligned_traps_delegated = true;
+       pr_info("SBI misaligned access exception delegation ok\n");
+       /*
+        * Note that we don't have to take any specific action here, if
+        * the delegation is successful, then
+        * check_unaligned_access_emulated() will verify that indeed the
+        * platform traps on misaligned accesses.
+        */
+}
+#else
+void __init unaligned_access_init(void) {}
+
+static int cpu_online_sbi_unaligned_setup(unsigned int cpu __always_unused)
+{
+       return 0;
+}
 #endif
+
+int cpu_online_unaligned_access_init(unsigned int cpu)
+{
+       int ret;
+
+       ret = cpu_online_sbi_unaligned_setup(cpu);
+       if (ret)
+               return ret;
+
+       return cpu_online_check_unaligned_access_emulated(cpu);
+}
diff --git a/arch/riscv/kernel/unaligned_access_speed.c b/arch/riscv/kernel/unaligned_access_speed.c
index b8ba13819d05e526ee993e289a90a53efe21b5a1..ae2068425fbcd207fb1c172dc701127857864155 100644
@@ -236,6 +236,11 @@ arch_initcall_sync(lock_and_set_unaligned_access_static_branch);
 
 static int riscv_online_cpu(unsigned int cpu)
 {
+       int ret = cpu_online_unaligned_access_init(cpu);
+
+       if (ret)
+               return ret;
+
        /* We are already set since the last check */
        if (per_cpu(misaligned_access_speed, cpu) != RISCV_HWPROBE_MISALIGNED_SCALAR_UNKNOWN) {
                goto exit;
@@ -248,7 +253,6 @@ static int riscv_online_cpu(unsigned int cpu)
        {
                static struct page *buf;
 
-               check_unaligned_access_emulated(NULL);
                buf = alloc_pages(GFP_KERNEL, MISALIGNED_BUFFER_ORDER);
                if (!buf) {
                        pr_warn("Allocation failure, not measuring misaligned performance\n");
@@ -439,6 +443,8 @@ static int __init check_unaligned_access_all_cpus(void)
 {
        int cpu;
 
+       unaligned_access_init();
+
        if (unaligned_scalar_speed_param != RISCV_HWPROBE_MISALIGNED_SCALAR_UNKNOWN) {
                pr_info("scalar unaligned access speed set to '%s' (%lu) by command line\n",
                        speed_str[unaligned_scalar_speed_param], unaligned_scalar_speed_param);
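
A note on how riscv_online_cpu() above gets invoked: it is a CPU hotplug
callback, so the new cpu_online_unaligned_access_init() path also runs for
harts brought online after boot, not just at initcall time. A sketch of the
usual registration pattern, assuming cpuhp_setup_state_nocalls() with a
dynamic state; the actual state name and teardown callback used by
unaligned_access_speed.c are not shown in this diff:

	/*
	 * Illustrative only: register riscv_online_cpu() so each newly
	 * onlined CPU requests delegation (or re-checks emulation)
	 * before its access speed is probed. A dynamic AP state returns
	 * a positive state number on success.
	 */
	static int __init unaligned_hotplug_init(void)
	{
		int ret;

		ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
						"riscv/unaligned:online",
						riscv_online_cpu, NULL);
		return ret < 0 ? ret : 0;
	}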