smccc/kvm_guest: Enable errata based on implementation CPUs
author    Shameer Kolothum <shameerali.kolothum.thodi@huawei.com>    Fri, 21 Feb 2025 14:02:28 +0000 (14:02 +0000)
committer Oliver Upton <oliver.upton@linux.dev>    Wed, 26 Feb 2025 21:30:37 +0000 (13:30 -0800)
Retrieve any migration target implementation CPUs from the hypervisor
using the discovery hypercalls and enable the errata associated with
them.
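
In outline, the guest-side discovery added to
drivers/firmware/smccc/kvm_guest.c works as follows (a condensed
sketch built from the patch's own constants; probe_target_impl_cpus()
is an invented name, not part of the patch):

	/*
	 * Sketch only: probe the interface version hypercall, then fetch
	 * one MIDR/REVIDR/AIDR triplet per migration target CPU.
	 */
	static int probe_target_impl_cpus(struct target_impl_cpu *out, u64 max_cpus)
	{
		struct arm_smccc_res res;
		u64 i;

		arm_smccc_1_1_invoke(ARM_SMCCC_VENDOR_HYP_KVM_DISCOVER_IMPL_VER_FUNC_ID,
				     0, &res);
		if (res.a0 != SMCCC_RET_SUCCESS)
			return -EOPNOTSUPP;

		/* res.a2 holds the number of target implementation CPUs */
		max_cpus = min(max_cpus, res.a2);
		for (i = 0; i < max_cpus; i++) {
			arm_smccc_1_1_invoke(ARM_SMCCC_VENDOR_HYP_KVM_DISCOVER_IMPL_CPUS_FUNC_ID,
					     i, &res);
			if (res.a0 != SMCCC_RET_SUCCESS)
				return -EIO;
			out[i].midr   = res.a1;	/* MIDR_EL1 */
			out[i].revidr = res.a2;	/* REVIDR_EL1 */
			out[i].aidr   = res.a3;	/* AIDR_EL1 */
		}
		return 0;
	}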

Reviewed-by: Cornelia Huck <cohuck@redhat.com>
Reviewed-by: Sebastian Ott <sebott@redhat.com>
Signed-off-by: Shameer Kolothum <shameerali.kolothum.thodi@huawei.com>
Acked-by: Catalin Marinas <catalin.marinas@arm.com>
Link: https://lore.kernel.org/r/20250221140229.12588-6-shameerali.kolothum.thodi@huawei.com
Signed-off-by: Oliver Upton <oliver.upton@linux.dev>
arch/arm64/include/asm/cputype.h
arch/arm64/include/asm/hypervisor.h
arch/arm64/kernel/cpu_errata.c
arch/arm64/kernel/cpufeature.c
drivers/firmware/smccc/kvm_guest.c

diff --git a/arch/arm64/include/asm/cputype.h b/arch/arm64/include/asm/cputype.h
index ccb4a155d1180df75a13ec91686c445f1d105d6e..cc2d58141be1adcb5f1364e222fbd756ee6e8032 100644
@@ -276,6 +276,13 @@ static inline bool midr_is_cpu_model_range(u32 midr, u32 model, u32 rv_min,
        return _model == model && rv >= rv_min && rv <= rv_max;
 }
 
+struct target_impl_cpu {
+       u64 midr;
+       u64 revidr;
+       u64 aidr;
+};
+
+bool cpu_errata_set_target_impl(u64 num, void *impl_cpus);
 bool is_midr_in_range_list(struct midr_range const *ranges);
 
 static inline u64 __attribute_const__ read_cpuid_mpidr(void)
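
For illustration, a hypothetical caller of the interface declared
above might look like this (the MIDR values are invented; in this
patch the only real caller is the SMCCC discovery code in
kvm_guest.c):

	/* Hypothetical caller; MIDR values invented for illustration. */
	static struct target_impl_cpu example_targets[] = {
		{ .midr = 0x410fd490 },	/* e.g. migration source host */
		{ .midr = 0x410fd4f0 },	/* e.g. migration target host */
	};

	static void example_set_targets(void)
	{
		/* The target list can only be set once; later calls fail. */
		if (!cpu_errata_set_target_impl(ARRAY_SIZE(example_targets),
						example_targets))
			pr_warn("target implementation CPUs already set\n");
	}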
diff --git a/arch/arm64/include/asm/hypervisor.h b/arch/arm64/include/asm/hypervisor.h
index 409e239834d1947596fe0404ca04781b04c01142..a12fd897c87737ac766579663292db6fce67cb62 100644
@@ -6,6 +6,7 @@
 
 void kvm_init_hyp_services(void);
 bool kvm_arm_hyp_service_available(u32 func_id);
+void kvm_arm_target_impl_cpu_init(void);
 
 #ifdef CONFIG_ARM_PKVM_GUEST
 void pkvm_init_hyp_services(void);
diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
index 1f51cf6378c5089d4ba9ca1f378f17c018978ff3..66869d81c3d548467b2b5cba42b7f51e01b4efac 100644
 #include <asm/kvm_asm.h>
 #include <asm/smp_plat.h>
 
+static u64 target_impl_cpu_num;
+static struct target_impl_cpu *target_impl_cpus;
+
+bool cpu_errata_set_target_impl(u64 num, void *impl_cpus)
+{
+       if (target_impl_cpu_num || !num || !impl_cpus)
+               return false;
+
+       target_impl_cpu_num = num;
+       target_impl_cpus = impl_cpus;
+       return true;
+}
+
 static inline bool is_midr_in_range(struct midr_range const *range)
 {
-       return midr_is_cpu_model_range(read_cpuid_id(), range->model,
-                                      range->rv_min, range->rv_max);
+       int i;
+
+       if (!target_impl_cpu_num)
+               return midr_is_cpu_model_range(read_cpuid_id(), range->model,
+                                              range->rv_min, range->rv_max);
+
+       for (i = 0; i < target_impl_cpu_num; i++) {
+               if (midr_is_cpu_model_range(target_impl_cpus[i].midr,
+                                           range->model,
+                                           range->rv_min, range->rv_max))
+                       return true;
+       }
+       return false;
 }
 
 bool is_midr_in_range_list(struct midr_range const *ranges)
@@ -47,9 +71,20 @@ __is_affected_midr_range(const struct arm64_cpu_capabilities *entry,
 static bool __maybe_unused
 is_affected_midr_range(const struct arm64_cpu_capabilities *entry, int scope)
 {
-       WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
-       return __is_affected_midr_range(entry, read_cpuid_id(),
-                                       read_cpuid(REVIDR_EL1));
+       int i;
+
+       if (!target_impl_cpu_num) {
+               WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
+               return __is_affected_midr_range(entry, read_cpuid_id(),
+                                               read_cpuid(REVIDR_EL1));
+       }
+
+       for (i = 0; i < target_impl_cpu_num; i++) {
+               if (__is_affected_midr_range(entry, target_impl_cpus[i].midr,
+                                            target_impl_cpus[i].revidr))
+                       return true;
+       }
+       return false;
 }
 
 static bool __maybe_unused
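
In miniature, the effect on errata matching is the following (a
sketch with an invented erratum range): once a target list is
registered, a range matching any listed CPU counts as affecting this
VM, since the VM may migrate onto that host at any time.

	/*
	 * Sketch: with {Neoverse-N2, Neoverse-V2} registered as targets,
	 * this check is true even while the VM runs on the N2 host,
	 * because a listed target (V2) falls inside the range.
	 */
	static const struct midr_range example_range[] = {
		MIDR_ALL_VERSIONS(MIDR_NEOVERSE_V2),
		{},
	};

	static bool example_affected(void)
	{
		return is_midr_in_range_list(example_range);
	}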
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index aafa3fbdd5ffa6054b5145a8a1d5daace3d2932f..c615eb6ac7e93e5d0e53bca9365a1431e53d740b 100644
@@ -86,6 +86,7 @@
 #include <asm/kvm_host.h>
 #include <asm/mmu_context.h>
 #include <asm/mte.h>
+#include <asm/hypervisor.h>
 #include <asm/processor.h>
 #include <asm/smp.h>
 #include <asm/sysreg.h>
@@ -3680,6 +3681,7 @@ unsigned long cpu_get_elf_hwcap3(void)
 
 static void __init setup_boot_cpu_capabilities(void)
 {
+       kvm_arm_target_impl_cpu_init();
        /*
         * The boot CPU's feature register values have been recorded. Detect
         * boot cpucaps and local cpucaps for the boot CPU, then enable and
diff --git a/drivers/firmware/smccc/kvm_guest.c b/drivers/firmware/smccc/kvm_guest.c
index f3319be20b36592513b08544b2b1de35a49e79f8..2f03b582c298a4f2d0876e34a0c9a8bfdabeab82 100644
@@ -6,8 +6,11 @@
 #include <linux/bitmap.h>
 #include <linux/cache.h>
 #include <linux/kernel.h>
+#include <linux/memblock.h>
 #include <linux/string.h>
 
+#include <uapi/linux/psci.h>
+
 #include <asm/hypervisor.h>
 
 static DECLARE_BITMAP(__kvm_arm_hyp_services, ARM_SMCCC_KVM_NUM_FUNCS) __ro_after_init = { };
@@ -51,3 +54,64 @@ bool kvm_arm_hyp_service_available(u32 func_id)
        return test_bit(func_id, __kvm_arm_hyp_services);
 }
 EXPORT_SYMBOL_GPL(kvm_arm_hyp_service_available);
+
+void __init kvm_arm_target_impl_cpu_init(void)
+{
+       int i;
+       u32 ver;
+       u64 max_cpus;
+       struct arm_smccc_res res;
+       struct target_impl_cpu *target;
+
+       if (!kvm_arm_hyp_service_available(ARM_SMCCC_KVM_FUNC_DISCOVER_IMPL_VER) ||
+           !kvm_arm_hyp_service_available(ARM_SMCCC_KVM_FUNC_DISCOVER_IMPL_CPUS))
+               return;
+
+       arm_smccc_1_1_invoke(ARM_SMCCC_VENDOR_HYP_KVM_DISCOVER_IMPL_VER_FUNC_ID,
+                            0, &res);
+       if (res.a0 != SMCCC_RET_SUCCESS)
+               return;
+
+       /* Version info is in the lower 32 bits and is in SMCCC_VERSION format */
+       ver = lower_32_bits(res.a1);
+       if (PSCI_VERSION_MAJOR(ver) != 1) {
+               pr_warn("Unsupported target CPU implementation version v%d.%d\n",
+                       PSCI_VERSION_MAJOR(ver), PSCI_VERSION_MINOR(ver));
+               return;
+       }
+
+       if (!res.a2) {
+               pr_warn("No target implementation CPUs specified\n");
+               return;
+       }
+
+       max_cpus = res.a2;
+       target = memblock_alloc(sizeof(*target) * max_cpus, __alignof__(*target));
+       if (!target) {
+               pr_warn("Not enough memory for struct target_impl_cpu\n");
+               return;
+       }
+
+       for (i = 0; i < max_cpus; i++) {
+               arm_smccc_1_1_invoke(ARM_SMCCC_VENDOR_HYP_KVM_DISCOVER_IMPL_CPUS_FUNC_ID,
+                                    i, &res);
+               if (res.a0 != SMCCC_RET_SUCCESS) {
+                       pr_warn("Discovering target implementation CPUs failed\n");
+                       goto mem_free;
+               }
+               target[i].midr = res.a1;
+               target[i].revidr = res.a2;
+               target[i].aidr = res.a3;
+       }
+
+       if (!cpu_errata_set_target_impl(max_cpus, target)) {
+               pr_warn("Failed to set target implementation CPUs\n");
+               goto mem_free;
+       }
+
+       pr_info("Number of target implementation CPUs is %llu\n", max_cpus);
+       return;
+
+mem_free:
+       memblock_free(target, sizeof(*target) * max_cpus);
+}
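
For reference, a worked example of the version check above (assumed
return value; impl_ver_ok() is an invented helper):

	/*
	 * The version word uses the PSCI_VERSION layout, major in bits
	 * [31:16] and minor in bits [15:0], so impl_ver_ok(0x10001)
	 * decodes as v1.1 and passes the major-version check above.
	 */
	static bool impl_ver_ok(u64 a1)
	{
		u32 ver = lower_32_bits(a1);

		return PSCI_VERSION_MAJOR(ver) == 1;
	}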