Merge tag 'kvm-x86-mmu-6.9' of https://github.com/kvm-x86/linux into HEAD

diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 48ec889452e2b7aadb41def283fe87e9b552cbff..064862d87b9e17a09b1b8cfb7bde633f5e940437 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1399,22 +1399,19 @@ int kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val)
 }
 EXPORT_SYMBOL_GPL(kvm_set_dr);
 
-void kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val)
+unsigned long kvm_get_dr(struct kvm_vcpu *vcpu, int dr)
 {
        size_t size = ARRAY_SIZE(vcpu->arch.db);
 
        switch (dr) {
        case 0 ... 3:
-               *val = vcpu->arch.db[array_index_nospec(dr, size)];
-               break;
+               return vcpu->arch.db[array_index_nospec(dr, size)];
        case 4:
        case 6:
-               *val = vcpu->arch.dr6;
-               break;
+               return vcpu->arch.dr6;
        case 5:
        default: /* 7 */
-               *val = vcpu->arch.dr7;
-               break;
+               return vcpu->arch.dr7;
        }
 }
 EXPORT_SYMBOL_GPL(kvm_get_dr);
@@ -1704,22 +1701,17 @@ static int do_get_msr_feature(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
        struct kvm_msr_entry msr;
        int r;
 
+       /* Unconditionally clear the output for simplicity */
+       msr.data = 0;
        msr.index = index;
        r = kvm_get_msr_feature(&msr);
 
-       if (r == KVM_MSR_RET_INVALID) {
-               /* Unconditionally clear the output for simplicity */
-               *data = 0;
-               if (kvm_msr_ignored_check(index, 0, false))
-                       r = 0;
-       }
-
-       if (r)
-               return r;
+       if (r == KVM_MSR_RET_INVALID && kvm_msr_ignored_check(index, 0, false))
+               r = 0;
 
        *data = msr.data;
 
-       return 0;
+       return r;
 }
 
 static bool __kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer)
@@ -1782,6 +1774,10 @@ static int set_efer(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
        if ((efer ^ old_efer) & KVM_MMU_EFER_ROLE_BITS)
                kvm_mmu_reset_context(vcpu);
 
+       if (!static_cpu_has(X86_FEATURE_XSAVES) &&
+           (efer & EFER_SVME))
+               kvm_hv_xsaves_xsavec_maybe_warn(vcpu);
+
        return 0;
 }
 
@@ -2507,7 +2503,7 @@ static u64 compute_guest_tsc(struct kvm_vcpu *vcpu, s64 kernel_ns)
 }
 
 #ifdef CONFIG_X86_64
-static inline int gtod_is_based_on_tsc(int mode)
+static inline bool gtod_is_based_on_tsc(int mode)
 {
        return mode == VDSO_CLOCKMODE_TSC || mode == VDSO_CLOCKMODE_HVCLOCK;
 }
@@ -4581,7 +4577,7 @@ static bool kvm_is_vm_type_supported(unsigned long type)
 {
        return type == KVM_X86_DEFAULT_VM ||
               (type == KVM_X86_SW_PROTECTED_VM &&
-               IS_ENABLED(CONFIG_KVM_SW_PROTECTED_VM) && tdp_enabled);
+               IS_ENABLED(CONFIG_KVM_SW_PROTECTED_VM) && tdp_mmu_enabled);
 }
 
 int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
@@ -5062,8 +5058,7 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
        int idx;
 
        if (vcpu->preempted) {
-               if (!vcpu->arch.guest_state_protected)
-                       vcpu->arch.preempted_in_kernel = !static_call(kvm_x86_get_cpl)(vcpu);
+               vcpu->arch.preempted_in_kernel = kvm_arch_vcpu_in_kernel(vcpu);
 
                /*
                 * Take the srcu lock as memslots will be accessed to check the gfn
@@ -5454,7 +5449,8 @@ static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
        if (events->flags & KVM_VCPUEVENT_VALID_NMI_PENDING) {
                vcpu->arch.nmi_pending = 0;
                atomic_set(&vcpu->arch.nmi_queued, events->nmi.pending);
-               kvm_make_request(KVM_REQ_NMI, vcpu);
+               if (events->nmi.pending)
+                       kvm_make_request(KVM_REQ_NMI, vcpu);
        }
        static_call(kvm_x86_set_nmi_mask)(vcpu, events->nmi.masked);
 
@@ -5509,18 +5505,23 @@ static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
 static void kvm_vcpu_ioctl_x86_get_debugregs(struct kvm_vcpu *vcpu,
                                             struct kvm_debugregs *dbgregs)
 {
-       unsigned long val;
+       unsigned int i;
 
        memset(dbgregs, 0, sizeof(*dbgregs));
-       memcpy(dbgregs->db, vcpu->arch.db, sizeof(vcpu->arch.db));
-       kvm_get_dr(vcpu, 6, &val);
-       dbgregs->dr6 = val;
+
+       BUILD_BUG_ON(ARRAY_SIZE(vcpu->arch.db) != ARRAY_SIZE(dbgregs->db));
+       for (i = 0; i < ARRAY_SIZE(vcpu->arch.db); i++)
+               dbgregs->db[i] = vcpu->arch.db[i];
+
+       dbgregs->dr6 = vcpu->arch.dr6;
        dbgregs->dr7 = vcpu->arch.dr7;
 }
 
 static int kvm_vcpu_ioctl_x86_set_debugregs(struct kvm_vcpu *vcpu,
                                            struct kvm_debugregs *dbgregs)
 {
+       unsigned int i;
+
        if (dbgregs->flags)
                return -EINVAL;
 
@@ -5529,7 +5530,9 @@ static int kvm_vcpu_ioctl_x86_set_debugregs(struct kvm_vcpu *vcpu,
        if (!kvm_dr7_valid(dbgregs->dr7))
                return -EINVAL;
 
-       memcpy(vcpu->arch.db, dbgregs->db, sizeof(vcpu->arch.db));
+       for (i = 0; i < ARRAY_SIZE(vcpu->arch.db); i++)
+               vcpu->arch.db[i] = dbgregs->db[i];
+
        kvm_update_dr0123(vcpu);
        vcpu->arch.dr6 = dbgregs->dr6;
        vcpu->arch.dr7 = dbgregs->dr7;
@@ -7016,6 +7019,9 @@ set_identity_unlock:
                r = -EEXIST;
                if (kvm->arch.vpit)
                        goto create_pit_unlock;
+               r = -ENOENT;
+               if (!pic_in_kernel(kvm))
+                       goto create_pit_unlock;
                r = -ENOMEM;
                kvm->arch.vpit = kvm_create_pit(kvm, u.pit_config.flags);
                if (kvm->arch.vpit)
@@ -8164,10 +8170,9 @@ static void emulator_wbinvd(struct x86_emulate_ctxt *ctxt)
        kvm_emulate_wbinvd_noskip(emul_to_vcpu(ctxt));
 }
 
-static void emulator_get_dr(struct x86_emulate_ctxt *ctxt, int dr,
-                           unsigned long *dest)
+static unsigned long emulator_get_dr(struct x86_emulate_ctxt *ctxt, int dr)
 {
-       kvm_get_dr(emul_to_vcpu(ctxt), dr, dest);
+       return kvm_get_dr(emul_to_vcpu(ctxt), dr);
 }
 
 static int emulator_set_dr(struct x86_emulate_ctxt *ctxt, int dr,
@@ -9625,11 +9630,13 @@ static void kvm_x86_check_cpu_compat(void *ret)
        *(int *)ret = kvm_x86_check_processor_compatibility();
 }
 
-static int __kvm_x86_vendor_init(struct kvm_x86_init_ops *ops)
+int kvm_x86_vendor_init(struct kvm_x86_init_ops *ops)
 {
        u64 host_pat;
        int r, cpu;
 
+       guard(mutex)(&vendor_module_lock);
+
        if (kvm_x86_ops.hardware_enable) {
                pr_err("already loaded vendor module '%s'\n", kvm_x86_ops.name);
                return -EEXIST;
@@ -9759,17 +9766,6 @@ out_free_x86_emulator_cache:
        kmem_cache_destroy(x86_emulator_cache);
        return r;
 }
-
-int kvm_x86_vendor_init(struct kvm_x86_init_ops *ops)
-{
-       int r;
-
-       mutex_lock(&vendor_module_lock);
-       r = __kvm_x86_vendor_init(ops);
-       mutex_unlock(&vendor_module_lock);
-
-       return r;
-}
 EXPORT_SYMBOL_GPL(kvm_x86_vendor_init);
 
 void kvm_x86_vendor_exit(void)
@@ -10666,12 +10662,6 @@ static void kvm_vcpu_reload_apic_access_page(struct kvm_vcpu *vcpu)
        static_call_cond(kvm_x86_set_apic_access_page_addr)(vcpu);
 }
 
-void __kvm_request_immediate_exit(struct kvm_vcpu *vcpu)
-{
-       smp_send_reschedule(vcpu->cpu);
-}
-EXPORT_SYMBOL_GPL(__kvm_request_immediate_exit);
-
 /*
  * Called within kvm->srcu read side.
  * Returns 1 to let vcpu_run() continue the guest execution loop without
@@ -10921,10 +10911,8 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
                goto cancel_injection;
        }
 
-       if (req_immediate_exit) {
+       if (req_immediate_exit)
                kvm_make_request(KVM_REQ_EVENT, vcpu);
-               static_call(kvm_x86_request_immediate_exit)(vcpu);
-       }
 
        fpregs_assert_state_consistent();
        if (test_thread_flag(TIF_NEED_FPU_LOAD))
@@ -10955,7 +10943,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
                WARN_ON_ONCE((kvm_vcpu_apicv_activated(vcpu) != kvm_vcpu_apicv_active(vcpu)) &&
                             (kvm_get_apic_mode(vcpu) != LAPIC_MODE_DISABLED));
 
-               exit_fastpath = static_call(kvm_x86_vcpu_run)(vcpu);
+               exit_fastpath = static_call(kvm_x86_vcpu_run)(vcpu, req_immediate_exit);
                if (likely(exit_fastpath != EXIT_FASTPATH_REENTER_GUEST))
                        break;
 
@@ -12053,27 +12041,9 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
        if (r < 0)
                return r;
 
-       if (irqchip_in_kernel(vcpu->kvm)) {
-               r = kvm_create_lapic(vcpu, lapic_timer_advance_ns);
-               if (r < 0)
-                       goto fail_mmu_destroy;
-
-               /*
-                * Defer evaluating inhibits until the vCPU is first run, as
-                * this vCPU will not get notified of any changes until this
-                * vCPU is visible to other vCPUs (marked online and added to
-                * the set of vCPUs).  Opportunistically mark APICv active as
-                * VMX in particularly is highly unlikely to have inhibits.
-                * Ignore the current per-VM APICv state so that vCPU creation
-                * is guaranteed to run with a deterministic value, the request
-                * will ensure the vCPU gets the correct state before VM-Entry.
-                */
-               if (enable_apicv) {
-                       vcpu->arch.apic->apicv_active = true;
-                       kvm_make_request(KVM_REQ_APICV_UPDATE, vcpu);
-               }
-       } else
-               static_branch_inc(&kvm_has_noapic_vcpu);
+       r = kvm_create_lapic(vcpu, lapic_timer_advance_ns);
+       if (r < 0)
+               goto fail_mmu_destroy;
 
        r = -ENOMEM;
 
@@ -12194,8 +12164,6 @@ void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
        srcu_read_unlock(&vcpu->kvm->srcu, idx);
        free_page((unsigned long)vcpu->arch.pio_data);
        kvfree(vcpu->arch.cpuid_entries);
-       if (!lapic_in_kernel(vcpu))
-               static_branch_dec(&kvm_has_noapic_vcpu);
 }
 
 void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
@@ -12472,9 +12440,6 @@ bool kvm_vcpu_is_bsp(struct kvm_vcpu *vcpu)
        return (vcpu->arch.apic_base & MSR_IA32_APICBASE_BSP) != 0;
 }
 
-__read_mostly DEFINE_STATIC_KEY_FALSE(kvm_has_noapic_vcpu);
-EXPORT_SYMBOL_GPL(kvm_has_noapic_vcpu);
-
 void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu)
 {
        struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
@@ -13077,11 +13042,13 @@ int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
 
 bool kvm_arch_dy_has_pending_interrupt(struct kvm_vcpu *vcpu)
 {
-       if (kvm_vcpu_apicv_active(vcpu) &&
-           static_call(kvm_x86_dy_apicv_has_pending_interrupt)(vcpu))
-               return true;
+       return kvm_vcpu_apicv_active(vcpu) &&
+              static_call(kvm_x86_dy_apicv_has_pending_interrupt)(vcpu);
+}
 
-       return false;
+bool kvm_arch_vcpu_preempted_in_kernel(struct kvm_vcpu *vcpu)
+{
+       return vcpu->arch.preempted_in_kernel;
 }
 
 bool kvm_arch_dy_runnable(struct kvm_vcpu *vcpu)
@@ -13104,9 +13071,6 @@ bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
        if (vcpu->arch.guest_state_protected)
                return true;
 
-       if (vcpu != kvm_get_running_vcpu())
-               return vcpu->arch.preempted_in_kernel;
-
        return static_call(kvm_x86_get_cpl)(vcpu) == 0;
 }
 
@@ -13901,9 +13865,6 @@ module_init(kvm_x86_init);
 
 static void __exit kvm_x86_exit(void)
 {
-       /*
-        * If module_init() is implemented, module_exit() must also be
-        * implemented to allow module unload.
-        */
+       WARN_ON_ONCE(static_branch_unlikely(&kvm_has_noapic_vcpu));
 }
 module_exit(kvm_x86_exit);
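
As a rough illustration of the kvm_get_dr() conversion in the first hunk (the out-parameter is dropped in favor of a plain return value, so callers can use the result directly in expressions), here is a minimal standalone sketch of the same refactoring pattern. All names and the struct below are hypothetical stand-ins, not the kernel's structures or APIs.

/*
 * refactor_sketch.c - out-parameter vs. return-value accessor (hypothetical).
 * Build with GCC/Clang; "case 0 ... 3" is a GNU C range extension, as used
 * in the kernel source above.
 */
#include <stdio.h>

/* Hypothetical stand-in for vcpu->arch.db/dr6/dr7. */
struct debug_state {
	unsigned long db[4];
	unsigned long dr6;
	unsigned long dr7;
};

/* Old style: result is written through an out-parameter. */
static void get_dr_old(const struct debug_state *s, int dr, unsigned long *val)
{
	switch (dr) {
	case 0 ... 3:
		*val = s->db[dr];
		break;
	case 4:
	case 6:
		*val = s->dr6;
		break;
	default: /* 5 and 7 */
		*val = s->dr7;
		break;
	}
}

/* New style, mirroring the hunk above: return the value directly. */
static unsigned long get_dr_new(const struct debug_state *s, int dr)
{
	switch (dr) {
	case 0 ... 3:
		return s->db[dr];
	case 4:
	case 6:
		return s->dr6;
	default: /* 5 and 7 */
		return s->dr7;
	}
}

int main(void)
{
	struct debug_state s = {
		.db  = { 1, 2, 3, 4 },
		.dr6 = 0xffff0ff0,
		.dr7 = 0x400,
	};
	unsigned long v;

	/* Old style: caller needs a local variable and a separate statement. */
	get_dr_old(&s, 6, &v);

	/* New style: the value can be consumed inline. */
	printf("old: %#lx new: %#lx\n", v, get_dr_new(&s, 6));
	return 0;
}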