git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
KVM: Implement barriers before accessing kvm->buses[] on SRCU read paths
author: Keir Fraser <keirf@google.com>
Tue, 9 Sep 2025 10:00:06 +0000 (10:00 +0000)
committer: Marc Zyngier <maz@kernel.org>
Mon, 15 Sep 2025 09:55:23 +0000 (10:55 +0100)
This ensures that, if a VCPU has "observed" that an IO registration has
occurred, the instruction currently being trapped or emulated will also
observe the IO registration.

At the same time, enforce that kvm_get_bus() is used only on the
update side, ensuring that a long-term reference cannot be obtained by
an SRCU reader.

Signed-off-by: Keir Fraser <keirf@google.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
arch/x86/kvm/vmx/vmx.c
include/linux/kvm_host.h
virt/kvm/kvm_main.c

index aa157fe5b7b31802aa71351db92c8d1b68111432..0bdf9405969a394bcc18dfd9fd1a0c303858e828 100644 (file)
@@ -5785,6 +5785,13 @@ static int handle_invalid_guest_state(struct kvm_vcpu *vcpu)
                if (kvm_test_request(KVM_REQ_EVENT, vcpu))
                        return 1;
 
+               /*
+                * Ensure that any updates to kvm->buses[] observed by the
+                * previous instruction (emulated or otherwise) are also
+                * visible to the instruction KVM is about to emulate.
+                */
+               smp_rmb();
+
                if (!kvm_emulate_instruction(vcpu, 0))
                        return 0;
 
index 15656b7fba6c7f85f6723ef1f54b66a6fc0e90cb..e7d6111cf25473f1cc83a163370d2dfa4c6c5f8e 100644 (file)
@@ -966,11 +966,15 @@ static inline bool kvm_dirty_log_manual_protect_and_init_set(struct kvm *kvm)
        return !!(kvm->manual_dirty_log_protect & KVM_DIRTY_LOG_INITIALLY_SET);
 }
 
+/*
+ * Get a bus reference under the update-side lock. No long-term SRCU reader
+ * references are permitted, to avoid stale reads vs concurrent IO
+ * registrations.
+ */
 static inline struct kvm_io_bus *kvm_get_bus(struct kvm *kvm, enum kvm_bus idx)
 {
-       return srcu_dereference_check(kvm->buses[idx], &kvm->srcu,
-                                     lockdep_is_held(&kvm->slots_lock) ||
-                                     !refcount_read(&kvm->users_count));
+       return rcu_dereference_protected(kvm->buses[idx],
+                                        lockdep_is_held(&kvm->slots_lock));
 }
 
 static inline struct kvm_vcpu *kvm_get_vcpu(struct kvm *kvm, int i)
index 6c07dd423458c8ad93f2e75a8c5f650dda0175a0..870ad8ea93a78b524b99aa7177dd81eef27c12a0 100644 (file)
@@ -1103,6 +1103,14 @@ void __weak kvm_arch_create_vm_debugfs(struct kvm *kvm)
 {
 }
 
+/* Called only on cleanup and destruction paths when there are no users. */
+static inline struct kvm_io_bus *kvm_get_bus_for_destruction(struct kvm *kvm,
+                                                            enum kvm_bus idx)
+{
+       return rcu_dereference_protected(kvm->buses[idx],
+                                        !refcount_read(&kvm->users_count));
+}
+
 static struct kvm *kvm_create_vm(unsigned long type, const char *fdname)
 {
        struct kvm *kvm = kvm_arch_alloc_vm();
@@ -1228,7 +1236,7 @@ out_err_no_disable:
 out_err_no_arch_destroy_vm:
        WARN_ON_ONCE(!refcount_dec_and_test(&kvm->users_count));
        for (i = 0; i < KVM_NR_BUSES; i++)
-               kfree(kvm_get_bus(kvm, i));
+               kfree(kvm_get_bus_for_destruction(kvm, i));
        kvm_free_irq_routing(kvm);
 out_err_no_irq_routing:
        cleanup_srcu_struct(&kvm->irq_srcu);
@@ -1276,7 +1284,7 @@ static void kvm_destroy_vm(struct kvm *kvm)
 
        kvm_free_irq_routing(kvm);
        for (i = 0; i < KVM_NR_BUSES; i++) {
-               struct kvm_io_bus *bus = kvm_get_bus(kvm, i);
+               struct kvm_io_bus *bus = kvm_get_bus_for_destruction(kvm, i);
 
                if (bus)
                        kvm_io_bus_destroy(bus);
@@ -5843,6 +5851,18 @@ static int __kvm_io_bus_write(struct kvm_vcpu *vcpu, struct kvm_io_bus *bus,
        return -EOPNOTSUPP;
 }
 
+static struct kvm_io_bus *kvm_get_bus_srcu(struct kvm *kvm, enum kvm_bus idx)
+{
+       /*
+        * Ensure that any updates to kvm->buses[] observed by the previous vCPU
+        * machine instruction are also visible to the vCPU machine instruction
+        * that triggered this call.
+        */
+       smp_mb__after_srcu_read_lock();
+
+       return srcu_dereference(kvm->buses[idx], &kvm->srcu);
+}
+
 int kvm_io_bus_write(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr,
                     int len, const void *val)
 {
@@ -5855,7 +5875,7 @@ int kvm_io_bus_write(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr,
                .len = len,
        };
 
-       bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu);
+       bus = kvm_get_bus_srcu(vcpu->kvm, bus_idx);
        if (!bus)
                return -ENOMEM;
        r = __kvm_io_bus_write(vcpu, bus, &range, val);
@@ -5874,7 +5894,7 @@ int kvm_io_bus_write_cookie(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx,
                .len = len,
        };
 
-       bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu);
+       bus = kvm_get_bus_srcu(vcpu->kvm, bus_idx);
        if (!bus)
                return -ENOMEM;
 
@@ -5924,7 +5944,7 @@ int kvm_io_bus_read(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr,
                .len = len,
        };
 
-       bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu);
+       bus = kvm_get_bus_srcu(vcpu->kvm, bus_idx);
        if (!bus)
                return -ENOMEM;
        r = __kvm_io_bus_read(vcpu, bus, &range, val);
@@ -6033,7 +6053,7 @@ struct kvm_io_device *kvm_io_bus_get_dev(struct kvm *kvm, enum kvm_bus bus_idx,
 
        srcu_idx = srcu_read_lock(&kvm->srcu);
 
-       bus = srcu_dereference(kvm->buses[bus_idx], &kvm->srcu);
+       bus = kvm_get_bus_srcu(kvm, bus_idx);
        if (!bus)
                goto out_unlock;