KVM: Stop looking for coalesced MMIO zones if the bus is destroyed
author     Sean Christopherson <seanjc@google.com>
           Mon, 12 Apr 2021 22:20:49 +0000 (15:20 -0700)
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>
           Fri, 14 May 2021 07:44:15 +0000 (09:44 +0200)
commit 5d3c4c79384af06e3c8e25b7770b6247496b4417 upstream.

Abort the walk of coalesced MMIO zones if kvm_io_bus_unregister_dev()
fails to allocate memory for the new instance of the bus.  If it can't
instantiate a new bus, unregister_dev() destroys all devices _except_ the
target device.  But, it doesn't tell the caller that it obliterated the
bus and invoked the destructor for all devices that were on the bus.  In
the coalesced MMIO case, this can result in a deleted list entry
dereference due to attempting to continue iterating on coalesced_zones
after future entries (in the walk) have been deleted.
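
For illustration, the following is a minimal userspace C sketch (not kernel
code; the list layout and names such as nuke_all_but() are hypothetical) of
why the walk has to stop: a "safe" iteration only caches the next entry
before visiting the current one, so if the callee frees that cached entry as
a side effect, the following iteration dereferences freed memory.

  /* build: cc -o demo demo.c && ./demo (ideally under ASan or valgrind) */
  #include <errno.h>
  #include <stdio.h>
  #include <stdlib.h>

  struct node {
      int id;
      struct node *next;
  };

  static struct node *head;

  /*
   * Stand-in for kvm_io_bus_unregister_dev() hitting its allocation
   * failure path: it destroys every node _except_ @keep and reports
   * the failure to the caller.
   */
  static int nuke_all_but(struct node *keep)
  {
      struct node *n = head, *next;

      while (n) {
          next = n->next;
          if (n != keep)
              free(n);
          n = next;
      }
      keep->next = NULL;
      head = keep;
      return -ENOMEM;
  }

  int main(void)
  {
      struct node *n, *tmp;
      int i, r;

      /* Build a three-node list: 1 -> 2 -> 3. */
      for (i = 3; i >= 1; i--) {
          n = malloc(sizeof(*n));
          n->id = i;
          n->next = head;
          head = n;
      }

      /* "Safe" walk: cache the next pointer before visiting the entry. */
      for (n = head; n; n = tmp) {
          tmp = n->next;           /* cached before the callee runs */
          r = nuke_all_but(n);     /* may free *tmp as a side effect */
          free(n);                 /* destructor for the current entry */
          head = NULL;
          if (r) {
              printf("callee failed (%d), list is gone, stop\n", r);
              break;               /* abort the walk */
          }
          /* Without the break, the next iteration reads freed *tmp. */
      }
      return 0;
  }

The same reasoning applies to the break added below in
kvm_vm_ioctl_unregister_coalesced_mmio(): once kvm_io_bus_unregister_dev()
reports a failure, coalesced_zones has already been emptied and the walk
must not continue.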

Opportunistically add curly braces to the for-loop, which encompasses
many lines but sneaks by without braces due to the guts being a single
if statement.

Fixes: f65886606c2d ("KVM: fix memory leak in kvm_io_bus_unregister_dev()")
Cc: stable@vger.kernel.org
Reported-by: Hao Sun <sunhao.th@gmail.com>
Signed-off-by: Sean Christopherson <seanjc@google.com>
Message-Id: <20210412222050.876100-3-seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
include/linux/kvm_host.h
virt/kvm/coalesced_mmio.c
virt/kvm/kvm_main.c

diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 21aa6d736e99999fbb01d78140bf6bd8ff9b70f2..0ec4ec428b9ba049afa0aac3dfd6581cf1498e62 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -192,8 +192,8 @@ int kvm_io_bus_read(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr,
                    int len, void *val);
 int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
                            int len, struct kvm_io_device *dev);
-void kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
-                              struct kvm_io_device *dev);
+int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
+                             struct kvm_io_device *dev);
 struct kvm_io_device *kvm_io_bus_get_dev(struct kvm *kvm, enum kvm_bus bus_idx,
                                         gpa_t addr);
 
diff --git a/virt/kvm/coalesced_mmio.c b/virt/kvm/coalesced_mmio.c
index 8ffd07e2a1602f2ceee0c2c08cec1a666f32b7ca..6b870e4b9b97210566a4b3fb0f449dbeb664637d 100644
--- a/virt/kvm/coalesced_mmio.c
+++ b/virt/kvm/coalesced_mmio.c
@@ -178,21 +178,36 @@ int kvm_vm_ioctl_unregister_coalesced_mmio(struct kvm *kvm,
                                           struct kvm_coalesced_mmio_zone *zone)
 {
        struct kvm_coalesced_mmio_dev *dev, *tmp;
+       int r;
 
        if (zone->pio != 1 && zone->pio != 0)
                return -EINVAL;
 
        mutex_lock(&kvm->slots_lock);
 
-       list_for_each_entry_safe(dev, tmp, &kvm->coalesced_zones, list)
+       list_for_each_entry_safe(dev, tmp, &kvm->coalesced_zones, list) {
                if (zone->pio == dev->zone.pio &&
                    coalesced_mmio_in_range(dev, zone->addr, zone->size)) {
-                       kvm_io_bus_unregister_dev(kvm,
+                       r = kvm_io_bus_unregister_dev(kvm,
                                zone->pio ? KVM_PIO_BUS : KVM_MMIO_BUS, &dev->dev);
                        kvm_iodevice_destructor(&dev->dev);
+
+                       /*
+                        * On failure, unregister destroys all devices on the
+                        * bus _except_ the target device, i.e. coalesced_zones
+                        * has been modified.  No need to restart the walk as
+                        * there aren't any zones left.
+                        */
+                       if (r)
+                               break;
                }
+       }
 
        mutex_unlock(&kvm->slots_lock);
 
+       /*
+        * Ignore the result of kvm_io_bus_unregister_dev(), from userspace's
+        * perspective, the coalesced MMIO is most definitely unregistered.
+        */
        return 0;
 }
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 048b555c5acc94ee99e6b01aeb3c07094fda7006..f83fa0aeeb4518551adb076abbc0c0b300bc0b5f 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -4017,15 +4017,15 @@ int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
 }
 
 /* Caller must hold slots_lock. */
-void kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
-                              struct kvm_io_device *dev)
+int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
+                             struct kvm_io_device *dev)
 {
        int i, j;
        struct kvm_io_bus *new_bus, *bus;
 
        bus = kvm_get_bus(kvm, bus_idx);
        if (!bus)
-               return;
+               return 0;
 
        for (i = 0; i < bus->dev_count; i++)
                if (bus->range[i].dev == dev) {
@@ -4033,7 +4033,7 @@ void kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
                }
 
        if (i == bus->dev_count)
-               return;
+               return 0;
 
        new_bus = kmalloc(struct_size(bus, range, bus->dev_count - 1),
                          GFP_KERNEL_ACCOUNT);
@@ -4054,7 +4054,7 @@ void kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
        rcu_assign_pointer(kvm->buses[bus_idx], new_bus);
        synchronize_srcu_expedited(&kvm->srcu);
        kfree(bus);
-       return;
+       return new_bus ? 0 : -ENOMEM;
 }
 
 struct kvm_io_device *kvm_io_bus_get_dev(struct kvm *kvm, enum kvm_bus bus_idx,