git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
KVM: Clean up coalesced MMIO ring full check
author: Sean Christopherson <seanjc@google.com>
Wed, 28 Aug 2024 18:14:46 +0000 (11:14 -0700)
committer: Sean Christopherson <seanjc@google.com>
Fri, 30 Aug 2024 02:38:33 +0000 (19:38 -0700)
Fold coalesced_mmio_has_room() into its sole caller, coalesced_mmio_write(),
as it's really just a single line of code, has a goofy return value, and
is unnecessarily brittle.

E.g. if coalesced_mmio_has_room() were to check ring->last directly, or
the caller failed to use READ_ONCE(), KVM would be susceptible to TOCTOU
attacks from userspace.

Opportunistically add a comment explaining why on earth KVM leaves one
entry free, which may not be obvious to readers that aren't familiar with
ring buffers.

No functional change intended.

Reviewed-by: Ilias Stamatis <ilstam@amazon.com>
Cc: Paul Durrant <paul@xen.org>
Link: https://lore.kernel.org/r/20240828181446.652474-3-seanjc@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
virt/kvm/coalesced_mmio.c

index 184c5c40c9c191cae5e396972060bced43b22b84..375d6285475e919b7cd21caac5337557d4baced5 100644 (file)
@@ -40,25 +40,6 @@ static int coalesced_mmio_in_range(struct kvm_coalesced_mmio_dev *dev,
        return 1;
 }
 
-static int coalesced_mmio_has_room(struct kvm_coalesced_mmio_dev *dev, u32 last)
-{
-       struct kvm_coalesced_mmio_ring *ring;
-
-       /* Are we able to batch it ? */
-
-       /* last is the first free entry
-        * check if we don't meet the first used entry
-        * there is always one unused entry in the buffer
-        */
-       ring = dev->kvm->coalesced_mmio_ring;
-       if ((last + 1) % KVM_COALESCED_MMIO_MAX == READ_ONCE(ring->first)) {
-               /* full */
-               return 0;
-       }
-
-       return 1;
-}
-
 static int coalesced_mmio_write(struct kvm_vcpu *vcpu,
                                struct kvm_io_device *this, gpa_t addr,
                                int len, const void *val)
@@ -72,9 +53,15 @@ static int coalesced_mmio_write(struct kvm_vcpu *vcpu,
 
        spin_lock(&dev->kvm->ring_lock);
 
+       /*
+        * last is the index of the entry to fill.  Verify userspace hasn't
+        * set last to be out of range, and that there is room in the ring.
+        * Leave one entry free in the ring so that userspace can differentiate
+        * between an empty ring and a full ring.
+        */
        insert = READ_ONCE(ring->last);
-       if (!coalesced_mmio_has_room(dev, insert) ||
-           insert >= KVM_COALESCED_MMIO_MAX) {
+       if (insert >= KVM_COALESCED_MMIO_MAX ||
+           (insert + 1) % KVM_COALESCED_MMIO_MAX == READ_ONCE(ring->first)) {
                spin_unlock(&dev->kvm->ring_lock);
                return -EOPNOTSUPP;
        }