git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
KVM: Use mask of harvested dirty ring entries to coalesce dirty ring resets
author: Sean Christopherson <seanjc@google.com>
Fri, 16 May 2025 21:35:39 +0000 (14:35 -0700)
committer: Sean Christopherson <seanjc@google.com>
Fri, 20 Jun 2025 20:41:03 +0000 (13:41 -0700)
Use "mask" instead of a dedicated boolean to track whether or not there
is at least one to-be-reset entry for the current slot+offset.  In the
body of the loop, mask is zero only on the first iteration, i.e. !mask is
equivalent to first_round.

Opportunistically combine the adjacent "if (mask)" statements into a single
if-statement.

No functional change intended.

Cc: Peter Xu <peterx@redhat.com>
Cc: Yan Zhao <yan.y.zhao@intel.com>
Cc: Maxim Levitsky <mlevitsk@redhat.com>
Reviewed-by: Pankaj Gupta <pankaj.gupta@amd.com>
Reviewed-by: James Houghton <jthoughton@google.com>
Reviewed-by: Binbin Wu <binbin.wu@linux.intel.com>
Reviewed-by: Yan Zhao <yan.y.zhao@intel.com>
Reviewed-by: Peter Xu <peterx@redhat.com>
Link: https://lore.kernel.org/r/20250516213540.2546077-6-seanjc@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
virt/kvm/dirty_ring.c

index 939198ac66954288283d57717cca97eade595d92..4caa63e610d261d8bdd5d76a415e5eb894716925 100644 (file)
@@ -121,7 +121,6 @@ int kvm_dirty_ring_reset(struct kvm *kvm, struct kvm_dirty_ring *ring,
        u64 cur_offset, next_offset;
        unsigned long mask = 0;
        struct kvm_dirty_gfn *entry;
-       bool first_round = true;
 
        while (likely((*nr_entries_reset) < INT_MAX)) {
                if (signal_pending(current))
@@ -141,42 +140,42 @@ int kvm_dirty_ring_reset(struct kvm *kvm, struct kvm_dirty_ring *ring,
                ring->reset_index++;
                (*nr_entries_reset)++;
 
-               /*
-                * While the size of each ring is fixed, it's possible for the
-                * ring to be constantly re-dirtied/harvested while the reset
-                * is in-progress (the hard limit exists only to guard against
-                * wrapping the count into negative space).
-                */
-               if (!first_round)
+               if (mask) {
+                       /*
+                        * While the size of each ring is fixed, it's possible
+                        * for the ring to be constantly re-dirtied/harvested
+                        * while the reset is in-progress (the hard limit exists
+                        * only to guard against the count becoming negative).
+                        */
                        cond_resched();
 
-               /*
-                * Try to coalesce the reset operations when the guest is
-                * scanning pages in the same slot.
-                */
-               if (!first_round && next_slot == cur_slot) {
-                       s64 delta = next_offset - cur_offset;
-
-                       if (delta >= 0 && delta < BITS_PER_LONG) {
-                               mask |= 1ull << delta;
-                               continue;
+                       /*
+                        * Try to coalesce the reset operations when the guest
+                        * is scanning pages in the same slot.
+                        */
+                       if (next_slot == cur_slot) {
+                               s64 delta = next_offset - cur_offset;
+
+                               if (delta >= 0 && delta < BITS_PER_LONG) {
+                                       mask |= 1ull << delta;
+                                       continue;
+                               }
+
+                               /* Backwards visit, careful about overflows! */
+                               if (delta > -BITS_PER_LONG && delta < 0 &&
+                               (mask << -delta >> -delta) == mask) {
+                                       cur_offset = next_offset;
+                                       mask = (mask << -delta) | 1;
+                                       continue;
+                               }
                        }
 
-                       /* Backwards visit, careful about overflows!  */
-                       if (delta > -BITS_PER_LONG && delta < 0 &&
-                           (mask << -delta >> -delta) == mask) {
-                               cur_offset = next_offset;
-                               mask = (mask << -delta) | 1;
-                               continue;
-                       }
-               }
-
-               /*
-                * Reset the slot for all the harvested entries that have been
-                * gathered, but not yet fully processed.
-                */
-               if (mask)
+                       /*
+                        * Reset the slot for all the harvested entries that
+                        * have been gathered, but not yet fully processed.
+                        */
                        kvm_reset_dirty_gfn(kvm, cur_slot, cur_offset, mask);
+               }
 
                /*
                 * The current slot was reset or this is the first harvested
@@ -185,7 +184,6 @@ int kvm_dirty_ring_reset(struct kvm *kvm, struct kvm_dirty_ring *ring,
                cur_slot = next_slot;
                cur_offset = next_offset;
                mask = 1;
-               first_round = false;
        }
 
        /*