kvm: x86: mmu: Drop the need_remote_flush() function
author Junaid Shahid <junaids@google.com>
Sat, 23 Jul 2022 02:43:16 +0000 (19:43 -0700)
committer Paolo Bonzini <pbonzini@redhat.com>
Fri, 19 Aug 2022 11:38:02 +0000 (07:38 -0400)
need_remote_flush() is only used by kvm_mmu_pte_write(), which no longer
actually creates the new SPTE and instead just clears the old one. So we
only need to check whether the old SPTE was shadow-present instead of
calling need_remote_flush(), and the function can be dropped. It was
incomplete anyway, as it didn't take access-tracking into account.

This patch should not result in any functional change.
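[ Illustrative note, not part of the patch: a minimal, self-contained C model
  of why need_remote_flush(old, new) degenerates to a shadow-present check on
  the old SPTE once the "new" value passed in is always zero. The bit layout,
  masks and the userspace test harness below are simplified stand-ins, not the
  kernel's real spte.h definitions; only the body of need_remote_flush() is
  copied from the function removed by this patch. ]

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Simplified stand-ins for the kernel's SPTE definitions (illustrative only). */
#define SPTE_PRESENT_BIT	(1ull << 11)	/* models the shadow-present bit */
#define SPTE_BASE_ADDR_MASK	0x000ffffffffff000ull
#define SPTE_PERM_MASK		0x7ull		/* models the permission bits */
#define SHADOW_NX_MASK		(1ull << 63)

static bool is_shadow_present_pte(uint64_t spte)
{
	return spte & SPTE_PRESENT_BIT;
}

/* The dropped helper, reproduced for comparison. */
static bool need_remote_flush(uint64_t old, uint64_t new)
{
	if (!is_shadow_present_pte(old))
		return false;
	if (!is_shadow_present_pte(new))
		return true;
	if ((old ^ new) & SPTE_BASE_ADDR_MASK)
		return true;
	old ^= SHADOW_NX_MASK;
	new ^= SHADOW_NX_MASK;
	return (old & ~new & SPTE_PERM_MASK) != 0;
}

int main(void)
{
	/*
	 * kvm_mmu_pte_write() now only zaps the old SPTE, i.e. the "new"
	 * value is always the non-present 0, so only the first two checks
	 * of need_remote_flush() can ever fire: not-present old -> false,
	 * present old -> true.  Spot-check that the two expressions agree
	 * for a few representative old SPTE values.
	 */
	uint64_t samples[] = {
		0,				/* never present */
		SPTE_PRESENT_BIT,		/* present, no extra bits */
		SPTE_PRESENT_BIT | 0x5ull,	/* present, some permissions */
		0x0000000012345000ull,		/* non-present, address bits only */
	};

	for (size_t i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		assert(need_remote_flush(samples[i], 0) ==
		       is_shadow_present_pte(samples[i]));

	printf("need_remote_flush(old, 0) == is_shadow_present_pte(old) for all samples\n");
	return 0;
}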

Signed-off-by: Junaid Shahid <junaids@google.com>
Reviewed-by: David Matlack <dmatlack@google.com>
Reviewed-by: Sean Christopherson <seanjc@google.com>
Message-Id: <20220723024316.2725328-1-junaids@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
arch/x86/kvm/mmu/mmu.c

index 126fa9aec64cda2b4f80b9558426ffafa34cfb48..fae2c0863e45b75f1c2f4615429e338ed946402c 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -5361,19 +5361,6 @@ void kvm_mmu_free_obsolete_roots(struct kvm_vcpu *vcpu)
        __kvm_mmu_free_obsolete_roots(vcpu->kvm, &vcpu->arch.guest_mmu);
 }
 
-static bool need_remote_flush(u64 old, u64 new)
-{
-       if (!is_shadow_present_pte(old))
-               return false;
-       if (!is_shadow_present_pte(new))
-               return true;
-       if ((old ^ new) & SPTE_BASE_ADDR_MASK)
-               return true;
-       old ^= shadow_nx_mask;
-       new ^= shadow_nx_mask;
-       return (old & ~new & SPTE_PERM_MASK) != 0;
-}
-
 static u64 mmu_pte_write_fetch_gpte(struct kvm_vcpu *vcpu, gpa_t *gpa,
                                    int *bytes)
 {
@@ -5519,7 +5506,7 @@ static void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
                        mmu_page_zap_pte(vcpu->kvm, sp, spte, NULL);
                        if (gentry && sp->role.level != PG_LEVEL_4K)
                                ++vcpu->kvm->stat.mmu_pde_zapped;
-                       if (need_remote_flush(entry, *spte))
+                       if (is_shadow_present_pte(entry))
                                flush = true;
                        ++spte;
                }