KVM: LoongArch: Mark "struct page" pfns accessed only in "slow" page fault path
Author:    Sean Christopherson <seanjc@google.com>
Date:      Thu, 10 Oct 2024 18:24:08 +0000 (11:24 -0700)
Committer: Paolo Bonzini <pbonzini@redhat.com>
Date:      Fri, 25 Oct 2024 17:00:49 +0000 (13:00 -0400)
Mark pages accessed only in the slow path, before dropping mmu_lock when
faulting in guest memory so that LoongArch can convert to
kvm_release_faultin_page() without tripping its lockdep assertion on
mmu_lock being held.

Reviewed-by: Bibo Mao <maobibo@loongson.cn>
Signed-off-by: Sean Christopherson <seanjc@google.com>
Tested-by: Dmitry Osipenko <dmitry.osipenko@collabora.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Message-ID: <20241010182427.1434605-67-seanjc@google.com>
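
For context, an approximate reconstruction (hedged; not a verbatim copy of
the series) of the kvm_release_faultin_page() helper the message refers to,
showing the lockdep assertion the fast-path marking would have tripped:

static inline void kvm_release_faultin_page(struct kvm *kvm, struct page *page,
					    bool unlocked, bool dirty)
{
	/* Callers must hold mmu_lock unless they explicitly dropped it. */
	lockdep_assert_once(unlocked || lockdep_is_held(&kvm->mmu_lock));

	if (!page)
		return;

	/*
	 * Releasing a page "dirty" also marks it accessed, so all of the
	 * accessed/dirty bookkeeping for the backing struct page happens
	 * here, while mmu_lock is still held.
	 */
	if (dirty)
		kvm_release_page_dirty(page);
	else
		kvm_release_page_clean(page);
}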

diff --git a/arch/loongarch/kvm/mmu.c b/arch/loongarch/kvm/mmu.c
index cc2a5f289b1479686c575f4577de2203e5a99917..ed43504c5c7e7fb7b1f51e72af1eaf80a4758ee1 100644
--- a/arch/loongarch/kvm/mmu.c
+++ b/arch/loongarch/kvm/mmu.c
@@ -552,12 +552,10 @@ bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
 static int kvm_map_page_fast(struct kvm_vcpu *vcpu, unsigned long gpa, bool write)
 {
        int ret = 0;
-       kvm_pfn_t pfn = 0;
        kvm_pte_t *ptep, changed, new;
        gfn_t gfn = gpa >> PAGE_SHIFT;
        struct kvm *kvm = vcpu->kvm;
        struct kvm_memory_slot *slot;
-       struct page *page;
 
        spin_lock(&kvm->mmu_lock);
 
@@ -570,8 +568,6 @@ static int kvm_map_page_fast(struct kvm_vcpu *vcpu, unsigned long gpa, bool write)
 
        /* Track access to pages marked old */
        new = kvm_pte_mkyoung(*ptep);
-       /* call kvm_set_pfn_accessed() after unlock */
-
        if (write && !kvm_pte_dirty(new)) {
                if (!kvm_pte_write(new)) {
                        ret = -EFAULT;
@@ -595,22 +591,10 @@ static int kvm_map_page_fast(struct kvm_vcpu *vcpu, unsigned long gpa, bool write)
        }
 
        changed = new ^ (*ptep);
-       if (changed) {
+       if (changed)
                kvm_set_pte(ptep, new);
-               pfn = kvm_pte_pfn(new);
-               page = kvm_pfn_to_refcounted_page(pfn);
-               if (page)
-                       get_page(page);
-       }
-       spin_unlock(&kvm->mmu_lock);
 
-       if (changed) {
-               if (kvm_pte_young(changed))
-                       kvm_set_pfn_accessed(pfn);
-
-               if (page)
-                       put_page(page);
-       }
+       spin_unlock(&kvm->mmu_lock);
 
        if (kvm_pte_dirty(changed))
                mark_page_dirty(kvm, gfn);
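
For completeness, a hedged sketch (illustrative names, not the actual
LoongArch slow path) of the ordering the message describes: accessed/dirty
bookkeeping for the backing struct page moves to just before mmu_lock is
dropped, where the lockdep assertion above is satisfied:

static void example_slow_path_tail(struct kvm *kvm, kvm_pte_t *ptep,
				   kvm_pte_t new, struct page *page,
				   bool write)
{
	lockdep_assert_held(&kvm->mmu_lock);

	/* Install the final PTE while mmu_lock is still held. */
	kvm_set_pte(ptep, new);

	/* Mark the page accessed (and dirty on write) and drop KVM's ref. */
	kvm_release_faultin_page(kvm, page, false, write);

	spin_unlock(&kvm->mmu_lock);
}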