]> git.ipfire.org Git - thirdparty/linux.git/commitdiff
LoongArch: KVM: Mark page accessed and dirty with page ref added
authorBibo Mao <maobibo@loongson.cn>
Tue, 9 Jul 2024 08:25:51 +0000 (16:25 +0800)
committerHuacai Chen <chenhuacai@loongson.cn>
Tue, 9 Jul 2024 08:25:51 +0000 (16:25 +0800)
Function kvm_map_page_fast() is the fast path of the secondary mmu page
fault flow; the pfn is parsed from the secondary mmu page table walker.
However, the corresponding page reference is not added, so it is
dangerous to access the page outside of mmu_lock.

Here a page reference is taken inside mmu_lock, and the functions
kvm_set_pfn_accessed() and kvm_set_pfn_dirty() are called while that
reference is held, so that the page cannot be freed by others.

Also, the explicit kvm_set_pfn_accessed() call is removed from the slow
path, since kvm_release_pfn_clean(), called immediately afterwards,
already marks the page accessed.

Signed-off-by: Bibo Mao <maobibo@loongson.cn>
Signed-off-by: Huacai Chen <chenhuacai@loongson.cn>
arch/loongarch/kvm/mmu.c

index 1057bb984ba090ece20514ffce8a9e928a4526c0..ad80a837e068ed776bf369a2fd96be8bbce0ed7e 100644 (file)
@@ -557,6 +557,7 @@ static int kvm_map_page_fast(struct kvm_vcpu *vcpu, unsigned long gpa, bool writ
        gfn_t gfn = gpa >> PAGE_SHIFT;
        struct kvm *kvm = vcpu->kvm;
        struct kvm_memory_slot *slot;
+       struct page *page;
 
        spin_lock(&kvm->mmu_lock);
 
@@ -599,19 +600,22 @@ static int kvm_map_page_fast(struct kvm_vcpu *vcpu, unsigned long gpa, bool writ
        if (changed) {
                kvm_set_pte(ptep, new);
                pfn = kvm_pte_pfn(new);
+               page = kvm_pfn_to_refcounted_page(pfn);
+               if (page)
+                       get_page(page);
        }
        spin_unlock(&kvm->mmu_lock);
 
-       /*
-        * Fixme: pfn may be freed after mmu_lock
-        * kvm_try_get_pfn(pfn)/kvm_release_pfn pair to prevent this?
-        */
-       if (kvm_pte_young(changed))
-               kvm_set_pfn_accessed(pfn);
+       if (changed) {
+               if (kvm_pte_young(changed))
+                       kvm_set_pfn_accessed(pfn);
 
-       if (kvm_pte_dirty(changed)) {
-               mark_page_dirty(kvm, gfn);
-               kvm_set_pfn_dirty(pfn);
+               if (kvm_pte_dirty(changed)) {
+                       mark_page_dirty(kvm, gfn);
+                       kvm_set_pfn_dirty(pfn);
+               }
+               if (page)
+                       put_page(page);
        }
        return ret;
 out:
@@ -920,7 +924,6 @@ retry:
                kvm_set_pfn_dirty(pfn);
        }
 
-       kvm_set_pfn_accessed(pfn);
        kvm_release_pfn_clean(pfn);
 out:
        srcu_read_unlock(&kvm->srcu, srcu_idx);