]> git.ipfire.org Git - thirdparty/kernel/stable-queue.git/blob
c283c04068fe29491e2c35e94614816c12d4e4ea
[thirdparty/kernel/stable-queue.git] /
1 From 8d5678a76689acbf91245a3791fe853ab773090f Mon Sep 17 00:00:00 2001
2 From: Hou Wenlong <houwenlong.hwl@antgroup.com>
3 Date: Tue, 15 Mar 2022 17:35:13 +0800
4 Subject: KVM: x86/mmu: Don't rebuild page when the page is synced and no tlb flushing is required
5
6 From: Hou Wenlong <houwenlong.hwl@antgroup.com>
7
8 commit 8d5678a76689acbf91245a3791fe853ab773090f upstream.
9
10 Before Commit c3e5e415bc1e6 ("KVM: X86: Change kvm_sync_page()
11 to return true when remote flush is needed"), the return value
12 of kvm_sync_page() indicates whether the page is synced, and
13 kvm_mmu_get_page() would rebuild page when the sync fails.
14 But now, kvm_sync_page() returns false when the page is
15 synced and no tlb flushing is required, which leads to
16 rebuilding the page in kvm_mmu_get_page(). So return the
17 return value of mmu->sync_page() directly and check it in
18 kvm_mmu_get_page(). If the sync fails, the page will be
19 zapped and the invalid_list is not empty, so setting flush
20 to true is acceptable in mmu_sync_children().
21
22 Cc: stable@vger.kernel.org
23 Fixes: c3e5e415bc1e6 ("KVM: X86: Change kvm_sync_page() to return true when remote flush is needed")
24 Signed-off-by: Hou Wenlong <houwenlong.hwl@antgroup.com>
25 Acked-by: Lai Jiangshan <jiangshanlai@gmail.com>
26 Message-Id: <0dabeeb789f57b0d793f85d073893063e692032d.1647336064.git.houwenlong.hwl@antgroup.com>
27 [mmu_sync_children should not flush if the page is zapped. - Paolo]
28 Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
29 Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
30 ---
31 arch/x86/kvm/mmu/mmu.c | 18 +++++++++---------
32 1 file changed, 9 insertions(+), 9 deletions(-)
33
34 --- a/arch/x86/kvm/mmu/mmu.c
35 +++ b/arch/x86/kvm/mmu/mmu.c
36 @@ -1843,17 +1843,14 @@ static void kvm_mmu_commit_zap_page(stru
37 &(_kvm)->arch.mmu_page_hash[kvm_page_table_hashfn(_gfn)]) \
38 if ((_sp)->gfn != (_gfn) || (_sp)->role.direct) {} else
39
40 -static bool kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
41 +static int kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
42 struct list_head *invalid_list)
43 {
44 int ret = vcpu->arch.mmu->sync_page(vcpu, sp);
45
46 - if (ret < 0) {
47 + if (ret < 0)
48 kvm_mmu_prepare_zap_page(vcpu->kvm, sp, invalid_list);
49 - return false;
50 - }
51 -
52 - return !!ret;
53 + return ret;
54 }
55
56 static bool kvm_mmu_remote_flush_or_zap(struct kvm *kvm,
57 @@ -1975,7 +1972,7 @@ static int mmu_sync_children(struct kvm_
58
59 for_each_sp(pages, sp, parents, i) {
60 kvm_unlink_unsync_page(vcpu->kvm, sp);
61 - flush |= kvm_sync_page(vcpu, sp, &invalid_list);
62 + flush |= kvm_sync_page(vcpu, sp, &invalid_list) > 0;
63 mmu_pages_clear_parents(&parents);
64 }
65 if (need_resched() || rwlock_needbreak(&vcpu->kvm->mmu_lock)) {
66 @@ -2016,6 +2013,7 @@ static struct kvm_mmu_page *kvm_mmu_get_
67 struct hlist_head *sp_list;
68 unsigned quadrant;
69 struct kvm_mmu_page *sp;
70 + int ret;
71 int collisions = 0;
72 LIST_HEAD(invalid_list);
73
74 @@ -2068,11 +2066,13 @@ static struct kvm_mmu_page *kvm_mmu_get_
75 * If the sync fails, the page is zapped. If so, break
76 * in order to rebuild it.
77 */
78 - if (!kvm_sync_page(vcpu, sp, &invalid_list))
79 + ret = kvm_sync_page(vcpu, sp, &invalid_list);
80 + if (ret < 0)
81 break;
82
83 WARN_ON(!list_empty(&invalid_list));
84 - kvm_flush_remote_tlbs(vcpu->kvm);
85 + if (ret > 0)
86 + kvm_flush_remote_tlbs(vcpu->kvm);
87 }
88
89 __clear_sp_write_flooding_count(sp);