KVM: x86/mmu: remove unnecessary "bool shared" argument from functions
author     Paolo Bonzini <pbonzini@redhat.com>
           Sat, 25 Nov 2023 08:33:57 +0000 (03:33 -0500)
committer  Sean Christopherson <seanjc@google.com>
           Fri, 1 Dec 2023 15:52:07 +0000 (07:52 -0800)

Neither tdp_mmu_next_root nor kvm_tdp_mmu_put_root needs to know
if the lock is taken for read or write.  Either way, protection
is achieved via RCU and tdp_mmu_pages_lock.  Remove the argument
and just assert that the lock is taken.

Reviewed-by: Maxim Levitsky <mlevitsk@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Link: https://lore.kernel.org/r/20231125083400.1399197-2-pbonzini@redhat.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
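
For context, lockdep_assert_held() on an rwlock is satisfied by either a
read or a write holder, which is what lets the single assertion replace the
mode-specific kvm_lockdep_assert_mmu_lock_held(kvm, shared).  A minimal
sketch of that semantics follows; the helper names (put_root_any_mode,
example_reader, example_writer) are illustrations, not kernel code, while
lockdep_assert_held(), read_lock() and write_lock() are the real APIs:

/*
 * Sketch only: lockdep_assert_held() accepts both lock modes on an
 * rwlock such as kvm->mmu_lock, so the callee no longer needs a
 * "shared" hint from its caller.
 */
static void put_root_any_mode(struct kvm *kvm, struct kvm_mmu_page *root)
{
	lockdep_assert_held(&kvm->mmu_lock);	/* passes for read OR write */
	kvm_tdp_mmu_put_root(kvm, root);	/* no "shared" argument */
}

static void example_reader(struct kvm *kvm, struct kvm_mmu_page *root)
{
	read_lock(&kvm->mmu_lock);
	put_root_any_mode(kvm, root);		/* assertion satisfied */
	read_unlock(&kvm->mmu_lock);
}

static void example_writer(struct kvm *kvm, struct kvm_mmu_page *root)
{
	write_lock(&kvm->mmu_lock);
	put_root_any_mode(kvm, root);		/* assertion satisfied */
	write_unlock(&kvm->mmu_lock);
}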
arch/x86/kvm/mmu/mmu.c
arch/x86/kvm/mmu/tdp_mmu.c
arch/x86/kvm/mmu/tdp_mmu.h

diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 29ac130fcb843d09caf7d752447ec458d07292dd..ace9f7c13132281e92796a197af31738a74fed29 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -3580,7 +3580,7 @@ static void mmu_free_root_page(struct kvm *kvm, hpa_t *root_hpa,
                return;
 
        if (is_tdp_mmu_page(sp))
-               kvm_tdp_mmu_put_root(kvm, sp, false);
+               kvm_tdp_mmu_put_root(kvm, sp);
        else if (!--sp->root_count && sp->role.invalid)
                kvm_mmu_prepare_zap_page(kvm, sp, invalid_list);
 
diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c
index 038983b1357408bbed32e93b3e715d6b670d97c3..8cd805fa151646813f8d93901043761e333f4671 100644
--- a/arch/x86/kvm/mmu/tdp_mmu.c
+++ b/arch/x86/kvm/mmu/tdp_mmu.c
@@ -73,10 +73,13 @@ static void tdp_mmu_free_sp_rcu_callback(struct rcu_head *head)
        tdp_mmu_free_sp(sp);
 }
 
-void kvm_tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root,
-                         bool shared)
+void kvm_tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root)
 {
-       kvm_lockdep_assert_mmu_lock_held(kvm, shared);
+       /*
+        * Either read or write is okay, but mmu_lock must be held because
+        * writers are not required to take tdp_mmu_pages_lock.
+        */
+       lockdep_assert_held(&kvm->mmu_lock);
 
        if (!refcount_dec_and_test(&root->tdp_mmu_root_count))
                return;
@@ -106,10 +109,16 @@ void kvm_tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root,
  */
 static struct kvm_mmu_page *tdp_mmu_next_root(struct kvm *kvm,
                                              struct kvm_mmu_page *prev_root,
-                                             bool shared, bool only_valid)
+                                             bool only_valid)
 {
        struct kvm_mmu_page *next_root;
 
+       /*
+        * While the roots themselves are RCU-protected, fields such as
+        * role.invalid are protected by mmu_lock.
+        */
+       lockdep_assert_held(&kvm->mmu_lock);
+
        rcu_read_lock();
 
        if (prev_root)
@@ -132,7 +141,7 @@ static struct kvm_mmu_page *tdp_mmu_next_root(struct kvm *kvm,
        rcu_read_unlock();
 
        if (prev_root)
-               kvm_tdp_mmu_put_root(kvm, prev_root, shared);
+               kvm_tdp_mmu_put_root(kvm, prev_root);
 
        return next_root;
 }
@@ -144,13 +153,12 @@ static struct kvm_mmu_page *tdp_mmu_next_root(struct kvm *kvm,
  * recent root. (Unless keeping a live reference is desirable.)
  *
  * If shared is set, this function is operating under the MMU lock in read
- * mode. In the unlikely event that this thread must free a root, the lock
- * will be temporarily dropped and reacquired in write mode.
+ * mode.
  */
 #define __for_each_tdp_mmu_root_yield_safe(_kvm, _root, _as_id, _shared, _only_valid)\
-       for (_root = tdp_mmu_next_root(_kvm, NULL, _shared, _only_valid);       \
+       for (_root = tdp_mmu_next_root(_kvm, NULL, _only_valid);                \
             _root;                                                             \
-            _root = tdp_mmu_next_root(_kvm, _root, _shared, _only_valid))      \
+            _root = tdp_mmu_next_root(_kvm, _root, _only_valid))               \
                if (kvm_lockdep_assert_mmu_lock_held(_kvm, _shared) &&          \
                    kvm_mmu_page_as_id(_root) != _as_id) {                      \
                } else
@@ -159,9 +167,9 @@ static struct kvm_mmu_page *tdp_mmu_next_root(struct kvm *kvm,
        __for_each_tdp_mmu_root_yield_safe(_kvm, _root, _as_id, _shared, true)
 
 #define for_each_tdp_mmu_root_yield_safe(_kvm, _root, _shared)                 \
-       for (_root = tdp_mmu_next_root(_kvm, NULL, _shared, false);             \
+       for (_root = tdp_mmu_next_root(_kvm, NULL, false);                      \
             _root;                                                             \
-            _root = tdp_mmu_next_root(_kvm, _root, _shared, false))            \
+            _root = tdp_mmu_next_root(_kvm, _root, false))                     \
                if (!kvm_lockdep_assert_mmu_lock_held(_kvm, _shared)) {         \
                } else
 
@@ -891,7 +899,7 @@ void kvm_tdp_mmu_zap_invalidated_roots(struct kvm *kvm)
                 * the root must be reachable by mmu_notifiers while it's being
                 * zapped
                 */
-               kvm_tdp_mmu_put_root(kvm, root, true);
+               kvm_tdp_mmu_put_root(kvm, root);
        }
 
        read_unlock(&kvm->mmu_lock);
@@ -1500,7 +1508,7 @@ void kvm_tdp_mmu_try_split_huge_pages(struct kvm *kvm,
        for_each_valid_tdp_mmu_root_yield_safe(kvm, root, slot->as_id, shared) {
                r = tdp_mmu_split_huge_pages_root(kvm, root, start, end, target_level, shared);
                if (r) {
-                       kvm_tdp_mmu_put_root(kvm, root, shared);
+                       kvm_tdp_mmu_put_root(kvm, root);
                        break;
                }
        }
diff --git a/arch/x86/kvm/mmu/tdp_mmu.h b/arch/x86/kvm/mmu/tdp_mmu.h
index 733a3aef3a96eaa32964e8fc042c26fae0a0ee9c..20d97aa46c490fff98f9d3a6cbc116935d71a726 100644
--- a/arch/x86/kvm/mmu/tdp_mmu.h
+++ b/arch/x86/kvm/mmu/tdp_mmu.h
@@ -17,8 +17,7 @@ __must_check static inline bool kvm_tdp_mmu_get_root(struct kvm_mmu_page *root)
        return refcount_inc_not_zero(&root->tdp_mmu_root_count);
 }
 
-void kvm_tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root,
-                         bool shared);
+void kvm_tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root);
 
 bool kvm_tdp_mmu_zap_leafs(struct kvm *kvm, gfn_t start, gfn_t end, bool flush);
 bool kvm_tdp_mmu_zap_sp(struct kvm *kvm, struct kvm_mmu_page *sp);
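
After the patch, a caller's view of the root iterator is unchanged except
that reference dropping happens internally with no mode flag.  A
hypothetical walk under the read lock, mirroring the pattern in
kvm_tdp_mmu_zap_invalidated_roots() (example_walk is an illustration; the
iterator and locking calls are the real post-patch API):

/*
 * Hypothetical caller, assuming the post-patch tdp_mmu interface.  The
 * _shared argument now only selects which lockdep assertion the
 * iterator performs; tdp_mmu_next_root() drops the previous root's
 * reference itself, with no read/write hint.
 */
static void example_walk(struct kvm *kvm)
{
	struct kvm_mmu_page *root;

	read_lock(&kvm->mmu_lock);
	for_each_tdp_mmu_root_yield_safe(kvm, root, true)
		;	/* operate on root; reference handling is internal */
	read_unlock(&kvm->mmu_lock);
}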