KVM: x86/mmu: Rename make_huge_page_split_spte() to make_small_spte()
author     David Matlack <dmatlack@google.com>
           Fri, 23 Aug 2024 23:56:47 +0000 (16:56 -0700)
committer  Sean Christopherson <seanjc@google.com>
           Tue, 5 Nov 2024 02:37:23 +0000 (18:37 -0800)
Rename make_huge_page_split_spte() to make_small_spte(). This ensures
that the usage of "small_spte" and "huge_spte" is consistent between
make_huge_spte() and make_small_spte().

This should also reduce some confusion as make_huge_page_split_spte()
almost reads like it will create a huge SPTE, when in fact it is
creating a small SPTE to split the huge SPTE.

No functional change intended.
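
For reference, the two helpers pair up as follows after the rename
(declarations taken from the arch/x86/kvm/mmu/spte.h hunk below); each is
named for the SPTE it produces and takes the opposite kind as input:

  u64 make_small_spte(struct kvm *kvm, u64 huge_spte,
                      union kvm_mmu_page_role role, int index);
  u64 make_huge_spte(struct kvm *kvm, u64 small_spte, int level);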

Suggested-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: David Matlack <dmatlack@google.com>
Link: https://lore.kernel.org/r/20240823235648.3236880-6-dmatlack@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
arch/x86/kvm/mmu/mmu.c
arch/x86/kvm/mmu/spte.c
arch/x86/kvm/mmu/spte.h
arch/x86/kvm/mmu/tdp_mmu.c

arch/x86/kvm/mmu/mmu.c
index 33542ee4bbb1c6ac590f2ea22d07b60e77f1bf65..b522b076c2cee47cfa3f0a17dfbc0c4cb9c8ec21 100644
@@ -6735,7 +6735,7 @@ static void shadow_mmu_split_huge_page(struct kvm *kvm,
                        continue;
                }
 
-               spte = make_huge_page_split_spte(kvm, huge_spte, sp->role, index);
+               spte = make_small_spte(kvm, huge_spte, sp->role, index);
                mmu_spte_set(sptep, spte);
                __rmap_add(kvm, cache, slot, sptep, gfn, sp->role.access);
        }
arch/x86/kvm/mmu/spte.c
index 4bc107dc6b4519e002a83a52f89b3739b47ff088..22551e2f1d009d7387f749be4ffd3c91238ea8be 100644
@@ -295,8 +295,8 @@ static u64 make_spte_nonexecutable(u64 spte)
  * This is used during huge page splitting to build the SPTEs that make up the
  * new page table.
  */
-u64 make_huge_page_split_spte(struct kvm *kvm, u64 huge_spte,
-                             union kvm_mmu_page_role role, int index)
+u64 make_small_spte(struct kvm *kvm, u64 huge_spte,
+                   union kvm_mmu_page_role role, int index)
 {
        u64 child_spte = huge_spte;
 
arch/x86/kvm/mmu/spte.h
index 3a4147345e03bcae2c2a71c42796953959780089..f332b33bc8178bb6dbd4b5070bd1c9cadd1443b8 100644
@@ -504,8 +504,8 @@ bool make_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
               unsigned int pte_access, gfn_t gfn, kvm_pfn_t pfn,
               u64 old_spte, bool prefetch, bool synchronizing,
               bool host_writable, u64 *new_spte);
-u64 make_huge_page_split_spte(struct kvm *kvm, u64 huge_spte,
-                             union kvm_mmu_page_role role, int index);
+u64 make_small_spte(struct kvm *kvm, u64 huge_spte,
+                   union kvm_mmu_page_role role, int index);
 u64 make_huge_spte(struct kvm *kvm, u64 small_spte, int level);
 u64 make_nonleaf_spte(u64 *child_pt, bool ad_disabled);
 u64 make_mmio_spte(struct kvm_vcpu *vcpu, u64 gfn, unsigned int access);
arch/x86/kvm/mmu/tdp_mmu.c
index 2b5d30190557b6dc75d59ed7bf42c1750630380f..233570e2cd35d00e3bac5cba3997096818d9e088 100644
@@ -1314,7 +1314,7 @@ static int tdp_mmu_split_huge_page(struct kvm *kvm, struct tdp_iter *iter,
         * not been linked in yet and thus is not reachable from any other CPU.
         */
        for (i = 0; i < SPTE_ENT_PER_PAGE; i++)
-               sp->spt[i] = make_huge_page_split_spte(kvm, huge_spte, sp->role, i);
+               sp->spt[i] = make_small_spte(kvm, huge_spte, sp->role, i);
 
        /*
         * Replace the huge spte with a pointer to the populated lower level