git.ipfire.org Git - thirdparty/kernel/stable.git/commitdiff
KVM: x86/mmu: Rename .private_max_mapping_level() to .gmem_max_mapping_level()
author: Ackerley Tng <ackerleytng@google.com>
Tue, 29 Jul 2025 22:54:43 +0000 (15:54 -0700)
committer: Paolo Bonzini <pbonzini@redhat.com>
Wed, 27 Aug 2025 08:35:00 +0000 (04:35 -0400)
Rename kvm_x86_ops.private_max_mapping_level() to .gmem_max_mapping_level()
in anticipation of extending guest_memfd support to non-private memory.

No functional change intended.

Reviewed-by: Xiaoyao Li <xiaoyao.li@intel.com>
Acked-by: David Hildenbrand <david@redhat.com>
Signed-off-by: Ackerley Tng <ackerleytng@google.com>
Signed-off-by: Fuad Tabba <tabba@google.com>
Co-developed-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: Sean Christopherson <seanjc@google.com>
Reviewed-by: Fuad Tabba <tabba@google.com>
Message-ID: <20250729225455.670324-13-seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
arch/x86/include/asm/kvm-x86-ops.h
arch/x86/include/asm/kvm_host.h
arch/x86/kvm/mmu/mmu.c
arch/x86/kvm/svm/sev.c
arch/x86/kvm/svm/svm.c
arch/x86/kvm/svm/svm.h
arch/x86/kvm/vmx/main.c
arch/x86/kvm/vmx/tdx.c
arch/x86/kvm/vmx/x86_ops.h

index 18a5c3119e1a84b339a34929da0ff8bb73f5a354..62c3e4de3303b07add069c050893a236a6fa330c 100644 (file)
@@ -145,7 +145,7 @@ KVM_X86_OP_OPTIONAL_RET0(vcpu_get_apicv_inhibit_reasons);
 KVM_X86_OP_OPTIONAL(get_untagged_addr)
 KVM_X86_OP_OPTIONAL(alloc_apic_backing_page)
 KVM_X86_OP_OPTIONAL_RET0(gmem_prepare)
-KVM_X86_OP_OPTIONAL_RET0(private_max_mapping_level)
+KVM_X86_OP_OPTIONAL_RET0(gmem_max_mapping_level)
 KVM_X86_OP_OPTIONAL(gmem_invalidate)
 
 #undef KVM_X86_OP
index 50366a1ca192efa8a17e833ab21a9e3533734846..c0a739bf38296326b4cf883ae122b40a681ef6b0 100644 (file)
@@ -1922,7 +1922,7 @@ struct kvm_x86_ops {
        void *(*alloc_apic_backing_page)(struct kvm_vcpu *vcpu);
        int (*gmem_prepare)(struct kvm *kvm, kvm_pfn_t pfn, gfn_t gfn, int max_order);
        void (*gmem_invalidate)(kvm_pfn_t start, kvm_pfn_t end);
-       int (*private_max_mapping_level)(struct kvm *kvm, kvm_pfn_t pfn);
+       int (*gmem_max_mapping_level)(struct kvm *kvm, kvm_pfn_t pfn);
 };
 
 struct kvm_x86_nested_ops {
index fdc2824755ee6ed7f80a1d97e90c07bffd8e5b3f..b735611e8fcd389c6b7b3120e53d465394b50990 100644 (file)
@@ -4532,7 +4532,7 @@ static u8 kvm_max_private_mapping_level(struct kvm *kvm, kvm_pfn_t pfn,
        if (max_level == PG_LEVEL_4K)
                return PG_LEVEL_4K;
 
-       req_max_level = kvm_x86_call(private_max_mapping_level)(kvm, pfn);
+       req_max_level = kvm_x86_call(gmem_max_mapping_level)(kvm, pfn);
        if (req_max_level)
                max_level = min(max_level, req_max_level);
 
index 966a330dd2941aa3a959aca98fc9bc87f17b3238..b926a053b8cfb2f399ffe906f2295ad692cfba52 100644 (file)
@@ -4943,7 +4943,7 @@ next_pfn:
        }
 }
 
-int sev_private_max_mapping_level(struct kvm *kvm, kvm_pfn_t pfn)
+int sev_gmem_max_mapping_level(struct kvm *kvm, kvm_pfn_t pfn)
 {
        int level, rc;
        bool assigned;
index d9931c6c4bc62aa0562e674e08f95717ee0a4a94..8a66e2e985a48ce888d7d05384b600c846de6f7d 100644 (file)
@@ -5180,7 +5180,7 @@ static struct kvm_x86_ops svm_x86_ops __initdata = {
 
        .gmem_prepare = sev_gmem_prepare,
        .gmem_invalidate = sev_gmem_invalidate,
-       .private_max_mapping_level = sev_private_max_mapping_level,
+       .gmem_max_mapping_level = sev_gmem_max_mapping_level,
 };
 
 /*
index 58b9d168e0c8ece83fee217491c24611097eb541..d84a83ae18a1ff99e4bdc7fde9bd18781e59f755 100644 (file)
@@ -866,7 +866,7 @@ void sev_handle_rmp_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u64 error_code);
 void sev_snp_init_protected_guest_state(struct kvm_vcpu *vcpu);
 int sev_gmem_prepare(struct kvm *kvm, kvm_pfn_t pfn, gfn_t gfn, int max_order);
 void sev_gmem_invalidate(kvm_pfn_t start, kvm_pfn_t end);
-int sev_private_max_mapping_level(struct kvm *kvm, kvm_pfn_t pfn);
+int sev_gmem_max_mapping_level(struct kvm *kvm, kvm_pfn_t pfn);
 struct vmcb_save_area *sev_decrypt_vmsa(struct kvm_vcpu *vcpu);
 void sev_free_decrypted_vmsa(struct kvm_vcpu *vcpu, struct vmcb_save_area *vmsa);
 #else
@@ -895,7 +895,7 @@ static inline int sev_gmem_prepare(struct kvm *kvm, kvm_pfn_t pfn, gfn_t gfn, in
        return 0;
 }
 static inline void sev_gmem_invalidate(kvm_pfn_t start, kvm_pfn_t end) {}
-static inline int sev_private_max_mapping_level(struct kvm *kvm, kvm_pfn_t pfn)
+static inline int sev_gmem_max_mapping_level(struct kvm *kvm, kvm_pfn_t pfn)
 {
        return 0;
 }
index dbab1c15b0cdfd82a5958c7055e14f4a5b20073f..dd7687ef7e2d8560fef5d6fef521352b4177012b 100644 (file)
@@ -831,10 +831,10 @@ static int vt_vcpu_mem_enc_ioctl(struct kvm_vcpu *vcpu, void __user *argp)
        return tdx_vcpu_ioctl(vcpu, argp);
 }
 
-static int vt_gmem_private_max_mapping_level(struct kvm *kvm, kvm_pfn_t pfn)
+static int vt_gmem_max_mapping_level(struct kvm *kvm, kvm_pfn_t pfn)
 {
        if (is_td(kvm))
-               return tdx_gmem_private_max_mapping_level(kvm, pfn);
+               return tdx_gmem_max_mapping_level(kvm, pfn);
 
        return 0;
 }
@@ -1005,7 +1005,7 @@ struct kvm_x86_ops vt_x86_ops __initdata = {
        .mem_enc_ioctl = vt_op_tdx_only(mem_enc_ioctl),
        .vcpu_mem_enc_ioctl = vt_op_tdx_only(vcpu_mem_enc_ioctl),
 
-       .private_max_mapping_level = vt_op_tdx_only(gmem_private_max_mapping_level)
+       .gmem_max_mapping_level = vt_op_tdx_only(gmem_max_mapping_level)
 };
 
 struct kvm_x86_init_ops vt_init_ops __initdata = {
index 66744f5768c8eb8d439680fe7ddaabf8e5ca9171..b444714e8e8aa50e77ba68c31a8939b7d749b985 100644 (file)
@@ -3318,7 +3318,7 @@ int tdx_vcpu_ioctl(struct kvm_vcpu *vcpu, void __user *argp)
        return ret;
 }
 
-int tdx_gmem_private_max_mapping_level(struct kvm *kvm, kvm_pfn_t pfn)
+int tdx_gmem_max_mapping_level(struct kvm *kvm, kvm_pfn_t pfn)
 {
        return PG_LEVEL_4K;
 }
index 2b3424f638dbbca24f943bc58539162b69b6d012..6037d17084856c99aadff7ad4a7e397a36168aa7 100644 (file)
@@ -153,7 +153,7 @@ int tdx_vcpu_ioctl(struct kvm_vcpu *vcpu, void __user *argp);
 void tdx_flush_tlb_current(struct kvm_vcpu *vcpu);
 void tdx_flush_tlb_all(struct kvm_vcpu *vcpu);
 void tdx_load_mmu_pgd(struct kvm_vcpu *vcpu, hpa_t root_hpa, int root_level);
-int tdx_gmem_private_max_mapping_level(struct kvm *kvm, kvm_pfn_t pfn);
+int tdx_gmem_max_mapping_level(struct kvm *kvm, kvm_pfn_t pfn);
 #endif
 
 #endif /* __KVM_X86_VMX_X86_OPS_H */