KVM: x86/mmu: Dynamically allocate shadow MMU's hashed page list
Author:     Sean Christopherson <seanjc@google.com>
AuthorDate: Fri, 23 May 2025 00:11:36 +0000 (17:11 -0700)
Commit:     Sean Christopherson <seanjc@google.com>
CommitDate: Tue, 24 Jun 2025 19:50:34 +0000 (12:50 -0700)
Dynamically allocate the (massive) array of hashed lists used to track
shadow pages, as the array itself is 32KiB, i.e. is an order-3 allocation
all on its own, and is *exactly* an order-3 allocation.  Dynamically
allocating the array will allow allocating "struct kvm" using kvmalloc(),
and will also allow deferring allocation of the array until it's actually
needed, i.e. until the first shadow root is allocated.
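
[Editorial note, not part of the commit: a back-of-the-envelope check of the
"exactly order-3" claim, assuming the usual 64-bit layout where
sizeof(struct hlist_head) is 8 bytes and the 32KiB figure above, which implies
4096 buckets:

    KVM_NUM_MMU_PAGES * sizeof(struct hlist_head) = 4096 * 8 bytes = 32 KiB
    32 KiB / 4 KiB per page = 8 pages = 2^3 pages, i.e. an order-3 allocation]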

Opportunistically use kvmalloc() for the hashed lists, as an order-3
allocation is (stating the obvious) less likely to fail than an order-4
allocation, and the overhead of vmalloc() is undesirable given that the
size of the allocation is fixed.
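
[Editorial note: a minimal sketch of the resulting allocation/teardown pattern,
mirroring the kvm_mmu_alloc_page_hash() helper and the kvfree() call added in
the diff below, not a separate API:

    /*
     * Allocate and zero the hash buckets; a zeroed hlist_head is an empty
     * list, so kvcalloc() leaves every bucket ready for use with no further
     * initialization needed.
     */
    kvm->arch.mmu_page_hash = kvcalloc(KVM_NUM_MMU_PAGES,
                                       sizeof(*kvm->arch.mmu_page_hash),
                                       GFP_KERNEL_ACCOUNT);
    if (!kvm->arch.mmu_page_hash)
            return -ENOMEM;

    /* On VM teardown, kvfree() handles both the kmalloc and vmalloc cases. */
    kvfree(kvm->arch.mmu_page_hash);]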

Cc: Vipin Sharma <vipinsh@google.com>
Reviewed-by: Xiaoyao Li <xiaoyao.li@intel.com>
Link: https://lore.kernel.org/r/20250523001138.3182794-3-seanjc@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
arch/x86/include/asm/kvm_host.h
arch/x86/kvm/mmu/mmu.c
arch/x86/kvm/x86.c

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index b4a391929cdbaa9960e870ade649606bdcab2e9a..17529e5d717c411d1af7750dc794f75c9143c0e0 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1344,7 +1344,7 @@ struct kvm_arch {
        bool has_private_mem;
        bool has_protected_state;
        bool pre_fault_allowed;
-       struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES];
+       struct hlist_head *mmu_page_hash;
        struct list_head active_mmu_pages;
        /*
         * A list of kvm_mmu_page structs that, if zapped, could possibly be
@@ -2007,7 +2007,7 @@ void kvm_mmu_vendor_module_exit(void);
 
 void kvm_mmu_destroy(struct kvm_vcpu *vcpu);
 int kvm_mmu_create(struct kvm_vcpu *vcpu);
-void kvm_mmu_init_vm(struct kvm *kvm);
+int kvm_mmu_init_vm(struct kvm *kvm);
 void kvm_mmu_uninit_vm(struct kvm *kvm);
 
 void kvm_mmu_init_memslot_memory_attributes(struct kvm *kvm,
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 4e06e2e89a8fa7e8710e9683346f8527182327d2..2b521f74bacf0b1cc202470f246afd66ef0f5bf1 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -3882,6 +3882,18 @@ out_unlock:
        return r;
 }
 
+static int kvm_mmu_alloc_page_hash(struct kvm *kvm)
+{
+       struct hlist_head *h;
+
+       h = kvcalloc(KVM_NUM_MMU_PAGES, sizeof(*h), GFP_KERNEL_ACCOUNT);
+       if (!h)
+               return -ENOMEM;
+
+       kvm->arch.mmu_page_hash = h;
+       return 0;
+}
+
 static int mmu_first_shadow_root_alloc(struct kvm *kvm)
 {
        struct kvm_memslots *slots;
@@ -6682,13 +6694,19 @@ static void kvm_mmu_zap_all_fast(struct kvm *kvm)
                kvm_tdp_mmu_zap_invalidated_roots(kvm, true);
 }
 
-void kvm_mmu_init_vm(struct kvm *kvm)
+int kvm_mmu_init_vm(struct kvm *kvm)
 {
+       int r;
+
        kvm->arch.shadow_mmio_value = shadow_mmio_value;
        INIT_LIST_HEAD(&kvm->arch.active_mmu_pages);
        INIT_LIST_HEAD(&kvm->arch.possible_nx_huge_pages);
        spin_lock_init(&kvm->arch.mmu_unsync_pages_lock);
 
+       r = kvm_mmu_alloc_page_hash(kvm);
+       if (r)
+               return r;
+
        if (tdp_mmu_enabled)
                kvm_mmu_init_tdp_mmu(kvm);
 
@@ -6699,6 +6717,7 @@ void kvm_mmu_init_vm(struct kvm *kvm)
 
        kvm->arch.split_desc_cache.kmem_cache = pte_list_desc_cache;
        kvm->arch.split_desc_cache.gfp_zero = __GFP_ZERO;
+       return 0;
 }
 
 static void mmu_free_vm_memory_caches(struct kvm *kvm)
@@ -6710,6 +6729,8 @@ static void mmu_free_vm_memory_caches(struct kvm *kvm)
 
 void kvm_mmu_uninit_vm(struct kvm *kvm)
 {
+       kvfree(kvm->arch.mmu_page_hash);
+
        if (tdp_mmu_enabled)
                kvm_mmu_uninit_tdp_mmu(kvm);
 
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index b58a74c1722de3f2d180cf8d6a3cf0ce9cf534fd..d9339ad1474eaef5312d1ee20aaaaa59f3bace16 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -12789,7 +12789,9 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
        if (ret)
                goto out;
 
-       kvm_mmu_init_vm(kvm);
+       ret = kvm_mmu_init_vm(kvm);
+       if (ret)
+               goto out_cleanup_page_track;
 
        ret = kvm_x86_call(vm_init)(kvm);
        if (ret)
@@ -12842,6 +12844,7 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
 
 out_uninit_mmu:
        kvm_mmu_uninit_vm(kvm);
+out_cleanup_page_track:
        kvm_page_track_cleanup(kvm);
 out:
        return ret;