git.ipfire.org Git - thirdparty/kernel/stable.git/commitdiff
KVM: arm64: Initialize struct kvm_s2_fault completely at declaration
author: Fuad Tabba <tabba@google.com>
Fri, 6 Mar 2026 14:02:29 +0000 (14:02 +0000)
committer: Marc Zyngier <maz@kernel.org>
Sat, 28 Mar 2026 11:29:40 +0000 (11:29 +0000)
Simplify the initialization of struct kvm_s2_fault in user_mem_abort().

Instead of partially initializing the struct via designated initializers
and then sequentially assigning the remaining fields (like write_fault
and topup_memcache) further down the function, evaluate those
dependencies upfront.

This allows the entire struct to be fully initialized at declaration. It
also eliminates the need for the intermediate fault_data variable and
its associated fault pointer, reducing boilerplate.

Signed-off-by: Fuad Tabba <tabba@google.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
arch/arm64/kvm/mmu.c

index c5ac9bd87b996eb36e317e7bdd1fa66f513e1fb1..fe942be061cb008cdef21ba05594b6a0f66bec16 100644 (file)
@@ -1962,8 +1962,9 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
                          struct kvm_memory_slot *memslot, unsigned long hva,
                          bool fault_is_perm)
 {
-       int ret = 0;
-       struct kvm_s2_fault fault_data = {
+       bool write_fault = kvm_is_write_fault(vcpu);
+       bool logging_active = memslot_is_logging(memslot);
+       struct kvm_s2_fault fault = {
                .vcpu = vcpu,
                .fault_ipa = fault_ipa,
                .nested = nested,
@@ -1971,19 +1972,18 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
                .hva = hva,
                .fault_is_perm = fault_is_perm,
                .ipa = fault_ipa,
-               .logging_active = memslot_is_logging(memslot),
-               .force_pte = memslot_is_logging(memslot),
-               .s2_force_noncacheable = false,
+               .logging_active = logging_active,
+               .force_pte = logging_active,
                .prot = KVM_PGTABLE_PROT_R,
+               .fault_granule = fault_is_perm ? kvm_vcpu_trap_get_perm_fault_granule(vcpu) : 0,
+               .write_fault = write_fault,
+               .exec_fault = kvm_vcpu_trap_is_exec_fault(vcpu),
+               .topup_memcache = !fault_is_perm || (logging_active && write_fault),
        };
-       struct kvm_s2_fault *fault = &fault_data;
        void *memcache;
+       int ret;
 
-       if (fault->fault_is_perm)
-               fault->fault_granule = kvm_vcpu_trap_get_perm_fault_granule(fault->vcpu);
-       fault->write_fault = kvm_is_write_fault(fault->vcpu);
-       fault->exec_fault = kvm_vcpu_trap_is_exec_fault(fault->vcpu);
-       VM_WARN_ON_ONCE(fault->write_fault && fault->exec_fault);
+       VM_WARN_ON_ONCE(fault.write_fault && fault.exec_fault);
 
        /*
         * Permission faults just need to update the existing leaf entry,
@@ -1991,9 +1991,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
         * only exception to this is when dirty logging is enabled at runtime
         * and a write fault needs to collapse a block entry into a table.
         */
-       fault->topup_memcache = !fault->fault_is_perm ||
-                               (fault->logging_active && fault->write_fault);
-       ret = prepare_mmu_memcache(fault->vcpu, fault->topup_memcache, &memcache);
+       ret = prepare_mmu_memcache(vcpu, fault.topup_memcache, &memcache);
        if (ret)
                return ret;
 
@@ -2001,17 +1999,17 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
         * Let's check if we will get back a huge page backed by hugetlbfs, or
         * get block mapping for device MMIO region.
         */
-       ret = kvm_s2_fault_pin_pfn(fault);
+       ret = kvm_s2_fault_pin_pfn(&fault);
        if (ret != 1)
                return ret;
 
-       ret = kvm_s2_fault_compute_prot(fault);
+       ret = kvm_s2_fault_compute_prot(&fault);
        if (ret) {
-               kvm_release_page_unused(fault->page);
+               kvm_release_page_unused(fault.page);
                return ret;
        }
 
-       return kvm_s2_fault_map(fault, memcache);
+       return kvm_s2_fault_map(&fault, memcache);
 }
 
 /* Resolve the access fault by making the page young again. */