struct kvm_memory_slot *memslot, unsigned long hva,
bool fault_is_perm)
{
- int ret = 0;
- struct kvm_s2_fault fault_data = {
+ bool write_fault = kvm_is_write_fault(vcpu);
+ bool logging_active = memslot_is_logging(memslot);
+ struct kvm_s2_fault fault = {
.vcpu = vcpu,
.fault_ipa = fault_ipa,
.nested = nested,
.hva = hva,
.fault_is_perm = fault_is_perm,
.ipa = fault_ipa,
- .logging_active = memslot_is_logging(memslot),
- .force_pte = memslot_is_logging(memslot),
- .s2_force_noncacheable = false,
+ .logging_active = logging_active,
+ .force_pte = logging_active,
.prot = KVM_PGTABLE_PROT_R,
+ .fault_granule = fault_is_perm ? kvm_vcpu_trap_get_perm_fault_granule(vcpu) : 0,
+ .write_fault = write_fault,
+ .exec_fault = kvm_vcpu_trap_is_exec_fault(vcpu),
+ .topup_memcache = !fault_is_perm || (logging_active && write_fault),
};
- struct kvm_s2_fault *fault = &fault_data;
void *memcache;
+ int ret;
- if (fault->fault_is_perm)
- fault->fault_granule = kvm_vcpu_trap_get_perm_fault_granule(fault->vcpu);
- fault->write_fault = kvm_is_write_fault(fault->vcpu);
- fault->exec_fault = kvm_vcpu_trap_is_exec_fault(fault->vcpu);
- VM_WARN_ON_ONCE(fault->write_fault && fault->exec_fault);
+ VM_WARN_ON_ONCE(fault.write_fault && fault.exec_fault);
/*
* Permission faults just need to update the existing leaf entry, and so
* normally don't require allocations from the memcache. The only
* exception to this is when dirty logging is enabled at runtime and a
* write fault needs to collapse a block entry into a table.
*/
- fault->topup_memcache = !fault->fault_is_perm ||
- (fault->logging_active && fault->write_fault);
- ret = prepare_mmu_memcache(fault->vcpu, fault->topup_memcache, &memcache);
+ ret = prepare_mmu_memcache(vcpu, fault.topup_memcache, &memcache);
if (ret)
return ret;
/*
* Let's check if we will get back a huge page backed by hugetlbfs, or
* get block mapping for device MMIO region.
*/
- ret = kvm_s2_fault_pin_pfn(fault);
+ ret = kvm_s2_fault_pin_pfn(&fault);
if (ret != 1)
return ret;
- ret = kvm_s2_fault_compute_prot(fault);
+ ret = kvm_s2_fault_compute_prot(&fault);
if (ret) {
- kvm_release_page_unused(fault->page);
+ kvm_release_page_unused(fault.page);
return ret;
}
- return kvm_s2_fault_map(fault, memcache);
+ return kvm_s2_fault_map(&fault, memcache);
}
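/*
 * Editorial aside, not part of the patch: the memcache top-up rule that the
 * refactor folds into the initializer can be read as a small predicate. The
 * sketch below is a hypothetical, standalone restatement for illustration
 * only; the helper name and the main() harness are invented here.
 */
#include <assert.h>
#include <stdbool.h>

/*
 * Permission faults normally reuse the existing leaf entry, so no new
 * page-table pages are needed. The exception is a write fault with dirty
 * logging active, which may have to split a block mapping into a table.
 */
static bool needs_memcache_topup(bool fault_is_perm, bool logging_active,
				 bool write_fault)
{
	return !fault_is_perm || (logging_active && write_fault);
}

int main(void)
{
	/* Translation (non-permission) fault: always top up. */
	assert(needs_memcache_topup(false, false, false));
	/* Permission fault, no dirty logging: no top-up needed. */
	assert(!needs_memcache_topup(true, false, true));
	/* Permission write fault with dirty logging: block may be split. */
	assert(needs_memcache_topup(true, true, true));
	return 0;
}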
/* Resolve the access fault by making the page young again. */