kvm_fault_lock(kvm);
pgt = fault->vcpu->arch.hw_mmu->pgt;
- if (mmu_invalidate_retry(kvm, fault->mmu_seq)) {
- ret = -EAGAIN;
+ ret = -EAGAIN;
+ if (mmu_invalidate_retry(kvm, fault->mmu_seq))
goto out_unlock;
- }
/*
 * If we are not forced to use page mapping, check if we are
 * backed by a THP and thus use block mapping if possible.
 */
if (fault->vma_pagesize == PAGE_SIZE &&
!(fault->force_pte || fault->s2_force_noncacheable)) {
- if (fault->fault_is_perm && fault->fault_granule > PAGE_SIZE)
+ if (fault->fault_is_perm && fault->fault_granule > PAGE_SIZE) {
fault->vma_pagesize = fault->fault_granule;
- else
+ } else {
fault->vma_pagesize = transparent_hugepage_adjust(kvm, fault->memslot,
fault->hva, &fault->pfn,
&fault->fault_ipa);
- if (fault->vma_pagesize < 0) {
- ret = fault->vma_pagesize;
- goto out_unlock;
+ if (fault->vma_pagesize < 0) {
+ ret = fault->vma_pagesize;
+ goto out_unlock;
+ }
}
}
if (fault->writable && !ret)
mark_page_dirty_in_slot(kvm, fault->memslot, fault->gfn);
- return ret != -EAGAIN ? ret : 0;
+ if (ret != -EAGAIN)
+ return ret;
+ return 0;
}
static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,