mmap_read_unlock(current->mm);
pfn = __gfn_to_pfn_memslot(memslot, gfn, false, false,
- write_fault, &writable, NULL);
+ write_fault, &writable);
if (pfn == KVM_PFN_ERR_HWPOISON) {
kvm_send_hwpoison_signal(hva, vma_shift);
return 0;
} else {
/* Call KVM generic code to do the slow-path check */
pfn = __gfn_to_pfn_memslot(memslot, gfn, false, false,
- writing, &write_ok, NULL);
+ writing, &write_ok);
if (is_error_noslot_pfn(pfn))
return -EFAULT;
page = NULL;
/* Call KVM generic code to do the slow-path check */
pfn = __gfn_to_pfn_memslot(memslot, gfn, false, false,
- writing, upgrade_p, NULL);
+ writing, upgrade_p);
if (is_error_noslot_pfn(pfn))
return -EFAULT;
page = NULL;
return kvm_faultin_pfn_private(vcpu, fault);
fault->pfn = __gfn_to_pfn_memslot(fault->slot, fault->gfn, false, true,
- fault->write, &fault->map_writable,
- NULL);
+ fault->write, &fault->map_writable);
/*
 * If resolving the page failed because I/O is needed to fault-in the
 * page, retry with waiting for I/O allowed.  Note, gup always bails if
 * it is unable to quickly get a page and a fatal signal, i.e. SIGKILL,
 * is pending.
 */
fault->pfn = __gfn_to_pfn_memslot(fault->slot, fault->gfn, true, true,
- fault->write, &fault->map_writable,
- NULL);
+ fault->write, &fault->map_writable);
return RET_PF_CONTINUE;
}
kvm_pfn_t gfn_to_pfn_memslot(const struct kvm_memory_slot *slot, gfn_t gfn);
kvm_pfn_t __gfn_to_pfn_memslot(const struct kvm_memory_slot *slot, gfn_t gfn,
bool interruptible, bool no_wait,
- bool write_fault, bool *writable, hva_t *hva);
+ bool write_fault, bool *writable);
void kvm_release_pfn_clean(kvm_pfn_t pfn);
void kvm_release_pfn_dirty(kvm_pfn_t pfn);
kvm_pfn_t __gfn_to_pfn_memslot(const struct kvm_memory_slot *slot, gfn_t gfn,
bool interruptible, bool no_wait,
- bool write_fault, bool *writable, hva_t *hva)
+ bool write_fault, bool *writable)
{
unsigned long addr = __gfn_to_hva_many(slot, gfn, NULL, write_fault);
- if (hva)
- *hva = addr;
-
if (kvm_is_error_hva(addr)) {
if (writable)
*writable = false;
kvm_pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault,
			  bool *writable)
{
return __gfn_to_pfn_memslot(gfn_to_memslot(kvm, gfn), gfn, false, false,
- write_fault, writable, NULL);
+ write_fault, writable);
}
EXPORT_SYMBOL_GPL(gfn_to_pfn_prot);
kvm_pfn_t gfn_to_pfn_memslot(const struct kvm_memory_slot *slot, gfn_t gfn)
{
- return __gfn_to_pfn_memslot(slot, gfn, false, false, true, NULL, NULL);
+ return __gfn_to_pfn_memslot(slot, gfn, false, false, true, NULL);
}
EXPORT_SYMBOL_GPL(gfn_to_pfn_memslot);
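
For context, here is a minimal sketch (not part of the patch) of a caller using the trimmed six-argument signature. The function example_map_gfn, its locals, and the particular argument choices are hypothetical; only the __gfn_to_pfn_memslot() prototype itself is taken from the hunks above.

/* Hypothetical example only -- assumes <linux/kvm_host.h>.  Resolves a
 * writable pfn for @gfn in @slot, waiting for I/O if needed
 * (no_wait = false) and without an interruptible wait
 * (interruptible = false).  Callers that previously passed an hva_t
 * pointer simply drop the trailing NULL argument.
 */
static kvm_pfn_t example_map_gfn(const struct kvm_memory_slot *slot, gfn_t gfn)
{
	bool writable;
	kvm_pfn_t pfn;

	pfn = __gfn_to_pfn_memslot(slot, gfn, false, false,
				   true, &writable);
	if (is_error_noslot_pfn(pfn))
		return KVM_PFN_ERR_FAULT;

	return pfn;
}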