From: Quentin Perret
Date: Wed, 16 Apr 2025 15:26:46 +0000 (+0000)
Subject: KVM: arm64: Defer EL2 stage-1 mapping on share
X-Git-Tag: v6.16-rc1~129^2~2^2~6^2~11^2~1
X-Git-Url: http://git.ipfire.org/?a=commitdiff_plain;h=48d848882395a6a42ff1bb685082c79791d4e753;p=thirdparty%2Flinux.git

KVM: arm64: Defer EL2 stage-1 mapping on share

We currently blindly map into EL2 stage-1 *any* page passed to the
__pkvm_host_share_hyp() HVC. This is less than ideal from a security
perspective as it makes exploitation of potential hypervisor gadgets
easier than it should be. But interestingly, pKVM should never need to
access SHARED_BORROWED pages that it hasn't previously pinned, so there
is no need to map the page before that.

Reviewed-by: Marc Zyngier
Signed-off-by: Quentin Perret
Link: https://lore.kernel.org/r/20250416152648.2982950-7-qperret@google.com
Signed-off-by: Marc Zyngier
---

diff --git a/arch/arm64/kvm/hyp/nvhe/mem_protect.c b/arch/arm64/kvm/hyp/nvhe/mem_protect.c
index 25ff84c053c1b..91b757e3fb4c0 100644
--- a/arch/arm64/kvm/hyp/nvhe/mem_protect.c
+++ b/arch/arm64/kvm/hyp/nvhe/mem_protect.c
@@ -693,7 +693,6 @@ static int __guest_check_page_state_range(struct pkvm_hyp_vcpu *vcpu, u64 addr,
 int __pkvm_host_share_hyp(u64 pfn)
 {
 	u64 phys = hyp_pfn_to_phys(pfn);
-	void *virt = __hyp_va(phys);
 	u64 size = PAGE_SIZE;
 	int ret;
 
@@ -710,7 +709,6 @@ int __pkvm_host_share_hyp(u64 pfn)
 	}
 
 	__hyp_set_page_state_range(phys, size, PKVM_PAGE_SHARED_BORROWED);
-	WARN_ON(pkvm_create_mappings_locked(virt, virt + size, PAGE_HYP));
 	WARN_ON(__host_set_page_state_range(phys, size, PKVM_PAGE_SHARED_OWNED));
 
 unlock:
@@ -742,7 +740,6 @@ int __pkvm_host_unshare_hyp(u64 pfn)
 	}
 
 	__hyp_set_page_state_range(phys, size, PKVM_NOPAGE);
-	WARN_ON(kvm_pgtable_hyp_unmap(&pkvm_pgtable, virt, size) != size);
 	WARN_ON(__host_set_page_state_range(phys, size, PKVM_PAGE_OWNED));
 
 unlock:
@@ -818,6 +815,7 @@ int hyp_pin_shared_mem(void *from, void *to)
 	u64 end = PAGE_ALIGN((u64)to);
 	u64 phys = __hyp_pa(start);
 	u64 size = end - start;
+	struct hyp_page *p;
 	int ret;
 
 	host_lock_component();
@@ -831,8 +829,14 @@ int hyp_pin_shared_mem(void *from, void *to)
 	if (ret)
 		goto unlock;
 
-	for (cur = start; cur < end; cur += PAGE_SIZE)
-		hyp_page_ref_inc(hyp_virt_to_page(cur));
+	for (cur = start; cur < end; cur += PAGE_SIZE) {
+		p = hyp_virt_to_page(cur);
+		hyp_page_ref_inc(p);
+		if (p->refcount == 1)
+			WARN_ON(pkvm_create_mappings_locked((void *)cur,
+							    (void *)cur + PAGE_SIZE,
+							    PAGE_HYP));
+	}
 
 unlock:
 	hyp_unlock_component();
@@ -845,12 +849,17 @@ void hyp_unpin_shared_mem(void *from, void *to)
 {
 	u64 cur, start = ALIGN_DOWN((u64)from, PAGE_SIZE);
 	u64 end = PAGE_ALIGN((u64)to);
+	struct hyp_page *p;
 
 	host_lock_component();
 	hyp_lock_component();
 
-	for (cur = start; cur < end; cur += PAGE_SIZE)
-		hyp_page_ref_dec(hyp_virt_to_page(cur));
+	for (cur = start; cur < end; cur += PAGE_SIZE) {
+		p = hyp_virt_to_page(cur);
+		if (p->refcount == 1)
+			WARN_ON(kvm_pgtable_hyp_unmap(&pkvm_pgtable, cur, PAGE_SIZE) != PAGE_SIZE);
+		hyp_page_ref_dec(p);
+	}
 
 	hyp_unlock_component();
 	host_unlock_component();
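
As an aside, the change boils down to a "map on first pin, unmap on last
unpin" refcounting scheme: the EL2 stage-1 mapping is created on the 0 -> 1
pin transition and torn down on the 1 -> 0 transition. The stand-alone C
sketch below illustrates only that scheme; struct page_meta, pin_page(),
unpin_page(), map_page() and unmap_page() are hypothetical names used for
illustration, not the hyp_page/pkvm helpers used in the diff above.

	/*
	 * Minimal user-space sketch of the "map on first pin, unmap on last
	 * unpin" refcounting pattern. All names here are hypothetical
	 * stand-ins; the real code uses hyp_page_ref_inc()/hyp_page_ref_dec()
	 * and the pKVM page-table helpers shown in the diff above.
	 */
	#include <assert.h>
	#include <stdbool.h>
	#include <stdio.h>

	struct page_meta {
		unsigned int refcount;	/* pin count, like hyp_page::refcount */
		bool mapped;		/* stands in for the EL2 stage-1 mapping */
	};

	static void map_page(struct page_meta *p)   { p->mapped = true;  }
	static void unmap_page(struct page_meta *p) { p->mapped = false; }

	/* Pin: bump the refcount, create the mapping on the 0 -> 1 transition. */
	static void pin_page(struct page_meta *p)
	{
		if (p->refcount++ == 0)
			map_page(p);
	}

	/* Unpin: tear the mapping down only when the last pin goes away. */
	static void unpin_page(struct page_meta *p)
	{
		assert(p->refcount > 0);
		if (--p->refcount == 0)
			unmap_page(p);
	}

	int main(void)
	{
		struct page_meta page = { 0 };

		pin_page(&page);	/* first pin: page becomes mapped */
		pin_page(&page);	/* nested pin: no extra mapping work */
		unpin_page(&page);	/* still pinned once, stays mapped */
		unpin_page(&page);	/* last unpin: page is unmapped again */

		printf("refcount=%u mapped=%d\n", page.refcount, page.mapped);
		return 0;
	}

The invariant is the same one the patch relies on: pKVM only touches pages it
has pinned, so the EL2 stage-1 mapping only needs to exist while the pin
count is non-zero.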