KVM: SEV: Implement gmem hook for initializing private pages
author    Michael Roth <michael.roth@amd.com>
          Wed, 1 May 2024 08:52:03 +0000 (03:52 -0500)
committer Paolo Bonzini <pbonzini@redhat.com>
          Sun, 12 May 2024 08:09:32 +0000 (04:09 -0400)
This will handle the RMP table updates needed to put a page into a
private state before mapping it into an SEV-SNP guest.
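For orientation only, and not part of the patch itself: below is a minimal standalone C sketch (userspace, with made-up example values) of the alignment arithmetic the new sev_gmem_prepare() hook relies on when deciding between a single 2M RMP entry and a 4K one. It assumes the usual x86-64 constants, i.e. 4K pages and 512 PTEs per PMD, so a guest_memfd folio of order >= 9 spans a full 2M range.

/*
 * Standalone illustration (userspace C, hypothetical values; not kernel
 * code): the 2M-vs-4K decision and alignment math used by the new
 * sev_gmem_prepare() hook.  Constants mirror x86-64: 4K pages, 512
 * PTEs per PMD, so a gmem folio of order >= 9 spans a full 2M range.
 */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT	12
#define PTRS_PER_PMD	512	/* 2M / 4K */
#define ORDER_2M	9	/* KVM_HPAGE_GFN_SHIFT(PG_LEVEL_2M) */

/* Largest RMP level a folio of this order could ever use. */
static const char *max_level_for_order(int order)
{
	return order >= ORDER_2M ? "2M" : "4K";
}

int main(void)
{
	uint64_t pfn = 0x123456, gfn = 0xabcde;	/* made-up example values */
	int order = 9;				/* 512-page (2M) folio   */

	/* Base of the 2M range a single large RMP entry would cover. */
	uint64_t pfn_aligned = pfn & ~(uint64_t)(PTRS_PER_PMD - 1);
	uint64_t gfn_aligned = gfn & ~(uint64_t)(PTRS_PER_PMD - 1);

	printf("order %d -> max RMP level %s\n", order, max_level_for_order(order));
	printf("pfn 0x%llx -> 2M-aligned pfn 0x%llx\n",
	       (unsigned long long)pfn, (unsigned long long)pfn_aligned);
	printf("gfn 0x%llx -> 2M-aligned gpa 0x%llx\n",
	       (unsigned long long)gfn,
	       (unsigned long long)(gfn_aligned << PAGE_SHIFT));
	return 0;
}

In the actual hook below, the 2M path is only taken when snp_lookup_rmpentry() reports the entire 2M-aligned PFN range as shared; otherwise rmp_make_private() is called with a single 4K entry for the faulting PFN.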

Reviewed-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Michael Roth <michael.roth@amd.com>
Message-ID: <20240501085210.2213060-14-michael.roth@amd.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
arch/x86/kvm/Kconfig
arch/x86/kvm/svm/sev.c
arch/x86/kvm/svm/svm.c
arch/x86/kvm/svm/svm.h
arch/x86/kvm/x86.c
virt/kvm/guest_memfd.c

arch/x86/kvm/Kconfig
index 5e72faca4e8f934bd6832c203b27245b95459547..10768f13b240be5f12381d3cd50f310b4b2bea2e 100644 (file)
@@ -137,6 +137,7 @@ config KVM_AMD_SEV
        depends on CRYPTO_DEV_SP_PSP && !(KVM_AMD=y && CRYPTO_DEV_CCP_DD=m)
        select ARCH_HAS_CC_PLATFORM
        select KVM_GENERIC_PRIVATE_MEM
+       select HAVE_KVM_GMEM_PREPARE
        help
          Provides support for launching Encrypted VMs (SEV) and Encrypted VMs
          with Encrypted State (SEV-ES) on AMD processors.
arch/x86/kvm/svm/sev.c
index 0c1108ffc4ae72a3dbf1ddd03e707483907a92f1..0ed6b96c01c352d6be1aecb3fc7915c1fcae35fd 100644 (file)
@@ -4565,3 +4565,101 @@ out:
 out_no_trace:
        put_page(pfn_to_page(pfn));
 }
+
+static bool is_pfn_range_shared(kvm_pfn_t start, kvm_pfn_t end)
+{
+       kvm_pfn_t pfn = start;
+
+       while (pfn < end) {
+               int ret, rmp_level;
+               bool assigned;
+
+               ret = snp_lookup_rmpentry(pfn, &assigned, &rmp_level);
+               if (ret) {
+                       pr_warn_ratelimited("SEV: Failed to retrieve RMP entry: PFN 0x%llx GFN start 0x%llx GFN end 0x%llx RMP level %d error %d\n",
+                                           pfn, start, end, rmp_level, ret);
+                       return false;
+               }
+
+               if (assigned) {
+                       pr_debug("%s: overlap detected, PFN 0x%llx start 0x%llx end 0x%llx RMP level %d\n",
+                                __func__, pfn, start, end, rmp_level);
+                       return false;
+               }
+
+               pfn++;
+       }
+
+       return true;
+}
+
+static u8 max_level_for_order(int order)
+{
+       if (order >= KVM_HPAGE_GFN_SHIFT(PG_LEVEL_2M))
+               return PG_LEVEL_2M;
+
+       return PG_LEVEL_4K;
+}
+
+static bool is_large_rmp_possible(struct kvm *kvm, kvm_pfn_t pfn, int order)
+{
+       kvm_pfn_t pfn_aligned = ALIGN_DOWN(pfn, PTRS_PER_PMD);
+
+       /*
+        * If this is a large folio, and the entire 2M range containing the
+        * PFN is currently shared, then the entire 2M-aligned range can be
+        * set to private via a single 2M RMP entry.
+        */
+       if (max_level_for_order(order) > PG_LEVEL_4K &&
+           is_pfn_range_shared(pfn_aligned, pfn_aligned + PTRS_PER_PMD))
+               return true;
+
+       return false;
+}
+
+int sev_gmem_prepare(struct kvm *kvm, kvm_pfn_t pfn, gfn_t gfn, int max_order)
+{
+       struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
+       kvm_pfn_t pfn_aligned;
+       gfn_t gfn_aligned;
+       int level, rc;
+       bool assigned;
+
+       if (!sev_snp_guest(kvm))
+               return 0;
+
+       rc = snp_lookup_rmpentry(pfn, &assigned, &level);
+       if (rc) {
+               pr_err_ratelimited("SEV: Failed to look up RMP entry: GFN %llx PFN %llx error %d\n",
+                                  gfn, pfn, rc);
+               return -ENOENT;
+       }
+
+       if (assigned) {
+               pr_debug("%s: already assigned: gfn %llx pfn %llx max_order %d level %d\n",
+                        __func__, gfn, pfn, max_order, level);
+               return 0;
+       }
+
+       if (is_large_rmp_possible(kvm, pfn, max_order)) {
+               level = PG_LEVEL_2M;
+               pfn_aligned = ALIGN_DOWN(pfn, PTRS_PER_PMD);
+               gfn_aligned = ALIGN_DOWN(gfn, PTRS_PER_PMD);
+       } else {
+               level = PG_LEVEL_4K;
+               pfn_aligned = pfn;
+               gfn_aligned = gfn;
+       }
+
+       rc = rmp_make_private(pfn_aligned, gfn_to_gpa(gfn_aligned), level, sev->asid, false);
+       if (rc) {
+               pr_err_ratelimited("SEV: Failed to update RMP entry: GFN %llx PFN %llx level %d error %d\n",
+                                  gfn, pfn, level, rc);
+               return -EINVAL;
+       }
+
+       pr_debug("%s: updated: gfn %llx pfn %llx pfn_aligned %llx max_order %d level %d\n",
+                __func__, gfn, pfn, pfn_aligned, max_order, level);
+
+       return 0;
+}
arch/x86/kvm/svm/svm.c
index 546656606b44410c7e5305fc9025ae69274a773e..b9ecc06f893423130d4342cd3c7fb9f5837c48e1 100644 (file)
@@ -5081,6 +5081,8 @@ static struct kvm_x86_ops svm_x86_ops __initdata = {
        .vcpu_deliver_sipi_vector = svm_vcpu_deliver_sipi_vector,
        .vcpu_get_apicv_inhibit_reasons = avic_vcpu_get_apicv_inhibit_reasons,
        .alloc_apic_backing_page = svm_alloc_apic_backing_page,
+
+       .gmem_prepare = sev_gmem_prepare,
 };
 
 /*
arch/x86/kvm/svm/svm.h
index 926bfce571a6e10822e95bf695b7ba600ad31204..4203bd9012e9a4e55c0995ed92bb6297ee39fa2d 100644 (file)
@@ -736,6 +736,7 @@ extern unsigned int max_sev_asid;
 void sev_handle_rmp_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u64 error_code);
 void sev_vcpu_unblocking(struct kvm_vcpu *vcpu);
 void sev_snp_init_protected_guest_state(struct kvm_vcpu *vcpu);
+int sev_gmem_prepare(struct kvm *kvm, kvm_pfn_t pfn, gfn_t gfn, int max_order);
 #else
 static inline struct page *snp_safe_alloc_page(struct kvm_vcpu *vcpu) {
        return alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO);
@@ -752,6 +753,10 @@ static inline int sev_dev_get_attr(u32 group, u64 attr, u64 *val) { return -ENXI
 static inline void sev_handle_rmp_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u64 error_code) {}
 static inline void sev_vcpu_unblocking(struct kvm_vcpu *vcpu) {}
 static inline void sev_snp_init_protected_guest_state(struct kvm_vcpu *vcpu) {}
+static inline int sev_gmem_prepare(struct kvm *kvm, kvm_pfn_t pfn, gfn_t gfn, int max_order)
+{
+       return 0;
+}
 
 #endif
 
arch/x86/kvm/x86.c
index d29477910ffa17d3169979a4a53bf3f43ba111a1..d750546ec934016f89968d294f0253861d1c2a21 100644 (file)
@@ -13611,6 +13611,11 @@ bool kvm_arch_no_poll(struct kvm_vcpu *vcpu)
 EXPORT_SYMBOL_GPL(kvm_arch_no_poll);
 
 #ifdef CONFIG_HAVE_KVM_GMEM_PREPARE
+bool kvm_arch_gmem_prepare_needed(struct kvm *kvm)
+{
+       return kvm->arch.vm_type == KVM_X86_SNP_VM;
+}
+
 int kvm_arch_gmem_prepare(struct kvm *kvm, gfn_t gfn, kvm_pfn_t pfn, int max_order)
 {
        return static_call(kvm_x86_gmem_prepare)(kvm, pfn, gfn, max_order);
virt/kvm/guest_memfd.c
index dfe50c64a552e4c5eea5feae0976cadae3056c87..9714add388525b9d4e71ea514c48cc409cd32c64 100644 (file)
@@ -39,8 +39,8 @@ static int kvm_gmem_prepare_folio(struct inode *inode, pgoff_t index, struct fol
                gfn = slot->base_gfn + index - slot->gmem.pgoff;
                rc = kvm_arch_gmem_prepare(kvm, gfn, pfn, compound_order(compound_head(page)));
                if (rc) {
-                       pr_warn_ratelimited("gmem: Failed to prepare folio for index %lx, error %d.\n",
-                                           index, rc);
+                       pr_warn_ratelimited("gmem: Failed to prepare folio for index %lx GFN %llx PFN %llx error %d.\n",
+                                           index, gfn, pfn, rc);
                        return rc;
                }
        }