]> git.ipfire.org Git - thirdparty/linux.git/commitdiff
KVM: SEV: Use long-term pin when registering encrypted memory regions
authorGe Yang <yangge1116@126.com>
Tue, 11 Feb 2025 02:37:03 +0000 (10:37 +0800)
committerSean Christopherson <seanjc@google.com>
Wed, 12 Feb 2025 18:50:42 +0000 (10:50 -0800)
When registering an encrypted memory region for SEV/SEV-ES guests,
pin the pages with FOLL_LONGTERM so that the pages are migrated out of
MIGRATE_CMA/ZONE_MOVABLE.  Failure to do so violates the CMA/MOVABLE
mechanisms and can result in fragmentation due to unmovable pages, e.g.
can make CMA allocations fail.

Signed-off-by: Ge Yang <yangge1116@126.com>
Reviewed-by: Tom Lendacky <thomas.lendacky@amd.com>
Acked-by: David Hildenbrand <david@redhat.com>
Link: https://lore.kernel.org/r/1739241423-14326-1-git-send-email-yangge1116@126.com
[sean: massage changelog, make @flags an unsigned int]
Signed-off-by: Sean Christopherson <seanjc@google.com>
arch/x86/kvm/svm/sev.c

index 01ae6e08fb21c4721a08b232f19064b0d00a82a4..602cb51f95fe46783e560e8f24cc18b8eb7290eb 100644 (file)
@@ -619,7 +619,7 @@ e_free_dh:
 
 static struct page **sev_pin_memory(struct kvm *kvm, unsigned long uaddr,
                                    unsigned long ulen, unsigned long *n,
-                                   int write)
+                                   unsigned int flags)
 {
        struct kvm_sev_info *sev = to_kvm_sev_info(kvm);
        unsigned long npages, size;
@@ -660,7 +660,7 @@ static struct page **sev_pin_memory(struct kvm *kvm, unsigned long uaddr,
                return ERR_PTR(-ENOMEM);
 
        /* Pin the user virtual address. */
-       npinned = pin_user_pages_fast(uaddr, npages, write ? FOLL_WRITE : 0, pages);
+       npinned = pin_user_pages_fast(uaddr, npages, flags, pages);
        if (npinned != npages) {
                pr_err("SEV: Failure locking %lu pages.\n", npages);
                ret = -ENOMEM;
@@ -745,7 +745,7 @@ static int sev_launch_update_data(struct kvm *kvm, struct kvm_sev_cmd *argp)
        vaddr_end = vaddr + size;
 
        /* Lock the user memory. */
-       inpages = sev_pin_memory(kvm, vaddr, size, &npages, 1);
+       inpages = sev_pin_memory(kvm, vaddr, size, &npages, FOLL_WRITE);
        if (IS_ERR(inpages))
                return PTR_ERR(inpages);
 
@@ -1240,7 +1240,7 @@ static int sev_dbg_crypt(struct kvm *kvm, struct kvm_sev_cmd *argp, bool dec)
                if (IS_ERR(src_p))
                        return PTR_ERR(src_p);
 
-               dst_p = sev_pin_memory(kvm, dst_vaddr & PAGE_MASK, PAGE_SIZE, &n, 1);
+               dst_p = sev_pin_memory(kvm, dst_vaddr & PAGE_MASK, PAGE_SIZE, &n, FOLL_WRITE);
                if (IS_ERR(dst_p)) {
                        sev_unpin_memory(kvm, src_p, n);
                        return PTR_ERR(dst_p);
@@ -1305,7 +1305,7 @@ static int sev_launch_secret(struct kvm *kvm, struct kvm_sev_cmd *argp)
        if (copy_from_user(&params, u64_to_user_ptr(argp->data), sizeof(params)))
                return -EFAULT;
 
-       pages = sev_pin_memory(kvm, params.guest_uaddr, params.guest_len, &n, 1);
+       pages = sev_pin_memory(kvm, params.guest_uaddr, params.guest_len, &n, FOLL_WRITE);
        if (IS_ERR(pages))
                return PTR_ERR(pages);
 
@@ -1779,7 +1779,7 @@ static int sev_receive_update_data(struct kvm *kvm, struct kvm_sev_cmd *argp)
 
        /* Pin guest memory */
        guest_page = sev_pin_memory(kvm, params.guest_uaddr & PAGE_MASK,
-                                   PAGE_SIZE, &n, 1);
+                                   PAGE_SIZE, &n, FOLL_WRITE);
        if (IS_ERR(guest_page)) {
                ret = PTR_ERR(guest_page);
                goto e_free_trans;
@@ -2675,7 +2675,8 @@ int sev_mem_enc_register_region(struct kvm *kvm,
                return -ENOMEM;
 
        mutex_lock(&kvm->lock);
-       region->pages = sev_pin_memory(kvm, range->addr, range->size, &region->npages, 1);
+       region->pages = sev_pin_memory(kvm, range->addr, range->size, &region->npages,
+                                      FOLL_WRITE | FOLL_LONGTERM);
        if (IS_ERR(region->pages)) {
                ret = PTR_ERR(region->pages);
                mutex_unlock(&kvm->lock);