]> git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
KVM: guest_memfd: Invalidate SHARED GPAs if gmem supports INIT_SHARED
authorSean Christopherson <seanjc@google.com>
Fri, 3 Oct 2025 23:25:56 +0000 (16:25 -0700)
committerSean Christopherson <seanjc@google.com>
Fri, 10 Oct 2025 21:25:24 +0000 (14:25 -0700)
When invalidating gmem ranges, e.g. in response to PUNCH_HOLE, process all
possible range types (PRIVATE vs. SHARED) for the gmem instance.  Since
guest_memfd doesn't yet support in-place conversions, simply pivot
on INIT_SHARED as a gmem instance can currently only have private or shared
memory, not both.

Failure to mark shared GPAs for invalidation is benign in the current code
base, as only x86's TDX consumes KVM_FILTER_{PRIVATE,SHARED}, and TDX
doesn't yet support INIT_SHARED with guest_memfd.  However, invalidating
only private GPAs is conceptually wrong and a lurking bug, e.g. could
result in missed invalidations if ARM starts filtering invalidations based
on attributes.

Fixes: 3d3a04fad25a ("KVM: Allow and advertise support for host mmap() on guest_memfd files")
Reviewed-by: Ackerley Tng <ackerleytng@google.com>
Reviewed-by: David Hildenbrand <david@redhat.com>
Link: https://lore.kernel.org/r/20251003232606.4070510-4-seanjc@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
virt/kvm/guest_memfd.c

index cf3afba23a6be83d627bf29f353a9be70eecd4f2..e10d2c71e78c458a52f20ae6db8f2d486781ef50 100644 (file)
@@ -102,8 +102,17 @@ static struct folio *kvm_gmem_get_folio(struct inode *inode, pgoff_t index)
        return filemap_grab_folio(inode->i_mapping, index);
 }
 
-static void kvm_gmem_invalidate_begin(struct kvm_gmem *gmem, pgoff_t start,
-                                     pgoff_t end)
+static enum kvm_gfn_range_filter kvm_gmem_get_invalidate_filter(struct inode *inode)
+{
+       if ((u64)inode->i_private & GUEST_MEMFD_FLAG_INIT_SHARED)
+               return KVM_FILTER_SHARED;
+
+       return KVM_FILTER_PRIVATE;
+}
+
+static void __kvm_gmem_invalidate_begin(struct kvm_gmem *gmem, pgoff_t start,
+                                       pgoff_t end,
+                                       enum kvm_gfn_range_filter attr_filter)
 {
        bool flush = false, found_memslot = false;
        struct kvm_memory_slot *slot;
@@ -118,8 +127,7 @@ static void kvm_gmem_invalidate_begin(struct kvm_gmem *gmem, pgoff_t start,
                        .end = slot->base_gfn + min(pgoff + slot->npages, end) - pgoff,
                        .slot = slot,
                        .may_block = true,
-                       /* guest memfd is relevant to only private mappings. */
-                       .attr_filter = KVM_FILTER_PRIVATE,
+                       .attr_filter = attr_filter,
                };
 
                if (!found_memslot) {
@@ -139,8 +147,21 @@ static void kvm_gmem_invalidate_begin(struct kvm_gmem *gmem, pgoff_t start,
                KVM_MMU_UNLOCK(kvm);
 }
 
-static void kvm_gmem_invalidate_end(struct kvm_gmem *gmem, pgoff_t start,
-                                   pgoff_t end)
+static void kvm_gmem_invalidate_begin(struct inode *inode, pgoff_t start,
+                                     pgoff_t end)
+{
+       struct list_head *gmem_list = &inode->i_mapping->i_private_list;
+       enum kvm_gfn_range_filter attr_filter;
+       struct kvm_gmem *gmem;
+
+       attr_filter = kvm_gmem_get_invalidate_filter(inode);
+
+       list_for_each_entry(gmem, gmem_list, entry)
+               __kvm_gmem_invalidate_begin(gmem, start, end, attr_filter);
+}
+
+static void __kvm_gmem_invalidate_end(struct kvm_gmem *gmem, pgoff_t start,
+                                     pgoff_t end)
 {
        struct kvm *kvm = gmem->kvm;
 
@@ -151,12 +172,20 @@ static void kvm_gmem_invalidate_end(struct kvm_gmem *gmem, pgoff_t start,
        }
 }
 
-static long kvm_gmem_punch_hole(struct inode *inode, loff_t offset, loff_t len)
+static void kvm_gmem_invalidate_end(struct inode *inode, pgoff_t start,
+                                   pgoff_t end)
 {
        struct list_head *gmem_list = &inode->i_mapping->i_private_list;
+       struct kvm_gmem *gmem;
+
+       list_for_each_entry(gmem, gmem_list, entry)
+               __kvm_gmem_invalidate_end(gmem, start, end);
+}
+
+static long kvm_gmem_punch_hole(struct inode *inode, loff_t offset, loff_t len)
+{
        pgoff_t start = offset >> PAGE_SHIFT;
        pgoff_t end = (offset + len) >> PAGE_SHIFT;
-       struct kvm_gmem *gmem;
 
        /*
         * Bindings must be stable across invalidation to ensure the start+end
@@ -164,13 +193,11 @@ static long kvm_gmem_punch_hole(struct inode *inode, loff_t offset, loff_t len)
         */
        filemap_invalidate_lock(inode->i_mapping);
 
-       list_for_each_entry(gmem, gmem_list, entry)
-               kvm_gmem_invalidate_begin(gmem, start, end);
+       kvm_gmem_invalidate_begin(inode, start, end);
 
        truncate_inode_pages_range(inode->i_mapping, offset, offset + len - 1);
 
-       list_for_each_entry(gmem, gmem_list, entry)
-               kvm_gmem_invalidate_end(gmem, start, end);
+       kvm_gmem_invalidate_end(inode, start, end);
 
        filemap_invalidate_unlock(inode->i_mapping);
 
@@ -280,8 +307,9 @@ static int kvm_gmem_release(struct inode *inode, struct file *file)
         * Zap all SPTEs pointed at by this file.  Do not free the backing
         * memory, as its lifetime is associated with the inode, not the file.
         */
-       kvm_gmem_invalidate_begin(gmem, 0, -1ul);
-       kvm_gmem_invalidate_end(gmem, 0, -1ul);
+       __kvm_gmem_invalidate_begin(gmem, 0, -1ul,
+                                   kvm_gmem_get_invalidate_filter(inode));
+       __kvm_gmem_invalidate_end(gmem, 0, -1ul);
 
        list_del(&gmem->entry);
 
@@ -403,8 +431,6 @@ static int kvm_gmem_migrate_folio(struct address_space *mapping,
 
 static int kvm_gmem_error_folio(struct address_space *mapping, struct folio *folio)
 {
-       struct list_head *gmem_list = &mapping->i_private_list;
-       struct kvm_gmem *gmem;
        pgoff_t start, end;
 
        filemap_invalidate_lock_shared(mapping);
@@ -412,8 +438,7 @@ static int kvm_gmem_error_folio(struct address_space *mapping, struct folio *fol
        start = folio->index;
        end = start + folio_nr_pages(folio);
 
-       list_for_each_entry(gmem, gmem_list, entry)
-               kvm_gmem_invalidate_begin(gmem, start, end);
+       kvm_gmem_invalidate_begin(mapping->host, start, end);
 
        /*
         * Do not truncate the range, what action is taken in response to the
@@ -424,8 +449,7 @@ static int kvm_gmem_error_folio(struct address_space *mapping, struct folio *fol
         * error to userspace.
         */
 
-       list_for_each_entry(gmem, gmem_list, entry)
-               kvm_gmem_invalidate_end(gmem, start, end);
+       kvm_gmem_invalidate_end(mapping->host, start, end);
 
        filemap_invalidate_unlock_shared(mapping);