Let's centralize it by allowing the driver to enable this handling
through a new flag (bool for now) in the balloon device info.
Note that we now adjust the counter when adding/removing a page to/from
the balloon list: when removing a page to deflate it, the adjustment now
happens before the driver communicates with the hypervisor, not
afterwards.  This shouldn't make a difference in practice.
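
For illustration, a driver that wants the balloon core to perform the
accounting would simply set the new flag after initializing its
balloon_dev_info.  A minimal sketch (the "foo" driver below is made up
for illustration and is not part of this patch):

    #include <linux/balloon_compaction.h>

    static struct balloon_dev_info foo_dev_info;

    static int foo_balloon_init(void)
    {
            balloon_devinfo_init(&foo_dev_info);
            /* Let the balloon core adjust the managed page count. */
            foo_dev_info.adjust_managed_page_count = true;
            return 0;
    }

    static void foo_inflate_one(void)
    {
            struct page *page = balloon_page_alloc();

            if (!page)
                    return;
            /*
             * The core now calls adjust_managed_page_count(page, -1)
             * when the page is added to the balloon list.
             */
            balloon_page_enqueue(&foo_dev_info, page);
    }
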
Link: https://lkml.kernel.org/r/20260119230133.3551867-7-david@kernel.org
Signed-off-by: David Hildenbrand (Red Hat) <david@kernel.org>
Acked-by: Liam R. Howlett <Liam.Howlett@oracle.com>
Acked-by: Michael S. Tsirkin <mst@redhat.com>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Christophe Leroy <christophe.leroy@csgroup.eu>
Cc: Eugenio Pérez <eperezma@redhat.com>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: Jason Wang <jasowang@redhat.com>
Cc: Jerrin Shaji George <jerrin.shaji-george@broadcom.com>
Cc: Jonathan Corbet <corbet@lwn.net>
Cc: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Cc: Madhavan Srinivasan <maddy@linux.ibm.com>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Mike Rapoport <rppt@kernel.org>
Cc: Nicholas Piggin <npiggin@gmail.com>
Cc: Oscar Salvador <osalvador@suse.de>
Cc: SeongJae Park <sj@kernel.org>
Cc: Suren Baghdasaryan <surenb@google.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
Cc: Zi Yan <ziy@nvidia.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
balloon_page_enqueue(&b_dev_info, page);
atomic_long_inc(&loaned_pages);
- adjust_managed_page_count(page, -1);
nr--;
}
if (!page)
break;
plpar_page_set_active(page);
- adjust_managed_page_count(page, 1);
__free_page(page);
atomic_long_dec(&loaned_pages);
nr--;
return -EBUSY;
}
- /*
- * When we migrate a page to a different zone, we have to fixup the
- * count of both involved zones as we adjusted the managed page count
- * when inflating.
- */
- if (page_zone(page) != page_zone(newpage)) {
- adjust_managed_page_count(page, 1);
- adjust_managed_page_count(newpage, -1);
- }
-
/*
* activate/"deflate" the old page. We ignore any errors just like the
* other callers.
return -EOPNOTSUPP;
balloon_devinfo_init(&b_dev_info);
+ b_dev_info.adjust_managed_page_count = true;
if (IS_ENABLED(CONFIG_BALLOON_COMPACTION))
b_dev_info.migratepage = cmm_migratepage;
set_page_pfns(vb, vb->pfns + vb->num_pfns, page);
vb->num_pages += VIRTIO_BALLOON_PAGES_PER_PAGE;
- if (!virtio_has_feature(vb->vdev,
- VIRTIO_BALLOON_F_DEFLATE_ON_OOM))
- adjust_managed_page_count(page, -1);
vb->num_pfns += VIRTIO_BALLOON_PAGES_PER_PAGE;
}
struct page *page, *next;
list_for_each_entry_safe(page, next, pages, lru) {
- if (!virtio_has_feature(vb->vdev,
- VIRTIO_BALLOON_F_DEFLATE_ON_OOM))
- adjust_managed_page_count(page, 1);
list_del(&page->lru);
put_page(page); /* balloon reference */
}
if (!mutex_trylock(&vb->balloon_lock))
return -EAGAIN;
- /*
- * When we migrate a page to a different zone and adjusted the
- * managed page count when inflating, we have to fixup the count of
- * both involved zones.
- */
- if (!virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_DEFLATE_ON_OOM) &&
- page_zone(page) != page_zone(newpage)) {
- adjust_managed_page_count(page, 1);
- adjust_managed_page_count(newpage, -1);
- }
-
/* balloon's page migration 1st step -- inflate "newpage" */
vb->num_pfns = VIRTIO_BALLOON_PAGES_PER_PAGE;
set_page_pfns(vb, vb->pfns, newpage);
if (err)
goto out_free_vb;
+ if (!virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_DEFLATE_ON_OOM))
+ vb->vb_dev_info.adjust_managed_page_count = true;
#ifdef CONFIG_BALLOON_COMPACTION
vb->vb_dev_info.migratepage = virtballoon_migratepage;
#endif
struct list_head pages; /* Pages enqueued & handled to Host */
int (*migratepage)(struct balloon_dev_info *, struct page *newpage,
struct page *page, enum migrate_mode mode);
+ bool adjust_managed_page_count;
};
extern struct page *balloon_page_alloc(void);
spin_lock_init(&balloon->pages_lock);
INIT_LIST_HEAD(&balloon->pages);
balloon->migratepage = NULL;
+ balloon->adjust_managed_page_count = false;
}
#ifdef CONFIG_BALLOON_COMPACTION
BUG_ON(!trylock_page(page));
balloon_page_insert(b_dev_info, page);
unlock_page(page);
+ if (b_dev_info->adjust_managed_page_count)
+ adjust_managed_page_count(page, -1);
__count_vm_event(BALLOON_INFLATE);
inc_node_page_state(page, NR_BALLOON_PAGES);
}
continue;
list_del(&page->lru);
+ if (b_dev_info->adjust_managed_page_count)
+ adjust_managed_page_count(page, 1);
balloon_page_finalize(page);
__count_vm_event(BALLOON_DEFLATE);
list_add(&page->lru, pages);
get_page(newpage);
balloon_page_insert(b_dev_info, newpage);
__count_vm_event(BALLOON_MIGRATE);
+
+ if (b_dev_info->adjust_managed_page_count &&
+ page_zone(page) != page_zone(newpage)) {
+ /*
+ * When we migrate a page to a different zone we
+ * have to fixup the count of both involved zones.
+ */
+ adjust_managed_page_count(page, 1);
+ adjust_managed_page_count(newpage, -1);
+ }
} else {
/* Old page was deflated but new page not inflated. */
__count_vm_event(BALLOON_DEFLATE);
+
+ if (b_dev_info->adjust_managed_page_count)
+ adjust_managed_page_count(page, 1);
}
b_dev_info->isolated_pages--;