/**
* @huge_pages - list of the inflated 2MB pages.
*
- * Protected by @b_dev_info.pages_lock .
+ * Protected by @huge_pages_lock.
*/
struct list_head huge_pages;
+ /**
+ * @huge_pages_lock: protects @huge_pages, the list of inflated 2MB pages.
+ */
+ spinlock_t huge_pages_lock;
+
/**
* @vmci_doorbell.
*
unsigned int *n_pages,
enum vmballoon_page_size_type page_size)
{
- unsigned long flags;
struct page *page;
if (page_size == VMW_BALLOON_4K_PAGE) {
balloon_page_list_enqueue(&b->b_dev_info, pages);
} else {
/*
* Keep the huge pages in a local list which is not available
- * for the balloon compaction mechanism.
+ * for balloon page migration.
*/
- spin_lock_irqsave(&b->b_dev_info.pages_lock, flags);
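+ /*
+ * @huge_pages is only touched from process context, so the
+ * IRQ-disabling lock variant is not required.
+ */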
+ spin_lock(&b->huge_pages_lock);
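+ /* Mark each 2MB page PG_offline so that e.g. dump tools skip it. */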
list_for_each_entry(page, pages, lru) {
vmballoon_mark_page_offline(page, VMW_BALLOON_2M_PAGE);
}

list_splice_init(pages, &b->huge_pages);
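+ /* Account the inflation with the VM event counters, in 4KB frames. */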
__count_vm_events(BALLOON_INFLATE, *n_pages *
vmballoon_page_in_frames(VMW_BALLOON_2M_PAGE));
- spin_unlock_irqrestore(&b->b_dev_info.pages_lock, flags);
+ spin_unlock(&b->huge_pages_lock);
}
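+ /* Both branches consumed the whole list; reset the caller's count. */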
*n_pages = 0;
{
struct page *page, *tmp;
unsigned int i = 0;
- unsigned long flags;
/* In the case of 4k pages, use the compaction infrastructure */
if (page_size == VMW_BALLOON_4K_PAGE) {
*n_pages = balloon_page_list_dequeue(&b->b_dev_info, pages, n_req_pages);
return;
}

/* 2MB pages */
- spin_lock_irqsave(&b->b_dev_info.pages_lock, flags);
+ spin_lock(&b->huge_pages_lock);
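+ /*
+ * Detach up to n_req_pages 2MB pages, clearing PG_offline as
+ * they leave the balloon.
+ */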
list_for_each_entry_safe(page, tmp, &b->huge_pages, lru) {
vmballoon_mark_page_online(page, VMW_BALLOON_2M_PAGE);

list_move(&page->lru, pages);
if (++i == n_req_pages)
break;
}

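+ /* i counts the dequeued 2MB pages; report them in 4KB frames. */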
__count_vm_events(BALLOON_DEFLATE,
i * vmballoon_page_in_frames(VMW_BALLOON_2M_PAGE));
- spin_unlock_irqrestore(&b->b_dev_info.pages_lock, flags);
+ spin_unlock(&b->huge_pages_lock);
*n_pages = i;
}
balloon.b_dev_info.migratepage = vmballoon_migratepage;
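+ /*
+ * Only 4KB pages go through the balloon_compaction migration
+ * callback; 2MB pages live on the local @huge_pages list under
+ * @huge_pages_lock.
+ */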
INIT_LIST_HEAD(&balloon.huge_pages);
+ spin_lock_init(&balloon.huge_pages_lock);
spin_lock_init(&balloon.comm_lock);
init_rwsem(&balloon.conf_sem);
balloon.vmci_doorbell = VMCI_INVALID_HANDLE;