mm: code cleanup for MADV_FREE
author Huang Ying <ying.huang@intel.com>
Tue, 7 Apr 2020 03:04:41 +0000 (20:04 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Tue, 7 Apr 2020 17:43:38 +0000 (10:43 -0700)
Some comments for MADV_FREE are revised and added to help people understand
the MADV_FREE code, especially the page flag PG_swapbacked.  This makes
page_is_file_cache() inconsistent with its comments, so the function is
renamed to page_is_file_lru() to make them consistent again.  All these
changes are put in one patch as one logical change.
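
For context, the semantics being documented: MADV_FREE (available since Linux
4.5) marks clean anonymous pages as lazily freeable.  A minimal userspace
sketch, illustrative only and not part of this patch (it assumes a libc that
defines MADV_FREE):

#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
	size_t len = 4096;
	char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED) {
		perror("mmap");
		return 1;
	}

	memset(p, 0xaa, len);		/* dirty the anonymous page */

	/*
	 * Mark the range lazily freeable: the kernel clears PG_swapbacked
	 * on the pages and may later discard them instead of swapping them
	 * out.  Until then, reads still see the old data; after reclaim
	 * they see zeroes.  A new write cancels the lazy free.
	 */
	if (madvise(p, len, MADV_FREE))
		perror("madvise(MADV_FREE)");

	printf("first byte: 0x%02x (old data, or 0 if already reclaimed)\n",
	       (unsigned char)p[0]);
	munmap(p, len);
	return 0;
}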

Suggested-by: David Hildenbrand <david@redhat.com>
Suggested-by: Johannes Weiner <hannes@cmpxchg.org>
Suggested-by: David Rientjes <rientjes@google.com>
Signed-off-by: "Huang, Ying" <ying.huang@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Acked-by: David Rientjes <rientjes@google.com>
Acked-by: Michal Hocko <mhocko@kernel.org>
Acked-by: Pankaj Gupta <pankaj.gupta.linux@gmail.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Hugh Dickins <hughd@google.com>
Cc: Rik van Riel <riel@surriel.com>
Link: http://lkml.kernel.org/r/20200317100342.2730705-1-ying.huang@intel.com
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
13 files changed:
include/linux/mm_inline.h
include/linux/page-flags.h
include/trace/events/vmscan.h
mm/compaction.c
mm/gup.c
mm/khugepaged.c
mm/memory-failure.c
mm/memory_hotplug.c
mm/mempolicy.c
mm/migrate.c
mm/mprotect.c
mm/swap.c
mm/vmscan.c

index 6f2fef7b0784e0eae3cfb9220d289535e331ec45..219bef41d87c334816d761b6f9b93b31bde99b54 100644 (file)
--- a/include/linux/mm_inline.h
+++ b/include/linux/mm_inline.h
@@ -6,19 +6,20 @@
 #include <linux/swap.h>
 
 /**
- * page_is_file_cache - should the page be on a file LRU or anon LRU?
+ * page_is_file_lru - should the page be on a file LRU or anon LRU?
  * @page: the page to test
  *
- * Returns 1 if @page is page cache page backed by a regular filesystem,
- * or 0 if @page is anonymous, tmpfs or otherwise ram or swap backed.
- * Used by functions that manipulate the LRU lists, to sort a page
- * onto the right LRU list.
+ * Returns 1 if @page is a regular filesystem backed page cache page or a lazily
+ * freed anonymous page (e.g. via MADV_FREE).  Returns 0 if @page is a normal
+ * anonymous page, a tmpfs page or an otherwise ram or swap backed page.  Used
+ * by functions that manipulate the LRU lists, to sort a page onto the right
+ * LRU list.
  *
  * We would like to get this info without a page flag, but the state
  * needs to survive until the page is last deleted from the LRU, which
  * could be as far down as __page_cache_release.
  */
-static inline int page_is_file_cache(struct page *page)
+static inline int page_is_file_lru(struct page *page)
 {
        return !PageSwapBacked(page);
 }
@@ -75,7 +76,7 @@ static __always_inline void del_page_from_lru_list(struct page *page,
  */
 static inline enum lru_list page_lru_base_type(struct page *page)
 {
-       if (page_is_file_cache(page))
+       if (page_is_file_lru(page))
                return LRU_INACTIVE_FILE;
        return LRU_INACTIVE_ANON;
 }
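
The effect of the rename is easiest to see in miniature.  Below is a
compilable userspace model of the predicate (an editor's sketch, not kernel
code; struct page_model is invented for illustration): anything without
PG_swapbacked sorts onto the file LRU, which after MADV_FREE includes
anonymous pages.

#include <stdbool.h>
#include <stdio.h>

/* Toy stand-in for struct page, carrying only the flag the predicate reads. */
struct page_model {
	bool swapbacked;	/* models PG_swapbacked */
};

/* Mirrors page_is_file_lru(): not swap-backed means "file LRU". */
static bool model_page_is_file_lru(const struct page_model *page)
{
	return !page->swapbacked;
}

int main(void)
{
	struct page_model file_cache    = { .swapbacked = false };
	struct page_model normal_anon   = { .swapbacked = true };
	struct page_model lazyfree_anon = { .swapbacked = false }; /* post MADV_FREE */

	printf("file cache    on file LRU: %d\n", model_page_is_file_lru(&file_cache));
	printf("normal anon   on file LRU: %d\n", model_page_is_file_lru(&normal_anon));
	printf("lazyfree anon on file LRU: %d\n", model_page_is_file_lru(&lazyfree_anon));
	return 0;
}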
index 77de28bfefb0186a2516f643f112a73b792cca55..acf7988fd6408d03a7917e822a0a77cdaf6d8a0b 100644 (file)
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
  * page_waitqueue(page) is a wait queue of all tasks waiting for the page
  * to become unlocked.
  *
+ * PG_swapbacked is set when a page uses swap as its backing storage.  These
+ * are usually PageAnon or shmem pages, but note that even anonymous pages
+ * might lose their PG_swapbacked flag when they can simply be dropped (e.g.
+ * as a result of MADV_FREE).
+ *
  * PG_uptodate tells whether the page's contents is valid.  When a read
  * completes, the page becomes uptodate, unless a disk I/O error happened.
  *
index a5ab2973e8dc3bed647a2f099cf0580cd9e5e157..74bb594ccb2580ecf09b52f0c65c5a2bc8a771ec 100644 (file)
--- a/include/trace/events/vmscan.h
+++ b/include/trace/events/vmscan.h
@@ -323,7 +323,7 @@ TRACE_EVENT(mm_vmscan_writepage,
        TP_fast_assign(
                __entry->pfn = page_to_pfn(page);
                __entry->reclaim_flags = trace_reclaim_flags(
-                                               page_is_file_cache(page));
+                                               page_is_file_lru(page));
        ),
 
        TP_printk("page=%p pfn=%lu flags=%s",
index df3da2f76fdc8371a759178c5f3c016a5252d21b..46af63eb821243fcbd573667eb9c8c972810bb80 100644 (file)
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -989,7 +989,7 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
                /* Successfully isolated */
                del_page_from_lru_list(page, lruvec, page_lru(page));
                mod_node_page_state(page_pgdat(page),
-                               NR_ISOLATED_ANON + page_is_file_cache(page),
+                               NR_ISOLATED_ANON + page_is_file_lru(page),
                                hpage_nr_pages(page));
 
 isolate_success:
index 96af7e08db4bb9c114fd15520ee76300f6fbae08..b185377c38b7d2854e13d5c6dbe9e396f9c3aa0b 100644 (file)
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -1677,7 +1677,7 @@ check_again:
                                        list_add_tail(&head->lru, &cma_page_list);
                                        mod_node_page_state(page_pgdat(head),
                                                            NR_ISOLATED_ANON +
-                                                           page_is_file_cache(head),
+                                                           page_is_file_lru(head),
                                                            hpage_nr_pages(head));
                                }
                        }
index b1d9a8e189b8fa3bd579559b1b1eae064e1d72c7..3afc1e2d7a55d89a1df729fda47409b13c4a33bd 100644 (file)
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -511,7 +511,7 @@ void __khugepaged_exit(struct mm_struct *mm)
 
 static void release_pte_page(struct page *page)
 {
-       dec_node_page_state(page, NR_ISOLATED_ANON + page_is_file_cache(page));
+       dec_node_page_state(page, NR_ISOLATED_ANON + page_is_file_lru(page));
        unlock_page(page);
        putback_lru_page(page);
 }
@@ -611,7 +611,7 @@ static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
                        goto out;
                }
                inc_node_page_state(page,
-                               NR_ISOLATED_ANON + page_is_file_cache(page));
+                               NR_ISOLATED_ANON + page_is_file_lru(page));
                VM_BUG_ON_PAGE(!PageLocked(page), page);
                VM_BUG_ON_PAGE(PageLRU(page), page);
 
index 1c961cd26c0b0b5fb0dcef21ac1beff4b794bc97..a96364be8ab40066f7ee121a7895094872e28315 100644 (file)
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -1810,7 +1810,7 @@ static int __soft_offline_page(struct page *page, int flags)
                 */
                if (!__PageMovable(page))
                        inc_node_page_state(page, NR_ISOLATED_ANON +
-                                               page_is_file_cache(page));
+                                               page_is_file_lru(page));
                list_add(&page->lru, &pagelist);
                ret = migrate_pages(&pagelist, new_page, NULL, MPOL_MF_MOVE_ALL,
                                        MIGRATE_SYNC, MR_MEMORY_FAILURE);
index 19389cdc16a502f04726f4e12bab64cb8e4e1879..005eab3411e500b82dbea329c890b25423aa507e 100644 (file)
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -1317,7 +1317,7 @@ do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
                        list_add_tail(&page->lru, &source);
                        if (!__PageMovable(page))
                                inc_node_page_state(page, NR_ISOLATED_ANON +
-                                                   page_is_file_cache(page));
+                                                   page_is_file_lru(page));
 
                } else {
                        pr_warn("failed to isolate pfn %lx\n", pfn);
index b36926ba02e209f10704b89293ede280202bc49b..037e5f5481180a93bb35f6409ef070e0e11ea3bc 100644 (file)
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -1022,7 +1022,7 @@ static int migrate_page_add(struct page *page, struct list_head *pagelist,
                if (!isolate_lru_page(head)) {
                        list_add_tail(&head->lru, pagelist);
                        mod_node_page_state(page_pgdat(head),
-                               NR_ISOLATED_ANON + page_is_file_cache(head),
+                               NR_ISOLATED_ANON + page_is_file_lru(head),
                                hpage_nr_pages(head));
                } else if (flags & MPOL_MF_STRICT) {
                        /*
index 1a205503be3f5b5771442da15a8362d3b2380a79..c1412e04975e1bb77ae7f1eaf1ccb97e020622d1 100644 (file)
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -193,7 +193,7 @@ void putback_movable_pages(struct list_head *l)
                        put_page(page);
                } else {
                        mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON +
-                                       page_is_file_cache(page), -hpage_nr_pages(page));
+                                       page_is_file_lru(page), -hpage_nr_pages(page));
                        putback_lru_page(page);
                }
        }
@@ -1219,7 +1219,7 @@ out:
                 */
                if (likely(!__PageMovable(page)))
                        mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON +
-                                       page_is_file_cache(page), -hpage_nr_pages(page));
+                                       page_is_file_lru(page), -hpage_nr_pages(page));
        }
 
        /*
@@ -1592,7 +1592,7 @@ static int add_page_for_migration(struct mm_struct *mm, unsigned long addr,
                err = 1;
                list_add_tail(&head->lru, pagelist);
                mod_node_page_state(page_pgdat(head),
-                       NR_ISOLATED_ANON + page_is_file_cache(head),
+                       NR_ISOLATED_ANON + page_is_file_lru(head),
                        hpage_nr_pages(head));
        }
 out_putpage:
@@ -1955,7 +1955,7 @@ static int numamigrate_isolate_page(pg_data_t *pgdat, struct page *page)
                return 0;
        }
 
-       page_lru = page_is_file_cache(page);
+       page_lru = page_is_file_lru(page);
        mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON + page_lru,
                                hpage_nr_pages(page));
 
@@ -1991,7 +1991,7 @@ int migrate_misplaced_page(struct page *page, struct vm_area_struct *vma,
         * Don't migrate file pages that are mapped in multiple processes
         * with execute permissions as they are probably shared libraries.
         */
-       if (page_mapcount(page) != 1 && page_is_file_cache(page) &&
+       if (page_mapcount(page) != 1 && page_is_file_lru(page) &&
            (vma->vm_flags & VM_EXEC))
                goto out;
 
@@ -1999,7 +1999,7 @@ int migrate_misplaced_page(struct page *page, struct vm_area_struct *vma,
         * Also do not migrate dirty pages as not all filesystems can move
         * dirty pages in MIGRATE_ASYNC mode which is a waste of cycles.
         */
-       if (page_is_file_cache(page) && PageDirty(page))
+       if (page_is_file_lru(page) && PageDirty(page))
                goto out;
 
        isolated = numamigrate_isolate_page(pgdat, page);
@@ -2014,7 +2014,7 @@ int migrate_misplaced_page(struct page *page, struct vm_area_struct *vma,
                if (!list_empty(&migratepages)) {
                        list_del(&page->lru);
                        dec_node_page_state(page, NR_ISOLATED_ANON +
-                                       page_is_file_cache(page));
+                                       page_is_file_lru(page));
                        putback_lru_page(page);
                }
                isolated = 0;
@@ -2044,7 +2044,7 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm,
        pg_data_t *pgdat = NODE_DATA(node);
        int isolated = 0;
        struct page *new_page = NULL;
-       int page_lru = page_is_file_cache(page);
+       int page_lru = page_is_file_lru(page);
        unsigned long start = address & HPAGE_PMD_MASK;
 
        new_page = alloc_pages_node(node,
index 311c0dadf71c9f5eb51b4d2004028fe0fe9cc4d8..0fee14b3941679eac5069206a31acb14c7cfe7ed 100644 (file)
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -98,7 +98,7 @@ static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
                                 * it cannot move them all from MIGRATE_ASYNC
                                 * context.
                                 */
-                               if (page_is_file_cache(page) && PageDirty(page))
+                               if (page_is_file_lru(page) && PageDirty(page))
                                        continue;
 
                                /*
index a4af8c999963568fc126205cf93bd5693fed1343..18505990c3b140bf48287006aeb2d386cadf29a0 100644 (file)
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -276,7 +276,7 @@ static void __activate_page(struct page *page, struct lruvec *lruvec,
                            void *arg)
 {
        if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
-               int file = page_is_file_cache(page);
+               int file = page_is_file_lru(page);
                int lru = page_lru_base_type(page);
 
                del_page_from_lru_list(page, lruvec, lru);
@@ -394,7 +394,7 @@ void mark_page_accessed(struct page *page)
                else
                        __lru_cache_activate_page(page);
                ClearPageReferenced(page);
-               if (page_is_file_cache(page))
+               if (page_is_file_lru(page))
                        workingset_activation(page);
        }
        if (page_is_idle(page))
@@ -515,7 +515,7 @@ static void lru_deactivate_file_fn(struct page *page, struct lruvec *lruvec,
                return;
 
        active = PageActive(page);
-       file = page_is_file_cache(page);
+       file = page_is_file_lru(page);
        lru = page_lru_base_type(page);
 
        del_page_from_lru_list(page, lruvec, lru + active);
@@ -548,7 +548,7 @@ static void lru_deactivate_fn(struct page *page, struct lruvec *lruvec,
                            void *arg)
 {
        if (PageLRU(page) && PageActive(page) && !PageUnevictable(page)) {
-               int file = page_is_file_cache(page);
+               int file = page_is_file_lru(page);
                int lru = page_lru_base_type(page);
 
                del_page_from_lru_list(page, lruvec, lru + LRU_ACTIVE);
@@ -573,9 +573,9 @@ static void lru_lazyfree_fn(struct page *page, struct lruvec *lruvec,
                ClearPageActive(page);
                ClearPageReferenced(page);
                /*
-                * lazyfree pages are clean anonymous pages. They have
-                * SwapBacked flag cleared to distinguish normal anonymous
-                * pages
+                * Lazyfree pages are clean anonymous pages.  They have the
+                * PG_swapbacked flag cleared, to distinguish them from normal
+                * anonymous pages.
                 */
                ClearPageSwapBacked(page);
                add_page_to_lru_list(page, lruvec, LRU_INACTIVE_FILE);
@@ -962,7 +962,7 @@ static void __pagevec_lru_add_fn(struct page *page, struct lruvec *lruvec,
 
        if (page_evictable(page)) {
                lru = page_lru(page);
-               update_page_reclaim_stat(lruvec, page_is_file_cache(page),
+               update_page_reclaim_stat(lruvec, page_is_file_lru(page),
                                         PageActive(page));
                if (was_unevictable)
                        count_vm_event(UNEVICTABLE_PGRESCUED);
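
Tying the pieces together, a hedged userspace model of the transition
lru_lazyfree_fn() performs (editor's sketch, not kernel code; the real check
also requires PageLRU, PageAnon, !PageSwapCache and !PageUnevictable): the
page loses PG_active, PG_referenced and PG_swapbacked and moves to the
inactive file LRU, where the renamed page_is_file_lru() reports it as a file
page.

#include <stdbool.h>
#include <stdio.h>

enum lru_model { M_INACTIVE_ANON, M_ACTIVE_ANON, M_INACTIVE_FILE, M_ACTIVE_FILE };

struct page_model {
	bool active;		/* models PG_active     */
	bool referenced;	/* models PG_referenced */
	bool swapbacked;	/* models PG_swapbacked */
	enum lru_model lru;
};

/* Simplified mirror of lru_lazyfree_fn(): only swap-backed pages qualify. */
static void model_lazyfree(struct page_model *page)
{
	if (!page->swapbacked)
		return;
	page->active = false;		/* ClearPageActive()     */
	page->referenced = false;	/* ClearPageReferenced() */
	page->swapbacked = false;	/* ClearPageSwapBacked() */
	page->lru = M_INACTIVE_FILE;	/* add to the inactive file LRU */
}

int main(void)
{
	struct page_model page = {
		.active = true, .swapbacked = true, .lru = M_ACTIVE_ANON,
	};

	model_lazyfree(&page);
	printf("swapbacked=%d, on inactive file LRU=%d\n",
	       page.swapbacked, page.lru == M_INACTIVE_FILE);
	return 0;
}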
index 2e8e690d28131504ef3c6ad7c51d80dd3d60ddd9..b06868fc492659a7038eed45a420d3dec391f5d0 100644 (file)
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -919,7 +919,7 @@ static int __remove_mapping(struct address_space *mapping, struct page *page,
                 * exceptional entries and shadow exceptional entries in the
                 * same address_space.
                 */
-               if (reclaimed && page_is_file_cache(page) &&
+               if (reclaimed && page_is_file_lru(page) &&
                    !mapping_exiting(mapping) && !dax_mapping(mapping))
                        shadow = workingset_eviction(page, target_memcg);
                __delete_from_page_cache(page, shadow);
@@ -1043,7 +1043,7 @@ static void page_check_dirty_writeback(struct page *page,
         * Anonymous pages are not handled by flushers and must be written
         * from reclaim context. Do not stall reclaim based on them
         */
-       if (!page_is_file_cache(page) ||
+       if (!page_is_file_lru(page) ||
            (PageAnon(page) && !PageSwapBacked(page))) {
                *dirty = false;
                *writeback = false;
@@ -1315,7 +1315,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
                         * the rest of the LRU for clean pages and see
                         * the same dirty pages again (PageReclaim).
                         */
-                       if (page_is_file_cache(page) &&
+                       if (page_is_file_lru(page) &&
                            (!current_is_kswapd() || !PageReclaim(page) ||
                             !test_bit(PGDAT_DIRTY, &pgdat->flags))) {
                                /*
@@ -1459,7 +1459,7 @@ activate_locked:
                        try_to_free_swap(page);
                VM_BUG_ON_PAGE(PageActive(page), page);
                if (!PageMlocked(page)) {
-                       int type = page_is_file_cache(page);
+                       int type = page_is_file_lru(page);
                        SetPageActive(page);
                        stat->nr_activate[type] += nr_pages;
                        count_memcg_page_event(page, PGACTIVATE);
@@ -1497,7 +1497,7 @@ unsigned long reclaim_clean_pages_from_list(struct zone *zone,
        LIST_HEAD(clean_pages);
 
        list_for_each_entry_safe(page, next, page_list, lru) {
-               if (page_is_file_cache(page) && !PageDirty(page) &&
+               if (page_is_file_lru(page) && !PageDirty(page) &&
                    !__PageMovable(page) && !PageUnevictable(page)) {
                        ClearPageActive(page);
                        list_move(&page->lru, &clean_pages);
@@ -2053,7 +2053,7 @@ static void shrink_active_list(unsigned long nr_to_scan,
                         * IO, plus JVM can create lots of anon VM_EXEC pages,
                         * so we ignore them here.
                         */
-                       if ((vm_flags & VM_EXEC) && page_is_file_cache(page)) {
+                       if ((vm_flags & VM_EXEC) && page_is_file_lru(page)) {
                                list_add(&page->lru, &l_active);
                                continue;
                        }