--- /dev/null
+From b50da6e9f42ade19141f6cf8870bb2312b055aa3 Mon Sep 17 00:00:00 2001
+From: Zhaoyang Huang <huangzhaoyang@gmail.com>
+Date: Tue, 15 Dec 2020 20:42:23 -0800
+Subject: mm: fix a race on nr_swap_pages
+
+From: Zhaoyang Huang <huangzhaoyang@gmail.com>
+
+commit b50da6e9f42ade19141f6cf8870bb2312b055aa3 upstream.
+
+"Free swap = -4kB" shows up on my system when several get_swap_pages() calls
+race with each other while show_swap_cache_info() runs simultaneously.  Take
+swap_avail_lock before reading and decrementing nr_swap_pages so that the
+check and the subtraction are serialized.  No lock is needed in
+get_swap_page_of_type() because the "pre-subtract/post-add" pattern is removed
+there: nr_swap_pages is now decremented only after scan_swap_map() succeeds.
+
+ProcessA                    ProcessB                      ProcessC
+ngoals = 1                  ngoals = 1
+avail = nr_swap_pages(1)    avail = nr_swap_pages(1)
+nr_swap_pages(1) -= ngoals
+                            nr_swap_pages(0) -= ngoals
+                                                          nr_swap_pages = -1
+
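+As an illustration only (a userspace analogue, not kernel code; every name in
+the sketch is made up), the program below shows how an unserialized
+read-check-subtract on a shared counter can drive it negative when two threads
+both see a count of 1, and how holding a lock across the whole sequence (as
+swap_avail_lock now does for nr_swap_pages) closes the window:
+
+  #include <pthread.h>
+  #include <stdatomic.h>
+  #include <stdio.h>
+
+  static atomic_long free_pages = 1;   /* stands in for nr_swap_pages */
+  static pthread_mutex_t avail_lock = PTHREAD_MUTEX_INITIALIZER;
+  static int use_lock;                 /* 0: racy ordering, 1: fixed */
+
+  static void *claim_page(void *arg)
+  {
+          long avail;
+
+          (void)arg;
+          if (use_lock)
+                  pthread_mutex_lock(&avail_lock);
+          avail = atomic_load(&free_pages);           /* read */
+          if (avail > 0)
+                  atomic_fetch_sub(&free_pages, 1);   /* subtract */
+          if (use_lock)
+                  pthread_mutex_unlock(&avail_lock);
+          return NULL;
+  }
+
+  int main(void)
+  {
+          pthread_t a, b;
+
+          for (use_lock = 0; use_lock <= 1; use_lock++) {
+                  atomic_store(&free_pages, 1);
+                  pthread_create(&a, NULL, claim_page, NULL);
+                  pthread_create(&b, NULL, claim_page, NULL);
+                  pthread_join(a, NULL);
+                  pthread_join(b, NULL);
+                  /* The racy pass may print -1; the locked pass cannot. */
+                  printf("%s: free_pages = %ld\n",
+                         use_lock ? "locked" : "racy",
+                         atomic_load(&free_pages));
+          }
+          return 0;
+  }
+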
+Link: https://lkml.kernel.org/r/1607050340-4535-1-git-send-email-zhaoyang.huang@unisoc.com
+Signed-off-by: Zhaoyang Huang <zhaoyang.huang@unisoc.com>
+Acked-by: Vlastimil Babka <vbabka@suse.cz>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Hugh Dickins <hughd@google.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ mm/swapfile.c | 11 ++++++-----
+ 1 file changed, 6 insertions(+), 5 deletions(-)
+
+--- a/mm/swapfile.c
++++ b/mm/swapfile.c
+@@ -1045,16 +1045,18 @@ int get_swap_pages(int n_goal, swp_entry
+ /* Only single cluster request supported */
+ WARN_ON_ONCE(n_goal > 1 && size == SWAPFILE_CLUSTER);
+
++ spin_lock(&swap_avail_lock);
++
+ avail_pgs = atomic_long_read(&nr_swap_pages) / size;
+- if (avail_pgs <= 0)
++ if (avail_pgs <= 0) {
++ spin_unlock(&swap_avail_lock);
+ goto noswap;
++ }
+
+ n_goal = min3((long)n_goal, (long)SWAP_BATCH, avail_pgs);
+
+ atomic_long_sub(n_goal * size, &nr_swap_pages);
+
+- spin_lock(&swap_avail_lock);
+-
+ start_over:
+ node = numa_node_id();
+ plist_for_each_entry_safe(si, next, &swap_avail_heads[node], avail_lists[node]) {
+@@ -1128,14 +1130,13 @@ swp_entry_t get_swap_page_of_type(int ty
+
+ spin_lock(&si->lock);
+ if (si->flags & SWP_WRITEOK) {
+- atomic_long_dec(&nr_swap_pages);
+ /* This is called for allocating swap entry, not cache */
+ offset = scan_swap_map(si, 1);
+ if (offset) {
++ atomic_long_dec(&nr_swap_pages);
+ spin_unlock(&si->lock);
+ return swp_entry(type, offset);
+ }
+- atomic_long_inc(&nr_swap_pages);
+ }
+ spin_unlock(&si->lock);
+ fail:
--- /dev/null
+From ce8f86ee94fabcc98537ddccd7e82cfd360a4dc5 Mon Sep 17 00:00:00 2001
+From: Hailong liu <liu.hailong6@zte.com.cn>
+Date: Tue, 12 Jan 2021 15:49:08 -0800
+Subject: mm/page_alloc: add a missing mm_page_alloc_zone_locked() tracepoint
+
+From: Hailong liu <liu.hailong6@zte.com.cn>
+
+commit ce8f86ee94fabcc98537ddccd7e82cfd360a4dc5 upstream.
+
+The tracepoint *trace_mm_page_alloc_zone_locked()* in __rmqueue() does not
+currently cover all branches.  Route every allocation branch through a common
+exit and emit the tracepoint there, checking that a page was actually
+allocated before doing so.
+
+[akpm@linux-foundation.org: use IS_ENABLED() to suppress warning]
+
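+Purely as an illustration (a userspace analogue; FEATURE_FOO, do_alloc() and
+the helpers below are made-up names, and MY_IS_ENABLED() only mimics the
+kernel's IS_ENABLED()), the sketch shows why funnelling every branch to a
+common exit and testing the result there lets a single tracepoint cover all
+paths, while the if (IS_ENABLED(...)) form keeps the conditional code visible
+to the compiler even when the option is off:
+
+  #include <stdio.h>
+
+  #define FEATURE_FOO 0                /* hypothetical "config" switch */
+  #define MY_IS_ENABLED(opt) (opt)     /* stand-in for the kernel macro */
+
+  static int alloc_from_foo(void) { return 0; }  /* optional source, may fail */
+  static int alloc_normal(void)   { return 42; }
+
+  static int do_alloc(void)
+  {
+          int page = 0;
+
+          if (MY_IS_ENABLED(FEATURE_FOO)) {
+                  /*
+                   * Parsed and type-checked even when the option is off,
+                   * then dropped as dead code, unlike an #ifdef block.
+                   */
+                  page = alloc_from_foo();
+                  if (page)
+                          goto out;
+          }
+          page = alloc_normal();
+  out:
+          if (page)      /* trace only when something was really allocated */
+                  printf("trace: allocated %d\n", page);
+          return page;
+  }
+
+  int main(void)
+  {
+          return do_alloc() == 42 ? 0 : 1;
+  }
+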
+Link: https://lkml.kernel.org/r/20201228132901.41523-1-carver4lio@163.com
+Signed-off-by: Hailong liu <liu.hailong6@zte.com.cn>
+Reviewed-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Ivan Babrou <ivan@cloudflare.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ mm/page_alloc.c | 31 ++++++++++++++++---------------
+ 1 file changed, 16 insertions(+), 15 deletions(-)
+
+--- a/mm/page_alloc.c
++++ b/mm/page_alloc.c
+@@ -2846,20 +2846,20 @@ __rmqueue(struct zone *zone, unsigned in
+ {
+ struct page *page;
+
+-#ifdef CONFIG_CMA
+- /*
+- * Balance movable allocations between regular and CMA areas by
+- * allocating from CMA when over half of the zone's free memory
+- * is in the CMA area.
+- */
+- if (alloc_flags & ALLOC_CMA &&
+- zone_page_state(zone, NR_FREE_CMA_PAGES) >
+- zone_page_state(zone, NR_FREE_PAGES) / 2) {
+- page = __rmqueue_cma_fallback(zone, order);
+- if (page)
+- return page;
++ if (IS_ENABLED(CONFIG_CMA)) {
++ /*
++ * Balance movable allocations between regular and CMA areas by
++ * allocating from CMA when over half of the zone's free memory
++ * is in the CMA area.
++ */
++ if (alloc_flags & ALLOC_CMA &&
++ zone_page_state(zone, NR_FREE_CMA_PAGES) >
++ zone_page_state(zone, NR_FREE_PAGES) / 2) {
++ page = __rmqueue_cma_fallback(zone, order);
++ if (page)
++ goto out;
++ }
+ }
+-#endif
+ retry:
+ page = __rmqueue_smallest(zone, order, migratetype);
+ if (unlikely(!page)) {
+@@ -2870,8 +2870,9 @@ retry:
+ alloc_flags))
+ goto retry;
+ }
+-
+- trace_mm_page_alloc_zone_locked(page, order, migratetype);
++out:
++ if (page)
++ trace_mm_page_alloc_zone_locked(page, order, migratetype);
+ return page;
+ }
+