--- /dev/null
+From 23c2d497de21f25898fbea70aeb292ab8acc8c94 Mon Sep 17 00:00:00 2001
+From: Patrick Wang <patrick.wang.shcn@gmail.com>
+Date: Thu, 14 Apr 2022 19:14:04 -0700
+Subject: mm: kmemleak: take a full lowmem check in kmemleak_*_phys()
+
+From: Patrick Wang <patrick.wang.shcn@gmail.com>
+
+commit 23c2d497de21f25898fbea70aeb292ab8acc8c94 upstream.
+
+The kmemleak_*_phys() APIs do not check the address against lowmem's
+lower boundary, so a caller may pass an address below lowmem, which
+triggers an oops:
+
+ # echo scan > /sys/kernel/debug/kmemleak
+ Unable to handle kernel paging request at virtual address ff5fffffffe00000
+ Oops [#1]
+ Modules linked in:
+ CPU: 2 PID: 134 Comm: bash Not tainted 5.18.0-rc1-next-20220407 #33
+ Hardware name: riscv-virtio,qemu (DT)
+ epc : scan_block+0x74/0x15c
+ ra : scan_block+0x72/0x15c
+ epc : ffffffff801e5806 ra : ffffffff801e5804 sp : ff200000104abc30
+ gp : ffffffff815cd4e8 tp : ff60000004cfa340 t0 : 0000000000000200
+ t1 : 00aaaaaac23954cc t2 : 00000000000003ff s0 : ff200000104abc90
+ s1 : ffffffff81b0ff28 a0 : 0000000000000000 a1 : ff5fffffffe01000
+ a2 : ffffffff81b0ff28 a3 : 0000000000000002 a4 : 0000000000000001
+ a5 : 0000000000000000 a6 : ff200000104abd7c a7 : 0000000000000005
+ s2 : ff5fffffffe00ff9 s3 : ffffffff815cd998 s4 : ffffffff815d0e90
+ s5 : ffffffff81b0ff28 s6 : 0000000000000020 s7 : ffffffff815d0eb0
+ s8 : ffffffffffffffff s9 : ff5fffffffe00000 s10: ff5fffffffe01000
+ s11: 0000000000000022 t3 : 00ffffffaa17db4c t4 : 000000000000000f
+ t5 : 0000000000000001 t6 : 0000000000000000
+ status: 0000000000000100 badaddr: ff5fffffffe00000 cause: 000000000000000d
+ scan_gray_list+0x12e/0x1a6
+ kmemleak_scan+0x2aa/0x57e
+ kmemleak_write+0x32a/0x40c
+ full_proxy_write+0x56/0x82
+ vfs_write+0xa6/0x2a6
+ ksys_write+0x6c/0xe2
+ sys_write+0x22/0x2a
+ ret_from_syscall+0x0/0x2
+
+Callers may not know the exact address they are passing (e.g. when it
+comes from the devicetree), so the kmemleak_*_phys() APIs should
+guarantee that the address they finally use lies in the lowmem range.
+Do that by also checking the address against lowmem's lower boundary.
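+
+In other words, each kmemleak_*_phys() helper now only translates the
+physical address with __va() when its PFN falls inside the lowmem
+window. As a stand-alone sketch (the helper name below is made up for
+illustration and is not part of the patch):
+
+	static bool phys_is_lowmem(phys_addr_t phys)
+	{
+		unsigned long pfn = PHYS_PFN(phys);
+
+		/* Below min_low_pfn, __va() yields a bogus pointer (as in
+		 * the oops above); at or above max_low_pfn the page is
+		 * highmem and not directly mapped either. */
+		return pfn >= min_low_pfn && pfn < max_low_pfn;
+	}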
+
+Link: https://lkml.kernel.org/r/20220413122925.33856-1-patrick.wang.shcn@gmail.com
+Signed-off-by: Patrick Wang <patrick.wang.shcn@gmail.com>
+Acked-by: Catalin Marinas <catalin.marinas@arm.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/kmemleak.c | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+--- a/mm/kmemleak.c
++++ b/mm/kmemleak.c
+@@ -1130,7 +1130,7 @@ EXPORT_SYMBOL(kmemleak_no_scan);
+ void __ref kmemleak_alloc_phys(phys_addr_t phys, size_t size, int min_count,
+ gfp_t gfp)
+ {
+- if (!IS_ENABLED(CONFIG_HIGHMEM) || PHYS_PFN(phys) < max_low_pfn)
++ if (PHYS_PFN(phys) >= min_low_pfn && PHYS_PFN(phys) < max_low_pfn)
+ kmemleak_alloc(__va(phys), size, min_count, gfp);
+ }
+ EXPORT_SYMBOL(kmemleak_alloc_phys);
+@@ -1141,7 +1141,7 @@ EXPORT_SYMBOL(kmemleak_alloc_phys);
+ */
+ void __ref kmemleak_free_part_phys(phys_addr_t phys, size_t size)
+ {
+- if (!IS_ENABLED(CONFIG_HIGHMEM) || PHYS_PFN(phys) < max_low_pfn)
++ if (PHYS_PFN(phys) >= min_low_pfn && PHYS_PFN(phys) < max_low_pfn)
+ kmemleak_free_part(__va(phys), size);
+ }
+ EXPORT_SYMBOL(kmemleak_free_part_phys);
+@@ -1152,7 +1152,7 @@ EXPORT_SYMBOL(kmemleak_free_part_phys);
+ */
+ void __ref kmemleak_not_leak_phys(phys_addr_t phys)
+ {
+- if (!IS_ENABLED(CONFIG_HIGHMEM) || PHYS_PFN(phys) < max_low_pfn)
++ if (PHYS_PFN(phys) >= min_low_pfn && PHYS_PFN(phys) < max_low_pfn)
+ kmemleak_not_leak(__va(phys));
+ }
+ EXPORT_SYMBOL(kmemleak_not_leak_phys);
+@@ -1163,7 +1163,7 @@ EXPORT_SYMBOL(kmemleak_not_leak_phys);
+ */
+ void __ref kmemleak_ignore_phys(phys_addr_t phys)
+ {
+- if (!IS_ENABLED(CONFIG_HIGHMEM) || PHYS_PFN(phys) < max_low_pfn)
++ if (PHYS_PFN(phys) >= min_low_pfn && PHYS_PFN(phys) < max_low_pfn)
+ kmemleak_ignore(__va(phys));
+ }
+ EXPORT_SYMBOL(kmemleak_ignore_phys);
--- /dev/null
+From e553f62f10d93551eb883eca227ac54d1a4fad84 Mon Sep 17 00:00:00 2001
+From: Juergen Gross <jgross@suse.com>
+Date: Thu, 14 Apr 2022 19:13:43 -0700
+Subject: mm, page_alloc: fix build_zonerefs_node()
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Juergen Gross <jgross@suse.com>
+
+commit e553f62f10d93551eb883eca227ac54d1a4fad84 upstream.
+
+Since commit 6aa303defb74 ("mm, vmscan: only allocate and reclaim from
+zones with pages managed by the buddy allocator") only zones with free
+memory are included in a built zonelist. This is problematic when e.g.
+all memory of a zone has been ballooned out at the time the zonelists
+are being rebuilt.
+
+The decision whether to rebuild the zonelists when onlining new memory
+is based on populated_zone() returning 0 for the zone the memory will
+be added to. The new zone is then added to the zonelists only if it
+has free memory pages (managed_zone() returns a non-zero value) after
+the memory has been onlined. This assumes that onlining memory always
+frees the added pages to the allocator immediately, but that is not
+true in all cases: when running as a Xen guest, for example, the
+onlined memory is only added to the ballooned memory list and is freed
+to the allocator only when the guest is ballooned up afterwards.
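+
+The problematic sequence can be sketched roughly as follows (schematic
+only, not lifted from the hotplug code; online_zone_pages() and
+rebuild_zonelists() are made-up stand-ins for the real paths):
+
+	/* schematic only */
+	rebuild = !populated_zone(zone);   /* zone had no present pages   */
+	online_zone_pages(zone);           /* Xen's balloon callback parks
+					    * the pages on its ballooned
+					    * list instead of freeing them,
+					    * so managed_zone() is still
+					    * false at this point          */
+	if (rebuild)
+		rebuild_zonelists();       /* with the managed_zone() test
+					    * the new zone is skipped and
+					    * stays invisible to the
+					    * allocator                    */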
+
+Another problem with using managed_zone() to decide whether a zone is
+added to the zonelists is that a zone with all memory used will in
+fact be removed from all zonelists in case the zonelists happen to be
+rebuilt.
+
+Use populated_zone() when building a zonelist, as was done before that
+commit.
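+
+For reference, the two predicates differ only in which counter they
+look at; roughly (a sketch of the mmzone.h helpers of this era, not a
+verbatim copy):
+
+	static inline bool populated_zone(struct zone *zone)
+	{
+		return !!zone->present_pages;	/* pages physically present */
+	}
+
+	static inline bool managed_zone(struct zone *zone)
+	{
+		return !!zone->managed_pages;	/* pages handed to the buddy
+						 * allocator */
+	}
+
+A fully ballooned-out zone keeps its present_pages but has no managed
+pages, which is why only populated_zone() keeps it in the zonelists.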
+
+There was a report that QubesOS (based on Xen) is hitting this problem.
+Xen switched to using the zone device functionality in kernel 5.9, and
+QubesOS wants to use memory hotplugging for guests in order to be able
+to start a guest with minimal memory and expand it as needed. This was
+the report leading to the patch.
+
+Link: https://lkml.kernel.org/r/20220407120637.9035-1-jgross@suse.com
+Fixes: 6aa303defb74 ("mm, vmscan: only allocate and reclaim from zones with pages managed by the buddy allocator")
+Signed-off-by: Juergen Gross <jgross@suse.com>
+Reported-by: Marek Marczykowski-Górecki <marmarek@invisiblethingslab.com>
+Acked-by: Michal Hocko <mhocko@suse.com>
+Acked-by: David Hildenbrand <david@redhat.com>
+Cc: Marek Marczykowski-Górecki <marmarek@invisiblethingslab.com>
+Reviewed-by: Wei Yang <richard.weiyang@gmail.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/page_alloc.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/mm/page_alloc.c
++++ b/mm/page_alloc.c
+@@ -4503,7 +4503,7 @@ static int build_zonelists_node(pg_data_
+ do {
+ zone_type--;
+ zone = pgdat->node_zones + zone_type;
+- if (managed_zone(zone)) {
++ if (populated_zone(zone)) {
+ zoneref_set_zone(zone,
+ &zonelist->_zonerefs[nr_zones++]);
+ check_highest_zone(zone_type);