--- /dev/null
+From 2f79cdfe58c13949bbbb65ba5926abfe9561d0ec Mon Sep 17 00:00:00 2001
+From: Linus Torvalds <torvalds@linux-foundation.org>
+Date: Wed, 31 Aug 2022 09:46:12 -0700
+Subject: fs: only do a memory barrier for the first set_buffer_uptodate()
+
+From: Linus Torvalds <torvalds@linux-foundation.org>
+
+commit 2f79cdfe58c13949bbbb65ba5926abfe9561d0ec upstream.
+
+Commit d4252071b97d ("add barriers to buffer_uptodate and
+set_buffer_uptodate") added proper memory barriers to the buffer head
+BH_Uptodate bit, so that anybody who tests a buffer for being up-to-date
+will be guaranteed to actually see initialized state.
+
+However, that commit didn't _just_ add the memory barrier, it also ended
+up dropping the "was it already set" logic that the BUFFER_FNS() macro
+had.
+
+That's conceptually the right thing for a generic "this is a memory
+barrier" operation, but in the case of the buffer contents, we really
+only care about the memory barrier for the _first_ time we set the bit,
+in that the only memory ordering protection we need is to avoid anybody
+seeing uninitialized memory contents.
+
+Any other access ordering wouldn't be about the BH_Uptodate bit anyway,
+and would require some other proper lock (typically BH_Lock or the folio
+lock). A reader that races with somebody invalidating the buffer head
+isn't an issue wrt the memory ordering, it's a serialization issue.
+
+Now, you'd think that the buffer head operations don't matter in this
+day and age (and I certainly thought so), but apparently some loads
+still end up being heavy users of buffer heads. In particular, the
+kernel test robot reported that not having this bit access optimization
+in place caused a noticeable direct IO performance regression on ext4:
+
+ fxmark.ssd_ext4_no_jnl_DWTL_54_directio.works/sec -26.5% regression
+
+although you presumably need a fast disk and a lot of cores to actually
+notice.
+
+Link: https://lore.kernel.org/all/Yw8L7HTZ%2FdE2%2Fo9C@xsang-OptiPlex-9020/
+Reported-by: kernel test robot <oliver.sang@intel.com>
+Tested-by: Fengwei Yin <fengwei.yin@intel.com>
+Cc: Mikulas Patocka <mpatocka@redhat.com>
+Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
+Cc: stable@kernel.org
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/linux/buffer_head.h | 11 +++++++++++
+ 1 file changed, 11 insertions(+)
+
+--- a/include/linux/buffer_head.h
++++ b/include/linux/buffer_head.h
+@@ -133,6 +133,17 @@ BUFFER_FNS(Defer_Completion, defer_compl
+ static __always_inline void set_buffer_uptodate(struct buffer_head *bh)
+ {
+ /*
++ * If somebody else already set this uptodate, they will
++ * have done the memory barrier, and a reader will thus
++ * see *some* valid buffer state.
++ *
++ * Any other serialization (with IO errors or whatever that
++ * might clear the bit) has to come from other state (eg BH_Lock).
++ */
++ if (test_bit(BH_Uptodate, &bh->b_state))
++ return;
++
++ /*
+ * make it consistent with folio_mark_uptodate
+ * pairs with smp_load_acquire in buffer_uptodate
+ */
--- /dev/null
+From yee.lee@mediatek.com Thu Sep 8 19:05:17 2022
+From: <yee.lee@mediatek.com>
+Date: Tue, 6 Sep 2022 15:03:06 +0800
+Subject: Revert "mm: kmemleak: take a full lowmem check in kmemleak_*_phys()"
+To: <linux-kernel@vger.kernel.org>
+Cc: <patrick.wang.shcn@gmail.com>, Yee Lee <yee.lee@mediatek.com>, <stable@vger.kernel.org>, Catalin Marinas <catalin.marinas@arm.com>, "Andrew Morton" <akpm@linux-foundation.org>, Matthias Brugger <matthias.bgg@gmail.com>, "open list:MEMORY MANAGEMENT" <linux-mm@kvack.org>, "moderated list:ARM/Mediatek SoC support" <linux-arm-kernel@lists.infradead.org>, "moderated list:ARM/Mediatek SoC support" <linux-mediatek@lists.infradead.org>
+Message-ID: <20220906070309.18809-1-yee.lee@mediatek.com>
+
+From: Yee Lee <yee.lee@mediatek.com>
+
+This reverts commit 23c2d497de21f25898fbea70aeb292ab8acc8c94.
+
+Commit 23c2d497de21 ("mm: kmemleak: take a full lowmem check in
+kmemleak_*_phys()") brought false leak alarms on some archs like arm64
+that do not init the pfn boundary in early booting. The final solution
+lands on linux-6.0: commit 0c24e061196c ("mm: kmemleak: add rbtree and
+store physical address for objects allocated with PA").
+
+Revert this commit before linux-6.0. The original issue of invalid PA
+can be mitigated by an additional check in devicetree.
+
+The false alarm report is as following: Kmemleak output: (Qemu/arm64)
+unreferenced object 0xffff0000c0170a00 (size 128):
+ comm "swapper/0", pid 1, jiffies 4294892404 (age 126.208s)
+ hex dump (first 32 bytes):
+ 62 61 73 65 00 00 00 00 00 00 00 00 00 00 00 00 base............
+ 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 ................
+ backtrace:
+ [<(____ptrval____)>] __kmalloc_track_caller+0x1b0/0x2e4
+ [<(____ptrval____)>] kstrdup_const+0x8c/0xc4
+ [<(____ptrval____)>] kvasprintf_const+0xbc/0xec
+ [<(____ptrval____)>] kobject_set_name_vargs+0x58/0xe4
+ [<(____ptrval____)>] kobject_add+0x84/0x100
+ [<(____ptrval____)>] __of_attach_node_sysfs+0x78/0xec
+ [<(____ptrval____)>] of_core_init+0x68/0x104
+ [<(____ptrval____)>] driver_init+0x28/0x48
+ [<(____ptrval____)>] do_basic_setup+0x14/0x28
+ [<(____ptrval____)>] kernel_init_freeable+0x110/0x178
+ [<(____ptrval____)>] kernel_init+0x20/0x1a0
+ [<(____ptrval____)>] ret_from_fork+0x10/0x20
+
+This patch is also applicable to linux-5.17.y/linux-5.18.y/linux-5.19.y
+
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Yee Lee <yee.lee@mediatek.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/kmemleak.c | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+--- a/mm/kmemleak.c
++++ b/mm/kmemleak.c
+@@ -1130,7 +1130,7 @@ EXPORT_SYMBOL(kmemleak_no_scan);
+ void __ref kmemleak_alloc_phys(phys_addr_t phys, size_t size, int min_count,
+ gfp_t gfp)
+ {
+- if (PHYS_PFN(phys) >= min_low_pfn && PHYS_PFN(phys) < max_low_pfn)
++ if (!IS_ENABLED(CONFIG_HIGHMEM) || PHYS_PFN(phys) < max_low_pfn)
+ kmemleak_alloc(__va(phys), size, min_count, gfp);
+ }
+ EXPORT_SYMBOL(kmemleak_alloc_phys);
+@@ -1141,7 +1141,7 @@ EXPORT_SYMBOL(kmemleak_alloc_phys);
+ */
+ void __ref kmemleak_free_part_phys(phys_addr_t phys, size_t size)
+ {
+- if (PHYS_PFN(phys) >= min_low_pfn && PHYS_PFN(phys) < max_low_pfn)
++ if (!IS_ENABLED(CONFIG_HIGHMEM) || PHYS_PFN(phys) < max_low_pfn)
+ kmemleak_free_part(__va(phys), size);
+ }
+ EXPORT_SYMBOL(kmemleak_free_part_phys);
+@@ -1152,7 +1152,7 @@ EXPORT_SYMBOL(kmemleak_free_part_phys);
+ */
+ void __ref kmemleak_not_leak_phys(phys_addr_t phys)
+ {
+- if (PHYS_PFN(phys) >= min_low_pfn && PHYS_PFN(phys) < max_low_pfn)
++ if (!IS_ENABLED(CONFIG_HIGHMEM) || PHYS_PFN(phys) < max_low_pfn)
+ kmemleak_not_leak(__va(phys));
+ }
+ EXPORT_SYMBOL(kmemleak_not_leak_phys);
+@@ -1163,7 +1163,7 @@ EXPORT_SYMBOL(kmemleak_not_leak_phys);
+ */
+ void __ref kmemleak_ignore_phys(phys_addr_t phys)
+ {
+- if (PHYS_PFN(phys) >= min_low_pfn && PHYS_PFN(phys) < max_low_pfn)
++ if (!IS_ENABLED(CONFIG_HIGHMEM) || PHYS_PFN(phys) < max_low_pfn)
+ kmemleak_ignore(__va(phys));
+ }
+ EXPORT_SYMBOL(kmemleak_ignore_phys);