--- /dev/null
+From 4cd1103d8c66b2cdb7e64385c274edb0ac5e8887 Mon Sep 17 00:00:00 2001
+From: Joseph Qi <joseph.qi@linux.alibaba.com>
+Date: Sat, 29 Jan 2022 13:41:23 -0800
+Subject: jbd2: export jbd2_journal_[grab|put]_journal_head
+
+From: Joseph Qi <joseph.qi@linux.alibaba.com>
+
+commit 4cd1103d8c66b2cdb7e64385c274edb0ac5e8887 upstream.
+
+Patch series "ocfs2: fix a deadlock case".
+
+This fixes a deadlock case in ocfs2.  We first export the jbd2 symbols
+jbd2_journal_[grab|put]_journal_head as preparation, and later use them
+in ocfs2 instead of jbd_[lock|unlock]_bh_journal_head to fix the
+deadlock.
+
+This patch (of 2):
+
+This exports the symbols jbd2_journal_[grab|put]_journal_head so that
+they can be used by other modules, e.g. ocfs2.
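+
+The intended usage (as in the ocfs2 patch later in this series) is a
+grab/put pair around the journal head access; a rough sketch:
+
+	jh = jbd2_journal_grab_journal_head(bh);
+	if (!jh)	/* bh has no journal head attached */
+		return;
+	/* ... inspect jh, e.g. under jh->b_state_lock ... */
+	jbd2_journal_put_journal_head(jh);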
+
+Link: https://lkml.kernel.org/r/20220121071205.100648-2-joseph.qi@linux.alibaba.com
+Signed-off-by: Joseph Qi <joseph.qi@linux.alibaba.com>
+Cc: Mark Fasheh <mark@fasheh.com>
+Cc: Joel Becker <jlbec@evilplan.org>
+Cc: Junxiao Bi <junxiao.bi@oracle.com>
+Cc: Changwei Ge <gechangwei@live.cn>
+Cc: Gang He <ghe@suse.com>
+Cc: Jun Piao <piaojun@huawei.com>
+Cc: Andreas Dilger <adilger.kernel@dilger.ca>
+Cc: Gautham Ananthakrishna <gautham.ananthakrishna@oracle.com>
+Cc: Saeed Mirzamohammadi <saeed.mirzamohammadi@oracle.com>
+Cc: "Theodore Ts'o" <tytso@mit.edu>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/jbd2/journal.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/fs/jbd2/journal.c
++++ b/fs/jbd2/journal.c
+@@ -2795,6 +2795,7 @@ struct journal_head *jbd2_journal_grab_j
+ jbd_unlock_bh_journal_head(bh);
+ return jh;
+ }
++EXPORT_SYMBOL(jbd2_journal_grab_journal_head);
+
+ static void __journal_remove_journal_head(struct buffer_head *bh)
+ {
+@@ -2847,6 +2848,7 @@ void jbd2_journal_put_journal_head(struc
+ jbd_unlock_bh_journal_head(bh);
+ }
+ }
++EXPORT_SYMBOL(jbd2_journal_put_journal_head);
+
+ /*
+ * Initialize jbd inode head
--- /dev/null
+From 27fe73394a1c6d0b07fa4d95f1bca116d1cc66e9 Mon Sep 17 00:00:00 2001
+From: Peter Collingbourne <pcc@google.com>
+Date: Sat, 29 Jan 2022 13:41:14 -0800
+Subject: mm, kasan: use compare-exchange operation to set KASAN page tag
+
+From: Peter Collingbourne <pcc@google.com>
+
+commit 27fe73394a1c6d0b07fa4d95f1bca116d1cc66e9 upstream.
+
+It has been reported that the tag setting operation on newly-allocated
+pages can cause the page flags to be corrupted when performed
+concurrently with other flag updates as a result of the use of
+non-atomic operations.
+
+Fix the problem by using a compare-exchange loop to update the tag.
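+
+For illustration, a plain read-modify-write of page->flags can lose a
+concurrent update (hypothetical interleaving; PG_foo stands in for any
+other page flag):
+
+	CPU0: page_kasan_tag_set()	CPU1: set another page flag
+	  old = page->flags;
+					  page->flags |= 1UL << PG_foo;
+	  page->flags = (old & ~mask)
+			| tag_bits;	/* CPU1's PG_foo update lost */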
+
+Link: https://lkml.kernel.org/r/20220120020148.1632253-1-pcc@google.com
+Link: https://linux-review.googlesource.com/id/I456b24a2b9067d93968d43b4bb3351c0cec63101
+Fixes: 2813b9c02962 ("kasan, mm, arm64: tag non slab memory allocated via pagealloc")
+Signed-off-by: Peter Collingbourne <pcc@google.com>
+Reviewed-by: Andrey Konovalov <andreyknvl@gmail.com>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/linux/mm.h | 17 ++++++++++++-----
+ 1 file changed, 12 insertions(+), 5 deletions(-)
+
+diff --git a/include/linux/mm.h b/include/linux/mm.h
+index e1a84b1e6787..213cc569b192 100644
+--- a/include/linux/mm.h
++++ b/include/linux/mm.h
+@@ -1506,11 +1506,18 @@ static inline u8 page_kasan_tag(const struct page *page)
+
+ static inline void page_kasan_tag_set(struct page *page, u8 tag)
+ {
+- if (kasan_enabled()) {
+- tag ^= 0xff;
+- page->flags &= ~(KASAN_TAG_MASK << KASAN_TAG_PGSHIFT);
+- page->flags |= (tag & KASAN_TAG_MASK) << KASAN_TAG_PGSHIFT;
+- }
++ unsigned long old_flags, flags;
++
++ if (!kasan_enabled())
++ return;
++
++ tag ^= 0xff;
++ old_flags = READ_ONCE(page->flags);
++ do {
++ flags = old_flags;
++ flags &= ~(KASAN_TAG_MASK << KASAN_TAG_PGSHIFT);
++ flags |= (tag & KASAN_TAG_MASK) << KASAN_TAG_PGSHIFT;
++ } while (unlikely(!try_cmpxchg(&page->flags, &old_flags, flags)));
+ }
+
+ static inline void page_kasan_tag_reset(struct page *page)
+--
+2.35.1
+
--- /dev/null
+From ddf4b773aa40790dfa936bd845c18e735a49c61c Mon Sep 17 00:00:00 2001
+From: Joseph Qi <joseph.qi@linux.alibaba.com>
+Date: Sat, 29 Jan 2022 13:41:27 -0800
+Subject: ocfs2: fix a deadlock when commit trans
+
+From: Joseph Qi <joseph.qi@linux.alibaba.com>
+
+commit ddf4b773aa40790dfa936bd845c18e735a49c61c upstream.
+
+commit 6f1b228529ae introduces a regression which can deadlock as
+follows:
+
+ Task1: Task2:
+ jbd2_journal_commit_transaction ocfs2_test_bg_bit_allocatable
+ spin_lock(&jh->b_state_lock) jbd_lock_bh_journal_head
+ __jbd2_journal_remove_checkpoint spin_lock(&jh->b_state_lock)
+ jbd2_journal_put_journal_head
+ jbd_lock_bh_journal_head
+
+Task1 and Task2 lock bh->b_state and jh->b_state_lock in different
+orders, which finally results in an ABBA deadlock.
+
+So use jbd2_journal_[grab|put]_journal_head instead in
+ocfs2_test_bg_bit_allocatable() to fix it:
+jbd2_journal_grab_journal_head() drops the bh journal head lock before
+returning, so ocfs2 no longer holds it while taking jh->b_state_lock.
+
+Link: https://lkml.kernel.org/r/20220121071205.100648-3-joseph.qi@linux.alibaba.com
+Fixes: 6f1b228529ae ("ocfs2: fix race between searching chunks and release journal_head from buffer_head")
+Signed-off-by: Joseph Qi <joseph.qi@linux.alibaba.com>
+Reported-by: Gautham Ananthakrishna <gautham.ananthakrishna@oracle.com>
+Tested-by: Gautham Ananthakrishna <gautham.ananthakrishna@oracle.com>
+Reported-by: Saeed Mirzamohammadi <saeed.mirzamohammadi@oracle.com>
+Cc: "Theodore Ts'o" <tytso@mit.edu>
+Cc: Andreas Dilger <adilger.kernel@dilger.ca>
+Cc: Changwei Ge <gechangwei@live.cn>
+Cc: Gang He <ghe@suse.com>
+Cc: Joel Becker <jlbec@evilplan.org>
+Cc: Jun Piao <piaojun@huawei.com>
+Cc: Junxiao Bi <junxiao.bi@oracle.com>
+Cc: Mark Fasheh <mark@fasheh.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/ocfs2/suballoc.c | 25 +++++++++++--------------
+ 1 file changed, 11 insertions(+), 14 deletions(-)
+
+--- a/fs/ocfs2/suballoc.c
++++ b/fs/ocfs2/suballoc.c
+@@ -1253,26 +1253,23 @@ static int ocfs2_test_bg_bit_allocatable
+ {
+ struct ocfs2_group_desc *bg = (struct ocfs2_group_desc *) bg_bh->b_data;
+ struct journal_head *jh;
+- int ret = 1;
++ int ret;
+
+ if (ocfs2_test_bit(nr, (unsigned long *)bg->bg_bitmap))
+ return 0;
+
+- if (!buffer_jbd(bg_bh))
++ jh = jbd2_journal_grab_journal_head(bg_bh);
++ if (!jh)
+ return 1;
+
+- jbd_lock_bh_journal_head(bg_bh);
+- if (buffer_jbd(bg_bh)) {
+- jh = bh2jh(bg_bh);
+- spin_lock(&jh->b_state_lock);
+- bg = (struct ocfs2_group_desc *) jh->b_committed_data;
+- if (bg)
+- ret = !ocfs2_test_bit(nr, (unsigned long *)bg->bg_bitmap);
+- else
+- ret = 1;
+- spin_unlock(&jh->b_state_lock);
+- }
+- jbd_unlock_bh_journal_head(bg_bh);
++ spin_lock(&jh->b_state_lock);
++ bg = (struct ocfs2_group_desc *) jh->b_committed_data;
++ if (bg)
++ ret = !ocfs2_test_bit(nr, (unsigned long *)bg->bg_bitmap);
++ else
++ ret = 1;
++ spin_unlock(&jh->b_state_lock);
++ jbd2_journal_put_journal_head(jh);
+
+ return ret;
+ }
--- /dev/null
+From bba496656a73fc1d1330b49c7f82843836e9feb1 Mon Sep 17 00:00:00 2001
+From: Christophe Leroy <christophe.leroy@csgroup.eu>
+Date: Wed, 22 Dec 2021 13:07:31 +0000
+Subject: powerpc/32: Fix boot failure with GCC latent entropy plugin
+
+From: Christophe Leroy <christophe.leroy@csgroup.eu>
+
+commit bba496656a73fc1d1330b49c7f82843836e9feb1 upstream.
+
+Boot fails with the GCC latent entropy plugin enabled.
+
+This is due to early boot functions trying to access the
+'latent_entropy' global data while the kernel is not yet relocated
+to its final destination.
+
+As there is no way to tell GCC to use PTRRELOC() to access it,
+disable the latent entropy plugin for early_32.o, feature-fixups.o
+and code-patching.o.
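+
+For reference, $(DISABLE_LATENT_ENTROPY_PLUGIN) is the per-file
+disable flag defined in scripts/Makefile.gcc-plugins (shown here for
+context, not part of this change):
+
+	DISABLE_LATENT_ENTROPY_PLUGIN += -fplugin-arg-latent_entropy_plugin-disable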
+
+Fixes: 38addce8b600 ("gcc-plugins: Add latent_entropy plugin")
+Cc: stable@vger.kernel.org # v4.9+
+Reported-by: Erhard Furtner <erhard_f@mailbox.org>
+Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Link: https://bugzilla.kernel.org/show_bug.cgi?id=215217
+Link: https://lore.kernel.org/r/2bac55483b8daf5b1caa163a45fa5f9cdbe18be4.1640178426.git.christophe.leroy@csgroup.eu
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/powerpc/kernel/Makefile | 1 +
+ arch/powerpc/lib/Makefile | 3 +++
+ 2 files changed, 4 insertions(+)
+
+--- a/arch/powerpc/kernel/Makefile
++++ b/arch/powerpc/kernel/Makefile
+@@ -11,6 +11,7 @@ CFLAGS_prom_init.o += -fPIC
+ CFLAGS_btext.o += -fPIC
+ endif
+
++CFLAGS_early_32.o += $(DISABLE_LATENT_ENTROPY_PLUGIN)
+ CFLAGS_cputable.o += $(DISABLE_LATENT_ENTROPY_PLUGIN)
+ CFLAGS_prom_init.o += $(DISABLE_LATENT_ENTROPY_PLUGIN)
+ CFLAGS_btext.o += $(DISABLE_LATENT_ENTROPY_PLUGIN)
+--- a/arch/powerpc/lib/Makefile
++++ b/arch/powerpc/lib/Makefile
+@@ -19,6 +19,9 @@ CFLAGS_code-patching.o += -DDISABLE_BRAN
+ CFLAGS_feature-fixups.o += -DDISABLE_BRANCH_PROFILING
+ endif
+
++CFLAGS_code-patching.o += $(DISABLE_LATENT_ENTROPY_PLUGIN)
++CFLAGS_feature-fixups.o += $(DISABLE_LATENT_ENTROPY_PLUGIN)
++
+ obj-y += alloc.o code-patching.o feature-fixups.o pmem.o inst.o test_code-patching.o
+
+ ifndef CONFIG_KASAN
--- /dev/null
+From 37eb7ca91b692e8e49e7dd50158349a6c8fb5b09 Mon Sep 17 00:00:00 2001
+From: Christophe Leroy <christophe.leroy@csgroup.eu>
+Date: Fri, 26 Nov 2021 13:40:35 +0100
+Subject: powerpc/32s: Allocate one 256k IBAT instead of two consecutive 128k IBATs
+
+From: Christophe Leroy <christophe.leroy@csgroup.eu>
+
+commit 37eb7ca91b692e8e49e7dd50158349a6c8fb5b09 upstream.
+
+Today we have the following IBATs allocated:
+
+ ---[ Instruction Block Address Translation ]---
+ 0: 0xc0000000-0xc03fffff 0x00000000 4M Kernel x m
+ 1: 0xc0400000-0xc05fffff 0x00400000 2M Kernel x m
+ 2: 0xc0600000-0xc06fffff 0x00600000 1M Kernel x m
+ 3: 0xc0700000-0xc077ffff 0x00700000 512K Kernel x m
+ 4: 0xc0780000-0xc079ffff 0x00780000 128K Kernel x m
+ 5: 0xc07a0000-0xc07bffff 0x007a0000 128K Kernel x m
+ 6: -
+ 7: -
+
+The two 128K BATs should be a single 256K BAT instead.
+
+When _etext is not aligned to a 128Kbytes boundary, the system
+allocates all necessary BATs up to the lower 128Kbytes boundary, then
+allocates an additional 128Kbytes BAT for the remaining block.
+
+Instead, align the top up to 128Kbytes so that the function directly
+allocates a single 256Kbytes BAT for the last block:
+
+ ---[ Instruction Block Address Translation ]---
+ 0: 0xc0000000-0xc03fffff 0x00000000 4M Kernel x m
+ 1: 0xc0400000-0xc05fffff 0x00400000 2M Kernel x m
+ 2: 0xc0600000-0xc06fffff 0x00600000 1M Kernel x m
+ 3: 0xc0700000-0xc077ffff 0x00700000 512K Kernel x m
+ 4: 0xc0780000-0xc07bffff 0x00780000 256K Kernel x m
+ 5: -
+ 6: -
+ 7: -
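+
+A rough sketch of the arithmetic with the addresses above:
+
+	base = 0x780000		/* 512K-aligned, after the 512K BAT */
+	top  = ALIGN(_etext - PAGE_OFFSET, SZ_128K) = 0x7c0000
+	top - base = 0x40000	/* one 256K BAT fits exactly */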
+
+Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Link: https://lore.kernel.org/r/ab58b296832b0ec650e2203200e060adbcb2677d.1637930421.git.christophe.leroy@csgroup.eu
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/powerpc/mm/book3s32/mmu.c | 5 ++---
+ 1 file changed, 2 insertions(+), 3 deletions(-)
+
+--- a/arch/powerpc/mm/book3s32/mmu.c
++++ b/arch/powerpc/mm/book3s32/mmu.c
+@@ -201,18 +201,17 @@ void mmu_mark_initmem_nx(void)
+ int nb = mmu_has_feature(MMU_FTR_USE_HIGH_BATS) ? 8 : 4;
+ int i;
+ unsigned long base = (unsigned long)_stext - PAGE_OFFSET;
+- unsigned long top = (unsigned long)_etext - PAGE_OFFSET;
++ unsigned long top = ALIGN((unsigned long)_etext - PAGE_OFFSET, SZ_128K);
+ unsigned long border = (unsigned long)__init_begin - PAGE_OFFSET;
+ unsigned long size;
+
+- for (i = 0; i < nb - 1 && base < top && top - base > (128 << 10);) {
++ for (i = 0; i < nb - 1 && base < top;) {
+ size = block_size(base, top);
+ setibat(i++, PAGE_OFFSET + base, base, size, PAGE_KERNEL_TEXT);
+ base += size;
+ }
+ if (base < top) {
+ size = block_size(base, top);
+- size = max(size, 128UL << 10);
+ if ((top - base) > size) {
+ size <<= 1;
+ if (strict_kernel_rwx_enabled() && base + size > border)
--- /dev/null
+From d37823c3528e5e0705fc7746bcbc2afffb619259 Mon Sep 17 00:00:00 2001
+From: Christophe Leroy <christophe.leroy@csgroup.eu>
+Date: Mon, 10 Jan 2022 15:29:25 +0000
+Subject: powerpc/32s: Fix kasan_init_region() for KASAN
+
+From: Christophe Leroy <christophe.leroy@csgroup.eu>
+
+commit d37823c3528e5e0705fc7746bcbc2afffb619259 upstream.
+
+Some configurations have been reported where the kernel doesn't boot
+with KASAN enabled.
+
+This is due to wrong BAT allocation for the KASAN area:
+
+ ---[ Data Block Address Translation ]---
+ 0: 0xc0000000-0xcfffffff 0x00000000 256M Kernel rw m
+ 1: 0xd0000000-0xdfffffff 0x10000000 256M Kernel rw m
+ 2: 0xe0000000-0xefffffff 0x20000000 256M Kernel rw m
+ 3: 0xf8000000-0xf9ffffff 0x2a000000 32M Kernel rw m
+ 4: 0xfa000000-0xfdffffff 0x2c000000 64M Kernel rw m
+
+A BAT must have both its virtual and physical addresses aligned to the
+size of the BAT. This is not the case for BAT 4 above: its virtual
+address 0xfa000000 is not 64M-aligned.
+
+Fix kasan_init_region() by using the block_size() function from
+book3s32/mmu.c. To be able to reuse it here, make it non-static and
+rename it to bat_block_size() to avoid a name conflict with
+block_size() defined in <linux/blkdev.h>.
+
+Also reuse find_free_bat() to avoid an error message from setbat()
+when no BAT is available.
+
+Finally, allocate the memory outside of the linear memory mapping to
+avoid wasting that precious space.
+
+With this change we get correct alignment for the BATs, and the KASAN
+shadow memory is allocated outside the linear memory space.
+
+ ---[ Data Block Address Translation ]---
+ 0: 0xc0000000-0xcfffffff 0x00000000 256M Kernel rw
+ 1: 0xd0000000-0xdfffffff 0x10000000 256M Kernel rw
+ 2: 0xe0000000-0xefffffff 0x20000000 256M Kernel rw
+ 3: 0xf8000000-0xfbffffff 0x7c000000 64M Kernel rw
+ 4: 0xfc000000-0xfdffffff 0x7a000000 32M Kernel rw
+
+Fixes: 7974c4732642 ("powerpc/32s: Implement dedicated kasan_init_region()")
+Cc: stable@vger.kernel.org
+Reported-by: Maxime Bizon <mbizon@freebox.fr>
+Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
+Tested-by: Maxime Bizon <mbizon@freebox.fr>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Link: https://lore.kernel.org/r/7a50ef902494d1325227d47d33dada01e52e5518.1641818726.git.christophe.leroy@csgroup.eu
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/powerpc/include/asm/book3s/32/mmu-hash.h | 2
+ arch/powerpc/mm/book3s32/mmu.c | 10 ++--
+ arch/powerpc/mm/kasan/book3s_32.c | 57 +++++++++++++-------------
+ 3 files changed, 37 insertions(+), 32 deletions(-)
+
+--- a/arch/powerpc/include/asm/book3s/32/mmu-hash.h
++++ b/arch/powerpc/include/asm/book3s/32/mmu-hash.h
+@@ -102,6 +102,8 @@ extern s32 patch__hash_page_B, patch__ha
+ extern s32 patch__flush_hash_A0, patch__flush_hash_A1, patch__flush_hash_A2;
+ extern s32 patch__flush_hash_B;
+
++int __init find_free_bat(void);
++unsigned int bat_block_size(unsigned long base, unsigned long top);
+ #endif /* !__ASSEMBLY__ */
+
+ /* We happily ignore the smaller BATs on 601, we don't actually use
+--- a/arch/powerpc/mm/book3s32/mmu.c
++++ b/arch/powerpc/mm/book3s32/mmu.c
+@@ -72,7 +72,7 @@ unsigned long p_block_mapped(phys_addr_t
+ return 0;
+ }
+
+-static int find_free_bat(void)
++int __init find_free_bat(void)
+ {
+ int b;
+ int n = mmu_has_feature(MMU_FTR_USE_HIGH_BATS) ? 8 : 4;
+@@ -96,7 +96,7 @@ static int find_free_bat(void)
+ * - block size has to be a power of two. This is calculated by finding the
+ * highest bit set to 1.
+ */
+-static unsigned int block_size(unsigned long base, unsigned long top)
++unsigned int bat_block_size(unsigned long base, unsigned long top)
+ {
+ unsigned int max_size = SZ_256M;
+ unsigned int base_shift = (ffs(base) - 1) & 31;
+@@ -141,7 +141,7 @@ static unsigned long __init __mmu_mapin_
+ int idx;
+
+ while ((idx = find_free_bat()) != -1 && base != top) {
+- unsigned int size = block_size(base, top);
++ unsigned int size = bat_block_size(base, top);
+
+ if (size < 128 << 10)
+ break;
+@@ -206,12 +206,12 @@ void mmu_mark_initmem_nx(void)
+ unsigned long size;
+
+ for (i = 0; i < nb - 1 && base < top;) {
+- size = block_size(base, top);
++ size = bat_block_size(base, top);
+ setibat(i++, PAGE_OFFSET + base, base, size, PAGE_KERNEL_TEXT);
+ base += size;
+ }
+ if (base < top) {
+- size = block_size(base, top);
++ size = bat_block_size(base, top);
+ if ((top - base) > size) {
+ size <<= 1;
+ if (strict_kernel_rwx_enabled() && base + size > border)
+--- a/arch/powerpc/mm/kasan/book3s_32.c
++++ b/arch/powerpc/mm/kasan/book3s_32.c
+@@ -10,48 +10,51 @@ int __init kasan_init_region(void *start
+ {
+ unsigned long k_start = (unsigned long)kasan_mem_to_shadow(start);
+ unsigned long k_end = (unsigned long)kasan_mem_to_shadow(start + size);
+- unsigned long k_cur = k_start;
+- int k_size = k_end - k_start;
+- int k_size_base = 1 << (ffs(k_size) - 1);
++ unsigned long k_nobat = k_start;
++ unsigned long k_cur;
++ phys_addr_t phys;
+ int ret;
+- void *block;
+
+- block = memblock_alloc(k_size, k_size_base);
++ while (k_nobat < k_end) {
++ unsigned int k_size = bat_block_size(k_nobat, k_end);
++ int idx = find_free_bat();
++
++ if (idx == -1)
++ break;
++ if (k_size < SZ_128K)
++ break;
++ phys = memblock_phys_alloc_range(k_size, k_size, 0,
++ MEMBLOCK_ALLOC_ANYWHERE);
++ if (!phys)
++ break;
+
+- if (block && k_size_base >= SZ_128K && k_start == ALIGN(k_start, k_size_base)) {
+- int shift = ffs(k_size - k_size_base);
+- int k_size_more = shift ? 1 << (shift - 1) : 0;
+-
+- setbat(-1, k_start, __pa(block), k_size_base, PAGE_KERNEL);
+- if (k_size_more >= SZ_128K)
+- setbat(-1, k_start + k_size_base, __pa(block) + k_size_base,
+- k_size_more, PAGE_KERNEL);
+- if (v_block_mapped(k_start))
+- k_cur = k_start + k_size_base;
+- if (v_block_mapped(k_start + k_size_base))
+- k_cur = k_start + k_size_base + k_size_more;
+-
+- update_bats();
++ setbat(idx, k_nobat, phys, k_size, PAGE_KERNEL);
++ k_nobat += k_size;
+ }
++ if (k_nobat != k_start)
++ update_bats();
+
+- if (!block)
+- block = memblock_alloc(k_size, PAGE_SIZE);
+- if (!block)
+- return -ENOMEM;
++ if (k_nobat < k_end) {
++ phys = memblock_phys_alloc_range(k_end - k_nobat, PAGE_SIZE, 0,
++ MEMBLOCK_ALLOC_ANYWHERE);
++ if (!phys)
++ return -ENOMEM;
++ }
+
+ ret = kasan_init_shadow_page_tables(k_start, k_end);
+ if (ret)
+ return ret;
+
+- kasan_update_early_region(k_start, k_cur, __pte(0));
++ kasan_update_early_region(k_start, k_nobat, __pte(0));
+
+- for (; k_cur < k_end; k_cur += PAGE_SIZE) {
++ for (k_cur = k_nobat; k_cur < k_end; k_cur += PAGE_SIZE) {
+ pmd_t *pmd = pmd_off_k(k_cur);
+- void *va = block + k_cur - k_start;
+- pte_t pte = pfn_pte(PHYS_PFN(__pa(va)), PAGE_KERNEL);
++ pte_t pte = pfn_pte(PHYS_PFN(phys + k_cur - k_nobat), PAGE_KERNEL);
+
+ __set_pte_at(&init_mm, k_cur, pte_offset_kernel(pmd, k_cur), pte, 0);
+ }
+ flush_tlb_kernel_range(k_start, k_end);
++ memset(kasan_mem_to_shadow(start), 0, k_end - k_start);
++
+ return 0;
+ }
--- /dev/null
+From 809232619f5b15e31fb3563985e705454f32621f Mon Sep 17 00:00:00 2001
+From: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+Date: Mon, 17 Jan 2022 15:30:10 -0500
+Subject: sched/membarrier: Fix membarrier-rseq fence command missing from query bitmask
+
+From: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+
+commit 809232619f5b15e31fb3563985e705454f32621f upstream.
+
+The membarrier command MEMBARRIER_CMD_QUERY allows querying the
+available membarrier commands. When the membarrier-rseq fence commands
+were added, a new MEMBARRIER_CMD_PRIVATE_EXPEDITED_RSEQ_BITMASK was
+introduced with the intent to expose them with the MEMBARRIER_CMD_QUERY
+command, but it was never added to MEMBARRIER_CMD_BITMASK.
+
+The membarrier-rseq fence commands are therefore not wired up with the
+query command.
+
+Rename MEMBARRIER_CMD_PRIVATE_EXPEDITED_RSEQ_BITMASK to
+MEMBARRIER_PRIVATE_EXPEDITED_RSEQ_BITMASK (the bitmask is not a command
+per se), and change the erroneous
+MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED_RSEQ_BITMASK (which does not
+actually exist) to MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED_RSEQ.
+
+Wire up MEMBARRIER_PRIVATE_EXPEDITED_RSEQ_BITMASK in
+MEMBARRIER_CMD_BITMASK. Fixing this allows discovering availability of
+the membarrier-rseq fence feature.
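+
+With the fix in place, userspace can probe for the feature; a minimal
+sketch (error handling omitted):
+
+	#include <linux/membarrier.h>
+	#include <sys/syscall.h>
+	#include <unistd.h>
+
+	int mask = syscall(__NR_membarrier, MEMBARRIER_CMD_QUERY, 0, 0);
+
+	if (mask >= 0 && (mask & MEMBARRIER_CMD_PRIVATE_EXPEDITED_RSEQ))
+		/* membarrier-rseq fences are available */;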
+
+Fixes: 2a36ab717e8f ("rseq/membarrier: Add MEMBARRIER_CMD_PRIVATE_EXPEDITED_RSEQ")
+Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Cc: <stable@vger.kernel.org> # 5.10+
+Link: https://lkml.kernel.org/r/20220117203010.30129-1-mathieu.desnoyers@efficios.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/sched/membarrier.c | 9 +++++----
+ 1 file changed, 5 insertions(+), 4 deletions(-)
+
+--- a/kernel/sched/membarrier.c
++++ b/kernel/sched/membarrier.c
+@@ -19,11 +19,11 @@
+ #endif
+
+ #ifdef CONFIG_RSEQ
+-#define MEMBARRIER_CMD_PRIVATE_EXPEDITED_RSEQ_BITMASK \
++#define MEMBARRIER_PRIVATE_EXPEDITED_RSEQ_BITMASK \
+ (MEMBARRIER_CMD_PRIVATE_EXPEDITED_RSEQ \
+- | MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED_RSEQ_BITMASK)
++ | MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED_RSEQ)
+ #else
+-#define MEMBARRIER_CMD_PRIVATE_EXPEDITED_RSEQ_BITMASK 0
++#define MEMBARRIER_PRIVATE_EXPEDITED_RSEQ_BITMASK 0
+ #endif
+
+ #define MEMBARRIER_CMD_BITMASK \
+@@ -31,7 +31,8 @@
+ | MEMBARRIER_CMD_REGISTER_GLOBAL_EXPEDITED \
+ | MEMBARRIER_CMD_PRIVATE_EXPEDITED \
+ | MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED \
+- | MEMBARRIER_PRIVATE_EXPEDITED_SYNC_CORE_BITMASK)
++ | MEMBARRIER_PRIVATE_EXPEDITED_SYNC_CORE_BITMASK \
++ | MEMBARRIER_PRIVATE_EXPEDITED_RSEQ_BITMASK)
+
+ static void ipi_mb(void *info)
+ {
usb-core-fix-hang-in-usb_kill_urb-by-adding-memory-barriers.patch
usb-typec-tcpm-do-not-disconnect-while-receiving-vbus-off.patch
ucsi_ccg-check-dev_int-bit-only-when-starting-ccg4.patch
+jbd2-export-jbd2_journal__journal_head.patch
+ocfs2-fix-a-deadlock-when-commit-trans.patch
+sched-membarrier-fix-membarrier-rseq-fence-command-missing-from-query-bitmask.patch
+x86-mce-amd-allow-thresholding-interface-updates-after-init.patch
+powerpc-32s-allocate-one-256k-ibat-instead-of-two-consecutives-128k-ibats.patch
+powerpc-32s-fix-kasan_init_region-for-kasan.patch
+powerpc-32-fix-boot-failure-with-gcc-latent-entropy-plugin.patch
--- /dev/null
+From 1f52b0aba6fd37653416375cb8a1ca673acf8d5f Mon Sep 17 00:00:00 2001
+From: Yazen Ghannam <yazen.ghannam@amd.com>
+Date: Mon, 17 Jan 2022 16:13:28 +0000
+Subject: x86/MCE/AMD: Allow thresholding interface updates after init
+
+From: Yazen Ghannam <yazen.ghannam@amd.com>
+
+commit 1f52b0aba6fd37653416375cb8a1ca673acf8d5f upstream.
+
+Changes to the AMD thresholding sysfs code prevent sysfs writes from
+updating the underlying registers once CPU init is completed, i.e.
+"threshold_banks" is set.
+
+Allow the registers to be updated if the thresholding interface is
+already initialized or if in the init path. Use the "set_lvt_off" value
+to indicate if running in the init path, since this value is only set
+during init.
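+
+Equivalently, the new guard skips the register update only when
+neither condition holds; a rough sketch of the logic:
+
+	bool initialized  = this_cpu_read(threshold_banks) != NULL;
+	bool in_init_path = tr->set_lvt_off;	/* only set during init */
+
+	if (!initialized && !in_init_path)
+		return;		/* racing with offline and not in init */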
+
+Fixes: a037f3ca0ea0 ("x86/mce/amd: Make threshold bank setting hotplug robust")
+Signed-off-by: Yazen Ghannam <yazen.ghannam@amd.com>
+Signed-off-by: Borislav Petkov <bp@suse.de>
+Cc: <stable@vger.kernel.org>
+Link: https://lore.kernel.org/r/20220117161328.19148-1-yazen.ghannam@amd.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kernel/cpu/mce/amd.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/x86/kernel/cpu/mce/amd.c
++++ b/arch/x86/kernel/cpu/mce/amd.c
+@@ -387,7 +387,7 @@ static void threshold_restart_bank(void
+ u32 hi, lo;
+
+ /* sysfs write might race against an offline operation */
+- if (this_cpu_read(threshold_banks))
++ if (!this_cpu_read(threshold_banks) && !tr->set_lvt_off)
+ return;
+
+ rdmsr(tr->b->address, lo, hi);