--- /dev/null
+From e69012400b0cb42b2070748322cb72f9effec00f Mon Sep 17 00:00:00 2001
+From: Jisheng Zhang <Jisheng.Zhang@synaptics.com>
+Date: Tue, 25 May 2021 10:45:51 +0800
+Subject: arm64: mm: don't use CON and BLK mapping if KFENCE is enabled
+
+From: Jisheng Zhang <Jisheng.Zhang@synaptics.com>
+
+commit e69012400b0cb42b2070748322cb72f9effec00f upstream.
+
+When we added KFENCE support for arm64, we intended that it would
+force the entire linear map to be mapped at page granularity, but we
+only enforced this in arch_add_memory() and not in map_mem(), so
+memory mapped at boot time can be mapped at a larger granularity.
+
+When booting a kernel with KFENCE=y and RODATA_FULL=n, this results in
+the following WARNING at boot:
+
+[ 0.000000] ------------[ cut here ]------------
+[ 0.000000] WARNING: CPU: 0 PID: 0 at mm/memory.c:2462 apply_to_pmd_range+0xec/0x190
+[ 0.000000] CPU: 0 PID: 0 Comm: swapper/0 Not tainted 5.13.0-rc1+ #10
+[ 0.000000] Hardware name: linux,dummy-virt (DT)
+[ 0.000000] pstate: 600000c5 (nZCv daIF -PAN -UAO -TCO BTYPE=--)
+[ 0.000000] pc : apply_to_pmd_range+0xec/0x190
+[ 0.000000] lr : __apply_to_page_range+0x94/0x170
+[ 0.000000] sp : ffffffc010573e20
+[ 0.000000] x29: ffffffc010573e20 x28: ffffff801f400000 x27: ffffff801f401000
+[ 0.000000] x26: 0000000000000001 x25: ffffff801f400fff x24: ffffffc010573f28
+[ 0.000000] x23: ffffffc01002b710 x22: ffffffc0105fa450 x21: ffffffc010573ee4
+[ 0.000000] x20: ffffff801fffb7d0 x19: ffffff801f401000 x18: 00000000fffffffe
+[ 0.000000] x17: 000000000000003f x16: 000000000000000a x15: ffffffc01060b940
+[ 0.000000] x14: 0000000000000000 x13: 0098968000000000 x12: 0000000098968000
+[ 0.000000] x11: 0000000000000000 x10: 0000000098968000 x9 : 0000000000000001
+[ 0.000000] x8 : 0000000000000000 x7 : ffffffc010573ee4 x6 : 0000000000000001
+[ 0.000000] x5 : ffffffc010573f28 x4 : ffffffc01002b710 x3 : 0000000040000000
+[ 0.000000] x2 : ffffff801f5fffff x1 : 0000000000000001 x0 : 007800005f400705
+[ 0.000000] Call trace:
+[ 0.000000] apply_to_pmd_range+0xec/0x190
+[ 0.000000] __apply_to_page_range+0x94/0x170
+[ 0.000000] apply_to_page_range+0x10/0x20
+[ 0.000000] __change_memory_common+0x50/0xdc
+[ 0.000000] set_memory_valid+0x30/0x40
+[ 0.000000] kfence_init_pool+0x9c/0x16c
+[ 0.000000] kfence_init+0x20/0x98
+[ 0.000000] start_kernel+0x284/0x3f8
+
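+The page-granularity requirement comes from the way KFENCE toggles its
+guard pages: kfence_protect_page() on arm64 changes the validity of a
+single page via set_memory_valid(), and apply_to_pmd_range() (seen in the
+trace above) warns when that walk runs into a block mapping instead of
+page-level entries. A minimal sketch of that helper, assuming it matches
+the upstream arch/arm64/include/asm/kfence.h hook, is:
+
+	/* Illustrative sketch only, not part of this patch. */
+	static inline bool kfence_protect_page(unsigned long addr, bool protect)
+	{
+		/* Flip the valid bit of exactly one linear-map page. */
+		set_memory_valid(addr, 1, !protect);
+
+		return true;
+	}
+
+With RODATA_FULL=n and nothing else forcing page granularity, map_mem()
+can still create block/contiguous mappings for the linear map, so the
+per-page attribute change above has no PTE-level entries to operate on;
+hence the extra IS_ENABLED(CONFIG_KFENCE) check below.
+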
+Fixes: 840b23986344 ("arm64, kfence: enable KFENCE for ARM64")
+Cc: <stable@vger.kernel.org> # 5.12.x
+Signed-off-by: Jisheng Zhang <Jisheng.Zhang@synaptics.com>
+Acked-by: Mark Rutland <mark.rutland@arm.com>
+Acked-by: Marco Elver <elver@google.com>
+Tested-by: Marco Elver <elver@google.com>
+Link: https://lore.kernel.org/r/20210525104551.2ec37f77@xhacker.debian
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm64/mm/mmu.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/arch/arm64/mm/mmu.c
++++ b/arch/arm64/mm/mmu.c
+@@ -492,7 +492,8 @@ static void __init map_mem(pgd_t *pgdp)
+ int flags = 0;
+ u64 i;
+
+- if (rodata_full || crash_mem_map || debug_pagealloc_enabled())
++ if (rodata_full || crash_mem_map || debug_pagealloc_enabled() ||
++ IS_ENABLED(CONFIG_KFENCE))
+ flags = NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS;
+
+ /*
--- /dev/null
+From eefb45eef5c4c425e87667af8f5e904fbdd47abf Mon Sep 17 00:00:00 2001
+From: Chinmay Agarwal <chinagar@codeaurora.org>
+Date: Thu, 22 Apr 2021 01:12:22 +0530
+Subject: neighbour: Prevent race condition in neighbour subsystem
+
+From: Chinmay Agarwal <chinagar@codeaurora.org>
+
+commit eefb45eef5c4c425e87667af8f5e904fbdd47abf upstream.
+
+The following race condition was detected:
+
+<CPU A, t0>: Executing __netif_receive_skb() -> __netif_receive_skb_core()
+-> arp_rcv() -> arp_process(). arp_process() calls __neigh_lookup(), which
+takes a reference on neighbour entry 'n'.
+Further along, arp_process() calls neigh_update() -> __neigh_update(). The
+neighbour entry is unlocked just before the call to neigh_update_gc_list().
+
+This unlocking paves the way for another thread to take hold of the same
+entry, mark it dead and remove it from the gc_list.
+
+<CPU B, t1> - neigh_flush_dev() is under execution and calls
+neigh_mark_dead(n), marking the neighbour entry 'n' as dead; 'n' is also
+removed from the gc_list.
+Further along, neigh_flush_dev() calls neigh_cleanup_and_release(n), but
+since the reference count was increased at t0, 'n' cannot be destroyed.
+
+<CPU A, t3> - Code hits neigh_update_gc_list() with the neighbour entry
+already marked dead.
+
+<CPU A, t4> - arp_process() finally calls neigh_release(n), destroying the
+neighbour entry, and we have a destroyed entry still on the gc_list.
+
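+The fix below simply bails out of neigh_update_gc_list() when the entry is
+already dead. This is sufficient because both sides serialize on the same
+locks: n->dead is set by neigh_mark_dead() with n->tbl->lock and n->lock
+write-held, which are exactly the locks taken at the top of
+neigh_update_gc_list(). A rough sketch of that counterpart, assuming it
+matches upstream net/core/neighbour.c, is:
+
+	/* Callers hold n->tbl->lock and n->lock for writing. */
+	static void neigh_mark_dead(struct neighbour *n)
+	{
+		n->dead = 1;
+		if (!list_empty(&n->gc_list)) {
+			list_del_init(&n->gc_list);
+			atomic_dec(&n->tbl->gc_entries);
+		}
+	}
+
+Once n->dead is observed under those locks, neigh_update_gc_list() can no
+longer re-add the dead entry to the gc_list.
+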
+Fixes: eb4e8fac00d1 ("neighbour: Prevent a dead entry from updating gc_list")
+Signed-off-by: Chinmay Agarwal <chinagar@codeaurora.org>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/core/neighbour.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+--- a/net/core/neighbour.c
++++ b/net/core/neighbour.c
+@@ -131,6 +131,9 @@ static void neigh_update_gc_list(struct
+ write_lock_bh(&n->tbl->lock);
+ write_lock(&n->lock);
+
++ if (n->dead)
++ goto out;
++
+ /* remove from the gc list if new state is permanent or if neighbor
+ * is externally learned; otherwise entry should be on the gc list
+ */
+@@ -147,6 +150,7 @@ static void neigh_update_gc_list(struct
+ atomic_inc(&n->tbl->gc_entries);
+ }
+
++out:
+ write_unlock(&n->lock);
+ write_unlock_bh(&n->tbl->lock);
+ }