--- /dev/null
+From 4df7cbe523fb644a6ed48e4bd26b485a4f168fb2 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 10 Dec 2021 15:14:06 +0000
+Subject: arm64: atomics: format whitespace consistently
+
+From: Mark Rutland <mark.rutland@arm.com>
+
+[ Upstream commit 8e6082e94aac6d0338883b5953631b662a5a9188 ]
+
+The code for the atomic ops is formatted inconsistently, and while this
+is not a functional problem it is rather distracting when working on
+them.
+
+Some ops have consistent indentation, e.g.
+
+| #define ATOMIC_OP_ADD_RETURN(name, mb, cl...) \
+| static inline int __lse_atomic_add_return##name(int i, atomic_t *v) \
+| { \
+| u32 tmp; \
+| \
+| asm volatile( \
+| __LSE_PREAMBLE \
+| " ldadd" #mb " %w[i], %w[tmp], %[v]\n" \
+| " add %w[i], %w[i], %w[tmp]" \
+| : [i] "+r" (i), [v] "+Q" (v->counter), [tmp] "=&r" (tmp) \
+| : "r" (v) \
+| : cl); \
+| \
+| return i; \
+| }
+
+While others have negative indentation for some lines, and/or have
+misaligned trailing backslashes, e.g.
+
+| static inline void __lse_atomic_##op(int i, atomic_t *v) \
+| { \
+| asm volatile( \
+| __LSE_PREAMBLE \
+| " " #asm_op " %w[i], %[v]\n" \
+| : [i] "+r" (i), [v] "+Q" (v->counter) \
+| : "r" (v)); \
+| }
+
+This patch makes the indentation consistent and also aligns the trailing
+backslashes. This makes the code easier to read for those (like myself)
+who are easily distracted by these inconsistencies.
+
+This is intended as a cleanup.
+There should be no functional change as a result of this patch.
+
+Signed-off-by: Mark Rutland <mark.rutland@arm.com>
+Cc: Boqun Feng <boqun.feng@gmail.com>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Will Deacon <will@kernel.org>
+Acked-by: Will Deacon <will@kernel.org>
+Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Link: https://lore.kernel.org/r/20211210151410.2782645-2-mark.rutland@arm.com
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Stable-dep-of: 031af50045ea ("arm64: cmpxchg_double*: hazard against entire exchange variable")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/arm64/include/asm/atomic_ll_sc.h | 86 +++++++++++++--------------
+ arch/arm64/include/asm/atomic_lse.h | 14 ++---
+ 2 files changed, 50 insertions(+), 50 deletions(-)
+
+diff --git a/arch/arm64/include/asm/atomic_ll_sc.h b/arch/arm64/include/asm/atomic_ll_sc.h
+index 7b012148bfd6..f5743c911303 100644
+--- a/arch/arm64/include/asm/atomic_ll_sc.h
++++ b/arch/arm64/include/asm/atomic_ll_sc.h
+@@ -44,11 +44,11 @@ __ll_sc_atomic_##op(int i, atomic_t *v) \
+ \
+ asm volatile("// atomic_" #op "\n" \
+ __LL_SC_FALLBACK( \
+-" prfm pstl1strm, %2\n" \
+-"1: ldxr %w0, %2\n" \
+-" " #asm_op " %w0, %w0, %w3\n" \
+-" stxr %w1, %w0, %2\n" \
+-" cbnz %w1, 1b\n") \
++ " prfm pstl1strm, %2\n" \
++ "1: ldxr %w0, %2\n" \
++ " " #asm_op " %w0, %w0, %w3\n" \
++ " stxr %w1, %w0, %2\n" \
++ " cbnz %w1, 1b\n") \
+ : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \
+ : __stringify(constraint) "r" (i)); \
+ }
+@@ -62,12 +62,12 @@ __ll_sc_atomic_##op##_return##name(int i, atomic_t *v) \
+ \
+ asm volatile("// atomic_" #op "_return" #name "\n" \
+ __LL_SC_FALLBACK( \
+-" prfm pstl1strm, %2\n" \
+-"1: ld" #acq "xr %w0, %2\n" \
+-" " #asm_op " %w0, %w0, %w3\n" \
+-" st" #rel "xr %w1, %w0, %2\n" \
+-" cbnz %w1, 1b\n" \
+-" " #mb ) \
++ " prfm pstl1strm, %2\n" \
++ "1: ld" #acq "xr %w0, %2\n" \
++ " " #asm_op " %w0, %w0, %w3\n" \
++ " st" #rel "xr %w1, %w0, %2\n" \
++ " cbnz %w1, 1b\n" \
++ " " #mb ) \
+ : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \
+ : __stringify(constraint) "r" (i) \
+ : cl); \
+@@ -84,12 +84,12 @@ __ll_sc_atomic_fetch_##op##name(int i, atomic_t *v) \
+ \
+ asm volatile("// atomic_fetch_" #op #name "\n" \
+ __LL_SC_FALLBACK( \
+-" prfm pstl1strm, %3\n" \
+-"1: ld" #acq "xr %w0, %3\n" \
+-" " #asm_op " %w1, %w0, %w4\n" \
+-" st" #rel "xr %w2, %w1, %3\n" \
+-" cbnz %w2, 1b\n" \
+-" " #mb ) \
++ " prfm pstl1strm, %3\n" \
++ "1: ld" #acq "xr %w0, %3\n" \
++ " " #asm_op " %w1, %w0, %w4\n" \
++ " st" #rel "xr %w2, %w1, %3\n" \
++ " cbnz %w2, 1b\n" \
++ " " #mb ) \
+ : "=&r" (result), "=&r" (val), "=&r" (tmp), "+Q" (v->counter) \
+ : __stringify(constraint) "r" (i) \
+ : cl); \
+@@ -143,11 +143,11 @@ __ll_sc_atomic64_##op(s64 i, atomic64_t *v) \
+ \
+ asm volatile("// atomic64_" #op "\n" \
+ __LL_SC_FALLBACK( \
+-" prfm pstl1strm, %2\n" \
+-"1: ldxr %0, %2\n" \
+-" " #asm_op " %0, %0, %3\n" \
+-" stxr %w1, %0, %2\n" \
+-" cbnz %w1, 1b") \
++ " prfm pstl1strm, %2\n" \
++ "1: ldxr %0, %2\n" \
++ " " #asm_op " %0, %0, %3\n" \
++ " stxr %w1, %0, %2\n" \
++ " cbnz %w1, 1b") \
+ : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \
+ : __stringify(constraint) "r" (i)); \
+ }
+@@ -161,12 +161,12 @@ __ll_sc_atomic64_##op##_return##name(s64 i, atomic64_t *v) \
+ \
+ asm volatile("// atomic64_" #op "_return" #name "\n" \
+ __LL_SC_FALLBACK( \
+-" prfm pstl1strm, %2\n" \
+-"1: ld" #acq "xr %0, %2\n" \
+-" " #asm_op " %0, %0, %3\n" \
+-" st" #rel "xr %w1, %0, %2\n" \
+-" cbnz %w1, 1b\n" \
+-" " #mb ) \
++ " prfm pstl1strm, %2\n" \
++ "1: ld" #acq "xr %0, %2\n" \
++ " " #asm_op " %0, %0, %3\n" \
++ " st" #rel "xr %w1, %0, %2\n" \
++ " cbnz %w1, 1b\n" \
++ " " #mb ) \
+ : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \
+ : __stringify(constraint) "r" (i) \
+ : cl); \
+@@ -176,19 +176,19 @@ __ll_sc_atomic64_##op##_return##name(s64 i, atomic64_t *v) \
+
+ #define ATOMIC64_FETCH_OP(name, mb, acq, rel, cl, op, asm_op, constraint)\
+ static inline long \
+-__ll_sc_atomic64_fetch_##op##name(s64 i, atomic64_t *v) \
++__ll_sc_atomic64_fetch_##op##name(s64 i, atomic64_t *v) \
+ { \
+ s64 result, val; \
+ unsigned long tmp; \
+ \
+ asm volatile("// atomic64_fetch_" #op #name "\n" \
+ __LL_SC_FALLBACK( \
+-" prfm pstl1strm, %3\n" \
+-"1: ld" #acq "xr %0, %3\n" \
+-" " #asm_op " %1, %0, %4\n" \
+-" st" #rel "xr %w2, %1, %3\n" \
+-" cbnz %w2, 1b\n" \
+-" " #mb ) \
++ " prfm pstl1strm, %3\n" \
++ "1: ld" #acq "xr %0, %3\n" \
++ " " #asm_op " %1, %0, %4\n" \
++ " st" #rel "xr %w2, %1, %3\n" \
++ " cbnz %w2, 1b\n" \
++ " " #mb ) \
+ : "=&r" (result), "=&r" (val), "=&r" (tmp), "+Q" (v->counter) \
+ : __stringify(constraint) "r" (i) \
+ : cl); \
+@@ -241,14 +241,14 @@ __ll_sc_atomic64_dec_if_positive(atomic64_t *v)
+
+ asm volatile("// atomic64_dec_if_positive\n"
+ __LL_SC_FALLBACK(
+-" prfm pstl1strm, %2\n"
+-"1: ldxr %0, %2\n"
+-" subs %0, %0, #1\n"
+-" b.lt 2f\n"
+-" stlxr %w1, %0, %2\n"
+-" cbnz %w1, 1b\n"
+-" dmb ish\n"
+-"2:")
++ " prfm pstl1strm, %2\n"
++ "1: ldxr %0, %2\n"
++ " subs %0, %0, #1\n"
++ " b.lt 2f\n"
++ " stlxr %w1, %0, %2\n"
++ " cbnz %w1, 1b\n"
++ " dmb ish\n"
++ "2:")
+ : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
+ :
+ : "cc", "memory");
+diff --git a/arch/arm64/include/asm/atomic_lse.h b/arch/arm64/include/asm/atomic_lse.h
+index da3280f639cd..ab661375835e 100644
+--- a/arch/arm64/include/asm/atomic_lse.h
++++ b/arch/arm64/include/asm/atomic_lse.h
+@@ -11,11 +11,11 @@
+ #define __ASM_ATOMIC_LSE_H
+
+ #define ATOMIC_OP(op, asm_op) \
+-static inline void __lse_atomic_##op(int i, atomic_t *v) \
++static inline void __lse_atomic_##op(int i, atomic_t *v) \
+ { \
+ asm volatile( \
+ __LSE_PREAMBLE \
+-" " #asm_op " %w[i], %[v]\n" \
++ " " #asm_op " %w[i], %[v]\n" \
+ : [i] "+r" (i), [v] "+Q" (v->counter) \
+ : "r" (v)); \
+ }
+@@ -32,7 +32,7 @@ static inline int __lse_atomic_fetch_##op##name(int i, atomic_t *v) \
+ { \
+ asm volatile( \
+ __LSE_PREAMBLE \
+-" " #asm_op #mb " %w[i], %w[i], %[v]" \
++ " " #asm_op #mb " %w[i], %w[i], %[v]" \
+ : [i] "+r" (i), [v] "+Q" (v->counter) \
+ : "r" (v) \
+ : cl); \
+@@ -130,7 +130,7 @@ static inline int __lse_atomic_sub_return##name(int i, atomic_t *v) \
+ " add %w[i], %w[i], %w[tmp]" \
+ : [i] "+&r" (i), [v] "+Q" (v->counter), [tmp] "=&r" (tmp) \
+ : "r" (v) \
+- : cl); \
++ : cl); \
+ \
+ return i; \
+ }
+@@ -168,7 +168,7 @@ static inline void __lse_atomic64_##op(s64 i, atomic64_t *v) \
+ { \
+ asm volatile( \
+ __LSE_PREAMBLE \
+-" " #asm_op " %[i], %[v]\n" \
++ " " #asm_op " %[i], %[v]\n" \
+ : [i] "+r" (i), [v] "+Q" (v->counter) \
+ : "r" (v)); \
+ }
+@@ -185,7 +185,7 @@ static inline long __lse_atomic64_fetch_##op##name(s64 i, atomic64_t *v)\
+ { \
+ asm volatile( \
+ __LSE_PREAMBLE \
+-" " #asm_op #mb " %[i], %[i], %[v]" \
++ " " #asm_op #mb " %[i], %[i], %[v]" \
+ : [i] "+r" (i), [v] "+Q" (v->counter) \
+ : "r" (v) \
+ : cl); \
+@@ -272,7 +272,7 @@ static inline void __lse_atomic64_sub(s64 i, atomic64_t *v)
+ }
+
+ #define ATOMIC64_OP_SUB_RETURN(name, mb, cl...) \
+-static inline long __lse_atomic64_sub_return##name(s64 i, atomic64_t *v) \
++static inline long __lse_atomic64_sub_return##name(s64 i, atomic64_t *v)\
+ { \
+ unsigned long tmp; \
+ \
+--
+2.35.1
+
--- /dev/null
+From 4f10330cb442492036baf4aa87ec6987cf03716d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 17 Aug 2022 16:59:13 +0100
+Subject: arm64: atomics: remove LL/SC trampolines
+
+From: Mark Rutland <mark.rutland@arm.com>
+
+[ Upstream commit b2c3ccbd0011bb3b51d0fec24cb3a5812b1ec8ea ]
+
+When CONFIG_ARM64_LSE_ATOMICS=y, each use of an LL/SC atomic results in
+a fragment of code being generated in a subsection without a clear
+association with its caller. A trampoline in the caller branches to the
+LL/SC atomic with a direct branch, and the atomic directly branches
+back into its trampoline.
+
+This breaks backtracing, as any PC within the out-of-line fragment will
+be symbolized as an offset from the nearest prior symbol (which may not
+be the function using the atomic), and since the atomic returns with a
+direct branch, the caller's PC may be missing from the backtrace.
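+
+Schematically, each use ends up generated like this (a sketch based on
+the __LL_SC_FALLBACK macro removed below, not actual compiler output):
+
+| foo:
+|	...
+|	b	3f		// enter the out-of-line fragment
+|	.subsection 1
+| 3:
+|	// LL/SC loop lives here, far from foo()'s body
+|	...
+|	b	4f		// direct branch back into foo()
+|	.previous
+| 4:
+|	...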
+
+For example, with secondary_start_kernel() hacked to contain
+atomic_inc(NULL), the resulting exception can be reported as being taken
+from cpus_are_stuck_in_kernel():
+
+| Unable to handle kernel NULL pointer dereference at virtual address 0000000000000000
+| Mem abort info:
+| ESR = 0x0000000096000004
+| EC = 0x25: DABT (current EL), IL = 32 bits
+| SET = 0, FnV = 0
+| EA = 0, S1PTW = 0
+| FSC = 0x04: level 0 translation fault
+| Data abort info:
+| ISV = 0, ISS = 0x00000004
+| CM = 0, WnR = 0
+| [0000000000000000] user address but active_mm is swapper
+| Internal error: Oops: 96000004 [#1] PREEMPT SMP
+| Modules linked in:
+| CPU: 1 PID: 0 Comm: swapper/1 Not tainted 5.19.0-11219-geb555cb5b794-dirty #3
+| Hardware name: linux,dummy-virt (DT)
+| pstate: 60000005 (nZCv daif -PAN -UAO -TCO -DIT -SSBS BTYPE=--)
+| pc : cpus_are_stuck_in_kernel+0xa4/0x120
+| lr : secondary_start_kernel+0x164/0x170
+| sp : ffff80000a4cbe90
+| x29: ffff80000a4cbe90 x28: 0000000000000000 x27: 0000000000000000
+| x26: 0000000000000000 x25: 0000000000000000 x24: 0000000000000000
+| x23: 0000000000000000 x22: 0000000000000000 x21: 0000000000000000
+| x20: 0000000000000001 x19: 0000000000000001 x18: 0000000000000008
+| x17: 3030383832343030 x16: 3030303030307830 x15: ffff80000a4cbab0
+| x14: 0000000000000001 x13: 5d31666130663133 x12: 3478305b20313030
+| x11: 3030303030303078 x10: 3020726f73736563 x9 : 726f737365636f72
+| x8 : ffff800009ff2ef0 x7 : 0000000000000003 x6 : 0000000000000000
+| x5 : 0000000000000000 x4 : 0000000000000000 x3 : 0000000000000100
+| x2 : 0000000000000000 x1 : ffff0000029bd880 x0 : 0000000000000000
+| Call trace:
+| cpus_are_stuck_in_kernel+0xa4/0x120
+| __secondary_switched+0xb0/0xb4
+| Code: 35ffffa3 17fffc6c d53cd040 f9800011 (885f7c01)
+| ---[ end trace 0000000000000000 ]---
+
+This is confusing and hinders debugging, and will be problematic for
+CONFIG_LIVEPATCH as these cases cannot be unwound reliably.
+
+This is very similar to recent issues with out-of-line exception fixups,
+which were removed in commits:
+
+ 35d67794b8828333 ("arm64: lib: __arch_clear_user(): fold fixups into body")
+ 4012e0e22739eef9 ("arm64: lib: __arch_copy_from_user(): fold fixups into body")
+ 139f9ab73d60cf76 ("arm64: lib: __arch_copy_to_user(): fold fixups into body")
+
+When the trampolines were introduced in commit:
+
+ addfc38672c73efd ("arm64: atomics: avoid out-of-line ll/sc atomics")
+
+The rationale was to improve icache performance by grouping the LL/SC
+atomics together. This has never been measured, and this theoretical
+benefit is outweighed by other factors:
+
+* As the subsections are collapsed into sections at object file
+ granularity, these are spread out throughout the kernel and can share
+ cachelines with unrelated code regardless.
+
+* GCC 12.1.0 has been observed to place the trampoline out-of-line in
+ specialised __ll_sc_*() functions, introducing more branching than was
+ intended.
+
+* Removing the trampolines has been observed to shrink a defconfig
+ kernel Image by 64KiB when building with GCC 12.1.0.
+
+This patch removes the LL/SC trampolines, meaning that the LL/SC atomics
+will be inlined into their callers (or placed in out-of-line functions
+using regular BL/RET pairs). When CONFIG_ARM64_LSE_ATOMICS=y, the LL/SC
+atomics are always called in an unlikely branch, and will be placed in a
+cold portion of the function, so this should have minimal impact to the
+hot paths.
+
+Other than the improved backtracing, there should be no functional
+change as a result of this patch.
+
+Signed-off-by: Mark Rutland <mark.rutland@arm.com>
+Cc: Will Deacon <will@kernel.org>
+Link: https://lore.kernel.org/r/20220817155914.3975112-2-mark.rutland@arm.com
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Stable-dep-of: 031af50045ea ("arm64: cmpxchg_double*: hazard against entire exchange variable")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/arm64/include/asm/atomic_ll_sc.h | 40 ++++++---------------------
+ 1 file changed, 9 insertions(+), 31 deletions(-)
+
+diff --git a/arch/arm64/include/asm/atomic_ll_sc.h b/arch/arm64/include/asm/atomic_ll_sc.h
+index f5743c911303..906e2d8c254c 100644
+--- a/arch/arm64/include/asm/atomic_ll_sc.h
++++ b/arch/arm64/include/asm/atomic_ll_sc.h
+@@ -12,19 +12,6 @@
+
+ #include <linux/stringify.h>
+
+-#if IS_ENABLED(CONFIG_ARM64_LSE_ATOMICS) && IS_ENABLED(CONFIG_AS_LSE)
+-#define __LL_SC_FALLBACK(asm_ops) \
+-" b 3f\n" \
+-" .subsection 1\n" \
+-"3:\n" \
+-asm_ops "\n" \
+-" b 4f\n" \
+-" .previous\n" \
+-"4:\n"
+-#else
+-#define __LL_SC_FALLBACK(asm_ops) asm_ops
+-#endif
+-
+ #ifndef CONFIG_CC_HAS_K_CONSTRAINT
+ #define K
+ #endif
+@@ -43,12 +30,11 @@ __ll_sc_atomic_##op(int i, atomic_t *v) \
+ int result; \
+ \
+ asm volatile("// atomic_" #op "\n" \
+- __LL_SC_FALLBACK( \
+ " prfm pstl1strm, %2\n" \
+ "1: ldxr %w0, %2\n" \
+ " " #asm_op " %w0, %w0, %w3\n" \
+ " stxr %w1, %w0, %2\n" \
+- " cbnz %w1, 1b\n") \
++ " cbnz %w1, 1b\n" \
+ : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \
+ : __stringify(constraint) "r" (i)); \
+ }
+@@ -61,13 +47,12 @@ __ll_sc_atomic_##op##_return##name(int i, atomic_t *v) \
+ int result; \
+ \
+ asm volatile("// atomic_" #op "_return" #name "\n" \
+- __LL_SC_FALLBACK( \
+ " prfm pstl1strm, %2\n" \
+ "1: ld" #acq "xr %w0, %2\n" \
+ " " #asm_op " %w0, %w0, %w3\n" \
+ " st" #rel "xr %w1, %w0, %2\n" \
+ " cbnz %w1, 1b\n" \
+- " " #mb ) \
++ " " #mb \
+ : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \
+ : __stringify(constraint) "r" (i) \
+ : cl); \
+@@ -83,13 +68,12 @@ __ll_sc_atomic_fetch_##op##name(int i, atomic_t *v) \
+ int val, result; \
+ \
+ asm volatile("// atomic_fetch_" #op #name "\n" \
+- __LL_SC_FALLBACK( \
+ " prfm pstl1strm, %3\n" \
+ "1: ld" #acq "xr %w0, %3\n" \
+ " " #asm_op " %w1, %w0, %w4\n" \
+ " st" #rel "xr %w2, %w1, %3\n" \
+ " cbnz %w2, 1b\n" \
+- " " #mb ) \
++ " " #mb \
+ : "=&r" (result), "=&r" (val), "=&r" (tmp), "+Q" (v->counter) \
+ : __stringify(constraint) "r" (i) \
+ : cl); \
+@@ -142,12 +126,11 @@ __ll_sc_atomic64_##op(s64 i, atomic64_t *v) \
+ unsigned long tmp; \
+ \
+ asm volatile("// atomic64_" #op "\n" \
+- __LL_SC_FALLBACK( \
+ " prfm pstl1strm, %2\n" \
+ "1: ldxr %0, %2\n" \
+ " " #asm_op " %0, %0, %3\n" \
+ " stxr %w1, %0, %2\n" \
+- " cbnz %w1, 1b") \
++ " cbnz %w1, 1b" \
+ : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \
+ : __stringify(constraint) "r" (i)); \
+ }
+@@ -160,13 +143,12 @@ __ll_sc_atomic64_##op##_return##name(s64 i, atomic64_t *v) \
+ unsigned long tmp; \
+ \
+ asm volatile("// atomic64_" #op "_return" #name "\n" \
+- __LL_SC_FALLBACK( \
+ " prfm pstl1strm, %2\n" \
+ "1: ld" #acq "xr %0, %2\n" \
+ " " #asm_op " %0, %0, %3\n" \
+ " st" #rel "xr %w1, %0, %2\n" \
+ " cbnz %w1, 1b\n" \
+- " " #mb ) \
++ " " #mb \
+ : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \
+ : __stringify(constraint) "r" (i) \
+ : cl); \
+@@ -182,13 +164,12 @@ __ll_sc_atomic64_fetch_##op##name(s64 i, atomic64_t *v) \
+ unsigned long tmp; \
+ \
+ asm volatile("// atomic64_fetch_" #op #name "\n" \
+- __LL_SC_FALLBACK( \
+ " prfm pstl1strm, %3\n" \
+ "1: ld" #acq "xr %0, %3\n" \
+ " " #asm_op " %1, %0, %4\n" \
+ " st" #rel "xr %w2, %1, %3\n" \
+ " cbnz %w2, 1b\n" \
+- " " #mb ) \
++ " " #mb \
+ : "=&r" (result), "=&r" (val), "=&r" (tmp), "+Q" (v->counter) \
+ : __stringify(constraint) "r" (i) \
+ : cl); \
+@@ -240,7 +221,6 @@ __ll_sc_atomic64_dec_if_positive(atomic64_t *v)
+ unsigned long tmp;
+
+ asm volatile("// atomic64_dec_if_positive\n"
+- __LL_SC_FALLBACK(
+ " prfm pstl1strm, %2\n"
+ "1: ldxr %0, %2\n"
+ " subs %0, %0, #1\n"
+@@ -248,7 +228,7 @@ __ll_sc_atomic64_dec_if_positive(atomic64_t *v)
+ " stlxr %w1, %0, %2\n"
+ " cbnz %w1, 1b\n"
+ " dmb ish\n"
+- "2:")
++ "2:"
+ : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
+ :
+ : "cc", "memory");
+@@ -274,7 +254,6 @@ __ll_sc__cmpxchg_case_##name##sz(volatile void *ptr, \
+ old = (u##sz)old; \
+ \
+ asm volatile( \
+- __LL_SC_FALLBACK( \
+ " prfm pstl1strm, %[v]\n" \
+ "1: ld" #acq "xr" #sfx "\t%" #w "[oldval], %[v]\n" \
+ " eor %" #w "[tmp], %" #w "[oldval], %" #w "[old]\n" \
+@@ -282,7 +261,7 @@ __ll_sc__cmpxchg_case_##name##sz(volatile void *ptr, \
+ " st" #rel "xr" #sfx "\t%w[tmp], %" #w "[new], %[v]\n" \
+ " cbnz %w[tmp], 1b\n" \
+ " " #mb "\n" \
+- "2:") \
++ "2:" \
+ : [tmp] "=&r" (tmp), [oldval] "=&r" (oldval), \
+ [v] "+Q" (*(u##sz *)ptr) \
+ : [old] __stringify(constraint) "r" (old), [new] "r" (new) \
+@@ -326,7 +305,6 @@ __ll_sc__cmpxchg_double##name(unsigned long old1, \
+ unsigned long tmp, ret; \
+ \
+ asm volatile("// __cmpxchg_double" #name "\n" \
+- __LL_SC_FALLBACK( \
+ " prfm pstl1strm, %2\n" \
+ "1: ldxp %0, %1, %2\n" \
+ " eor %0, %0, %3\n" \
+@@ -336,7 +314,7 @@ __ll_sc__cmpxchg_double##name(unsigned long old1, \
+ " st" #rel "xp %w0, %5, %6, %2\n" \
+ " cbnz %w0, 1b\n" \
+ " " #mb "\n" \
+- "2:") \
++ "2:" \
+ : "=&r" (tmp), "=&r" (ret), "+Q" (*(unsigned long *)ptr) \
+ : "r" (old1), "r" (old2), "r" (new1), "r" (new2) \
+ : cl); \
+--
+2.35.1
+
--- /dev/null
+From 77d615fb479326042ab48f209f8b25d1c45d082e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 4 Jan 2023 15:16:26 +0000
+Subject: arm64: cmpxchg_double*: hazard against entire exchange variable
+
+From: Mark Rutland <mark.rutland@arm.com>
+
+[ Upstream commit 031af50045ea97ed4386eb3751ca2c134d0fc911 ]
+
+The inline assembly for arm64's cmpxchg_double*() implementations uses a
++Q constraint to hazard against other accesses to the memory location
+being exchanged. However, the pointer passed to the constraint is a
+pointer to unsigned long, and thus the hazard only applies to the first
+8 bytes of the location.
+
+GCC can take advantage of this, assuming that other portions of the
+location are unchanged, leading to a number of potential problems.
+
+This is similar to what we fixed back in commit:
+
+ fee960bed5e857eb ("arm64: xchg: hazard against entire exchange variable")
+
+... but we forgot to adjust cmpxchg_double*() similarly at the same
+time.
+
+The same problem applies, as demonstrated with the following test:
+
+| struct big {
+| u64 lo, hi;
+| } __aligned(128);
+|
+| unsigned long foo(struct big *b)
+| {
+| u64 hi_old, hi_new;
+|
+| hi_old = b->hi;
+| cmpxchg_double_local(&b->lo, &b->hi, 0x12, 0x34, 0x56, 0x78);
+| hi_new = b->hi;
+|
+| return hi_old ^ hi_new;
+| }
+
+... which GCC 12.1.0 compiles as:
+
+| 0000000000000000 <foo>:
+| 0: d503233f paciasp
+| 4: aa0003e4 mov x4, x0
+| 8: 1400000e b 40 <foo+0x40>
+| c: d2800240 mov x0, #0x12 // #18
+| 10: d2800681 mov x1, #0x34 // #52
+| 14: aa0003e5 mov x5, x0
+| 18: aa0103e6 mov x6, x1
+| 1c: d2800ac2 mov x2, #0x56 // #86
+| 20: d2800f03 mov x3, #0x78 // #120
+| 24: 48207c82 casp x0, x1, x2, x3, [x4]
+| 28: ca050000 eor x0, x0, x5
+| 2c: ca060021 eor x1, x1, x6
+| 30: aa010000 orr x0, x0, x1
+| 34: d2800000 mov x0, #0x0 // #0 <--- BANG
+| 38: d50323bf autiasp
+| 3c: d65f03c0 ret
+| 40: d2800240 mov x0, #0x12 // #18
+| 44: d2800681 mov x1, #0x34 // #52
+| 48: d2800ac2 mov x2, #0x56 // #86
+| 4c: d2800f03 mov x3, #0x78 // #120
+| 50: f9800091 prfm pstl1strm, [x4]
+| 54: c87f1885 ldxp x5, x6, [x4]
+| 58: ca0000a5 eor x5, x5, x0
+| 5c: ca0100c6 eor x6, x6, x1
+| 60: aa0600a6 orr x6, x5, x6
+| 64: b5000066 cbnz x6, 70 <foo+0x70>
+| 68: c8250c82 stxp w5, x2, x3, [x4]
+| 6c: 35ffff45 cbnz w5, 54 <foo+0x54>
+| 70: d2800000 mov x0, #0x0 // #0 <--- BANG
+| 74: d50323bf autiasp
+| 78: d65f03c0 ret
+
+Notice that at the lines with "BANG" comments, GCC has assumed that the
+higher 8 bytes are unchanged by the cmpxchg_double() call, and that
+`hi_old ^ hi_new` can be reduced to a constant zero, for both LSE and
+LL/SC versions of cmpxchg_double().
+
+This patch fixes the issue by passing a pointer to __uint128_t into the
++Q constraint, ensuring that the compiler hazards against the entire 16
+bytes being modified.
+
+With this change, GCC 12.1.0 compiles the above test as:
+
+| 0000000000000000 <foo>:
+| 0: f9400407 ldr x7, [x0, #8]
+| 4: d503233f paciasp
+| 8: aa0003e4 mov x4, x0
+| c: 1400000f b 48 <foo+0x48>
+| 10: d2800240 mov x0, #0x12 // #18
+| 14: d2800681 mov x1, #0x34 // #52
+| 18: aa0003e5 mov x5, x0
+| 1c: aa0103e6 mov x6, x1
+| 20: d2800ac2 mov x2, #0x56 // #86
+| 24: d2800f03 mov x3, #0x78 // #120
+| 28: 48207c82 casp x0, x1, x2, x3, [x4]
+| 2c: ca050000 eor x0, x0, x5
+| 30: ca060021 eor x1, x1, x6
+| 34: aa010000 orr x0, x0, x1
+| 38: f9400480 ldr x0, [x4, #8]
+| 3c: d50323bf autiasp
+| 40: ca0000e0 eor x0, x7, x0
+| 44: d65f03c0 ret
+| 48: d2800240 mov x0, #0x12 // #18
+| 4c: d2800681 mov x1, #0x34 // #52
+| 50: d2800ac2 mov x2, #0x56 // #86
+| 54: d2800f03 mov x3, #0x78 // #120
+| 58: f9800091 prfm pstl1strm, [x4]
+| 5c: c87f1885 ldxp x5, x6, [x4]
+| 60: ca0000a5 eor x5, x5, x0
+| 64: ca0100c6 eor x6, x6, x1
+| 68: aa0600a6 orr x6, x5, x6
+| 6c: b5000066 cbnz x6, 78 <foo+0x78>
+| 70: c8250c82 stxp w5, x2, x3, [x4]
+| 74: 35ffff45 cbnz w5, 5c <foo+0x5c>
+| 78: f9400480 ldr x0, [x4, #8]
+| 7c: d50323bf autiasp
+| 80: ca0000e0 eor x0, x7, x0
+| 84: d65f03c0 ret
+
+... sampling the high 8 bytes before and after the cmpxchg, and
+performing an EOR, as we'd expect.
+
+For backporting, I've tested this atop linux-4.9.y with GCC 5.5.0. Note
+that linux-4.9.y is the oldest currently supported stable release, and
+mandates GCC 5.1+. Unfortunately I couldn't get a GCC 5.1 binary to run
+on my machines due to library incompatibilities.
+
+I've also used a standalone test to check that we can use a __uint128_t
+pointer in a +Q constraint at least as far back as GCC 4.8.5 and LLVM
+3.9.1.
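+
+For reference, a minimal probe along those lines might look like this
+(an illustrative sketch, not the exact test used):
+
+| /* Does the toolchain accept a __uint128_t lvalue in a +Q operand? */
+| static void probe(__uint128_t *p)
+| {
+| 	asm volatile("" : "+Q" (*p));
+| }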
+
+Fixes: 5284e1b4bc8a ("arm64: xchg: Implement cmpxchg_double")
+Fixes: e9a4b795652f ("arm64: cmpxchg_dbl: patch in lse instructions when supported by the CPU")
+Reported-by: Boqun Feng <boqun.feng@gmail.com>
+Link: https://lore.kernel.org/lkml/Y6DEfQXymYVgL3oJ@boqun-archlinux/
+Reported-by: Peter Zijlstra <peterz@infradead.org>
+Link: https://lore.kernel.org/lkml/Y6GXoO4qmH9OIZ5Q@hirez.programming.kicks-ass.net/
+Signed-off-by: Mark Rutland <mark.rutland@arm.com>
+Cc: stable@vger.kernel.org
+Cc: Arnd Bergmann <arnd@arndb.de>
+Cc: Catalin Marinas <catalin.marinas@arm.com>
+Cc: Steve Capper <steve.capper@arm.com>
+Cc: Will Deacon <will@kernel.org>
+Link: https://lore.kernel.org/r/20230104151626.3262137-1-mark.rutland@arm.com
+Signed-off-by: Will Deacon <will@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/arm64/include/asm/atomic_ll_sc.h | 2 +-
+ arch/arm64/include/asm/atomic_lse.h | 2 +-
+ 2 files changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/arch/arm64/include/asm/atomic_ll_sc.h b/arch/arm64/include/asm/atomic_ll_sc.h
+index 906e2d8c254c..abd302e521c0 100644
+--- a/arch/arm64/include/asm/atomic_ll_sc.h
++++ b/arch/arm64/include/asm/atomic_ll_sc.h
+@@ -315,7 +315,7 @@ __ll_sc__cmpxchg_double##name(unsigned long old1, \
+ " cbnz %w0, 1b\n" \
+ " " #mb "\n" \
+ "2:" \
+- : "=&r" (tmp), "=&r" (ret), "+Q" (*(unsigned long *)ptr) \
++ : "=&r" (tmp), "=&r" (ret), "+Q" (*(__uint128_t *)ptr) \
+ : "r" (old1), "r" (old2), "r" (new1), "r" (new2) \
+ : cl); \
+ \
+diff --git a/arch/arm64/include/asm/atomic_lse.h b/arch/arm64/include/asm/atomic_lse.h
+index ab661375835e..28e96118c1e5 100644
+--- a/arch/arm64/include/asm/atomic_lse.h
++++ b/arch/arm64/include/asm/atomic_lse.h
+@@ -403,7 +403,7 @@ __lse__cmpxchg_double##name(unsigned long old1, \
+ " eor %[old2], %[old2], %[oldval2]\n" \
+ " orr %[old1], %[old1], %[old2]" \
+ : [old1] "+&r" (x0), [old2] "+&r" (x1), \
+- [v] "+Q" (*(unsigned long *)ptr) \
++ [v] "+Q" (*(__uint128_t *)ptr) \
+ : [new1] "r" (x2), [new2] "r" (x3), [ptr] "r" (x4), \
+ [oldval1] "r" (oldval1), [oldval2] "r" (oldval2) \
+ : cl); \
+--
+2.35.1
+
--- /dev/null
+From 6319e14464835581a7794d16e25de12156f55272 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 16 Dec 2022 15:33:55 -0800
+Subject: drm/virtio: Fix GEM handle creation UAF
+
+From: Rob Clark <robdclark@chromium.org>
+
+[ Upstream commit 52531258318ed59a2dc5a43df2eaf0eb1d65438e ]
+
+Userspace can guess the handle value and try to race GEM object creation
+with handle close, resulting in a use-after-free if we dereference the
+object after dropping the handle's reference. For that reason, dropping
+the handle's reference must be done *after* we are done dereferencing
+the object.
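+
+A sketch of the race being closed (an illustrative interleaving, not
+taken from an actual report):
+
+| ioctl task                         | closing task
+| ----------                         | ------------
+| drm_gem_handle_create() -> handle  |
+| drm_gem_object_put_unlocked(obj)   |
+|                                    | GEM_CLOSE(guessed handle):
+|                                    |   drops last ref, frees obj
+| rc->res_handle =                   |
+|   qobj->hw_res_handle  /* UAF */   |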
+
+Signed-off-by: Rob Clark <robdclark@chromium.org>
+Reviewed-by: Chia-I Wu <olvaffe@gmail.com>
+Fixes: 62fb7a5e1096 ("virtio-gpu: add 3d/virgl support")
+Cc: stable@vger.kernel.org
+Signed-off-by: Dmitry Osipenko <dmitry.osipenko@collabora.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20221216233355.542197-2-robdclark@gmail.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/virtio/virtgpu_ioctl.c | 10 +++++++++-
+ 1 file changed, 9 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/virtio/virtgpu_ioctl.c b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
+index 0a88ef11b9d3..5ae132e37277 100644
+--- a/drivers/gpu/drm/virtio/virtgpu_ioctl.c
++++ b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
+@@ -327,10 +327,18 @@ static int virtio_gpu_resource_create_ioctl(struct drm_device *dev, void *data,
+ drm_gem_object_release(obj);
+ return ret;
+ }
+- drm_gem_object_put_unlocked(obj);
+
+ rc->res_handle = qobj->hw_res_handle; /* similiar to a VM address */
+ rc->bo_handle = handle;
++
++ /*
++ * The handle owns the reference now. But we must drop our
++ * remaining reference *after* we no longer need to dereference
++ * the obj. Otherwise userspace could guess the handle and
++ * race closing it from another thread.
++ */
++ drm_gem_object_put_unlocked(obj);
++
+ return 0;
+ }
+
+--
+2.35.1
+
--- /dev/null
+From c79a4b8a225509ca39894fc7c318936ae6b89b09 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 19 Dec 2022 10:10:04 +0100
+Subject: efi: fix NULL-deref in init error path
+
+From: Johan Hovold <johan+linaro@kernel.org>
+
+[ Upstream commit 703c13fe3c9af557d312f5895ed6a5fda2711104 ]
+
+In cases where runtime services are not supported or have been disabled,
+the runtime services workqueue will never have been allocated.
+
+To avoid dereferencing a NULL pointer, do not unconditionally try to
+destroy the workqueue in the unlikely event that EFI initialisation
+fails.
+
+Fixes: 98086df8b70c ("efi: add missed destroy_workqueue when efisubsys_init fails")
+Cc: stable@vger.kernel.org
+Cc: Li Heng <liheng40@huawei.com>
+Signed-off-by: Johan Hovold <johan+linaro@kernel.org>
+Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/firmware/efi/efi.c | 9 ++++++---
+ 1 file changed, 6 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
+index ac9fb336c80f..eb98018ab420 100644
+--- a/drivers/firmware/efi/efi.c
++++ b/drivers/firmware/efi/efi.c
+@@ -345,8 +345,8 @@ static int __init efisubsys_init(void)
+ efi_kobj = kobject_create_and_add("efi", firmware_kobj);
+ if (!efi_kobj) {
+ pr_err("efi: Firmware registration failed.\n");
+- destroy_workqueue(efi_rts_wq);
+- return -ENOMEM;
++ error = -ENOMEM;
++ goto err_destroy_wq;
+ }
+
+ error = generic_ops_register();
+@@ -382,7 +382,10 @@ static int __init efisubsys_init(void)
+ generic_ops_unregister();
+ err_put:
+ kobject_put(efi_kobj);
+- destroy_workqueue(efi_rts_wq);
++err_destroy_wq:
++ if (efi_rts_wq)
++ destroy_workqueue(efi_rts_wq);
++
+ return error;
+ }
+
+--
+2.35.1
+
--- /dev/null
+From 798b75de2ca4879e2d65e4809f820a8c182cd040 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 30 Nov 2022 17:36:02 +0100
+Subject: hvc/xen: lock console list traversal
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Roger Pau Monne <roger.pau@citrix.com>
+
+[ Upstream commit c0dccad87cf68fc6012aec7567e354353097ec1a ]
+
+The currently lockless access to the xen console list in
+vtermno_to_xencons() is incorrect, as additions and removals from the
+list can happen anytime, and as such the traversal of the list to get
+the private console data for a given termno needs to happen with the
+lock held. Note that users modifying the list already do so with the
+lock taken.
+
+Adjust the current lock takers to use the _irq{save,restore} helpers,
+since the context in which vtermno_to_xencons() is called can have
+interrupts disabled. I haven't checked whether the existing users could
+instead use the plain _irq variant, so it's safer to use
+_irq{save,restore} upfront.
+
+While there switch from using list_for_each_entry_safe to
+list_for_each_entry: the current entry cursor won't be removed as
+part of the code in the loop body, so using the _safe variant is
+pointless.
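+
+The resulting traversal boils down to this pattern (a condensed sketch
+of the change below):
+
+| unsigned long flags;
+|
+| spin_lock_irqsave(&xencons_lock, flags);
+| list_for_each_entry(entry, &xenconsoles, list) {
+| 	if (entry->vtermno == vtermno) {
+| 		ret = entry;
+| 		break;
+| 	}
+| }
+| spin_unlock_irqrestore(&xencons_lock, flags);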
+
+Fixes: 02e19f9c7cac ('hvc_xen: implement multiconsole support')
+Signed-off-by: Roger Pau Monné <roger.pau@citrix.com>
+Reviewed-by: Stefano Stabellini <sstabellini@kernel.org>
+Link: https://lore.kernel.org/r/20221130163611.14686-1-roger.pau@citrix.com
+Signed-off-by: Juergen Gross <jgross@suse.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/tty/hvc/hvc_xen.c | 46 ++++++++++++++++++++++++---------------
+ 1 file changed, 29 insertions(+), 17 deletions(-)
+
+diff --git a/drivers/tty/hvc/hvc_xen.c b/drivers/tty/hvc/hvc_xen.c
+index 2d2d04c07140..7dd11b62a196 100644
+--- a/drivers/tty/hvc/hvc_xen.c
++++ b/drivers/tty/hvc/hvc_xen.c
+@@ -52,17 +52,22 @@ static DEFINE_SPINLOCK(xencons_lock);
+
+ static struct xencons_info *vtermno_to_xencons(int vtermno)
+ {
+- struct xencons_info *entry, *n, *ret = NULL;
++ struct xencons_info *entry, *ret = NULL;
++ unsigned long flags;
+
+- if (list_empty(&xenconsoles))
+- return NULL;
++ spin_lock_irqsave(&xencons_lock, flags);
++ if (list_empty(&xenconsoles)) {
++ spin_unlock_irqrestore(&xencons_lock, flags);
++ return NULL;
++ }
+
+- list_for_each_entry_safe(entry, n, &xenconsoles, list) {
++ list_for_each_entry(entry, &xenconsoles, list) {
+ if (entry->vtermno == vtermno) {
+ ret = entry;
+ break;
+ }
+ }
++ spin_unlock_irqrestore(&xencons_lock, flags);
+
+ return ret;
+ }
+@@ -223,7 +228,7 @@ static int xen_hvm_console_init(void)
+ {
+ int r;
+ uint64_t v = 0;
+- unsigned long gfn;
++ unsigned long gfn, flags;
+ struct xencons_info *info;
+
+ if (!xen_hvm_domain())
+@@ -258,9 +263,9 @@ static int xen_hvm_console_init(void)
+ goto err;
+ info->vtermno = HVC_COOKIE;
+
+- spin_lock(&xencons_lock);
++ spin_lock_irqsave(&xencons_lock, flags);
+ list_add_tail(&info->list, &xenconsoles);
+- spin_unlock(&xencons_lock);
++ spin_unlock_irqrestore(&xencons_lock, flags);
+
+ return 0;
+ err:
+@@ -283,6 +288,7 @@ static int xencons_info_pv_init(struct xencons_info *info, int vtermno)
+ static int xen_pv_console_init(void)
+ {
+ struct xencons_info *info;
++ unsigned long flags;
+
+ if (!xen_pv_domain())
+ return -ENODEV;
+@@ -299,9 +305,9 @@ static int xen_pv_console_init(void)
+ /* already configured */
+ return 0;
+ }
+- spin_lock(&xencons_lock);
++ spin_lock_irqsave(&xencons_lock, flags);
+ xencons_info_pv_init(info, HVC_COOKIE);
+- spin_unlock(&xencons_lock);
++ spin_unlock_irqrestore(&xencons_lock, flags);
+
+ return 0;
+ }
+@@ -309,6 +315,7 @@ static int xen_pv_console_init(void)
+ static int xen_initial_domain_console_init(void)
+ {
+ struct xencons_info *info;
++ unsigned long flags;
+
+ if (!xen_initial_domain())
+ return -ENODEV;
+@@ -323,9 +330,9 @@ static int xen_initial_domain_console_init(void)
+ info->irq = bind_virq_to_irq(VIRQ_CONSOLE, 0, false);
+ info->vtermno = HVC_COOKIE;
+
+- spin_lock(&xencons_lock);
++ spin_lock_irqsave(&xencons_lock, flags);
+ list_add_tail(&info->list, &xenconsoles);
+- spin_unlock(&xencons_lock);
++ spin_unlock_irqrestore(&xencons_lock, flags);
+
+ return 0;
+ }
+@@ -380,10 +387,12 @@ static void xencons_free(struct xencons_info *info)
+
+ static int xen_console_remove(struct xencons_info *info)
+ {
++ unsigned long flags;
++
+ xencons_disconnect_backend(info);
+- spin_lock(&xencons_lock);
++ spin_lock_irqsave(&xencons_lock, flags);
+ list_del(&info->list);
+- spin_unlock(&xencons_lock);
++ spin_unlock_irqrestore(&xencons_lock, flags);
+ if (info->xbdev != NULL)
+ xencons_free(info);
+ else {
+@@ -464,6 +473,7 @@ static int xencons_probe(struct xenbus_device *dev,
+ {
+ int ret, devid;
+ struct xencons_info *info;
++ unsigned long flags;
+
+ devid = dev->nodename[strlen(dev->nodename) - 1] - '0';
+ if (devid == 0)
+@@ -482,9 +492,9 @@ static int xencons_probe(struct xenbus_device *dev,
+ ret = xencons_connect_backend(dev, info);
+ if (ret < 0)
+ goto error;
+- spin_lock(&xencons_lock);
++ spin_lock_irqsave(&xencons_lock, flags);
+ list_add_tail(&info->list, &xenconsoles);
+- spin_unlock(&xencons_lock);
++ spin_unlock_irqrestore(&xencons_lock, flags);
+
+ return 0;
+
+@@ -583,10 +593,12 @@ static int __init xen_hvc_init(void)
+
+ info->hvc = hvc_alloc(HVC_COOKIE, info->irq, ops, 256);
+ if (IS_ERR(info->hvc)) {
++ unsigned long flags;
++
+ r = PTR_ERR(info->hvc);
+- spin_lock(&xencons_lock);
++ spin_lock_irqsave(&xencons_lock, flags);
+ list_del(&info->list);
+- spin_unlock(&xencons_lock);
++ spin_unlock_irqrestore(&xencons_lock, flags);
+ if (info->irq)
+ unbind_from_irqhandler(info->irq, NULL);
+ kfree(info);
+--
+2.35.1
+
--- /dev/null
+From 0ede8e0168cb1b1f8d143552d0d0f284b38f3b7c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 12 Apr 2021 14:48:43 +0800
+Subject: iommu/mediatek-v1: Add error handle for mtk_iommu_probe
+
+From: Yong Wu <yong.wu@mediatek.com>
+
+[ Upstream commit ac304c070c54413efabf29f9e73c54576d329774 ]
+
+In the original code, we lack error handling. This patch adds it.
+
+Signed-off-by: Yong Wu <yong.wu@mediatek.com>
+Link: https://lore.kernel.org/r/20210412064843.11614-2-yong.wu@mediatek.com
+Signed-off-by: Joerg Roedel <jroedel@suse.de>
+Stable-dep-of: 142e821f68cf ("iommu/mediatek-v1: Fix an error handling path in mtk_iommu_v1_probe()")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/iommu/mtk_iommu_v1.c | 22 ++++++++++++++++++----
+ 1 file changed, 18 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/iommu/mtk_iommu_v1.c b/drivers/iommu/mtk_iommu_v1.c
+index b5efd6dac953..7b1833b0f059 100644
+--- a/drivers/iommu/mtk_iommu_v1.c
++++ b/drivers/iommu/mtk_iommu_v1.c
+@@ -632,12 +632,26 @@ static int mtk_iommu_probe(struct platform_device *pdev)
+
+ ret = iommu_device_register(&data->iommu);
+ if (ret)
+- return ret;
++ goto out_sysfs_remove;
+
+- if (!iommu_present(&platform_bus_type))
+- bus_set_iommu(&platform_bus_type, &mtk_iommu_ops);
++ if (!iommu_present(&platform_bus_type)) {
++ ret = bus_set_iommu(&platform_bus_type, &mtk_iommu_ops);
++ if (ret)
++ goto out_dev_unreg;
++ }
+
+- return component_master_add_with_match(dev, &mtk_iommu_com_ops, match);
++ ret = component_master_add_with_match(dev, &mtk_iommu_com_ops, match);
++ if (ret)
++ goto out_bus_set_null;
++ return ret;
++
++out_bus_set_null:
++ bus_set_iommu(&platform_bus_type, NULL);
++out_dev_unreg:
++ iommu_device_unregister(&data->iommu);
++out_sysfs_remove:
++ iommu_device_sysfs_remove(&data->iommu);
++ return ret;
+ }
+
+ static int mtk_iommu_remove(struct platform_device *pdev)
+--
+2.35.1
+
--- /dev/null
+From 3a5f5411d8f4edb966eb356e06a451cb9423ad69 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 19 Dec 2022 19:06:22 +0100
+Subject: iommu/mediatek-v1: Fix an error handling path in mtk_iommu_v1_probe()
+
+From: Christophe JAILLET <christophe.jaillet@wanadoo.fr>
+
+[ Upstream commit 142e821f68cf5da79ce722cb9c1323afae30e185 ]
+
+A clk, prepared and enabled in mtk_iommu_v1_hw_init(), is not released in
+the error handling path of mtk_iommu_v1_probe().
+
+Add the corresponding clk_disable_unprepare(), as already done in the
+remove function.
+
+Fixes: b17336c55d89 ("iommu/mediatek: add support for mtk iommu generation one HW")
+Signed-off-by: Christophe JAILLET <christophe.jaillet@wanadoo.fr>
+Reviewed-by: Yong Wu <yong.wu@mediatek.com>
+Reviewed-by: AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>
+Reviewed-by: Matthias Brugger <matthias.bgg@gmail.com>
+Link: https://lore.kernel.org/r/593e7b7d97c6e064b29716b091a9d4fd122241fb.1671473163.git.christophe.jaillet@wanadoo.fr
+Signed-off-by: Joerg Roedel <jroedel@suse.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/iommu/mtk_iommu_v1.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/iommu/mtk_iommu_v1.c b/drivers/iommu/mtk_iommu_v1.c
+index 7b1833b0f059..e31bd281e59d 100644
+--- a/drivers/iommu/mtk_iommu_v1.c
++++ b/drivers/iommu/mtk_iommu_v1.c
+@@ -626,7 +626,7 @@ static int mtk_iommu_probe(struct platform_device *pdev)
+ ret = iommu_device_sysfs_add(&data->iommu, &pdev->dev, NULL,
+ dev_name(&pdev->dev));
+ if (ret)
+- return ret;
++ goto out_clk_unprepare;
+
+ iommu_device_set_ops(&data->iommu, &mtk_iommu_ops);
+
+@@ -651,6 +651,8 @@ static int mtk_iommu_probe(struct platform_device *pdev)
+ iommu_device_unregister(&data->iommu);
+ out_sysfs_remove:
+ iommu_device_sysfs_remove(&data->iommu);
++out_clk_unprepare:
++ clk_disable_unprepare(data->bclk);
+ return ret;
+ }
+
+--
+2.35.1
+
--- /dev/null
+From 8de317db0e97f88ceac9d0ba6312d78bd81623c6 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 5 Dec 2022 14:26:09 -0800
+Subject: net/mlx5: Fix ptp max frequency adjustment range
+
+From: Rahul Rameshbabu <rrameshbabu@nvidia.com>
+
+[ Upstream commit fe91d57277eef8bb4aca05acfa337b4a51d0bba4 ]
+
+.max_adj of ptp_clock_info acts as an absolute value for the amount in
+ppb that can be set for a single call of .adjfine. This means that the
+adjustment requested in a single call to .adjfine cannot be greater than
+.max_adj or less than -(.max_adj). Provide the correct maximum frequency
+adjustment value supported by the devices.
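+
+For context, the ptp core enforces this bound on each request; the
+check is roughly (a paraphrased sketch, not verbatim kernel source):
+
+| s32 ppb = scaled_ppm_to_ppb(tx->freq);
+|
+| if (ppb > info->max_adj || ppb < -info->max_adj)
+| 	return -ERANGE;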
+
+Fixes: 3d8c38af1493 ("net/mlx5e: Add PTP Hardware Clock (PHC) support")
+Signed-off-by: Rahul Rameshbabu <rrameshbabu@nvidia.com>
+Reviewed-by: Gal Pressman <gal@nvidia.com>
+Reviewed-by: Tariq Toukan <tariqt@nvidia.com>
+Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
+index e0b361ff5a97..2c81ec31e0a2 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
+@@ -418,7 +418,7 @@ static int mlx5_ptp_verify(struct ptp_clock_info *ptp, unsigned int pin,
+ static const struct ptp_clock_info mlx5_ptp_clock_info = {
+ .owner = THIS_MODULE,
+ .name = "mlx5_ptp",
+- .max_adj = 100000000,
++ .max_adj = 50000000,
+ .n_alarm = 0,
+ .n_ext_ts = 0,
+ .n_per_out = 0,
+--
+2.35.1
+
--- /dev/null
+From b677e609cce4a85a2dff561c84ec188af7518d74 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 9 Jun 2020 10:58:31 +0300
+Subject: net/mlx5: Rename ptp clock info
+
+From: Eran Ben Elisha <eranbe@mellanox.com>
+
+[ Upstream commit aac2df7f022eccb5d117f07b1e231410db1a863a ]
+
+Fix a typo in ptp_clock_info naming: mlx5_p2p -> mlx5_ptp.
+
+Signed-off-by: Eran Ben Elisha <eranbe@mellanox.com>
+Stable-dep-of: fe91d57277ee ("net/mlx5: Fix ptp max frequency adjustment range")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
+index 492ff2ef9a40..e0b361ff5a97 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
+@@ -417,7 +417,7 @@ static int mlx5_ptp_verify(struct ptp_clock_info *ptp, unsigned int pin,
+
+ static const struct ptp_clock_info mlx5_ptp_clock_info = {
+ .owner = THIS_MODULE,
+- .name = "mlx5_p2p",
++ .name = "mlx5_ptp",
+ .max_adj = 100000000,
+ .n_alarm = 0,
+ .n_ext_ts = 0,
+--
+2.35.1
+
--- /dev/null
+From 0d52d1deaf840a90560f7213d374270ba64c0f8f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 7 Jan 2023 19:10:04 +0200
+Subject: net/sched: act_mpls: Fix warning during failed attribute validation
+
+From: Ido Schimmel <idosch@nvidia.com>
+
+[ Upstream commit 9e17f99220d111ea031b44153fdfe364b0024ff2 ]
+
+The 'TCA_MPLS_LABEL' attribute is of 'NLA_U32' type, but has a
+validation type of 'NLA_VALIDATE_FUNCTION'. This is an invalid
+combination according to the comment above 'struct nla_policy':
+
+"
+Meaning of `validate' field, use via NLA_POLICY_VALIDATE_FN:
+ NLA_BINARY Validation function called for the attribute.
+ All other Unused - but note that it's a union
+"
+
+This can trigger the warning [1] in nla_get_range_unsigned() when
+validation of the attribute fails. Despite being of 'NLA_U32' type, the
+associated 'min'/'max' fields in the policy are negative as they are
+aliased by the 'validate' field.
+
+Fix by changing the attribute type to 'NLA_BINARY' which is consistent
+with the above comment and all other users of NLA_POLICY_VALIDATE_FN().
+As a result, move the length validation to the validation function.
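+
+The aliasing behind the warning can be pictured as follows (a
+simplified sketch of the nla_policy layout, not its exact definition);
+with NLA_U32 plus a validate function, the 'min'/'max' view of the
+union is really the low bits of the function pointer:
+
+| struct nla_policy {
+| 	u8 type;
+| 	...
+| 	union {
+| 		struct { s16 min, max; };
+| 		int (*validate)(const struct nlattr *attr,
+| 				struct netlink_ext_ack *extack);
+| 		...
+| 	};
+| };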
+
+No regressions in MPLS tests:
+
+ # ./tdc.py -f tc-tests/actions/mpls.json
+ [...]
+ # echo $?
+ 0
+
+[1]
+WARNING: CPU: 0 PID: 17743 at lib/nlattr.c:118
+nla_get_range_unsigned+0x1d8/0x1e0 lib/nlattr.c:117
+Modules linked in:
+CPU: 0 PID: 17743 Comm: syz-executor.0 Not tainted 6.1.0-rc8 #3
+Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS
+rel-1.13.0-48-gd9c812dda519-prebuilt.qemu.org 04/01/2014
+RIP: 0010:nla_get_range_unsigned+0x1d8/0x1e0 lib/nlattr.c:117
+[...]
+Call Trace:
+ <TASK>
+ __netlink_policy_dump_write_attr+0x23d/0x990 net/netlink/policy.c:310
+ netlink_policy_dump_write_attr+0x22/0x30 net/netlink/policy.c:411
+ netlink_ack_tlv_fill net/netlink/af_netlink.c:2454 [inline]
+ netlink_ack+0x546/0x760 net/netlink/af_netlink.c:2506
+ netlink_rcv_skb+0x1b7/0x240 net/netlink/af_netlink.c:2546
+ rtnetlink_rcv+0x18/0x20 net/core/rtnetlink.c:6109
+ netlink_unicast_kernel net/netlink/af_netlink.c:1319 [inline]
+ netlink_unicast+0x5e9/0x6b0 net/netlink/af_netlink.c:1345
+ netlink_sendmsg+0x739/0x860 net/netlink/af_netlink.c:1921
+ sock_sendmsg_nosec net/socket.c:714 [inline]
+ sock_sendmsg net/socket.c:734 [inline]
+ ____sys_sendmsg+0x38f/0x500 net/socket.c:2482
+ ___sys_sendmsg net/socket.c:2536 [inline]
+ __sys_sendmsg+0x197/0x230 net/socket.c:2565
+ __do_sys_sendmsg net/socket.c:2574 [inline]
+ __se_sys_sendmsg net/socket.c:2572 [inline]
+ __x64_sys_sendmsg+0x42/0x50 net/socket.c:2572
+ do_syscall_x64 arch/x86/entry/common.c:50 [inline]
+ do_syscall_64+0x2b/0x70 arch/x86/entry/common.c:80
+ entry_SYSCALL_64_after_hwframe+0x63/0xcd
+
+Link: https://lore.kernel.org/netdev/CAO4mrfdmjvRUNbDyP0R03_DrD_eFCLCguz6OxZ2TYRSv0K9gxA@mail.gmail.com/
+Fixes: 2a2ea50870ba ("net: sched: add mpls manipulation actions to TC")
+Reported-by: Wei Chen <harperchen1110@gmail.com>
+Tested-by: Wei Chen <harperchen1110@gmail.com>
+Signed-off-by: Ido Schimmel <idosch@nvidia.com>
+Reviewed-by: Alexander Duyck <alexanderduyck@fb.com>
+Link: https://lore.kernel.org/r/20230107171004.608436-1-idosch@nvidia.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/sched/act_mpls.c | 8 +++++++-
+ 1 file changed, 7 insertions(+), 1 deletion(-)
+
+diff --git a/net/sched/act_mpls.c b/net/sched/act_mpls.c
+index 0fccae356dc1..197915332b42 100644
+--- a/net/sched/act_mpls.c
++++ b/net/sched/act_mpls.c
+@@ -116,6 +116,11 @@ static int valid_label(const struct nlattr *attr,
+ {
+ const u32 *label = nla_data(attr);
+
++ if (nla_len(attr) != sizeof(*label)) {
++ NL_SET_ERR_MSG_MOD(extack, "Invalid MPLS label length");
++ return -EINVAL;
++ }
++
+ if (*label & ~MPLS_LABEL_MASK || *label == MPLS_LABEL_IMPLNULL) {
+ NL_SET_ERR_MSG_MOD(extack, "MPLS label out of range");
+ return -EINVAL;
+@@ -128,7 +133,8 @@ static const struct nla_policy mpls_policy[TCA_MPLS_MAX + 1] = {
+ [TCA_MPLS_UNSPEC] = { .strict_start_type = TCA_MPLS_UNSPEC + 1 },
+ [TCA_MPLS_PARMS] = NLA_POLICY_EXACT_LEN(sizeof(struct tc_mpls)),
+ [TCA_MPLS_PROTO] = { .type = NLA_U16 },
+- [TCA_MPLS_LABEL] = NLA_POLICY_VALIDATE_FN(NLA_U32, valid_label),
++ [TCA_MPLS_LABEL] = NLA_POLICY_VALIDATE_FN(NLA_BINARY,
++ valid_label),
+ [TCA_MPLS_TC] = NLA_POLICY_RANGE(NLA_U8, 0, 7),
+ [TCA_MPLS_TTL] = NLA_POLICY_MIN(NLA_U8, 1),
+ [TCA_MPLS_BOS] = NLA_POLICY_RANGE(NLA_U8, 0, 1),
+--
+2.35.1
+
--- /dev/null
+From a4ef7616aa0b83face94969ba98d81e0c988ea89 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 6 Jan 2023 17:23:44 +0900
+Subject: nfc: pn533: Wait for out_urb's completion in pn533_usb_send_frame()
+
+From: Minsuk Kang <linuxlovemin@yonsei.ac.kr>
+
+[ Upstream commit 9dab880d675b9d0dd56c6428e4e8352a3339371d ]
+
+Fix a use-after-free that occurs in the hcd when in_urb, sent from
+pn533_usb_send_frame(), completes earlier than out_urb. Its completion
+callback frees the skb data in pn533_send_async_complete() while that
+data is still in use as the transfer buffer of out_urb. Wait before
+sending in_urb until the callback of out_urb has been called. To modify
+the callback of out_urb alone, separate the complete functions of
+out_urb and ack_urb.
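+
+The synchronization added below boils down to this pattern (a condensed
+sketch):
+
+| struct pn533_out_arg arg;
+|
+| init_completion(&arg.done);
+| phy->out_urb->context = &arg;
+| usb_submit_urb(phy->out_urb, GFP_KERNEL);
+| wait_for_completion(&arg.done);	/* out_urb's callback has run */
+| /* only now is it safe to submit in_urb */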
+
+Found by a modified version of syzkaller.
+
+BUG: KASAN: use-after-free in dummy_timer
+Call Trace:
+ memcpy (mm/kasan/shadow.c:65)
+ dummy_perform_transfer (drivers/usb/gadget/udc/dummy_hcd.c:1352)
+ transfer (drivers/usb/gadget/udc/dummy_hcd.c:1453)
+ dummy_timer (drivers/usb/gadget/udc/dummy_hcd.c:1972)
+ arch_static_branch (arch/x86/include/asm/jump_label.h:27)
+ static_key_false (include/linux/jump_label.h:207)
+ timer_expire_exit (include/trace/events/timer.h:127)
+ call_timer_fn (kernel/time/timer.c:1475)
+ expire_timers (kernel/time/timer.c:1519)
+ __run_timers (kernel/time/timer.c:1790)
+ run_timer_softirq (kernel/time/timer.c:1803)
+
+Fixes: c46ee38620a2 ("NFC: pn533: add NXP pn533 nfc device driver")
+Signed-off-by: Minsuk Kang <linuxlovemin@yonsei.ac.kr>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/nfc/pn533/usb.c | 44 ++++++++++++++++++++++++++++++++++++++---
+ 1 file changed, 41 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/nfc/pn533/usb.c b/drivers/nfc/pn533/usb.c
+index d7a355d05368..82e5b7dbaee9 100644
+--- a/drivers/nfc/pn533/usb.c
++++ b/drivers/nfc/pn533/usb.c
+@@ -153,10 +153,17 @@ static int pn533_usb_send_ack(struct pn533 *dev, gfp_t flags)
+ return usb_submit_urb(phy->ack_urb, flags);
+ }
+
++struct pn533_out_arg {
++ struct pn533_usb_phy *phy;
++ struct completion done;
++};
++
+ static int pn533_usb_send_frame(struct pn533 *dev,
+ struct sk_buff *out)
+ {
+ struct pn533_usb_phy *phy = dev->phy;
++ struct pn533_out_arg arg;
++ void *cntx;
+ int rc;
+
+ if (phy->priv == NULL)
+@@ -168,10 +175,17 @@ static int pn533_usb_send_frame(struct pn533 *dev,
+ print_hex_dump_debug("PN533 TX: ", DUMP_PREFIX_NONE, 16, 1,
+ out->data, out->len, false);
+
++ init_completion(&arg.done);
++ cntx = phy->out_urb->context;
++ phy->out_urb->context = &arg;
++
+ rc = usb_submit_urb(phy->out_urb, GFP_KERNEL);
+ if (rc)
+ return rc;
+
++ wait_for_completion(&arg.done);
++ phy->out_urb->context = cntx;
++
+ if (dev->protocol_type == PN533_PROTO_REQ_RESP) {
+ /* request for response for sent packet directly */
+ rc = pn533_submit_urb_for_response(phy, GFP_KERNEL);
+@@ -412,7 +426,31 @@ static int pn533_acr122_poweron_rdr(struct pn533_usb_phy *phy)
+ return arg.rc;
+ }
+
+-static void pn533_send_complete(struct urb *urb)
++static void pn533_out_complete(struct urb *urb)
++{
++ struct pn533_out_arg *arg = urb->context;
++ struct pn533_usb_phy *phy = arg->phy;
++
++ switch (urb->status) {
++ case 0:
++ break; /* success */
++ case -ECONNRESET:
++ case -ENOENT:
++ dev_dbg(&phy->udev->dev,
++ "The urb has been stopped (status %d)\n",
++ urb->status);
++ break;
++ case -ESHUTDOWN:
++ default:
++ nfc_err(&phy->udev->dev,
++ "Urb failure (status %d)\n",
++ urb->status);
++ }
++
++ complete(&arg->done);
++}
++
++static void pn533_ack_complete(struct urb *urb)
+ {
+ struct pn533_usb_phy *phy = urb->context;
+
+@@ -500,10 +538,10 @@ static int pn533_usb_probe(struct usb_interface *interface,
+
+ usb_fill_bulk_urb(phy->out_urb, phy->udev,
+ usb_sndbulkpipe(phy->udev, out_endpoint),
+- NULL, 0, pn533_send_complete, phy);
++ NULL, 0, pn533_out_complete, phy);
+ usb_fill_bulk_urb(phy->ack_urb, phy->udev,
+ usb_sndbulkpipe(phy->udev, out_endpoint),
+- NULL, 0, pn533_send_complete, phy);
++ NULL, 0, pn533_ack_complete, phy);
+
+ switch (id->driver_info) {
+ case PN533_DEVICE_STD:
+--
+2.35.1
+
--- /dev/null
+From 1deeacd1d000383db93a9241c6e20b6e17d58509 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 27 Nov 2022 22:06:02 +0100
+Subject: regulator: da9211: Use irq handler when ready
+
+From: Ricardo Ribalda <ribalda@chromium.org>
+
+[ Upstream commit 02228f6aa6a64d588bc31e3267d05ff184d772eb ]
+
+If the system does not come from reset (e.g. when it is booted via
+kexec()), the regulator might have an IRQ waiting for us.
+
+If we enable the IRQ handler before its structures are ready, we crash.
+
+This patch fixes:
+
+[ 1.141839] Unable to handle kernel read from unreadable memory at virtual address 0000000000000078
+[ 1.316096] Call trace:
+[ 1.316101] blocking_notifier_call_chain+0x20/0xa8
+[ 1.322757] cpu cpu0: dummy supplies not allowed for exclusive requests
+[ 1.327823] regulator_notifier_call_chain+0x1c/0x2c
+[ 1.327825] da9211_irq_handler+0x68/0xf8
+[ 1.327829] irq_thread+0x11c/0x234
+[ 1.327833] kthread+0x13c/0x154
+
+Signed-off-by: Ricardo Ribalda <ribalda@chromium.org>
+Reviewed-by: Adam Ward <DLG-Adam.Ward.opensource@dm.renesas.com>
+Link: https://lore.kernel.org/r/20221124-da9211-v2-0-1779e3c5d491@chromium.org
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/regulator/da9211-regulator.c | 11 ++++++-----
+ 1 file changed, 6 insertions(+), 5 deletions(-)
+
+diff --git a/drivers/regulator/da9211-regulator.c b/drivers/regulator/da9211-regulator.c
+index bf80748f1ccc..7baa6121cc66 100644
+--- a/drivers/regulator/da9211-regulator.c
++++ b/drivers/regulator/da9211-regulator.c
+@@ -471,6 +471,12 @@ static int da9211_i2c_probe(struct i2c_client *i2c,
+
+ chip->chip_irq = i2c->irq;
+
++ ret = da9211_regulator_init(chip);
++ if (ret < 0) {
++ dev_err(chip->dev, "Failed to initialize regulator: %d\n", ret);
++ return ret;
++ }
++
+ if (chip->chip_irq != 0) {
+ ret = devm_request_threaded_irq(chip->dev, chip->chip_irq, NULL,
+ da9211_irq_handler,
+@@ -485,11 +491,6 @@ static int da9211_i2c_probe(struct i2c_client *i2c,
+ dev_warn(chip->dev, "No IRQ configured\n");
+ }
+
+- ret = da9211_regulator_init(chip);
+-
+- if (ret < 0)
+- dev_err(chip->dev, "Failed to initialize regulator: %d\n", ret);
+-
+ return ret;
+ }
+
+--
+2.35.1
+
powerpc-imc-pmu-fix-use-of-mutex-in-irqs-disabled-section.patch
x86-boot-avoid-using-intel-mnemonics-in-at-t-syntax-asm.patch
edac-device-fix-period-calculation-in-edac_device_reset_delay_period.patch
+regulator-da9211-use-irq-handler-when-ready.patch
+tipc-improve-throughput-between-nodes-in-netns.patch
+tipc-eliminate-checking-netns-if-node-established.patch
+tipc-fix-unexpected-link-reset-due-to-discovery-mess.patch
+hvc-xen-lock-console-list-traversal.patch
+nfc-pn533-wait-for-out_urb-s-completion-in-pn533_usb.patch
+net-sched-act_mpls-fix-warning-during-failed-attribu.patch
+net-mlx5-rename-ptp-clock-info.patch
+net-mlx5-fix-ptp-max-frequency-adjustment-range.patch
+iommu-mediatek-v1-add-error-handle-for-mtk_iommu_pro.patch
+iommu-mediatek-v1-fix-an-error-handling-path-in-mtk_.patch
+x86-resctrl-use-task_curr-instead-of-task_struct-on_.patch
+x86-resctrl-fix-task-closid-rmid-update-race.patch
+drm-virtio-fix-gem-handle-creation-uaf.patch
+arm64-atomics-format-whitespace-consistently.patch
+arm64-atomics-remove-ll-sc-trampolines.patch
+arm64-cmpxchg_double-hazard-against-entire-exchange-.patch
+efi-fix-null-deref-in-init-error-path.patch
--- /dev/null
+From 6a588d9c2cb943257dbe84c67485ee010f106521 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 8 Nov 2019 10:02:37 +0700
+Subject: tipc: eliminate checking netns if node established
+
+From: Hoang Le <hoang.h.le@dektech.com.au>
+
+[ Upstream commit d408bef4bfa60bac665b6e7239269570039a968b ]
+
+Currently, we scan over all network namespaces at each received
+discovery message in order to check if the sending peer might be
+present in a host-local namespace.
+
+This is unnecessary since we can assume that a peer will not change its
+location during an established session.
+
+We now improve the condition for this testing so that we don't perform
+any redundant scans.
+
+Fixes: f73b12812a3d ("tipc: improve throughput between nodes in netns")
+Acked-by: Jon Maloy <jon.maloy@ericsson.com>
+Signed-off-by: Hoang Le <hoang.h.le@dektech.com.au>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Stable-dep-of: c244c092f1ed ("tipc: fix unexpected link reset due to discovery messages")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/tipc/node.c | 14 +++++---------
+ 1 file changed, 5 insertions(+), 9 deletions(-)
+
+diff --git a/net/tipc/node.c b/net/tipc/node.c
+index 3136e2a777fd..81fe8d051ba4 100644
+--- a/net/tipc/node.c
++++ b/net/tipc/node.c
+@@ -472,10 +472,6 @@ static struct tipc_node *tipc_node_create(struct net *net, u32 addr,
+ tipc_bc_sndlink(net),
+ &n->bc_entry.link)) {
+ pr_warn("Broadcast rcv link creation failed, no memory\n");
+- if (n->peer_net) {
+- n->peer_net = NULL;
+- n->peer_hash_mix = 0;
+- }
+ kfree(n);
+ n = NULL;
+ goto exit;
+@@ -1068,6 +1064,9 @@ void tipc_node_check_dest(struct net *net, u32 addr,
+ if (sign_match && addr_match && link_up) {
+ /* All is fine. Do nothing. */
+ reset = false;
++ /* Peer node is not a container/local namespace */
++ if (!n->peer_hash_mix)
++ n->peer_hash_mix = hash_mixes;
+ } else if (sign_match && addr_match && !link_up) {
+ /* Respond. The link will come up in due time */
+ *respond = true;
+@@ -1393,11 +1392,8 @@ static void node_lost_contact(struct tipc_node *n,
+
+ /* Notify publications from this node */
+ n->action_flags |= TIPC_NOTIFY_NODE_DOWN;
+-
+- if (n->peer_net) {
+- n->peer_net = NULL;
+- n->peer_hash_mix = 0;
+- }
++ n->peer_net = NULL;
++ n->peer_hash_mix = 0;
+ /* Notify sockets connected to node */
+ list_for_each_entry_safe(conn, safe, conns, list) {
+ skb = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE, TIPC_CONN_MSG,
+--
+2.35.1
+
--- /dev/null
+From af101de6f0a7900fc200230a976608b5d4f29c15 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 5 Jan 2023 06:02:51 +0000
+Subject: tipc: fix unexpected link reset due to discovery messages
+
+From: Tung Nguyen <tung.q.nguyen@dektech.com.au>
+
+[ Upstream commit c244c092f1ed2acfb5af3d3da81e22367d3dd733 ]
+
+This unexpected behavior is observed:
+
+node 1                    | node 2
+------                    | ------
+link is established       | link is established
+reboot                    | link is reset
+up                        | send discovery message
+receive discovery message |
+link is established       | link is established
+send discovery message    |
+                          | receive discovery message
+                          | link is reset (unexpected)
+                          | send reset message
+link is reset             |
+
+It is due to delayed re-discovery as described in function
+tipc_node_check_dest(): "this link endpoint has already reset
+and re-established contact with the peer, before receiving a
+discovery message from that node."
+
+However, commit 598411d70f85 changed the condition for calling
+tipc_node_link_down(), which used to be the acceptance of a new media
+address.
+
+This commit fixes the issue by restoring the old, correct behavior.
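+
+The key idea (a condensed sketch of the hunks below): snapshot the link
+state while still holding the node lock, and use that snapshot, rather
+than re-reading the link, when deciding whether to take the link down:
+
+    /* Sketch, condensed from the diff below */
+    link_is_reset = l && tipc_link_is_reset(l);
+    /* the eight sign/addr/link permutations decide 'reset' */
+    tipc_node_write_unlock(n);
+    if (reset && !link_is_reset)
+        tipc_node_link_down(n, b->identity, false);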
+
+Fixes: 598411d70f85 ("tipc: make resetting of links non-atomic")
+Acked-by: Jon Maloy <jmaloy@redhat.com>
+Signed-off-by: Tung Nguyen <tung.q.nguyen@dektech.com.au>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/tipc/node.c | 12 ++++++++----
+ 1 file changed, 8 insertions(+), 4 deletions(-)
+
+diff --git a/net/tipc/node.c b/net/tipc/node.c
+index 81fe8d051ba4..a6ac67c38770 100644
+--- a/net/tipc/node.c
++++ b/net/tipc/node.c
+@@ -1035,8 +1035,9 @@ void tipc_node_check_dest(struct net *net, u32 addr,
+ bool addr_match = false;
+ bool sign_match = false;
+ bool link_up = false;
++ bool link_is_reset = false;
+ bool accept_addr = false;
+- bool reset = true;
++ bool reset = false;
+ char *if_name;
+ unsigned long intv;
+ u16 session;
+@@ -1056,14 +1057,14 @@ void tipc_node_check_dest(struct net *net, u32 addr,
+ /* Prepare to validate requesting node's signature and media address */
+ l = le->link;
+ link_up = l && tipc_link_is_up(l);
++ link_is_reset = l && tipc_link_is_reset(l);
+ addr_match = l && !memcmp(&le->maddr, maddr, sizeof(*maddr));
+ sign_match = (signature == n->signature);
+
+ /* These three flags give us eight permutations: */
+
+ if (sign_match && addr_match && link_up) {
+- /* All is fine. Do nothing. */
+- reset = false;
++ /* All is fine. Ignore requests. */
+ /* Peer node is not a container/local namespace */
+ if (!n->peer_hash_mix)
+ n->peer_hash_mix = hash_mixes;
+@@ -1088,6 +1089,7 @@ void tipc_node_check_dest(struct net *net, u32 addr,
+ */
+ accept_addr = true;
+ *respond = true;
++ reset = true;
+ } else if (!sign_match && addr_match && link_up) {
+ /* Peer node rebooted. Two possibilities:
+ * - Delayed re-discovery; this link endpoint has already
+@@ -1119,6 +1121,7 @@ void tipc_node_check_dest(struct net *net, u32 addr,
+ n->signature = signature;
+ accept_addr = true;
+ *respond = true;
++ reset = true;
+ }
+
+ if (!accept_addr)
+@@ -1147,6 +1150,7 @@ void tipc_node_check_dest(struct net *net, u32 addr,
+ tipc_link_fsm_evt(l, LINK_RESET_EVT);
+ if (n->state == NODE_FAILINGOVER)
+ tipc_link_fsm_evt(l, LINK_FAILOVER_BEGIN_EVT);
++ link_is_reset = tipc_link_is_reset(l);
+ le->link = l;
+ n->link_cnt++;
+ tipc_node_calculate_timer(n, l);
+@@ -1159,7 +1163,7 @@ void tipc_node_check_dest(struct net *net, u32 addr,
+ memcpy(&le->maddr, maddr, sizeof(*maddr));
+ exit:
+ tipc_node_write_unlock(n);
+- if (reset && l && !tipc_link_is_reset(l))
++ if (reset && !link_is_reset)
+ tipc_node_link_down(n, b->identity, false);
+ tipc_node_put(n);
+ }
+--
+2.35.1
+
--- /dev/null
+From 055c16c1625f0dd0c8128c6b6f2cb647f7bd8f9c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 29 Oct 2019 07:51:21 +0700
+Subject: tipc: improve throughput between nodes in netns
+
+From: Hoang Le <hoang.h.le@dektech.com.au>
+
+[ Upstream commit f73b12812a3d1d798b7517547ccdcf864844d2cd ]
+
+Currently, TIPC transports intra-node user data messages directly from
+socket to socket, hence shortcutting all the lower layers of the
+communication stack. This gives TIPC very good intra-node performance,
+both regarding throughput and latency.
+
+We now introduce a similar mechanism for TIPC data traffic across
+network namespaces located in the same kernel. On the send path, the
+call chain is as always accompanied by the sending node's network
+namespace pointer. However, once we have reliably established that the
+receiving node is represented by a namespace on the same host, we just
+replace the namespace pointer with the receiving node/namespace's
+ditto, and follow the regular socket receive path through the
+receiving node. This technique gives us a throughput similar to the
+node-internal throughput, several times larger than if we let the
+traffic go through the full network stacks. As a comparison, max
+throughput for 64k messages is four times larger than TCP throughput
+for the same type of traffic.
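+
+On the send path this looks roughly as follows (a sketch condensed
+from the tipc_node_xmit() hunk below; unlock/refcount handling
+elided):
+
+    /* Sketch, condensed from the diff below */
+    if (node_is_up(n) && n->peer_net && check_net(n->peer_net)) {
+        /* Peer is a namespace in the same kernel: deliver
+         * directly into its receive path (the "wormhole").
+         */
+        tipc_lxc_xmit(n->peer_net, list);
+        if (likely(skb_queue_empty(list)))
+            return 0;
+    }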
+
+To meet any security concerns, the following should be noted.
+
+- All nodes joining a cluster are supposed to have been certified and
+authenticated by mechanisms outside TIPC. This is no different for
+nodes/namespaces on the same host; they have to auto-discover each
+other using the attached interfaces, and establish links which are
+supervised via the regular link monitoring mechanism. Hence, a kernel
+local node has no other way to join a cluster than any other node, and
+has to obey the policies set in the IP or device layers of the stack.
+
+- Only when a sender has established with 100% certainty that the peer
+node is located in a kernel local namespace does it choose to let user
+data messages, and only those, take the crossover path to the receiving
+node/namespace.
+
+- If the receiving node/namespace is removed, its namespace pointer
+is invalidated at all peer nodes, and their neighbor link monitoring
+will eventually note that this node is gone.
+
+- To ensure the "100% certainty" criteria, and prevent any possible
+spoofing, received discovery messages must contain a proof that the
+sender knows a common secret. We use the hash mix of the sending
+node/namespace for this purpose, since it can be accessed directly by
+all other namespaces in the kernel. Upon reception of a discovery
+message, the receiver checks this proof against all the local
+namespaces'hash_mix:es. If it finds a match, that, along with a
+matching node id and cluster id, this is deemed sufficient proof that
+the peer node in question is in a local namespace, and a wormhole can
+be opened.
+
+- We should also consider that TIPC is intended to be a cluster-local
+IPC mechanism (just like e.g. UNIX sockets) rather than a network
+protocol, and hence we think it is justified to allow it to shortcut
+the lower protocol layers.
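+
+For reference, the "common secret" proof mentioned above is computed
+and checked roughly as follows (a sketch condensed from the hunks
+below):
+
+    /* Sketch, condensed from the diff below */
+    static inline u32 tipc_net_hash_mixes(struct net *net, int tn_rand)
+    {
+        /* only code inside the same kernel can compute this */
+        return net_hash_mix(&init_net) ^ net_hash_mix(net) ^ tn_rand;
+    }
+
+    /* receiver side, for each candidate local namespace 'tmp': */
+    hash_chk = tipc_net_hash_mixes(tmp, tn_peer->random);
+    if (hash_mixes ^ hash_chk)
+        continue;    /* no match: not a kernel-local peer */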
+
+Regarding traceability, we should notice that since commit 6c9081a3915d
+("tipc: add loopback device tracking") it is possible to follow the
+node-internal packet flow by just activating tcpdump on the loopback
+interface. This will be true even for this mechanism; by activating
+tcpdump on the involved nodes' loopback interfaces their
+inter-namespace messaging can easily be tracked.
+
+v2:
+- update 'net' pointer when node left/rejoined
+v3:
+- grab read/write lock when using node ref obj
+v4:
+- clone traffic between netns to loopback
+
+Suggested-by: Jon Maloy <jon.maloy@ericsson.com>
+Acked-by: Jon Maloy <jon.maloy@ericsson.com>
+Signed-off-by: Hoang Le <hoang.h.le@dektech.com.au>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Stable-dep-of: c244c092f1ed ("tipc: fix unexpected link reset due to discovery messages")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/tipc/core.c | 16 +++++
+ net/tipc/core.h | 6 ++
+ net/tipc/discover.c | 4 +-
+ net/tipc/msg.h | 14 ++++
+ net/tipc/name_distr.c | 2 +-
+ net/tipc/node.c | 155 ++++++++++++++++++++++++++++++++++++++++--
+ net/tipc/node.h | 5 +-
+ net/tipc/socket.c | 6 +-
+ 8 files changed, 197 insertions(+), 11 deletions(-)
+
+diff --git a/net/tipc/core.c b/net/tipc/core.c
+index 90cf7e0bbaf0..58ee5ee70781 100644
+--- a/net/tipc/core.c
++++ b/net/tipc/core.c
+@@ -112,6 +112,15 @@ static void __net_exit tipc_exit_net(struct net *net)
+ cond_resched();
+ }
+
++static void __net_exit tipc_pernet_pre_exit(struct net *net)
++{
++ tipc_node_pre_cleanup_net(net);
++}
++
++static struct pernet_operations tipc_pernet_pre_exit_ops = {
++ .pre_exit = tipc_pernet_pre_exit,
++};
++
+ static struct pernet_operations tipc_net_ops = {
+ .init = tipc_init_net,
+ .exit = tipc_exit_net,
+@@ -150,6 +159,10 @@ static int __init tipc_init(void)
+ if (err)
+ goto out_pernet_topsrv;
+
++ err = register_pernet_subsys(&tipc_pernet_pre_exit_ops);
++ if (err)
++ goto out_register_pernet_subsys;
++
+ err = tipc_bearer_setup();
+ if (err)
+ goto out_bearer;
+@@ -170,6 +183,8 @@ static int __init tipc_init(void)
+ out_netlink:
+ tipc_bearer_cleanup();
+ out_bearer:
++ unregister_pernet_subsys(&tipc_pernet_pre_exit_ops);
++out_register_pernet_subsys:
+ unregister_pernet_device(&tipc_topsrv_net_ops);
+ out_pernet_topsrv:
+ tipc_socket_stop();
+@@ -187,6 +202,7 @@ static void __exit tipc_exit(void)
+ tipc_netlink_compat_stop();
+ tipc_netlink_stop();
+ tipc_bearer_cleanup();
++ unregister_pernet_subsys(&tipc_pernet_pre_exit_ops);
+ unregister_pernet_device(&tipc_topsrv_net_ops);
+ tipc_socket_stop();
+ unregister_pernet_device(&tipc_net_ops);
+diff --git a/net/tipc/core.h b/net/tipc/core.h
+index c6bda91f8581..59f97ef12e60 100644
+--- a/net/tipc/core.h
++++ b/net/tipc/core.h
+@@ -59,6 +59,7 @@
+ #include <net/netns/generic.h>
+ #include <linux/rhashtable.h>
+ #include <net/genetlink.h>
++#include <net/netns/hash.h>
+
+ #ifdef pr_fmt
+ #undef pr_fmt
+@@ -202,6 +203,11 @@ static inline int in_range(u16 val, u16 min, u16 max)
+ return !less(val, min) && !more(val, max);
+ }
+
++static inline u32 tipc_net_hash_mixes(struct net *net, int tn_rand)
++{
++ return net_hash_mix(&init_net) ^ net_hash_mix(net) ^ tn_rand;
++}
++
+ #ifdef CONFIG_SYSCTL
+ int tipc_register_sysctl(void);
+ void tipc_unregister_sysctl(void);
+diff --git a/net/tipc/discover.c b/net/tipc/discover.c
+index 0436c8f2967d..61b80de93489 100644
+--- a/net/tipc/discover.c
++++ b/net/tipc/discover.c
+@@ -94,6 +94,7 @@ static void tipc_disc_init_msg(struct net *net, struct sk_buff *skb,
+ msg_set_dest_domain(hdr, dest_domain);
+ msg_set_bc_netid(hdr, tn->net_id);
+ b->media->addr2msg(msg_media_addr(hdr), &b->addr);
++ msg_set_peer_net_hash(hdr, tipc_net_hash_mixes(net, tn->random));
+ msg_set_node_id(hdr, tipc_own_id(net));
+ }
+
+@@ -245,7 +246,8 @@ void tipc_disc_rcv(struct net *net, struct sk_buff *skb,
+ if (!tipc_in_scope(legacy, b->domain, src))
+ return;
+ tipc_node_check_dest(net, src, peer_id, b, caps, signature,
+- &maddr, &respond, &dupl_addr);
++ msg_peer_net_hash(hdr), &maddr, &respond,
++ &dupl_addr);
+ if (dupl_addr)
+ disc_dupl_alert(b, src, &maddr);
+ if (!respond)
+diff --git a/net/tipc/msg.h b/net/tipc/msg.h
+index 0daa6f04ca81..2d7cb66a6912 100644
+--- a/net/tipc/msg.h
++++ b/net/tipc/msg.h
+@@ -1026,6 +1026,20 @@ static inline bool msg_is_reset(struct tipc_msg *hdr)
+ return (msg_user(hdr) == LINK_PROTOCOL) && (msg_type(hdr) == RESET_MSG);
+ }
+
++/* Word 13
++ */
++static inline void msg_set_peer_net_hash(struct tipc_msg *m, u32 n)
++{
++ msg_set_word(m, 13, n);
++}
++
++static inline u32 msg_peer_net_hash(struct tipc_msg *m)
++{
++ return msg_word(m, 13);
++}
++
++/* Word 14
++ */
+ static inline u32 msg_sugg_node_addr(struct tipc_msg *m)
+ {
+ return msg_word(m, 14);
+diff --git a/net/tipc/name_distr.c b/net/tipc/name_distr.c
+index 661bc2551a0a..6ac84e7c8b63 100644
+--- a/net/tipc/name_distr.c
++++ b/net/tipc/name_distr.c
+@@ -146,7 +146,7 @@ static void named_distribute(struct net *net, struct sk_buff_head *list,
+ struct publication *publ;
+ struct sk_buff *skb = NULL;
+ struct distr_item *item = NULL;
+- u32 msg_dsz = ((tipc_node_get_mtu(net, dnode, 0) - INT_H_SIZE) /
++ u32 msg_dsz = ((tipc_node_get_mtu(net, dnode, 0, false) - INT_H_SIZE) /
+ ITEM_SIZE) * ITEM_SIZE;
+ u32 msg_rem = msg_dsz;
+
+diff --git a/net/tipc/node.c b/net/tipc/node.c
+index c8f6177dd5a2..3136e2a777fd 100644
+--- a/net/tipc/node.c
++++ b/net/tipc/node.c
+@@ -126,6 +126,8 @@ struct tipc_node {
+ struct timer_list timer;
+ struct rcu_head rcu;
+ unsigned long delete_at;
++ struct net *peer_net;
++ u32 peer_hash_mix;
+ };
+
+ /* Node FSM states and events:
+@@ -184,7 +186,7 @@ static struct tipc_link *node_active_link(struct tipc_node *n, int sel)
+ return n->links[bearer_id].link;
+ }
+
+-int tipc_node_get_mtu(struct net *net, u32 addr, u32 sel)
++int tipc_node_get_mtu(struct net *net, u32 addr, u32 sel, bool connected)
+ {
+ struct tipc_node *n;
+ int bearer_id;
+@@ -194,6 +196,14 @@ int tipc_node_get_mtu(struct net *net, u32 addr, u32 sel)
+ if (unlikely(!n))
+ return mtu;
+
++ /* Allow MAX_MSG_SIZE when building connection oriented message
++ * if they are in the same core network
++ */
++ if (n->peer_net && connected) {
++ tipc_node_put(n);
++ return mtu;
++ }
++
+ bearer_id = n->active_links[sel & 1];
+ if (likely(bearer_id != INVALID_BEARER_ID))
+ mtu = n->links[bearer_id].mtu;
+@@ -360,8 +370,37 @@ static void tipc_node_write_unlock(struct tipc_node *n)
+ }
+ }
+
++static void tipc_node_assign_peer_net(struct tipc_node *n, u32 hash_mixes)
++{
++ int net_id = tipc_netid(n->net);
++ struct tipc_net *tn_peer;
++ struct net *tmp;
++ u32 hash_chk;
++
++ if (n->peer_net)
++ return;
++
++ for_each_net_rcu(tmp) {
++ tn_peer = tipc_net(tmp);
++ if (!tn_peer)
++ continue;
++ /* Integrity checking whether node exists in namespace or not */
++ if (tn_peer->net_id != net_id)
++ continue;
++ if (memcmp(n->peer_id, tn_peer->node_id, NODE_ID_LEN))
++ continue;
++ hash_chk = tipc_net_hash_mixes(tmp, tn_peer->random);
++ if (hash_mixes ^ hash_chk)
++ continue;
++ n->peer_net = tmp;
++ n->peer_hash_mix = hash_mixes;
++ break;
++ }
++}
++
+ static struct tipc_node *tipc_node_create(struct net *net, u32 addr,
+- u8 *peer_id, u16 capabilities)
++ u8 *peer_id, u16 capabilities,
++ u32 signature, u32 hash_mixes)
+ {
+ struct tipc_net *tn = net_generic(net, tipc_net_id);
+ struct tipc_node *n, *temp_node;
+@@ -372,6 +411,8 @@ static struct tipc_node *tipc_node_create(struct net *net, u32 addr,
+ spin_lock_bh(&tn->node_list_lock);
+ n = tipc_node_find(net, addr);
+ if (n) {
++ if (n->peer_hash_mix ^ hash_mixes)
++ tipc_node_assign_peer_net(n, hash_mixes);
+ if (n->capabilities == capabilities)
+ goto exit;
+ /* Same node may come back with new capabilities */
+@@ -389,6 +430,7 @@ static struct tipc_node *tipc_node_create(struct net *net, u32 addr,
+ list_for_each_entry_rcu(temp_node, &tn->node_list, list) {
+ tn->capabilities &= temp_node->capabilities;
+ }
++
+ goto exit;
+ }
+ n = kzalloc(sizeof(*n), GFP_ATOMIC);
+@@ -399,6 +441,10 @@ static struct tipc_node *tipc_node_create(struct net *net, u32 addr,
+ n->addr = addr;
+ memcpy(&n->peer_id, peer_id, 16);
+ n->net = net;
++ n->peer_net = NULL;
++ n->peer_hash_mix = 0;
++ /* Assign kernel local namespace if exists */
++ tipc_node_assign_peer_net(n, hash_mixes);
+ n->capabilities = capabilities;
+ kref_init(&n->kref);
+ rwlock_init(&n->lock);
+@@ -426,6 +472,10 @@ static struct tipc_node *tipc_node_create(struct net *net, u32 addr,
+ tipc_bc_sndlink(net),
+ &n->bc_entry.link)) {
+ pr_warn("Broadcast rcv link creation failed, no memory\n");
++ if (n->peer_net) {
++ n->peer_net = NULL;
++ n->peer_hash_mix = 0;
++ }
+ kfree(n);
+ n = NULL;
+ goto exit;
+@@ -979,7 +1029,7 @@ u32 tipc_node_try_addr(struct net *net, u8 *id, u32 addr)
+
+ void tipc_node_check_dest(struct net *net, u32 addr,
+ u8 *peer_id, struct tipc_bearer *b,
+- u16 capabilities, u32 signature,
++ u16 capabilities, u32 signature, u32 hash_mixes,
+ struct tipc_media_addr *maddr,
+ bool *respond, bool *dupl_addr)
+ {
+@@ -998,7 +1048,8 @@ void tipc_node_check_dest(struct net *net, u32 addr,
+ *dupl_addr = false;
+ *respond = false;
+
+- n = tipc_node_create(net, addr, peer_id, capabilities);
++ n = tipc_node_create(net, addr, peer_id, capabilities, signature,
++ hash_mixes);
+ if (!n)
+ return;
+
+@@ -1343,6 +1394,10 @@ static void node_lost_contact(struct tipc_node *n,
+ /* Notify publications from this node */
+ n->action_flags |= TIPC_NOTIFY_NODE_DOWN;
+
++ if (n->peer_net) {
++ n->peer_net = NULL;
++ n->peer_hash_mix = 0;
++ }
+ /* Notify sockets connected to node */
+ list_for_each_entry_safe(conn, safe, conns, list) {
+ skb = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE, TIPC_CONN_MSG,
+@@ -1424,6 +1479,56 @@ static int __tipc_nl_add_node(struct tipc_nl_msg *msg, struct tipc_node *node)
+ return -EMSGSIZE;
+ }
+
++static void tipc_lxc_xmit(struct net *peer_net, struct sk_buff_head *list)
++{
++ struct tipc_msg *hdr = buf_msg(skb_peek(list));
++ struct sk_buff_head inputq;
++
++ switch (msg_user(hdr)) {
++ case TIPC_LOW_IMPORTANCE:
++ case TIPC_MEDIUM_IMPORTANCE:
++ case TIPC_HIGH_IMPORTANCE:
++ case TIPC_CRITICAL_IMPORTANCE:
++ if (msg_connected(hdr) || msg_named(hdr)) {
++ tipc_loopback_trace(peer_net, list);
++ spin_lock_init(&list->lock);
++ tipc_sk_rcv(peer_net, list);
++ return;
++ }
++ if (msg_mcast(hdr)) {
++ tipc_loopback_trace(peer_net, list);
++ skb_queue_head_init(&inputq);
++ tipc_sk_mcast_rcv(peer_net, list, &inputq);
++ __skb_queue_purge(list);
++ skb_queue_purge(&inputq);
++ return;
++ }
++ return;
++ case MSG_FRAGMENTER:
++ if (tipc_msg_assemble(list)) {
++ tipc_loopback_trace(peer_net, list);
++ skb_queue_head_init(&inputq);
++ tipc_sk_mcast_rcv(peer_net, list, &inputq);
++ __skb_queue_purge(list);
++ skb_queue_purge(&inputq);
++ }
++ return;
++ case GROUP_PROTOCOL:
++ case CONN_MANAGER:
++ tipc_loopback_trace(peer_net, list);
++ spin_lock_init(&list->lock);
++ tipc_sk_rcv(peer_net, list);
++ return;
++ case LINK_PROTOCOL:
++ case NAME_DISTRIBUTOR:
++ case TUNNEL_PROTOCOL:
++ case BCAST_PROTOCOL:
++ return;
++ default:
++ return;
++ };
++}
++
+ /**
+ * tipc_node_xmit() is the general link level function for message sending
+ * @net: the applicable net namespace
+@@ -1439,6 +1544,7 @@ int tipc_node_xmit(struct net *net, struct sk_buff_head *list,
+ struct tipc_link_entry *le = NULL;
+ struct tipc_node *n;
+ struct sk_buff_head xmitq;
++ bool node_up = false;
+ int bearer_id;
+ int rc;
+
+@@ -1456,6 +1562,17 @@ int tipc_node_xmit(struct net *net, struct sk_buff_head *list,
+ }
+
+ tipc_node_read_lock(n);
++ node_up = node_is_up(n);
++ if (node_up && n->peer_net && check_net(n->peer_net)) {
++ /* xmit inner linux container */
++ tipc_lxc_xmit(n->peer_net, list);
++ if (likely(skb_queue_empty(list))) {
++ tipc_node_read_unlock(n);
++ tipc_node_put(n);
++ return 0;
++ }
++ }
++
+ bearer_id = n->active_links[selector & 1];
+ if (unlikely(bearer_id == INVALID_BEARER_ID)) {
+ tipc_node_read_unlock(n);
+@@ -2591,3 +2708,33 @@ int tipc_node_dump(struct tipc_node *n, bool more, char *buf)
+
+ return i;
+ }
++
++void tipc_node_pre_cleanup_net(struct net *exit_net)
++{
++ struct tipc_node *n;
++ struct tipc_net *tn;
++ struct net *tmp;
++
++ rcu_read_lock();
++ for_each_net_rcu(tmp) {
++ if (tmp == exit_net)
++ continue;
++ tn = tipc_net(tmp);
++ if (!tn)
++ continue;
++ spin_lock_bh(&tn->node_list_lock);
++ list_for_each_entry_rcu(n, &tn->node_list, list) {
++ if (!n->peer_net)
++ continue;
++ if (n->peer_net != exit_net)
++ continue;
++ tipc_node_write_lock(n);
++ n->peer_net = NULL;
++ n->peer_hash_mix = 0;
++ tipc_node_write_unlock_fast(n);
++ break;
++ }
++ spin_unlock_bh(&tn->node_list_lock);
++ }
++ rcu_read_unlock();
++}
+diff --git a/net/tipc/node.h b/net/tipc/node.h
+index 291d0ecd4101..30563c4f35d5 100644
+--- a/net/tipc/node.h
++++ b/net/tipc/node.h
+@@ -75,7 +75,7 @@ u32 tipc_node_get_addr(struct tipc_node *node);
+ u32 tipc_node_try_addr(struct net *net, u8 *id, u32 addr);
+ void tipc_node_check_dest(struct net *net, u32 onode, u8 *peer_id128,
+ struct tipc_bearer *bearer,
+- u16 capabilities, u32 signature,
++ u16 capabilities, u32 signature, u32 hash_mixes,
+ struct tipc_media_addr *maddr,
+ bool *respond, bool *dupl_addr);
+ void tipc_node_delete_links(struct net *net, int bearer_id);
+@@ -92,7 +92,7 @@ void tipc_node_unsubscribe(struct net *net, struct list_head *subscr, u32 addr);
+ void tipc_node_broadcast(struct net *net, struct sk_buff *skb);
+ int tipc_node_add_conn(struct net *net, u32 dnode, u32 port, u32 peer_port);
+ void tipc_node_remove_conn(struct net *net, u32 dnode, u32 port);
+-int tipc_node_get_mtu(struct net *net, u32 addr, u32 sel);
++int tipc_node_get_mtu(struct net *net, u32 addr, u32 sel, bool connected);
+ bool tipc_node_is_up(struct net *net, u32 addr);
+ u16 tipc_node_get_capabilities(struct net *net, u32 addr);
+ int tipc_nl_node_dump(struct sk_buff *skb, struct netlink_callback *cb);
+@@ -107,4 +107,5 @@ int tipc_nl_node_get_monitor(struct sk_buff *skb, struct genl_info *info);
+ int tipc_nl_node_dump_monitor(struct sk_buff *skb, struct netlink_callback *cb);
+ int tipc_nl_node_dump_monitor_peer(struct sk_buff *skb,
+ struct netlink_callback *cb);
++void tipc_node_pre_cleanup_net(struct net *exit_net);
+ #endif
+diff --git a/net/tipc/socket.c b/net/tipc/socket.c
+index 58c4d61d603f..e1e148da538d 100644
+--- a/net/tipc/socket.c
++++ b/net/tipc/socket.c
+@@ -866,7 +866,7 @@ static int tipc_send_group_msg(struct net *net, struct tipc_sock *tsk,
+
+ /* Build message as chain of buffers */
+ __skb_queue_head_init(&pkts);
+- mtu = tipc_node_get_mtu(net, dnode, tsk->portid);
++ mtu = tipc_node_get_mtu(net, dnode, tsk->portid, false);
+ rc = tipc_msg_build(hdr, m, 0, dlen, mtu, &pkts);
+ if (unlikely(rc != dlen))
+ return rc;
+@@ -1407,7 +1407,7 @@ static int __tipc_sendmsg(struct socket *sock, struct msghdr *m, size_t dlen)
+ }
+
+ __skb_queue_head_init(&pkts);
+- mtu = tipc_node_get_mtu(net, dnode, tsk->portid);
++ mtu = tipc_node_get_mtu(net, dnode, tsk->portid, false);
+ rc = tipc_msg_build(hdr, m, 0, dlen, mtu, &pkts);
+ if (unlikely(rc != dlen))
+ return rc;
+@@ -1547,7 +1547,7 @@ static void tipc_sk_finish_conn(struct tipc_sock *tsk, u32 peer_port,
+ sk_reset_timer(sk, &sk->sk_timer, jiffies + CONN_PROBING_INTV);
+ tipc_set_sk_state(sk, TIPC_ESTABLISHED);
+ tipc_node_add_conn(net, peer_node, tsk->portid, peer_port);
+- tsk->max_pkt = tipc_node_get_mtu(net, peer_node, tsk->portid);
++ tsk->max_pkt = tipc_node_get_mtu(net, peer_node, tsk->portid, true);
+ tsk->peer_caps = tipc_node_get_capabilities(net, peer_node);
+ __skb_queue_purge(&sk->sk_write_queue);
+ if (tsk->peer_caps & TIPC_BLOCK_FLOWCTL)
+--
+2.35.1
+
--- /dev/null
+From ea0d0476a71b0fc7a0f73607d305481612fc5a12 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 20 Dec 2022 17:11:23 +0100
+Subject: x86/resctrl: Fix task CLOSID/RMID update race
+
+From: Peter Newman <peternewman@google.com>
+
+[ Upstream commit fe1f0714385fbcf76b0cbceb02b7277d842014fc ]
+
+When the user moves a running task to a new rdtgroup using the task's
+file interface or by deleting its rdtgroup, the resulting change in
+CLOSID/RMID must be immediately propagated to the PQR_ASSOC MSR on the
+task(s)' CPUs.
+
+x86 allows loads to be reordered with prior stores, so the CPU can
+hoist the task_curr() load above the stores in the CLOSID/RMID update.
+If the task starts running in that window, it can keep running with
+the old CLOSID/RMID until it is switched again, because
+__rdtgroup_move_task() failed to determine that it needs to be
+interrupted to obtain the new CLOSID/RMID.
+
+Refer to the diagram below:
+
+CPU 0                                   CPU 1
+-----                                   -----
+__rdtgroup_move_task():
+  curr <- t1->cpu->rq->curr
+                                        __schedule():
+                                          rq->curr <- t1
+                                        resctrl_sched_in():
+                                          t1->{closid,rmid} -> {1,1}
+t1->{closid,rmid} <- {2,2}
+if (curr == t1) // false
+  IPI(t1->cpu)
+
+A similar race impacts rdt_move_group_tasks(), which updates tasks in a
+deleted rdtgroup.
+
+In both cases, use smp_mb() to order the task_struct::{closid,rmid}
+stores before the loads in task_curr(). In particular, in the
+rdt_move_group_tasks() case, simply execute an smp_mb() on every
+iteration with a matching task.
+
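+Schematically, the fixed update path does the following (a sketch
+condensed from the hunks below; resctrl_sched_in() on the remote CPU
+runs after the scheduler's own full barrier on the rq->curr update,
+which is what the smp_mb() pairs with):
+
+    /* Sketch, condensed from the diff below */
+    t->closid = to->closid;
+    t->rmid = to->mon.rmid;
+    /* order the stores above before the loads in task_curr() */
+    smp_mb();
+    if (task_curr(t))
+        /* mark the CPU so it is IPI'd to reload PQR_ASSOC */
+        cpumask_set_cpu(task_cpu(t), mask);
+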
+It is possible to use a single smp_mb() in rdt_move_group_tasks(), but
+this would require two passes and a means of remembering which
+task_structs were updated in the first loop. However, benchmarking
+results below showed too little performance impact in the simple
+approach to justify implementing the two-pass approach.
+
+Times below were collected using `perf stat` to measure the time to
+remove a group containing a 1600-task, parallel workload.
+
+CPU: Intel(R) Xeon(R) Platinum P-8136 CPU @ 2.00GHz (112 threads)
+
+ # mkdir /sys/fs/resctrl/test
+ # echo $$ > /sys/fs/resctrl/test/tasks
+ # perf bench sched messaging -g 40 -l 100000
+
+task-clock time ranges collected using:
+
+ # perf stat rmdir /sys/fs/resctrl/test
+
+Baseline: 1.54 - 1.60 ms
+smp_mb() every matching task: 1.57 - 1.67 ms
+
+ [ bp: Massage commit message. ]
+
+Fixes: ae28d1aae48a ("x86/resctrl: Use an IPI instead of task_work_add() to update PQR_ASSOC MSR")
+Fixes: 0efc89be9471 ("x86/intel_rdt: Update task closid immediately on CPU in rmdir and unmount")
+Signed-off-by: Peter Newman <peternewman@google.com>
+Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
+Reviewed-by: Reinette Chatre <reinette.chatre@intel.com>
+Reviewed-by: Babu Moger <babu.moger@amd.com>
+Cc: <stable@kernel.org>
+Link: https://lore.kernel.org/r/20221220161123.432120-1-peternewman@google.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/x86/kernel/cpu/resctrl/rdtgroup.c | 12 +++++++++++-
+ 1 file changed, 11 insertions(+), 1 deletion(-)
+
+diff --git a/arch/x86/kernel/cpu/resctrl/rdtgroup.c b/arch/x86/kernel/cpu/resctrl/rdtgroup.c
+index 2c19f2ecfa03..8d6023e6ad9e 100644
+--- a/arch/x86/kernel/cpu/resctrl/rdtgroup.c
++++ b/arch/x86/kernel/cpu/resctrl/rdtgroup.c
+@@ -577,8 +577,10 @@ static int __rdtgroup_move_task(struct task_struct *tsk,
+ /*
+ * Ensure the task's closid and rmid are written before determining if
+ * the task is current that will decide if it will be interrupted.
++ * This pairs with the full barrier between the rq->curr update and
++ * resctrl_sched_in() during context switch.
+ */
+- barrier();
++ smp_mb();
+
+ /*
+ * By now, the task's closid and rmid are set. If the task is current
+@@ -2178,6 +2180,14 @@ static void rdt_move_group_tasks(struct rdtgroup *from, struct rdtgroup *to,
+ t->closid = to->closid;
+ t->rmid = to->mon.rmid;
+
++ /*
++ * Order the closid/rmid stores above before the loads
++ * in task_curr(). This pairs with the full barrier
++ * between the rq->curr update and resctrl_sched_in()
++ * during context switch.
++ */
++ smp_mb();
++
+ /*
+ * If the task is on a CPU, set the CPU in the mask.
+ * The detection is inaccurate as tasks might move or
+--
+2.35.1
+
--- /dev/null
+From d5f6fea8fc19726797d5048e744fac65db2a7c90 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 17 Dec 2020 14:31:20 -0800
+Subject: x86/resctrl: Use task_curr() instead of task_struct->on_cpu to
+ prevent unnecessary IPI
+
+From: Reinette Chatre <reinette.chatre@intel.com>
+
+[ Upstream commit e0ad6dc8969f790f14bddcfd7ea284b7e5f88a16 ]
+
+James reported in [1] that there could be two tasks running on the
+same CPU with task_struct->on_cpu set. Using task_struct->on_cpu to
+test whether a task is running on a CPU may thus match the old task
+for a CPU while the scheduler is running, and IPI it unnecessarily.
+
+task_curr() is the correct helper to use. While doing so, move the
+#ifdef check of the CONFIG_SMP symbol into an IS_ENABLED() C
+conditional that determines whether this helper should be used, so
+that the code is always checked for correctness by the compiler.
+
+[1] https://lore.kernel.org/lkml/a782d2f3-d2f6-795f-f4b1-9462205fd581@arm.com
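+
+The resulting pattern (a condensed sketch of the hunk below) keeps the
+SMP-only branch visible to the compiler on all configurations, while
+still letting it be optimized away when CONFIG_SMP is off:
+
+    /* Sketch, condensed from the diff below */
+    if (IS_ENABLED(CONFIG_SMP) && mask && task_curr(t))
+        cpumask_set_cpu(task_cpu(t), mask);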
+
+Reported-by: James Morse <james.morse@arm.com>
+Signed-off-by: Reinette Chatre <reinette.chatre@intel.com>
+Signed-off-by: Borislav Petkov <bp@suse.de>
+Link: https://lkml.kernel.org/r/e9e68ce1441a73401e08b641cc3b9a3cf13fe6d4.1608243147.git.reinette.chatre@intel.com
+Stable-dep-of: fe1f0714385f ("x86/resctrl: Fix task CLOSID/RMID update race")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/x86/kernel/cpu/resctrl/rdtgroup.c | 14 +++++---------
+ 1 file changed, 5 insertions(+), 9 deletions(-)
+
+diff --git a/arch/x86/kernel/cpu/resctrl/rdtgroup.c b/arch/x86/kernel/cpu/resctrl/rdtgroup.c
+index 28f786289fce..2c19f2ecfa03 100644
+--- a/arch/x86/kernel/cpu/resctrl/rdtgroup.c
++++ b/arch/x86/kernel/cpu/resctrl/rdtgroup.c
+@@ -2178,19 +2178,15 @@ static void rdt_move_group_tasks(struct rdtgroup *from, struct rdtgroup *to,
+ t->closid = to->closid;
+ t->rmid = to->mon.rmid;
+
+-#ifdef CONFIG_SMP
+ /*
+- * This is safe on x86 w/o barriers as the ordering
+- * of writing to task_cpu() and t->on_cpu is
+- * reverse to the reading here. The detection is
+- * inaccurate as tasks might move or schedule
+- * before the smp function call takes place. In
+- * such a case the function call is pointless, but
++ * If the task is on a CPU, set the CPU in the mask.
++ * The detection is inaccurate as tasks might move or
++ * schedule before the smp function call takes place.
++ * In such a case the function call is pointless, but
+ * there is no other side effect.
+ */
+- if (mask && t->on_cpu)
++ if (IS_ENABLED(CONFIG_SMP) && mask && task_curr(t))
+ cpumask_set_cpu(task_cpu(t), mask);
+-#endif
+ }
+ }
+ read_unlock(&tasklist_lock);
+--
+2.35.1
+