git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
Fixes for 5.10
author	Sasha Levin <sashal@kernel.org>
	Sun, 15 Jan 2023 23:58:57 +0000 (18:58 -0500)
committer	Sasha Levin <sashal@kernel.org>
	Sun, 15 Jan 2023 23:58:57 +0000 (18:58 -0500)
Signed-off-by: Sasha Levin <sashal@kernel.org>
24 files changed:
queue-5.10/arm64-atomics-format-whitespace-consistently.patch [new file with mode: 0644]
queue-5.10/arm64-atomics-remove-ll-sc-trampolines.patch [new file with mode: 0644]
queue-5.10/arm64-cmpxchg_double-hazard-against-entire-exchange-.patch [new file with mode: 0644]
queue-5.10/asoc-wm8904-fix-wrong-outputs-volume-after-power-rea.patch [new file with mode: 0644]
queue-5.10/documentation-kvm-add-api-issues-section.patch [new file with mode: 0644]
queue-5.10/drm-virtio-fix-gem-handle-creation-uaf.patch [new file with mode: 0644]
queue-5.10/efi-fix-null-deref-in-init-error-path.patch [new file with mode: 0644]
queue-5.10/hvc-xen-lock-console-list-traversal.patch [new file with mode: 0644]
queue-5.10/iommu-mediatek-v1-add-error-handle-for-mtk_iommu_pro.patch [new file with mode: 0644]
queue-5.10/iommu-mediatek-v1-fix-an-error-handling-path-in-mtk_.patch [new file with mode: 0644]
queue-5.10/kvm-x86-do-not-return-host-topology-information-from.patch [new file with mode: 0644]
queue-5.10/mm-always-release-pages-to-the-buddy-allocator-in-me.patch [new file with mode: 0644]
queue-5.10/net-mlx5-fix-ptp-max-frequency-adjustment-range.patch [new file with mode: 0644]
queue-5.10/net-mlx5e-don-t-support-encap-rules-with-gbp-option.patch [new file with mode: 0644]
queue-5.10/net-sched-act_mpls-fix-warning-during-failed-attribu.patch [new file with mode: 0644]
queue-5.10/nfc-pn533-wait-for-out_urb-s-completion-in-pn533_usb.patch [new file with mode: 0644]
queue-5.10/octeontx2-af-fix-lmac-config-in-cgx_lmac_rx_tx_enabl.patch [new file with mode: 0644]
queue-5.10/octeontx2-af-map-nix-block-from-cgx-connection.patch [new file with mode: 0644]
queue-5.10/octeontx2-af-update-get-set-resource-count-functions.patch [new file with mode: 0644]
queue-5.10/regulator-da9211-use-irq-handler-when-ready.patch [new file with mode: 0644]
queue-5.10/series
queue-5.10/tipc-fix-unexpected-link-reset-due-to-discovery-mess.patch [new file with mode: 0644]
queue-5.10/x86-resctrl-fix-task-closid-rmid-update-race.patch [new file with mode: 0644]
queue-5.10/x86-resctrl-use-task_curr-instead-of-task_struct-on_.patch [new file with mode: 0644]

diff --git a/queue-5.10/arm64-atomics-format-whitespace-consistently.patch b/queue-5.10/arm64-atomics-format-whitespace-consistently.patch
new file mode 100644 (file)
index 0000000..a310d5c
--- /dev/null
@@ -0,0 +1,276 @@
+From 74b343de81020dac79032aefc926283bf5136487 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 10 Dec 2021 15:14:06 +0000
+Subject: arm64: atomics: format whitespace consistently
+
+From: Mark Rutland <mark.rutland@arm.com>
+
+[ Upstream commit 8e6082e94aac6d0338883b5953631b662a5a9188 ]
+
+The code for the atomic ops is formatted inconsistently, and while this
+is not a functional problem it is rather distracting when working on
+them.
+
+Some ops have consistent indentation, e.g.
+
+| #define ATOMIC_OP_ADD_RETURN(name, mb, cl...)                           \
+| static inline int __lse_atomic_add_return##name(int i, atomic_t *v)     \
+| {                                                                       \
+|         u32 tmp;                                                        \
+|                                                                         \
+|         asm volatile(                                                   \
+|         __LSE_PREAMBLE                                                  \
+|         "       ldadd" #mb "    %w[i], %w[tmp], %[v]\n"                 \
+|         "       add     %w[i], %w[i], %w[tmp]"                          \
+|         : [i] "+r" (i), [v] "+Q" (v->counter), [tmp] "=&r" (tmp)        \
+|         : "r" (v)                                                       \
+|         : cl);                                                          \
+|                                                                         \
+|         return i;                                                       \
+| }
+
+While others have negative indentation for some lines, and/or have
+misaligned trailing backslashes, e.g.
+
+| static inline void __lse_atomic_##op(int i, atomic_t *v)                        \
+| {                                                                       \
+|         asm volatile(                                                   \
+|         __LSE_PREAMBLE                                                  \
+| "       " #asm_op "     %w[i], %[v]\n"                                  \
+|         : [i] "+r" (i), [v] "+Q" (v->counter)                           \
+|         : "r" (v));                                                     \
+| }
+
+This patch makes the indentation consistent and also aligns the trailing
+backslashes. This makes the code easier to read for those (like myself)
+who are easily distracted by these inconsistencies.
+
+This is intended as a cleanup.
+There should be no functional change as a result of this patch.
+
+Signed-off-by: Mark Rutland <mark.rutland@arm.com>
+Cc: Boqun Feng <boqun.feng@gmail.com>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Will Deacon <will@kernel.org>
+Acked-by: Will Deacon <will@kernel.org>
+Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Link: https://lore.kernel.org/r/20211210151410.2782645-2-mark.rutland@arm.com
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Stable-dep-of: 031af50045ea ("arm64: cmpxchg_double*: hazard against entire exchange variable")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/arm64/include/asm/atomic_ll_sc.h | 86 +++++++++++++--------------
+ arch/arm64/include/asm/atomic_lse.h   | 14 ++---
+ 2 files changed, 50 insertions(+), 50 deletions(-)
+
+diff --git a/arch/arm64/include/asm/atomic_ll_sc.h b/arch/arm64/include/asm/atomic_ll_sc.h
+index 13869b76b58c..fe0db8d416fb 100644
+--- a/arch/arm64/include/asm/atomic_ll_sc.h
++++ b/arch/arm64/include/asm/atomic_ll_sc.h
+@@ -44,11 +44,11 @@ __ll_sc_atomic_##op(int i, atomic_t *v)                                    \
+                                                                       \
+       asm volatile("// atomic_" #op "\n"                              \
+       __LL_SC_FALLBACK(                                               \
+-"     prfm    pstl1strm, %2\n"                                        \
+-"1:   ldxr    %w0, %2\n"                                              \
+-"     " #asm_op "     %w0, %w0, %w3\n"                                \
+-"     stxr    %w1, %w0, %2\n"                                         \
+-"     cbnz    %w1, 1b\n")                                             \
++      "       prfm    pstl1strm, %2\n"                                \
++      "1:     ldxr    %w0, %2\n"                                      \
++      "       " #asm_op "     %w0, %w0, %w3\n"                        \
++      "       stxr    %w1, %w0, %2\n"                                 \
++      "       cbnz    %w1, 1b\n")                                     \
+       : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)                \
+       : __stringify(constraint) "r" (i));                             \
+ }
+@@ -62,12 +62,12 @@ __ll_sc_atomic_##op##_return##name(int i, atomic_t *v)                     \
+                                                                       \
+       asm volatile("// atomic_" #op "_return" #name "\n"              \
+       __LL_SC_FALLBACK(                                               \
+-"     prfm    pstl1strm, %2\n"                                        \
+-"1:   ld" #acq "xr    %w0, %2\n"                                      \
+-"     " #asm_op "     %w0, %w0, %w3\n"                                \
+-"     st" #rel "xr    %w1, %w0, %2\n"                                 \
+-"     cbnz    %w1, 1b\n"                                              \
+-"     " #mb )                                                         \
++      "       prfm    pstl1strm, %2\n"                                \
++      "1:     ld" #acq "xr    %w0, %2\n"                              \
++      "       " #asm_op "     %w0, %w0, %w3\n"                        \
++      "       st" #rel "xr    %w1, %w0, %2\n"                         \
++      "       cbnz    %w1, 1b\n"                                      \
++      "       " #mb )                                                 \
+       : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)                \
+       : __stringify(constraint) "r" (i)                               \
+       : cl);                                                          \
+@@ -84,12 +84,12 @@ __ll_sc_atomic_fetch_##op##name(int i, atomic_t *v)                        \
+                                                                       \
+       asm volatile("// atomic_fetch_" #op #name "\n"                  \
+       __LL_SC_FALLBACK(                                               \
+-"     prfm    pstl1strm, %3\n"                                        \
+-"1:   ld" #acq "xr    %w0, %3\n"                                      \
+-"     " #asm_op "     %w1, %w0, %w4\n"                                \
+-"     st" #rel "xr    %w2, %w1, %3\n"                                 \
+-"     cbnz    %w2, 1b\n"                                              \
+-"     " #mb )                                                         \
++      "       prfm    pstl1strm, %3\n"                                \
++      "1:     ld" #acq "xr    %w0, %3\n"                              \
++      "       " #asm_op "     %w1, %w0, %w4\n"                        \
++      "       st" #rel "xr    %w2, %w1, %3\n"                         \
++      "       cbnz    %w2, 1b\n"                                      \
++      "       " #mb )                                                 \
+       : "=&r" (result), "=&r" (val), "=&r" (tmp), "+Q" (v->counter)   \
+       : __stringify(constraint) "r" (i)                               \
+       : cl);                                                          \
+@@ -143,11 +143,11 @@ __ll_sc_atomic64_##op(s64 i, atomic64_t *v)                              \
+                                                                       \
+       asm volatile("// atomic64_" #op "\n"                            \
+       __LL_SC_FALLBACK(                                               \
+-"     prfm    pstl1strm, %2\n"                                        \
+-"1:   ldxr    %0, %2\n"                                               \
+-"     " #asm_op "     %0, %0, %3\n"                                   \
+-"     stxr    %w1, %0, %2\n"                                          \
+-"     cbnz    %w1, 1b")                                               \
++      "       prfm    pstl1strm, %2\n"                                \
++      "1:     ldxr    %0, %2\n"                                       \
++      "       " #asm_op "     %0, %0, %3\n"                           \
++      "       stxr    %w1, %0, %2\n"                                  \
++      "       cbnz    %w1, 1b")                                       \
+       : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)                \
+       : __stringify(constraint) "r" (i));                             \
+ }
+@@ -161,12 +161,12 @@ __ll_sc_atomic64_##op##_return##name(s64 i, atomic64_t *v)               \
+                                                                       \
+       asm volatile("// atomic64_" #op "_return" #name "\n"            \
+       __LL_SC_FALLBACK(                                               \
+-"     prfm    pstl1strm, %2\n"                                        \
+-"1:   ld" #acq "xr    %0, %2\n"                                       \
+-"     " #asm_op "     %0, %0, %3\n"                                   \
+-"     st" #rel "xr    %w1, %0, %2\n"                                  \
+-"     cbnz    %w1, 1b\n"                                              \
+-"     " #mb )                                                         \
++      "       prfm    pstl1strm, %2\n"                                \
++      "1:     ld" #acq "xr    %0, %2\n"                               \
++      "       " #asm_op "     %0, %0, %3\n"                           \
++      "       st" #rel "xr    %w1, %0, %2\n"                          \
++      "       cbnz    %w1, 1b\n"                                      \
++      "       " #mb )                                                 \
+       : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)                \
+       : __stringify(constraint) "r" (i)                               \
+       : cl);                                                          \
+@@ -176,19 +176,19 @@ __ll_sc_atomic64_##op##_return##name(s64 i, atomic64_t *v)               \
+ #define ATOMIC64_FETCH_OP(name, mb, acq, rel, cl, op, asm_op, constraint)\
+ static inline long                                                    \
+-__ll_sc_atomic64_fetch_##op##name(s64 i, atomic64_t *v)               \
++__ll_sc_atomic64_fetch_##op##name(s64 i, atomic64_t *v)                       \
+ {                                                                     \
+       s64 result, val;                                                \
+       unsigned long tmp;                                              \
+                                                                       \
+       asm volatile("// atomic64_fetch_" #op #name "\n"                \
+       __LL_SC_FALLBACK(                                               \
+-"     prfm    pstl1strm, %3\n"                                        \
+-"1:   ld" #acq "xr    %0, %3\n"                                       \
+-"     " #asm_op "     %1, %0, %4\n"                                   \
+-"     st" #rel "xr    %w2, %1, %3\n"                                  \
+-"     cbnz    %w2, 1b\n"                                              \
+-"     " #mb )                                                         \
++      "       prfm    pstl1strm, %3\n"                                \
++      "1:     ld" #acq "xr    %0, %3\n"                               \
++      "       " #asm_op "     %1, %0, %4\n"                           \
++      "       st" #rel "xr    %w2, %1, %3\n"                          \
++      "       cbnz    %w2, 1b\n"                                      \
++      "       " #mb )                                                 \
+       : "=&r" (result), "=&r" (val), "=&r" (tmp), "+Q" (v->counter)   \
+       : __stringify(constraint) "r" (i)                               \
+       : cl);                                                          \
+@@ -241,14 +241,14 @@ __ll_sc_atomic64_dec_if_positive(atomic64_t *v)
+       asm volatile("// atomic64_dec_if_positive\n"
+       __LL_SC_FALLBACK(
+-"     prfm    pstl1strm, %2\n"
+-"1:   ldxr    %0, %2\n"
+-"     subs    %0, %0, #1\n"
+-"     b.lt    2f\n"
+-"     stlxr   %w1, %0, %2\n"
+-"     cbnz    %w1, 1b\n"
+-"     dmb     ish\n"
+-"2:")
++      "       prfm    pstl1strm, %2\n"
++      "1:     ldxr    %0, %2\n"
++      "       subs    %0, %0, #1\n"
++      "       b.lt    2f\n"
++      "       stlxr   %w1, %0, %2\n"
++      "       cbnz    %w1, 1b\n"
++      "       dmb     ish\n"
++      "2:")
+       : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
+       :
+       : "cc", "memory");
+diff --git a/arch/arm64/include/asm/atomic_lse.h b/arch/arm64/include/asm/atomic_lse.h
+index da3280f639cd..ab661375835e 100644
+--- a/arch/arm64/include/asm/atomic_lse.h
++++ b/arch/arm64/include/asm/atomic_lse.h
+@@ -11,11 +11,11 @@
+ #define __ASM_ATOMIC_LSE_H
+ #define ATOMIC_OP(op, asm_op)                                         \
+-static inline void __lse_atomic_##op(int i, atomic_t *v)                      \
++static inline void __lse_atomic_##op(int i, atomic_t *v)              \
+ {                                                                     \
+       asm volatile(                                                   \
+       __LSE_PREAMBLE                                                  \
+-"     " #asm_op "     %w[i], %[v]\n"                                  \
++      "       " #asm_op "     %w[i], %[v]\n"                          \
+       : [i] "+r" (i), [v] "+Q" (v->counter)                           \
+       : "r" (v));                                                     \
+ }
+@@ -32,7 +32,7 @@ static inline int __lse_atomic_fetch_##op##name(int i, atomic_t *v)  \
+ {                                                                     \
+       asm volatile(                                                   \
+       __LSE_PREAMBLE                                                  \
+-"     " #asm_op #mb " %w[i], %w[i], %[v]"                             \
++      "       " #asm_op #mb " %w[i], %w[i], %[v]"                     \
+       : [i] "+r" (i), [v] "+Q" (v->counter)                           \
+       : "r" (v)                                                       \
+       : cl);                                                          \
+@@ -130,7 +130,7 @@ static inline int __lse_atomic_sub_return##name(int i, atomic_t *v)        \
+       "       add     %w[i], %w[i], %w[tmp]"                          \
+       : [i] "+&r" (i), [v] "+Q" (v->counter), [tmp] "=&r" (tmp)       \
+       : "r" (v)                                                       \
+-      : cl);                                                  \
++      : cl);                                                          \
+                                                                       \
+       return i;                                                       \
+ }
+@@ -168,7 +168,7 @@ static inline void __lse_atomic64_##op(s64 i, atomic64_t *v)               \
+ {                                                                     \
+       asm volatile(                                                   \
+       __LSE_PREAMBLE                                                  \
+-"     " #asm_op "     %[i], %[v]\n"                                   \
++      "       " #asm_op "     %[i], %[v]\n"                           \
+       : [i] "+r" (i), [v] "+Q" (v->counter)                           \
+       : "r" (v));                                                     \
+ }
+@@ -185,7 +185,7 @@ static inline long __lse_atomic64_fetch_##op##name(s64 i, atomic64_t *v)\
+ {                                                                     \
+       asm volatile(                                                   \
+       __LSE_PREAMBLE                                                  \
+-"     " #asm_op #mb " %[i], %[i], %[v]"                               \
++      "       " #asm_op #mb " %[i], %[i], %[v]"                       \
+       : [i] "+r" (i), [v] "+Q" (v->counter)                           \
+       : "r" (v)                                                       \
+       : cl);                                                          \
+@@ -272,7 +272,7 @@ static inline void __lse_atomic64_sub(s64 i, atomic64_t *v)
+ }
+ #define ATOMIC64_OP_SUB_RETURN(name, mb, cl...)                               \
+-static inline long __lse_atomic64_sub_return##name(s64 i, atomic64_t *v)      \
++static inline long __lse_atomic64_sub_return##name(s64 i, atomic64_t *v)\
+ {                                                                     \
+       unsigned long tmp;                                              \
+                                                                       \
+-- 
+2.35.1
+
diff --git a/queue-5.10/arm64-atomics-remove-ll-sc-trampolines.patch b/queue-5.10/arm64-atomics-remove-ll-sc-trampolines.patch
new file mode 100644 (file)
index 0000000..9f1475e
--- /dev/null
@@ -0,0 +1,274 @@
+From c1a22d8b6274e5eb17944761e26a17671072cc0d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 17 Aug 2022 16:59:13 +0100
+Subject: arm64: atomics: remove LL/SC trampolines
+
+From: Mark Rutland <mark.rutland@arm.com>
+
+[ Upstream commit b2c3ccbd0011bb3b51d0fec24cb3a5812b1ec8ea ]
+
+When CONFIG_ARM64_LSE_ATOMICS=y, each use of an LL/SC atomic results in
+a fragment of code being generated in a subsection without a clear
+association with its caller. A trampoline in the caller branches to the
+LL/SC atomic with a direct branch, and the atomic directly branches
+back into its trampoline.
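+
+Schematically, the __LL_SC_FALLBACK wrapper (which this patch removes)
+emitted a pattern along these lines (a simplified sketch):
+
+|       b       3f              // trampoline into the subsection
+|       .subsection     1
+| 3:
+|       <LL/SC atomic sequence>
+|       b       4f              // direct branch back to the caller
+|       .previous
+| 4: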
+
+This breaks backtracing, as any PC within the out-of-line fragment will
+be symbolized as an offset from the nearest prior symbol (which may not
+be the function using the atomic), and since the atomic returns with a
+direct branch, the caller's PC may be missing from the backtrace.
+
+For example, with secondary_start_kernel() hacked to contain
+atomic_inc(NULL), the resulting exception can be reported as being taken
+from cpus_are_stuck_in_kernel():
+
+| Unable to handle kernel NULL pointer dereference at virtual address 0000000000000000
+| Mem abort info:
+|   ESR = 0x0000000096000004
+|   EC = 0x25: DABT (current EL), IL = 32 bits
+|   SET = 0, FnV = 0
+|   EA = 0, S1PTW = 0
+|   FSC = 0x04: level 0 translation fault
+| Data abort info:
+|   ISV = 0, ISS = 0x00000004
+|   CM = 0, WnR = 0
+| [0000000000000000] user address but active_mm is swapper
+| Internal error: Oops: 96000004 [#1] PREEMPT SMP
+| Modules linked in:
+| CPU: 1 PID: 0 Comm: swapper/1 Not tainted 5.19.0-11219-geb555cb5b794-dirty #3
+| Hardware name: linux,dummy-virt (DT)
+| pstate: 60000005 (nZCv daif -PAN -UAO -TCO -DIT -SSBS BTYPE=--)
+| pc : cpus_are_stuck_in_kernel+0xa4/0x120
+| lr : secondary_start_kernel+0x164/0x170
+| sp : ffff80000a4cbe90
+| x29: ffff80000a4cbe90 x28: 0000000000000000 x27: 0000000000000000
+| x26: 0000000000000000 x25: 0000000000000000 x24: 0000000000000000
+| x23: 0000000000000000 x22: 0000000000000000 x21: 0000000000000000
+| x20: 0000000000000001 x19: 0000000000000001 x18: 0000000000000008
+| x17: 3030383832343030 x16: 3030303030307830 x15: ffff80000a4cbab0
+| x14: 0000000000000001 x13: 5d31666130663133 x12: 3478305b20313030
+| x11: 3030303030303078 x10: 3020726f73736563 x9 : 726f737365636f72
+| x8 : ffff800009ff2ef0 x7 : 0000000000000003 x6 : 0000000000000000
+| x5 : 0000000000000000 x4 : 0000000000000000 x3 : 0000000000000100
+| x2 : 0000000000000000 x1 : ffff0000029bd880 x0 : 0000000000000000
+| Call trace:
+|  cpus_are_stuck_in_kernel+0xa4/0x120
+|  __secondary_switched+0xb0/0xb4
+| Code: 35ffffa3 17fffc6c d53cd040 f9800011 (885f7c01)
+| ---[ end trace 0000000000000000 ]---
+
+This is confusing and hinders debugging, and will be problematic for
+CONFIG_LIVEPATCH as these cases cannot be unwound reliably.
+
+This is very similar to recent issues with out-of-line exception fixups,
+which were removed in commits:
+
+  35d67794b8828333 ("arm64: lib: __arch_clear_user(): fold fixups into body")
+  4012e0e22739eef9 ("arm64: lib: __arch_copy_from_user(): fold fixups into body")
+  139f9ab73d60cf76 ("arm64: lib: __arch_copy_to_user(): fold fixups into body")
+
+When the trampolines were introduced in commit:
+
+  addfc38672c73efd ("arm64: atomics: avoid out-of-line ll/sc atomics")
+
+The rationale was to improve icache performance by grouping the LL/SC
+atomics together. This has never been measured, and this theoretical
+benefit is outweighed by other factors:
+
+* As the subsections are collapsed into sections at object file
+  granularity, these are spread out throughout the kernel and can share
+  cachelines with unrelated code regardless.
+
+* GCC 12.1.0 has been observed to place the trampoline out-of-line in
+  specialised __ll_sc_*() functions, introducing more branching than was
+  intended.
+
+* Removing the trampolines has been observed to shrink a defconfig
+  kernel Image by 64KiB when building with GCC 12.1.0.
+
+This patch removes the LL/SC trampolines, meaning that the LL/SC atomics
+will be inlined into their callers (or placed in out-of-line functions
+using regular BL/RET pairs). When CONFIG_ARM64_LSE_ATOMICS=y, the LL/SC
+atomics are always called in an unlikely branch, and will be placed in a
+cold portion of the function, so this should have minimal impact to the
+hot paths.
+
+Other than the improved backtracing, there should be no functional
+change as a result of this patch.
+
+Signed-off-by: Mark Rutland <mark.rutland@arm.com>
+Cc: Will Deacon <will@kernel.org>
+Link: https://lore.kernel.org/r/20220817155914.3975112-2-mark.rutland@arm.com
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Stable-dep-of: 031af50045ea ("arm64: cmpxchg_double*: hazard against entire exchange variable")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/arm64/include/asm/atomic_ll_sc.h | 40 ++++++---------------------
+ 1 file changed, 9 insertions(+), 31 deletions(-)
+
+diff --git a/arch/arm64/include/asm/atomic_ll_sc.h b/arch/arm64/include/asm/atomic_ll_sc.h
+index fe0db8d416fb..906e2d8c254c 100644
+--- a/arch/arm64/include/asm/atomic_ll_sc.h
++++ b/arch/arm64/include/asm/atomic_ll_sc.h
+@@ -12,19 +12,6 @@
+ #include <linux/stringify.h>
+-#ifdef CONFIG_ARM64_LSE_ATOMICS
+-#define __LL_SC_FALLBACK(asm_ops)                                     \
+-"     b       3f\n"                                                   \
+-"     .subsection     1\n"                                            \
+-"3:\n"                                                                        \
+-asm_ops "\n"                                                          \
+-"     b       4f\n"                                                   \
+-"     .previous\n"                                                    \
+-"4:\n"
+-#else
+-#define __LL_SC_FALLBACK(asm_ops) asm_ops
+-#endif
+-
+ #ifndef CONFIG_CC_HAS_K_CONSTRAINT
+ #define K
+ #endif
+@@ -43,12 +30,11 @@ __ll_sc_atomic_##op(int i, atomic_t *v)                                    \
+       int result;                                                     \
+                                                                       \
+       asm volatile("// atomic_" #op "\n"                              \
+-      __LL_SC_FALLBACK(                                               \
+       "       prfm    pstl1strm, %2\n"                                \
+       "1:     ldxr    %w0, %2\n"                                      \
+       "       " #asm_op "     %w0, %w0, %w3\n"                        \
+       "       stxr    %w1, %w0, %2\n"                                 \
+-      "       cbnz    %w1, 1b\n")                                     \
++      "       cbnz    %w1, 1b\n"                                      \
+       : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)                \
+       : __stringify(constraint) "r" (i));                             \
+ }
+@@ -61,13 +47,12 @@ __ll_sc_atomic_##op##_return##name(int i, atomic_t *v)                     \
+       int result;                                                     \
+                                                                       \
+       asm volatile("// atomic_" #op "_return" #name "\n"              \
+-      __LL_SC_FALLBACK(                                               \
+       "       prfm    pstl1strm, %2\n"                                \
+       "1:     ld" #acq "xr    %w0, %2\n"                              \
+       "       " #asm_op "     %w0, %w0, %w3\n"                        \
+       "       st" #rel "xr    %w1, %w0, %2\n"                         \
+       "       cbnz    %w1, 1b\n"                                      \
+-      "       " #mb )                                                 \
++      "       " #mb                                                   \
+       : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)                \
+       : __stringify(constraint) "r" (i)                               \
+       : cl);                                                          \
+@@ -83,13 +68,12 @@ __ll_sc_atomic_fetch_##op##name(int i, atomic_t *v)                        \
+       int val, result;                                                \
+                                                                       \
+       asm volatile("// atomic_fetch_" #op #name "\n"                  \
+-      __LL_SC_FALLBACK(                                               \
+       "       prfm    pstl1strm, %3\n"                                \
+       "1:     ld" #acq "xr    %w0, %3\n"                              \
+       "       " #asm_op "     %w1, %w0, %w4\n"                        \
+       "       st" #rel "xr    %w2, %w1, %3\n"                         \
+       "       cbnz    %w2, 1b\n"                                      \
+-      "       " #mb )                                                 \
++      "       " #mb                                                   \
+       : "=&r" (result), "=&r" (val), "=&r" (tmp), "+Q" (v->counter)   \
+       : __stringify(constraint) "r" (i)                               \
+       : cl);                                                          \
+@@ -142,12 +126,11 @@ __ll_sc_atomic64_##op(s64 i, atomic64_t *v)                              \
+       unsigned long tmp;                                              \
+                                                                       \
+       asm volatile("// atomic64_" #op "\n"                            \
+-      __LL_SC_FALLBACK(                                               \
+       "       prfm    pstl1strm, %2\n"                                \
+       "1:     ldxr    %0, %2\n"                                       \
+       "       " #asm_op "     %0, %0, %3\n"                           \
+       "       stxr    %w1, %0, %2\n"                                  \
+-      "       cbnz    %w1, 1b")                                       \
++      "       cbnz    %w1, 1b"                                        \
+       : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)                \
+       : __stringify(constraint) "r" (i));                             \
+ }
+@@ -160,13 +143,12 @@ __ll_sc_atomic64_##op##_return##name(s64 i, atomic64_t *v)               \
+       unsigned long tmp;                                              \
+                                                                       \
+       asm volatile("// atomic64_" #op "_return" #name "\n"            \
+-      __LL_SC_FALLBACK(                                               \
+       "       prfm    pstl1strm, %2\n"                                \
+       "1:     ld" #acq "xr    %0, %2\n"                               \
+       "       " #asm_op "     %0, %0, %3\n"                           \
+       "       st" #rel "xr    %w1, %0, %2\n"                          \
+       "       cbnz    %w1, 1b\n"                                      \
+-      "       " #mb )                                                 \
++      "       " #mb                                                   \
+       : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)                \
+       : __stringify(constraint) "r" (i)                               \
+       : cl);                                                          \
+@@ -182,13 +164,12 @@ __ll_sc_atomic64_fetch_##op##name(s64 i, atomic64_t *v)                  \
+       unsigned long tmp;                                              \
+                                                                       \
+       asm volatile("// atomic64_fetch_" #op #name "\n"                \
+-      __LL_SC_FALLBACK(                                               \
+       "       prfm    pstl1strm, %3\n"                                \
+       "1:     ld" #acq "xr    %0, %3\n"                               \
+       "       " #asm_op "     %1, %0, %4\n"                           \
+       "       st" #rel "xr    %w2, %1, %3\n"                          \
+       "       cbnz    %w2, 1b\n"                                      \
+-      "       " #mb )                                                 \
++      "       " #mb                                                   \
+       : "=&r" (result), "=&r" (val), "=&r" (tmp), "+Q" (v->counter)   \
+       : __stringify(constraint) "r" (i)                               \
+       : cl);                                                          \
+@@ -240,7 +221,6 @@ __ll_sc_atomic64_dec_if_positive(atomic64_t *v)
+       unsigned long tmp;
+       asm volatile("// atomic64_dec_if_positive\n"
+-      __LL_SC_FALLBACK(
+       "       prfm    pstl1strm, %2\n"
+       "1:     ldxr    %0, %2\n"
+       "       subs    %0, %0, #1\n"
+@@ -248,7 +228,7 @@ __ll_sc_atomic64_dec_if_positive(atomic64_t *v)
+       "       stlxr   %w1, %0, %2\n"
+       "       cbnz    %w1, 1b\n"
+       "       dmb     ish\n"
+-      "2:")
++      "2:"
+       : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
+       :
+       : "cc", "memory");
+@@ -274,7 +254,6 @@ __ll_sc__cmpxchg_case_##name##sz(volatile void *ptr,                       \
+               old = (u##sz)old;                                       \
+                                                                       \
+       asm volatile(                                                   \
+-      __LL_SC_FALLBACK(                                               \
+       "       prfm    pstl1strm, %[v]\n"                              \
+       "1:     ld" #acq "xr" #sfx "\t%" #w "[oldval], %[v]\n"          \
+       "       eor     %" #w "[tmp], %" #w "[oldval], %" #w "[old]\n"  \
+@@ -282,7 +261,7 @@ __ll_sc__cmpxchg_case_##name##sz(volatile void *ptr,                       \
+       "       st" #rel "xr" #sfx "\t%w[tmp], %" #w "[new], %[v]\n"    \
+       "       cbnz    %w[tmp], 1b\n"                                  \
+       "       " #mb "\n"                                              \
+-      "2:")                                                           \
++      "2:"                                                            \
+       : [tmp] "=&r" (tmp), [oldval] "=&r" (oldval),                   \
+         [v] "+Q" (*(u##sz *)ptr)                                      \
+       : [old] __stringify(constraint) "r" (old), [new] "r" (new)      \
+@@ -326,7 +305,6 @@ __ll_sc__cmpxchg_double##name(unsigned long old1,                  \
+       unsigned long tmp, ret;                                         \
+                                                                       \
+       asm volatile("// __cmpxchg_double" #name "\n"                   \
+-      __LL_SC_FALLBACK(                                               \
+       "       prfm    pstl1strm, %2\n"                                \
+       "1:     ldxp    %0, %1, %2\n"                                   \
+       "       eor     %0, %0, %3\n"                                   \
+@@ -336,7 +314,7 @@ __ll_sc__cmpxchg_double##name(unsigned long old1,                  \
+       "       st" #rel "xp    %w0, %5, %6, %2\n"                      \
+       "       cbnz    %w0, 1b\n"                                      \
+       "       " #mb "\n"                                              \
+-      "2:")                                                           \
++      "2:"                                                            \
+       : "=&r" (tmp), "=&r" (ret), "+Q" (*(unsigned long *)ptr)        \
+       : "r" (old1), "r" (old2), "r" (new1), "r" (new2)                \
+       : cl);                                                          \
+-- 
+2.35.1
+
diff --git a/queue-5.10/arm64-cmpxchg_double-hazard-against-entire-exchange-.patch b/queue-5.10/arm64-cmpxchg_double-hazard-against-entire-exchange-.patch
new file mode 100644 (file)
index 0000000..87658e2
--- /dev/null
@@ -0,0 +1,185 @@
+From 2b068723e93a5d027b48365979503f22b330f680 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 4 Jan 2023 15:16:26 +0000
+Subject: arm64: cmpxchg_double*: hazard against entire exchange variable
+
+From: Mark Rutland <mark.rutland@arm.com>
+
+[ Upstream commit 031af50045ea97ed4386eb3751ca2c134d0fc911 ]
+
+The inline assembly for arm64's cmpxchg_double*() implementations uses a
++Q constraint to hazard against other accesses to the memory location
+being exchanged. However, the pointer passed to the constraint is a
+pointer to unsigned long, and thus the hazard only applies to the first
+8 bytes of the location.
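+
+That is, with the pre-fix constraint (a sketch of the old form):
+
+|       : ... "+Q" (*(unsigned long *)ptr)
+
+the compiler is only told that the first unsigned long at ptr may be
+read and written by the asm, not the whole 16-byte pair.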
+
+GCC can take advantage of this, assuming that other portions of the
+location are unchanged, leading to a number of potential problems.
+
+This is similar to what we fixed back in commit:
+
+  fee960bed5e857eb ("arm64: xchg: hazard against entire exchange variable")
+
+... but we forgot to adjust cmpxchg_double*() similarly at the same
+time.
+
+The same problem applies, as demonstrated with the following test:
+
+| struct big {
+|         u64 lo, hi;
+| } __aligned(128);
+|
+| unsigned long foo(struct big *b)
+| {
+|         u64 hi_old, hi_new;
+|
+|         hi_old = b->hi;
+|         cmpxchg_double_local(&b->lo, &b->hi, 0x12, 0x34, 0x56, 0x78);
+|         hi_new = b->hi;
+|
+|         return hi_old ^ hi_new;
+| }
+
+... which GCC 12.1.0 compiles as:
+
+| 0000000000000000 <foo>:
+|    0:   d503233f        paciasp
+|    4:   aa0003e4        mov     x4, x0
+|    8:   1400000e        b       40 <foo+0x40>
+|    c:   d2800240        mov     x0, #0x12                       // #18
+|   10:   d2800681        mov     x1, #0x34                       // #52
+|   14:   aa0003e5        mov     x5, x0
+|   18:   aa0103e6        mov     x6, x1
+|   1c:   d2800ac2        mov     x2, #0x56                       // #86
+|   20:   d2800f03        mov     x3, #0x78                       // #120
+|   24:   48207c82        casp    x0, x1, x2, x3, [x4]
+|   28:   ca050000        eor     x0, x0, x5
+|   2c:   ca060021        eor     x1, x1, x6
+|   30:   aa010000        orr     x0, x0, x1
+|   34:   d2800000        mov     x0, #0x0                        // #0    <--- BANG
+|   38:   d50323bf        autiasp
+|   3c:   d65f03c0        ret
+|   40:   d2800240        mov     x0, #0x12                       // #18
+|   44:   d2800681        mov     x1, #0x34                       // #52
+|   48:   d2800ac2        mov     x2, #0x56                       // #86
+|   4c:   d2800f03        mov     x3, #0x78                       // #120
+|   50:   f9800091        prfm    pstl1strm, [x4]
+|   54:   c87f1885        ldxp    x5, x6, [x4]
+|   58:   ca0000a5        eor     x5, x5, x0
+|   5c:   ca0100c6        eor     x6, x6, x1
+|   60:   aa0600a6        orr     x6, x5, x6
+|   64:   b5000066        cbnz    x6, 70 <foo+0x70>
+|   68:   c8250c82        stxp    w5, x2, x3, [x4]
+|   6c:   35ffff45        cbnz    w5, 54 <foo+0x54>
+|   70:   d2800000        mov     x0, #0x0                        // #0     <--- BANG
+|   74:   d50323bf        autiasp
+|   78:   d65f03c0        ret
+
+Notice that at the lines with "BANG" comments, GCC has assumed that the
+higher 8 bytes are unchanged by the cmpxchg_double() call, and that
+`hi_old ^ hi_new` can be reduced to a constant zero, for both LSE and
+LL/SC versions of cmpxchg_double().
+
+This patch fixes the issue by passing a pointer to __uint128_t into the
++Q constraint, ensuring that the compiler hazards against the entire 16
+bytes being modified.
+
+With this change, GCC 12.1.0 compiles the above test as:
+
+| 0000000000000000 <foo>:
+|    0:   f9400407        ldr     x7, [x0, #8]
+|    4:   d503233f        paciasp
+|    8:   aa0003e4        mov     x4, x0
+|    c:   1400000f        b       48 <foo+0x48>
+|   10:   d2800240        mov     x0, #0x12                       // #18
+|   14:   d2800681        mov     x1, #0x34                       // #52
+|   18:   aa0003e5        mov     x5, x0
+|   1c:   aa0103e6        mov     x6, x1
+|   20:   d2800ac2        mov     x2, #0x56                       // #86
+|   24:   d2800f03        mov     x3, #0x78                       // #120
+|   28:   48207c82        casp    x0, x1, x2, x3, [x4]
+|   2c:   ca050000        eor     x0, x0, x5
+|   30:   ca060021        eor     x1, x1, x6
+|   34:   aa010000        orr     x0, x0, x1
+|   38:   f9400480        ldr     x0, [x4, #8]
+|   3c:   d50323bf        autiasp
+|   40:   ca0000e0        eor     x0, x7, x0
+|   44:   d65f03c0        ret
+|   48:   d2800240        mov     x0, #0x12                       // #18
+|   4c:   d2800681        mov     x1, #0x34                       // #52
+|   50:   d2800ac2        mov     x2, #0x56                       // #86
+|   54:   d2800f03        mov     x3, #0x78                       // #120
+|   58:   f9800091        prfm    pstl1strm, [x4]
+|   5c:   c87f1885        ldxp    x5, x6, [x4]
+|   60:   ca0000a5        eor     x5, x5, x0
+|   64:   ca0100c6        eor     x6, x6, x1
+|   68:   aa0600a6        orr     x6, x5, x6
+|   6c:   b5000066        cbnz    x6, 78 <foo+0x78>
+|   70:   c8250c82        stxp    w5, x2, x3, [x4]
+|   74:   35ffff45        cbnz    w5, 5c <foo+0x5c>
+|   78:   f9400480        ldr     x0, [x4, #8]
+|   7c:   d50323bf        autiasp
+|   80:   ca0000e0        eor     x0, x7, x0
+|   84:   d65f03c0        ret
+
+... sampling the high 8 bytes before and after the cmpxchg, and
+performing an EOR, as we'd expect.
+
+For backporting, I've tested this atop linux-4.9.y with GCC 5.5.0. Note
+that linux-4.9.y is the oldest currently supported stable release, and
+mandates GCC 5.1+. Unfortunately I couldn't get a GCC 5.1 binary to run
+on my machines due to library incompatibilities.
+
+I've also used a standalone test to check that we can use a __uint128_t
+pointer in a +Q constraint at least as far back as GCC 4.8.5 and LLVM
+3.9.1.
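+
+A minimal standalone check along these lines (my sketch, not
+necessarily the exact test used) is enough to exercise that:
+
+| void hazard_16b(__uint128_t *p)
+| {
+|         /* empty asm, but the +Q output hazards all 16 bytes at p */
+|         asm volatile("" : "+Q" (*p));
+| }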
+
+Fixes: 5284e1b4bc8a ("arm64: xchg: Implement cmpxchg_double")
+Fixes: e9a4b795652f ("arm64: cmpxchg_dbl: patch in lse instructions when supported by the CPU")
+Reported-by: Boqun Feng <boqun.feng@gmail.com>
+Link: https://lore.kernel.org/lkml/Y6DEfQXymYVgL3oJ@boqun-archlinux/
+Reported-by: Peter Zijlstra <peterz@infradead.org>
+Link: https://lore.kernel.org/lkml/Y6GXoO4qmH9OIZ5Q@hirez.programming.kicks-ass.net/
+Signed-off-by: Mark Rutland <mark.rutland@arm.com>
+Cc: stable@vger.kernel.org
+Cc: Arnd Bergmann <arnd@arndb.de>
+Cc: Catalin Marinas <catalin.marinas@arm.com>
+Cc: Steve Capper <steve.capper@arm.com>
+Cc: Will Deacon <will@kernel.org>
+Link: https://lore.kernel.org/r/20230104151626.3262137-1-mark.rutland@arm.com
+Signed-off-by: Will Deacon <will@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/arm64/include/asm/atomic_ll_sc.h | 2 +-
+ arch/arm64/include/asm/atomic_lse.h   | 2 +-
+ 2 files changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/arch/arm64/include/asm/atomic_ll_sc.h b/arch/arm64/include/asm/atomic_ll_sc.h
+index 906e2d8c254c..abd302e521c0 100644
+--- a/arch/arm64/include/asm/atomic_ll_sc.h
++++ b/arch/arm64/include/asm/atomic_ll_sc.h
+@@ -315,7 +315,7 @@ __ll_sc__cmpxchg_double##name(unsigned long old1,                  \
+       "       cbnz    %w0, 1b\n"                                      \
+       "       " #mb "\n"                                              \
+       "2:"                                                            \
+-      : "=&r" (tmp), "=&r" (ret), "+Q" (*(unsigned long *)ptr)        \
++      : "=&r" (tmp), "=&r" (ret), "+Q" (*(__uint128_t *)ptr)          \
+       : "r" (old1), "r" (old2), "r" (new1), "r" (new2)                \
+       : cl);                                                          \
+                                                                       \
+diff --git a/arch/arm64/include/asm/atomic_lse.h b/arch/arm64/include/asm/atomic_lse.h
+index ab661375835e..28e96118c1e5 100644
+--- a/arch/arm64/include/asm/atomic_lse.h
++++ b/arch/arm64/include/asm/atomic_lse.h
+@@ -403,7 +403,7 @@ __lse__cmpxchg_double##name(unsigned long old1,                            \
+       "       eor     %[old2], %[old2], %[oldval2]\n"                 \
+       "       orr     %[old1], %[old1], %[old2]"                      \
+       : [old1] "+&r" (x0), [old2] "+&r" (x1),                         \
+-        [v] "+Q" (*(unsigned long *)ptr)                              \
++        [v] "+Q" (*(__uint128_t *)ptr)                                \
+       : [new1] "r" (x2), [new2] "r" (x3), [ptr] "r" (x4),             \
+         [oldval1] "r" (oldval1), [oldval2] "r" (oldval2)              \
+       : cl);                                                          \
+-- 
+2.35.1
+
diff --git a/queue-5.10/asoc-wm8904-fix-wrong-outputs-volume-after-power-rea.patch b/queue-5.10/asoc-wm8904-fix-wrong-outputs-volume-after-power-rea.patch
new file mode 100644 (file)
index 0000000..86d602c
--- /dev/null
@@ -0,0 +1,69 @@
+From 47b28a2e433b59ab3e66359daf1112c28e38db6e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 23 Dec 2022 09:02:47 +0100
+Subject: ASoC: wm8904: fix wrong outputs volume after power reactivation
+
+From: Emanuele Ghidoli <emanuele.ghidoli@toradex.com>
+
+[ Upstream commit 472a6309c6467af89dbf660a8310369cc9cb041f ]
+
+Restore the volume after charge pump and PGA activation to ensure
+that volume settings are correctly applied when re-enabling the codec
+from the SND_SOC_BIAS_OFF state.
+The CLASS_W, CHARGE_PUMP and POWER_MANAGEMENT_2 registers affect how
+the volume registers are applied and must be configured first.
+
+Fixes: a91eb199e4dc ("ASoC: Initial WM8904 CODEC driver")
+Link: https://lore.kernel.org/all/c7864c35-738c-a867-a6a6-ddf9f98df7e7@gmail.com/
+Signed-off-by: Emanuele Ghidoli <emanuele.ghidoli@toradex.com>
+Signed-off-by: Francesco Dolcini <francesco.dolcini@toradex.com>
+Acked-by: Charles Keepax <ckeepax@opensource.cirrus.com>
+Link: https://lore.kernel.org/r/20221223080247.7258-1-francesco@dolcini.it
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ sound/soc/codecs/wm8904.c | 7 +++++++
+ 1 file changed, 7 insertions(+)
+
+diff --git a/sound/soc/codecs/wm8904.c b/sound/soc/codecs/wm8904.c
+index 1c360bae5652..cc96c9bdff41 100644
+--- a/sound/soc/codecs/wm8904.c
++++ b/sound/soc/codecs/wm8904.c
+@@ -697,6 +697,7 @@ static int out_pga_event(struct snd_soc_dapm_widget *w,
+       int dcs_mask;
+       int dcs_l, dcs_r;
+       int dcs_l_reg, dcs_r_reg;
++      int an_out_reg;
+       int timeout;
+       int pwr_reg;
+@@ -712,6 +713,7 @@ static int out_pga_event(struct snd_soc_dapm_widget *w,
+               dcs_mask = WM8904_DCS_ENA_CHAN_0 | WM8904_DCS_ENA_CHAN_1;
+               dcs_r_reg = WM8904_DC_SERVO_8;
+               dcs_l_reg = WM8904_DC_SERVO_9;
++              an_out_reg = WM8904_ANALOGUE_OUT1_LEFT;
+               dcs_l = 0;
+               dcs_r = 1;
+               break;
+@@ -720,6 +722,7 @@ static int out_pga_event(struct snd_soc_dapm_widget *w,
+               dcs_mask = WM8904_DCS_ENA_CHAN_2 | WM8904_DCS_ENA_CHAN_3;
+               dcs_r_reg = WM8904_DC_SERVO_6;
+               dcs_l_reg = WM8904_DC_SERVO_7;
++              an_out_reg = WM8904_ANALOGUE_OUT2_LEFT;
+               dcs_l = 2;
+               dcs_r = 3;
+               break;
+@@ -792,6 +795,10 @@ static int out_pga_event(struct snd_soc_dapm_widget *w,
+               snd_soc_component_update_bits(component, reg,
+                                   WM8904_HPL_ENA_OUTP | WM8904_HPR_ENA_OUTP,
+                                   WM8904_HPL_ENA_OUTP | WM8904_HPR_ENA_OUTP);
++
++              /* Update volume, requires PGA to be powered */
++              val = snd_soc_component_read(component, an_out_reg);
++              snd_soc_component_write(component, an_out_reg, val);
+               break;
+       case SND_SOC_DAPM_POST_PMU:
+-- 
+2.35.1
+
diff --git a/queue-5.10/documentation-kvm-add-api-issues-section.patch b/queue-5.10/documentation-kvm-add-api-issues-section.patch
new file mode 100644 (file)
index 0000000..82ab582
--- /dev/null
@@ -0,0 +1,79 @@
+From 047cdac6c83951c6a25e64b37d0ee1d19a8e96d9 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 22 Mar 2022 12:07:12 +0100
+Subject: Documentation: KVM: add API issues section
+
+From: Paolo Bonzini <pbonzini@redhat.com>
+
+[ Upstream commit cde363ab7ca7aea7a853851cd6a6745a9e1aaf5e ]
+
+Add a section to document all the different ways in which the KVM API sucks.
+
+I am sure there are way more; this gives people a place to vent so
+that userspace authors are aware.
+
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Message-Id: <20220322110712.222449-4-pbonzini@redhat.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ Documentation/virt/kvm/api.rst | 46 ++++++++++++++++++++++++++++++++++
+ 1 file changed, 46 insertions(+)
+
+diff --git a/Documentation/virt/kvm/api.rst b/Documentation/virt/kvm/api.rst
+index cd8a58556804..d807994360d4 100644
+--- a/Documentation/virt/kvm/api.rst
++++ b/Documentation/virt/kvm/api.rst
+@@ -6398,3 +6398,49 @@ When enabled, KVM will disable paravirtual features provided to the
+ guest according to the bits in the KVM_CPUID_FEATURES CPUID leaf
+ (0x40000001). Otherwise, a guest may use the paravirtual features
+ regardless of what has actually been exposed through the CPUID leaf.
++
++9. Known KVM API problems
++=========================
++
++In some cases, KVM's API has some inconsistencies or common pitfalls
++that userspace need to be aware of.  This section details some of
++these issues.
++
++Most of them are architecture specific, so the section is split by
++architecture.
++
++9.1. x86
++--------
++
++``KVM_GET_SUPPORTED_CPUID`` issues
++^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
++
++In general, ``KVM_GET_SUPPORTED_CPUID`` is designed so that it is possible
++to take its result and pass it directly to ``KVM_SET_CPUID2``.  This section
++documents some cases in which that requires some care.
++
++Local APIC features
++~~~~~~~~~~~~~~~~~~~
++
++CPU[EAX=1]:ECX[21] (X2APIC) is reported by ``KVM_GET_SUPPORTED_CPUID``,
++but it can only be enabled if ``KVM_CREATE_IRQCHIP`` or
++``KVM_ENABLE_CAP(KVM_CAP_IRQCHIP_SPLIT)`` are used to enable in-kernel emulation of
++the local APIC.
++
++The same is true for the ``KVM_FEATURE_PV_UNHALT`` paravirtualized feature.
++
++CPU[EAX=1]:ECX[24] (TSC_DEADLINE) is not reported by ``KVM_GET_SUPPORTED_CPUID``.
++It can be enabled if ``KVM_CAP_TSC_DEADLINE_TIMER`` is present and the kernel
++has enabled in-kernel emulation of the local APIC.
++
++Obsolete ioctls and capabilities
++^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
++
++KVM_CAP_DISABLE_QUIRKS does not let userspace know which quirks are actually
++available.  Use ``KVM_CHECK_EXTENSION(KVM_CAP_DISABLE_QUIRKS2)`` instead if
++available.
++
++Ordering of KVM_GET_*/KVM_SET_* ioctls
++^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
++
++TBD
+-- 
+2.35.1
+
diff --git a/queue-5.10/drm-virtio-fix-gem-handle-creation-uaf.patch b/queue-5.10/drm-virtio-fix-gem-handle-creation-uaf.patch
new file mode 100644 (file)
index 0000000..49ffd16
--- /dev/null
@@ -0,0 +1,53 @@
+From a31f24564f926552a0e0443c085c5e3f74fbe047 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 16 Dec 2022 15:33:55 -0800
+Subject: drm/virtio: Fix GEM handle creation UAF
+
+From: Rob Clark <robdclark@chromium.org>
+
+[ Upstream commit 52531258318ed59a2dc5a43df2eaf0eb1d65438e ]
+
+Userspace can guess the handle value and try to race GEM object creation
+with handle close, resulting in a use-after-free if we dereference the
+object after dropping the handle's reference.  For that reason, dropping
+the handle's reference must be done *after* we are done dereferencing
+the object.
+
+Signed-off-by: Rob Clark <robdclark@chromium.org>
+Reviewed-by: Chia-I Wu <olvaffe@gmail.com>
+Fixes: 62fb7a5e1096 ("virtio-gpu: add 3d/virgl support")
+Cc: stable@vger.kernel.org
+Signed-off-by: Dmitry Osipenko <dmitry.osipenko@collabora.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20221216233355.542197-2-robdclark@gmail.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/virtio/virtgpu_ioctl.c | 10 +++++++++-
+ 1 file changed, 9 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/virtio/virtgpu_ioctl.c b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
+index 33b8ebab178a..36efa273155d 100644
+--- a/drivers/gpu/drm/virtio/virtgpu_ioctl.c
++++ b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
+@@ -279,10 +279,18 @@ static int virtio_gpu_resource_create_ioctl(struct drm_device *dev, void *data,
+               drm_gem_object_release(obj);
+               return ret;
+       }
+-      drm_gem_object_put(obj);
+       rc->res_handle = qobj->hw_res_handle; /* similiar to a VM address */
+       rc->bo_handle = handle;
++
++      /*
++       * The handle owns the reference now.  But we must drop our
++       * remaining reference *after* we no longer need to dereference
++       * the obj.  Otherwise userspace could guess the handle and
++       * race closing it from another thread.
++       */
++      drm_gem_object_put(obj);
++
+       return 0;
+ }
+-- 
+2.35.1
+
diff --git a/queue-5.10/efi-fix-null-deref-in-init-error-path.patch b/queue-5.10/efi-fix-null-deref-in-init-error-path.patch
new file mode 100644 (file)
index 0000000..bc1f3dc
--- /dev/null
@@ -0,0 +1,56 @@
+From 3420a0b580202322fc84cd30c94c70b78d85fd85 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 19 Dec 2022 10:10:04 +0100
+Subject: efi: fix NULL-deref in init error path
+
+From: Johan Hovold <johan+linaro@kernel.org>
+
+[ Upstream commit 703c13fe3c9af557d312f5895ed6a5fda2711104 ]
+
+In cases where runtime services are not supported or have been disabled,
+the runtime services workqueue will never have been allocated.
+
+To avoid dereferencing a NULL pointer, do not try to destroy the
+workqueue unconditionally in the unlikely event that EFI
+initialisation fails.
+
+Fixes: 98086df8b70c ("efi: add missed destroy_workqueue when efisubsys_init fails")
+Cc: stable@vger.kernel.org
+Cc: Li Heng <liheng40@huawei.com>
+Signed-off-by: Johan Hovold <johan+linaro@kernel.org>
+Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/firmware/efi/efi.c | 9 ++++++---
+ 1 file changed, 6 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
+index ba03f5a4b30c..a2765d668856 100644
+--- a/drivers/firmware/efi/efi.c
++++ b/drivers/firmware/efi/efi.c
+@@ -385,8 +385,8 @@ static int __init efisubsys_init(void)
+       efi_kobj = kobject_create_and_add("efi", firmware_kobj);
+       if (!efi_kobj) {
+               pr_err("efi: Firmware registration failed.\n");
+-              destroy_workqueue(efi_rts_wq);
+-              return -ENOMEM;
++              error = -ENOMEM;
++              goto err_destroy_wq;
+       }
+       if (efi_rt_services_supported(EFI_RT_SUPPORTED_GET_VARIABLE |
+@@ -429,7 +429,10 @@ static int __init efisubsys_init(void)
+               generic_ops_unregister();
+ err_put:
+       kobject_put(efi_kobj);
+-      destroy_workqueue(efi_rts_wq);
++err_destroy_wq:
++      if (efi_rts_wq)
++              destroy_workqueue(efi_rts_wq);
++
+       return error;
+ }
+-- 
+2.35.1
+
diff --git a/queue-5.10/hvc-xen-lock-console-list-traversal.patch b/queue-5.10/hvc-xen-lock-console-list-traversal.patch
new file mode 100644 (file)
index 0000000..05cf9ae
--- /dev/null
@@ -0,0 +1,186 @@
+From e12fbe7157d16fef94929665275f59b9f6bcbe9d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 30 Nov 2022 17:36:02 +0100
+Subject: hvc/xen: lock console list traversal
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Roger Pau Monne <roger.pau@citrix.com>
+
+[ Upstream commit c0dccad87cf68fc6012aec7567e354353097ec1a ]
+
+The currently lockless access to the xen console list in
+vtermno_to_xencons() is incorrect, as additions and removals from the
+list can happen anytime, and as such the traversal of the list to get
+the private console data for a given termno needs to happen with the
+lock held.  Note users that modify the list already do so with the
+lock taken.
+
+Adjust the current lock takers to use the _irq{save,restore} helpers,
+since the context in which vtermno_to_xencons() is called can have
+interrupts disabled; the existing callers are therefore switched to
+disable interrupts in the locked region.  I haven't checked whether
+they could instead use the plain _irq variant, as I think it's safer
+to use _irq{save,restore} upfront.
+
+While there, switch from using list_for_each_entry_safe to
+list_for_each_entry: the current entry cursor won't be removed as
+part of the code in the loop body, so using the _safe variant is
+pointless.
+
+Fixes: 02e19f9c7cac ('hvc_xen: implement multiconsole support')
+Signed-off-by: Roger Pau Monné <roger.pau@citrix.com>
+Reviewed-by: Stefano Stabellini <sstabellini@kernel.org>
+Link: https://lore.kernel.org/r/20221130163611.14686-1-roger.pau@citrix.com
+Signed-off-by: Juergen Gross <jgross@suse.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/tty/hvc/hvc_xen.c | 46 ++++++++++++++++++++++++---------------
+ 1 file changed, 29 insertions(+), 17 deletions(-)
+
+diff --git a/drivers/tty/hvc/hvc_xen.c b/drivers/tty/hvc/hvc_xen.c
+index 7948660e042f..6f387a4fd96a 100644
+--- a/drivers/tty/hvc/hvc_xen.c
++++ b/drivers/tty/hvc/hvc_xen.c
+@@ -52,17 +52,22 @@ static DEFINE_SPINLOCK(xencons_lock);
+ static struct xencons_info *vtermno_to_xencons(int vtermno)
+ {
+-      struct xencons_info *entry, *n, *ret = NULL;
++      struct xencons_info *entry, *ret = NULL;
++      unsigned long flags;
+-      if (list_empty(&xenconsoles))
+-                      return NULL;
++      spin_lock_irqsave(&xencons_lock, flags);
++      if (list_empty(&xenconsoles)) {
++              spin_unlock_irqrestore(&xencons_lock, flags);
++              return NULL;
++      }
+-      list_for_each_entry_safe(entry, n, &xenconsoles, list) {
++      list_for_each_entry(entry, &xenconsoles, list) {
+               if (entry->vtermno == vtermno) {
+                       ret  = entry;
+                       break;
+               }
+       }
++      spin_unlock_irqrestore(&xencons_lock, flags);
+       return ret;
+ }
+@@ -223,7 +228,7 @@ static int xen_hvm_console_init(void)
+ {
+       int r;
+       uint64_t v = 0;
+-      unsigned long gfn;
++      unsigned long gfn, flags;
+       struct xencons_info *info;
+       if (!xen_hvm_domain())
+@@ -258,9 +263,9 @@ static int xen_hvm_console_init(void)
+               goto err;
+       info->vtermno = HVC_COOKIE;
+-      spin_lock(&xencons_lock);
++      spin_lock_irqsave(&xencons_lock, flags);
+       list_add_tail(&info->list, &xenconsoles);
+-      spin_unlock(&xencons_lock);
++      spin_unlock_irqrestore(&xencons_lock, flags);
+       return 0;
+ err:
+@@ -283,6 +288,7 @@ static int xencons_info_pv_init(struct xencons_info *info, int vtermno)
+ static int xen_pv_console_init(void)
+ {
+       struct xencons_info *info;
++      unsigned long flags;
+       if (!xen_pv_domain())
+               return -ENODEV;
+@@ -299,9 +305,9 @@ static int xen_pv_console_init(void)
+               /* already configured */
+               return 0;
+       }
+-      spin_lock(&xencons_lock);
++      spin_lock_irqsave(&xencons_lock, flags);
+       xencons_info_pv_init(info, HVC_COOKIE);
+-      spin_unlock(&xencons_lock);
++      spin_unlock_irqrestore(&xencons_lock, flags);
+       return 0;
+ }
+@@ -309,6 +315,7 @@ static int xen_pv_console_init(void)
+ static int xen_initial_domain_console_init(void)
+ {
+       struct xencons_info *info;
++      unsigned long flags;
+       if (!xen_initial_domain())
+               return -ENODEV;
+@@ -323,9 +330,9 @@ static int xen_initial_domain_console_init(void)
+       info->irq = bind_virq_to_irq(VIRQ_CONSOLE, 0, false);
+       info->vtermno = HVC_COOKIE;
+-      spin_lock(&xencons_lock);
++      spin_lock_irqsave(&xencons_lock, flags);
+       list_add_tail(&info->list, &xenconsoles);
+-      spin_unlock(&xencons_lock);
++      spin_unlock_irqrestore(&xencons_lock, flags);
+       return 0;
+ }
+@@ -380,10 +387,12 @@ static void xencons_free(struct xencons_info *info)
+ static int xen_console_remove(struct xencons_info *info)
+ {
++      unsigned long flags;
++
+       xencons_disconnect_backend(info);
+-      spin_lock(&xencons_lock);
++      spin_lock_irqsave(&xencons_lock, flags);
+       list_del(&info->list);
+-      spin_unlock(&xencons_lock);
++      spin_unlock_irqrestore(&xencons_lock, flags);
+       if (info->xbdev != NULL)
+               xencons_free(info);
+       else {
+@@ -464,6 +473,7 @@ static int xencons_probe(struct xenbus_device *dev,
+ {
+       int ret, devid;
+       struct xencons_info *info;
++      unsigned long flags;
+       devid = dev->nodename[strlen(dev->nodename) - 1] - '0';
+       if (devid == 0)
+@@ -482,9 +492,9 @@ static int xencons_probe(struct xenbus_device *dev,
+       ret = xencons_connect_backend(dev, info);
+       if (ret < 0)
+               goto error;
+-      spin_lock(&xencons_lock);
++      spin_lock_irqsave(&xencons_lock, flags);
+       list_add_tail(&info->list, &xenconsoles);
+-      spin_unlock(&xencons_lock);
++      spin_unlock_irqrestore(&xencons_lock, flags);
+       return 0;
+@@ -583,10 +593,12 @@ static int __init xen_hvc_init(void)
+       info->hvc = hvc_alloc(HVC_COOKIE, info->irq, ops, 256);
+       if (IS_ERR(info->hvc)) {
++              unsigned long flags;
++
+               r = PTR_ERR(info->hvc);
+-              spin_lock(&xencons_lock);
++              spin_lock_irqsave(&xencons_lock, flags);
+               list_del(&info->list);
+-              spin_unlock(&xencons_lock);
++              spin_unlock_irqrestore(&xencons_lock, flags);
+               if (info->irq)
+                       unbind_from_irqhandler(info->irq, NULL);
+               kfree(info);
+-- 
+2.35.1
+
diff --git a/queue-5.10/iommu-mediatek-v1-add-error-handle-for-mtk_iommu_pro.patch b/queue-5.10/iommu-mediatek-v1-add-error-handle-for-mtk_iommu_pro.patch
new file mode 100644 (file)
index 0000000..3003ab8
--- /dev/null
@@ -0,0 +1,58 @@
+From d31f627cd9477e096e2e81045343bb83b5fc31eb Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 12 Apr 2021 14:48:43 +0800
+Subject: iommu/mediatek-v1: Add error handle for mtk_iommu_probe
+
+From: Yong Wu <yong.wu@mediatek.com>
+
+[ Upstream commit ac304c070c54413efabf29f9e73c54576d329774 ]
+
+The original code lacks error handling in mtk_iommu_probe(). This
+patch adds it.
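+
+The probe error path gains the usual unwind labels (a sketch of the
+hunk below):
+
+| out_bus_set_null:
+|       bus_set_iommu(&platform_bus_type, NULL);
+| out_dev_unreg:
+|       iommu_device_unregister(&data->iommu);
+| out_sysfs_remove:
+|       iommu_device_sysfs_remove(&data->iommu);
+|       return ret;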
+
+Signed-off-by: Yong Wu <yong.wu@mediatek.com>
+Link: https://lore.kernel.org/r/20210412064843.11614-2-yong.wu@mediatek.com
+Signed-off-by: Joerg Roedel <jroedel@suse.de>
+Stable-dep-of: 142e821f68cf ("iommu/mediatek-v1: Fix an error handling path in mtk_iommu_v1_probe()")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/iommu/mtk_iommu_v1.c | 22 ++++++++++++++++++----
+ 1 file changed, 18 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/iommu/mtk_iommu_v1.c b/drivers/iommu/mtk_iommu_v1.c
+index 82ddfe9170d4..4ed8bc755f5c 100644
+--- a/drivers/iommu/mtk_iommu_v1.c
++++ b/drivers/iommu/mtk_iommu_v1.c
+@@ -624,12 +624,26 @@ static int mtk_iommu_probe(struct platform_device *pdev)
+       ret = iommu_device_register(&data->iommu);
+       if (ret)
+-              return ret;
++              goto out_sysfs_remove;
+-      if (!iommu_present(&platform_bus_type))
+-              bus_set_iommu(&platform_bus_type,  &mtk_iommu_ops);
++      if (!iommu_present(&platform_bus_type)) {
++              ret = bus_set_iommu(&platform_bus_type,  &mtk_iommu_ops);
++              if (ret)
++                      goto out_dev_unreg;
++      }
+-      return component_master_add_with_match(dev, &mtk_iommu_com_ops, match);
++      ret = component_master_add_with_match(dev, &mtk_iommu_com_ops, match);
++      if (ret)
++              goto out_bus_set_null;
++      return ret;
++
++out_bus_set_null:
++      bus_set_iommu(&platform_bus_type, NULL);
++out_dev_unreg:
++      iommu_device_unregister(&data->iommu);
++out_sysfs_remove:
++      iommu_device_sysfs_remove(&data->iommu);
++      return ret;
+ }
+ static int mtk_iommu_remove(struct platform_device *pdev)
+-- 
+2.35.1
+
diff --git a/queue-5.10/iommu-mediatek-v1-fix-an-error-handling-path-in-mtk_.patch b/queue-5.10/iommu-mediatek-v1-fix-an-error-handling-path-in-mtk_.patch
new file mode 100644 (file)
index 0000000..e9c5823
--- /dev/null
@@ -0,0 +1,52 @@
+From 7f0ac7476321abca3ec318aa68b2bbe4471ae174 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 19 Dec 2022 19:06:22 +0100
+Subject: iommu/mediatek-v1: Fix an error handling path in mtk_iommu_v1_probe()
+
+From: Christophe JAILLET <christophe.jaillet@wanadoo.fr>
+
+[ Upstream commit 142e821f68cf5da79ce722cb9c1323afae30e185 ]
+
+A clk, prepared and enabled in mtk_iommu_v1_hw_init(), is not released in
+the error handling path of mtk_iommu_v1_probe().
+
+Add the corresponding clk_disable_unprepare(), as already done in the
+remove function.
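+
+The missing step slots into the existing unwind chain (a sketch of the
+hunk below):
+
+| out_clk_unprepare:
+|       clk_disable_unprepare(data->bclk);
+|       return ret;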
+
+Fixes: b17336c55d89 ("iommu/mediatek: add support for mtk iommu generation one HW")
+Signed-off-by: Christophe JAILLET <christophe.jaillet@wanadoo.fr>
+Reviewed-by: Yong Wu <yong.wu@mediatek.com>
+Reviewed-by: AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>
+Reviewed-by: Matthias Brugger <matthias.bgg@gmail.com>
+Link: https://lore.kernel.org/r/593e7b7d97c6e064b29716b091a9d4fd122241fb.1671473163.git.christophe.jaillet@wanadoo.fr
+Signed-off-by: Joerg Roedel <jroedel@suse.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/iommu/mtk_iommu_v1.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/iommu/mtk_iommu_v1.c b/drivers/iommu/mtk_iommu_v1.c
+index 4ed8bc755f5c..2abbdd71d8d9 100644
+--- a/drivers/iommu/mtk_iommu_v1.c
++++ b/drivers/iommu/mtk_iommu_v1.c
+@@ -618,7 +618,7 @@ static int mtk_iommu_probe(struct platform_device *pdev)
+       ret = iommu_device_sysfs_add(&data->iommu, &pdev->dev, NULL,
+                                    dev_name(&pdev->dev));
+       if (ret)
+-              return ret;
++              goto out_clk_unprepare;
+       iommu_device_set_ops(&data->iommu, &mtk_iommu_ops);
+@@ -643,6 +643,8 @@ static int mtk_iommu_probe(struct platform_device *pdev)
+       iommu_device_unregister(&data->iommu);
+ out_sysfs_remove:
+       iommu_device_sysfs_remove(&data->iommu);
++out_clk_unprepare:
++      clk_disable_unprepare(data->bclk);
+       return ret;
+ }
+-- 
+2.35.1
+
diff --git a/queue-5.10/kvm-x86-do-not-return-host-topology-information-from.patch b/queue-5.10/kvm-x86-do-not-return-host-topology-information-from.patch
new file mode 100644 (file)
index 0000000..128c6be
--- /dev/null
@@ -0,0 +1,123 @@
+From 15e39add84b9a5e1f10cc3629d1a0406313e2208 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 22 Oct 2022 04:17:53 -0400
+Subject: KVM: x86: Do not return host topology information from
+ KVM_GET_SUPPORTED_CPUID
+
+From: Paolo Bonzini <pbonzini@redhat.com>
+
+[ Upstream commit 45e966fcca03ecdcccac7cb236e16eea38cc18af ]
+
+Passing the host topology to the guest is almost certainly wrong
+and will confuse the scheduler.  In addition, several fields of
+these CPUID leaves vary on each processor; it is simply impossible to
+return the right values from KVM_GET_SUPPORTED_CPUID in such a way that
+they can be passed to KVM_SET_CPUID2.
+
+The values that will most likely prevent confusion are all zeroes.
+Userspace will have to override them anyway if it wishes to present a
+specific topology to the guest.
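+
+For example, a VMM that wants a specific topology could patch the
+affected leaves before handing them to KVM (an illustrative sketch;
+find_entry() is a hypothetical helper and error handling is omitted):
+
+| struct kvm_cpuid_entry2 *e = find_entry(cpuid, 0x8000001e, 0);
+|
+| e->eax = apic_id;          /* extended APIC ID */
+| e->ebx = core_id & 0xff;   /* core id in bits 7:0 of EBX */
+| e->ecx = node_id & 0xff;   /* node id in bits 7:0 of ECX */
+| ioctl(vcpu_fd, KVM_SET_CPUID2, cpuid);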
+
+Cc: stable@vger.kernel.org
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ Documentation/virt/kvm/api.rst | 14 ++++++++++++++
+ arch/x86/kvm/cpuid.c           | 32 ++++++++++++++++----------------
+ 2 files changed, 30 insertions(+), 16 deletions(-)
+
+diff --git a/Documentation/virt/kvm/api.rst b/Documentation/virt/kvm/api.rst
+index d807994360d4..2b4b64797191 100644
+--- a/Documentation/virt/kvm/api.rst
++++ b/Documentation/virt/kvm/api.rst
+@@ -6433,6 +6433,20 @@ CPU[EAX=1]:ECX[24] (TSC_DEADLINE) is not reported by ``KVM_GET_SUPPORTED_CPUID``
+ It can be enabled if ``KVM_CAP_TSC_DEADLINE_TIMER`` is present and the kernel
+ has enabled in-kernel emulation of the local APIC.
++CPU topology
++~~~~~~~~~~~~
++
++Several CPUID values include topology information for the host CPU:
++0x0b and 0x1f for Intel systems, 0x8000001e for AMD systems.  Different
++versions of KVM return different values for this information and userspace
++should not rely on it.  Currently they return all zeroes.
++
++If userspace wishes to set up a guest topology, it should be careful that
++the values of these three leaves differ for each CPU.  In particular,
++the APIC ID is found in EDX for all subleaves of 0x0b and 0x1f, and in EAX
++for 0x8000001e; the latter also encodes the core id and node id in bits
++7:0 of EBX and ECX respectively.
++
+ Obsolete ioctls and capabilities
+ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
+index 06a776fdb90c..de4b171cb76b 100644
+--- a/arch/x86/kvm/cpuid.c
++++ b/arch/x86/kvm/cpuid.c
+@@ -511,16 +511,22 @@ struct kvm_cpuid_array {
+       int nent;
+ };
++static struct kvm_cpuid_entry2 *get_next_cpuid(struct kvm_cpuid_array *array)
++{
++      if (array->nent >= array->maxnent)
++              return NULL;
++
++      return &array->entries[array->nent++];
++}
++
+ static struct kvm_cpuid_entry2 *do_host_cpuid(struct kvm_cpuid_array *array,
+                                             u32 function, u32 index)
+ {
+-      struct kvm_cpuid_entry2 *entry;
++      struct kvm_cpuid_entry2 *entry = get_next_cpuid(array);
+-      if (array->nent >= array->maxnent)
++      if (!entry)
+               return NULL;
+-      entry = &array->entries[array->nent++];
+-
+       entry->function = function;
+       entry->index = index;
+       entry->flags = 0;
+@@ -698,22 +704,13 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function)
+               entry->edx = edx.full;
+               break;
+       }
+-      /*
+-       * Per Intel's SDM, the 0x1f is a superset of 0xb,
+-       * thus they can be handled by common code.
+-       */
+       case 0x1f:
+       case 0xb:
+               /*
+-               * Populate entries until the level type (ECX[15:8]) of the
+-               * previous entry is zero.  Note, CPUID EAX.{0x1f,0xb}.0 is
+-               * the starting entry, filled by the primary do_host_cpuid().
++               * No topology; a valid topology is indicated by the presence
++               * of subleaf 1.
+                */
+-              for (i = 1; entry->ecx & 0xff00; ++i) {
+-                      entry = do_host_cpuid(array, function, i);
+-                      if (!entry)
+-                              goto out;
+-              }
++              entry->eax = entry->ebx = entry->ecx = 0;
+               break;
+       case 0xd:
+               entry->eax &= supported_xcr0;
+@@ -866,6 +863,9 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function)
+               entry->ebx = entry->ecx = entry->edx = 0;
+               break;
+       case 0x8000001e:
++              /* Do not return host topology information.  */
++              entry->eax = entry->ebx = entry->ecx = 0;
++              entry->edx = 0; /* reserved */
+               break;
+       /* Support memory encryption cpuid if host supports it */
+       case 0x8000001F:
+-- 
+2.35.1
+
diff --git a/queue-5.10/mm-always-release-pages-to-the-buddy-allocator-in-me.patch b/queue-5.10/mm-always-release-pages-to-the-buddy-allocator-in-me.patch
new file mode 100644 (file)
index 0000000..e1a15e3
--- /dev/null
@@ -0,0 +1,96 @@
+From 4e69da8be823c653cb6f91ccaf9ba5318fed4d4f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 6 Jan 2023 22:22:44 +0000
+Subject: mm: Always release pages to the buddy allocator in
+ memblock_free_late().
+
+From: Aaron Thompson <dev@aaront.org>
+
+[ Upstream commit 115d9d77bb0f9152c60b6e8646369fa7f6167593 ]
+
+If CONFIG_DEFERRED_STRUCT_PAGE_INIT is enabled, memblock_free_pages()
+only releases pages to the buddy allocator if they are not in the
+deferred range. This is correct for free pages (as defined by
+for_each_free_mem_pfn_range_in_zone()) because free pages in the
+deferred range will be initialized and released as part of the deferred
+init process. memblock_free_pages() is called by memblock_free_late(),
+which is used to free reserved ranges after memblock_free_all() has
+run. All pages in reserved ranges have been initialized at that point,
+and accordingly, those pages are not touched by the deferred init
+process. This means that currently, if the pages that
+memblock_free_late() intends to release are in the deferred range, they
+will never be released to the buddy allocator. They will forever be
+reserved.
+
+In addition, memblock_free_pages() calls kmsan_memblock_free_pages(),
+which is also correct for free pages but is not correct for reserved
+pages. KMSAN metadata for reserved pages is initialized by
+kmsan_init_shadow(), which runs shortly before memblock_free_all().
+
+For both of these reasons, memblock_free_pages() should only be called
+for free pages, and memblock_free_late() should call __free_pages_core()
+directly instead.
+
+One case where this issue can occur in the wild is EFI boot on
+x86_64. The x86 EFI code reserves all EFI boot services memory ranges
+via memblock_reserve() and frees them later via memblock_free_late()
+(efi_reserve_boot_services() and efi_free_boot_services(),
+respectively). If any of those ranges happens to fall within the
+deferred init range, the pages will not be released and that memory will
+be unavailable.
+
+For example, on an Amazon EC2 t3.micro VM (1 GB) booting via EFI:
+
+v6.2-rc2:
+  # grep -E 'Node|spanned|present|managed' /proc/zoneinfo
+  Node 0, zone      DMA
+          spanned  4095
+          present  3999
+          managed  3840
+  Node 0, zone    DMA32
+          spanned  246652
+          present  245868
+          managed  178867
+
+v6.2-rc2 + patch:
+  # grep -E 'Node|spanned|present|managed' /proc/zoneinfo
+  Node 0, zone      DMA
+          spanned  4095
+          present  3999
+          managed  3840
+  Node 0, zone    DMA32
+          spanned  246652
+          present  245868
+          managed  222816   # +43,949 pages
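+
+That is, roughly 43,949 * 4 KiB ~= 172 MiB of memory is recovered on
+this 1 GB instance.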
+
+Fixes: 3a80a7fa7989 ("mm: meminit: initialise a subset of struct pages if CONFIG_DEFERRED_STRUCT_PAGE_INIT is set")
+Signed-off-by: Aaron Thompson <dev@aaront.org>
+Link: https://lore.kernel.org/r/01010185892de53e-e379acfb-7044-4b24-b30a-e2657c1ba989-000000@us-west-2.amazonses.com
+Signed-off-by: Mike Rapoport (IBM) <rppt@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ mm/memblock.c | 8 +++++++-
+ 1 file changed, 7 insertions(+), 1 deletion(-)
+
+diff --git a/mm/memblock.c b/mm/memblock.c
+index f72d53957033..f6a4dffb9a88 100644
+--- a/mm/memblock.c
++++ b/mm/memblock.c
+@@ -1597,7 +1597,13 @@ void __init __memblock_free_late(phys_addr_t base, phys_addr_t size)
+       end = PFN_DOWN(base + size);
+       for (; cursor < end; cursor++) {
+-              memblock_free_pages(pfn_to_page(cursor), cursor, 0);
++              /*
++               * Reserved pages are always initialized by the end of
++               * memblock_free_all() (by memmap_init() and, if deferred
++               * initialization is enabled, memmap_init_reserved_pages()), so
++               * these pages can be released directly to the buddy allocator.
++               */
++              __free_pages_core(pfn_to_page(cursor), 0);
+               totalram_pages_inc();
+       }
+ }
+-- 
+2.35.1
+
diff --git a/queue-5.10/net-mlx5-fix-ptp-max-frequency-adjustment-range.patch b/queue-5.10/net-mlx5-fix-ptp-max-frequency-adjustment-range.patch
new file mode 100644 (file)
index 0000000..8bb1f6f
--- /dev/null
@@ -0,0 +1,41 @@
+From f92d21a61f695e84042f1802b7ea700f118664e6 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 5 Dec 2022 14:26:09 -0800
+Subject: net/mlx5: Fix ptp max frequency adjustment range
+
+From: Rahul Rameshbabu <rrameshbabu@nvidia.com>
+
+[ Upstream commit fe91d57277eef8bb4aca05acfa337b4a51d0bba4 ]
+
+.max_adj of ptp_clock_info acts as an absolute value for the amount in
+ppb that can be set for a single call of .adjfine. This means that a
+single call to .adjfine cannot adjust by more than .max_adj or by less
+than -(.max_adj). Provide the correct value for the maximum frequency
+adjustment supported by the devices.
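+
+For scale, the corrected limit of 50000000 ppb corresponds to a
+maximum adjustment of +/-5% per call (5e7 / 1e9 = 0.05).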
+
+Fixes: 3d8c38af1493 ("net/mlx5e: Add PTP Hardware Clock (PHC) support")
+Signed-off-by: Rahul Rameshbabu <rrameshbabu@nvidia.com>
+Reviewed-by: Gal Pressman <gal@nvidia.com>
+Reviewed-by: Tariq Toukan <tariqt@nvidia.com>
+Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
+index c70c1f0ca0c1..44a434b1178b 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
+@@ -440,7 +440,7 @@ static int mlx5_ptp_verify(struct ptp_clock_info *ptp, unsigned int pin,
+ static const struct ptp_clock_info mlx5_ptp_clock_info = {
+       .owner          = THIS_MODULE,
+       .name           = "mlx5_ptp",
+-      .max_adj        = 100000000,
++      .max_adj        = 50000000,
+       .n_alarm        = 0,
+       .n_ext_ts       = 0,
+       .n_per_out      = 0,
+-- 
+2.35.1
+
diff --git a/queue-5.10/net-mlx5e-don-t-support-encap-rules-with-gbp-option.patch b/queue-5.10/net-mlx5e-don-t-support-encap-rules-with-gbp-option.patch
new file mode 100644 (file)
index 0000000..437eac0
--- /dev/null
@@ -0,0 +1,40 @@
+From d8a56ec1d0fc9a7d42e44d10e594584db1d26e87 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 27 Dec 2022 04:54:09 +0200
+Subject: net/mlx5e: Don't support encap rules with gbp option
+
+From: Gavin Li <gavinl@nvidia.com>
+
+[ Upstream commit d515d63cae2cd186acf40deaa8ef33067bb7f637 ]
+
+Previously, encap rules with the GBP option would be offloaded by
+mistake, even though the driver does not support offloading the GBP
+option.
+
+To fix this issue, check whether the encap rule carries the GBP option
+and do not offload the rule if it does.
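+
+Since GBP is carried as a VXLAN option (TUNNEL_VXLAN_OPT), the check
+amounts to (sketch of the hunk below):
+
+|       if (tun_key->tun_flags & TUNNEL_VXLAN_OPT)
+|               return -EOPNOTSUPP;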
+
+Fixes: d8f9dfae49ce ("net: sched: allow flower to match vxlan options")
+Signed-off-by: Gavin Li <gavinl@nvidia.com>
+Reviewed-by: Maor Dickman <maord@nvidia.com>
+Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_vxlan.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_vxlan.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_vxlan.c
+index 038a0f1cecec..e44281ae570d 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_vxlan.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_vxlan.c
+@@ -88,6 +88,8 @@ static int mlx5e_gen_ip_tunnel_header_vxlan(char buf[],
+       struct udphdr *udp = (struct udphdr *)(buf);
+       struct vxlanhdr *vxh;
++      if (tun_key->tun_flags & TUNNEL_VXLAN_OPT)
++              return -EOPNOTSUPP;
+       vxh = (struct vxlanhdr *)((char *)udp + sizeof(struct udphdr));
+       *ip_proto = IPPROTO_UDP;
+-- 
+2.35.1
+
diff --git a/queue-5.10/net-sched-act_mpls-fix-warning-during-failed-attribu.patch b/queue-5.10/net-sched-act_mpls-fix-warning-during-failed-attribu.patch
new file mode 100644 (file)
index 0000000..adeff74
--- /dev/null
@@ -0,0 +1,109 @@
+From c27d99c5027dd6a991c5a7a20d536b48a0d1b0e4 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 7 Jan 2023 19:10:04 +0200
+Subject: net/sched: act_mpls: Fix warning during failed attribute validation
+
+From: Ido Schimmel <idosch@nvidia.com>
+
+[ Upstream commit 9e17f99220d111ea031b44153fdfe364b0024ff2 ]
+
+The 'TCA_MPLS_LABEL' attribute is of 'NLA_U32' type, but has a
+validation type of 'NLA_VALIDATE_FUNCTION'. This is an invalid
+combination according to the comment above 'struct nla_policy':
+
+"
+Meaning of `validate' field, use via NLA_POLICY_VALIDATE_FN:
+   NLA_BINARY           Validation function called for the attribute.
+   All other            Unused - but note that it's a union
+"
+
+This can trigger the warning [1] in nla_get_range_unsigned() when
+validation of the attribute fails. Despite being of 'NLA_U32' type, the
+associated 'min'/'max' fields in the policy are negative as they are
+aliased by the 'validate' field.
+
+Fix by changing the attribute type to 'NLA_BINARY' which is consistent
+with the above comment and all other users of NLA_POLICY_VALIDATE_FN().
+As a result, move the length validation to the validation function.
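+
+The resulting policy entry and length check look roughly like this
+(a sketch of the hunks below):
+
+| [TCA_MPLS_LABEL] = NLA_POLICY_VALIDATE_FN(NLA_BINARY, valid_label),
+
+with valid_label() now rejecting any attribute whose payload is not
+exactly sizeof(u32) before dereferencing the label:
+
+|       if (nla_len(attr) != sizeof(*label))
+|               return -EINVAL;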
+
+No regressions in MPLS tests:
+
+ # ./tdc.py -f tc-tests/actions/mpls.json
+ [...]
+ # echo $?
+ 0
+
+[1]
+WARNING: CPU: 0 PID: 17743 at lib/nlattr.c:118
+nla_get_range_unsigned+0x1d8/0x1e0 lib/nlattr.c:117
+Modules linked in:
+CPU: 0 PID: 17743 Comm: syz-executor.0 Not tainted 6.1.0-rc8 #3
+Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS
+rel-1.13.0-48-gd9c812dda519-prebuilt.qemu.org 04/01/2014
+RIP: 0010:nla_get_range_unsigned+0x1d8/0x1e0 lib/nlattr.c:117
+[...]
+Call Trace:
+ <TASK>
+ __netlink_policy_dump_write_attr+0x23d/0x990 net/netlink/policy.c:310
+ netlink_policy_dump_write_attr+0x22/0x30 net/netlink/policy.c:411
+ netlink_ack_tlv_fill net/netlink/af_netlink.c:2454 [inline]
+ netlink_ack+0x546/0x760 net/netlink/af_netlink.c:2506
+ netlink_rcv_skb+0x1b7/0x240 net/netlink/af_netlink.c:2546
+ rtnetlink_rcv+0x18/0x20 net/core/rtnetlink.c:6109
+ netlink_unicast_kernel net/netlink/af_netlink.c:1319 [inline]
+ netlink_unicast+0x5e9/0x6b0 net/netlink/af_netlink.c:1345
+ netlink_sendmsg+0x739/0x860 net/netlink/af_netlink.c:1921
+ sock_sendmsg_nosec net/socket.c:714 [inline]
+ sock_sendmsg net/socket.c:734 [inline]
+ ____sys_sendmsg+0x38f/0x500 net/socket.c:2482
+ ___sys_sendmsg net/socket.c:2536 [inline]
+ __sys_sendmsg+0x197/0x230 net/socket.c:2565
+ __do_sys_sendmsg net/socket.c:2574 [inline]
+ __se_sys_sendmsg net/socket.c:2572 [inline]
+ __x64_sys_sendmsg+0x42/0x50 net/socket.c:2572
+ do_syscall_x64 arch/x86/entry/common.c:50 [inline]
+ do_syscall_64+0x2b/0x70 arch/x86/entry/common.c:80
+ entry_SYSCALL_64_after_hwframe+0x63/0xcd
+
+Link: https://lore.kernel.org/netdev/CAO4mrfdmjvRUNbDyP0R03_DrD_eFCLCguz6OxZ2TYRSv0K9gxA@mail.gmail.com/
+Fixes: 2a2ea50870ba ("net: sched: add mpls manipulation actions to TC")
+Reported-by: Wei Chen <harperchen1110@gmail.com>
+Tested-by: Wei Chen <harperchen1110@gmail.com>
+Signed-off-by: Ido Schimmel <idosch@nvidia.com>
+Reviewed-by: Alexander Duyck <alexanderduyck@fb.com>
+Link: https://lore.kernel.org/r/20230107171004.608436-1-idosch@nvidia.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/sched/act_mpls.c | 8 +++++++-
+ 1 file changed, 7 insertions(+), 1 deletion(-)
+
+diff --git a/net/sched/act_mpls.c b/net/sched/act_mpls.c
+index d1486ea496a2..09799412b248 100644
+--- a/net/sched/act_mpls.c
++++ b/net/sched/act_mpls.c
+@@ -133,6 +133,11 @@ static int valid_label(const struct nlattr *attr,
+ {
+       const u32 *label = nla_data(attr);
++      if (nla_len(attr) != sizeof(*label)) {
++              NL_SET_ERR_MSG_MOD(extack, "Invalid MPLS label length");
++              return -EINVAL;
++      }
++
+       if (*label & ~MPLS_LABEL_MASK || *label == MPLS_LABEL_IMPLNULL) {
+               NL_SET_ERR_MSG_MOD(extack, "MPLS label out of range");
+               return -EINVAL;
+@@ -144,7 +149,8 @@ static int valid_label(const struct nlattr *attr,
+ static const struct nla_policy mpls_policy[TCA_MPLS_MAX + 1] = {
+       [TCA_MPLS_PARMS]        = NLA_POLICY_EXACT_LEN(sizeof(struct tc_mpls)),
+       [TCA_MPLS_PROTO]        = { .type = NLA_U16 },
+-      [TCA_MPLS_LABEL]        = NLA_POLICY_VALIDATE_FN(NLA_U32, valid_label),
++      [TCA_MPLS_LABEL]        = NLA_POLICY_VALIDATE_FN(NLA_BINARY,
++                                                       valid_label),
+       [TCA_MPLS_TC]           = NLA_POLICY_RANGE(NLA_U8, 0, 7),
+       [TCA_MPLS_TTL]          = NLA_POLICY_MIN(NLA_U8, 1),
+       [TCA_MPLS_BOS]          = NLA_POLICY_RANGE(NLA_U8, 0, 1),
+-- 
+2.35.1
+
diff --git a/queue-5.10/nfc-pn533-wait-for-out_urb-s-completion-in-pn533_usb.patch b/queue-5.10/nfc-pn533-wait-for-out_urb-s-completion-in-pn533_usb.patch
new file mode 100644 (file)
index 0000000..647c922
--- /dev/null
@@ -0,0 +1,129 @@
+From 2d193a72dda1b553eb0b2ddaeed649d87af121c9 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 6 Jan 2023 17:23:44 +0900
+Subject: nfc: pn533: Wait for out_urb's completion in pn533_usb_send_frame()
+
+From: Minsuk Kang <linuxlovemin@yonsei.ac.kr>
+
+[ Upstream commit 9dab880d675b9d0dd56c6428e4e8352a3339371d ]
+
+Fix a use-after-free that occurs in the HCD when the in_urb sent from
+pn533_usb_send_frame() completes earlier than the out_urb. The in_urb's
+completion path, pn533_send_async_complete(), frees the skb data that
+is still in use as the transfer buffer of the out_urb. Wait until the
+callback of the out_urb has been called before sending the in_urb. To
+modify the callback of the out_urb alone, separate the completion
+functions of out_urb and ack_urb.
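+
+The send path then serialises the two URBs roughly as follows (a
+sketch of the hunk below, error handling elided):
+
+|       init_completion(&arg.done);
+|       phy->out_urb->context = &arg;
+|
+|       rc = usb_submit_urb(phy->out_urb, GFP_KERNEL);
+|       ...
+|       wait_for_completion(&arg.done); /* signalled by pn533_out_complete() */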
+
+Found by a modified version of syzkaller.
+
+BUG: KASAN: use-after-free in dummy_timer
+Call Trace:
+ memcpy (mm/kasan/shadow.c:65)
+ dummy_perform_transfer (drivers/usb/gadget/udc/dummy_hcd.c:1352)
+ transfer (drivers/usb/gadget/udc/dummy_hcd.c:1453)
+ dummy_timer (drivers/usb/gadget/udc/dummy_hcd.c:1972)
+ arch_static_branch (arch/x86/include/asm/jump_label.h:27)
+ static_key_false (include/linux/jump_label.h:207)
+ timer_expire_exit (include/trace/events/timer.h:127)
+ call_timer_fn (kernel/time/timer.c:1475)
+ expire_timers (kernel/time/timer.c:1519)
+ __run_timers (kernel/time/timer.c:1790)
+ run_timer_softirq (kernel/time/timer.c:1803)
+
+Fixes: c46ee38620a2 ("NFC: pn533: add NXP pn533 nfc device driver")
+Signed-off-by: Minsuk Kang <linuxlovemin@yonsei.ac.kr>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/nfc/pn533/usb.c | 44 ++++++++++++++++++++++++++++++++++++++---
+ 1 file changed, 41 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/nfc/pn533/usb.c b/drivers/nfc/pn533/usb.c
+index 84f2983bf384..57b07446bb76 100644
+--- a/drivers/nfc/pn533/usb.c
++++ b/drivers/nfc/pn533/usb.c
+@@ -153,10 +153,17 @@ static int pn533_usb_send_ack(struct pn533 *dev, gfp_t flags)
+       return usb_submit_urb(phy->ack_urb, flags);
+ }
++struct pn533_out_arg {
++      struct pn533_usb_phy *phy;
++      struct completion done;
++};
++
+ static int pn533_usb_send_frame(struct pn533 *dev,
+                               struct sk_buff *out)
+ {
+       struct pn533_usb_phy *phy = dev->phy;
++      struct pn533_out_arg arg;
++      void *cntx;
+       int rc;
+       if (phy->priv == NULL)
+@@ -168,10 +175,17 @@ static int pn533_usb_send_frame(struct pn533 *dev,
+       print_hex_dump_debug("PN533 TX: ", DUMP_PREFIX_NONE, 16, 1,
+                            out->data, out->len, false);
++      init_completion(&arg.done);
++      cntx = phy->out_urb->context;
++      phy->out_urb->context = &arg;
++
+       rc = usb_submit_urb(phy->out_urb, GFP_KERNEL);
+       if (rc)
+               return rc;
++      wait_for_completion(&arg.done);
++      phy->out_urb->context = cntx;
++
+       if (dev->protocol_type == PN533_PROTO_REQ_RESP) {
+               /* request for response for sent packet directly */
+               rc = pn533_submit_urb_for_response(phy, GFP_KERNEL);
+@@ -412,7 +426,31 @@ static int pn533_acr122_poweron_rdr(struct pn533_usb_phy *phy)
+       return arg.rc;
+ }
+-static void pn533_send_complete(struct urb *urb)
++static void pn533_out_complete(struct urb *urb)
++{
++      struct pn533_out_arg *arg = urb->context;
++      struct pn533_usb_phy *phy = arg->phy;
++
++      switch (urb->status) {
++      case 0:
++              break; /* success */
++      case -ECONNRESET:
++      case -ENOENT:
++              dev_dbg(&phy->udev->dev,
++                      "The urb has been stopped (status %d)\n",
++                      urb->status);
++              break;
++      case -ESHUTDOWN:
++      default:
++              nfc_err(&phy->udev->dev,
++                      "Urb failure (status %d)\n",
++                      urb->status);
++      }
++
++      complete(&arg->done);
++}
++
++static void pn533_ack_complete(struct urb *urb)
+ {
+       struct pn533_usb_phy *phy = urb->context;
+@@ -500,10 +538,10 @@ static int pn533_usb_probe(struct usb_interface *interface,
+       usb_fill_bulk_urb(phy->out_urb, phy->udev,
+                         usb_sndbulkpipe(phy->udev, out_endpoint),
+-                        NULL, 0, pn533_send_complete, phy);
++                        NULL, 0, pn533_out_complete, phy);
+       usb_fill_bulk_urb(phy->ack_urb, phy->udev,
+                         usb_sndbulkpipe(phy->udev, out_endpoint),
+-                        NULL, 0, pn533_send_complete, phy);
++                        NULL, 0, pn533_ack_complete, phy);
+       switch (id->driver_info) {
+       case PN533_DEVICE_STD:
+-- 
+2.35.1
+
diff --git a/queue-5.10/octeontx2-af-fix-lmac-config-in-cgx_lmac_rx_tx_enabl.patch b/queue-5.10/octeontx2-af-fix-lmac-config-in-cgx_lmac_rx_tx_enabl.patch
new file mode 100644 (file)
index 0000000..7876fe2
--- /dev/null
@@ -0,0 +1,57 @@
+From 73c4a59aadf03c60bf80debc60d96e410fdca2e0 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 5 Jan 2023 21:31:07 +0530
+Subject: octeontx2-af: Fix LMAC config in cgx_lmac_rx_tx_enable
+
+From: Angela Czubak <aczubak@marvell.com>
+
+[ Upstream commit b4e9b8763e417db31c7088103cc557d55cb7a8f5 ]
+
+A PF netdev can request the AF to enable or disable reception and
+transmission on the assigned CGX::LMAC. The current code, instead of
+only disabling or enabling reception and transmission, also
+disables/enables the LMAC itself. This patch fixes this issue.
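+
+With the fix, only the Rx/Tx data-path enable bits are toggled and
+CMR_EN is left untouched (a sketch of the hunk below):
+
+|       if (enable)
+|               cfg |= DATA_PKT_RX_EN | DATA_PKT_TX_EN;
+|       else
+|               cfg &= ~(DATA_PKT_RX_EN | DATA_PKT_TX_EN);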
+
+Fixes: 1435f66a28b4 ("octeontx2-af: CGX Rx/Tx enable/disable mbox handlers")
+Signed-off-by: Angela Czubak <aczubak@marvell.com>
+Signed-off-by: Hariprasad Kelam <hkelam@marvell.com>
+Reviewed-by: Leon Romanovsky <leonro@nvidia.com>
+Link: https://lore.kernel.org/r/20230105160107.17638-1-hkelam@marvell.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/marvell/octeontx2/af/cgx.c | 4 ++--
+ drivers/net/ethernet/marvell/octeontx2/af/cgx.h | 1 -
+ 2 files changed, 2 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
+index 1a8f5a039d50..c0a0a31272cc 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
++++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
+@@ -350,9 +350,9 @@ int cgx_lmac_rx_tx_enable(void *cgxd, int lmac_id, bool enable)
+       cfg = cgx_read(cgx, lmac_id, CGXX_CMRX_CFG);
+       if (enable)
+-              cfg |= CMR_EN | DATA_PKT_RX_EN | DATA_PKT_TX_EN;
++              cfg |= DATA_PKT_RX_EN | DATA_PKT_TX_EN;
+       else
+-              cfg &= ~(CMR_EN | DATA_PKT_RX_EN | DATA_PKT_TX_EN);
++              cfg &= ~(DATA_PKT_RX_EN | DATA_PKT_TX_EN);
+       cgx_write(cgx, lmac_id, CGXX_CMRX_CFG, cfg);
+       return 0;
+ }
+diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.h b/drivers/net/ethernet/marvell/octeontx2/af/cgx.h
+index bcfc3e5f66bb..e176a6c654ef 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.h
++++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.h
+@@ -31,7 +31,6 @@
+ #define CMR_P2X_SEL_SHIFT             59ULL
+ #define CMR_P2X_SEL_NIX0              1ULL
+ #define CMR_P2X_SEL_NIX1              2ULL
+-#define CMR_EN                                BIT_ULL(55)
+ #define DATA_PKT_TX_EN                        BIT_ULL(53)
+ #define DATA_PKT_RX_EN                        BIT_ULL(54)
+ #define CGX_LMAC_TYPE_SHIFT           40
+-- 
+2.35.1
+
diff --git a/queue-5.10/octeontx2-af-map-nix-block-from-cgx-connection.patch b/queue-5.10/octeontx2-af-map-nix-block-from-cgx-connection.patch
new file mode 100644 (file)
index 0000000..8f899cc
--- /dev/null
@@ -0,0 +1,278 @@
+From dd9358d1837cd2f26764de29ea9023956781132a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 29 Oct 2020 10:45:43 +0530
+Subject: octeontx2-af: Map NIX block from CGX connection
+
+From: Subbaraya Sundeep <sbhatta@marvell.com>
+
+[ Upstream commit c5a73b632b901c4b07d156bb8a8a2c5517678f35 ]
+
+Firmware configures the NIX block mapping for all CGXs
+to achieve maximum throughput. This patch reads that
+configuration and creates the mapping between RVU PFs
+and NIX blocks. For LBK VFs, it assigns NIX0 to
+even-numbered VFs and NIX1 to odd-numbered VFs.
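+
+The LBK VF assignment reduces to the VF's parity; when NIX1 is not
+present on the silicon, NIX0 is used instead (a sketch of the hunk
+below):
+
+|       blkaddr = (vf & 1) ? BLKADDR_NIX1 : BLKADDR_NIX0;
+|       if (!is_block_implemented(rvu->hw, BLKADDR_NIX1))
+|               blkaddr = BLKADDR_NIX0;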
+
+Signed-off-by: Subbaraya Sundeep <sbhatta@marvell.com>
+Signed-off-by: Sunil Goutham <sgoutham@marvell.com>
+Signed-off-by: Rakesh Babu <rsaladi2@marvell.com>
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Stable-dep-of: b4e9b8763e41 ("octeontx2-af: Fix LMAC config in cgx_lmac_rx_tx_enable")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ .../net/ethernet/marvell/octeontx2/af/cgx.c   | 13 +++-
+ .../net/ethernet/marvell/octeontx2/af/cgx.h   |  5 ++
+ .../net/ethernet/marvell/octeontx2/af/rvu.c   | 61 ++++++++++++++++++-
+ .../net/ethernet/marvell/octeontx2/af/rvu.h   |  2 +
+ .../ethernet/marvell/octeontx2/af/rvu_cgx.c   | 15 +++++
+ .../ethernet/marvell/octeontx2/af/rvu_nix.c   | 21 +++++--
+ 6 files changed, 107 insertions(+), 10 deletions(-)
+
+diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
+index fc27a40202c6..1a8f5a039d50 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
++++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
+@@ -145,6 +145,16 @@ int cgx_get_cgxid(void *cgxd)
+       return cgx->cgx_id;
+ }
++u8 cgx_lmac_get_p2x(int cgx_id, int lmac_id)
++{
++      struct cgx *cgx_dev = cgx_get_pdata(cgx_id);
++      u64 cfg;
++
++      cfg = cgx_read(cgx_dev, lmac_id, CGXX_CMRX_CFG);
++
++      return (cfg & CMR_P2X_SEL_MASK) >> CMR_P2X_SEL_SHIFT;
++}
++
+ /* Ensure the required lock for event queue(where asynchronous events are
+  * posted) is acquired before calling this API. Else an asynchronous event(with
+  * latest link status) can reach the destination before this function returns
+@@ -814,8 +824,7 @@ static int cgx_lmac_verify_fwi_version(struct cgx *cgx)
+       minor_ver = FIELD_GET(RESP_MINOR_VER, resp);
+       dev_dbg(dev, "Firmware command interface version = %d.%d\n",
+               major_ver, minor_ver);
+-      if (major_ver != CGX_FIRMWARE_MAJOR_VER ||
+-          minor_ver != CGX_FIRMWARE_MINOR_VER)
++      if (major_ver != CGX_FIRMWARE_MAJOR_VER)
+               return -EIO;
+       else
+               return 0;
+diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.h b/drivers/net/ethernet/marvell/octeontx2/af/cgx.h
+index 27ca3291682b..bcfc3e5f66bb 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.h
++++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.h
+@@ -27,6 +27,10 @@
+ /* Registers */
+ #define CGXX_CMRX_CFG                 0x00
++#define CMR_P2X_SEL_MASK              GENMASK_ULL(61, 59)
++#define CMR_P2X_SEL_SHIFT             59ULL
++#define CMR_P2X_SEL_NIX0              1ULL
++#define CMR_P2X_SEL_NIX1              2ULL
+ #define CMR_EN                                BIT_ULL(55)
+ #define DATA_PKT_TX_EN                        BIT_ULL(53)
+ #define DATA_PKT_RX_EN                        BIT_ULL(54)
+@@ -142,5 +146,6 @@ int cgx_lmac_get_pause_frm(void *cgxd, int lmac_id,
+ int cgx_lmac_set_pause_frm(void *cgxd, int lmac_id,
+                          u8 tx_pause, u8 rx_pause);
+ void cgx_lmac_ptp_config(void *cgxd, int lmac_id, bool enable);
++u8 cgx_lmac_get_p2x(int cgx_id, int lmac_id);
+ #endif /* CGX_H */
+diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
+index 9bab525ecd86..acbc67074f59 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
++++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
+@@ -1208,6 +1208,58 @@ int rvu_mbox_handler_detach_resources(struct rvu *rvu,
+       return rvu_detach_rsrcs(rvu, detach, detach->hdr.pcifunc);
+ }
++static int rvu_get_nix_blkaddr(struct rvu *rvu, u16 pcifunc)
++{
++      struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
++      int blkaddr = BLKADDR_NIX0, vf;
++      struct rvu_pfvf *pf;
++
++      /* All CGX mapped PFs are set with assigned NIX block during init */
++      if (is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc))) {
++              pf = rvu_get_pfvf(rvu, pcifunc & ~RVU_PFVF_FUNC_MASK);
++              blkaddr = pf->nix_blkaddr;
++      } else if (is_afvf(pcifunc)) {
++              vf = pcifunc - 1;
++              /* Assign NIX based on VF number. All even numbered VFs get
++               * NIX0 and odd numbered gets NIX1
++               */
++              blkaddr = (vf & 1) ? BLKADDR_NIX1 : BLKADDR_NIX0;
++              /* NIX1 is not present on all silicons */
++              if (!is_block_implemented(rvu->hw, BLKADDR_NIX1))
++                      blkaddr = BLKADDR_NIX0;
++      }
++
++      switch (blkaddr) {
++      case BLKADDR_NIX1:
++              pfvf->nix_blkaddr = BLKADDR_NIX1;
++              break;
++      case BLKADDR_NIX0:
++      default:
++              pfvf->nix_blkaddr = BLKADDR_NIX0;
++              break;
++      }
++
++      return pfvf->nix_blkaddr;
++}
++
++static int rvu_get_attach_blkaddr(struct rvu *rvu, int blktype, u16 pcifunc)
++{
++      int blkaddr;
++
++      switch (blktype) {
++      case BLKTYPE_NIX:
++              blkaddr = rvu_get_nix_blkaddr(rvu, pcifunc);
++              break;
++      default:
++              return rvu_get_blkaddr(rvu, blktype, 0);
++      };
++
++      if (is_block_implemented(rvu->hw, blkaddr))
++              return blkaddr;
++
++      return -ENODEV;
++}
++
+ static void rvu_attach_block(struct rvu *rvu, int pcifunc,
+                            int blktype, int num_lfs)
+ {
+@@ -1221,7 +1273,7 @@ static void rvu_attach_block(struct rvu *rvu, int pcifunc,
+       if (!num_lfs)
+               return;
+-      blkaddr = rvu_get_blkaddr(rvu, blktype, 0);
++      blkaddr = rvu_get_attach_blkaddr(rvu, blktype, pcifunc);
+       if (blkaddr < 0)
+               return;
+@@ -1250,9 +1302,9 @@ static int rvu_check_rsrc_availability(struct rvu *rvu,
+                                      struct rsrc_attach *req, u16 pcifunc)
+ {
+       struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
++      int free_lfs, mappedlfs, blkaddr;
+       struct rvu_hwinfo *hw = rvu->hw;
+       struct rvu_block *block;
+-      int free_lfs, mappedlfs;
+       /* Only one NPA LF can be attached */
+       if (req->npalf && !is_blktype_attached(pfvf, BLKTYPE_NPA)) {
+@@ -1269,7 +1321,10 @@ static int rvu_check_rsrc_availability(struct rvu *rvu,
+       /* Only one NIX LF can be attached */
+       if (req->nixlf && !is_blktype_attached(pfvf, BLKTYPE_NIX)) {
+-              block = &hw->block[BLKADDR_NIX0];
++              blkaddr = rvu_get_attach_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
++              if (blkaddr < 0)
++                      return blkaddr;
++              block = &hw->block[blkaddr];
+               free_lfs = rvu_rsrc_free_count(&block->lf);
+               if (!free_lfs)
+                       goto fail;
+diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
+index 0cb5093744fe..fc6d785b98dd 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
++++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
+@@ -183,6 +183,8 @@ struct rvu_pfvf {
+       bool    cgx_in_use; /* this PF/VF using CGX? */
+       int     cgx_users;  /* number of cgx users - used only by PFs */
++
++      u8      nix_blkaddr; /* BLKADDR_NIX0/1 assigned to this PF */
+ };
+ struct nix_txsch {
+diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
+index f4ecc755eaff..6c6b411e78fd 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
++++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
+@@ -74,6 +74,20 @@ void *rvu_cgx_pdata(u8 cgx_id, struct rvu *rvu)
+       return rvu->cgx_idmap[cgx_id];
+ }
++/* Based on P2X connectivity find mapped NIX block for a PF */
++static void rvu_map_cgx_nix_block(struct rvu *rvu, int pf,
++                                int cgx_id, int lmac_id)
++{
++      struct rvu_pfvf *pfvf = &rvu->pf[pf];
++      u8 p2x;
++
++      p2x = cgx_lmac_get_p2x(cgx_id, lmac_id);
++      /* Firmware sets P2X_SELECT as either NIX0 or NIX1 */
++      pfvf->nix_blkaddr = BLKADDR_NIX0;
++      if (p2x == CMR_P2X_SEL_NIX1)
++              pfvf->nix_blkaddr = BLKADDR_NIX1;
++}
++
+ static int rvu_map_cgx_lmac_pf(struct rvu *rvu)
+ {
+       struct npc_pkind *pkind = &rvu->hw->pkind;
+@@ -117,6 +131,7 @@ static int rvu_map_cgx_lmac_pf(struct rvu *rvu)
+                       rvu->cgxlmac2pf_map[CGX_OFFSET(cgx) + lmac] = 1 << pf;
+                       free_pkind = rvu_alloc_rsrc(&pkind->rsrc);
+                       pkind->pfchan_map[free_pkind] = ((pf) & 0x3F) << 16;
++                      rvu_map_cgx_nix_block(rvu, pf, cgx, lmac);
+                       rvu->cgx_mapped_pfs++;
+               }
+       }
+diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
+index f6a3cf3e6f23..9886a30e9723 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
++++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
+@@ -187,8 +187,8 @@ static bool is_valid_txschq(struct rvu *rvu, int blkaddr,
+ static int nix_interface_init(struct rvu *rvu, u16 pcifunc, int type, int nixlf)
+ {
+       struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
++      int pkind, pf, vf, lbkid;
+       u8 cgx_id, lmac_id;
+-      int pkind, pf, vf;
+       int err;
+       pf = rvu_get_pf(pcifunc);
+@@ -221,13 +221,24 @@ static int nix_interface_init(struct rvu *rvu, u16 pcifunc, int type, int nixlf)
+       case NIX_INTF_TYPE_LBK:
+               vf = (pcifunc & RVU_PFVF_FUNC_MASK) - 1;
++              /* If NIX1 block is present on the silicon then NIXes are
++               * assigned alternatively for lbk interfaces. NIX0 should
++               * send packets on lbk link 1 channels and NIX1 should send
++               * on lbk link 0 channels for the communication between
++               * NIX0 and NIX1.
++               */
++              lbkid = 0;
++              if (rvu->hw->lbk_links > 1)
++                      lbkid = vf & 0x1 ? 0 : 1;
++
+               /* Note that AF's VFs work in pairs and talk over consecutive
+                * loopback channels.Therefore if odd number of AF VFs are
+                * enabled then the last VF remains with no pair.
+                */
+-              pfvf->rx_chan_base = NIX_CHAN_LBK_CHX(0, vf);
+-              pfvf->tx_chan_base = vf & 0x1 ? NIX_CHAN_LBK_CHX(0, vf - 1) :
+-                                              NIX_CHAN_LBK_CHX(0, vf + 1);
++              pfvf->rx_chan_base = NIX_CHAN_LBK_CHX(lbkid, vf);
++              pfvf->tx_chan_base = vf & 0x1 ?
++                                      NIX_CHAN_LBK_CHX(lbkid, vf - 1) :
++                                      NIX_CHAN_LBK_CHX(lbkid, vf + 1);
+               pfvf->rx_chan_cnt = 1;
+               pfvf->tx_chan_cnt = 1;
+               rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf,
+@@ -3157,7 +3168,7 @@ int rvu_nix_init(struct rvu *rvu)
+       hw->cgx = (cfg >> 12) & 0xF;
+       hw->lmac_per_cgx = (cfg >> 8) & 0xF;
+       hw->cgx_links = hw->cgx * hw->lmac_per_cgx;
+-      hw->lbk_links = 1;
++      hw->lbk_links = (cfg >> 24) & 0xF;
+       hw->sdp_links = 1;
+       /* Initialize admin queue */
+-- 
+2.35.1
+
diff --git a/queue-5.10/octeontx2-af-update-get-set-resource-count-functions.patch b/queue-5.10/octeontx2-af-update-get-set-resource-count-functions.patch
new file mode 100644 (file)
index 0000000..0f9e7fb
--- /dev/null
@@ -0,0 +1,232 @@
+From f7e09a28d539e8df62beb1e738f8f147b1fa1353 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 29 Oct 2020 10:45:40 +0530
+Subject: octeontx2-af: Update get/set resource count functions
+
+From: Subbaraya Sundeep <sbhatta@marvell.com>
+
+[ Upstream commit cdd41e878526797df29903fe592d6a26b096ac7d ]
+
+Since multiple blocks of the same type are present in
+98xx, modify the functions that get and update the
+resource counts to work with an individual block
+address instead of a block type.
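+
+Concretely, callers switch from passing the block type to passing the
+block address (a sketch of the hunks below):
+
+|       num_lfs = rvu_get_rsrc_mapcount(pfvf, block->addr);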
+
+Reviewed-by: Jesse Brandeburg <jesse.brandeburg@intel.com>
+Signed-off-by: Subbaraya Sundeep <sbhatta@marvell.com>
+Signed-off-by: Sunil Goutham <sgoutham@marvell.com>
+Signed-off-by: Rakesh Babu <rsaladi2@marvell.com>
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Stable-dep-of: b4e9b8763e41 ("octeontx2-af: Fix LMAC config in cgx_lmac_rx_tx_enable")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ .../net/ethernet/marvell/octeontx2/af/rvu.c   | 73 +++++++++++++------
+ .../net/ethernet/marvell/octeontx2/af/rvu.h   |  2 +
+ 2 files changed, 53 insertions(+), 22 deletions(-)
+
+diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
+index c26652436c53..9bab525ecd86 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
++++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
+@@ -316,31 +316,36 @@ static void rvu_update_rsrc_map(struct rvu *rvu, struct rvu_pfvf *pfvf,
+       block->fn_map[lf] = attach ? pcifunc : 0;
+-      switch (block->type) {
+-      case BLKTYPE_NPA:
++      switch (block->addr) {
++      case BLKADDR_NPA:
+               pfvf->npalf = attach ? true : false;
+               num_lfs = pfvf->npalf;
+               break;
+-      case BLKTYPE_NIX:
++      case BLKADDR_NIX0:
++      case BLKADDR_NIX1:
+               pfvf->nixlf = attach ? true : false;
+               num_lfs = pfvf->nixlf;
+               break;
+-      case BLKTYPE_SSO:
++      case BLKADDR_SSO:
+               attach ? pfvf->sso++ : pfvf->sso--;
+               num_lfs = pfvf->sso;
+               break;
+-      case BLKTYPE_SSOW:
++      case BLKADDR_SSOW:
+               attach ? pfvf->ssow++ : pfvf->ssow--;
+               num_lfs = pfvf->ssow;
+               break;
+-      case BLKTYPE_TIM:
++      case BLKADDR_TIM:
+               attach ? pfvf->timlfs++ : pfvf->timlfs--;
+               num_lfs = pfvf->timlfs;
+               break;
+-      case BLKTYPE_CPT:
++      case BLKADDR_CPT0:
+               attach ? pfvf->cptlfs++ : pfvf->cptlfs--;
+               num_lfs = pfvf->cptlfs;
+               break;
++      case BLKADDR_CPT1:
++              attach ? pfvf->cpt1_lfs++ : pfvf->cpt1_lfs--;
++              num_lfs = pfvf->cpt1_lfs;
++              break;
+       }
+       reg = is_pf ? block->pf_lfcnt_reg : block->vf_lfcnt_reg;
+@@ -1035,7 +1040,30 @@ int rvu_mbox_handler_ready(struct rvu *rvu, struct msg_req *req,
+ /* Get current count of a RVU block's LF/slots
+  * provisioned to a given RVU func.
+  */
+-static u16 rvu_get_rsrc_mapcount(struct rvu_pfvf *pfvf, int blktype)
++u16 rvu_get_rsrc_mapcount(struct rvu_pfvf *pfvf, int blkaddr)
++{
++      switch (blkaddr) {
++      case BLKADDR_NPA:
++              return pfvf->npalf ? 1 : 0;
++      case BLKADDR_NIX0:
++      case BLKADDR_NIX1:
++              return pfvf->nixlf ? 1 : 0;
++      case BLKADDR_SSO:
++              return pfvf->sso;
++      case BLKADDR_SSOW:
++              return pfvf->ssow;
++      case BLKADDR_TIM:
++              return pfvf->timlfs;
++      case BLKADDR_CPT0:
++              return pfvf->cptlfs;
++      case BLKADDR_CPT1:
++              return pfvf->cpt1_lfs;
++      }
++      return 0;
++}
++
++/* Return true if LFs of block type are attached to pcifunc */
++static bool is_blktype_attached(struct rvu_pfvf *pfvf, int blktype)
+ {
+       switch (blktype) {
+       case BLKTYPE_NPA:
+@@ -1043,15 +1071,16 @@ static u16 rvu_get_rsrc_mapcount(struct rvu_pfvf *pfvf, int blktype)
+       case BLKTYPE_NIX:
+               return pfvf->nixlf ? 1 : 0;
+       case BLKTYPE_SSO:
+-              return pfvf->sso;
++              return !!pfvf->sso;
+       case BLKTYPE_SSOW:
+-              return pfvf->ssow;
++              return !!pfvf->ssow;
+       case BLKTYPE_TIM:
+-              return pfvf->timlfs;
++              return !!pfvf->timlfs;
+       case BLKTYPE_CPT:
+-              return pfvf->cptlfs;
++              return pfvf->cptlfs || pfvf->cpt1_lfs;
+       }
+-      return 0;
++
++      return false;
+ }
+ bool is_pffunc_map_valid(struct rvu *rvu, u16 pcifunc, int blktype)
+@@ -1064,7 +1093,7 @@ bool is_pffunc_map_valid(struct rvu *rvu, u16 pcifunc, int blktype)
+       pfvf = rvu_get_pfvf(rvu, pcifunc);
+       /* Check if this PFFUNC has a LF of type blktype attached */
+-      if (!rvu_get_rsrc_mapcount(pfvf, blktype))
++      if (!is_blktype_attached(pfvf, blktype))
+               return false;
+       return true;
+@@ -1105,7 +1134,7 @@ static void rvu_detach_block(struct rvu *rvu, int pcifunc, int blktype)
+       block = &hw->block[blkaddr];
+-      num_lfs = rvu_get_rsrc_mapcount(pfvf, block->type);
++      num_lfs = rvu_get_rsrc_mapcount(pfvf, block->addr);
+       if (!num_lfs)
+               return;
+@@ -1226,7 +1255,7 @@ static int rvu_check_rsrc_availability(struct rvu *rvu,
+       int free_lfs, mappedlfs;
+       /* Only one NPA LF can be attached */
+-      if (req->npalf && !rvu_get_rsrc_mapcount(pfvf, BLKTYPE_NPA)) {
++      if (req->npalf && !is_blktype_attached(pfvf, BLKTYPE_NPA)) {
+               block = &hw->block[BLKADDR_NPA];
+               free_lfs = rvu_rsrc_free_count(&block->lf);
+               if (!free_lfs)
+@@ -1239,7 +1268,7 @@ static int rvu_check_rsrc_availability(struct rvu *rvu,
+       }
+       /* Only one NIX LF can be attached */
+-      if (req->nixlf && !rvu_get_rsrc_mapcount(pfvf, BLKTYPE_NIX)) {
++      if (req->nixlf && !is_blktype_attached(pfvf, BLKTYPE_NIX)) {
+               block = &hw->block[BLKADDR_NIX0];
+               free_lfs = rvu_rsrc_free_count(&block->lf);
+               if (!free_lfs)
+@@ -1260,7 +1289,7 @@ static int rvu_check_rsrc_availability(struct rvu *rvu,
+                                pcifunc, req->sso, block->lf.max);
+                       return -EINVAL;
+               }
+-              mappedlfs = rvu_get_rsrc_mapcount(pfvf, block->type);
++              mappedlfs = rvu_get_rsrc_mapcount(pfvf, block->addr);
+               free_lfs = rvu_rsrc_free_count(&block->lf);
+               /* Check if additional resources are available */
+               if (req->sso > mappedlfs &&
+@@ -1276,7 +1305,7 @@ static int rvu_check_rsrc_availability(struct rvu *rvu,
+                                pcifunc, req->sso, block->lf.max);
+                       return -EINVAL;
+               }
+-              mappedlfs = rvu_get_rsrc_mapcount(pfvf, block->type);
++              mappedlfs = rvu_get_rsrc_mapcount(pfvf, block->addr);
+               free_lfs = rvu_rsrc_free_count(&block->lf);
+               if (req->ssow > mappedlfs &&
+                   ((req->ssow - mappedlfs) > free_lfs))
+@@ -1291,7 +1320,7 @@ static int rvu_check_rsrc_availability(struct rvu *rvu,
+                                pcifunc, req->timlfs, block->lf.max);
+                       return -EINVAL;
+               }
+-              mappedlfs = rvu_get_rsrc_mapcount(pfvf, block->type);
++              mappedlfs = rvu_get_rsrc_mapcount(pfvf, block->addr);
+               free_lfs = rvu_rsrc_free_count(&block->lf);
+               if (req->timlfs > mappedlfs &&
+                   ((req->timlfs - mappedlfs) > free_lfs))
+@@ -1306,7 +1335,7 @@ static int rvu_check_rsrc_availability(struct rvu *rvu,
+                                pcifunc, req->cptlfs, block->lf.max);
+                       return -EINVAL;
+               }
+-              mappedlfs = rvu_get_rsrc_mapcount(pfvf, block->type);
++              mappedlfs = rvu_get_rsrc_mapcount(pfvf, block->addr);
+               free_lfs = rvu_rsrc_free_count(&block->lf);
+               if (req->cptlfs > mappedlfs &&
+                   ((req->cptlfs - mappedlfs) > free_lfs))
+@@ -1942,7 +1971,7 @@ static void rvu_blklf_teardown(struct rvu *rvu, u16 pcifunc, u8 blkaddr)
+       block = &rvu->hw->block[blkaddr];
+       num_lfs = rvu_get_rsrc_mapcount(rvu_get_pfvf(rvu, pcifunc),
+-                                      block->type);
++                                      block->addr);
+       if (!num_lfs)
+               return;
+       for (slot = 0; slot < num_lfs; slot++) {
+diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
+index 90eed3160915..0cb5093744fe 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
++++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
+@@ -137,6 +137,7 @@ struct rvu_pfvf {
+       u16             ssow;
+       u16             cptlfs;
+       u16             timlfs;
++      u16             cpt1_lfs;
+       u8              cgx_lmac;
+       /* Block LF's MSIX vector info */
+@@ -420,6 +421,7 @@ void rvu_free_rsrc(struct rsrc_bmap *rsrc, int id);
+ int rvu_rsrc_free_count(struct rsrc_bmap *rsrc);
+ int rvu_alloc_rsrc_contig(struct rsrc_bmap *rsrc, int nrsrc);
+ bool rvu_rsrc_check_contig(struct rsrc_bmap *rsrc, int nrsrc);
++u16 rvu_get_rsrc_mapcount(struct rvu_pfvf *pfvf, int blkaddr);
+ int rvu_get_pf(u16 pcifunc);
+ struct rvu_pfvf *rvu_get_pfvf(struct rvu *rvu, int pcifunc);
+ void rvu_get_pf_numvfs(struct rvu *rvu, int pf, int *numvfs, int *hwvf);
+-- 
+2.35.1
+
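The hunks above consistently re-key rvu_get_rsrc_mapcount() from block->type to block->addr, and the header hunk exports the helper with a blkaddr parameter alongside a new cpt1_lfs counter. A minimal sketch of the idea, using only the rvu_pfvf fields visible in the hunks above (the exact upstream body may differ):

	u16 rvu_get_rsrc_mapcount(struct rvu_pfvf *pfvf, int blkaddr)
	{
		/* Dispatch on block address rather than block type, so
		 * the two CPT blocks (CPT0/CPT1) no longer alias the
		 * same LF counter.
		 */
		switch (blkaddr) {
		case BLKADDR_SSO:
			return pfvf->sso;
		case BLKADDR_SSOW:
			return pfvf->ssow;
		case BLKADDR_TIM:
			return pfvf->timlfs;
		case BLKADDR_CPT0:
			return pfvf->cptlfs;
		case BLKADDR_CPT1:
			return pfvf->cpt1_lfs;
		default:
			return 0;
		}
	}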
diff --git a/queue-5.10/regulator-da9211-use-irq-handler-when-ready.patch b/queue-5.10/regulator-da9211-use-irq-handler-when-ready.patch
new file mode 100644 (file)
index 0000000..135ab3b
--- /dev/null
@@ -0,0 +1,66 @@
+From 3253c8ea3a85d3ca52fe539806817aa9ca45d735 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 27 Nov 2022 22:06:02 +0100
+Subject: regulator: da9211: Use irq handler when ready
+
+From: Ricardo Ribalda <ribalda@chromium.org>
+
+[ Upstream commit 02228f6aa6a64d588bc31e3267d05ff184d772eb ]
+
+If the system does not come from reset (for example after a kexec()),
+the regulator might already have an IRQ waiting for us.
+
+If we enable the IRQ handler before its structures are ready, we crash.
+
+This patch fixes:
+
+[    1.141839] Unable to handle kernel read from unreadable memory at virtual address 0000000000000078
+[    1.316096] Call trace:
+[    1.316101]  blocking_notifier_call_chain+0x20/0xa8
+[    1.322757] cpu cpu0: dummy supplies not allowed for exclusive requests
+[    1.327823]  regulator_notifier_call_chain+0x1c/0x2c
+[    1.327825]  da9211_irq_handler+0x68/0xf8
+[    1.327829]  irq_thread+0x11c/0x234
+[    1.327833]  kthread+0x13c/0x154
+
+Signed-off-by: Ricardo Ribalda <ribalda@chromium.org>
+Reviewed-by: Adam Ward <DLG-Adam.Ward.opensource@dm.renesas.com>
+Link: https://lore.kernel.org/r/20221124-da9211-v2-0-1779e3c5d491@chromium.org
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/regulator/da9211-regulator.c | 11 ++++++-----
+ 1 file changed, 6 insertions(+), 5 deletions(-)
+
+diff --git a/drivers/regulator/da9211-regulator.c b/drivers/regulator/da9211-regulator.c
+index e01b32d1fa17..00828f5baa97 100644
+--- a/drivers/regulator/da9211-regulator.c
++++ b/drivers/regulator/da9211-regulator.c
+@@ -498,6 +498,12 @@ static int da9211_i2c_probe(struct i2c_client *i2c)
+       chip->chip_irq = i2c->irq;
++      ret = da9211_regulator_init(chip);
++      if (ret < 0) {
++              dev_err(chip->dev, "Failed to initialize regulator: %d\n", ret);
++              return ret;
++      }
++
+       if (chip->chip_irq != 0) {
+               ret = devm_request_threaded_irq(chip->dev, chip->chip_irq, NULL,
+                                       da9211_irq_handler,
+@@ -512,11 +518,6 @@ static int da9211_i2c_probe(struct i2c_client *i2c)
+               dev_warn(chip->dev, "No IRQ configured\n");
+       }
+-      ret = da9211_regulator_init(chip);
+-
+-      if (ret < 0)
+-              dev_err(chip->dev, "Failed to initialize regulator: %d\n", ret);
+-
+       return ret;
+ }
+-- 
+2.35.1
+
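Beyond this specific driver, the reordering above is an instance of a general probe-time rule: everything a threaded IRQ handler dereferences must be initialized before the IRQ is requested, because an interrupt left pending across a kexec() can fire immediately. A minimal sketch of the pattern with hypothetical names (not the actual da9211 code):

	static int foo_i2c_probe(struct i2c_client *i2c)
	{
		struct foo_chip *chip;
		int ret;

		chip = devm_kzalloc(&i2c->dev, sizeof(*chip), GFP_KERNEL);
		if (!chip)
			return -ENOMEM;
		i2c_set_clientdata(i2c, chip);

		/* Fully set up the state the handler uses *first*; a
		 * pending IRQ may fire as soon as it is requested below.
		 */
		ret = foo_hw_init(chip);
		if (ret < 0)
			return ret;

		return devm_request_threaded_irq(&i2c->dev, i2c->irq, NULL,
						 foo_irq_handler,
						 IRQF_TRIGGER_LOW | IRQF_ONESHOT,
						 "foo", chip);
	}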
diff --git a/queue-5.10/series b/queue-5.10/series
index 4a738b7502ee7d90c0302012093a572663cd67f4..f9c59c49a48e2b56d4279d1eaf6d32004ebfee80 100644 (file)
@@ -36,3 +36,26 @@ netfilter-ipset-fix-overflow-before-widen-in-the-bitmap_ip_create-function.patch
 powerpc-imc-pmu-fix-use-of-mutex-in-irqs-disabled-section.patch
 x86-boot-avoid-using-intel-mnemonics-in-at-t-syntax-asm.patch
 edac-device-fix-period-calculation-in-edac_device_reset_delay_period.patch
+regulator-da9211-use-irq-handler-when-ready.patch
+asoc-wm8904-fix-wrong-outputs-volume-after-power-rea.patch
+tipc-fix-unexpected-link-reset-due-to-discovery-mess.patch
+octeontx2-af-update-get-set-resource-count-functions.patch
+octeontx2-af-map-nix-block-from-cgx-connection.patch
+octeontx2-af-fix-lmac-config-in-cgx_lmac_rx_tx_enabl.patch
+hvc-xen-lock-console-list-traversal.patch
+nfc-pn533-wait-for-out_urb-s-completion-in-pn533_usb.patch
+net-sched-act_mpls-fix-warning-during-failed-attribu.patch
+net-mlx5-fix-ptp-max-frequency-adjustment-range.patch
+net-mlx5e-don-t-support-encap-rules-with-gbp-option.patch
+mm-always-release-pages-to-the-buddy-allocator-in-me.patch
+iommu-mediatek-v1-add-error-handle-for-mtk_iommu_pro.patch
+iommu-mediatek-v1-fix-an-error-handling-path-in-mtk_.patch
+documentation-kvm-add-api-issues-section.patch
+kvm-x86-do-not-return-host-topology-information-from.patch
+x86-resctrl-use-task_curr-instead-of-task_struct-on_.patch
+x86-resctrl-fix-task-closid-rmid-update-race.patch
+arm64-atomics-format-whitespace-consistently.patch
+arm64-atomics-remove-ll-sc-trampolines.patch
+arm64-cmpxchg_double-hazard-against-entire-exchange-.patch
+efi-fix-null-deref-in-init-error-path.patch
+drm-virtio-fix-gem-handle-creation-uaf.patch
diff --git a/queue-5.10/tipc-fix-unexpected-link-reset-due-to-discovery-mess.patch b/queue-5.10/tipc-fix-unexpected-link-reset-due-to-discovery-mess.patch
new file mode 100644 (file)
index 0000000..54c5115
--- /dev/null
@@ -0,0 +1,111 @@
+From c6e1a2b9e0edc263607dfeacff540ed4fbf4b0c9 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 5 Jan 2023 06:02:51 +0000
+Subject: tipc: fix unexpected link reset due to discovery messages
+
+From: Tung Nguyen <tung.q.nguyen@dektech.com.au>
+
+[ Upstream commit c244c092f1ed2acfb5af3d3da81e22367d3dd733 ]
+
+This unexpected behavior is observed:
+
+node 1                    | node 2
+------                    | ------
+link is established       | link is established
+reboot                    | link is reset
+up                        | send discovery message
+receive discovery message |
+link is established       | link is established
+send discovery message    |
+                          | receive discovery message
+                          | link is reset (unexpected)
+                          | send reset message
+link is reset             |
+
+It is due to delayed re-discovery as described in function
+tipc_node_check_dest(): "this link endpoint has already reset
+and re-established contact with the peer, before receiving a
+discovery message from that node."
+
+However, commit 598411d70f85 changed the condition for calling
+tipc_node_link_down(), which used to be the acceptance of a new media
+address.
+
+This commit fixes the issue by restoring the old, correct behavior.
+
+Fixes: 598411d70f85 ("tipc: make resetting of links non-atomic")
+Acked-by: Jon Maloy <jmaloy@redhat.com>
+Signed-off-by: Tung Nguyen <tung.q.nguyen@dektech.com.au>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/tipc/node.c | 12 ++++++++----
+ 1 file changed, 8 insertions(+), 4 deletions(-)
+
+diff --git a/net/tipc/node.c b/net/tipc/node.c
+index 7589f2ac6fd0..38f61dccb855 100644
+--- a/net/tipc/node.c
++++ b/net/tipc/node.c
+@@ -1152,8 +1152,9 @@ void tipc_node_check_dest(struct net *net, u32 addr,
+       bool addr_match = false;
+       bool sign_match = false;
+       bool link_up = false;
++      bool link_is_reset = false;
+       bool accept_addr = false;
+-      bool reset = true;
++      bool reset = false;
+       char *if_name;
+       unsigned long intv;
+       u16 session;
+@@ -1173,14 +1174,14 @@ void tipc_node_check_dest(struct net *net, u32 addr,
+       /* Prepare to validate requesting node's signature and media address */
+       l = le->link;
+       link_up = l && tipc_link_is_up(l);
++      link_is_reset = l && tipc_link_is_reset(l);
+       addr_match = l && !memcmp(&le->maddr, maddr, sizeof(*maddr));
+       sign_match = (signature == n->signature);
+       /* These three flags give us eight permutations: */
+       if (sign_match && addr_match && link_up) {
+-              /* All is fine. Do nothing. */
+-              reset = false;
++              /* All is fine. Ignore requests. */
+               /* Peer node is not a container/local namespace */
+               if (!n->peer_hash_mix)
+                       n->peer_hash_mix = hash_mixes;
+@@ -1205,6 +1206,7 @@ void tipc_node_check_dest(struct net *net, u32 addr,
+                */
+               accept_addr = true;
+               *respond = true;
++              reset = true;
+       } else if (!sign_match && addr_match && link_up) {
+               /* Peer node rebooted. Two possibilities:
+                *  - Delayed re-discovery; this link endpoint has already
+@@ -1236,6 +1238,7 @@ void tipc_node_check_dest(struct net *net, u32 addr,
+               n->signature = signature;
+               accept_addr = true;
+               *respond = true;
++              reset = true;
+       }
+       if (!accept_addr)
+@@ -1264,6 +1267,7 @@ void tipc_node_check_dest(struct net *net, u32 addr,
+               tipc_link_fsm_evt(l, LINK_RESET_EVT);
+               if (n->state == NODE_FAILINGOVER)
+                       tipc_link_fsm_evt(l, LINK_FAILOVER_BEGIN_EVT);
++              link_is_reset = tipc_link_is_reset(l);
+               le->link = l;
+               n->link_cnt++;
+               tipc_node_calculate_timer(n, l);
+@@ -1276,7 +1280,7 @@ void tipc_node_check_dest(struct net *net, u32 addr,
+       memcpy(&le->maddr, maddr, sizeof(*maddr));
+ exit:
+       tipc_node_write_unlock(n);
+-      if (reset && l && !tipc_link_is_reset(l))
++      if (reset && !link_is_reset)
+               tipc_node_link_down(n, b->identity, false);
+       tipc_node_put(n);
+ }
+-- 
+2.35.1
+
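Note the shape of the fix: link_is_reset is sampled while tipc_node_write_lock() is held, and the post-unlock decision acts on that snapshot instead of dereferencing the link again once the lock is dropped. As a generic sketch of the pattern (names are illustrative, not the TIPC API):

	bool need_down = false;

	write_lock(&n->lock);
	/* ... establish or update the link ... */
	need_down = reset && !link_is_reset(l);	/* snapshot under lock */
	write_unlock(&n->lock);

	if (need_down)				/* act on the snapshot */
		node_link_down(n, bearer_id);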
diff --git a/queue-5.10/x86-resctrl-fix-task-closid-rmid-update-race.patch b/queue-5.10/x86-resctrl-fix-task-closid-rmid-update-race.patch
new file mode 100644 (file)
index 0000000..8df3309
--- /dev/null
@@ -0,0 +1,114 @@
+From f32e6342fde016cfd3a2b896c55ef8c274193380 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 20 Dec 2022 17:11:23 +0100
+Subject: x86/resctrl: Fix task CLOSID/RMID update race
+
+From: Peter Newman <peternewman@google.com>
+
+[ Upstream commit fe1f0714385fbcf76b0cbceb02b7277d842014fc ]
+
+When the user moves a running task to a new rdtgroup using the task's
+file interface or by deleting its rdtgroup, the resulting change in
+CLOSID/RMID must be immediately propagated to the PQR_ASSOC MSR on the
+task(s) CPUs.
+
+x86 allows reordering loads with prior stores, so if the task starts
+running between a task_curr() check that the CPU hoisted above the
+stores in the CLOSID/RMID update, it can keep running with the old
+CLOSID/RMID until the next context switch, because
+__rdtgroup_move_task() failed to determine that it needs to be
+interrupted to obtain the new CLOSID/RMID.
+
+Refer to the diagram below:
+
+CPU 0                                   CPU 1
+-----                                   -----
+__rdtgroup_move_task():
+  curr <- t1->cpu->rq->curr
+                                        __schedule():
+                                          rq->curr <- t1
+                                        resctrl_sched_in():
+                                          t1->{closid,rmid} -> {1,1}
+  t1->{closid,rmid} <- {2,2}
+  if (curr == t1) // false
+   IPI(t1->cpu)
+
+A similar race impacts rdt_move_group_tasks(), which updates tasks in a
+deleted rdtgroup.
+
+In both cases, use smp_mb() to order the task_struct::{closid,rmid}
+stores before the loads in task_curr().  In particular, in the
+rdt_move_group_tasks() case, simply execute an smp_mb() on every
+iteration with a matching task.
+
+It is possible to use a single smp_mb() in rdt_move_group_tasks(), but
+this would require two passes and a means of remembering which
+task_structs were updated in the first loop. However, benchmarking
+results below showed too little performance impact in the simple
+approach to justify implementing the two-pass approach.
+
+Times below were collected using `perf stat` to measure the time to
+remove a group containing a 1600-task, parallel workload.
+
+CPU: Intel(R) Xeon(R) Platinum P-8136 CPU @ 2.00GHz (112 threads)
+
+  # mkdir /sys/fs/resctrl/test
+  # echo $$ > /sys/fs/resctrl/test/tasks
+  # perf bench sched messaging -g 40 -l 100000
+
+task-clock time ranges collected using:
+
+  # perf stat rmdir /sys/fs/resctrl/test
+
+Baseline:                     1.54 - 1.60 ms
+smp_mb() every matching task: 1.57 - 1.67 ms
+
+  [ bp: Massage commit message. ]
+
+Fixes: ae28d1aae48a ("x86/resctrl: Use an IPI instead of task_work_add() to update PQR_ASSOC MSR")
+Fixes: 0efc89be9471 ("x86/intel_rdt: Update task closid immediately on CPU in rmdir and unmount")
+Signed-off-by: Peter Newman <peternewman@google.com>
+Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
+Reviewed-by: Reinette Chatre <reinette.chatre@intel.com>
+Reviewed-by: Babu Moger <babu.moger@amd.com>
+Cc: <stable@kernel.org>
+Link: https://lore.kernel.org/r/20221220161123.432120-1-peternewman@google.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/x86/kernel/cpu/resctrl/rdtgroup.c | 12 +++++++++++-
+ 1 file changed, 11 insertions(+), 1 deletion(-)
+
+diff --git a/arch/x86/kernel/cpu/resctrl/rdtgroup.c b/arch/x86/kernel/cpu/resctrl/rdtgroup.c
+index f2ad3c117426..ff26de11b3f1 100644
+--- a/arch/x86/kernel/cpu/resctrl/rdtgroup.c
++++ b/arch/x86/kernel/cpu/resctrl/rdtgroup.c
+@@ -577,8 +577,10 @@ static int __rdtgroup_move_task(struct task_struct *tsk,
+       /*
+        * Ensure the task's closid and rmid are written before determining if
+        * the task is current that will decide if it will be interrupted.
++       * This pairs with the full barrier between the rq->curr update and
++       * resctrl_sched_in() during context switch.
+        */
+-      barrier();
++      smp_mb();
+       /*
+        * By now, the task's closid and rmid are set. If the task is current
+@@ -2313,6 +2315,14 @@ static void rdt_move_group_tasks(struct rdtgroup *from, struct rdtgroup *to,
+                       t->closid = to->closid;
+                       t->rmid = to->mon.rmid;
++                      /*
++                       * Order the closid/rmid stores above before the loads
++                       * in task_curr(). This pairs with the full barrier
++                       * between the rq->curr update and resctrl_sched_in()
++                       * during context switch.
++                       */
++                      smp_mb();
++
+                       /*
+                        * If the task is on a CPU, set the CPU in the mask.
+                        * The detection is inaccurate as tasks might move or
+-- 
+2.35.1
+
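The barrier pairing this patch relies on can be summarized side by side. A sketch with simplified names; the real code lives in __rdtgroup_move_task() and rdt_move_group_tasks():

	/*
	 * Writer (group move)          Scheduler (__schedule())
	 * -------------------          ------------------------
	 * t->closid = new_closid;      rq->curr = t;
	 * t->rmid   = new_rmid;        smp_mb();  <- implied barrier
	 * smp_mb();                    resctrl_sched_in();
	 * if (task_curr(t))              reads t->closid, t->rmid
	 *         IPI(task_cpu(t));
	 *
	 * With a full barrier on both sides, at least one side must
	 * observe the other's update: either task_curr() sees the new
	 * rq->curr and the IPI is sent, or resctrl_sched_in() loads the
	 * new CLOSID/RMID. A compiler-only barrier() is insufficient
	 * because x86 may reorder the task_curr() load before the
	 * prior stores.
	 */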
diff --git a/queue-5.10/x86-resctrl-use-task_curr-instead-of-task_struct-on_.patch b/queue-5.10/x86-resctrl-use-task_curr-instead-of-task_struct-on_.patch
new file mode 100644 (file)
index 0000000..8e859d9
--- /dev/null
@@ -0,0 +1,64 @@
+From 185559e7f1e88f4e77035712683d7a30e0fe4087 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 17 Dec 2020 14:31:20 -0800
+Subject: x86/resctrl: Use task_curr() instead of task_struct->on_cpu to
+ prevent unnecessary IPI
+
+From: Reinette Chatre <reinette.chatre@intel.com>
+
+[ Upstream commit e0ad6dc8969f790f14bddcfd7ea284b7e5f88a16 ]
+
+James reported in [1] that there could be two tasks running on the same CPU
+with task_struct->on_cpu set. Using task_struct->on_cpu as a test of whether
+a task is running on a CPU may thus match the old task for a CPU while the
+scheduler is mid-switch, and IPI it unnecessarily.
+
+task_curr() is the correct helper to use. While doing so, turn the #ifdef
+check on the CONFIG_SMP symbol into a C conditional that determines
+whether this helper should be used, so the compiler always checks the
+code for correctness.
+
+[1] https://lore.kernel.org/lkml/a782d2f3-d2f6-795f-f4b1-9462205fd581@arm.com
+
+Reported-by: James Morse <james.morse@arm.com>
+Signed-off-by: Reinette Chatre <reinette.chatre@intel.com>
+Signed-off-by: Borislav Petkov <bp@suse.de>
+Link: https://lkml.kernel.org/r/e9e68ce1441a73401e08b641cc3b9a3cf13fe6d4.1608243147.git.reinette.chatre@intel.com
+Stable-dep-of: fe1f0714385f ("x86/resctrl: Fix task CLOSID/RMID update race")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/x86/kernel/cpu/resctrl/rdtgroup.c | 14 +++++---------
+ 1 file changed, 5 insertions(+), 9 deletions(-)
+
+diff --git a/arch/x86/kernel/cpu/resctrl/rdtgroup.c b/arch/x86/kernel/cpu/resctrl/rdtgroup.c
+index 5a59e3315b34..f2ad3c117426 100644
+--- a/arch/x86/kernel/cpu/resctrl/rdtgroup.c
++++ b/arch/x86/kernel/cpu/resctrl/rdtgroup.c
+@@ -2313,19 +2313,15 @@ static void rdt_move_group_tasks(struct rdtgroup *from, struct rdtgroup *to,
+                       t->closid = to->closid;
+                       t->rmid = to->mon.rmid;
+-#ifdef CONFIG_SMP
+                       /*
+-                       * This is safe on x86 w/o barriers as the ordering
+-                       * of writing to task_cpu() and t->on_cpu is
+-                       * reverse to the reading here. The detection is
+-                       * inaccurate as tasks might move or schedule
+-                       * before the smp function call takes place. In
+-                       * such a case the function call is pointless, but
++                       * If the task is on a CPU, set the CPU in the mask.
++                       * The detection is inaccurate as tasks might move or
++                       * schedule before the smp function call takes place.
++                       * In such a case the function call is pointless, but
+                        * there is no other side effect.
+                        */
+-                      if (mask && t->on_cpu)
++                      if (IS_ENABLED(CONFIG_SMP) && mask && task_curr(t))
+                               cpumask_set_cpu(task_cpu(t), mask);
+-#endif
+               }
+       }
+       read_unlock(&tasklist_lock);
+-- 
+2.35.1
+
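A closing note on the IS_ENABLED() conversion in this last patch: unlike #ifdef, it keeps the branch visible to the compiler on !CONFIG_SMP builds, where the condition folds to constant-false and the body is eliminated as dead code after being type-checked. A contrast sketch:

	#ifdef CONFIG_SMP
		/* Compiled only on SMP builds; invisible to the
		 * compiler otherwise, so !SMP breakage goes unnoticed.
		 */
		if (mask && task_curr(t))
			cpumask_set_cpu(task_cpu(t), mask);
	#endif

	/* Equivalent, but always parsed and type-checked; on !SMP the
	 * condition is constant-false and the call is optimized away.
	 */
	if (IS_ENABLED(CONFIG_SMP) && mask && task_curr(t))
		cpumask_set_cpu(task_cpu(t), mask);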