--- /dev/null
+From foo@baz Thu Mar 4 02:48:46 PM CET 2021
+From: Will Deacon <will.deacon@arm.com>
+Date: Thu, 13 Sep 2018 13:30:45 +0100
+Subject: arm64: Avoid redundant type conversions in xchg() and cmpxchg()
+
+From: Will Deacon <will.deacon@arm.com>
+
+commit 5ef3fe4cecdf82fdd71ce78988403963d01444d4 upstream.
+
+Our atomic instructions (either LSE atomics or LDXR/STXR sequences)
+natively support byte, half-word, word and double-word memory accesses
+so there is no need to mask the data register prior to being stored.
+
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+[bwh: Backported to 4.19: adjust context]
+Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
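+A rough user-space sketch of the idea, illustrative only and not part of
+the patch (the __atomic builtin stands in for the LL/SC or LSE asm): because
+each helper takes and returns a sized u8/u16/u32/u64 value, the data is
+handled at its natural width and never needs masking to or from unsigned
+long.
+
+#include <stdint.h>
+
+typedef uint8_t  u8;
+typedef uint16_t u16;
+typedef uint32_t u32;
+typedef uint64_t u64;
+
+#define CMPXCHG_CASE(sz)                                                 \
+static inline u##sz cmpxchg_case_##sz(volatile void *ptr,                \
+                                      u##sz old, u##sz new)              \
+{                                                                        \
+        /* returns the value observed in memory, like the kernel helpers */ \
+        __atomic_compare_exchange_n((volatile u##sz *)ptr, &old, new,    \
+                                    0, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST); \
+        return old;                                                      \
+}
+
+CMPXCHG_CASE(8)
+CMPXCHG_CASE(16)
+CMPXCHG_CASE(32)
+CMPXCHG_CASE(64)
+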
+ arch/arm64/include/asm/atomic_ll_sc.h | 53 +++++++--------
+ arch/arm64/include/asm/atomic_lse.h | 46 ++++++-------
+ arch/arm64/include/asm/cmpxchg.h | 116 +++++++++++++++++-----------------
+ 3 files changed, 108 insertions(+), 107 deletions(-)
+
+--- a/arch/arm64/include/asm/atomic_ll_sc.h
++++ b/arch/arm64/include/asm/atomic_ll_sc.h
+@@ -248,48 +248,49 @@ __LL_SC_PREFIX(atomic64_dec_if_positive(
+ }
+ __LL_SC_EXPORT(atomic64_dec_if_positive);
+
+-#define __CMPXCHG_CASE(w, sz, name, mb, acq, rel, cl) \
+-__LL_SC_INLINE unsigned long \
+-__LL_SC_PREFIX(__cmpxchg_case_##name(volatile void *ptr, \
+- unsigned long old, \
+- unsigned long new)) \
++#define __CMPXCHG_CASE(w, sfx, name, sz, mb, acq, rel, cl) \
++__LL_SC_INLINE u##sz \
++__LL_SC_PREFIX(__cmpxchg_case_##name##sz(volatile void *ptr, \
++ unsigned long old, \
++ u##sz new)) \
+ { \
+- unsigned long tmp, oldval; \
++ unsigned long tmp; \
++ u##sz oldval; \
+ \
+ asm volatile( \
+ " prfm pstl1strm, %[v]\n" \
+- "1: ld" #acq "xr" #sz "\t%" #w "[oldval], %[v]\n" \
++ "1: ld" #acq "xr" #sfx "\t%" #w "[oldval], %[v]\n" \
+ " eor %" #w "[tmp], %" #w "[oldval], %" #w "[old]\n" \
+ " cbnz %" #w "[tmp], 2f\n" \
+- " st" #rel "xr" #sz "\t%w[tmp], %" #w "[new], %[v]\n" \
++ " st" #rel "xr" #sfx "\t%w[tmp], %" #w "[new], %[v]\n" \
+ " cbnz %w[tmp], 1b\n" \
+ " " #mb "\n" \
+ "2:" \
+ : [tmp] "=&r" (tmp), [oldval] "=&r" (oldval), \
+- [v] "+Q" (*(unsigned long *)ptr) \
++ [v] "+Q" (*(u##sz *)ptr) \
+ : [old] "Lr" (old), [new] "r" (new) \
+ : cl); \
+ \
+ return oldval; \
+ } \
+-__LL_SC_EXPORT(__cmpxchg_case_##name);
++__LL_SC_EXPORT(__cmpxchg_case_##name##sz);
+
+-__CMPXCHG_CASE(w, b, 1, , , , )
+-__CMPXCHG_CASE(w, h, 2, , , , )
+-__CMPXCHG_CASE(w, , 4, , , , )
+-__CMPXCHG_CASE( , , 8, , , , )
+-__CMPXCHG_CASE(w, b, acq_1, , a, , "memory")
+-__CMPXCHG_CASE(w, h, acq_2, , a, , "memory")
+-__CMPXCHG_CASE(w, , acq_4, , a, , "memory")
+-__CMPXCHG_CASE( , , acq_8, , a, , "memory")
+-__CMPXCHG_CASE(w, b, rel_1, , , l, "memory")
+-__CMPXCHG_CASE(w, h, rel_2, , , l, "memory")
+-__CMPXCHG_CASE(w, , rel_4, , , l, "memory")
+-__CMPXCHG_CASE( , , rel_8, , , l, "memory")
+-__CMPXCHG_CASE(w, b, mb_1, dmb ish, , l, "memory")
+-__CMPXCHG_CASE(w, h, mb_2, dmb ish, , l, "memory")
+-__CMPXCHG_CASE(w, , mb_4, dmb ish, , l, "memory")
+-__CMPXCHG_CASE( , , mb_8, dmb ish, , l, "memory")
++__CMPXCHG_CASE(w, b, , 8, , , , )
++__CMPXCHG_CASE(w, h, , 16, , , , )
++__CMPXCHG_CASE(w, , , 32, , , , )
++__CMPXCHG_CASE( , , , 64, , , , )
++__CMPXCHG_CASE(w, b, acq_, 8, , a, , "memory")
++__CMPXCHG_CASE(w, h, acq_, 16, , a, , "memory")
++__CMPXCHG_CASE(w, , acq_, 32, , a, , "memory")
++__CMPXCHG_CASE( , , acq_, 64, , a, , "memory")
++__CMPXCHG_CASE(w, b, rel_, 8, , , l, "memory")
++__CMPXCHG_CASE(w, h, rel_, 16, , , l, "memory")
++__CMPXCHG_CASE(w, , rel_, 32, , , l, "memory")
++__CMPXCHG_CASE( , , rel_, 64, , , l, "memory")
++__CMPXCHG_CASE(w, b, mb_, 8, dmb ish, , l, "memory")
++__CMPXCHG_CASE(w, h, mb_, 16, dmb ish, , l, "memory")
++__CMPXCHG_CASE(w, , mb_, 32, dmb ish, , l, "memory")
++__CMPXCHG_CASE( , , mb_, 64, dmb ish, , l, "memory")
+
+ #undef __CMPXCHG_CASE
+
+--- a/arch/arm64/include/asm/atomic_lse.h
++++ b/arch/arm64/include/asm/atomic_lse.h
+@@ -480,24 +480,24 @@ static inline long atomic64_dec_if_posit
+
+ #define __LL_SC_CMPXCHG(op) __LL_SC_CALL(__cmpxchg_case_##op)
+
+-#define __CMPXCHG_CASE(w, sz, name, mb, cl...) \
+-static inline unsigned long __cmpxchg_case_##name(volatile void *ptr, \
+- unsigned long old, \
+- unsigned long new) \
++#define __CMPXCHG_CASE(w, sfx, name, sz, mb, cl...) \
++static inline u##sz __cmpxchg_case_##name##sz(volatile void *ptr, \
++ unsigned long old, \
++ u##sz new) \
+ { \
+ register unsigned long x0 asm ("x0") = (unsigned long)ptr; \
+ register unsigned long x1 asm ("x1") = old; \
+- register unsigned long x2 asm ("x2") = new; \
++ register u##sz x2 asm ("x2") = new; \
+ \
+ asm volatile( \
+ __LSE_PREAMBLE \
+ ARM64_LSE_ATOMIC_INSN( \
+ /* LL/SC */ \
+- __LL_SC_CMPXCHG(name) \
++ __LL_SC_CMPXCHG(name##sz) \
+ __nops(2), \
+ /* LSE atomics */ \
+ " mov " #w "30, %" #w "[old]\n" \
+- " cas" #mb #sz "\t" #w "30, %" #w "[new], %[v]\n" \
++ " cas" #mb #sfx "\t" #w "30, %" #w "[new], %[v]\n" \
+ " mov %" #w "[ret], " #w "30") \
+ : [ret] "+r" (x0), [v] "+Q" (*(unsigned long *)ptr) \
+ : [old] "r" (x1), [new] "r" (x2) \
+@@ -506,22 +506,22 @@ static inline unsigned long __cmpxchg_ca
+ return x0; \
+ }
+
+-__CMPXCHG_CASE(w, b, 1, )
+-__CMPXCHG_CASE(w, h, 2, )
+-__CMPXCHG_CASE(w, , 4, )
+-__CMPXCHG_CASE(x, , 8, )
+-__CMPXCHG_CASE(w, b, acq_1, a, "memory")
+-__CMPXCHG_CASE(w, h, acq_2, a, "memory")
+-__CMPXCHG_CASE(w, , acq_4, a, "memory")
+-__CMPXCHG_CASE(x, , acq_8, a, "memory")
+-__CMPXCHG_CASE(w, b, rel_1, l, "memory")
+-__CMPXCHG_CASE(w, h, rel_2, l, "memory")
+-__CMPXCHG_CASE(w, , rel_4, l, "memory")
+-__CMPXCHG_CASE(x, , rel_8, l, "memory")
+-__CMPXCHG_CASE(w, b, mb_1, al, "memory")
+-__CMPXCHG_CASE(w, h, mb_2, al, "memory")
+-__CMPXCHG_CASE(w, , mb_4, al, "memory")
+-__CMPXCHG_CASE(x, , mb_8, al, "memory")
++__CMPXCHG_CASE(w, b, , 8, )
++__CMPXCHG_CASE(w, h, , 16, )
++__CMPXCHG_CASE(w, , , 32, )
++__CMPXCHG_CASE(x, , , 64, )
++__CMPXCHG_CASE(w, b, acq_, 8, a, "memory")
++__CMPXCHG_CASE(w, h, acq_, 16, a, "memory")
++__CMPXCHG_CASE(w, , acq_, 32, a, "memory")
++__CMPXCHG_CASE(x, , acq_, 64, a, "memory")
++__CMPXCHG_CASE(w, b, rel_, 8, l, "memory")
++__CMPXCHG_CASE(w, h, rel_, 16, l, "memory")
++__CMPXCHG_CASE(w, , rel_, 32, l, "memory")
++__CMPXCHG_CASE(x, , rel_, 64, l, "memory")
++__CMPXCHG_CASE(w, b, mb_, 8, al, "memory")
++__CMPXCHG_CASE(w, h, mb_, 16, al, "memory")
++__CMPXCHG_CASE(w, , mb_, 32, al, "memory")
++__CMPXCHG_CASE(x, , mb_, 64, al, "memory")
+
+ #undef __LL_SC_CMPXCHG
+ #undef __CMPXCHG_CASE
+--- a/arch/arm64/include/asm/cmpxchg.h
++++ b/arch/arm64/include/asm/cmpxchg.h
+@@ -30,46 +30,46 @@
+ * barrier case is generated as release+dmb for the former and
+ * acquire+release for the latter.
+ */
+-#define __XCHG_CASE(w, sz, name, mb, nop_lse, acq, acq_lse, rel, cl) \
+-static inline unsigned long __xchg_case_##name(unsigned long x, \
+- volatile void *ptr) \
+-{ \
+- unsigned long ret, tmp; \
+- \
+- asm volatile(ARM64_LSE_ATOMIC_INSN( \
+- /* LL/SC */ \
+- " prfm pstl1strm, %2\n" \
+- "1: ld" #acq "xr" #sz "\t%" #w "0, %2\n" \
+- " st" #rel "xr" #sz "\t%w1, %" #w "3, %2\n" \
+- " cbnz %w1, 1b\n" \
+- " " #mb, \
+- /* LSE atomics */ \
+- " swp" #acq_lse #rel #sz "\t%" #w "3, %" #w "0, %2\n" \
+- __nops(3) \
+- " " #nop_lse) \
+- : "=&r" (ret), "=&r" (tmp), "+Q" (*(unsigned long *)ptr) \
+- : "r" (x) \
+- : cl); \
+- \
+- return ret; \
++#define __XCHG_CASE(w, sfx, name, sz, mb, nop_lse, acq, acq_lse, rel, cl) \
++static inline u##sz __xchg_case_##name##sz(u##sz x, volatile void *ptr) \
++{ \
++ u##sz ret; \
++ unsigned long tmp; \
++ \
++ asm volatile(ARM64_LSE_ATOMIC_INSN( \
++ /* LL/SC */ \
++ " prfm pstl1strm, %2\n" \
++ "1: ld" #acq "xr" #sfx "\t%" #w "0, %2\n" \
++ " st" #rel "xr" #sfx "\t%w1, %" #w "3, %2\n" \
++ " cbnz %w1, 1b\n" \
++ " " #mb, \
++ /* LSE atomics */ \
++ " swp" #acq_lse #rel #sfx "\t%" #w "3, %" #w "0, %2\n" \
++ __nops(3) \
++ " " #nop_lse) \
++ : "=&r" (ret), "=&r" (tmp), "+Q" (*(u##sz *)ptr) \
++ : "r" (x) \
++ : cl); \
++ \
++ return ret; \
+ }
+
+-__XCHG_CASE(w, b, 1, , , , , , )
+-__XCHG_CASE(w, h, 2, , , , , , )
+-__XCHG_CASE(w, , 4, , , , , , )
+-__XCHG_CASE( , , 8, , , , , , )
+-__XCHG_CASE(w, b, acq_1, , , a, a, , "memory")
+-__XCHG_CASE(w, h, acq_2, , , a, a, , "memory")
+-__XCHG_CASE(w, , acq_4, , , a, a, , "memory")
+-__XCHG_CASE( , , acq_8, , , a, a, , "memory")
+-__XCHG_CASE(w, b, rel_1, , , , , l, "memory")
+-__XCHG_CASE(w, h, rel_2, , , , , l, "memory")
+-__XCHG_CASE(w, , rel_4, , , , , l, "memory")
+-__XCHG_CASE( , , rel_8, , , , , l, "memory")
+-__XCHG_CASE(w, b, mb_1, dmb ish, nop, , a, l, "memory")
+-__XCHG_CASE(w, h, mb_2, dmb ish, nop, , a, l, "memory")
+-__XCHG_CASE(w, , mb_4, dmb ish, nop, , a, l, "memory")
+-__XCHG_CASE( , , mb_8, dmb ish, nop, , a, l, "memory")
++__XCHG_CASE(w, b, , 8, , , , , , )
++__XCHG_CASE(w, h, , 16, , , , , , )
++__XCHG_CASE(w, , , 32, , , , , , )
++__XCHG_CASE( , , , 64, , , , , , )
++__XCHG_CASE(w, b, acq_, 8, , , a, a, , "memory")
++__XCHG_CASE(w, h, acq_, 16, , , a, a, , "memory")
++__XCHG_CASE(w, , acq_, 32, , , a, a, , "memory")
++__XCHG_CASE( , , acq_, 64, , , a, a, , "memory")
++__XCHG_CASE(w, b, rel_, 8, , , , , l, "memory")
++__XCHG_CASE(w, h, rel_, 16, , , , , l, "memory")
++__XCHG_CASE(w, , rel_, 32, , , , , l, "memory")
++__XCHG_CASE( , , rel_, 64, , , , , l, "memory")
++__XCHG_CASE(w, b, mb_, 8, dmb ish, nop, , a, l, "memory")
++__XCHG_CASE(w, h, mb_, 16, dmb ish, nop, , a, l, "memory")
++__XCHG_CASE(w, , mb_, 32, dmb ish, nop, , a, l, "memory")
++__XCHG_CASE( , , mb_, 64, dmb ish, nop, , a, l, "memory")
+
+ #undef __XCHG_CASE
+
+@@ -80,13 +80,13 @@ static __always_inline unsigned long __
+ { \
+ switch (size) { \
+ case 1: \
+- return __xchg_case##sfx##_1(x, ptr); \
++ return __xchg_case##sfx##_8(x, ptr); \
+ case 2: \
+- return __xchg_case##sfx##_2(x, ptr); \
++ return __xchg_case##sfx##_16(x, ptr); \
+ case 4: \
+- return __xchg_case##sfx##_4(x, ptr); \
++ return __xchg_case##sfx##_32(x, ptr); \
+ case 8: \
+- return __xchg_case##sfx##_8(x, ptr); \
++ return __xchg_case##sfx##_64(x, ptr); \
+ default: \
+ BUILD_BUG(); \
+ } \
+@@ -123,13 +123,13 @@ static __always_inline unsigned long __c
+ { \
+ switch (size) { \
+ case 1: \
+- return __cmpxchg_case##sfx##_1(ptr, (u8)old, new); \
++ return __cmpxchg_case##sfx##_8(ptr, (u8)old, new); \
+ case 2: \
+- return __cmpxchg_case##sfx##_2(ptr, (u16)old, new); \
++ return __cmpxchg_case##sfx##_16(ptr, (u16)old, new); \
+ case 4: \
+- return __cmpxchg_case##sfx##_4(ptr, old, new); \
++ return __cmpxchg_case##sfx##_32(ptr, old, new); \
+ case 8: \
+- return __cmpxchg_case##sfx##_8(ptr, old, new); \
++ return __cmpxchg_case##sfx##_64(ptr, old, new); \
+ default: \
+ BUILD_BUG(); \
+ } \
+@@ -197,16 +197,16 @@ __CMPXCHG_GEN(_mb)
+ __ret; \
+ })
+
+-#define __CMPWAIT_CASE(w, sz, name) \
+-static inline void __cmpwait_case_##name(volatile void *ptr, \
+- unsigned long val) \
++#define __CMPWAIT_CASE(w, sfx, sz) \
++static inline void __cmpwait_case_##sz(volatile void *ptr, \
++ unsigned long val) \
+ { \
+ unsigned long tmp; \
+ \
+ asm volatile( \
+ " sevl\n" \
+ " wfe\n" \
+- " ldxr" #sz "\t%" #w "[tmp], %[v]\n" \
++ " ldxr" #sfx "\t%" #w "[tmp], %[v]\n" \
+ " eor %" #w "[tmp], %" #w "[tmp], %" #w "[val]\n" \
+ " cbnz %" #w "[tmp], 1f\n" \
+ " wfe\n" \
+@@ -215,10 +215,10 @@ static inline void __cmpwait_case_##name
+ : [val] "r" (val)); \
+ }
+
+-__CMPWAIT_CASE(w, b, 1);
+-__CMPWAIT_CASE(w, h, 2);
+-__CMPWAIT_CASE(w, , 4);
+-__CMPWAIT_CASE( , , 8);
++__CMPWAIT_CASE(w, b, 8);
++__CMPWAIT_CASE(w, h, 16);
++__CMPWAIT_CASE(w, , 32);
++__CMPWAIT_CASE( , , 64);
+
+ #undef __CMPWAIT_CASE
+
+@@ -229,13 +229,13 @@ static __always_inline void __cmpwait##s
+ { \
+ switch (size) { \
+ case 1: \
+- return __cmpwait_case##sfx##_1(ptr, (u8)val); \
++ return __cmpwait_case##sfx##_8(ptr, (u8)val); \
+ case 2: \
+- return __cmpwait_case##sfx##_2(ptr, (u16)val); \
++ return __cmpwait_case##sfx##_16(ptr, (u16)val); \
+ case 4: \
+- return __cmpwait_case##sfx##_4(ptr, val); \
++ return __cmpwait_case##sfx##_32(ptr, val); \
+ case 8: \
+- return __cmpwait_case##sfx##_8(ptr, val); \
++ return __cmpwait_case##sfx##_64(ptr, val); \
+ default: \
+ BUILD_BUG(); \
+ } \
--- /dev/null
+From foo@baz Thu Mar 4 02:48:46 PM CET 2021
+From: Will Deacon <will.deacon@arm.com>
+Date: Tue, 18 Sep 2018 09:39:55 +0100
+Subject: arm64: cmpxchg: Use "K" instead of "L" for ll/sc immediate constraint
+
+From: Will Deacon <will.deacon@arm.com>
+
+commit 4230509978f2921182da4e9197964dccdbe463c3 upstream.
+
+The "L" AArch64 machine constraint, which we use for the "old" value in
+an LL/SC cmpxchg(), generates an immediate that is suitable for a 64-bit
+logical instruction. However, for cmpxchg() operations on types smaller
+than 64 bits, this constraint can result in an invalid instruction which
+is correctly rejected by GAS, such as EOR W1, W1, #0xffffffff.
+
+Whilst we could special-case the constraint based on the cmpxchg size,
+it's far easier to change the constraint to "K" and put up with using
+a register for large 64-bit immediates. For out-of-line LL/SC atomics,
+this is all moot anyway.
+
+Reported-by: Robin Murphy <robin.murphy@arm.com>
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
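+A minimal sketch of the difference, illustrative only and assuming an
+AArch64 compiler (the function is hypothetical): 0xffffffff is a legal
+immediate for a 64-bit logical instruction but not for a 32-bit one, so an
+"Lr" constraint lets the compiler emit the invalid "eor w0, w1, #0xffffffff",
+whereas "Kr" rejects that constant and falls back to a register operand (a
+pre-8.1.0 GCC quirk with "K" is handled by a later patch in this series).
+
+static inline unsigned int flip_all_bits(unsigned int x)
+{
+        unsigned int tmp;
+
+        /* with "Kr" this constant cannot use the immediate form */
+        asm("eor %w0, %w1, %w2"
+            : "=r" (tmp)
+            : "r" (x), "Kr" (0xffffffffUL));
+        return tmp;
+}
+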
+ arch/arm64/include/asm/atomic_ll_sc.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/arm64/include/asm/atomic_ll_sc.h
++++ b/arch/arm64/include/asm/atomic_ll_sc.h
+@@ -268,7 +268,7 @@ __LL_SC_PREFIX(__cmpxchg_case_##name##sz
+ "2:" \
+ : [tmp] "=&r" (tmp), [oldval] "=&r" (oldval), \
+ [v] "+Q" (*(u##sz *)ptr) \
+- : [old] "Lr" (old), [new] "r" (new) \
++ : [old] "Kr" (old), [new] "r" (new) \
+ : cl); \
+ \
+ return oldval; \
--- /dev/null
+From f5c6d0fcf90ce07ee0d686d465b19b247ebd5ed7 Mon Sep 17 00:00:00 2001
+From: Shaoying Xu <shaoyi@amazon.com>
+Date: Tue, 16 Feb 2021 18:32:34 +0000
+Subject: arm64 module: set plt* section addresses to 0x0
+
+From: Shaoying Xu <shaoyi@amazon.com>
+
+commit f5c6d0fcf90ce07ee0d686d465b19b247ebd5ed7 upstream.
+
+These plt* and .text.ftrace_trampoline sections specified for arm64 have
+non-zero addresses. Non-zero section addresses in a relocatable ELF would
+confuse GDB when it tries to compute the section offsets and it ends up
+printing wrong symbol addresses. Therefore, set them to zero, which mirrors
+the change in commit 5d8591bc0fba ("module: set ksymtab/kcrctab* section
+addresses to 0x0").
+
+Reported-by: Frank van der Linden <fllinden@amazon.com>
+Signed-off-by: Shaoying Xu <shaoyi@amazon.com>
+Cc: <stable@vger.kernel.org>
+Link: https://lore.kernel.org/r/20210216183234.GA23876@amazon.com
+Signed-off-by: Will Deacon <will@kernel.org>
+[shaoyi@amazon.com: made same changes in arch/arm64/kernel/module.lds for 5.4]
+Signed-off-by: Shaoying Xu <shaoyi@amazon.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+arch/arm64/include/asm/module.lds.h was renamed from arch/arm64/kernel/module.lds
+by commit 596b0474d3d9 ("kbuild: preprocess module linker script") since v5.10.
+Therefore, made same changes in arch/arm64/kernel/module.lds for 5.4.
+
+ arch/arm64/kernel/module.lds | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+--- a/arch/arm64/kernel/module.lds
++++ b/arch/arm64/kernel/module.lds
+@@ -1,5 +1,5 @@
+ SECTIONS {
+- .plt (NOLOAD) : { BYTE(0) }
+- .init.plt (NOLOAD) : { BYTE(0) }
+- .text.ftrace_trampoline (NOLOAD) : { BYTE(0) }
++ .plt 0 (NOLOAD) : { BYTE(0) }
++ .init.plt 0 (NOLOAD) : { BYTE(0) }
++ .text.ftrace_trampoline 0 (NOLOAD) : { BYTE(0) }
+ }
--- /dev/null
+From foo@baz Thu Mar 4 02:48:46 PM CET 2021
+From: Andrew Murray <andrew.murray@arm.com>
+Date: Wed, 28 Aug 2019 18:50:06 +0100
+Subject: arm64: Use correct ll/sc atomic constraints
+
+From: Andrew Murray <andrew.murray@arm.com>
+
+commit 580fa1b874711d633f9b145b7777b0e83ebf3787 upstream.
+
+The A64 ISA accepts distinct (but overlapping) ranges of immediates for:
+
+ * add arithmetic instructions ('I' machine constraint)
+ * sub arithmetic instructions ('J' machine constraint)
+ * 32-bit logical instructions ('K' machine constraint)
+ * 64-bit logical instructions ('L' machine constraint)
+
+... but we currently use the 'I' constraint for many atomic operations
+using sub or logical instructions, which is not always valid.
+
+When CONFIG_ARM64_LSE_ATOMICS is not set, this allows invalid immediates
+to be passed to instructions, potentially resulting in a build failure.
+When CONFIG_ARM64_LSE_ATOMICS is selected the out-of-line ll/sc atomics
+always use a register as they have no visibility of the value passed by
+the caller.
+
+This patch adds a constraint parameter to the ATOMIC_xx and
+__CMPXCHG_CASE macros so that we can pass appropriate constraints for
+each case, with uses updated accordingly.
+
+Unfortunately prior to GCC 8.1.0 the 'K' constraint erroneously accepted
+'4294967295', so we must instead force the use of a register.
+
+Signed-off-by: Andrew Murray <andrew.murray@arm.com>
+Signed-off-by: Will Deacon <will@kernel.org>
+[bwh: Backported to 4.19: adjust context]
+Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
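+A condensed sketch of the mechanism, illustrative only (AArch64, GNU C; the
+MY_ATOMIC_* names are hypothetical): the extra macro argument is stringized
+and concatenated with "r", producing "Ir", "Jr", "Lr" or plain "r", so each
+expansion advertises exactly the immediate range its instruction accepts.
+
+#define MY_ATOMIC_OP(op, asm_op, constraint)                             \
+static inline void my_atomic_##op(int i, int *v)                         \
+{                                                                        \
+        unsigned long tmp;                                               \
+        int result;                                                      \
+                                                                         \
+        asm volatile(                                                    \
+        "1:     ldxr    %w0, %2\n"                                       \
+        "       " #asm_op "     %w0, %w0, %w3\n"                         \
+        "       stxr    %w1, %w0, %2\n"                                  \
+        "       cbnz    %w1, 1b"                                         \
+        : "=&r" (result), "=&r" (tmp), "+Q" (*v)                         \
+        : #constraint "r" (i));                                          \
+}
+
+MY_ATOMIC_OP(add, add, I)       /* add-immediate range */
+MY_ATOMIC_OP(sub, sub, J)       /* sub-immediate range */
+MY_ATOMIC_OP(and, and,  )       /* register only; "K" avoided, see above */
+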
+ arch/arm64/include/asm/atomic_ll_sc.h | 89 +++++++++++++++++-----------------
+ 1 file changed, 47 insertions(+), 42 deletions(-)
+
+--- a/arch/arm64/include/asm/atomic_ll_sc.h
++++ b/arch/arm64/include/asm/atomic_ll_sc.h
+@@ -37,7 +37,7 @@
+ * (the optimize attribute silently ignores these options).
+ */
+
+-#define ATOMIC_OP(op, asm_op) \
++#define ATOMIC_OP(op, asm_op, constraint) \
+ __LL_SC_INLINE void \
+ __LL_SC_PREFIX(atomic_##op(int i, atomic_t *v)) \
+ { \
+@@ -51,11 +51,11 @@ __LL_SC_PREFIX(atomic_##op(int i, atomic
+ " stxr %w1, %w0, %2\n" \
+ " cbnz %w1, 1b" \
+ : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \
+- : "Ir" (i)); \
++ : #constraint "r" (i)); \
+ } \
+ __LL_SC_EXPORT(atomic_##op);
+
+-#define ATOMIC_OP_RETURN(name, mb, acq, rel, cl, op, asm_op) \
++#define ATOMIC_OP_RETURN(name, mb, acq, rel, cl, op, asm_op, constraint)\
+ __LL_SC_INLINE int \
+ __LL_SC_PREFIX(atomic_##op##_return##name(int i, atomic_t *v)) \
+ { \
+@@ -70,14 +70,14 @@ __LL_SC_PREFIX(atomic_##op##_return##nam
+ " cbnz %w1, 1b\n" \
+ " " #mb \
+ : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \
+- : "Ir" (i) \
++ : #constraint "r" (i) \
+ : cl); \
+ \
+ return result; \
+ } \
+ __LL_SC_EXPORT(atomic_##op##_return##name);
+
+-#define ATOMIC_FETCH_OP(name, mb, acq, rel, cl, op, asm_op) \
++#define ATOMIC_FETCH_OP(name, mb, acq, rel, cl, op, asm_op, constraint) \
+ __LL_SC_INLINE int \
+ __LL_SC_PREFIX(atomic_fetch_##op##name(int i, atomic_t *v)) \
+ { \
+@@ -92,7 +92,7 @@ __LL_SC_PREFIX(atomic_fetch_##op##name(i
+ " cbnz %w2, 1b\n" \
+ " " #mb \
+ : "=&r" (result), "=&r" (val), "=&r" (tmp), "+Q" (v->counter) \
+- : "Ir" (i) \
++ : #constraint "r" (i) \
+ : cl); \
+ \
+ return result; \
+@@ -110,8 +110,8 @@ __LL_SC_EXPORT(atomic_fetch_##op##name);
+ ATOMIC_FETCH_OP (_acquire, , a, , "memory", __VA_ARGS__)\
+ ATOMIC_FETCH_OP (_release, , , l, "memory", __VA_ARGS__)
+
+-ATOMIC_OPS(add, add)
+-ATOMIC_OPS(sub, sub)
++ATOMIC_OPS(add, add, I)
++ATOMIC_OPS(sub, sub, J)
+
+ #undef ATOMIC_OPS
+ #define ATOMIC_OPS(...) \
+@@ -121,17 +121,17 @@ ATOMIC_OPS(sub, sub)
+ ATOMIC_FETCH_OP (_acquire, , a, , "memory", __VA_ARGS__)\
+ ATOMIC_FETCH_OP (_release, , , l, "memory", __VA_ARGS__)
+
+-ATOMIC_OPS(and, and)
+-ATOMIC_OPS(andnot, bic)
+-ATOMIC_OPS(or, orr)
+-ATOMIC_OPS(xor, eor)
++ATOMIC_OPS(and, and, )
++ATOMIC_OPS(andnot, bic, )
++ATOMIC_OPS(or, orr, )
++ATOMIC_OPS(xor, eor, )
+
+ #undef ATOMIC_OPS
+ #undef ATOMIC_FETCH_OP
+ #undef ATOMIC_OP_RETURN
+ #undef ATOMIC_OP
+
+-#define ATOMIC64_OP(op, asm_op) \
++#define ATOMIC64_OP(op, asm_op, constraint) \
+ __LL_SC_INLINE void \
+ __LL_SC_PREFIX(atomic64_##op(long i, atomic64_t *v)) \
+ { \
+@@ -145,11 +145,11 @@ __LL_SC_PREFIX(atomic64_##op(long i, ato
+ " stxr %w1, %0, %2\n" \
+ " cbnz %w1, 1b" \
+ : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \
+- : "Ir" (i)); \
++ : #constraint "r" (i)); \
+ } \
+ __LL_SC_EXPORT(atomic64_##op);
+
+-#define ATOMIC64_OP_RETURN(name, mb, acq, rel, cl, op, asm_op) \
++#define ATOMIC64_OP_RETURN(name, mb, acq, rel, cl, op, asm_op, constraint)\
+ __LL_SC_INLINE long \
+ __LL_SC_PREFIX(atomic64_##op##_return##name(long i, atomic64_t *v)) \
+ { \
+@@ -164,14 +164,14 @@ __LL_SC_PREFIX(atomic64_##op##_return##n
+ " cbnz %w1, 1b\n" \
+ " " #mb \
+ : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \
+- : "Ir" (i) \
++ : #constraint "r" (i) \
+ : cl); \
+ \
+ return result; \
+ } \
+ __LL_SC_EXPORT(atomic64_##op##_return##name);
+
+-#define ATOMIC64_FETCH_OP(name, mb, acq, rel, cl, op, asm_op) \
++#define ATOMIC64_FETCH_OP(name, mb, acq, rel, cl, op, asm_op, constraint)\
+ __LL_SC_INLINE long \
+ __LL_SC_PREFIX(atomic64_fetch_##op##name(long i, atomic64_t *v)) \
+ { \
+@@ -186,7 +186,7 @@ __LL_SC_PREFIX(atomic64_fetch_##op##name
+ " cbnz %w2, 1b\n" \
+ " " #mb \
+ : "=&r" (result), "=&r" (val), "=&r" (tmp), "+Q" (v->counter) \
+- : "Ir" (i) \
++ : #constraint "r" (i) \
+ : cl); \
+ \
+ return result; \
+@@ -204,8 +204,8 @@ __LL_SC_EXPORT(atomic64_fetch_##op##name
+ ATOMIC64_FETCH_OP (_acquire,, a, , "memory", __VA_ARGS__) \
+ ATOMIC64_FETCH_OP (_release,, , l, "memory", __VA_ARGS__)
+
+-ATOMIC64_OPS(add, add)
+-ATOMIC64_OPS(sub, sub)
++ATOMIC64_OPS(add, add, I)
++ATOMIC64_OPS(sub, sub, J)
+
+ #undef ATOMIC64_OPS
+ #define ATOMIC64_OPS(...) \
+@@ -215,10 +215,10 @@ ATOMIC64_OPS(sub, sub)
+ ATOMIC64_FETCH_OP (_acquire,, a, , "memory", __VA_ARGS__) \
+ ATOMIC64_FETCH_OP (_release,, , l, "memory", __VA_ARGS__)
+
+-ATOMIC64_OPS(and, and)
+-ATOMIC64_OPS(andnot, bic)
+-ATOMIC64_OPS(or, orr)
+-ATOMIC64_OPS(xor, eor)
++ATOMIC64_OPS(and, and, L)
++ATOMIC64_OPS(andnot, bic, )
++ATOMIC64_OPS(or, orr, L)
++ATOMIC64_OPS(xor, eor, L)
+
+ #undef ATOMIC64_OPS
+ #undef ATOMIC64_FETCH_OP
+@@ -248,7 +248,7 @@ __LL_SC_PREFIX(atomic64_dec_if_positive(
+ }
+ __LL_SC_EXPORT(atomic64_dec_if_positive);
+
+-#define __CMPXCHG_CASE(w, sfx, name, sz, mb, acq, rel, cl) \
++#define __CMPXCHG_CASE(w, sfx, name, sz, mb, acq, rel, cl, constraint) \
+ __LL_SC_INLINE u##sz \
+ __LL_SC_PREFIX(__cmpxchg_case_##name##sz(volatile void *ptr, \
+ unsigned long old, \
+@@ -268,29 +268,34 @@ __LL_SC_PREFIX(__cmpxchg_case_##name##sz
+ "2:" \
+ : [tmp] "=&r" (tmp), [oldval] "=&r" (oldval), \
+ [v] "+Q" (*(u##sz *)ptr) \
+- : [old] "Kr" (old), [new] "r" (new) \
++ : [old] #constraint "r" (old), [new] "r" (new) \
+ : cl); \
+ \
+ return oldval; \
+ } \
+ __LL_SC_EXPORT(__cmpxchg_case_##name##sz);
+
+-__CMPXCHG_CASE(w, b, , 8, , , , )
+-__CMPXCHG_CASE(w, h, , 16, , , , )
+-__CMPXCHG_CASE(w, , , 32, , , , )
+-__CMPXCHG_CASE( , , , 64, , , , )
+-__CMPXCHG_CASE(w, b, acq_, 8, , a, , "memory")
+-__CMPXCHG_CASE(w, h, acq_, 16, , a, , "memory")
+-__CMPXCHG_CASE(w, , acq_, 32, , a, , "memory")
+-__CMPXCHG_CASE( , , acq_, 64, , a, , "memory")
+-__CMPXCHG_CASE(w, b, rel_, 8, , , l, "memory")
+-__CMPXCHG_CASE(w, h, rel_, 16, , , l, "memory")
+-__CMPXCHG_CASE(w, , rel_, 32, , , l, "memory")
+-__CMPXCHG_CASE( , , rel_, 64, , , l, "memory")
+-__CMPXCHG_CASE(w, b, mb_, 8, dmb ish, , l, "memory")
+-__CMPXCHG_CASE(w, h, mb_, 16, dmb ish, , l, "memory")
+-__CMPXCHG_CASE(w, , mb_, 32, dmb ish, , l, "memory")
+-__CMPXCHG_CASE( , , mb_, 64, dmb ish, , l, "memory")
++/*
++ * Earlier versions of GCC (no later than 8.1.0) appear to incorrectly
++ * handle the 'K' constraint for the value 4294967295 - thus we use no
++ * constraint for 32 bit operations.
++ */
++__CMPXCHG_CASE(w, b, , 8, , , , , )
++__CMPXCHG_CASE(w, h, , 16, , , , , )
++__CMPXCHG_CASE(w, , , 32, , , , , )
++__CMPXCHG_CASE( , , , 64, , , , , L)
++__CMPXCHG_CASE(w, b, acq_, 8, , a, , "memory", )
++__CMPXCHG_CASE(w, h, acq_, 16, , a, , "memory", )
++__CMPXCHG_CASE(w, , acq_, 32, , a, , "memory", )
++__CMPXCHG_CASE( , , acq_, 64, , a, , "memory", L)
++__CMPXCHG_CASE(w, b, rel_, 8, , , l, "memory", )
++__CMPXCHG_CASE(w, h, rel_, 16, , , l, "memory", )
++__CMPXCHG_CASE(w, , rel_, 32, , , l, "memory", )
++__CMPXCHG_CASE( , , rel_, 64, , , l, "memory", L)
++__CMPXCHG_CASE(w, b, mb_, 8, dmb ish, , l, "memory", )
++__CMPXCHG_CASE(w, h, mb_, 16, dmb ish, , l, "memory", )
++__CMPXCHG_CASE(w, , mb_, 32, dmb ish, , l, "memory", )
++__CMPXCHG_CASE( , , mb_, 64, dmb ish, , l, "memory", L)
+
+ #undef __CMPXCHG_CASE
+
--- /dev/null
+From ea86f3defd55f141a44146e66cbf8ffb683d60da Mon Sep 17 00:00:00 2001
+From: Sergey Senozhatsky <senozhatsky@chromium.org>
+Date: Thu, 5 Nov 2020 10:47:44 +0900
+Subject: drm/virtio: use kvmalloc for large allocations
+
+From: Sergey Senozhatsky <senozhatsky@chromium.org>
+
+commit ea86f3defd55f141a44146e66cbf8ffb683d60da upstream.
+
+We observed that some of the virtio_gpu_object_shmem_init() allocations
+can be rather costly - order 6 - which can be difficult to fulfill
+under memory pressure conditions. Switch to kvmalloc_array() in
+virtio_gpu_object_shmem_init() and let the kernel vmalloc the entries
+array.
+
+Signed-off-by: Sergey Senozhatsky <senozhatsky@chromium.org>
+Link: http://patchwork.freedesktop.org/patch/msgid/20201105014744.1662226-1-senozhatsky@chromium.org
+Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>
+Signed-off-by: Doug Horn <doughorn@google.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
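+For reference, a hedged sketch of the allocation pattern (hypothetical
+helpers, not from the driver): kvmalloc_array() tries kmalloc first and
+falls back to vmalloc, so the request no longer depends on finding 64
+physically contiguous pages; memory obtained this way must be released
+with kvfree(), not kfree().
+
+#include <linux/mm.h>   /* kvmalloc_array(), kvfree() */
+#include <linux/slab.h>
+
+static void *alloc_entries(unsigned int nents, size_t entry_size)
+{
+        return kvmalloc_array(nents, entry_size, GFP_KERNEL);
+}
+
+static void free_entries(void *ents)
+{
+        kvfree(ents);
+}
+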
+ drivers/gpu/drm/virtio/virtgpu_vq.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+--- a/drivers/gpu/drm/virtio/virtgpu_vq.c
++++ b/drivers/gpu/drm/virtio/virtgpu_vq.c
+@@ -868,9 +868,9 @@ int virtio_gpu_object_attach(struct virt
+ }
+
+ /* gets freed when the ring has consumed it */
+- ents = kmalloc_array(obj->pages->nents,
+- sizeof(struct virtio_gpu_mem_entry),
+- GFP_KERNEL);
++ ents = kvmalloc_array(obj->pages->nents,
++ sizeof(struct virtio_gpu_mem_entry),
++ GFP_KERNEL);
+ if (!ents) {
+ DRM_ERROR("failed to allocate ent list\n");
+ return -ENOMEM;
--- /dev/null
+From dbfee5aee7e54f83d96ceb8e3e80717fac62ad63 Mon Sep 17 00:00:00 2001
+From: Mike Kravetz <mike.kravetz@oracle.com>
+Date: Wed, 24 Feb 2021 12:07:50 -0800
+Subject: hugetlb: fix update_and_free_page contig page struct assumption
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Mike Kravetz <mike.kravetz@oracle.com>
+
+commit dbfee5aee7e54f83d96ceb8e3e80717fac62ad63 upstream.
+
+page structs are not guaranteed to be contiguous for gigantic pages. The
+routine update_and_free_page can encounter a gigantic page, yet it assumes
+page structs are contiguous when setting page flags in subpages.
+
+If update_and_free_page encounters non-contiguous page structs, we can see
+“BUG: Bad page state in process …” errors.
+
+Non-contiguous page structs are generally not an issue. However, they can
+exist with a specific kernel configuration and hotplug operations. For
+example: Configure the kernel with CONFIG_SPARSEMEM and
+!CONFIG_SPARSEMEM_VMEMMAP. Then, hotplug add memory for the area where
+the gigantic page will be allocated. Zi Yan outlined steps to reproduce
+here [1].
+
+[1] https://lore.kernel.org/linux-mm/16F7C58B-4D79-41C5-9B64-A1A1628F4AF2@nvidia.com/
+
+Link: https://lkml.kernel.org/r/20210217184926.33567-1-mike.kravetz@oracle.com
+Fixes: 944d9fec8d7a ("hugetlb: add support for gigantic page allocation at runtime")
+Signed-off-by: Zi Yan <ziy@nvidia.com>
+Signed-off-by: Mike Kravetz <mike.kravetz@oracle.com>
+Cc: Zi Yan <ziy@nvidia.com>
+Cc: Davidlohr Bueso <dbueso@suse.de>
+Cc: "Kirill A . Shutemov" <kirill.shutemov@linux.intel.com>
+Cc: Andrea Arcangeli <aarcange@redhat.com>
+Cc: Matthew Wilcox <willy@infradead.org>
+Cc: Oscar Salvador <osalvador@suse.de>
+Cc: Joao Martins <joao.m.martins@oracle.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Mike Kravetz <mike.kravetz@oracle.com>
+---
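+A hedged sketch of the iteration pattern (hypothetical helper, assumes the
+usual mm headers plus mm/internal.h for mem_map_next()): with
+CONFIG_SPARSEMEM and without SPARSEMEM_VMEMMAP, the struct pages backing a
+gigantic page may live in different memory sections, so plain pointer
+arithmetic such as page[i] can step off the end of a section, while
+mem_map_next() recomputes the pointer on every iteration and stays valid
+across section boundaries.
+
+static void clear_subpage_flags(struct page *page, int nr_pages)
+{
+        struct page *subpage = page;
+        int i;
+
+        for (i = 0; i < nr_pages;
+             i++, subpage = mem_map_next(subpage, page, i))
+                subpage->flags &= ~(1 << PG_locked);
+}
+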
+ mm/hugetlb.c | 6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+--- a/mm/hugetlb.c
++++ b/mm/hugetlb.c
+@@ -1171,14 +1171,16 @@ static inline void destroy_compound_giga
+ static void update_and_free_page(struct hstate *h, struct page *page)
+ {
+ int i;
++ struct page *subpage = page;
+
+ if (hstate_is_gigantic(h) && !gigantic_page_supported())
+ return;
+
+ h->nr_huge_pages--;
+ h->nr_huge_pages_node[page_to_nid(page)]--;
+- for (i = 0; i < pages_per_huge_page(h); i++) {
+- page[i].flags &= ~(1 << PG_locked | 1 << PG_error |
++ for (i = 0; i < pages_per_huge_page(h);
++ i++, subpage = mem_map_next(subpage, page, i)) {
++ subpage->flags &= ~(1 << PG_locked | 1 << PG_error |
+ 1 << PG_referenced | 1 << PG_dirty |
+ 1 << PG_active | 1 << PG_private |
+ 1 << PG_writeback);
--- /dev/null
+From foo@baz Thu Mar 4 02:51:21 PM CET 2021
+From: Nathan Chancellor <natechancellor@gmail.com>
+Date: Fri, 15 Jan 2021 12:26:22 -0700
+Subject: MIPS: VDSO: Use CLANG_FLAGS instead of filtering out '--target='
+
+From: Nathan Chancellor <natechancellor@gmail.com>
+
+commit 76d7fff22be3e4185ee5f9da2eecbd8188e76b2c upstream.
+
+Commit ee67855ecd9d ("MIPS: vdso: Allow clang's --target flag in VDSO
+cflags") allowed the '--target=' flag from the main Makefile to filter
+through to the vDSO. However, it did not bring any of the other clang
+specific flags for controlling the integrated assembler and the GNU
+tools locations (--prefix=, --gcc-toolchain=, and -no-integrated-as).
+Without these, we will get a warning (visible with tinyconfig):
+
+arch/mips/vdso/elf.S:14:1: warning: DWARF2 only supports one section per
+compilation unit
+.pushsection .note.Linux, "a",@note ; .balign 4 ; .long 2f - 1f ; .long
+4484f - 3f ; .long 0 ; 1:.asciz "Linux" ; 2:.balign 4 ; 3:
+^
+arch/mips/vdso/elf.S:34:2: warning: DWARF2 only supports one section per
+compilation unit
+ .section .mips_abiflags, "a"
+ ^
+
+All of these flags are bundled up under CLANG_FLAGS in the main Makefile
+and exported so that they can be added to Makefiles that set their own
+CFLAGS. Use this value instead of filtering out '--target=' so there is
+no warning and all of the tools are properly used.
+
+Cc: stable@vger.kernel.org
+Fixes: ee67855ecd9d ("MIPS: vdso: Allow clang's --target flag in VDSO cflags")
+Link: https://github.com/ClangBuiltLinux/linux/issues/1256
+Reported-by: Anders Roxell <anders.roxell@linaro.org>
+Signed-off-by: Nathan Chancellor <natechancellor@gmail.com>
+Tested-by: Anders Roxell <anders.roxell@linaro.org>
+Signed-off-by: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
+[nc: Fix conflict due to lack of 99570c3da96a and 076f421da5d4 in 4.19]
+Signed-off-by: Nathan Chancellor <nathan@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/mips/vdso/Makefile | 5 +----
+ 1 file changed, 1 insertion(+), 4 deletions(-)
+
+--- a/arch/mips/vdso/Makefile
++++ b/arch/mips/vdso/Makefile
+@@ -10,12 +10,9 @@ ccflags-vdso := \
+ $(filter -march=%,$(KBUILD_CFLAGS)) \
+ $(filter -m%-float,$(KBUILD_CFLAGS)) \
+ $(filter -mno-loongson-%,$(KBUILD_CFLAGS)) \
++ $(CLANG_FLAGS) \
+ -D__VDSO__
+
+-ifeq ($(cc-name),clang)
+-ccflags-vdso += $(filter --target=%,$(KBUILD_CFLAGS))
+-endif
+-
+ cflags-vdso := $(ccflags-vdso) \
+ $(filter -W%,$(filter-out -Wa$(comma)%,$(KBUILD_CFLAGS))) \
+ -O2 -g -fPIC -fno-strict-aliasing -fno-common -fno-builtin -G 0 \
--- /dev/null
+From 88eee9b7b42e69fb622ddb3ff6f37e8e4347f5b2 Mon Sep 17 00:00:00 2001
+From: Lech Perczak <lech.perczak@gmail.com>
+Date: Tue, 23 Feb 2021 19:34:56 +0100
+Subject: net: usb: qmi_wwan: support ZTE P685M modem
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Lech Perczak <lech.perczak@gmail.com>
+
+commit 88eee9b7b42e69fb622ddb3ff6f37e8e4347f5b2 upstream.
+
+Now that interface 3 in "option" driver is no longer mapped, add device
+ID matching it to qmi_wwan.
+
+The modem is used inside ZTE MF283+ router and carriers identify it as
+such.
+Interface mapping is:
+0: QCDM, 1: AT (PCUI), 2: AT (Modem), 3: QMI, 4: ADB
+
+T: Bus=02 Lev=02 Prnt=02 Port=05 Cnt=01 Dev#= 3 Spd=480 MxCh= 0
+D: Ver= 2.01 Cls=00(>ifc ) Sub=00 Prot=00 MxPS=64 #Cfgs= 1
+P: Vendor=19d2 ProdID=1275 Rev=f0.00
+S: Manufacturer=ZTE,Incorporated
+S: Product=ZTE Technologies MSM
+S: SerialNumber=P685M510ZTED0000CP&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&0
+C:* #Ifs= 5 Cfg#= 1 Atr=a0 MxPwr=500mA
+I:* If#= 0 Alt= 0 #EPs= 2 Cls=ff(vend.) Sub=ff Prot=ff Driver=option
+E: Ad=81(I) Atr=02(Bulk) MxPS= 512 Ivl=0ms
+E: Ad=01(O) Atr=02(Bulk) MxPS= 512 Ivl=0ms
+I:* If#= 1 Alt= 0 #EPs= 3 Cls=ff(vend.) Sub=00 Prot=00 Driver=option
+E: Ad=83(I) Atr=03(Int.) MxPS= 10 Ivl=32ms
+E: Ad=82(I) Atr=02(Bulk) MxPS= 512 Ivl=0ms
+E: Ad=02(O) Atr=02(Bulk) MxPS= 512 Ivl=0ms
+I:* If#= 2 Alt= 0 #EPs= 3 Cls=ff(vend.) Sub=00 Prot=00 Driver=option
+E: Ad=85(I) Atr=03(Int.) MxPS= 10 Ivl=32ms
+E: Ad=84(I) Atr=02(Bulk) MxPS= 512 Ivl=0ms
+E: Ad=03(O) Atr=02(Bulk) MxPS= 512 Ivl=0ms
+I:* If#= 3 Alt= 0 #EPs= 3 Cls=ff(vend.) Sub=ff Prot=ff Driver=qmi_wwan
+E: Ad=87(I) Atr=03(Int.) MxPS= 8 Ivl=32ms
+E: Ad=86(I) Atr=02(Bulk) MxPS= 512 Ivl=0ms
+E: Ad=04(O) Atr=02(Bulk) MxPS= 512 Ivl=0ms
+I:* If#= 4 Alt= 0 #EPs= 2 Cls=ff(vend.) Sub=42 Prot=01 Driver=(none)
+E: Ad=88(I) Atr=02(Bulk) MxPS= 512 Ivl=0ms
+E: Ad=05(O) Atr=02(Bulk) MxPS= 512 Ivl=0ms
+
+Acked-by: Bjørn Mork <bjorn@mork.no>
+Signed-off-by: Lech Perczak <lech.perczak@gmail.com>
+Link: https://lore.kernel.org/r/20210223183456.6377-1-lech.perczak@gmail.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/usb/qmi_wwan.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/net/usb/qmi_wwan.c
++++ b/drivers/net/usb/qmi_wwan.c
+@@ -1217,6 +1217,7 @@ static const struct usb_device_id produc
+ {QMI_FIXED_INTF(0x19d2, 0x1255, 4)},
+ {QMI_FIXED_INTF(0x19d2, 0x1256, 4)},
+ {QMI_FIXED_INTF(0x19d2, 0x1270, 5)}, /* ZTE MF667 */
++ {QMI_FIXED_INTF(0x19d2, 0x1275, 3)}, /* ZTE P685M */
+ {QMI_FIXED_INTF(0x19d2, 0x1401, 2)},
+ {QMI_FIXED_INTF(0x19d2, 0x1402, 2)}, /* ZTE MF60 */
+ {QMI_FIXED_INTF(0x19d2, 0x1424, 2)},
--- /dev/null
+From 182f709c5cff683e6732d04c78e328de0532284f Mon Sep 17 00:00:00 2001
+From: Cornelia Huck <cohuck@redhat.com>
+Date: Tue, 16 Feb 2021 12:06:45 +0100
+Subject: virtio/s390: implement virtio-ccw revision 2 correctly
+
+From: Cornelia Huck <cohuck@redhat.com>
+
+commit 182f709c5cff683e6732d04c78e328de0532284f upstream.
+
+CCW_CMD_READ_STATUS was introduced with revision 2 of virtio-ccw,
+and drivers should only rely on it being implemented when they
+negotiated at least that revision with the device.
+
+However, virtio_ccw_get_status() issued READ_STATUS for any
+device operating at least at revision 1. If the device accepts
+READ_STATUS regardless of the negotiated revision (which some
+implementations like QEMU do, even though the spec currently does
+not allow it), everything works as intended. While a device
+rejecting the command should also be handled gracefully, we will
+not be able to see any changes the device makes to the status,
+such as setting NEEDS_RESET or setting the status to zero after
+a completed reset.
+
+We negotiated the revision to at most 1, as we never bumped the
+maximum revision; let's do that now and properly send READ_STATUS
+only if we are operating at least at revision 2.
+
+Cc: stable@vger.kernel.org
+Fixes: 7d3ce5ab9430 ("virtio/s390: support READ_STATUS command for virtio-ccw")
+Reviewed-by: Halil Pasic <pasic@linux.ibm.com>
+Signed-off-by: Cornelia Huck <cohuck@redhat.com>
+Signed-off-by: Vasily Gorbik <gor@linux.ibm.com>
+Link: https://lore.kernel.org/r/20210216110645.1087321-1-cohuck@redhat.com
+Signed-off-by: Vasily Gorbik <gor@linux.ibm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/s390/virtio/virtio_ccw.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/drivers/s390/virtio/virtio_ccw.c
++++ b/drivers/s390/virtio/virtio_ccw.c
+@@ -103,7 +103,7 @@ struct virtio_rev_info {
+ };
+
+ /* the highest virtio-ccw revision we support */
+-#define VIRTIO_CCW_REV_MAX 1
++#define VIRTIO_CCW_REV_MAX 2
+
+ struct virtio_ccw_vq_info {
+ struct virtqueue *vq;
+@@ -911,7 +911,7 @@ static u8 virtio_ccw_get_status(struct v
+ u8 old_status = *vcdev->status;
+ struct ccw1 *ccw;
+
+- if (vcdev->revision < 1)
++ if (vcdev->revision < 2)
+ return *vcdev->status;
+
+ ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);