From: Greg Kroah-Hartman
Date: Sun, 23 Aug 2020 11:32:30 +0000 (+0200)
Subject: 4.9-stable patches
X-Git-Tag: v4.4.234~56
X-Git-Url: http://git.ipfire.org/?a=commitdiff_plain;h=3ea0b532d1ef76e256a87f9a611b5fb71d7c1204;p=thirdparty%2Fkernel%2Fstable-queue.git

4.9-stable patches

added patches:
	x86-asm-add-instruction-suffixes-to-bitops.patch
	x86-asm-remove-unnecessary-n-t-in-front-of-cc_set-from-asm-templates.patch
---

diff --git a/queue-4.9/series b/queue-4.9/series
index d14f0d1657d..58da9b584cd 100644
--- a/queue-4.9/series
+++ b/queue-4.9/series
@@ -1,3 +1,5 @@
+x86-asm-remove-unnecessary-n-t-in-front-of-cc_set-from-asm-templates.patch
+x86-asm-add-instruction-suffixes-to-bitops.patch
 drm-imx-imx-ldb-disable-both-channels-for-split-mode.patch
 perf-probe-fix-memory-leakage-when-the-probe-point-i.patch
 tracing-clean-up-the-hwlat-binding-code.patch
diff --git a/queue-4.9/x86-asm-add-instruction-suffixes-to-bitops.patch b/queue-4.9/x86-asm-add-instruction-suffixes-to-bitops.patch
new file mode 100644
index 00000000000..a2dc9a07613
--- /dev/null
+++ b/queue-4.9/x86-asm-add-instruction-suffixes-to-bitops.patch
@@ -0,0 +1,158 @@
+From 22636f8c9511245cb3c8412039f1dd95afb3aa59 Mon Sep 17 00:00:00 2001
+From: Jan Beulich
+Date: Mon, 26 Feb 2018 04:11:51 -0700
+Subject: x86/asm: Add instruction suffixes to bitops
+
+From: Jan Beulich
+
+commit 22636f8c9511245cb3c8412039f1dd95afb3aa59 upstream.
+
+Omitting suffixes from instructions in AT&T mode is bad practice when
+operand size cannot be determined by the assembler from register
+operands, and is likely going to be warned about by upstream gas in the
+future (mine does already). Add the missing suffixes here. Note that for
+64-bit this means some operations change from being 32-bit to 64-bit.
+
+Signed-off-by: Jan Beulich
+Signed-off-by: Thomas Gleixner
+Link: https://lkml.kernel.org/r/5A93F98702000078001ABACC@prv-mh.provo.novell.com
+Signed-off-by: Greg Kroah-Hartman
+
+---
+ arch/x86/include/asm/bitops.h |   29 ++++++++++++++++-------------
+ arch/x86/include/asm/percpu.h |    2 +-
+ 2 files changed, 17 insertions(+), 14 deletions(-)
+
+--- a/arch/x86/include/asm/bitops.h
++++ b/arch/x86/include/asm/bitops.h
+@@ -77,7 +77,7 @@ set_bit(long nr, volatile unsigned long
+ 		: "iq" ((u8)CONST_MASK(nr))
+ 		: "memory");
+ 	} else {
+-		asm volatile(LOCK_PREFIX "bts %1,%0"
++		asm volatile(LOCK_PREFIX __ASM_SIZE(bts) " %1,%0"
+ 			: BITOP_ADDR(addr) : "Ir" (nr) : "memory");
+ 	}
+ }
+@@ -93,7 +93,7 @@ set_bit(long nr, volatile unsigned long
+  */
+ static __always_inline void __set_bit(long nr, volatile unsigned long *addr)
+ {
+-	asm volatile("bts %1,%0" : ADDR : "Ir" (nr) : "memory");
++	asm volatile(__ASM_SIZE(bts) " %1,%0" : ADDR : "Ir" (nr) : "memory");
+ }
+
+ /**
+@@ -114,7 +114,7 @@ clear_bit(long nr, volatile unsigned lon
+ 		: CONST_MASK_ADDR(nr, addr)
+ 		: "iq" ((u8)~CONST_MASK(nr)));
+ 	} else {
+-		asm volatile(LOCK_PREFIX "btr %1,%0"
++		asm volatile(LOCK_PREFIX __ASM_SIZE(btr) " %1,%0"
+ 			: BITOP_ADDR(addr)
+ 			: "Ir" (nr));
+ 	}
+@@ -136,7 +136,7 @@ static __always_inline void clear_bit_un
+
+ static __always_inline void __clear_bit(long nr, volatile unsigned long *addr)
+ {
+-	asm volatile("btr %1,%0" : ADDR : "Ir" (nr));
++	asm volatile(__ASM_SIZE(btr) " %1,%0" : ADDR : "Ir" (nr));
+ }
+
+ /*
+@@ -168,7 +168,7 @@ static __always_inline void __clear_bit_
+  */
+ static __always_inline void __change_bit(long nr, volatile unsigned long *addr)
+ {
+-	asm volatile("btc %1,%0" : ADDR : "Ir" (nr));
++	asm volatile(__ASM_SIZE(btc) " %1,%0" : ADDR : "Ir" (nr));
+ }
+
+ /**
+@@ -187,7 +187,7 @@ static __always_inline void change_bit(l
+ 		: CONST_MASK_ADDR(nr, addr)
+ 		: "iq" ((u8)CONST_MASK(nr)));
+ 	} else {
+-		asm volatile(LOCK_PREFIX "btc %1,%0"
++		asm volatile(LOCK_PREFIX __ASM_SIZE(btc) " %1,%0"
+ 			: BITOP_ADDR(addr)
+ 			: "Ir" (nr));
+ 	}
+@@ -203,7 +203,8 @@ static __always_inline void change_bit(l
+  */
+ static __always_inline bool test_and_set_bit(long nr, volatile unsigned long *addr)
+ {
+-	GEN_BINARY_RMWcc(LOCK_PREFIX "bts", *addr, "Ir", nr, "%0", c);
++	GEN_BINARY_RMWcc(LOCK_PREFIX __ASM_SIZE(bts),
++			 *addr, "Ir", nr, "%0", c);
+ }
+
+ /**
+@@ -232,7 +233,7 @@ static __always_inline bool __test_and_s
+ {
+ 	bool oldbit;
+
+-	asm("bts %2,%1"
++	asm(__ASM_SIZE(bts) " %2,%1"
+ 	    CC_SET(c)
+ 	    : CC_OUT(c) (oldbit), ADDR
+ 	    : "Ir" (nr));
+@@ -249,7 +250,8 @@ static __always_inline bool __test_and_s
+  */
+ static __always_inline bool test_and_clear_bit(long nr, volatile unsigned long *addr)
+ {
+-	GEN_BINARY_RMWcc(LOCK_PREFIX "btr", *addr, "Ir", nr, "%0", c);
++	GEN_BINARY_RMWcc(LOCK_PREFIX __ASM_SIZE(btr),
++			 *addr, "Ir", nr, "%0", c);
+ }
+
+ /**
+@@ -272,7 +274,7 @@ static __always_inline bool __test_and_c
+ {
+ 	bool oldbit;
+
+-	asm volatile("btr %2,%1"
++	asm volatile(__ASM_SIZE(btr) " %2,%1"
+ 	    CC_SET(c)
+ 	    : CC_OUT(c) (oldbit), ADDR
+ 	    : "Ir" (nr));
+@@ -284,7 +286,7 @@ static __always_inline bool __test_and_c
+ {
+ 	bool oldbit;
+
+-	asm volatile("btc %2,%1"
++	asm volatile(__ASM_SIZE(btc) " %2,%1"
+ 	    CC_SET(c)
+ 	    : CC_OUT(c) (oldbit), ADDR
+ 	    : "Ir" (nr) : "memory");
+@@ -302,7 +304,8 @@ static __always_inline bool __test_and_c
+  */
+ static __always_inline bool test_and_change_bit(long nr, volatile unsigned long *addr)
+ {
+-	GEN_BINARY_RMWcc(LOCK_PREFIX "btc", *addr, "Ir", nr, "%0", c);
++	GEN_BINARY_RMWcc(LOCK_PREFIX __ASM_SIZE(btc),
++			 *addr, "Ir", nr, "%0", c);
+ }
+
+ static __always_inline bool constant_test_bit(long nr, const volatile unsigned long *addr)
+@@ -315,7 +318,7 @@ static __always_inline bool variable_tes
+ {
+ 	bool oldbit;
+
+-	asm volatile("bt %2,%1"
++	asm volatile(__ASM_SIZE(bt) " %2,%1"
+ 	    CC_SET(c)
+ 	    : CC_OUT(c) (oldbit)
+ 	    : "m" (*(unsigned long *)addr), "Ir" (nr));
+--- a/arch/x86/include/asm/percpu.h
++++ b/arch/x86/include/asm/percpu.h
+@@ -536,7 +536,7 @@ static inline bool x86_this_cpu_variable
+ {
+ 	bool oldbit;
+
+-	asm volatile("bt "__percpu_arg(2)",%1"
++	asm volatile("btl "__percpu_arg(2)",%1"
+ 	    CC_SET(c)
+ 	    : CC_OUT(c) (oldbit)
+ 	    : "m" (*(unsigned long __percpu *)addr), "Ir" (nr));
diff --git a/queue-4.9/x86-asm-remove-unnecessary-n-t-in-front-of-cc_set-from-asm-templates.patch b/queue-4.9/x86-asm-remove-unnecessary-n-t-in-front-of-cc_set-from-asm-templates.patch
new file mode 100644
index 00000000000..b686f960d89
--- /dev/null
+++ b/queue-4.9/x86-asm-remove-unnecessary-n-t-in-front-of-cc_set-from-asm-templates.patch
@@ -0,0 +1,112 @@
+From 3c52b5c64326d9dcfee4e10611c53ec1b1b20675 Mon Sep 17 00:00:00 2001
+From: Uros Bizjak
+Date: Wed, 6 Sep 2017 17:18:08 +0200
+Subject: x86/asm: Remove unnecessary \n\t in front of CC_SET() from asm templates
+
+From: Uros Bizjak
+
+commit 3c52b5c64326d9dcfee4e10611c53ec1b1b20675 upstream.
+
+There is no need for \n\t in front of CC_SET(), as the macro already
+includes these two.
+
+Signed-off-by: Uros Bizjak
+Cc: Linus Torvalds
+Cc: Peter Zijlstra
+Cc: Thomas Gleixner
+Link: http://lkml.kernel.org/r/20170906151808.5634-1-ubizjak@gmail.com
+Signed-off-by: Ingo Molnar
+Signed-off-by: Greg Kroah-Hartman
+
+---
+ arch/x86/include/asm/archrandom.h |    8 ++++----
+ arch/x86/include/asm/bitops.h     |    8 ++++----
+ arch/x86/include/asm/percpu.h     |    2 +-
+ 3 files changed, 9 insertions(+), 9 deletions(-)
+
+--- a/arch/x86/include/asm/archrandom.h
++++ b/arch/x86/include/asm/archrandom.h
+@@ -45,7 +45,7 @@ static inline bool rdrand_long(unsigned
+ 	bool ok;
+ 	unsigned int retry = RDRAND_RETRY_LOOPS;
+ 	do {
+-		asm volatile(RDRAND_LONG "\n\t"
++		asm volatile(RDRAND_LONG
+ 			     CC_SET(c)
+ 			     : CC_OUT(c) (ok), "=a" (*v));
+ 		if (ok)
+@@ -59,7 +59,7 @@ static inline bool rdrand_int(unsigned i
+ 	bool ok;
+ 	unsigned int retry = RDRAND_RETRY_LOOPS;
+ 	do {
+-		asm volatile(RDRAND_INT "\n\t"
++		asm volatile(RDRAND_INT
+ 			     CC_SET(c)
+ 			     : CC_OUT(c) (ok), "=a" (*v));
+ 		if (ok)
+@@ -71,7 +71,7 @@ static inline bool rdrand_int(unsigned i
+ static inline bool rdseed_long(unsigned long *v)
+ {
+ 	bool ok;
+-	asm volatile(RDSEED_LONG "\n\t"
++	asm volatile(RDSEED_LONG
+ 		     CC_SET(c)
+ 		     : CC_OUT(c) (ok), "=a" (*v));
+ 	return ok;
+@@ -80,7 +80,7 @@ static inline bool rdseed_long(unsigned
+ static inline bool rdseed_int(unsigned int *v)
+ {
+ 	bool ok;
+-	asm volatile(RDSEED_INT "\n\t"
++	asm volatile(RDSEED_INT
+ 		     CC_SET(c)
+ 		     : CC_OUT(c) (ok), "=a" (*v));
+ 	return ok;
+--- a/arch/x86/include/asm/bitops.h
++++ b/arch/x86/include/asm/bitops.h
+@@ -232,7 +232,7 @@ static __always_inline bool __test_and_s
+ {
+ 	bool oldbit;
+
+-	asm("bts %2,%1\n\t"
++	asm("bts %2,%1"
+ 	    CC_SET(c)
+ 	    : CC_OUT(c) (oldbit), ADDR
+ 	    : "Ir" (nr));
+@@ -272,7 +272,7 @@ static __always_inline bool __test_and_c
+ {
+ 	bool oldbit;
+
+-	asm volatile("btr %2,%1\n\t"
++	asm volatile("btr %2,%1"
+ 	    CC_SET(c)
+ 	    : CC_OUT(c) (oldbit), ADDR
+ 	    : "Ir" (nr));
+@@ -284,7 +284,7 @@ static __always_inline bool __test_and_c
+ {
+ 	bool oldbit;
+
+-	asm volatile("btc %2,%1\n\t"
++	asm volatile("btc %2,%1"
+ 	    CC_SET(c)
+ 	    : CC_OUT(c) (oldbit), ADDR
+ 	    : "Ir" (nr) : "memory");
+@@ -315,7 +315,7 @@ static __always_inline bool variable_tes
+ {
+ 	bool oldbit;
+
+-	asm volatile("bt %2,%1\n\t"
++	asm volatile("bt %2,%1"
+ 	    CC_SET(c)
+ 	    : CC_OUT(c) (oldbit)
+ 	    : "m" (*(unsigned long *)addr), "Ir" (nr));
+--- a/arch/x86/include/asm/percpu.h
++++ b/arch/x86/include/asm/percpu.h
+@@ -536,7 +536,7 @@ static inline bool x86_this_cpu_variable
+ {
+ 	bool oldbit;
+
+-	asm volatile("bt "__percpu_arg(2)",%1\n\t"
++	asm volatile("bt "__percpu_arg(2)",%1"
+ 	    CC_SET(c)
+ 	    : CC_OUT(c) (oldbit)
+ 	    : "m" (*(unsigned long __percpu *)addr), "Ir" (nr));
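
Taken together, the two patches leave asm templates of the form
"btsq %2,%1" followed directly by CC_SET(c): the __ASM_SIZE() suffix
spells out the operand width that a bare "bts" with a memory operand
leaves ambiguous to gas, and the leading "\n\t" is dropped because
CC_SET() already emits those separators itself. Below is a minimal
user-space sketch of the resulting pattern, not the kernel's actual
code: my_test_and_set_bit() is a hypothetical stand-in for the
non-atomic __test_and_set_bit(), and it assumes x86-64 plus a compiler
with asm flag outputs (GCC 6 or newer), whose "=@ccc" constraint is
what CC_SET(c)/CC_OUT(c) expand to on such compilers.

/* build: gcc -O2 -o bts_demo bts_demo.c  (x86-64 only) */
#include <stdbool.h>
#include <stdio.h>

/* Non-atomic test-and-set of bit nr in *addr, returning the old bit. */
static inline bool my_test_and_set_bit(long nr, volatile unsigned long *addr)
{
	bool oldbit;

	/* "btsq" names the 64-bit operand size explicitly; "=@ccc"
	 * captures the carry flag directly instead of a setc sequence. */
	asm("btsq %2,%1"
	    : "=@ccc" (oldbit), "+m" (*addr)
	    : "Ir" (nr));
	return oldbit;
}

int main(void)
{
	unsigned long word = 0;

	printf("first call:  %d\n", my_test_and_set_bit(5, &word)); /* 0 */
	printf("second call: %d\n", my_test_and_set_bit(5, &word)); /* 1 */
	printf("word:        %#lx\n", word);                        /* 0x20 */
	return 0;
}

The suffix is not purely cosmetic on 64-bit: for the immediate forms
the CPU interprets the bit offset modulo the operand size, which is why
the bitops commit message notes that some operations change from being
32-bit to 64-bit.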