From: Greg Kroah-Hartman Date: Mon, 15 Mar 2021 08:56:36 +0000 (+0100) Subject: 4.9-stable patches X-Git-Tag: v4.4.262~34 X-Git-Url: http://git.ipfire.org/gitweb.cgi?a=commitdiff_plain;h=651430d792c3469c11bf773cc6b65dfe81263a55;p=thirdparty%2Fkernel%2Fstable-queue.git 4.9-stable patches added patches: alpha-add-src-rather-than-obj-to-make-source-file-path.patch alpha-make-short-build-log-available-for-division-routines.patch alpha-merge-build-rules-of-division-routines.patch alpha-package-string-routines-together.patch alpha-switch-__copy_user-and-__do_clean_user-to-normal-calling-conventions.patch binfmt_misc-fix-possible-deadlock-in-bm_register_write.patch powerpc-64s-fix-instruction-encoding-for-lis-in-ppc_function_entry.patch --- diff --git a/queue-4.9/alpha-add-src-rather-than-obj-to-make-source-file-path.patch b/queue-4.9/alpha-add-src-rather-than-obj-to-make-source-file-path.patch new file mode 100644 index 00000000000..943c6c7754c --- /dev/null +++ b/queue-4.9/alpha-add-src-rather-than-obj-to-make-source-file-path.patch @@ -0,0 +1,37 @@ +From 5ed78e5523fd9ba77b8444d380d54da1f88c53fc Mon Sep 17 00:00:00 2001 +From: Masahiro Yamada +Date: Sun, 11 Sep 2016 16:42:14 +0900 +Subject: alpha: add $(src)/ rather than $(obj)/ to make source file path + +From: Masahiro Yamada + +commit 5ed78e5523fd9ba77b8444d380d54da1f88c53fc upstream. + +$(ev6-y)divide.S is a source file, not a build-time generated file. +So, it should be prefixed with $(src)/ rather than $(obj)/. + +Signed-off-by: Masahiro Yamada +Cc: Guenter Roeck +Signed-off-by: Greg Kroah-Hartman +--- + arch/alpha/lib/Makefile | 8 ++++---- + 1 file changed, 4 insertions(+), 4 deletions(-) + +--- a/arch/alpha/lib/Makefile ++++ b/arch/alpha/lib/Makefile +@@ -46,11 +46,11 @@ AFLAGS___remqu.o = -DREM + AFLAGS___divlu.o = -DDIV -DINTSIZE + AFLAGS___remlu.o = -DREM -DINTSIZE + +-$(obj)/__divqu.o: $(obj)/$(ev6-y)divide.S ++$(obj)/__divqu.o: $(src)/$(ev6-y)divide.S + $(cmd_as_o_S) +-$(obj)/__remqu.o: $(obj)/$(ev6-y)divide.S ++$(obj)/__remqu.o: $(src)/$(ev6-y)divide.S + $(cmd_as_o_S) +-$(obj)/__divlu.o: $(obj)/$(ev6-y)divide.S ++$(obj)/__divlu.o: $(src)/$(ev6-y)divide.S + $(cmd_as_o_S) +-$(obj)/__remlu.o: $(obj)/$(ev6-y)divide.S ++$(obj)/__remlu.o: $(src)/$(ev6-y)divide.S + $(cmd_as_o_S) diff --git a/queue-4.9/alpha-make-short-build-log-available-for-division-routines.patch b/queue-4.9/alpha-make-short-build-log-available-for-division-routines.patch new file mode 100644 index 00000000000..06a75ab447f --- /dev/null +++ b/queue-4.9/alpha-make-short-build-log-available-for-division-routines.patch @@ -0,0 +1,33 @@ +From 3eec0291830e4c28d09f73bab247f3b59172022b Mon Sep 17 00:00:00 2001 +From: Masahiro Yamada +Date: Sun, 11 Sep 2016 16:42:16 +0900 +Subject: alpha: make short build log available for division routines + +From: Masahiro Yamada + +commit 3eec0291830e4c28d09f73bab247f3b59172022b upstream. 
+ +This enables the Kbuild standard log style as follows: + + AS arch/alpha/lib/__divlu.o + AS arch/alpha/lib/__divqu.o + AS arch/alpha/lib/__remlu.o + AS arch/alpha/lib/__remqu.o + +Signed-off-by: Masahiro Yamada +Cc: Guenter Roeck +Signed-off-by: Greg Kroah-Hartman +--- + arch/alpha/lib/Makefile | 4 ++-- + 1 file changed, 2 insertions(+), 2 deletions(-) + +--- a/arch/alpha/lib/Makefile ++++ b/arch/alpha/lib/Makefile +@@ -47,5 +47,5 @@ AFLAGS___divlu.o = -DDIV -DINTSIZE + AFLAGS___remlu.o = -DREM -DINTSIZE + + $(addprefix $(obj)/,__divqu.o __remqu.o __divlu.o __remlu.o): \ +- $(src)/$(ev6-y)divide.S +- $(cmd_as_o_S) ++ $(src)/$(ev6-y)divide.S FORCE ++ $(call if_changed_rule,as_o_S) diff --git a/queue-4.9/alpha-merge-build-rules-of-division-routines.patch b/queue-4.9/alpha-merge-build-rules-of-division-routines.patch new file mode 100644 index 00000000000..a4978237715 --- /dev/null +++ b/queue-4.9/alpha-merge-build-rules-of-division-routines.patch @@ -0,0 +1,35 @@ +From e19a4e3f1bffe45b8e2ea67fcfb0c9c88278c4cc Mon Sep 17 00:00:00 2001 +From: Masahiro Yamada +Date: Sun, 11 Sep 2016 16:42:15 +0900 +Subject: alpha: merge build rules of division routines + +From: Masahiro Yamada + +commit e19a4e3f1bffe45b8e2ea67fcfb0c9c88278c4cc upstream. + +These four objects are generated by the same build rule, with +different compile options. The build rules can be merged. + +Signed-off-by: Masahiro Yamada +Cc: Guenter Roeck +Signed-off-by: Greg Kroah-Hartman +--- + arch/alpha/lib/Makefile | 9 ++------- + 1 file changed, 2 insertions(+), 7 deletions(-) + +--- a/arch/alpha/lib/Makefile ++++ b/arch/alpha/lib/Makefile +@@ -46,11 +46,6 @@ AFLAGS___remqu.o = -DREM + AFLAGS___divlu.o = -DDIV -DINTSIZE + AFLAGS___remlu.o = -DREM -DINTSIZE + +-$(obj)/__divqu.o: $(src)/$(ev6-y)divide.S +- $(cmd_as_o_S) +-$(obj)/__remqu.o: $(src)/$(ev6-y)divide.S +- $(cmd_as_o_S) +-$(obj)/__divlu.o: $(src)/$(ev6-y)divide.S +- $(cmd_as_o_S) +-$(obj)/__remlu.o: $(src)/$(ev6-y)divide.S ++$(addprefix $(obj)/,__divqu.o __remqu.o __divlu.o __remlu.o): \ ++ $(src)/$(ev6-y)divide.S + $(cmd_as_o_S) diff --git a/queue-4.9/alpha-package-string-routines-together.patch b/queue-4.9/alpha-package-string-routines-together.patch new file mode 100644 index 00000000000..6b29e79f28a --- /dev/null +++ b/queue-4.9/alpha-package-string-routines-together.patch @@ -0,0 +1,55 @@ +From 4758ce82e66711b1a4557577e30a5f9b88d4a4b5 Mon Sep 17 00:00:00 2001 +From: Richard Henderson +Date: Fri, 23 Jun 2017 14:20:00 -0700 +Subject: alpha: Package string routines together + +From: Richard Henderson + +commit 4758ce82e66711b1a4557577e30a5f9b88d4a4b5 upstream. + +There are direct branches between {str*cpy,str*cat} and stx*cpy. +Ensure the branches are within range by merging these objects. 
+ +Signed-off-by: Richard Henderson +Signed-off-by: Matt Turner +Cc: Guenter Roeck +Signed-off-by: Greg Kroah-Hartman +--- + arch/alpha/lib/Makefile | 22 ++++++++++++++++------ + 1 file changed, 16 insertions(+), 6 deletions(-) + +--- a/arch/alpha/lib/Makefile ++++ b/arch/alpha/lib/Makefile +@@ -20,12 +20,8 @@ lib-y = __divqu.o __remqu.o __divlu.o __ + checksum.o \ + csum_partial_copy.o \ + $(ev67-y)strlen.o \ +- $(ev67-y)strcat.o \ +- strcpy.o \ +- $(ev67-y)strncat.o \ +- strncpy.o \ +- $(ev6-y)stxcpy.o \ +- $(ev6-y)stxncpy.o \ ++ stycpy.o \ ++ styncpy.o \ + $(ev67-y)strchr.o \ + $(ev67-y)strrchr.o \ + $(ev6-y)memchr.o \ +@@ -49,3 +45,17 @@ AFLAGS___remlu.o = -DREM -DINTSIZE + $(addprefix $(obj)/,__divqu.o __remqu.o __divlu.o __remlu.o): \ + $(src)/$(ev6-y)divide.S FORCE + $(call if_changed_rule,as_o_S) ++ ++# There are direct branches between {str*cpy,str*cat} and stx*cpy. ++# Ensure the branches are within range by merging these objects. ++ ++LDFLAGS_stycpy.o := -r ++LDFLAGS_styncpy.o := -r ++ ++$(obj)/stycpy.o: $(obj)/strcpy.o $(obj)/$(ev67-y)strcat.o \ ++ $(obj)/$(ev6-y)stxcpy.o FORCE ++ $(call if_changed,ld) ++ ++$(obj)/styncpy.o: $(obj)/strncpy.o $(obj)/$(ev67-y)strncat.o \ ++ $(obj)/$(ev6-y)stxncpy.o FORCE ++ $(call if_changed,ld) diff --git a/queue-4.9/alpha-switch-__copy_user-and-__do_clean_user-to-normal-calling-conventions.patch b/queue-4.9/alpha-switch-__copy_user-and-__do_clean_user-to-normal-calling-conventions.patch new file mode 100644 index 00000000000..04690d9a108 --- /dev/null +++ b/queue-4.9/alpha-switch-__copy_user-and-__do_clean_user-to-normal-calling-conventions.patch @@ -0,0 +1,814 @@ +From 8525023121de4848b5f0a7d867ffeadbc477774d Mon Sep 17 00:00:00 2001 +From: Al Viro +Date: Sat, 24 Dec 2016 20:26:18 -0500 +Subject: alpha: switch __copy_user() and __do_clean_user() to normal calling conventions + +From: Al Viro + +commit 8525023121de4848b5f0a7d867ffeadbc477774d upstream. + +They used to need odd calling conventions due to old exception handling +mechanism, the last remnants of which had disappeared back in 2002. + +Signed-off-by: Al Viro +Cc: Guenter Roeck +Signed-off-by: Greg Kroah-Hartman +--- + arch/alpha/include/asm/uaccess.h | 67 ++++--------------------- + arch/alpha/lib/clear_user.S | 66 +++++++++--------------- + arch/alpha/lib/copy_user.S | 82 ++++++++++++------------------ + arch/alpha/lib/ev6-clear_user.S | 84 +++++++++++++------------------ + arch/alpha/lib/ev6-copy_user.S | 104 ++++++++++++++++----------------------- + 5 files changed, 151 insertions(+), 252 deletions(-) + +--- a/arch/alpha/include/asm/uaccess.h ++++ b/arch/alpha/include/asm/uaccess.h +@@ -341,45 +341,17 @@ __asm__ __volatile__("1: stb %r2,%1\n" + * Complex access routines + */ + +-/* This little bit of silliness is to get the GP loaded for a function +- that ordinarily wouldn't. Otherwise we could have it done by the macro +- directly, which can be optimized the linker. 
*/ +-#ifdef MODULE +-#define __module_address(sym) "r"(sym), +-#define __module_call(ra, arg, sym) "jsr $" #ra ",(%" #arg ")," #sym +-#else +-#define __module_address(sym) +-#define __module_call(ra, arg, sym) "bsr $" #ra "," #sym " !samegp" +-#endif ++extern long __copy_user(void *to, const void *from, long len); + +-extern void __copy_user(void); +- +-extern inline long +-__copy_tofrom_user_nocheck(void *to, const void *from, long len) +-{ +- register void * __cu_to __asm__("$6") = to; +- register const void * __cu_from __asm__("$7") = from; +- register long __cu_len __asm__("$0") = len; +- +- __asm__ __volatile__( +- __module_call(28, 3, __copy_user) +- : "=r" (__cu_len), "=r" (__cu_from), "=r" (__cu_to) +- : __module_address(__copy_user) +- "0" (__cu_len), "1" (__cu_from), "2" (__cu_to) +- : "$1", "$2", "$3", "$4", "$5", "$28", "memory"); +- +- return __cu_len; +-} +- +-#define __copy_to_user(to, from, n) \ +-({ \ +- __chk_user_ptr(to); \ +- __copy_tofrom_user_nocheck((__force void *)(to), (from), (n)); \ ++#define __copy_to_user(to, from, n) \ ++({ \ ++ __chk_user_ptr(to); \ ++ __copy_user((__force void *)(to), (from), (n)); \ + }) +-#define __copy_from_user(to, from, n) \ +-({ \ +- __chk_user_ptr(from); \ +- __copy_tofrom_user_nocheck((to), (__force void *)(from), (n)); \ ++#define __copy_from_user(to, from, n) \ ++({ \ ++ __chk_user_ptr(from); \ ++ __copy_user((to), (__force void *)(from), (n)); \ + }) + + #define __copy_to_user_inatomic __copy_to_user +@@ -389,7 +361,7 @@ extern inline long + copy_to_user(void __user *to, const void *from, long n) + { + if (likely(__access_ok((unsigned long)to, n, get_fs()))) +- n = __copy_tofrom_user_nocheck((__force void *)to, from, n); ++ n = __copy_user((__force void *)to, from, n); + return n; + } + +@@ -404,21 +376,7 @@ copy_from_user(void *to, const void __us + return res; + } + +-extern void __do_clear_user(void); +- +-extern inline long +-__clear_user(void __user *to, long len) +-{ +- register void __user * __cl_to __asm__("$6") = to; +- register long __cl_len __asm__("$0") = len; +- __asm__ __volatile__( +- __module_call(28, 2, __do_clear_user) +- : "=r"(__cl_len), "=r"(__cl_to) +- : __module_address(__do_clear_user) +- "0"(__cl_len), "1"(__cl_to) +- : "$1", "$2", "$3", "$4", "$5", "$28", "memory"); +- return __cl_len; +-} ++extern long __clear_user(void __user *to, long len); + + extern inline long + clear_user(void __user *to, long len) +@@ -428,9 +386,6 @@ clear_user(void __user *to, long len) + return len; + } + +-#undef __module_address +-#undef __module_call +- + #define user_addr_max() \ + (segment_eq(get_fs(), USER_DS) ? TASK_SIZE : ~0UL) + +--- a/arch/alpha/lib/clear_user.S ++++ b/arch/alpha/lib/clear_user.S +@@ -8,21 +8,6 @@ + * right "bytes left to zero" value (and that it is updated only _after_ + * a successful copy). There is also some rather minor exception setup + * stuff. +- * +- * NOTE! This is not directly C-callable, because the calling semantics +- * are different: +- * +- * Inputs: +- * length in $0 +- * destination address in $6 +- * exception pointer in $7 +- * return address in $28 (exceptions expect it there) +- * +- * Outputs: +- * bytes left to copy in $0 +- * +- * Clobbers: +- * $1,$2,$3,$4,$5,$6 + */ + #include + +@@ -38,62 +23,63 @@ + .set noreorder + .align 4 + +- .globl __do_clear_user +- .ent __do_clear_user +- .frame $30, 0, $28 ++ .globl __clear_user ++ .ent __clear_user ++ .frame $30, 0, $26 + .prologue 0 + + $loop: + and $1, 3, $4 # e0 : + beq $4, 1f # .. 
e1 : + +-0: EX( stq_u $31, 0($6) ) # e0 : zero one word ++0: EX( stq_u $31, 0($16) ) # e0 : zero one word + subq $0, 8, $0 # .. e1 : + subq $4, 1, $4 # e0 : +- addq $6, 8, $6 # .. e1 : ++ addq $16, 8, $16 # .. e1 : + bne $4, 0b # e1 : + unop # : + + 1: bic $1, 3, $1 # e0 : + beq $1, $tail # .. e1 : + +-2: EX( stq_u $31, 0($6) ) # e0 : zero four words ++2: EX( stq_u $31, 0($16) ) # e0 : zero four words + subq $0, 8, $0 # .. e1 : +- EX( stq_u $31, 8($6) ) # e0 : ++ EX( stq_u $31, 8($16) ) # e0 : + subq $0, 8, $0 # .. e1 : +- EX( stq_u $31, 16($6) ) # e0 : ++ EX( stq_u $31, 16($16) ) # e0 : + subq $0, 8, $0 # .. e1 : +- EX( stq_u $31, 24($6) ) # e0 : ++ EX( stq_u $31, 24($16) ) # e0 : + subq $0, 8, $0 # .. e1 : + subq $1, 4, $1 # e0 : +- addq $6, 32, $6 # .. e1 : ++ addq $16, 32, $16 # .. e1 : + bne $1, 2b # e1 : + + $tail: + bne $2, 1f # e1 : is there a tail to do? +- ret $31, ($28), 1 # .. e1 : ++ ret $31, ($26), 1 # .. e1 : + +-1: EX( ldq_u $5, 0($6) ) # e0 : ++1: EX( ldq_u $5, 0($16) ) # e0 : + clr $0 # .. e1 : + nop # e1 : + mskqh $5, $0, $5 # e0 : +- EX( stq_u $5, 0($6) ) # e0 : +- ret $31, ($28), 1 # .. e1 : ++ EX( stq_u $5, 0($16) ) # e0 : ++ ret $31, ($26), 1 # .. e1 : + +-__do_clear_user: +- and $6, 7, $4 # e0 : find dest misalignment ++__clear_user: ++ and $17, $17, $0 ++ and $16, 7, $4 # e0 : find dest misalignment + beq $0, $zerolength # .. e1 : + addq $0, $4, $1 # e0 : bias counter + and $1, 7, $2 # e1 : number of bytes in tail + srl $1, 3, $1 # e0 : + beq $4, $loop # .. e1 : + +- EX( ldq_u $5, 0($6) ) # e0 : load dst word to mask back in ++ EX( ldq_u $5, 0($16) ) # e0 : load dst word to mask back in + beq $1, $oneword # .. e1 : sub-word store? + +- mskql $5, $6, $5 # e0 : take care of misaligned head +- addq $6, 8, $6 # .. e1 : +- EX( stq_u $5, -8($6) ) # e0 : ++ mskql $5, $16, $5 # e0 : take care of misaligned head ++ addq $16, 8, $16 # .. e1 : ++ EX( stq_u $5, -8($16) ) # e0 : + addq $0, $4, $0 # .. e1 : bytes left -= 8 - misalignment + subq $1, 1, $1 # e0 : + subq $0, 8, $0 # .. e1 : +@@ -101,15 +87,15 @@ __do_clear_user: + unop # : + + $oneword: +- mskql $5, $6, $4 # e0 : ++ mskql $5, $16, $4 # e0 : + mskqh $5, $2, $5 # e0 : + or $5, $4, $5 # e1 : +- EX( stq_u $5, 0($6) ) # e0 : ++ EX( stq_u $5, 0($16) ) # e0 : + clr $0 # .. e1 : + + $zerolength: + $exception: +- ret $31, ($28), 1 # .. e1 : ++ ret $31, ($26), 1 # .. e1 : + +- .end __do_clear_user +- EXPORT_SYMBOL(__do_clear_user) ++ .end __clear_user ++ EXPORT_SYMBOL(__clear_user) +--- a/arch/alpha/lib/copy_user.S ++++ b/arch/alpha/lib/copy_user.S +@@ -9,21 +9,6 @@ + * contains the right "bytes left to copy" value (and that it is updated + * only _after_ a successful copy). There is also some rather minor + * exception setup stuff.. +- * +- * NOTE! 
This is not directly C-callable, because the calling semantics are +- * different: +- * +- * Inputs: +- * length in $0 +- * destination address in $6 +- * source address in $7 +- * return address in $28 +- * +- * Outputs: +- * bytes left to copy in $0 +- * +- * Clobbers: +- * $1,$2,$3,$4,$5,$6,$7 + */ + + #include +@@ -49,58 +34,59 @@ + .ent __copy_user + __copy_user: + .prologue 0 +- and $6,7,$3 ++ and $18,$18,$0 ++ and $16,7,$3 + beq $0,$35 + beq $3,$36 + subq $3,8,$3 + .align 4 + $37: +- EXI( ldq_u $1,0($7) ) +- EXO( ldq_u $2,0($6) ) +- extbl $1,$7,$1 +- mskbl $2,$6,$2 +- insbl $1,$6,$1 ++ EXI( ldq_u $1,0($17) ) ++ EXO( ldq_u $2,0($16) ) ++ extbl $1,$17,$1 ++ mskbl $2,$16,$2 ++ insbl $1,$16,$1 + addq $3,1,$3 + bis $1,$2,$1 +- EXO( stq_u $1,0($6) ) ++ EXO( stq_u $1,0($16) ) + subq $0,1,$0 +- addq $6,1,$6 +- addq $7,1,$7 ++ addq $16,1,$16 ++ addq $17,1,$17 + beq $0,$41 + bne $3,$37 + $36: +- and $7,7,$1 ++ and $17,7,$1 + bic $0,7,$4 + beq $1,$43 + beq $4,$48 +- EXI( ldq_u $3,0($7) ) ++ EXI( ldq_u $3,0($17) ) + .align 4 + $50: +- EXI( ldq_u $2,8($7) ) ++ EXI( ldq_u $2,8($17) ) + subq $4,8,$4 +- extql $3,$7,$3 +- extqh $2,$7,$1 ++ extql $3,$17,$3 ++ extqh $2,$17,$1 + bis $3,$1,$1 +- EXO( stq $1,0($6) ) +- addq $7,8,$7 ++ EXO( stq $1,0($16) ) ++ addq $17,8,$17 + subq $0,8,$0 +- addq $6,8,$6 ++ addq $16,8,$16 + bis $2,$2,$3 + bne $4,$50 + $48: + beq $0,$41 + .align 4 + $57: +- EXI( ldq_u $1,0($7) ) +- EXO( ldq_u $2,0($6) ) +- extbl $1,$7,$1 +- mskbl $2,$6,$2 +- insbl $1,$6,$1 ++ EXI( ldq_u $1,0($17) ) ++ EXO( ldq_u $2,0($16) ) ++ extbl $1,$17,$1 ++ mskbl $2,$16,$2 ++ insbl $1,$16,$1 + bis $1,$2,$1 +- EXO( stq_u $1,0($6) ) ++ EXO( stq_u $1,0($16) ) + subq $0,1,$0 +- addq $6,1,$6 +- addq $7,1,$7 ++ addq $16,1,$16 ++ addq $17,1,$17 + bne $0,$57 + br $31,$41 + .align 4 +@@ -108,27 +94,27 @@ $43: + beq $4,$65 + .align 4 + $66: +- EXI( ldq $1,0($7) ) ++ EXI( ldq $1,0($17) ) + subq $4,8,$4 +- EXO( stq $1,0($6) ) +- addq $7,8,$7 ++ EXO( stq $1,0($16) ) ++ addq $17,8,$17 + subq $0,8,$0 +- addq $6,8,$6 ++ addq $16,8,$16 + bne $4,$66 + $65: + beq $0,$41 +- EXI( ldq $2,0($7) ) +- EXO( ldq $1,0($6) ) ++ EXI( ldq $2,0($17) ) ++ EXO( ldq $1,0($16) ) + mskql $2,$0,$2 + mskqh $1,$0,$1 + bis $2,$1,$2 +- EXO( stq $2,0($6) ) ++ EXO( stq $2,0($16) ) + bis $31,$31,$0 + $41: + $35: + $exitin: + $exitout: +- ret $31,($28),1 ++ ret $31,($26),1 + + .end __copy_user + EXPORT_SYMBOL(__copy_user) +--- a/arch/alpha/lib/ev6-clear_user.S ++++ b/arch/alpha/lib/ev6-clear_user.S +@@ -9,21 +9,6 @@ + * a successful copy). There is also some rather minor exception setup + * stuff. + * +- * NOTE! This is not directly C-callable, because the calling semantics +- * are different: +- * +- * Inputs: +- * length in $0 +- * destination address in $6 +- * exception pointer in $7 +- * return address in $28 (exceptions expect it there) +- * +- * Outputs: +- * bytes left to copy in $0 +- * +- * Clobbers: +- * $1,$2,$3,$4,$5,$6 +- * + * Much of the information about 21264 scheduling/coding comes from: + * Compiler Writer's Guide for the Alpha 21264 + * abbreviated as 'CWG' in other comments here +@@ -56,14 +41,15 @@ + .set noreorder + .align 4 + +- .globl __do_clear_user +- .ent __do_clear_user +- .frame $30, 0, $28 ++ .globl __clear_user ++ .ent __clear_user ++ .frame $30, 0, $26 + .prologue 0 + + # Pipeline info : Slotting & Comments +-__do_clear_user: +- and $6, 7, $4 # .. E .. .. : find dest head misalignment ++__clear_user: ++ and $17, $17, $0 ++ and $16, 7, $4 # .. E .. .. : find dest head misalignment + beq $0, $zerolength # U .. .. 
.. : U L U L + + addq $0, $4, $1 # .. .. .. E : bias counter +@@ -75,14 +61,14 @@ __do_clear_user: + + /* + * Head is not aligned. Write (8 - $4) bytes to head of destination +- * This means $6 is known to be misaligned ++ * This means $16 is known to be misaligned + */ +- EX( ldq_u $5, 0($6) ) # .. .. .. L : load dst word to mask back in ++ EX( ldq_u $5, 0($16) ) # .. .. .. L : load dst word to mask back in + beq $1, $onebyte # .. .. U .. : sub-word store? +- mskql $5, $6, $5 # .. U .. .. : take care of misaligned head +- addq $6, 8, $6 # E .. .. .. : L U U L ++ mskql $5, $16, $5 # .. U .. .. : take care of misaligned head ++ addq $16, 8, $16 # E .. .. .. : L U U L + +- EX( stq_u $5, -8($6) ) # .. .. .. L : ++ EX( stq_u $5, -8($16) ) # .. .. .. L : + subq $1, 1, $1 # .. .. E .. : + addq $0, $4, $0 # .. E .. .. : bytes left -= 8 - misalignment + subq $0, 8, $0 # E .. .. .. : U L U L +@@ -93,11 +79,11 @@ __do_clear_user: + * values upon initial entry to the loop + * $1 is number of quadwords to clear (zero is a valid value) + * $2 is number of trailing bytes (0..7) ($2 never used...) +- * $6 is known to be aligned 0mod8 ++ * $16 is known to be aligned 0mod8 + */ + $headalign: + subq $1, 16, $4 # .. .. .. E : If < 16, we can not use the huge loop +- and $6, 0x3f, $2 # .. .. E .. : Forward work for huge loop ++ and $16, 0x3f, $2 # .. .. E .. : Forward work for huge loop + subq $2, 0x40, $3 # .. E .. .. : bias counter (huge loop) + blt $4, $trailquad # U .. .. .. : U L U L + +@@ -114,21 +100,21 @@ $headalign: + beq $3, $bigalign # U .. .. .. : U L U L : Aligned 0mod64 + + $alignmod64: +- EX( stq_u $31, 0($6) ) # .. .. .. L ++ EX( stq_u $31, 0($16) ) # .. .. .. L + addq $3, 8, $3 # .. .. E .. + subq $0, 8, $0 # .. E .. .. + nop # E .. .. .. : U L U L + + nop # .. .. .. E + subq $1, 1, $1 # .. .. E .. +- addq $6, 8, $6 # .. E .. .. ++ addq $16, 8, $16 # .. E .. .. + blt $3, $alignmod64 # U .. .. .. : U L U L + + $bigalign: + /* + * $0 is the number of bytes left + * $1 is the number of quads left +- * $6 is aligned 0mod64 ++ * $16 is aligned 0mod64 + * we know that we'll be taking a minimum of one trip through + * CWG Section 3.7.6: do not expect a sustained store rate of > 1/cycle + * We are _not_ going to update $0 after every single store. That +@@ -145,39 +131,39 @@ $bigalign: + nop # E : + nop # E : + nop # E : +- bis $6,$6,$3 # E : U L U L : Initial wh64 address is dest ++ bis $16,$16,$3 # E : U L U L : Initial wh64 address is dest + /* This might actually help for the current trip... */ + + $do_wh64: + wh64 ($3) # .. .. .. L1 : memory subsystem hint + subq $1, 16, $4 # .. .. E .. : Forward calculation - repeat the loop? +- EX( stq_u $31, 0($6) ) # .. L .. .. ++ EX( stq_u $31, 0($16) ) # .. L .. .. + subq $0, 8, $0 # E .. .. .. : U L U L + +- addq $6, 128, $3 # E : Target address of wh64 +- EX( stq_u $31, 8($6) ) # L : +- EX( stq_u $31, 16($6) ) # L : ++ addq $16, 128, $3 # E : Target address of wh64 ++ EX( stq_u $31, 8($16) ) # L : ++ EX( stq_u $31, 16($16) ) # L : + subq $0, 16, $0 # E : U L L U + + nop # E : +- EX( stq_u $31, 24($6) ) # L : +- EX( stq_u $31, 32($6) ) # L : ++ EX( stq_u $31, 24($16) ) # L : ++ EX( stq_u $31, 32($16) ) # L : + subq $0, 168, $5 # E : U L L U : two trips through the loop left? 
+ /* 168 = 192 - 24, since we've already completed some stores */ + + subq $0, 16, $0 # E : +- EX( stq_u $31, 40($6) ) # L : +- EX( stq_u $31, 48($6) ) # L : +- cmovlt $5, $6, $3 # E : U L L U : Latency 2, extra mapping cycle ++ EX( stq_u $31, 40($16) ) # L : ++ EX( stq_u $31, 48($16) ) # L : ++ cmovlt $5, $16, $3 # E : U L L U : Latency 2, extra mapping cycle + + subq $1, 8, $1 # E : + subq $0, 16, $0 # E : +- EX( stq_u $31, 56($6) ) # L : ++ EX( stq_u $31, 56($16) ) # L : + nop # E : U L U L + + nop # E : + subq $0, 8, $0 # E : +- addq $6, 64, $6 # E : ++ addq $16, 64, $16 # E : + bge $4, $do_wh64 # U : U L U L + + $trailquad: +@@ -190,14 +176,14 @@ $trailquad: + beq $1, $trailbytes # U .. .. .. : U L U L : Only 0..7 bytes to go + + $onequad: +- EX( stq_u $31, 0($6) ) # .. .. .. L ++ EX( stq_u $31, 0($16) ) # .. .. .. L + subq $1, 1, $1 # .. .. E .. + subq $0, 8, $0 # .. E .. .. + nop # E .. .. .. : U L U L + + nop # .. .. .. E + nop # .. .. E .. +- addq $6, 8, $6 # .. E .. .. ++ addq $16, 8, $16 # .. E .. .. + bgt $1, $onequad # U .. .. .. : U L U L + + # We have an unknown number of bytes left to go. +@@ -211,9 +197,9 @@ $trailbytes: + # so we will use $0 as the loop counter + # We know for a fact that $0 > 0 zero due to previous context + $onebyte: +- EX( stb $31, 0($6) ) # .. .. .. L ++ EX( stb $31, 0($16) ) # .. .. .. L + subq $0, 1, $0 # .. .. E .. : +- addq $6, 1, $6 # .. E .. .. : ++ addq $16, 1, $16 # .. E .. .. : + bgt $0, $onebyte # U .. .. .. : U L U L + + $zerolength: +@@ -221,6 +207,6 @@ $exception: # Destination for exceptio + nop # .. .. .. E : + nop # .. .. E .. : + nop # .. E .. .. : +- ret $31, ($28), 1 # L0 .. .. .. : L U L U +- .end __do_clear_user +- EXPORT_SYMBOL(__do_clear_user) ++ ret $31, ($26), 1 # L0 .. .. .. : L U L U ++ .end __clear_user ++ EXPORT_SYMBOL(__clear_user) +--- a/arch/alpha/lib/ev6-copy_user.S ++++ b/arch/alpha/lib/ev6-copy_user.S +@@ -12,21 +12,6 @@ + * only _after_ a successful copy). There is also some rather minor + * exception setup stuff.. + * +- * NOTE! This is not directly C-callable, because the calling semantics are +- * different: +- * +- * Inputs: +- * length in $0 +- * destination address in $6 +- * source address in $7 +- * return address in $28 +- * +- * Outputs: +- * bytes left to copy in $0 +- * +- * Clobbers: +- * $1,$2,$3,$4,$5,$6,$7 +- * + * Much of the information about 21264 scheduling/coding comes from: + * Compiler Writer's Guide for the Alpha 21264 + * abbreviated as 'CWG' in other comments here +@@ -60,10 +45,11 @@ + # Pipeline info: Slotting & Comments + __copy_user: + .prologue 0 +- subq $0, 32, $1 # .. E .. .. : Is this going to be a small copy? ++ andq $18, $18, $0 ++ subq $18, 32, $1 # .. E .. .. : Is this going to be a small copy? + beq $0, $zerolength # U .. .. .. : U L U L + +- and $6,7,$3 # .. .. .. E : is leading dest misalignment ++ and $16,7,$3 # .. .. .. E : is leading dest misalignment + ble $1, $onebyteloop # .. .. U .. : 1st branch : small amount of data + beq $3, $destaligned # .. U .. .. : 2nd (one cycle fetcher stall) + subq $3, 8, $3 # E .. .. .. : L U U L : trip counter +@@ -73,17 +59,17 @@ __copy_user: + * We know we have at least one trip through this loop + */ + $aligndest: +- EXI( ldbu $1,0($7) ) # .. .. .. L : Keep loads separate from stores +- addq $6,1,$6 # .. .. E .. : Section 3.8 in the CWG ++ EXI( ldbu $1,0($17) ) # .. .. .. L : Keep loads separate from stores ++ addq $16,1,$16 # .. .. E .. : Section 3.8 in the CWG + addq $3,1,$3 # .. E .. .. : + nop # E .. .. .. 
: U L U L + + /* +- * the -1 is to compensate for the inc($6) done in a previous quadpack ++ * the -1 is to compensate for the inc($16) done in a previous quadpack + * which allows us zero dependencies within either quadpack in the loop + */ +- EXO( stb $1,-1($6) ) # .. .. .. L : +- addq $7,1,$7 # .. .. E .. : Section 3.8 in the CWG ++ EXO( stb $1,-1($16) ) # .. .. .. L : ++ addq $17,1,$17 # .. .. E .. : Section 3.8 in the CWG + subq $0,1,$0 # .. E .. .. : + bne $3, $aligndest # U .. .. .. : U L U L + +@@ -92,29 +78,29 @@ $aligndest: + * If we arrived via branch, we have a minimum of 32 bytes + */ + $destaligned: +- and $7,7,$1 # .. .. .. E : Check _current_ source alignment ++ and $17,7,$1 # .. .. .. E : Check _current_ source alignment + bic $0,7,$4 # .. .. E .. : number bytes as a quadword loop +- EXI( ldq_u $3,0($7) ) # .. L .. .. : Forward fetch for fallthrough code ++ EXI( ldq_u $3,0($17) ) # .. L .. .. : Forward fetch for fallthrough code + beq $1,$quadaligned # U .. .. .. : U L U L + + /* +- * In the worst case, we've just executed an ldq_u here from 0($7) ++ * In the worst case, we've just executed an ldq_u here from 0($17) + * and we'll repeat it once if we take the branch + */ + + /* Misaligned quadword loop - not unrolled. Leave it that way. */ + $misquad: +- EXI( ldq_u $2,8($7) ) # .. .. .. L : ++ EXI( ldq_u $2,8($17) ) # .. .. .. L : + subq $4,8,$4 # .. .. E .. : +- extql $3,$7,$3 # .. U .. .. : +- extqh $2,$7,$1 # U .. .. .. : U U L L ++ extql $3,$17,$3 # .. U .. .. : ++ extqh $2,$17,$1 # U .. .. .. : U U L L + + bis $3,$1,$1 # .. .. .. E : +- EXO( stq $1,0($6) ) # .. .. L .. : +- addq $7,8,$7 # .. E .. .. : ++ EXO( stq $1,0($16) ) # .. .. L .. : ++ addq $17,8,$17 # .. E .. .. : + subq $0,8,$0 # E .. .. .. : U L L U + +- addq $6,8,$6 # .. .. .. E : ++ addq $16,8,$16 # .. .. .. E : + bis $2,$2,$3 # .. .. E .. : + nop # .. E .. .. : + bne $4,$misquad # U .. .. .. : U L U L +@@ -125,8 +111,8 @@ $misquad: + beq $0,$zerolength # U .. .. .. : U L U L + + /* We know we have at least one trip through the byte loop */ +- EXI ( ldbu $2,0($7) ) # .. .. .. L : No loads in the same quad +- addq $6,1,$6 # .. .. E .. : as the store (Section 3.8 in CWG) ++ EXI ( ldbu $2,0($17) ) # .. .. .. L : No loads in the same quad ++ addq $16,1,$16 # .. .. E .. : as the store (Section 3.8 in CWG) + nop # .. E .. .. : + br $31, $dirtyentry # L0 .. .. .. : L U U L + /* Do the trailing byte loop load, then hop into the store part of the loop */ +@@ -136,8 +122,8 @@ $misquad: + * Based upon the usage context, it's worth the effort to unroll this loop + * $0 - number of bytes to be moved + * $4 - number of bytes to move as quadwords +- * $6 is current destination address +- * $7 is current source address ++ * $16 is current destination address ++ * $17 is current source address + */ + $quadaligned: + subq $4, 32, $2 # .. .. .. E : do not unroll for small stuff +@@ -155,29 +141,29 @@ $quadaligned: + * instruction memory hint instruction). + */ + $unroll4: +- EXI( ldq $1,0($7) ) # .. .. .. L +- EXI( ldq $2,8($7) ) # .. .. L .. ++ EXI( ldq $1,0($17) ) # .. .. .. L ++ EXI( ldq $2,8($17) ) # .. .. L .. + subq $4,32,$4 # .. E .. .. + nop # E .. .. .. : U U L L + +- addq $7,16,$7 # .. .. .. E +- EXO( stq $1,0($6) ) # .. .. L .. +- EXO( stq $2,8($6) ) # .. L .. .. ++ addq $17,16,$17 # .. .. .. E ++ EXO( stq $1,0($16) ) # .. .. L .. ++ EXO( stq $2,8($16) ) # .. L .. .. + subq $0,16,$0 # E .. .. .. : U L L U + +- addq $6,16,$6 # .. .. .. E +- EXI( ldq $1,0($7) ) # .. .. L .. +- EXI( ldq $2,8($7) ) # .. L .. .. 
++ addq $16,16,$16 # .. .. .. E ++ EXI( ldq $1,0($17) ) # .. .. L .. ++ EXI( ldq $2,8($17) ) # .. L .. .. + subq $4, 32, $3 # E .. .. .. : U U L L : is there enough for another trip? + +- EXO( stq $1,0($6) ) # .. .. .. L +- EXO( stq $2,8($6) ) # .. .. L .. ++ EXO( stq $1,0($16) ) # .. .. .. L ++ EXO( stq $2,8($16) ) # .. .. L .. + subq $0,16,$0 # .. E .. .. +- addq $7,16,$7 # E .. .. .. : U L L U ++ addq $17,16,$17 # E .. .. .. : U L L U + + nop # .. .. .. E + nop # .. .. E .. +- addq $6,16,$6 # .. E .. .. ++ addq $16,16,$16 # .. E .. .. + bgt $3,$unroll4 # U .. .. .. : U L U L + + nop +@@ -186,14 +172,14 @@ $unroll4: + beq $4, $noquads + + $onequad: +- EXI( ldq $1,0($7) ) ++ EXI( ldq $1,0($17) ) + subq $4,8,$4 +- addq $7,8,$7 ++ addq $17,8,$17 + nop + +- EXO( stq $1,0($6) ) ++ EXO( stq $1,0($16) ) + subq $0,8,$0 +- addq $6,8,$6 ++ addq $16,8,$16 + bne $4,$onequad + + $noquads: +@@ -207,23 +193,23 @@ $noquads: + * There's no point in doing a lot of complex alignment calculations to try to + * to quadword stuff for a small amount of data. + * $0 - remaining number of bytes left to copy +- * $6 - current dest addr +- * $7 - current source addr ++ * $16 - current dest addr ++ * $17 - current source addr + */ + + $onebyteloop: +- EXI ( ldbu $2,0($7) ) # .. .. .. L : No loads in the same quad +- addq $6,1,$6 # .. .. E .. : as the store (Section 3.8 in CWG) ++ EXI ( ldbu $2,0($17) ) # .. .. .. L : No loads in the same quad ++ addq $16,1,$16 # .. .. E .. : as the store (Section 3.8 in CWG) + nop # .. E .. .. : + nop # E .. .. .. : U L U L + + $dirtyentry: + /* +- * the -1 is to compensate for the inc($6) done in a previous quadpack ++ * the -1 is to compensate for the inc($16) done in a previous quadpack + * which allows us zero dependencies within either quadpack in the loop + */ +- EXO ( stb $2,-1($6) ) # .. .. .. L : +- addq $7,1,$7 # .. .. E .. : quadpack as the load ++ EXO ( stb $2,-1($16) ) # .. .. .. L : ++ addq $17,1,$17 # .. .. E .. : quadpack as the load + subq $0,1,$0 # .. E .. .. : change count _after_ copy + bgt $0,$onebyteloop # U .. .. .. : U L U L + +@@ -233,7 +219,7 @@ $exitout: # Destination for exception + nop # .. .. .. E + nop # .. .. E .. + nop # .. E .. .. +- ret $31,($28),1 # L0 .. .. .. : L U L U ++ ret $31,($26),1 # L0 .. .. .. : L U L U + + .end __copy_user + EXPORT_SYMBOL(__copy_user) diff --git a/queue-4.9/binfmt_misc-fix-possible-deadlock-in-bm_register_write.patch b/queue-4.9/binfmt_misc-fix-possible-deadlock-in-bm_register_write.patch new file mode 100644 index 00000000000..0ca60ea499d --- /dev/null +++ b/queue-4.9/binfmt_misc-fix-possible-deadlock-in-bm_register_write.patch @@ -0,0 +1,118 @@ +From e7850f4d844e0acfac7e570af611d89deade3146 Mon Sep 17 00:00:00 2001 +From: Lior Ribak +Date: Fri, 12 Mar 2021 21:07:41 -0800 +Subject: binfmt_misc: fix possible deadlock in bm_register_write + +From: Lior Ribak + +commit e7850f4d844e0acfac7e570af611d89deade3146 upstream. + +There is a deadlock in bm_register_write: + +First, in the begining of the function, a lock is taken on the binfmt_misc +root inode with inode_lock(d_inode(root)). + +Then, if the user used the MISC_FMT_OPEN_FILE flag, the function will call +open_exec on the user-provided interpreter. 
+ +open_exec will call a path lookup, and if the path lookup process includes +the root of binfmt_misc, it will try to take a shared lock on its inode +again, but it is already locked, and the code will get stuck in a deadlock + +To reproduce the bug: +$ echo ":iiiii:E::ii::/proc/sys/fs/binfmt_misc/bla:F" > /proc/sys/fs/binfmt_misc/register + +backtrace of where the lock occurs (#5): +0 schedule () at ./arch/x86/include/asm/current.h:15 +1 0xffffffff81b51237 in rwsem_down_read_slowpath (sem=0xffff888003b202e0, count=, state=state@entry=2) at kernel/locking/rwsem.c:992 +2 0xffffffff81b5150a in __down_read_common (state=2, sem=) at kernel/locking/rwsem.c:1213 +3 __down_read (sem=) at kernel/locking/rwsem.c:1222 +4 down_read (sem=) at kernel/locking/rwsem.c:1355 +5 0xffffffff811ee22a in inode_lock_shared (inode=) at ./include/linux/fs.h:783 +6 open_last_lookups (op=0xffffc9000022fe34, file=0xffff888004098600, nd=0xffffc9000022fd10) at fs/namei.c:3177 +7 path_openat (nd=nd@entry=0xffffc9000022fd10, op=op@entry=0xffffc9000022fe34, flags=flags@entry=65) at fs/namei.c:3366 +8 0xffffffff811efe1c in do_filp_open (dfd=, pathname=pathname@entry=0xffff8880031b9000, op=op@entry=0xffffc9000022fe34) at fs/namei.c:3396 +9 0xffffffff811e493f in do_open_execat (fd=fd@entry=-100, name=name@entry=0xffff8880031b9000, flags=, flags@entry=0) at fs/exec.c:913 +10 0xffffffff811e4a92 in open_exec (name=) at fs/exec.c:948 +11 0xffffffff8124aa84 in bm_register_write (file=, buffer=, count=19, ppos=) at fs/binfmt_misc.c:682 +12 0xffffffff811decd2 in vfs_write (file=file@entry=0xffff888004098500, buf=buf@entry=0xa758d0 ":iiiii:E::ii::i:CF +", count=count@entry=19, pos=pos@entry=0xffffc9000022ff10) at fs/read_write.c:603 +13 0xffffffff811defda in ksys_write (fd=, buf=0xa758d0 ":iiiii:E::ii::i:CF +", count=19) at fs/read_write.c:658 +14 0xffffffff81b49813 in do_syscall_64 (nr=, regs=0xffffc9000022ff58) at arch/x86/entry/common.c:46 +15 0xffffffff81c0007c in entry_SYSCALL_64 () at arch/x86/entry/entry_64.S:120 + +To solve the issue, the open_exec call is moved to before the write +lock is taken by bm_register_write + +Link: https://lkml.kernel.org/r/20210228224414.95962-1-liorribak@gmail.com +Fixes: 948b701a607f1 ("binfmt_misc: add persistent opened binary handler for containers") +Signed-off-by: Lior Ribak +Acked-by: Helge Deller +Cc: Al Viro +Cc: +Signed-off-by: Andrew Morton +Signed-off-by: Linus Torvalds +Signed-off-by: Greg Kroah-Hartman +--- + fs/binfmt_misc.c | 29 ++++++++++++++--------------- + 1 file changed, 14 insertions(+), 15 deletions(-) + +--- a/fs/binfmt_misc.c ++++ b/fs/binfmt_misc.c +@@ -695,12 +695,24 @@ static ssize_t bm_register_write(struct + struct super_block *sb = file_inode(file)->i_sb; + struct dentry *root = sb->s_root, *dentry; + int err = 0; ++ struct file *f = NULL; + + e = create_entry(buffer, count); + + if (IS_ERR(e)) + return PTR_ERR(e); + ++ if (e->flags & MISC_FMT_OPEN_FILE) { ++ f = open_exec(e->interpreter); ++ if (IS_ERR(f)) { ++ pr_notice("register: failed to install interpreter file %s\n", ++ e->interpreter); ++ kfree(e); ++ return PTR_ERR(f); ++ } ++ e->interp_file = f; ++ } ++ + inode_lock(d_inode(root)); + dentry = lookup_one_len(e->name, root, strlen(e->name)); + err = PTR_ERR(dentry); +@@ -724,21 +736,6 @@ static ssize_t bm_register_write(struct + goto out2; + } + +- if (e->flags & MISC_FMT_OPEN_FILE) { +- struct file *f; +- +- f = open_exec(e->interpreter); +- if (IS_ERR(f)) { +- err = PTR_ERR(f); +- pr_notice("register: failed to install interpreter file %s\n", 
e->interpreter); +- simple_release_fs(&bm_mnt, &entry_count); +- iput(inode); +- inode = NULL; +- goto out2; +- } +- e->interp_file = f; +- } +- + e->dentry = dget(dentry); + inode->i_private = e; + inode->i_fop = &bm_entry_operations; +@@ -755,6 +752,8 @@ out: + inode_unlock(d_inode(root)); + + if (err) { ++ if (f) ++ filp_close(f, NULL); + kfree(e); + return err; + } diff --git a/queue-4.9/powerpc-64s-fix-instruction-encoding-for-lis-in-ppc_function_entry.patch b/queue-4.9/powerpc-64s-fix-instruction-encoding-for-lis-in-ppc_function_entry.patch new file mode 100644 index 00000000000..235d8ae5ead --- /dev/null +++ b/queue-4.9/powerpc-64s-fix-instruction-encoding-for-lis-in-ppc_function_entry.patch @@ -0,0 +1,36 @@ +From cea15316ceee2d4a51dfdecd79e08a438135416c Mon Sep 17 00:00:00 2001 +From: "Naveen N. Rao" +Date: Thu, 4 Mar 2021 07:34:11 +0530 +Subject: powerpc/64s: Fix instruction encoding for lis in ppc_function_entry() + +From: Naveen N. Rao + +commit cea15316ceee2d4a51dfdecd79e08a438135416c upstream. + +'lis r2,N' is 'addis r2,0,N' and the instruction encoding in the macro +LIS_R2 is incorrect (it currently maps to 'addis r0,r2,N'). Fix the +same. + +Fixes: c71b7eff426f ("powerpc: Add ABIv2 support to ppc_function_entry") +Cc: stable@vger.kernel.org # v3.16+ +Reported-by: Jiri Olsa +Signed-off-by: Naveen N. Rao +Acked-by: Segher Boessenkool +Signed-off-by: Michael Ellerman +Link: https://lore.kernel.org/r/20210304020411.16796-1-naveen.n.rao@linux.vnet.ibm.com +Signed-off-by: Greg Kroah-Hartman +--- + arch/powerpc/include/asm/code-patching.h | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +--- a/arch/powerpc/include/asm/code-patching.h ++++ b/arch/powerpc/include/asm/code-patching.h +@@ -46,7 +46,7 @@ void __patch_exception(int exc, unsigned + #endif + + #define OP_RT_RA_MASK 0xffff0000UL +-#define LIS_R2 0x3c020000UL ++#define LIS_R2 0x3c400000UL + #define ADDIS_R2_R12 0x3c4c0000UL + #define ADDI_R2_R2 0x38420000UL + diff --git a/queue-4.9/series b/queue-4.9/series index 12aceb2c8c1..f639913ecc5 100644 --- a/queue-4.9/series +++ b/queue-4.9/series @@ -62,3 +62,10 @@ nfsv4.2-fix-return-value-of-_nfs4_get_security_label.patch block-rsxx-fix-error-return-code-of-rsxx_pci_probe.patch configfs-fix-a-use-after-free-in-__configfs_open_fil.patch prctl-fix-pr_set_mm_auxv-kernel-stack-leak.patch +alpha-add-src-rather-than-obj-to-make-source-file-path.patch +alpha-merge-build-rules-of-division-routines.patch +alpha-make-short-build-log-available-for-division-routines.patch +alpha-package-string-routines-together.patch +alpha-switch-__copy_user-and-__do_clean_user-to-normal-calling-conventions.patch +powerpc-64s-fix-instruction-encoding-for-lis-in-ppc_function_entry.patch +binfmt_misc-fix-possible-deadlock-in-bm_register_write.patch
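
Taken together, the three alpha Kbuild patches above leave arch/alpha/lib/Makefile building all four division routines from one checked-in source file. The following is a condensed sketch of that end state, reconstructed from the diffs above and not copied from any one hunk ($(ev6-y) expands to "ev6-" on CONFIG_ALPHA_EV6 kernels and is empty otherwise; recipe lines are tab-indented in the real Makefile):

    # Per-object assembler flags select which routine the shared
    # divide.S is assembled into.
    AFLAGS___divqu.o = -DDIV
    AFLAGS___remqu.o = -DREM
    AFLAGS___divlu.o = -DDIV -DINTSIZE
    AFLAGS___remlu.o = -DREM -DINTSIZE

    # The prerequisite uses $(src)/ because divide.S is a checked-in
    # source file, not a build product under $(obj)/.  FORCE plus
    # if_changed_rule re-assembles whenever the command line changes
    # and prints the short "  AS  <object>" log line.
    $(addprefix $(obj)/,__divqu.o __remqu.o __divlu.o __remlu.o): \
            $(src)/$(ev6-y)divide.S FORCE
            $(call if_changed_rule,as_o_S)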