From: Greg Kroah-Hartman
Date: Tue, 24 Oct 2017 10:26:36 +0000 (+0200)
Subject: 4.4-stable patches
X-Git-Tag: v3.18.78~9
X-Git-Url: http://git.ipfire.org/?a=commitdiff_plain;h=9788ae574d364fe934d2308dad4b29c08a713e58;p=thirdparty%2Fkernel%2Fstable-queue.git

4.4-stable patches

added patches:
	parisc-avoid-trashing-sr2-and-sr3-in-lws-code.patch
	parisc-fix-double-word-compare-and-exchange-in-lws-code-on-32-bit-kernels.patch
	sched-autogroup-fix-autogroup_move_group-to-never-skip-sched_move_task.patch
---

diff --git a/queue-4.4/parisc-avoid-trashing-sr2-and-sr3-in-lws-code.patch b/queue-4.4/parisc-avoid-trashing-sr2-and-sr3-in-lws-code.patch
new file mode 100644
index 00000000000..1469c2ecd68
--- /dev/null
+++ b/queue-4.4/parisc-avoid-trashing-sr2-and-sr3-in-lws-code.patch
@@ -0,0 +1,169 @@
+From f4125cfdb3008363137f744c101e5d76ead760ba Mon Sep 17 00:00:00 2001
+From: John David Anglin
+Date: Fri, 28 Oct 2016 22:13:42 +0200
+Subject: parisc: Avoid trashing sr2 and sr3 in LWS code
+
+From: John David Anglin
+
+commit f4125cfdb3008363137f744c101e5d76ead760ba upstream.
+
+There is no need to trash sr2 and sr3 in the Light-weight syscall (LWS).
+sr2 already points to kernel space (it's zero in userspace, otherwise
+syscalls wouldn't work), and since the LWS code is executed in userspace,
+we can simply skip preloading sr3.
+
+Signed-off-by: John David Anglin
+Signed-off-by: Helge Deller
+Signed-off-by: Greg Kroah-Hartman
+
+---
+ arch/parisc/kernel/syscall.S |   53 +++++++++++++++++++------------------
+ 1 file changed, 24 insertions(+), 29 deletions(-)
+
+--- a/arch/parisc/kernel/syscall.S
++++ b/arch/parisc/kernel/syscall.S
+@@ -479,11 +479,6 @@ lws_start:
+ 	comiclr,>>	__NR_lws_entries, %r20, %r0
+ 	b,n	lws_exit_nosys
+
+-	/* WARNING: Trashing sr2 and sr3 */
+-	mfsp	%sr7,%r1	/* get userspace into sr3 */
+-	mtsp	%r1,%sr3
+-	mtsp	%r0,%sr2	/* get kernel space into sr2 */
+-
+ 	/* Load table start */
+ 	ldil	L%lws_table, %r1
+ 	ldo	R%lws_table(%r1), %r28	/* Scratch use of r28 */
+@@ -632,9 +627,9 @@ cas_action:
+ 	stw	%r1, 4(%sr2,%r20)
+ #endif
+ 	/* The load and store could fail */
+-1:	ldw,ma	0(%sr3,%r26), %r28
++1:	ldw,ma	0(%r26), %r28
+ 	sub,<>	%r28, %r25, %r0
+-2:	stw,ma	%r24, 0(%sr3,%r26)
++2:	stw,ma	%r24, 0(%r26)
+ 	/* Free lock */
+ 	stw,ma	%r20, 0(%sr2,%r20)
+ #if ENABLE_LWS_DEBUG
+@@ -711,9 +706,9 @@ lws_compare_and_swap_2:
+ 	nop
+
+ 	/* 8bit load */
+-4:	ldb	0(%sr3,%r25), %r25
++4:	ldb	0(%r25), %r25
+ 	b	cas2_lock_start
+-5:	ldb	0(%sr3,%r24), %r24
++5:	ldb	0(%r24), %r24
+ 	nop
+ 	nop
+ 	nop
+@@ -721,9 +716,9 @@ lws_compare_and_swap_2:
+ 	nop
+
+ 	/* 16bit load */
+-6:	ldh	0(%sr3,%r25), %r25
++6:	ldh	0(%r25), %r25
+ 	b	cas2_lock_start
+-7:	ldh	0(%sr3,%r24), %r24
++7:	ldh	0(%r24), %r24
+ 	nop
+ 	nop
+ 	nop
+@@ -731,9 +726,9 @@ lws_compare_and_swap_2:
+ 	nop
+
+ 	/* 32bit load */
+-8:	ldw	0(%sr3,%r25), %r25
++8:	ldw	0(%r25), %r25
+ 	b	cas2_lock_start
+-9:	ldw	0(%sr3,%r24), %r24
++9:	ldw	0(%r24), %r24
+ 	nop
+ 	nop
+ 	nop
+@@ -742,14 +737,14 @@ lws_compare_and_swap_2:
+
+ 	/* 64bit load */
+ #ifdef CONFIG_64BIT
+-10:	ldd	0(%sr3,%r25), %r25
+-11:	ldd	0(%sr3,%r24), %r24
++10:	ldd	0(%r25), %r25
++11:	ldd	0(%r24), %r24
+ #else
+ 	/* Load new value into r22/r23 - high/low */
+-10:	ldw	0(%sr3,%r25), %r22
+-11:	ldw	4(%sr3,%r25), %r23
++10:	ldw	0(%r25), %r22
++11:	ldw	4(%r25), %r23
+ 	/* Load new value into fr4 for atomic store later */
+-12:	flddx	0(%sr3,%r24), %fr4
++12:	flddx	0(%r24), %fr4
+ #endif
+
+ cas2_lock_start:
+@@ -799,30 +794,30 @@ cas2_action:
+ 	ldo	1(%r0),%r28
+
+ 	/* 8bit CAS */
+-13:	ldb,ma	0(%sr3,%r26), %r29
++13:	ldb,ma	0(%r26), %r29
+ 	sub,=	%r29, %r25, %r0
+ 	b,n	cas2_end
+-14:	stb,ma	%r24, 0(%sr3,%r26)
++14:	stb,ma	%r24, 0(%r26)
+ 	b	cas2_end
+ 	copy	%r0, %r28
+ 	nop
+ 	nop
+
+ 	/* 16bit CAS */
+-15:	ldh,ma	0(%sr3,%r26), %r29
++15:	ldh,ma	0(%r26), %r29
+ 	sub,=	%r29, %r25, %r0
+ 	b,n	cas2_end
+-16:	sth,ma	%r24, 0(%sr3,%r26)
++16:	sth,ma	%r24, 0(%r26)
+ 	b	cas2_end
+ 	copy	%r0, %r28
+ 	nop
+ 	nop
+
+ 	/* 32bit CAS */
+-17:	ldw,ma	0(%sr3,%r26), %r29
++17:	ldw,ma	0(%r26), %r29
+ 	sub,=	%r29, %r25, %r0
+ 	b,n	cas2_end
+-18:	stw,ma	%r24, 0(%sr3,%r26)
++18:	stw,ma	%r24, 0(%r26)
+ 	b	cas2_end
+ 	copy	%r0, %r28
+ 	nop
+@@ -830,22 +825,22 @@ cas2_action:
+
+ 	/* 64bit CAS */
+ #ifdef CONFIG_64BIT
+-19:	ldd,ma	0(%sr3,%r26), %r29
++19:	ldd,ma	0(%r26), %r29
+ 	sub,*=	%r29, %r25, %r0
+ 	b,n	cas2_end
+-20:	std,ma	%r24, 0(%sr3,%r26)
++20:	std,ma	%r24, 0(%r26)
+ 	copy	%r0, %r28
+ #else
+ 	/* Compare first word */
+-19:	ldw,ma	0(%sr3,%r26), %r29
++19:	ldw,ma	0(%r26), %r29
+ 	sub,=	%r29, %r22, %r0
+ 	b,n	cas2_end
+ 	/* Compare second word */
+-20:	ldw,ma	4(%sr3,%r26), %r29
++20:	ldw,ma	4(%r26), %r29
+ 	sub,=	%r29, %r23, %r0
+ 	b,n	cas2_end
+ 	/* Perform the store */
+-21:	fstdx	%fr4, 0(%sr3,%r26)
++21:	fstdx	%fr4, 0(%r26)
+ 	copy	%r0, %r28
+ #endif
+
diff --git a/queue-4.4/parisc-fix-double-word-compare-and-exchange-in-lws-code-on-32-bit-kernels.patch b/queue-4.4/parisc-fix-double-word-compare-and-exchange-in-lws-code-on-32-bit-kernels.patch
new file mode 100644
index 00000000000..bad0dfc79a4
--- /dev/null
+++ b/queue-4.4/parisc-fix-double-word-compare-and-exchange-in-lws-code-on-32-bit-kernels.patch
@@ -0,0 +1,55 @@
+From 374b3bf8e8b519f61eb9775888074c6e46b3bf0c Mon Sep 17 00:00:00 2001
+From: John David Anglin
+Date: Sat, 30 Sep 2017 17:24:23 -0400
+Subject: parisc: Fix double-word compare and exchange in LWS code on 32-bit kernels
+
+From: John David Anglin
+
+commit 374b3bf8e8b519f61eb9775888074c6e46b3bf0c upstream.
+
+As discussed on the debian-hppa list, double-word compare and exchange
+operations fail on 32-bit kernels. Looking at the code, I realized that
+the ",ma" completer does the wrong thing in the "ldw,ma 4(%r26), %r29"
+instruction. This increments %r26 and causes the following store to
+write to the wrong location.
+
+Note by Helge Deller:
+The patch applies cleanly to the stable kernel series if this upstream
+commit is merged in advance:
+f4125cfdb300 ("parisc: Avoid trashing sr2 and sr3 in LWS code").
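+
+In C terms, the ",ma" (modify-after) completer described above acts like
+a post-increment of the base register: the access goes through the old
+base, then the displacement is added to it. A rough sketch, not kernel
+code (the function names are hypothetical, chosen only for illustration):
+
+	/* ldw 4(%r26), %r29 - plain load from base+4, %r26 unchanged */
+	static unsigned int ldw_disp4(unsigned int *base)
+	{
+		return *(base + 1);
+	}
+
+	/* ldw,ma 4(%r26), %r29 - load through %r26, then %r26 += 4 */
+	static unsigned int ldw_ma_disp4(unsigned int **basep)
+	{
+		unsigned int val = **basep;
+		*basep += 1;	/* the side effect that corrupted %r26 */
+		return val;
+	}
+
+After the ",ma" form, anything still addressing the double word through
+%r26 is off by four bytes, which is why the following store wrote to the
+wrong location.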
+
+Signed-off-by: John David Anglin
+Tested-by: Christoph Biedl
+Fixes: 89206491201c ("parisc: Implement new LWS CAS supporting 64 bit operations.")
+Signed-off-by: Helge Deller
+Signed-off-by: Greg Kroah-Hartman
+
+---
+ arch/parisc/kernel/syscall.S |    6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+--- a/arch/parisc/kernel/syscall.S
++++ b/arch/parisc/kernel/syscall.S
+@@ -740,7 +740,7 @@ lws_compare_and_swap_2:
+ 10:	ldd	0(%r25), %r25
+ 11:	ldd	0(%r24), %r24
+ #else
+-	/* Load new value into r22/r23 - high/low */
++	/* Load old value into r22/r23 - high/low */
+ 10:	ldw	0(%r25), %r22
+ 11:	ldw	4(%r25), %r23
+ 	/* Load new value into fr4 for atomic store later */
+@@ -832,11 +832,11 @@ cas2_action:
+ 	copy	%r0, %r28
+ #else
+ 	/* Compare first word */
+-19:	ldw,ma	0(%r26), %r29
++19:	ldw	0(%r26), %r29
+ 	sub,=	%r29, %r22, %r0
+ 	b,n	cas2_end
+ 	/* Compare second word */
+-20:	ldw,ma	4(%r26), %r29
++20:	ldw	4(%r26), %r29
+ 	sub,=	%r29, %r23, %r0
+ 	b,n	cas2_end
+ 	/* Perform the store */
diff --git a/queue-4.4/sched-autogroup-fix-autogroup_move_group-to-never-skip-sched_move_task.patch b/queue-4.4/sched-autogroup-fix-autogroup_move_group-to-never-skip-sched_move_task.patch
new file mode 100644
index 00000000000..f817b10a8f4
--- /dev/null
+++ b/queue-4.4/sched-autogroup-fix-autogroup_move_group-to-never-skip-sched_move_task.patch
@@ -0,0 +1,111 @@
+From 18f649ef344127ef6de23a5a4272dbe2fdb73dde Mon Sep 17 00:00:00 2001
+From: Oleg Nesterov
+Date: Mon, 14 Nov 2016 19:46:09 +0100
+Subject: sched/autogroup: Fix autogroup_move_group() to never skip sched_move_task()
+
+From: Oleg Nesterov
+
+commit 18f649ef344127ef6de23a5a4272dbe2fdb73dde upstream.
+
+The PF_EXITING check in task_wants_autogroup() is no longer needed.
+Remove it, but see the next patch.
+
+However, the comment is correct in that autogroup_move_group() must
+always change task_group() for every thread, so the sysctl_ check is
+very wrong; we can race with cgroups, and even sys_setsid() is not safe,
+because a task running with task_group() == ag->tg must participate in
+refcounting:
+
+	int main(void)
+	{
+		int sctl = open("/proc/sys/kernel/sched_autogroup_enabled", O_WRONLY);
+
+		assert(sctl > 0);
+		if (fork()) {
+			wait(NULL); // destroy the child's ag/tg
+			pause();
+		}
+
+		assert(pwrite(sctl, "1\n", 2, 0) == 2);
+		assert(setsid() > 0);
+		if (fork())
+			pause();
+
+		kill(getppid(), SIGKILL);
+		sleep(1);
+
+		// The child has gone, the grandchild runs with kref == 1
+		assert(pwrite(sctl, "0\n", 2, 0) == 2);
+		assert(setsid() > 0);
+
+		// runs with the freed ag/tg
+		for (;;)
+			sleep(1);
+
+		return 0;
+	}
+
+crashes the kernel. It doesn't really need the sleep(1); it doesn't
+matter whether autogroup_move_group() actually frees the task_group
+immediately or this happens later.
+
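+The invariant behind the fix, condensed from the diff below (a sketch of
+the intended flow, not a complete function):
+
+	p->signal->autogroup = autogroup_kref_get(ag);	/* take the new ref */
+	/*
+	 * Every thread still running with task_group() == prev->tg pins
+	 * prev, so each one must be moved before the old ref is dropped.
+	 */
+	for_each_thread(p, t)
+		sched_move_task(t);
+	autogroup_kref_put(prev);	/* only now may prev->tg be freed */
+
+The removed sysctl_sched_autogroup_enabled early-out could skip the
+for_each_thread() loop, so the final autogroup_kref_put() could free a
+task_group that running threads still used.
+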
+Reported-by: Vern Lovejoy
+Signed-off-by: Oleg Nesterov
+Signed-off-by: Peter Zijlstra (Intel)
+Cc: Linus Torvalds
+Cc: Mike Galbraith
+Cc: Peter Zijlstra
+Cc: Thomas Gleixner
+Cc: hartsjc@redhat.com
+Cc: vbendel@redhat.com
+Link: http://lkml.kernel.org/r/20161114184609.GA15965@redhat.com
+Signed-off-by: Ingo Molnar
+Signed-off-by: Sumit Semwal
+ [sumits: submit to 4.4 LTS, post testing on Hikey]
+Signed-off-by: Greg Kroah-Hartman
+
+---
+ kernel/sched/auto_group.c |   23 ++++++++++++-----------
+ 1 file changed, 12 insertions(+), 11 deletions(-)
+
+--- a/kernel/sched/auto_group.c
++++ b/kernel/sched/auto_group.c
+@@ -111,14 +111,11 @@ bool task_wants_autogroup(struct task_st
+ {
+ 	if (tg != &root_task_group)
+ 		return false;
+-
+ 	/*
+-	 * We can only assume the task group can't go away on us if
+-	 * autogroup_move_group() can see us on ->thread_group list.
++	 * If we race with autogroup_move_group() the caller can use the old
++	 * value of signal->autogroup but in this case sched_move_task() will
++	 * be called again before autogroup_kref_put().
+ 	 */
+-	if (p->flags & PF_EXITING)
+-		return false;
+-
+ 	return true;
+ }
+
+@@ -138,13 +135,17 @@ autogroup_move_group(struct task_struct
+ 	}
+
+ 	p->signal->autogroup = autogroup_kref_get(ag);
+-
+-	if (!READ_ONCE(sysctl_sched_autogroup_enabled))
+-		goto out;
+-
++	/*
++	 * We can't avoid sched_move_task() after we changed signal->autogroup,
++	 * this process can already run with task_group() == prev->tg or we can
++	 * race with cgroup code which can read autogroup = prev under rq->lock.
++	 * In the latter case for_each_thread() can not miss a migrating thread,
++	 * cpu_cgroup_attach() must not be possible after cgroup_exit() and it
++	 * can't be removed from thread list, we hold ->siglock.
++	 */
+ 	for_each_thread(p, t)
+ 		sched_move_task(t);
+-out:
++
+ 	unlock_task_sighand(p, &flags);
+ 	autogroup_kref_put(prev);
+ }
diff --git a/queue-4.4/series b/queue-4.4/series
index 379c01578a1..1862dfdd54d 100644
--- a/queue-4.4/series
+++ b/queue-4.4/series
@@ -22,3 +22,6 @@ keys-encrypted-fix-dereference-of-null-user_key_payload.patch
 lib-digsig-fix-dereference-of-null-user_key_payload.patch
 keys-don-t-let-add_key-update-an-uninstantiated-key.patch
 pkcs7-prevent-null-pointer-dereference-since-sinfo-is-not-always-set.patch
+parisc-avoid-trashing-sr2-and-sr3-in-lws-code.patch
+parisc-fix-double-word-compare-and-exchange-in-lws-code-on-32-bit-kernels.patch
+sched-autogroup-fix-autogroup_move_group-to-never-skip-sched_move_task.patch