--- /dev/null
+From f4125cfdb3008363137f744c101e5d76ead760ba Mon Sep 17 00:00:00 2001
+From: John David Anglin <dave.anglin@bell.net>
+Date: Fri, 28 Oct 2016 22:13:42 +0200
+Subject: parisc: Avoid trashing sr2 and sr3 in LWS code
+
+From: John David Anglin <dave.anglin@bell.net>
+
+commit f4125cfdb3008363137f744c101e5d76ead760ba upstream.
+
+There is no need to trash sr2 and sr3 in the Light-weight syscall (LWS). sr2
+already points to kernel space (it's zero in userspace, otherwise syscalls
+wouldn't work), and since the LWS code is executed in userspace, we can simply
+skip preloading sr3.
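+
+For reference, userspace reaches this code by branching through %sr2 into the
+gateway page, so the call sequence itself already depends on sr2. A rough
+sketch of a 32-bit LWS CAS call follows (illustrative only, not glibc's
+implementation; the register protocol is taken from the comments in
+syscall.S, while the GCC constraints and clobbers are assumptions):
+
+	/* Illustrative sketch: %r26 = address, %r25 = old value, %r24 = new
+	 * value, %r28 = previous value, %r21 = error code (per syscall.S).
+	 * Constraint and clobber choices are assumptions, not a drop-in libc. */
+	static unsigned long lws_cas32(volatile unsigned int *mem,
+				       unsigned int oldval,
+				       unsigned int newval, long *err)
+	{
+		unsigned long prev, lws_err;
+
+		asm volatile ("copy	%2, %%r26\n\t"
+			      "copy	%3, %%r25\n\t"
+			      "copy	%4, %%r24\n\t"
+			      "ble	0xb0(%%sr2, %%r0)\n\t"	/* gateway page */
+			      "ldi	0, %%r20\n\t"		/* LWS CAS entry */
+			      "copy	%%r28, %0\n\t"
+			      "copy	%%r21, %1"
+			      : "=&r" (prev), "=&r" (lws_err)
+			      : "r" (mem), "r" (oldval), "r" (newval)
+			      : "r1", "r20", "r21", "r22", "r23", "r24",
+				"r25", "r26", "r28", "r29", "r31", "memory");
+
+		*err = lws_err;
+		return prev;	/* caller retries when prev != oldval or on error */
+	}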
+
+Signed-off-by: John David Anglin <dave.anglin@bell.net>
+Signed-off-by: Helge Deller <deller@gmx.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/parisc/kernel/syscall.S | 53 +++++++++++++++++++------------------------
+ 1 file changed, 24 insertions(+), 29 deletions(-)
+
+--- a/arch/parisc/kernel/syscall.S
++++ b/arch/parisc/kernel/syscall.S
+@@ -479,11 +479,6 @@ lws_start:
+ comiclr,>> __NR_lws_entries, %r20, %r0
+ b,n lws_exit_nosys
+
+- /* WARNING: Trashing sr2 and sr3 */
+- mfsp %sr7,%r1 /* get userspace into sr3 */
+- mtsp %r1,%sr3
+- mtsp %r0,%sr2 /* get kernel space into sr2 */
+-
+ /* Load table start */
+ ldil L%lws_table, %r1
+ ldo R%lws_table(%r1), %r28 /* Scratch use of r28 */
+@@ -632,9 +627,9 @@ cas_action:
+ stw %r1, 4(%sr2,%r20)
+ #endif
+ /* The load and store could fail */
+-1: ldw,ma 0(%sr3,%r26), %r28
++1: ldw,ma 0(%r26), %r28
+ sub,<> %r28, %r25, %r0
+-2: stw,ma %r24, 0(%sr3,%r26)
++2: stw,ma %r24, 0(%r26)
+ /* Free lock */
+ stw,ma %r20, 0(%sr2,%r20)
+ #if ENABLE_LWS_DEBUG
+@@ -711,9 +706,9 @@ lws_compare_and_swap_2:
+ nop
+
+ /* 8bit load */
+-4: ldb 0(%sr3,%r25), %r25
++4: ldb 0(%r25), %r25
+ b cas2_lock_start
+-5: ldb 0(%sr3,%r24), %r24
++5: ldb 0(%r24), %r24
+ nop
+ nop
+ nop
+@@ -721,9 +716,9 @@ lws_compare_and_swap_2:
+ nop
+
+ /* 16bit load */
+-6: ldh 0(%sr3,%r25), %r25
++6: ldh 0(%r25), %r25
+ b cas2_lock_start
+-7: ldh 0(%sr3,%r24), %r24
++7: ldh 0(%r24), %r24
+ nop
+ nop
+ nop
+@@ -731,9 +726,9 @@ lws_compare_and_swap_2:
+ nop
+
+ /* 32bit load */
+-8: ldw 0(%sr3,%r25), %r25
++8: ldw 0(%r25), %r25
+ b cas2_lock_start
+-9: ldw 0(%sr3,%r24), %r24
++9: ldw 0(%r24), %r24
+ nop
+ nop
+ nop
+@@ -742,14 +737,14 @@ lws_compare_and_swap_2:
+
+ /* 64bit load */
+ #ifdef CONFIG_64BIT
+-10: ldd 0(%sr3,%r25), %r25
+-11: ldd 0(%sr3,%r24), %r24
++10: ldd 0(%r25), %r25
++11: ldd 0(%r24), %r24
+ #else
+ /* Load new value into r22/r23 - high/low */
+-10: ldw 0(%sr3,%r25), %r22
+-11: ldw 4(%sr3,%r25), %r23
++10: ldw 0(%r25), %r22
++11: ldw 4(%r25), %r23
+ /* Load new value into fr4 for atomic store later */
+-12: flddx 0(%sr3,%r24), %fr4
++12: flddx 0(%r24), %fr4
+ #endif
+
+ cas2_lock_start:
+@@ -799,30 +794,30 @@ cas2_action:
+ ldo 1(%r0),%r28
+
+ /* 8bit CAS */
+-13: ldb,ma 0(%sr3,%r26), %r29
++13: ldb,ma 0(%r26), %r29
+ sub,= %r29, %r25, %r0
+ b,n cas2_end
+-14: stb,ma %r24, 0(%sr3,%r26)
++14: stb,ma %r24, 0(%r26)
+ b cas2_end
+ copy %r0, %r28
+ nop
+ nop
+
+ /* 16bit CAS */
+-15: ldh,ma 0(%sr3,%r26), %r29
++15: ldh,ma 0(%r26), %r29
+ sub,= %r29, %r25, %r0
+ b,n cas2_end
+-16: sth,ma %r24, 0(%sr3,%r26)
++16: sth,ma %r24, 0(%r26)
+ b cas2_end
+ copy %r0, %r28
+ nop
+ nop
+
+ /* 32bit CAS */
+-17: ldw,ma 0(%sr3,%r26), %r29
++17: ldw,ma 0(%r26), %r29
+ sub,= %r29, %r25, %r0
+ b,n cas2_end
+-18: stw,ma %r24, 0(%sr3,%r26)
++18: stw,ma %r24, 0(%r26)
+ b cas2_end
+ copy %r0, %r28
+ nop
+@@ -830,22 +825,22 @@ cas2_action:
+
+ /* 64bit CAS */
+ #ifdef CONFIG_64BIT
+-19: ldd,ma 0(%sr3,%r26), %r29
++19: ldd,ma 0(%r26), %r29
+ sub,*= %r29, %r25, %r0
+ b,n cas2_end
+-20: std,ma %r24, 0(%sr3,%r26)
++20: std,ma %r24, 0(%r26)
+ copy %r0, %r28
+ #else
+ /* Compare first word */
+-19: ldw,ma 0(%sr3,%r26), %r29
++19: ldw,ma 0(%r26), %r29
+ sub,= %r29, %r22, %r0
+ b,n cas2_end
+ /* Compare second word */
+-20: ldw,ma 4(%sr3,%r26), %r29
++20: ldw,ma 4(%r26), %r29
+ sub,= %r29, %r23, %r0
+ b,n cas2_end
+ /* Perform the store */
+-21: fstdx %fr4, 0(%sr3,%r26)
++21: fstdx %fr4, 0(%r26)
+ copy %r0, %r28
+ #endif
+
--- /dev/null
+From 374b3bf8e8b519f61eb9775888074c6e46b3bf0c Mon Sep 17 00:00:00 2001
+From: John David Anglin <dave.anglin@bell.net>
+Date: Sat, 30 Sep 2017 17:24:23 -0400
+Subject: parisc: Fix double-word compare and exchange in LWS code on 32-bit kernels
+
+From: John David Anglin <dave.anglin@bell.net>
+
+commit 374b3bf8e8b519f61eb9775888074c6e46b3bf0c upstream.
+
+As discussed on the debian-hppa list, double-word compare and exchange
+operations fail on 32-bit kernels. Looking at the code, I realized that
+the ",ma" completer does the wrong thing in the "ldw,ma 4(%r26), %r29"
+instruction. This increments %r26 and causes the following store to
+write to the wrong location.
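+
+In C terms, the fixed 32-bit path is roughly equivalent to the sketch below
+(illustrative only, not the kernel source; the spinlock taken around the
+sequence and the fault handlers on the accesses are omitted):
+
+	/* Both compare loads use fixed offsets and leave the base register
+	 * (%r26, "mem" here) untouched, so the final 8-byte store hits the
+	 * intended double word.  Assumes an 8-byte aligned "mem".         */
+	static int cas2_64_sketch(unsigned int *mem, unsigned int old_hi,
+				  unsigned int old_lo, unsigned long long newval)
+	{
+		if (mem[0] != old_hi)			/* ldw 0(%r26), %r29 */
+			return 1;			/* mismatch, %r28 stays 1 */
+		if (mem[1] != old_lo)			/* ldw 4(%r26), %r29 */
+			return 1;
+		*(unsigned long long *)mem = newval;	/* fstdx %fr4, 0(%r26) */
+		return 0;				/* success, %r28 set to 0 */
+	}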
+
+Note by Helge Deller:
+The patch applies cleanly to the stable kernel series if this upstream
+commit is merged in advance:
+f4125cfdb300 ("parisc: Avoid trashing sr2 and sr3 in LWS code").
+
+Signed-off-by: John David Anglin <dave.anglin@bell.net>
+Tested-by: Christoph Biedl <debian.axhn@manchmal.in-ulm.de>
+Fixes: 89206491201c ("parisc: Implement new LWS CAS supporting 64 bit operations.")
+Signed-off-by: Helge Deller <deller@gmx.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/parisc/kernel/syscall.S | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+--- a/arch/parisc/kernel/syscall.S
++++ b/arch/parisc/kernel/syscall.S
+@@ -740,7 +740,7 @@ lws_compare_and_swap_2:
+ 10: ldd 0(%r25), %r25
+ 11: ldd 0(%r24), %r24
+ #else
+- /* Load new value into r22/r23 - high/low */
++ /* Load old value into r22/r23 - high/low */
+ 10: ldw 0(%r25), %r22
+ 11: ldw 4(%r25), %r23
+ /* Load new value into fr4 for atomic store later */
+@@ -832,11 +832,11 @@ cas2_action:
+ copy %r0, %r28
+ #else
+ /* Compare first word */
+-19: ldw,ma 0(%r26), %r29
++19: ldw 0(%r26), %r29
+ sub,= %r29, %r22, %r0
+ b,n cas2_end
+ /* Compare second word */
+-20: ldw,ma 4(%r26), %r29
++20: ldw 4(%r26), %r29
+ sub,= %r29, %r23, %r0
+ b,n cas2_end
+ /* Perform the store */
--- /dev/null
+From 18f649ef344127ef6de23a5a4272dbe2fdb73dde Mon Sep 17 00:00:00 2001
+From: Oleg Nesterov <oleg@redhat.com>
+Date: Mon, 14 Nov 2016 19:46:09 +0100
+Subject: sched/autogroup: Fix autogroup_move_group() to never skip sched_move_task()
+
+From: Oleg Nesterov <oleg@redhat.com>
+
+commit 18f649ef344127ef6de23a5a4272dbe2fdb73dde upstream.
+
+The PF_EXITING check in task_wants_autogroup() is no longer needed. Remove
+it, but see the next patch.
+
+However, the comment is correct in that autogroup_move_group() must always
+change task_group() for every thread, so the sysctl_sched_autogroup_enabled
+check is very wrong; we can race with cgroups, and even sys_setsid() is not
+safe because a task running with task_group() == ag->tg must participate in
+refcounting:
+
+ int main(void)
+ {
+ int sctl = open("/proc/sys/kernel/sched_autogroup_enabled", O_WRONLY);
+
+ assert(sctl > 0);
+ if (fork()) {
+ wait(NULL); // destroy the child's ag/tg
+ pause();
+ }
+
+ assert(pwrite(sctl, "1\n", 2, 0) == 2);
+ assert(setsid() > 0);
+ if (fork())
+ pause();
+
+ kill(getppid(), SIGKILL);
+ sleep(1);
+
+ // The child has gone, the grandchild runs with kref == 1
+ assert(pwrite(sctl, "0\n", 2, 0) == 2);
+ assert(setsid() > 0);
+
+ // runs with the freed ag/tg
+ for (;;)
+ sleep(1);
+
+ return 0;
+ }
+
+crashes the kernel. It doesn't really need the sleep(1); it doesn't matter
+whether autogroup_move_group() actually frees the task_group or this happens
+later.
+
+Reported-by: Vern Lovejoy <vlovejoy@redhat.com>
+Signed-off-by: Oleg Nesterov <oleg@redhat.com>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Mike Galbraith <efault@gmx.de>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: hartsjc@redhat.com
+Cc: vbendel@redhat.com
+Link: http://lkml.kernel.org/r/20161114184609.GA15965@redhat.com
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Sumit Semwal <sumit.semwal@linaro.org>
+ [sumits: submit to 4.4 LTS, post testing on Hikey]
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/sched/auto_group.c | 23 ++++++++++++-----------
+ 1 file changed, 12 insertions(+), 11 deletions(-)
+
+--- a/kernel/sched/auto_group.c
++++ b/kernel/sched/auto_group.c
+@@ -111,14 +111,11 @@ bool task_wants_autogroup(struct task_st
+ {
+ if (tg != &root_task_group)
+ return false;
+-
+ /*
+- * We can only assume the task group can't go away on us if
+- * autogroup_move_group() can see us on ->thread_group list.
++ * If we race with autogroup_move_group() the caller can use the old
++ * value of signal->autogroup but in this case sched_move_task() will
++ * be called again before autogroup_kref_put().
+ */
+- if (p->flags & PF_EXITING)
+- return false;
+-
+ return true;
+ }
+
+@@ -138,13 +135,17 @@ autogroup_move_group(struct task_struct
+ }
+
+ p->signal->autogroup = autogroup_kref_get(ag);
+-
+- if (!READ_ONCE(sysctl_sched_autogroup_enabled))
+- goto out;
+-
++ /*
++ * We can't avoid sched_move_task() after we changed signal->autogroup,
++ * this process can already run with task_group() == prev->tg or we can
++ * race with cgroup code which can read autogroup = prev under rq->lock.
++ * In the latter case for_each_thread() can not miss a migrating thread,
++ * cpu_cgroup_attach() must not be possible after cgroup_exit() and it
++ * can't be removed from thread list, we hold ->siglock.
++ */
+ for_each_thread(p, t)
+ sched_move_task(t);
+-out:
++
+ unlock_task_sighand(p, &flags);
+ autogroup_kref_put(prev);
+ }