--- /dev/null
+From mhocko@suse.cz Fri Mar 1 09:05:37 2013
+From: Oleg Nesterov <oleg@redhat.com>
+Date: Tue, 19 Feb 2013 14:56:52 +0100
+Subject: ptrace: ensure arch_ptrace/ptrace_request can never race with SIGKILL
+To: stable@vger.kernel.org
+Cc: Oleg Nesterov <oleg@redhat.com>, Linus Torvalds <torvalds@linux-foundation.org>
+Message-ID: <1361282213-17268-2-git-send-email-mhocko@suse.cz>
+
+
+From: Oleg Nesterov <oleg@redhat.com>
+
+Upstream commit 9899d11f654474d2d54ea52ceaa2a1f4db3abd68.
+
+putreg() assumes that the tracee is not running and that pt_regs_access()
+can safely play with its stack.  However, a killed tracee can return from
+ptrace_stop() to the low-level asm code and do RESTORE_REST; this means
+that the debugger can actually read/modify the kernel stack until the
+tracee does SAVE_REST again.
+
+set_task_blockstep() can race with SIGKILL too, and in some sense this
+race is even worse: the very fact that the tracee can be woken up breaks
+the logic.
+
+As Linus suggested, we can clear TASK_WAKEKILL around the arch_ptrace()
+call; this ensures that nobody can ever wake up the tracee while the
+debugger looks at it.  Not only does this fix the problems mentioned
+above, it also allows some cleanups/simplifications in the arch_ptrace()
+paths.
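+
+Purely as an illustrative sketch of the resulting calling pattern (the
+real hunks are below; sys_ptrace() itself bails out with a goto on
+failure rather than using this if-block):
+
+	ret = ptrace_check_attach(child, request == PTRACE_KILL);
+	if (!ret) {
+		/* for non-KILL requests the tracee is now frozen in __TASK_TRACED */
+		ret = arch_ptrace(child, request, addr, data);
+		/* re-allow SIGKILL, unless a successful PTRACE_DETACH already untraced it */
+		if (ret || request != PTRACE_DETACH)
+			ptrace_unfreeze_traced(child);
+	}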
+
+ptrace_unfreeze_traced() probably needs more callers; for example, it
+makes sense to make the tracee killable for the oom-killer before
+access_process_vm().
+
+While at it, add a comment to may_ptrace_stop() to explain why
+ptrace_stop() still can't rely on SIGKILL and signal_pending_state().
+
+Reported-by: Salman Qazi <sqazi@google.com>
+Reported-by: Suleiman Souhlal <suleiman@google.com>
+Suggested-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Oleg Nesterov <oleg@redhat.com>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Michal Hocko <mhocko@suse.cz>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/ptrace.c | 59 +++++++++++++++++++++++++++++++++++++++++++++++---------
+ kernel/signal.c | 5 ++++
+ 2 files changed, 55 insertions(+), 9 deletions(-)
+
+--- a/kernel/ptrace.c
++++ b/kernel/ptrace.c
+@@ -38,6 +38,36 @@ void __ptrace_link(struct task_struct *c
+ child->parent = new_parent;
+ }
+
++/* Ensure that nothing can wake it up, even SIGKILL */
++static bool ptrace_freeze_traced(struct task_struct *task)
++{
++ bool ret = false;
++
++ spin_lock_irq(&task->sighand->siglock);
++ if (task_is_traced(task) && !__fatal_signal_pending(task)) {
++ task->state = __TASK_TRACED;
++ ret = true;
++ }
++ spin_unlock_irq(&task->sighand->siglock);
++
++ return ret;
++}
++
++static void ptrace_unfreeze_traced(struct task_struct *task)
++{
++ if (task->state != __TASK_TRACED)
++ return;
++
++ WARN_ON(!task->ptrace || task->parent != current);
++
++ spin_lock_irq(&task->sighand->siglock);
++ if (__fatal_signal_pending(task))
++ wake_up_state(task, __TASK_TRACED);
++ else
++ task->state = TASK_TRACED;
++ spin_unlock_irq(&task->sighand->siglock);
++}
++
+ /**
+ * __ptrace_unlink - unlink ptracee and restore its execution state
+ * @child: ptracee to be unlinked
+@@ -112,23 +142,29 @@ int ptrace_check_attach(struct task_stru
+ * be changed by us so it's not changing right after this.
+ */
+ read_lock(&tasklist_lock);
+- if ((child->ptrace & PT_PTRACED) && child->parent == current) {
++ if (child->ptrace && child->parent == current) {
++ WARN_ON(child->state == __TASK_TRACED);
+ /*
+ * child->sighand can't be NULL, release_task()
+ * does ptrace_unlink() before __exit_signal().
+ */
+- spin_lock_irq(&child->sighand->siglock);
+- WARN_ON_ONCE(task_is_stopped(child));
+- if (task_is_traced(child) || kill)
++ if (kill || ptrace_freeze_traced(child))
+ ret = 0;
+- spin_unlock_irq(&child->sighand->siglock);
+ }
+ read_unlock(&tasklist_lock);
+
+- if (!ret && !kill)
+- ret = wait_task_inactive(child, TASK_TRACED) ? 0 : -ESRCH;
++ if (!ret && !kill) {
++ if (!wait_task_inactive(child, __TASK_TRACED)) {
++ /*
++ * This can only happen if may_ptrace_stop() fails and
++ * ptrace_stop() changes ->state back to TASK_RUNNING,
++ * so we should not worry about leaking __TASK_TRACED.
++ */
++ WARN_ON(child->state == __TASK_TRACED);
++ ret = -ESRCH;
++ }
++ }
+
+- /* All systems go.. */
+ return ret;
+ }
+
+@@ -777,6 +813,8 @@ SYSCALL_DEFINE4(ptrace, long, request, l
+ goto out_put_task_struct;
+
+ ret = arch_ptrace(child, request, addr, data);
++ if (ret || request != PTRACE_DETACH)
++ ptrace_unfreeze_traced(child);
+
+ out_put_task_struct:
+ put_task_struct(child);
+@@ -915,8 +953,11 @@ asmlinkage long compat_sys_ptrace(compat
+ }
+
+ ret = ptrace_check_attach(child, request == PTRACE_KILL);
+- if (!ret)
++ if (!ret) {
+ ret = compat_arch_ptrace(child, request, addr, data);
++ if (ret || request != PTRACE_DETACH)
++ ptrace_unfreeze_traced(child);
++ }
+
+ out_put_task_struct:
+ put_task_struct(child);
+--- a/kernel/signal.c
++++ b/kernel/signal.c
+@@ -1669,6 +1669,10 @@ static inline int may_ptrace_stop(void)
+ * If SIGKILL was already sent before the caller unlocked
+ * ->siglock we must see ->core_state != NULL. Otherwise it
+ * is safe to enter schedule().
++ *
++ * This is almost outdated, a task with the pending SIGKILL can't
++ * block in TASK_TRACED. But PTRACE_EVENT_EXIT can be reported
++ * after SIGKILL was already dequeued.
+ */
+ if (unlikely(current->mm->core_state) &&
+ unlikely(current->mm == current->parent->mm))
+@@ -1800,6 +1804,7 @@ static void ptrace_stop(int exit_code, i
+ if (gstop_done)
+ do_notify_parent_cldstop(current, false, why);
+
++ /* tasklist protects us from ptrace_freeze_traced() */
+ __set_current_state(TASK_RUNNING);
+ if (clear_code)
+ current->exit_code = 0;
--- /dev/null
+From mhocko@suse.cz Fri Mar 1 09:05:15 2013
+From: Oleg Nesterov <oleg@redhat.com>
+Date: Tue, 19 Feb 2013 14:56:51 +0100
+Subject: ptrace: introduce signal_wake_up_state() and ptrace_signal_wake_up()
+To: stable@vger.kernel.org
+Cc: Oleg Nesterov <oleg@redhat.com>, Linus Torvalds <torvalds@linux-foundation.org>
+Message-ID: <1361282213-17268-1-git-send-email-mhocko@suse.cz>
+
+
+From: Oleg Nesterov <oleg@redhat.com>
+
+Upstream commit 910ffdb18a6408e14febbb6e4b6840fd2c928c82.
+
+Cleanup and preparation for the next change.
+
+signal_wake_up(resume => true) is overused.  None of the ptrace/jctl
+callers actually want to wake up a TASK_WAKEKILL task, but they can't
+specify the necessary mask.
+
+Turn signal_wake_up() into signal_wake_up_state(state), reintroduce
+signal_wake_up() as a trivial helper, and add ptrace_signal_wake_up()
+which adds __TASK_TRACED.
+
+This way ptrace_signal_wake_up() can work "inside" ptrace_request()
+even if the tracee doesn't have the TASK_WAKEKILL bit set.
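+
+Purely as an illustration (the real helpers are in the sched.h hunk
+below), the old and new wakeups map onto the new interface as follows;
+in each case signal_wake_up_state() also ORs TASK_INTERRUPTIBLE into the
+mask before calling wake_up_state():
+
+	/* old signal_wake_up(t, 1): may wake killable/stopped/traced sleeps */
+	signal_wake_up_state(t, TASK_WAKEKILL);
+
+	/* new ptrace_signal_wake_up(t, 1): wakes a __TASK_TRACED tracee even
+	 * when its TASK_WAKEKILL bit is not set */
+	signal_wake_up_state(t, __TASK_TRACED);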
+
+Signed-off-by: Oleg Nesterov <oleg@redhat.com>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Michal Hocko <mhocko@suse.cz>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ include/linux/sched.h | 11 ++++++++++-
+ kernel/ptrace.c | 4 ++--
+ kernel/signal.c | 12 +++---------
+ 3 files changed, 15 insertions(+), 12 deletions(-)
+
+--- a/include/linux/sched.h
++++ b/include/linux/sched.h
+@@ -2564,7 +2564,16 @@ static inline void thread_group_cputime_
+ extern void recalc_sigpending_and_wake(struct task_struct *t);
+ extern void recalc_sigpending(void);
+
+-extern void signal_wake_up(struct task_struct *t, int resume_stopped);
++extern void signal_wake_up_state(struct task_struct *t, unsigned int state);
++
++static inline void signal_wake_up(struct task_struct *t, bool resume)
++{
++ signal_wake_up_state(t, resume ? TASK_WAKEKILL : 0);
++}
++static inline void ptrace_signal_wake_up(struct task_struct *t, bool resume)
++{
++ signal_wake_up_state(t, resume ? __TASK_TRACED : 0);
++}
+
+ /*
+ * Wrappers for p->thread_info->cpu access. No-op on UP.
+--- a/kernel/ptrace.c
++++ b/kernel/ptrace.c
+@@ -92,7 +92,7 @@ void __ptrace_unlink(struct task_struct
+ * TASK_KILLABLE sleeps.
+ */
+ if (child->group_stop & GROUP_STOP_PENDING || task_is_traced(child))
+- signal_wake_up(child, task_is_traced(child));
++ ptrace_signal_wake_up(child, true);
+
+ spin_unlock(&child->sighand->siglock);
+ }
+@@ -245,7 +245,7 @@ static int ptrace_attach(struct task_str
+ */
+ if (task_is_stopped(task)) {
+ task->group_stop |= GROUP_STOP_PENDING | GROUP_STOP_TRAPPING;
+- signal_wake_up(task, 1);
++ signal_wake_up_state(task, __TASK_STOPPED);
+ wait_trap = true;
+ }
+
+--- a/kernel/signal.c
++++ b/kernel/signal.c
+@@ -631,23 +631,17 @@ int dequeue_signal(struct task_struct *t
+ * No need to set need_resched since signal event passing
+ * goes through ->blocked
+ */
+-void signal_wake_up(struct task_struct *t, int resume)
++void signal_wake_up_state(struct task_struct *t, unsigned int state)
+ {
+- unsigned int mask;
+-
+ set_tsk_thread_flag(t, TIF_SIGPENDING);
+-
+ /*
+- * For SIGKILL, we want to wake it up in the stopped/traced/killable
++ * TASK_WAKEKILL also means wake it up in the stopped/traced/killable
+ * case. We don't check t->state here because there is a race with it
+ * executing another processor and just now entering stopped state.
+ * By using wake_up_state, we ensure the process will wake up and
+ * handle its death signal.
+ */
+- mask = TASK_INTERRUPTIBLE;
+- if (resume)
+- mask |= TASK_WAKEKILL;
+- if (!wake_up_state(t, mask))
++ if (!wake_up_state(t, state | TASK_INTERRUPTIBLE))
+ kick_process(t);
+ }
+
ext4-fix-race-in-ext4_mb_add_n_trim.patch
svcrpc-make-svc_age_temp_xprts-enqueue-under-sv_lock.patch
vhost-fix-length-for-cross-region-descriptor.patch
+ptrace-introduce-signal_wake_up_state-and-ptrace_signal_wake_up.patch
+ptrace-ensure-arch_ptrace-ptrace_request-can-never-race-with-sigkill.patch
+wake_up_process-should-be-never-used-to-wakeup-a-task_stopped-traced-task.patch
+unbreak-automounter-support-on-64-bit-kernel-with-32-bit-userspace-v2.patch
--- /dev/null
+From 4f4ffc3a5398ef9bdbb32db04756d7d34e356fcf Mon Sep 17 00:00:00 2001
+From: Helge Deller <deller@gmx.de>
+Date: Mon, 4 Feb 2013 19:39:52 +0000
+Subject: unbreak automounter support on 64-bit kernel with 32-bit userspace (v2)
+
+From: Helge Deller <deller@gmx.de>
+
+commit 4f4ffc3a5398ef9bdbb32db04756d7d34e356fcf upstream.
+
+Automounter support is broken on the parisc architecture, because the
+existing #if list does not include a check for defined(__hppa__).  The
+HPPA (parisc) architecture is similar to other 64-bit Linux targets on
+which we have to define autofs_wqt_t (which is passed back and forth to
+user space) as an int, a type which is 32 bits wide on both 32- and
+64-bit kernels.
+
+During the discussion on the mailing list, H. Peter Anvin suggested
+inverting the #if list, since only specific platforms (those which do not
+have a 32-bit userspace, like IA64 and Alpha) should have autofs_wqt_t as
+an unsigned long type.
+
+This suggestion is probably the best way to go, since Arm64 (and maybe
+others?) seems to have a non-working automounter.  In the long run this
+inverted check also seems to be the best solution for other new upcoming
+architectures, since it will not require them to change this #if again
+(unless they are 64-bit only).
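+
+As a purely illustrative sketch (assuming the usual ILP32/LP64 data
+models of 32- and 64-bit Linux), the point of using an int is that its
+size does not change with the kernel's word size:
+
+	/* sizeof(unsigned int)  == 4 on both 32-bit and 64-bit kernels */
+	/* sizeof(unsigned long) == 4 on 32-bit, but 8 on 64-bit (LP64) */
+	typedef unsigned int autofs_wqt_t;	/* stays 4 bytes everywhere */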
+
+Signed-off-by: Helge Deller <deller@gmx.de>
+Acked-by: H. Peter Anvin <hpa@zytor.com>
+Acked-by: Ian Kent <raven@themaw.net>
+Acked-by: Catalin Marinas <catalin.marinas@arm.com>
+CC: James Bottomley <James.Bottomley@HansenPartnership.com>
+CC: Rolf Eike Beer <eike-kernel@sf-tec.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ include/linux/auto_fs.h | 25 ++++++++-----------------
+ 1 file changed, 8 insertions(+), 17 deletions(-)
+
+--- a/include/linux/auto_fs.h
++++ b/include/linux/auto_fs.h
+@@ -31,25 +31,16 @@
+ #define AUTOFS_MIN_PROTO_VERSION AUTOFS_PROTO_VERSION
+
+ /*
+- * Architectures where both 32- and 64-bit binaries can be executed
+- * on 64-bit kernels need this. This keeps the structure format
+- * uniform, and makes sure the wait_queue_token isn't too big to be
+- * passed back down to the kernel.
+- *
+- * This assumes that on these architectures:
+- * mode 32 bit 64 bit
+- * -------------------------
+- * int 32 bit 32 bit
+- * long 32 bit 64 bit
+- *
+- * If so, 32-bit user-space code should be backwards compatible.
++ * The wait_queue_token (autofs_wqt_t) is part of a structure which is passed
++ * back to the kernel via ioctl from userspace. On architectures where 32- and
++ * 64-bit userspace binaries can be executed it's important that the size of
++ * autofs_wqt_t stays constant between 32- and 64-bit Linux kernels so that we
++ * do not break the binary ABI interface by changing the structure size.
+ */
+-
+-#if defined(__sparc__) || defined(__mips__) || defined(__x86_64__) \
+- || defined(__powerpc__) || defined(__s390__)
+-typedef unsigned int autofs_wqt_t;
+-#else
++#if defined(__ia64__) || defined(__alpha__) /* pure 64bit architectures */
+ typedef unsigned long autofs_wqt_t;
++#else
++typedef unsigned int autofs_wqt_t;
+ #endif
+
+ /* Packet types */
--- /dev/null
+From mhocko@suse.cz Fri Mar 1 09:06:11 2013
+From: Oleg Nesterov <oleg@redhat.com>
+Date: Tue, 19 Feb 2013 14:56:53 +0100
+Subject: wake_up_process() should be never used to wakeup a TASK_STOPPED/TRACED task
+To: stable@vger.kernel.org
+Cc: Oleg Nesterov <oleg@redhat.com>, Linus Torvalds <torvalds@linux-foundation.org>
+Message-ID: <1361282213-17268-3-git-send-email-mhocko@suse.cz>
+
+
+From: Oleg Nesterov <oleg@redhat.com>
+
+Upstream commit 9067ac85d533651b98c2ff903182a20cbb361fcb.
+
+wake_up_process() should never wake up a TASK_STOPPED/TRACED task.
+Change it to use TASK_NORMAL and add the WARN_ON().
+
+TASK_ALL has no other users and can probably be killed.
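+
+For reference, an illustrative reminder of the (then-current) state-mask
+definitions in include/linux/sched.h, which shows why the old TASK_ALL
+mask could kick a stopped or traced task out of its stop:
+
+	#define TASK_NORMAL	(TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE)
+	#define TASK_ALL	(TASK_NORMAL | __TASK_STOPPED | __TASK_TRACED)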
+
+Signed-off-by: Oleg Nesterov <oleg@redhat.com>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Michal Hocko <mhocko@suse.cz>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/sched.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/kernel/sched.c
++++ b/kernel/sched.c
+@@ -2778,7 +2778,8 @@ out:
+ */
+ int wake_up_process(struct task_struct *p)
+ {
+- return try_to_wake_up(p, TASK_ALL, 0);
++ WARN_ON(task_is_stopped_or_traced(p));
++ return try_to_wake_up(p, TASK_NORMAL, 0);
+ }
+ EXPORT_SYMBOL(wake_up_process);
+