--- /dev/null
+From 8f17fc20bfb75bcec4cfeda789738979c8338fdc Mon Sep 17 00:00:00 2001
+From: Oleg Nesterov <oleg@tv-sign.ru>
+Date: Thu, 15 Jun 2006 20:11:15 +0400
+Subject: check_process_timers: fix possible lockup
+
+If the local timer interrupt happens just after do_exit() sets PF_EXITING
+(and before it clears ->it_xxx_expires) run_posix_cpu_timers() will call
+check_process_timers() with tasklist_lock + ->siglock held and
+
+ check_process_timers:
+
+ t = tsk;
+ do {
+ ....
+
+ do {
+ t = next_thread(t);
+ } while (unlikely(t->flags & PF_EXITING));
+ } while (t != tsk);
+
+the outer loop will never stop.
+
+Actually, the window is bigger. Another process can attach the timer
+after ->it_xxx_expires was cleared (see the next commit) and the 'if
+(PF_EXITING)' check in arm_timer() is racy (see the one after that).
+
+Signed-off-by: Oleg Nesterov <oleg@tv-sign.ru>
+Signed-off-by: Linus Torvalds <torvalds@osdl.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+Signed-off-by: Chris Wright <chrisw@sous-sol.org>
+---
+ kernel/posix-cpu-timers.c | 9 ++++-----
+ 1 file changed, 4 insertions(+), 5 deletions(-)
+
+--- linux-2.6.16.20.orig/kernel/posix-cpu-timers.c
++++ linux-2.6.16.20/kernel/posix-cpu-timers.c
+@@ -1173,6 +1173,9 @@ static void check_process_timers(struct
+ }
+ t = tsk;
+ do {
++ if (unlikely(t->flags & PF_EXITING))
++ continue;
++
+ ticks = cputime_add(cputime_add(t->utime, t->stime),
+ prof_left);
+ if (!cputime_eq(prof_expires, cputime_zero) &&
+@@ -1193,11 +1196,7 @@ static void check_process_timers(struct
+ t->it_sched_expires > sched)) {
+ t->it_sched_expires = sched;
+ }
+-
+- do {
+- t = next_thread(t);
+- } while (unlikely(t->flags & PF_EXITING));
+- } while (t != tsk);
++ } while ((t = next_thread(t)) != tsk);
+ }
+ }
+
--- /dev/null
+From 7c85d1f9d358b24c5b05c3a2783a78423775a080 Mon Sep 17 00:00:00 2001
+From: Paul Mackerras <paulus@samba.org>
+Date: Fri, 9 Jun 2006 13:02:59 +1000
+Subject: powerpc: Fix machine check problem on 32-bit kernels (CVE-2006-2448)
+
+This fixes a bug found by Dave Jones that means that it is possible
+for userspace to provoke a machine check on 32-bit kernels. This
+also fixes a couple of other places where I found similar problems
+by inspection.
+
+Signed-off-by: Paul Mackerras <paulus@samba.org>
+Signed-off-by: Chris Wright <chrisw@sous-sol.org>
+---
+ arch/powerpc/kernel/signal_32.c | 11 ++++++++++-
+ arch/powerpc/kernel/signal_64.c | 2 ++
+ 2 files changed, 12 insertions(+), 1 deletion(-)
+
+--- linux-2.6.16.20.orig/arch/powerpc/kernel/signal_32.c
++++ linux-2.6.16.20/arch/powerpc/kernel/signal_32.c
+@@ -802,10 +802,13 @@ static int do_setcontext(struct ucontext
+ if (__get_user(cmcp, &ucp->uc_regs))
+ return -EFAULT;
+ mcp = (struct mcontext __user *)(u64)cmcp;
++ /* no need to check access_ok(mcp), since mcp < 4GB */
+ }
+ #else
+ if (__get_user(mcp, &ucp->uc_regs))
+ return -EFAULT;
++ if (!access_ok(VERIFY_READ, mcp, sizeof(*mcp)))
++ return -EFAULT;
+ #endif
+ restore_sigmask(&set);
+ if (restore_user_regs(regs, mcp, sig))
+@@ -907,13 +910,14 @@ int sys_debug_setcontext(struct ucontext
+ {
+ struct sig_dbg_op op;
+ int i;
++ unsigned char tmp;
+ unsigned long new_msr = regs->msr;
+ #if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
+ unsigned long new_dbcr0 = current->thread.dbcr0;
+ #endif
+
+ for (i=0; i<ndbg; i++) {
+- if (__copy_from_user(&op, dbg, sizeof(op)))
++ if (copy_from_user(&op, dbg + i, sizeof(op)))
+ return -EFAULT;
+ switch (op.dbg_type) {
+ case SIG_DBG_SINGLE_STEPPING:
+@@ -958,6 +962,11 @@ int sys_debug_setcontext(struct ucontext
+ current->thread.dbcr0 = new_dbcr0;
+ #endif
+
++ if (!access_ok(VERIFY_READ, ctx, sizeof(*ctx))
++ || __get_user(tmp, (u8 __user *) ctx)
++ || __get_user(tmp, (u8 __user *) (ctx + 1) - 1))
++ return -EFAULT;
++
+ /*
+ * If we get a fault copying the context into the kernel's
+ * image of the user's registers, we can't just return -EFAULT
+--- linux-2.6.16.20.orig/arch/powerpc/kernel/signal_64.c
++++ linux-2.6.16.20/arch/powerpc/kernel/signal_64.c
+@@ -183,6 +183,8 @@ static long restore_sigcontext(struct pt
+ err |= __get_user(msr, &sc->gp_regs[PT_MSR]);
+ if (err)
+ return err;
++ if (v_regs && !access_ok(VERIFY_READ, v_regs, 34 * sizeof(vector128)))
++ return -EFAULT;
+ /* Copy 33 vec registers (vr0..31 and vscr) from the stack */
+ if (v_regs != 0 && (msr & MSR_VEC) != 0)
+ err |= __copy_from_user(current->thread.vr, v_regs,
--- /dev/null
+From 30f1e3dd8c72abda343bcf415f7d8894a02b4290 Mon Sep 17 00:00:00 2001
+From: Oleg Nesterov <oleg@tv-sign.ru>
+Date: Thu, 15 Jun 2006 20:11:43 +0400
+Subject: run_posix_cpu_timers: remove a bogus BUG_ON() (CVE-2006-2445)
+
+do_exit() clears ->it_##clock##_expires, but nothing prevents
+another cpu from attaching the timer to an exiting process after that.
+arm_timer() tries to protect against this race, but the check
+is racy.
+
+After exit_notify() does 'write_unlock_irq(&tasklist_lock)' and
+before do_exit() calls 'schedule()' the local timer interrupt can find
+tsk->exit_state != 0. If that state was EXIT_DEAD (or another cpu
+does sys_wait4) the interrupted task has ->signal == NULL.
+
+At this moment the exiting task has no pending cpu timers, they were
+cleaned up in __exit_signal()->posix_cpu_timers_exit{,_group}(),
+so we can just return from irq.
+
+John Stultz recently confirmed this bug, see
+
+ http://marc.theaimsgroup.com/?l=linux-kernel&m=115015841413687
+
+Signed-off-by: Oleg Nesterov <oleg@tv-sign.ru>
+Signed-off-by: Linus Torvalds <torvalds@osdl.org>
+Signed-off-by: Chris Wright <chrisw@sous-sol.org>
+---
+ kernel/exit.c | 8 --------
+ kernel/posix-cpu-timers.c | 36 ++++++++++++++++++------------------
+ 2 files changed, 18 insertions(+), 26 deletions(-)
+
+--- linux-2.6.16.20.orig/kernel/exit.c
++++ linux-2.6.16.20/kernel/exit.c
+@@ -828,14 +828,6 @@ fastcall NORET_TYPE void do_exit(long co
+
+ tsk->flags |= PF_EXITING;
+
+- /*
+- * Make sure we don't try to process any timer firings
+- * while we are already exiting.
+- */
+- tsk->it_virt_expires = cputime_zero;
+- tsk->it_prof_expires = cputime_zero;
+- tsk->it_sched_expires = 0;
+-
+ if (unlikely(in_atomic()))
+ printk(KERN_INFO "note: %s[%d] exited with preempt_count %d\n",
+ current->comm, current->pid,
+--- linux-2.6.16.20.orig/kernel/posix-cpu-timers.c
++++ linux-2.6.16.20/kernel/posix-cpu-timers.c
+@@ -1288,30 +1288,30 @@ void run_posix_cpu_timers(struct task_st
+
+ #undef UNEXPIRED
+
+- BUG_ON(tsk->exit_state);
+-
+ /*
+ * Double-check with locks held.
+ */
+ read_lock(&tasklist_lock);
+- spin_lock(&tsk->sighand->siglock);
++ if (likely(tsk->signal != NULL)) {
++ spin_lock(&tsk->sighand->siglock);
+
+- /*
+- * Here we take off tsk->cpu_timers[N] and tsk->signal->cpu_timers[N]
+- * all the timers that are firing, and put them on the firing list.
+- */
+- check_thread_timers(tsk, &firing);
+- check_process_timers(tsk, &firing);
++ /*
++ * Here we take off tsk->cpu_timers[N] and tsk->signal->cpu_timers[N]
++ * all the timers that are firing, and put them on the firing list.
++ */
++ check_thread_timers(tsk, &firing);
++ check_process_timers(tsk, &firing);
+
+- /*
+- * We must release these locks before taking any timer's lock.
+- * There is a potential race with timer deletion here, as the
+- * siglock now protects our private firing list. We have set
+- * the firing flag in each timer, so that a deletion attempt
+- * that gets the timer lock before we do will give it up and
+- * spin until we've taken care of that timer below.
+- */
+- spin_unlock(&tsk->sighand->siglock);
++ /*
++ * We must release these locks before taking any timer's lock.
++ * There is a potential race with timer deletion here, as the
++ * siglock now protects our private firing list. We have set
++ * the firing flag in each timer, so that a deletion attempt
++ * that gets the timer lock before we do will give it up and
++ * spin until we've taken care of that timer below.
++ */
++ spin_unlock(&tsk->sighand->siglock);
++ }
+ read_unlock(&tasklist_lock);
+
+ /*
--- /dev/null
+powerpc-fix-machine-check-problem-on-32-bit-kernels.patch
+check_process_timers-fix-possible-lockup.patch
+run_posix_cpu_timers-remove-a-bogus-bug_on.patch
+xt_sctp-fix-endless-loop-caused-by-0-chunk-length.patch
--- /dev/null
+From stable-bounces@linux.kernel.org Mon Jun 19 10:18:30 2006
+Date: Mon, 19 Jun 2006 19:14:21 +0200
+From: Patrick McHardy <kaber@trash.net>
+To: "David S. Miller" <davem@davemloft.net>
+Cc: security@kernel.org, stable@kernel.org
+Subject: [NETFILTER]: xt_sctp: fix endless loop caused by 0 chunk length (CVE-2006-3085)
+
+Fix endless loop in the SCTP match similar to those already fixed in the
+SCTP conntrack helper (was CVE-2006-1527).
+
+Signed-off-by: Patrick McHardy <kaber@trash.net>
+Signed-off-by: Chris Wright <chrisw@sous-sol.org>
+---
+
+ net/netfilter/xt_sctp.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- linux-2.6.16.20.orig/net/netfilter/xt_sctp.c
++++ linux-2.6.16.20/net/netfilter/xt_sctp.c
+@@ -62,7 +62,7 @@ match_packet(const struct sk_buff *skb,
+
+ do {
+ sch = skb_header_pointer(skb, offset, sizeof(_sch), &_sch);
+- if (sch == NULL) {
++ if (sch == NULL || sch->length == 0) {
+ duprintf("Dropping invalid SCTP packet.\n");
+ *hotdrop = 1;
+ return 0;