--- /dev/null
+From stable-bounces@linux.kernel.org Thu Dec 13 00:58:37 2007
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Thu, 13 Dec 2007 09:57:17 +0100
+Subject: clockevents: fix reprogramming decision in oneshot broadcast
+To: Greg Kroah-Hartman <gregkh@suse.de>
+Cc: Thomas Gleixner <tglx@linutronix.de>, stable@kernel.org
+Message-ID: <20071213085717.GA1926@elte.hu>
+Content-Disposition: inline
+
+From: Thomas Gleixner <tglx@linutronix.de>
+
+patch cdc6f27d9e3c2f7ca1a3e19c6eabb1ad6a2add5d in mainline.
+
+A previous version of the code did the reprogramming of the broadcast
+device in the return from idle code. This was removed, but the logic in
+tick_handle_oneshot_broadcast() was kept the same.
+
+When a broadcast interrupt happens we signal the expiry to all CPUs
+which have an expired event. If none of the CPUs has an expired event,
+which can happen in dyntick mode, then we reprogram the broadcast
+device. We do not reprogram otherwise, but this is only correct if all
+CPUs which are in the idle broadcast state have been woken up.
+
+The code ignores that there might be pending, not yet expired events on
+other CPUs which are in the idle broadcast state, so the delivery of
+those events can be delayed for quite some time.
+
+Change the tick_handle_oneshot_broadcast() function to check for CPUs,
+which are in broadcast state and are not woken up by the current event,
+and enforce the rearming of the broadcast device for those CPUs.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Ingo Molnar <mingo@elte.hu>
+
+---
+ kernel/time/tick-broadcast.c | 56 ++++++++++++++++---------------------------
+ 1 file changed, 21 insertions(+), 35 deletions(-)
+
+--- a/kernel/time/tick-broadcast.c
++++ b/kernel/time/tick-broadcast.c
+@@ -387,45 +387,19 @@ int tick_resume_broadcast_oneshot(struct
+ }
+
+ /*
+- * Reprogram the broadcast device:
+- *
+- * Called with tick_broadcast_lock held and interrupts disabled.
+- */
+-static int tick_broadcast_reprogram(void)
+-{
+- ktime_t expires = { .tv64 = KTIME_MAX };
+- struct tick_device *td;
+- int cpu;
+-
+- /*
+- * Find the event which expires next:
+- */
+- for (cpu = first_cpu(tick_broadcast_oneshot_mask); cpu != NR_CPUS;
+- cpu = next_cpu(cpu, tick_broadcast_oneshot_mask)) {
+- td = &per_cpu(tick_cpu_device, cpu);
+- if (td->evtdev->next_event.tv64 < expires.tv64)
+- expires = td->evtdev->next_event;
+- }
+-
+- if (expires.tv64 == KTIME_MAX)
+- return 0;
+-
+- return tick_broadcast_set_event(expires, 0);
+-}
+-
+-/*
+ * Handle oneshot mode broadcasting
+ */
+ static void tick_handle_oneshot_broadcast(struct clock_event_device *dev)
+ {
+ struct tick_device *td;
+ cpumask_t mask;
+- ktime_t now;
++ ktime_t now, next_event;
+ int cpu;
+
+ spin_lock(&tick_broadcast_lock);
+ again:
+ dev->next_event.tv64 = KTIME_MAX;
++ next_event.tv64 = KTIME_MAX;
+ mask = CPU_MASK_NONE;
+ now = ktime_get();
+ /* Find all expired events */
+@@ -434,19 +408,31 @@ again:
+ td = &per_cpu(tick_cpu_device, cpu);
+ if (td->evtdev->next_event.tv64 <= now.tv64)
+ cpu_set(cpu, mask);
++ else if (td->evtdev->next_event.tv64 < next_event.tv64)
++ next_event.tv64 = td->evtdev->next_event.tv64;
+ }
+
+ /*
+- * Wakeup the cpus which have an expired event. The broadcast
+- * device is reprogrammed in the return from idle code.
++ * Wakeup the cpus which have an expired event.
++ */
++ tick_do_broadcast(mask);
++
++ /*
++ * Two reasons for reprogram:
++ *
++ * - The global event did not expire any CPU local
++ * events. This happens in dyntick mode, as the maximum PIT
++ * delta is quite small.
++ *
++ * - There are pending events on sleeping CPUs which were not
++ * in the event mask
+ */
+- if (!tick_do_broadcast(mask)) {
++ if (next_event.tv64 != KTIME_MAX) {
+ /*
+- * The global event did not expire any CPU local
+- * events. This happens in dyntick mode, as the
+- * maximum PIT delta is quite small.
++ * Rearm the broadcast device. If event expired,
++ * repeat the above
+ */
+- if (tick_broadcast_reprogram())
++ if (tick_broadcast_set_event(next_event, 0))
+ goto again;
+ }
+ spin_unlock(&tick_broadcast_lock);
--- /dev/null
+From rjw@sisk.pl Sun Dec 16 15:44:47 2007
+From: "Rafael J. Wysocki" <rjw@sisk.pl>
+Date: Mon, 17 Dec 2007 01:03:46 +0100
+Subject: Freezer: Fix APM emulation breakage
+To: Greg KH <greg@kroah.com>
+Cc: Chuck Ebbert <cebbert@redhat.com>, Andrew Morton <akpm@linux-foundation.org>, Greg Kroah-Hartman <gregkh@suse.de>, torvalds@linux-foundation.org, linux-kernel@vger.kernel.org, stable@kernel.org
+Message-ID: <200712170103.47097.rjw@sisk.pl>
+Content-Disposition: inline
+
+From: Rafael J. Wysocki <rjw@sisk.pl>
+
+The APM emulation is currently broken as a result of commit
+831441862956fffa17b9801db37e6ea1650b0f69
+"Freezer: make kernel threads nonfreezable by default"
+that removed the PF_NOFREEZE annotations from apm_ioctl() without
+adding the appropriate freezer hooks. Fix it and remove the
+unnecessary variable flags from apm_ioctl().
+
+This problem has been fixed in the mainline by
+commit cb43c54ca05c01533c45e4d3abfe8f99b7acf624
+"Freezer: Fix APM emulation breakage".
+
+Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/char/apm-emulation.c | 15 ++++++++-------
+ include/linux/freezer.h | 23 +++++++++++++++++++++++
+ 2 files changed, 31 insertions(+), 7 deletions(-)
+
+--- a/drivers/char/apm-emulation.c
++++ b/drivers/char/apm-emulation.c
+@@ -295,7 +295,6 @@ static int
+ apm_ioctl(struct inode * inode, struct file *filp, u_int cmd, u_long arg)
+ {
+ struct apm_user *as = filp->private_data;
+- unsigned long flags;
+ int err = -EINVAL;
+
+ if (!as->suser || !as->writer)
+@@ -331,10 +330,16 @@ apm_ioctl(struct inode * inode, struct f
+ * Wait for the suspend/resume to complete. If there
+ * are pending acknowledges, we wait here for them.
+ */
+- flags = current->flags;
++ freezer_do_not_count();
+
+ wait_event(apm_suspend_waitqueue,
+ as->suspend_state == SUSPEND_DONE);
++
++ /*
++ * Since we are waiting until the suspend is done, the
++ * try_to_freeze() in freezer_count() will not trigger
++ */
++ freezer_count();
+ } else {
+ as->suspend_state = SUSPEND_WAIT;
+ mutex_unlock(&state_lock);
+@@ -362,14 +367,10 @@ apm_ioctl(struct inode * inode, struct f
+ * Wait for the suspend/resume to complete. If there
+ * are pending acknowledges, we wait here for them.
+ */
+- flags = current->flags;
+-
+- wait_event_interruptible(apm_suspend_waitqueue,
++ wait_event_freezable(apm_suspend_waitqueue,
+ as->suspend_state == SUSPEND_DONE);
+ }
+
+- current->flags = flags;
+-
+ mutex_lock(&state_lock);
+ err = as->suspend_result;
+ as->suspend_state = SUSPEND_NONE;
+--- a/include/linux/freezer.h
++++ b/include/linux/freezer.h
+@@ -4,6 +4,7 @@
+ #define FREEZER_H_INCLUDED
+
+ #include <linux/sched.h>
++#include <linux/wait.h>
+
+ #ifdef CONFIG_PM_SLEEP
+ /*
+@@ -126,6 +127,24 @@ static inline void set_freezable(void)
+ current->flags &= ~PF_NOFREEZE;
+ }
+
++/*
++ * Freezer-friendly wrapper around wait_event_interruptible(), originally
++ * defined in <linux/wait.h>
++ */
++
++#define wait_event_freezable(wq, condition) \
++({ \
++ int __retval; \
++ do { \
++ __retval = wait_event_interruptible(wq, \
++ (condition) || freezing(current)); \
++ if (__retval && !freezing(current)) \
++ break; \
++ else if (!(condition)) \
++ __retval = -ERESTARTSYS; \
++ } while (try_to_freeze()); \
++ __retval; \
++})
+ #else /* !CONFIG_PM_SLEEP */
+ static inline int frozen(struct task_struct *p) { return 0; }
+ static inline int freezing(struct task_struct *p) { return 0; }
+@@ -143,6 +162,10 @@ static inline void freezer_do_not_count(
+ static inline void freezer_count(void) {}
+ static inline int freezer_should_skip(struct task_struct *p) { return 0; }
+ static inline void set_freezable(void) {}
++
++#define wait_event_freezable(wq, condition) \
++ wait_event_interruptible(wq, condition)
++
+ #endif /* !CONFIG_PM_SLEEP */
+
+ #endif /* FREEZER_H_INCLUDED */
--- /dev/null
+From stable-bounces@linux.kernel.org Mon Dec 17 16:32:25 2007
+From: Christoph Lameter <clameter@sgi.com>
+Date: Mon, 17 Dec 2007 16:20:27 -0800
+Subject: quicklist: Set tlb->need_flush if pages are remaining in quicklist 0
+To: torvalds@linux-foundation.org
+Cc: stable@kernel.org, akpm@linux-foundation.org, dhaval@linux.vnet.ibm.com, clameter@sgi.com
+Message-ID: <200712180020.lBI0KSKF010011@imap1.linux-foundation.org>
+
+
+From: Christoph Lameter <clameter@sgi.com>
+
+patch 421d99193537a6522aac2148286f08792167d5fd in mainline.
+
+This ensures that the quicklists are drained. Otherwise draining may only
+occur when the processor reaches an idle state.
+
+Fixes fatal leakage of pgd_t's on 2.6.22 and later.
+
+Signed-off-by: Christoph Lameter <clameter@sgi.com>
+Reported-by: Dhaval Giani <dhaval@linux.vnet.ibm.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+
+---
+ include/asm-generic/tlb.h | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+--- a/include/asm-generic/tlb.h
++++ b/include/asm-generic/tlb.h
+@@ -14,6 +14,7 @@
+ #define _ASM_GENERIC__TLB_H
+
+ #include <linux/swap.h>
++#include <linux/quicklist.h>
+ #include <asm/pgalloc.h>
+ #include <asm/tlbflush.h>
+
+@@ -85,6 +86,9 @@ tlb_flush_mmu(struct mmu_gather *tlb, un
+ static inline void
+ tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
+ {
++#ifdef CONFIG_QUICKLIST
++ tlb->need_flush += &__get_cpu_var(quicklist)[0].nr_pages != 0;
++#endif
+ tlb_flush_mmu(tlb, start, end);
+
+ /* keep the page table cache within bounds */
--- /dev/null
+From stable-bounces@linux.kernel.org Sat Dec 22 14:04:08 2007
+From: Christoph Lameter <clameter@sgi.com>
+Date: Sat, 22 Dec 2007 14:03:23 -0800
+Subject: quicklists: do not release off node pages early
+To: torvalds@linux-foundation.org
+Cc: stable@kernel.org, akpm@linux-foundation.org, dhaval@linux.vnet.ibm.com, clameter@sgi.com
+Message-ID: <200712222203.lBMM3Nsk021922@imap1.linux-foundation.org>
+
+
+From: Christoph Lameter <clameter@sgi.com>
+
+patch ed367fc3a7349b17354c7acef551533337764859 in mainline.
+
+quicklists must keep even off node pages on the quicklists until the TLB
+flush has been completed.
+
+Signed-off-by: Christoph Lameter <clameter@sgi.com>
+Cc: Dhaval Giani <dhaval@linux.vnet.ibm.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ include/linux/quicklist.h | 8 --------
+ 1 file changed, 8 deletions(-)
+
+--- a/include/linux/quicklist.h
++++ b/include/linux/quicklist.h
+@@ -56,14 +56,6 @@ static inline void __quicklist_free(int
+ struct page *page)
+ {
+ struct quicklist *q;
+- int nid = page_to_nid(page);
+-
+- if (unlikely(nid != numa_node_id())) {
+- if (dtor)
+- dtor(p);
+- __free_page(page);
+- return;
+- }
+
+ q = &get_cpu_var(quicklist)[nr];
+ *(void **)p = q->page;
--- /dev/null
+From stable-bounces@linux.kernel.org Tue Jan 15 10:52:21 2008
+From: Christoph Lameter <clameter@sgi.com>
+Date: Wed, 16 Jan 2008 00:21:19 +0530
+Subject: quicklists: Only consider memory that can be used with GFP_KERNEL
+To: gregkh@suse.de
+Cc: Andrew Morton <akpm@linux-foundation.org>, stable@kernel.org, clameter@sgi.com
+Message-ID: <20080115185119.GA6806@linux.vnet.ibm.com>
+Content-Disposition: inline
+
+From: Christoph Lameter <clameter@sgi.com>
+
+patch 96990a4ae979df9e235d01097d6175759331e88c in mainline.
+
+Quicklists calculates the size of the quicklists based on the number of
+free pages. This must be the number of free pages that can be allocated
+with GFP_KERNEL. node_page_state() includes the pages in ZONE_HIGHMEM and
+ZONE_MOVABLE which may lead the quicklists to become too large causing OOM.
+
+Signed-off-by: Christoph Lameter <clameter@sgi.com>
+Tested-by: Dhaval Giani <dhaval@linux.vnet.ibm.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ mm/quicklist.c | 12 ++++++++++--
+ 1 file changed, 10 insertions(+), 2 deletions(-)
+
+--- a/mm/quicklist.c
++++ b/mm/quicklist.c
+@@ -26,9 +26,17 @@ DEFINE_PER_CPU(struct quicklist, quickli
+ static unsigned long max_pages(unsigned long min_pages)
+ {
+ unsigned long node_free_pages, max;
++ struct zone *zones = NODE_DATA(numa_node_id())->node_zones;
++
++ node_free_pages =
++#ifdef CONFIG_ZONE_DMA
++ zone_page_state(&zones[ZONE_DMA], NR_FREE_PAGES) +
++#endif
++#ifdef CONFIG_ZONE_DMA32
++ zone_page_state(&zones[ZONE_DMA32], NR_FREE_PAGES) +
++#endif
++ zone_page_state(&zones[ZONE_NORMAL], NR_FREE_PAGES);
+
+- node_free_pages = node_page_state(numa_node_id(),
+- NR_FREE_PAGES);
+ max = node_free_pages / FRACTION_OF_NODE_MEM;
+ return max(max, min_pages);
+ }
pm-acpi-and-apm-must-not-be-enabled-at-the-same-time.patch
crypto-padlock-fix-spurious-ecb-page-fault.patch
usb-update-sierra.c-with-latest-device-ids-that-are-in-2.6.24-rc7.patch
+clockevents-fix-reprogramming-decision-in-oneshot-broadcast.patch
+freezer-fix-apm-emulation-breakage.patch
+vfs-coredumping-fix.patch
+quicklist-set-tlb-need_flush-if-pages-are-remaining-in-quicklist-0.patch
+quicklists-do-not-release-off-node-pages-early.patch
+quicklists-only-consider-memory-that-can-be-used-with-gfp_kernel.patch
--- /dev/null
+From stable-bounces@linux.kernel.org Mon Dec 17 12:18:29 2007
+From: Ingo Molnar <mingo@elte.hu>
+Date: Mon, 17 Dec 2007 21:17:56 +0100
+Subject: vfs: coredumping fix (CVE-2007-6206)
+To: stable@kernel.org
+Cc: Ingo Molnar <mingo@elte.hu>
+Message-ID: <20071217201756.GA15344@stro.at>
+Content-Disposition: inline
+
+
+From: Ingo Molnar <mingo@elte.hu>
+
+vfs: coredumping fix
+
+patch c46f739dd39db3b07ab5deb4e3ec81e1c04a91af in mainline
+
+fix: http://bugzilla.kernel.org/show_bug.cgi?id=3043
+
+only allow coredumping to the same uid that the coredumping
+task runs under.
+
+Signed-off-by: Ingo Molnar <mingo@elte.hu>
+Acked-by: Alan Cox <alan@redhat.com>
+Acked-by: Christoph Hellwig <hch@lst.de>
+Acked-by: Al Viro <viro@ftp.linux.org.uk>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: maximilian attems <max@stro.at>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ fs/exec.c | 6 ++++++
+ 1 file changed, 6 insertions(+)
+
+--- a/fs/exec.c
++++ b/fs/exec.c
+@@ -1786,6 +1786,12 @@ int do_coredump(long signr, int exit_cod
+ but keep the previous behaviour for now. */
+ if (!ispipe && !S_ISREG(inode->i_mode))
+ goto close_fail;
++ /*
++ * Dont allow local users get cute and trick others to coredump
++ * into their pre-created files:
++ */
++ if (inode->i_uid != current->fsuid)
++ goto close_fail;
+ if (!file->f_op)
+ goto close_fail;
+ if (!file->f_op->write)