--- /dev/null
+From 74a03b69d1b5ce00a568e142ca97e76b7f5239c6 Mon Sep 17 00:00:00 2001
+From: john stultz <johnstul@us.ibm.com>
+Date: Fri, 1 May 2009 13:10:25 -0700
+Subject: clockevents: prevent endless loop in tick_handle_periodic()
+
+From: john stultz <johnstul@us.ibm.com>
+
+commit 74a03b69d1b5ce00a568e142ca97e76b7f5239c6 upstream.
+
+tick_handle_periodic() can lock up hard when a one-shot clock event
+device is used in combination with the jiffies clocksource.
+
+Avoid the endless loop by requiring that a highres-capable clocksource
+be installed before we call tick_periodic() in a loop when using
+ONESHOT mode. The result is that we only increment jiffies once per
+interrupt until a continuous hardware clocksource is available.
+
+Without this, we can run into an endless loop: on each cycle through
+the loop, jiffies is updated, which increments time by tick_period or
+more (due to clock steering). That can make the event programming
+conclude that the next event is already in the past and fail, causing
+tick_periodic() to be called again, and the whole process loops
+forever.
+
+[ Impact: prevent hard lock up ]
+
+Signed-off-by: John Stultz <johnstul@us.ibm.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Cc: stable@kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
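+[ Illustrative note, not part of the patch: the tiny standalone C
+  program below (all names invented; it is a model, not kernel code)
+  shows why the unconditional tick_periodic() call can never let the
+  reprogramming catch up when jiffies itself is the clocksource, and
+  why skipping the update inside the retry loop lets it converge. ]
+
+#include <stdbool.h>
+#include <stdio.h>
+
+#define TICK 1000		/* stand-in for tick_period, arbitrary units */
+
+static unsigned long now;	/* stand-in for ktime_get() */
+static unsigned long jiffies;
+
+/* Like clockevents_program_event(): fails unless the expiry is in the future. */
+static bool program_event(unsigned long next)
+{
+	return next > now;
+}
+
+/*
+ * Like tick_periodic() with the jiffies clocksource: incrementing jiffies
+ * is the only thing that moves time forward, so "now" jumps a whole tick.
+ */
+static void periodic_tick(void)
+{
+	jiffies++;
+	now += TICK;
+}
+
+/* update_in_loop == true models the old, unconditional tick_periodic() call. */
+static int reprogram(bool update_in_loop, int max_tries)
+{
+	unsigned long next = now;	/* an expiry that is already due */
+	int tries;
+
+	for (tries = 0; tries < max_tries; tries++) {
+		if (program_event(next))
+			return tries;	/* converged */
+		if (update_in_loop)
+			periodic_tick();
+		next += TICK;
+	}
+	return -1;			/* never converged: the lockup */
+}
+
+int main(void)
+{
+	printf("old behaviour: %d\n", reprogram(true, 10));	/* prints -1 */
+	printf("with the gate: %d\n", reprogram(false, 10));	/* converges */
+	return 0;
+}
+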
+ kernel/time/tick-common.c | 12 +++++++++++-
+ 1 file changed, 11 insertions(+), 1 deletion(-)
+
+--- a/kernel/time/tick-common.c
++++ b/kernel/time/tick-common.c
+@@ -93,7 +93,17 @@ void tick_handle_periodic(struct clock_e
+ for (;;) {
+ if (!clockevents_program_event(dev, next, ktime_get()))
+ return;
+- tick_periodic(cpu);
++ /*
++ * Have to be careful here. If we're in oneshot mode,
++ * before we call tick_periodic() in a loop, we need
++ * to be sure we're using a real hardware clocksource.
++ * Otherwise we could get trapped in an infinite
++ * loop, as the tick_periodic() increments jiffies,
++		 * which will then increment time, possibly causing
++ * the loop to trigger again and again.
++ */
++ if (timekeeping_valid_for_hres())
++ tick_periodic(cpu);
+ next = ktime_add(next, tick_period);
+ }
+ }
--- /dev/null
+From a425a638c858fd10370b573bde81df3ba500e271 Mon Sep 17 00:00:00 2001
+From: Mel Gorman <mel@csn.ul.ie>
+Date: Tue, 5 May 2009 16:37:17 +0100
+Subject: Ignore madvise(MADV_WILLNEED) for hugetlbfs-backed regions
+
+From: Mel Gorman <mel@csn.ul.ie>
+
+commit a425a638c858fd10370b573bde81df3ba500e271 upstream.
+
+madvise(MADV_WILLNEED) forces page cache readahead on a range of memory
+backed by a file. It assumes that the pages required are order-0,
+"normal" page cache pages.
+
+On hugetlbfs, this assumption does not hold: order-0 pages are
+allocated and inserted into the hugetlbfs page cache. This leaks
+hugetlbfs page reservations and can trigger BUGs related to corrupted
+page tables.
+
+This patch causes MADV_WILLNEED to be ignored for hugetlbfs-backed
+regions.
+
+Signed-off-by: Mel Gorman <mel@csn.ul.ie>
+Cc: stable@kernel.org
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
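+[ Illustrative note, not part of the patch: with the change applied the
+  user-visible behaviour is that MADV_WILLNEED on a huge-page-backed
+  mapping returns 0 and is silently ignored.  The small test below
+  assumes a kernel with MAP_HUGETLB support and at least one reserved
+  huge page; it is a sketch, not a regression test. ]
+
+#define _GNU_SOURCE
+#include <stdio.h>
+#include <sys/mman.h>
+
+#define LEN (2UL * 1024 * 1024)	/* one 2 MB huge page */
+
+int main(void)
+{
+	void *p = mmap(NULL, LEN, PROT_READ | PROT_WRITE,
+		       MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);
+
+	if (p == MAP_FAILED) {
+		perror("mmap(MAP_HUGETLB)");	/* no huge pages reserved? */
+		return 1;
+	}
+
+	/* With the fix this is a no-op: no order-0 readahead pages are
+	 * allocated into the hugetlbfs page cache, and 0 is returned. */
+	if (madvise(p, LEN, MADV_WILLNEED) != 0)
+		perror("madvise(MADV_WILLNEED)");
+	else
+		printf("MADV_WILLNEED accepted and ignored\n");
+
+	munmap(p, LEN);
+	return 0;
+}
+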
+ mm/madvise.c | 8 ++++++++
+ 1 file changed, 8 insertions(+)
+
+--- a/mm/madvise.c
++++ b/mm/madvise.c
+@@ -112,6 +112,14 @@ static long madvise_willneed(struct vm_a
+ if (!file)
+ return -EBADF;
+
++ /*
++ * Page cache readahead assumes page cache pages are order-0 which
++ * is not the case for hugetlbfs. Do not give a bad return value
++ * but ignore the advice.
++ */
++ if (vma->vm_flags & VM_HUGETLB)
++ return 0;
++
+ if (file->f_mapping->a_ops->get_xip_mem) {
+ /* no bad return value, but ignore advice */
+ return 0;
--- /dev/null
+From 93af7aca44f0e82e67bda10a0fb73d383edcc8bd Mon Sep 17 00:00:00 2001
+From: Lennert Buytenhek <buytenh@wantstofly.org>
+Date: Wed, 29 Apr 2009 11:58:18 +0000
+Subject: mv643xx_eth: 64bit mib counter read fix
+
+From: Lennert Buytenhek <buytenh@wantstofly.org>
+
+commit 93af7aca44f0e82e67bda10a0fb73d383edcc8bd upstream.
+
+On several mv643xx_eth hardware versions, the two 64bit mib counters
+for 'good octets received' and 'good octets sent' are actually 32bit
+counters, and reading from the upper half of the register has the same
+effect as reading from the lower half: an atomic read-and-clear of the
+entire 32bit counter value. Under heavy traffic, this can occasionally
+lead to small numbers being added to the upper half of the 64bit mib
+counter even though no 32bit wrap has occurred.
+
+Since we poll the mib counters at least every 30 seconds anyway, we
+might as well just skip the reads of the upper halves of the hardware
+counters without breaking the stats, which this patch does.
+
+Signed-off-by: Lennert Buytenhek <buytenh@marvell.com>
+Cc: stable@kernel.org
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
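+[ Illustrative note, not part of the patch: the standalone sketch below
+  (invented names, not driver code) shows how a 32bit read-and-clear
+  hardware counter is folded into a 64bit software total.  As long as
+  the counter is polled before it can wrap, which the driver's 30
+  second poll is meant to ensure, no separate "upper half" read is
+  needed. ]
+
+#include <stdint.h>
+#include <stdio.h>
+
+static uint32_t hw_counter;		/* pretend 32bit MMIO mib register */
+
+/* Models the hardware's atomic read-and-clear register access. */
+static uint32_t mib_read(void)
+{
+	uint32_t val = hw_counter;
+
+	hw_counter = 0;
+	return val;
+}
+
+int main(void)
+{
+	uint64_t good_octets = 0;	/* the driver's 64bit software counter */
+
+	/* traffic arrives, the poller runs, more traffic, another poll ... */
+	hw_counter += 3000000000u;	/* close to the 32bit limit */
+	good_octets += mib_read();
+
+	hw_counter += 2500000000u;	/* would have wrapped without the poll */
+	good_octets += mib_read();
+
+	/* prints 5500000000: the total exceeds 32 bits without ever
+	 * reading an "upper half" register */
+	printf("accumulated: %llu octets\n", (unsigned long long)good_octets);
+	return 0;
+}
+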
+ drivers/net/mv643xx_eth.c | 2 --
+ 1 file changed, 2 deletions(-)
+
+--- a/drivers/net/mv643xx_eth.c
++++ b/drivers/net/mv643xx_eth.c
+@@ -1060,7 +1060,6 @@ static void mib_counters_update(struct m
+ struct mib_counters *p = &mp->mib_counters;
+
+ p->good_octets_received += mib_read(mp, 0x00);
+- p->good_octets_received += (u64)mib_read(mp, 0x04) << 32;
+ p->bad_octets_received += mib_read(mp, 0x08);
+ p->internal_mac_transmit_err += mib_read(mp, 0x0c);
+ p->good_frames_received += mib_read(mp, 0x10);
+@@ -1074,7 +1073,6 @@ static void mib_counters_update(struct m
+ p->frames_512_to_1023_octets += mib_read(mp, 0x30);
+ p->frames_1024_to_max_octets += mib_read(mp, 0x34);
+ p->good_octets_sent += mib_read(mp, 0x38);
+- p->good_octets_sent += (u64)mib_read(mp, 0x3c) << 32;
+ p->good_frames_sent += mib_read(mp, 0x40);
+ p->excessive_collision += mib_read(mp, 0x44);
+ p->multicast_frames_sent += mib_read(mp, 0x48);
--- /dev/null
+From f83ce3e6b02d5e48b3a43b001390e2b58820389d Mon Sep 17 00:00:00 2001
+From: Jake Edge <jake@lwn.net>
+Date: Mon, 4 May 2009 12:51:14 -0600
+Subject: proc: avoid information leaks to non-privileged processes
+
+From: Jake Edge <jake@lwn.net>
+
+commit f83ce3e6b02d5e48b3a43b001390e2b58820389d upstream.
+
+By using the same test as is used for /proc/pid/maps and /proc/pid/smaps,
+only allow processes that can ptrace() a given process to see information
+that might be used to bypass address space layout randomization (ASLR).
+These include eip, esp, wchan, and start_stack in /proc/pid/stat as well
+as the non-symbolic output from /proc/pid/wchan.
+
+ASLR can be bypassed by sampling eip, as shown by the proof-of-concept
+code at http://code.google.com/p/fuzzyaslr/. As part of a presentation
+(http://www.cr0.org/paper/to-jt-linux-alsr-leak.pdf), esp and wchan
+were also noted as possibly usable information leaks. The start_stack
+address also leaks potentially useful information.
+
+Cc: Stable Team <stable@kernel.org>
+Signed-off-by: Jake Edge <jake@lwn.net>
+Acked-by: Arjan van de Ven <arjan@linux.intel.com>
+Acked-by: "Eric W. Biederman" <ebiederm@xmission.com>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
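+[ Illustrative note, not part of the patch: the standalone program below
+  shows what an unprivileged reader is expected to see once the check is
+  in place: startstack, kstkesp and kstkeip in /proc/<pid>/stat read as 0
+  for a process the reader may not ptrace (PID 1 is used as a convenient
+  target).  It is a sketch only; the field numbers follow proc(5). ]
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+int main(void)
+{
+	char buf[4096], *p;
+	unsigned long field[64] = { 0 };
+	FILE *f = fopen("/proc/1/stat", "r");
+	int n = 0;
+
+	if (!f || !fgets(buf, sizeof(buf), f)) {
+		perror("/proc/1/stat");
+		return 1;
+	}
+	fclose(f);
+
+	/* comm may contain spaces, so start parsing after the last ')' */
+	p = strrchr(buf, ')');
+	if (!p)
+		return 1;
+	for (p = strtok(p + 2, " "); p && n < 64; p = strtok(NULL, " "))
+		field[n++] = strtoul(p, NULL, 10);
+
+	/* proc(5): field 28 = startstack, 29 = kstkesp, 30 = kstkeip;
+	 * parsing started at field 3, hence the "- 3" index adjustment. */
+	printf("startstack=%lu kstkesp=%lu kstkeip=%lu\n",
+	       field[28 - 3], field[29 - 3], field[30 - 3]);
+	return 0;
+}
+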
+ fs/proc/array.c | 13 +++++++++----
+ fs/proc/base.c | 5 ++++-
+ 2 files changed, 13 insertions(+), 5 deletions(-)
+
+--- a/fs/proc/array.c
++++ b/fs/proc/array.c
+@@ -80,6 +80,7 @@
+ #include <linux/delayacct.h>
+ #include <linux/seq_file.h>
+ #include <linux/pid_namespace.h>
++#include <linux/ptrace.h>
+ #include <linux/tracehook.h>
+
+ #include <asm/pgtable.h>
+@@ -347,6 +348,7 @@ static int do_task_stat(struct seq_file
+ char state;
+ pid_t ppid = 0, pgid = -1, sid = -1;
+ int num_threads = 0;
++ int permitted;
+ struct mm_struct *mm;
+ unsigned long long start_time;
+ unsigned long cmin_flt = 0, cmaj_flt = 0;
+@@ -359,11 +361,14 @@ static int do_task_stat(struct seq_file
+
+ state = *get_task_state(task);
+ vsize = eip = esp = 0;
++ permitted = ptrace_may_access(task, PTRACE_MODE_READ);
+ mm = get_task_mm(task);
+ if (mm) {
+ vsize = task_vsize(mm);
+- eip = KSTK_EIP(task);
+- esp = KSTK_ESP(task);
++ if (permitted) {
++ eip = KSTK_EIP(task);
++ esp = KSTK_ESP(task);
++ }
+ }
+
+ get_task_comm(tcomm, task);
+@@ -419,7 +424,7 @@ static int do_task_stat(struct seq_file
+ unlock_task_sighand(task, &flags);
+ }
+
+- if (!whole || num_threads < 2)
++ if (permitted && (!whole || num_threads < 2))
+ wchan = get_wchan(task);
+ if (!whole) {
+ min_flt = task->min_flt;
+@@ -471,7 +476,7 @@ static int do_task_stat(struct seq_file
+ rsslim,
+ mm ? mm->start_code : 0,
+ mm ? mm->end_code : 0,
+- mm ? mm->start_stack : 0,
++ (permitted && mm) ? mm->start_stack : 0,
+ esp,
+ eip,
+ /* The signal information here is obsolete.
+--- a/fs/proc/base.c
++++ b/fs/proc/base.c
+@@ -327,7 +327,10 @@ static int proc_pid_wchan(struct task_st
+ wchan = get_wchan(task);
+
+ if (lookup_symbol_name(wchan, symname) < 0)
+- return sprintf(buffer, "%lu", wchan);
++ if (!ptrace_may_access(task, PTRACE_MODE_READ))
++ return 0;
++ else
++ return sprintf(buffer, "%lu", wchan);
+ else
+ return sprintf(buffer, "%s", symname);
+ }
--- /dev/null
+From e805e4d0b53506dff4255a2792483f094e7fcd2c Mon Sep 17 00:00:00 2001
+From: Jussi Kivilinna <jussi.kivilinna@mbnet.fi>
+Date: Wed, 22 Apr 2009 10:59:37 +0300
+Subject: rndis_wlan: fix initialization order for workqueue&workers
+
+From: Jussi Kivilinna <jussi.kivilinna@mbnet.fi>
+
+commit e805e4d0b53506dff4255a2792483f094e7fcd2c upstream.
+
+rndis_wext_link_change() might be called from rndis_command() during
+the initialization stage, before priv->workqueue/priv->work have been
+initialized. This causes an invalid opcode fault in rndis_wext_bind()
+on some brands of bcm4320.
+
+Fix this by initializing the workqueue/workers in rndis_wext_bind()
+before rndis_command() is used.
+
+This bug has existed since 2.6.25, reported at:
+ http://bugzilla.kernel.org/show_bug.cgi?id=12794
+
+Signed-off-by: Jussi Kivilinna <jussi.kivilinna@mbnet.fi>
+Signed-off-by: John W. Linville <linville@tuxdriver.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
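+[ Illustrative note, not part of the patch: the plain-C sketch below
+  (invented names, no kernel APIs) models the ordering rule the patch
+  enforces: a callback that can fire during device bind must find its
+  resources already set up, and the bind error path must then tear
+  those resources down again. ]
+
+#include <stdio.h>
+#include <stdlib.h>
+
+struct queue { const char *name; };
+
+struct priv {
+	struct queue *workqueue;	/* stands in for priv->workqueue */
+};
+
+/*
+ * May run from inside bind_hardware(), just as rndis_wext_link_change()
+ * may run from rndis_command() during generic_rndis_bind().
+ */
+static void link_change(struct priv *priv)
+{
+	/* crashes (or worse) if priv->workqueue has not been set up yet */
+	printf("queueing work on %s\n", priv->workqueue->name);
+}
+
+static int bind_hardware(struct priv *priv)
+{
+	link_change(priv);		/* the early callback */
+	return -1;			/* pretend the bind then fails */
+}
+
+int main(void)
+{
+	static struct queue wq = { .name = "rndis_wlan" };
+	struct priv *priv = calloc(1, sizeof(*priv));
+
+	if (!priv)
+		return 1;
+
+	priv->workqueue = &wq;		/* the fix: set up before bind */
+
+	if (bind_hardware(priv) < 0) {
+		priv->workqueue = NULL;	/* error path undoes the early setup */
+		free(priv);
+		return 1;
+	}
+	free(priv);
+	return 0;
+}
+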
+ drivers/net/wireless/rndis_wlan.c | 14 ++++++++++----
+ 1 file changed, 10 insertions(+), 4 deletions(-)
+
+--- a/drivers/net/wireless/rndis_wlan.c
++++ b/drivers/net/wireless/rndis_wlan.c
+@@ -2556,6 +2556,11 @@ static int rndis_wext_bind(struct usbnet
+ mutex_init(&priv->command_lock);
+ spin_lock_init(&priv->stats_lock);
+
++ /* because rndis_command() sleeps we need to use workqueue */
++ priv->workqueue = create_singlethread_workqueue("rndis_wlan");
++ INIT_WORK(&priv->work, rndis_wext_worker);
++ INIT_DELAYED_WORK(&priv->stats_work, rndis_update_wireless_stats);
++
+ /* try bind rndis_host */
+ retval = generic_rndis_bind(usbdev, intf, FLAG_RNDIS_PHYM_WIRELESS);
+ if (retval < 0)
+@@ -2600,16 +2605,17 @@ static int rndis_wext_bind(struct usbnet
+ disassociate(usbdev, 1);
+ netif_carrier_off(usbdev->net);
+
+- /* because rndis_command() sleeps we need to use workqueue */
+- priv->workqueue = create_singlethread_workqueue("rndis_wlan");
+- INIT_DELAYED_WORK(&priv->stats_work, rndis_update_wireless_stats);
+ queue_delayed_work(priv->workqueue, &priv->stats_work,
+ round_jiffies_relative(STATS_UPDATE_JIFFIES));
+- INIT_WORK(&priv->work, rndis_wext_worker);
+
+ return 0;
+
+ fail:
++ cancel_delayed_work_sync(&priv->stats_work);
++ cancel_work_sync(&priv->work);
++ flush_workqueue(priv->workqueue);
++ destroy_workqueue(priv->workqueue);
++
+ kfree(priv);
+ return retval;
+ }
acpi-revert-conflicting-workaround-for-bios-w-mangled-prt-entries.patch
mips-cve-2009-0029-enable-syscall-wrappers.patch
usb-serial-fix-lifetime-and-locking-problems.patch
+clockevents-prevent-endless-loop-in-tick_handle_periodic.patch
+ignore-madvise-for-hugetlbfs-backed-regions.patch
+mv643xx_eth-64bit-mib-counter-read-fix.patch
+proc-avoid-information-leaks-to-non-privileged-processes.patch
+rndis_wlan-fix-initialization-order-for-workqueue-workers.patch