--- /dev/null
+From 81156928f8fe31621e467490b9d441c0285998c3 Mon Sep 17 00:00:00 2001
+From: Pavel Roskin <proski@gnu.org>
+Date: Sat, 17 Jan 2009 13:33:03 -0500
+Subject: dell_rbu: use scnprintf() instead of less secure sprintf()
+
+From: Pavel Roskin <proski@gnu.org>
+
+commit 81156928f8fe31621e467490b9d441c0285998c3 upstream.
+
+Reading 0 bytes from /sys/devices/platform/dell_rbu/image_type or
+/sys/devices/platform/dell_rbu/packet_size by an ordinary user causes an
+oops.
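+
+The difference matters because sprintf() pays no attention to the size
+of the destination buffer, whereas scnprintf() writes at most the given
+size and returns the number of characters actually stored, which is 0
+when the size is 0.  A minimal sketch of the two conventions (buf here
+only stands in for the sysfs read buffer):
+
+	char buf[8];
+
+	/* sprintf() trusts the caller: even with no room it would
+	 * write the whole formatted string past the allocation. */
+	int n = sprintf(buf, "%s\n", image_type);
+
+	/* scnprintf() is bounded: it stores at most sizeof(buf) bytes
+	 * including the terminating NUL; a size of 0 means nothing is
+	 * written and 0 is returned. */
+	int m = scnprintf(buf, sizeof(buf), "%s\n", image_type);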
+
+Signed-off-by: Pavel Roskin <proski@gnu.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/firmware/dell_rbu.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/drivers/firmware/dell_rbu.c
++++ b/drivers/firmware/dell_rbu.c
+@@ -576,7 +576,7 @@ static ssize_t read_rbu_image_type(struc
+ {
+ int size = 0;
+ if (!pos)
+- size = sprintf(buffer, "%s\n", image_type);
++ size = scnprintf(buffer, count, "%s\n", image_type);
+ return size;
+ }
+
+@@ -648,7 +648,7 @@ static ssize_t read_rbu_packet_size(stru
+ int size = 0;
+ if (!pos) {
+ spin_lock(&rbu_data.lock);
+- size = sprintf(buffer, "%lu\n", rbu_data.packetsize);
++ size = scnprintf(buffer, count, "%lu\n", rbu_data.packetsize);
+ spin_unlock(&rbu_data.lock);
+ }
+ return size;
--- /dev/null
+From 46a5f173fc88ffc22651162033696d8a9fbcdc5c Mon Sep 17 00:00:00 2001
+From: Alistair John Strachan <alistair@devzero.co.uk>
+Date: Thu, 15 Jan 2009 22:27:48 +0100
+Subject: hwmon: (abituguru3) Fix CONFIG_DMI=n fallback to probe
+
+From: Alistair John Strachan <alistair@devzero.co.uk>
+
+commit 46a5f173fc88ffc22651162033696d8a9fbcdc5c upstream.
+
+When CONFIG_DMI is not enabled, DMI detection should flag that no board
+could be detected (err=1) rather than report another error condition
+(err<0).
+
+This fixes the fallback to manual probing for all motherboards, even
+those without DMI strings, when CONFIG_DMI=n.
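+
+A sketch of the convention the init path relies on (simplified; the
+manual-probe helper name here is approximate):
+
+	err = abituguru3_dmi_detect();
+	if (err < 0)
+		return err;		/* hard failure: give up */
+	if (err == 1)
+		err = abituguru3_detect();	/* no DMI match: probe ports */
+
+Returning -ENODEV from the CONFIG_DMI=n stub therefore aborted init
+instead of letting it fall through to the manual probe.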
+
+Signed-off-by: Alistair John Strachan <alistair@devzero.co.uk>
+Cc: Hans de Goede <hdegoede@redhat.com>
+Signed-off-by: Jean Delvare <khali@linux-fr.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/hwmon/abituguru3.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/hwmon/abituguru3.c
++++ b/drivers/hwmon/abituguru3.c
+@@ -1153,7 +1153,7 @@ static int __init abituguru3_dmi_detect(
+
+ static inline int abituguru3_dmi_detect(void)
+ {
+- return -ENODEV;
++ return 1;
+ }
+
+ #endif /* CONFIG_DMI */
--- /dev/null
+From 31a12666d8f0c22235297e1c1575f82061480029 Mon Sep 17 00:00:00 2001
+From: Nick Piggin <npiggin@suse.de>
+Date: Tue, 6 Jan 2009 14:39:04 -0800
+Subject: mm: write_cache_pages cyclic fix
+
+From: Nick Piggin <npiggin@suse.de>
+
+commit 31a12666d8f0c22235297e1c1575f82061480029 upstream.
+
+In write_cache_pages, scanned == 1 is supposed to mean that cyclic
+writeback has circled through index zero and thus should not circle
+again.  However, it gets set to 1 after the first successful pagevec
+lookup, which leads to cases where not enough data gets written.
+
+Counterexample: a file with its first 10 pages dirty,
+writeback_index == 5, nr_to_write == 10.  The last 5 dirty pages
+(5..9) will be found and scanned set to 1; after writing those out, we
+will not cycle back to get the first 5.
+
+Rework this logic: now we always cycle unless we started off from
+index 0.  When cycling, write out only as far as one page before the
+start page of the first cycle, so we don't write parts of the file
+twice.
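+
+Under the reworked logic, the counterexample above plays out as follows
+(a sketch in terms of the variables introduced by the patch below):
+
+	writeback_index = 5; index = 5; end = -1; cycled = 0;
+	/* first pass: finds and writes dirty pages 5..9, then the
+	 * pagevec lookup runs off the end of the file */
+	if (!cycled) {			/* we did not start at index 0 */
+		cycled = 1;
+		index = 0;
+		end = writeback_index - 1;	/* == 4 */
+		goto retry;		/* second pass: writes pages 0..4 */
+	}
+	/* all 10 dirty pages written, none of them twice */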
+
+Signed-off-by: Nick Piggin <npiggin@suse.de>
+Cc: Chris Mason <chris.mason@oracle.com>
+Cc: Dave Chinner <david@fromorbit.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ mm/page-writeback.c | 25 ++++++++++++++++++-------
+ 1 file changed, 18 insertions(+), 7 deletions(-)
+
+--- a/mm/page-writeback.c
++++ b/mm/page-writeback.c
+@@ -872,9 +872,10 @@ int write_cache_pages(struct address_spa
+ int done = 0;
+ struct pagevec pvec;
+ int nr_pages;
++ pgoff_t uninitialized_var(writeback_index);
+ pgoff_t index;
+ pgoff_t end; /* Inclusive */
+- int scanned = 0;
++ int cycled;
+ int range_whole = 0;
+
+ if (wbc->nonblocking && bdi_write_congested(bdi)) {
+@@ -884,14 +885,19 @@ int write_cache_pages(struct address_spa
+
+ pagevec_init(&pvec, 0);
+ if (wbc->range_cyclic) {
+- index = mapping->writeback_index; /* Start from prev offset */
++ writeback_index = mapping->writeback_index; /* prev offset */
++ index = writeback_index;
++ if (index == 0)
++ cycled = 1;
++ else
++ cycled = 0;
+ end = -1;
+ } else {
+ index = wbc->range_start >> PAGE_CACHE_SHIFT;
+ end = wbc->range_end >> PAGE_CACHE_SHIFT;
+ if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
+ range_whole = 1;
+- scanned = 1;
++ cycled = 1; /* ignore range_cyclic tests */
+ }
+ retry:
+ while (!done && (index <= end) &&
+@@ -900,7 +906,6 @@ retry:
+ min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1))) {
+ unsigned i;
+
+- scanned = 1;
+ for (i = 0; i < nr_pages; i++) {
+ struct page *page = pvec.pages[i];
+
+@@ -918,7 +923,11 @@ retry:
+ continue;
+ }
+
+- if (!wbc->range_cyclic && page->index > end) {
++ if (page->index > end) {
++ /*
++ * can't be range_cyclic (1st pass) because
++ * end == -1 in that case.
++ */
+ done = 1;
+ unlock_page(page);
+ continue;
+@@ -949,13 +958,15 @@ retry:
+ pagevec_release(&pvec);
+ cond_resched();
+ }
+- if (!scanned && !done) {
++ if (!cycled) {
+ /*
++ * range_cyclic:
+ * We hit the last page and there is more work to be done: wrap
+ * back to the start of the file
+ */
+- scanned = 1;
++ cycled = 1;
+ index = 0;
++ end = writeback_index - 1;
+ goto retry;
+ }
+ if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
--- /dev/null
+From 9ba0fdbfaed2e74005d87fab948c5522b86ff733 Mon Sep 17 00:00:00 2001
+From: Dave Kleikamp <shaggy@linux.vnet.ibm.com>
+Date: Wed, 14 Jan 2009 09:09:34 +0000
+Subject: powerpc: is_hugepage_only_range() must account for both 4kB and 64kB slices
+
+From: Dave Kleikamp <shaggy@linux.vnet.ibm.com>
+
+commit 9ba0fdbfaed2e74005d87fab948c5522b86ff733 upstream.
+
+The subpage_prot syscall fails on second and subsequent calls for a given
+region, because is_hugepage_only_range() is mis-identifying the 4 kB
+slices when the process has a 64 kB page size.
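+
+The likely sequence: the first subpage_prot call demotes the affected
+slices from 64 kB to 4 kB pages, so on a later call those slices no
+longer appear in the mask of 64 kB slices and the range is wrongly
+flagged.  Schematically, the check after the fix (or_mask combines the
+low and high slice bitmaps of its two arguments):
+
+	mask = slice_range_to_mask(addr, len);	/* slices covering the range */
+	available = slice_mask_for_size(mm, MMU_PAGE_64K);
+	compat_mask = slice_mask_for_size(mm, MMU_PAGE_4K);
+	or_mask(available, compat_mask);	/* count 4 kB slices too */
+	/* the range is reported hugepage-only iff some slice in 'mask'
+	 * is missing from 'available' */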
+
+Signed-off-by: Dave Kleikamp <shaggy@linux.vnet.ibm.com>
+Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ arch/powerpc/mm/slice.c | 11 ++++++++++-
+ 1 file changed, 10 insertions(+), 1 deletion(-)
+
+--- a/arch/powerpc/mm/slice.c
++++ b/arch/powerpc/mm/slice.c
+@@ -710,9 +710,18 @@ int is_hugepage_only_range(struct mm_str
+ unsigned long len)
+ {
+ struct slice_mask mask, available;
++ unsigned int psize = mm->context.user_psize;
+
+ mask = slice_range_to_mask(addr, len);
+- available = slice_mask_for_size(mm, mm->context.user_psize);
++ available = slice_mask_for_size(mm, psize);
++#ifdef CONFIG_PPC_64K_PAGES
++ /* We need to account for 4k slices too */
++ if (psize == MMU_PAGE_64K) {
++ struct slice_mask compat_mask;
++ compat_mask = slice_mask_for_size(mm, MMU_PAGE_4K);
++ or_mask(available, compat_mask);
++ }
++#endif
+
+ #if 0 /* too verbose */
+ slice_dbg("is_hugepage_only_range(mm=%p, addr=%lx, len=%lx)\n",
clocksource-introduce-clocksource_forward_now.patch
hwmon-vid-add-support-for-amd-family-10h-cpus.patch
ath9k-quiet-harmless-forcexpaon-messages.patch
+dell_rbu-use-scnprintf-instead-of-less-secure-sprintf.patch
+hwmon-fix-config_dmi-n-fallback-to-probe.patch
+powerpc-is_hugepage_only_range-must-account-for-both-4kb-and-64kb-slices.patch
+mm-write_cache_pages-cyclic-fix.patch