git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
2.6.33 patches
authorGreg Kroah-Hartman <gregkh@suse.de>
Tue, 7 Dec 2010 20:26:34 +0000 (12:26 -0800)
committerGreg Kroah-Hartman <gregkh@suse.de>
Tue, 7 Dec 2010 20:26:34 +0000 (12:26 -0800)
13 files changed:
queue-2.6.33/can-bcm-fix-minor-heap-overflow.patch [new file with mode: 0644]
queue-2.6.33/crypto-padlock-fix-aes-cbc-handling-on-odd-block-sized-input.patch [new file with mode: 0644]
queue-2.6.33/genirq-fix-incorrect-proc-spurious-output.patch [new file with mode: 0644]
queue-2.6.33/memory-corruption-in-x.25-facilities-parsing.patch [new file with mode: 0644]
queue-2.6.33/mm-vmscan-raise-the-bar-to-pageout_io_sync-stalls.patch [new file with mode: 0644]
queue-2.6.33/net-limit-socket-i-o-iovec-total-length-to-int_max.patch [new file with mode: 0644]
queue-2.6.33/net-truncate-recvfrom-and-sendto-length-to-int_max.patch [new file with mode: 0644]
queue-2.6.33/nohz-s390-fix-arch_needs_cpu-return-value-on-offline-cpus.patch [new file with mode: 0644]
queue-2.6.33/series
queue-2.6.33/v4l-dvb-ivtvfb-prevent-reading-uninitialized-stack-memory.patch [new file with mode: 0644]
queue-2.6.33/wmi-use-memcmp-instead-of-strncmp-to-compare-guids.patch [new file with mode: 0644]
queue-2.6.33/x25-patch-to-fix-bug-15678-x25-accesses-fields-beyond-end-of-packet.patch [new file with mode: 0644]
queue-2.6.33/x25-prevent-crashing-when-parsing-bad-x.25-facilities.patch [new file with mode: 0644]

diff --git a/queue-2.6.33/can-bcm-fix-minor-heap-overflow.patch b/queue-2.6.33/can-bcm-fix-minor-heap-overflow.patch
new file mode 100644 (file)
index 0000000..c7710bd
--- /dev/null
@@ -0,0 +1,35 @@
+From 0597d1b99fcfc2c0eada09a698f85ed413d4ba84 Mon Sep 17 00:00:00 2001
+From: Oliver Hartkopp <socketcan@hartkopp.net>
+Date: Wed, 10 Nov 2010 12:10:30 +0000
+Subject: can-bcm: fix minor heap overflow
+
+From: Oliver Hartkopp <socketcan@hartkopp.net>
+
+commit 0597d1b99fcfc2c0eada09a698f85ed413d4ba84 upstream.
+
+On 64-bit platforms the ASCII representation of a pointer may be up to 17
+bytes long. This patch increases the length of the buffer accordingly.
+
+http://marc.info/?l=linux-netdev&m=128872251418192&w=2
+
+Reported-by: Dan Rosenberg <drosenberg@vsecurity.com>
+Signed-off-by: Oliver Hartkopp <socketcan@hartkopp.net>
+CC: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ net/can/bcm.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/net/can/bcm.c
++++ b/net/can/bcm.c
+@@ -117,7 +117,7 @@ struct bcm_sock {
+       struct list_head tx_ops;
+       unsigned long dropped_usr_msgs;
+       struct proc_dir_entry *bcm_proc_read;
+-      char procname [9]; /* pointer printed in ASCII with \0 */
++      char procname [20]; /* pointer printed in ASCII with \0 */
+ };
+ static inline struct bcm_sock *bcm_sk(const struct sock *sk)
diff --git a/queue-2.6.33/crypto-padlock-fix-aes-cbc-handling-on-odd-block-sized-input.patch b/queue-2.6.33/crypto-padlock-fix-aes-cbc-handling-on-odd-block-sized-input.patch
new file mode 100644 (file)
index 0000000..70d8b76
--- /dev/null
@@ -0,0 +1,37 @@
+From c054a076a1bd4731820a9c4d638b13d5c9bf5935 Mon Sep 17 00:00:00 2001
+From: Herbert Xu <herbert@gondor.apana.org.au>
+Date: Thu, 4 Nov 2010 14:38:39 -0400
+Subject: crypto: padlock - Fix AES-CBC handling on odd-block-sized input
+
+From: Herbert Xu <herbert@gondor.apana.org.au>
+
+commit c054a076a1bd4731820a9c4d638b13d5c9bf5935 upstream.
+
+On certain VIA chipsets AES-CBC requires the input/output to be
+a multiple of 64 bytes.  We had a workaround for this but it was
+buggy as it sent the whole input for processing when it is meant
+to only send the initial number of blocks which makes the rest
+a multiple of 64 bytes.
+
+As expected this causes memory corruption whenever the workaround
+kicks in.
+
+Reported-by: Phil Sutter <phil@nwl.cc>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/crypto/padlock-aes.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/crypto/padlock-aes.c
++++ b/drivers/crypto/padlock-aes.c
+@@ -285,7 +285,7 @@ static inline u8 *padlock_xcrypt_cbc(con
+       if (initial)
+               asm volatile (".byte 0xf3,0x0f,0xa7,0xd0"       /* rep xcryptcbc */
+                             : "+S" (input), "+D" (output), "+a" (iv)
+-                            : "d" (control_word), "b" (key), "c" (count));
++                            : "d" (control_word), "b" (key), "c" (initial));
+       asm volatile (".byte 0xf3,0x0f,0xa7,0xd0"       /* rep xcryptcbc */
+                     : "+S" (input), "+D" (output), "+a" (iv)
diff --git a/queue-2.6.33/genirq-fix-incorrect-proc-spurious-output.patch b/queue-2.6.33/genirq-fix-incorrect-proc-spurious-output.patch
new file mode 100644 (file)
index 0000000..b0321dc
--- /dev/null
@@ -0,0 +1,41 @@
+From 25c9170ed64a6551beefe9315882f754e14486f4 Mon Sep 17 00:00:00 2001
+From: Kenji Kaneshige <kaneshige.kenji@jp.fujitsu.com>
+Date: Tue, 30 Nov 2010 17:36:08 +0900
+Subject: genirq: Fix incorrect proc spurious output
+
+From: Kenji Kaneshige <kaneshige.kenji@jp.fujitsu.com>
+
+commit 25c9170ed64a6551beefe9315882f754e14486f4 upstream.
+
+Since commit a1afb637(switch /proc/irq/*/spurious to seq_file) all
+/proc/irq/XX/spurious files show the information of irq 0.
+
+Current irq_spurious_proc_open() passes on NULL as the 3rd argument,
+which is used as an IRQ number in irq_spurious_proc_show(), to the
+single_open(). Because of this, all the /proc/irq/XX/spurious file
+shows IRQ 0 information regardless of the IRQ number.
+
+To fix the problem, irq_spurious_proc_open() must pass on the
+appropriate data (IRQ number) to single_open().
+
+Signed-off-by: Kenji Kaneshige <kaneshige.kenji@jp.fujitsu.com>
+Reviewed-by: Yong Zhang <yong.zhang0@gmail.com>
+LKML-Reference: <4CF4B778.90604@jp.fujitsu.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ kernel/irq/proc.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/kernel/irq/proc.c
++++ b/kernel/irq/proc.c
+@@ -160,7 +160,7 @@ static int irq_spurious_proc_show(struct
+ static int irq_spurious_proc_open(struct inode *inode, struct file *file)
+ {
+-      return single_open(file, irq_spurious_proc_show, NULL);
++      return single_open(file, irq_spurious_proc_show, PDE(inode)->data);
+ }
+ static const struct file_operations irq_spurious_proc_fops = {
diff --git a/queue-2.6.33/memory-corruption-in-x.25-facilities-parsing.patch b/queue-2.6.33/memory-corruption-in-x.25-facilities-parsing.patch
new file mode 100644 (file)
index 0000000..0233197
--- /dev/null
@@ -0,0 +1,51 @@
+From a6331d6f9a4298173b413cf99a40cc86a9d92c37 Mon Sep 17 00:00:00 2001
+From: andrew hendry <andrew.hendry@gmail.com>
+Date: Wed, 3 Nov 2010 12:54:53 +0000
+Subject: memory corruption in X.25 facilities parsing
+
+From: andrew hendry <andrew.hendry@gmail.com>
+
+commit a6331d6f9a4298173b413cf99a40cc86a9d92c37 upstream.
+
+Signed-off-by: Andrew Hendry <andrew.hendry@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ net/x25/x25_facilities.c |    8 ++++----
+ net/x25/x25_in.c         |    2 ++
+ 2 files changed, 6 insertions(+), 4 deletions(-)
+
+--- a/net/x25/x25_facilities.c
++++ b/net/x25/x25_facilities.c
+@@ -134,15 +134,15 @@ int x25_parse_facilities(struct sk_buff
+               case X25_FAC_CLASS_D:
+                       switch (*p) {
+                       case X25_FAC_CALLING_AE:
+-                              if (p[1] > X25_MAX_DTE_FACIL_LEN)
+-                                      break;
++                              if (p[1] > X25_MAX_DTE_FACIL_LEN || p[1] <= 1)
++                                      return 0;
+                               dte_facs->calling_len = p[2];
+                               memcpy(dte_facs->calling_ae, &p[3], p[1] - 1);
+                               *vc_fac_mask |= X25_MASK_CALLING_AE;
+                               break;
+                       case X25_FAC_CALLED_AE:
+-                              if (p[1] > X25_MAX_DTE_FACIL_LEN)
+-                                      break;
++                              if (p[1] > X25_MAX_DTE_FACIL_LEN || p[1] <= 1)
++                                      return 0;
+                               dte_facs->called_len = p[2];
+                               memcpy(dte_facs->called_ae, &p[3], p[1] - 1);
+                               *vc_fac_mask |= X25_MASK_CALLED_AE;
+--- a/net/x25/x25_in.c
++++ b/net/x25/x25_in.c
+@@ -118,6 +118,8 @@ static int x25_state1_machine(struct soc
+                                               &x25->vc_facil_mask);
+                       if (len > 0)
+                               skb_pull(skb, len);
++                      else
++                              return -1;
+                       /*
+                        *      Copy any Call User Data.
+                        */
diff --git a/queue-2.6.33/mm-vmscan-raise-the-bar-to-pageout_io_sync-stalls.patch b/queue-2.6.33/mm-vmscan-raise-the-bar-to-pageout_io_sync-stalls.patch
new file mode 100644 (file)
index 0000000..63bf6c1
--- /dev/null
@@ -0,0 +1,162 @@
+From e31f3698cd3499e676f6b0ea12e3528f569c4fa3 Mon Sep 17 00:00:00 2001
+From: Wu Fengguang <fengguang.wu@intel.com>
+Date: Mon, 9 Aug 2010 17:20:01 -0700
+Subject: vmscan: raise the bar to PAGEOUT_IO_SYNC stalls
+
+From: Wu Fengguang <fengguang.wu@intel.com>
+
+commit e31f3698cd3499e676f6b0ea12e3528f569c4fa3 upstream.
+
+Fix "system goes unresponsive under memory pressure and lots of
+dirty/writeback pages" bug.
+
+       http://lkml.org/lkml/2010/4/4/86
+
+In the above thread, Andreas Mohr described that
+
+       Invoking any command locked up for minutes (note that I'm
+       talking about attempted additional I/O to the _other_,
+       _unaffected_ main system HDD - such as loading some shell
+       binaries -, NOT the external SSD18M!!).
+
+This happens when the two conditions are both met:
+- under memory pressure
+- writing heavily to a slow device
+
+OOM also happens in Andreas' system.  The OOM trace shows that 3 processes
+are stuck in wait_on_page_writeback() in the direct reclaim path.  One in
+do_fork() and the other two in unix_stream_sendmsg().  They are blocked on
+this condition:
+
+       (sc->order && priority < DEF_PRIORITY - 2)
+
+which was introduced in commit 78dc583d (vmscan: low order lumpy reclaim
+also should use PAGEOUT_IO_SYNC) one year ago.  That condition may be too
+permissive.  In Andreas' case, 512MB/1024 = 512KB.  If the direct reclaim
+for the order-1 fork() allocation runs into a range of 512KB
+hard-to-reclaim LRU pages, it will be stalled.
+
+It's a severe problem in three ways.
+
+Firstly, it can easily happen in daily desktop usage.  vmscan priority can
+easily go below (DEF_PRIORITY - 2) on _local_ memory pressure.  Even if
+the system has 50% globally reclaimable pages, it still has good
+opportunity to have 0.1% sized hard-to-reclaim ranges.  For example, a
+simple dd can easily create a big range (up to 20%) of dirty pages in the
+LRU lists.  And order-1 to order-3 allocations are more than common with
+SLUB.  Try "grep -v '1 :' /proc/slabinfo" to get the list of high order
+slab caches.  For example, the order-1 radix_tree_node slab cache may
+stall applications at swap-in time; the order-3 inode cache on most
+filesystems may stall applications when trying to read some file; the
+order-2 proc_inode_cache may stall applications when trying to open a
+/proc file.
+
+Secondly, once triggered, it will stall unrelated processes (not doing IO
+at all) in the system.  This "one slow USB device stalls the whole system"
+avalanching effect is very bad.
+
+Thirdly, once stalled, the stall time could be intolerably long for the
+users.  When there are 20MB queued writeback pages and USB 1.1 is writing
+them in 1MB/s, wait_on_page_writeback() will be stuck for up to 20 seconds.
+Not to mention it may be called multiple times.
+
+So raise the bar to only enable PAGEOUT_IO_SYNC when priority goes below
+DEF_PRIORITY/3, or 6.25% LRU size.  As the default dirty throttle ratio is
+20%, it will hardly be triggered by pure dirty pages.  We'd better treat
+PAGEOUT_IO_SYNC as some last resort workaround -- its stall time is so
+uncomfortably long (easily goes beyond 1s).
+
+The bar is only raised for (order < PAGE_ALLOC_COSTLY_ORDER) allocations,
+which are easy to satisfy in 1TB memory boxes.  So, although 6.25% of
+memory could be an awful lot of pages to scan on a system with 1TB of
+memory, it won't really have to busy scan that much.
+
+Andreas tested an older version of this patch and reported that it mostly
+fixed his problem.  Mel Gorman helped improve it and KOSAKI Motohiro will
+fix it further in the next patch.
+
+Reported-by: Andreas Mohr <andi@lisas.de>
+Reviewed-by: Minchan Kim <minchan.kim@gmail.com>
+Reviewed-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
+Signed-off-by: Mel Gorman <mel@csn.ul.ie>
+Signed-off-by: Wu Fengguang <fengguang.wu@intel.com>
+Cc: Rik van Riel <riel@redhat.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Jiri Slaby <jslaby@suse.cz>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+
+---
+ mm/vmscan.c |   53 +++++++++++++++++++++++++++++++++++++++++++++--------
+ 1 file changed, 45 insertions(+), 8 deletions(-)
+
+--- a/mm/vmscan.c
++++ b/mm/vmscan.c
+@@ -1082,6 +1082,48 @@ static int too_many_isolated(struct zone
+ }
+ /*
++ * Returns true if the caller should wait to clean dirty/writeback pages.
++ *
++ * If we are direct reclaiming for contiguous pages and we do not reclaim
++ * everything in the list, try again and wait for writeback IO to complete.
++ * This will stall high-order allocations noticeably. Only do that when really
++ * need to free the pages under high memory pressure.
++ */
++static inline bool should_reclaim_stall(unsigned long nr_taken,
++                                      unsigned long nr_freed,
++                                      int priority,
++                                      int lumpy_reclaim,
++                                      struct scan_control *sc)
++{
++      int lumpy_stall_priority;
++
++      /* kswapd should not stall on sync IO */
++      if (current_is_kswapd())
++              return false;
++
++      /* Only stall on lumpy reclaim */
++      if (!lumpy_reclaim)
++              return false;
++
++      /* If we have relaimed everything on the isolated list, no stall */
++      if (nr_freed == nr_taken)
++              return false;
++
++      /*
++       * For high-order allocations, there are two stall thresholds.
++       * High-cost allocations stall immediately where as lower
++       * order allocations such as stacks require the scanning
++       * priority to be much higher before stalling.
++       */
++      if (sc->order > PAGE_ALLOC_COSTLY_ORDER)
++              lumpy_stall_priority = DEF_PRIORITY;
++      else
++              lumpy_stall_priority = DEF_PRIORITY / 3;
++
++      return priority <= lumpy_stall_priority;
++}
++
++/*
+  * shrink_inactive_list() is a helper for shrink_zone().  It returns the number
+  * of reclaimed pages
+  */
+@@ -1173,14 +1215,9 @@ static unsigned long shrink_inactive_lis
+               nr_scanned += nr_scan;
+               nr_freed = shrink_page_list(&page_list, sc, PAGEOUT_IO_ASYNC);
+-              /*
+-               * If we are direct reclaiming for contiguous pages and we do
+-               * not reclaim everything in the list, try again and wait
+-               * for IO to complete. This will stall high-order allocations
+-               * but that should be acceptable to the caller
+-               */
+-              if (nr_freed < nr_taken && !current_is_kswapd() &&
+-                  lumpy_reclaim) {
++              /* Check if we should syncronously wait for writeback */
++              if (should_reclaim_stall(nr_taken, nr_freed, priority,
++                                      lumpy_reclaim, sc)) {
+                       congestion_wait(BLK_RW_ASYNC, HZ/10);
+                       /*
diff --git a/queue-2.6.33/net-limit-socket-i-o-iovec-total-length-to-int_max.patch b/queue-2.6.33/net-limit-socket-i-o-iovec-total-length-to-int_max.patch
new file mode 100644 (file)
index 0000000..f6f370c
--- /dev/null
@@ -0,0 +1,104 @@
+From 8acfe468b0384e834a303f08ebc4953d72fb690a Mon Sep 17 00:00:00 2001
+From: David S. Miller <davem@davemloft.net>
+Date: Thu, 28 Oct 2010 11:41:55 -0700
+Subject: net: Limit socket I/O iovec total length to INT_MAX.
+
+From: David S. Miller <davem@davemloft.net>
+
+commit 8acfe468b0384e834a303f08ebc4953d72fb690a upstream.
+
+This helps protect us from overflow issues down in the
+individual protocol sendmsg/recvmsg handlers.  Once
+we hit INT_MAX we truncate out the rest of the iovec
+by setting the iov_len members to zero.
+
+This works because:
+
+1) For SOCK_STREAM and SOCK_SEQPACKET sockets, partial
+   writes are allowed and the application will just continue
+   with another write to send the rest of the data.
+
+2) For datagram oriented sockets, where there must be a
+   one-to-one correspondence between write() calls and
+   packets on the wire, INT_MAX is going to be far larger
+   than the packet size limit the protocol is going to
+   check for and signal with -EMSGSIZE.
+
+Based upon a patch by Linus Torvalds.
+
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ include/linux/socket.h |    2 +-
+ net/compat.c           |   10 ++++++----
+ net/core/iovec.c       |   20 +++++++++-----------
+ 3 files changed, 16 insertions(+), 16 deletions(-)
+
+--- a/include/linux/socket.h
++++ b/include/linux/socket.h
+@@ -313,7 +313,7 @@ extern int csum_partial_copy_fromiovecen
+                                         int offset, 
+                                         unsigned int len, __wsum *csump);
+-extern long verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr *address, int mode);
++extern int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr *address, int mode);
+ extern int memcpy_toiovec(struct iovec *v, unsigned char *kdata, int len);
+ extern int memcpy_toiovecend(const struct iovec *v, unsigned char *kdata,
+                            int offset, int len);
+--- a/net/compat.c
++++ b/net/compat.c
+@@ -40,10 +40,12 @@ static inline int iov_from_user_compat_t
+               compat_size_t len;
+               if (get_user(len, &uiov32->iov_len) ||
+-                 get_user(buf, &uiov32->iov_base)) {
+-                      tot_len = -EFAULT;
+-                      break;
+-              }
++                  get_user(buf, &uiov32->iov_base))
++                      return -EFAULT;
++
++              if (len > INT_MAX - tot_len)
++                      len = INT_MAX - tot_len;
++
+               tot_len += len;
+               kiov->iov_base = compat_ptr(buf);
+               kiov->iov_len = (__kernel_size_t) len;
+--- a/net/core/iovec.c
++++ b/net/core/iovec.c
+@@ -36,10 +36,9 @@
+  *    in any case.
+  */
+-long verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr *address, int mode)
++int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr *address, int mode)
+ {
+-      int size, ct;
+-      long err;
++      int size, ct, err;
+       if (m->msg_namelen) {
+               if (mode == VERIFY_READ) {
+@@ -61,14 +60,13 @@ long verify_iovec(struct msghdr *m, stru
+       err = 0;
+       for (ct = 0; ct < m->msg_iovlen; ct++) {
+-              err += iov[ct].iov_len;
+-              /*
+-               * Goal is not to verify user data, but to prevent returning
+-               * negative value, which is interpreted as errno.
+-               * Overflow is still possible, but it is harmless.
+-               */
+-              if (err < 0)
+-                      return -EMSGSIZE;
++              size_t len = iov[ct].iov_len;
++
++              if (len > INT_MAX - err) {
++                      len = INT_MAX - err;
++                      iov[ct].iov_len = len;
++              }
++              err += len;
+       }
+       return err;
diff --git a/queue-2.6.33/net-truncate-recvfrom-and-sendto-length-to-int_max.patch b/queue-2.6.33/net-truncate-recvfrom-and-sendto-length-to-int_max.patch
new file mode 100644 (file)
index 0000000..8e7bc1a
--- /dev/null
@@ -0,0 +1,37 @@
+From 253eacc070b114c2ec1f81b067d2fed7305467b0 Mon Sep 17 00:00:00 2001
+From: Linus Torvalds <torvalds@linux-foundation.org>
+Date: Sat, 30 Oct 2010 16:43:10 -0700
+Subject: net: Truncate recvfrom and sendto length to INT_MAX.
+
+From: Linus Torvalds <torvalds@linux-foundation.org>
+
+commit 253eacc070b114c2ec1f81b067d2fed7305467b0 upstream.
+
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ net/socket.c |    4 ++++
+ 1 file changed, 4 insertions(+)
+
+--- a/net/socket.c
++++ b/net/socket.c
+@@ -1671,6 +1671,8 @@ SYSCALL_DEFINE6(sendto, int, fd, void __
+       struct iovec iov;
+       int fput_needed;
++      if (len > INT_MAX)
++              len = INT_MAX;
+       sock = sockfd_lookup_light(fd, &err, &fput_needed);
+       if (!sock)
+               goto out;
+@@ -1728,6 +1730,8 @@ SYSCALL_DEFINE6(recvfrom, int, fd, void
+       int err, err2;
+       int fput_needed;
++      if (size > INT_MAX)
++              size = INT_MAX;
+       sock = sockfd_lookup_light(fd, &err, &fput_needed);
+       if (!sock)
+               goto out;
diff --git a/queue-2.6.33/nohz-s390-fix-arch_needs_cpu-return-value-on-offline-cpus.patch b/queue-2.6.33/nohz-s390-fix-arch_needs_cpu-return-value-on-offline-cpus.patch
new file mode 100644 (file)
index 0000000..b0f5245
--- /dev/null
@@ -0,0 +1,95 @@
+From 398812159e328478ae49b4bd01f0d71efea96c39 Mon Sep 17 00:00:00 2001
+From: Heiko Carstens <heiko.carstens@de.ibm.com>
+Date: Wed, 1 Dec 2010 10:08:01 +0100
+Subject: [S390] nohz/s390: fix arch_needs_cpu() return value on offline cpus
+
+From: Heiko Carstens <heiko.carstens@de.ibm.com>
+
+commit 398812159e328478ae49b4bd01f0d71efea96c39 upstream.
+
+This fixes the same problem as described in the patch "nohz: fix
+printk_needs_cpu() return value on offline cpus" for the arch_needs_cpu()
+primitive:
+
+arch_needs_cpu() may return 1 if called on offline cpus. When a cpu gets
+offlined it schedules the idle process which, before killing its own cpu,
+will call tick_nohz_stop_sched_tick().
+That function in turn will call arch_needs_cpu() in order to check if the
+local tick can be disabled. On offline cpus this function should naturally
+return 0 since regardless if the tick gets disabled or not the cpu will be
+dead short after. That is besides the fact that __cpu_disable() should already
+have made sure that no interrupts on the offlined cpu will be delivered anyway.
+
+In this case it prevents tick_nohz_stop_sched_tick() to call
+select_nohz_load_balancer(). No idea if that really is a problem. However what
+made me debug this is that on 2.6.32 the function get_nohz_load_balancer() is
+used within __mod_timer() to select a cpu on which a timer gets enqueued.
+If arch_needs_cpu() returns 1 then the nohz_load_balancer cpu doesn't get
+updated when a cpu gets offlined. It may contain the cpu number of an offline
+cpu. In turn timers get enqueued on an offline cpu and not very surprisingly
+they never expire and cause system hangs.
+
+This has been observed on 2.6.32 kernels. On current kernels __mod_timer() uses
+get_nohz_timer_target() which doesn't have that problem. However there might
+be other problems because of the too early exit tick_nohz_stop_sched_tick()
+in case a cpu goes offline.
+
+This specific bug was introduced with 3c5d92a0 "nohz: Introduce
+arch_needs_cpu".
+
+In this case a cpu hotplug notifier is used to fix the issue in order to keep
+the normal/fast path small. All we need to do is to clear the condition that
+makes arch_needs_cpu() return 1 since it is just a performance improvement
+which is supposed to keep the local tick running for a short period if a cpu
+goes idle. Nothing special needs to be done except for clearing the condition.
+
+Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
+Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
+Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ arch/s390/kernel/vtime.c |   19 +++++++++++++++++++
+ 1 file changed, 19 insertions(+)
+
+--- a/arch/s390/kernel/vtime.c
++++ b/arch/s390/kernel/vtime.c
+@@ -19,6 +19,7 @@
+ #include <linux/kernel_stat.h>
+ #include <linux/rcupdate.h>
+ #include <linux/posix-timers.h>
++#include <linux/cpu.h>
+ #include <asm/s390_ext.h>
+ #include <asm/timer.h>
+@@ -562,6 +563,23 @@ void init_cpu_vtimer(void)
+       __ctl_set_bit(0,10);
+ }
++static int __cpuinit s390_nohz_notify(struct notifier_block *self,
++                                    unsigned long action, void *hcpu)
++{
++      struct s390_idle_data *idle;
++      long cpu = (long) hcpu;
++
++      idle = &per_cpu(s390_idle, cpu);
++      switch (action) {
++      case CPU_DYING:
++      case CPU_DYING_FROZEN:
++              idle->nohz_delay = 0;
++      default:
++              break;
++      }
++      return NOTIFY_OK;
++}
++
+ void __init vtime_init(void)
+ {
+       /* request the cpu timer external interrupt */
+@@ -570,5 +588,6 @@ void __init vtime_init(void)
+       /* Enable cpu timer interrupts on the boot cpu. */
+       init_cpu_vtimer();
++      cpu_notifier(s390_nohz_notify, 0);
+ }
index 820922fa257d3b84e765d5e12151bfb95e3dff16..86130d900393da5ca81c62362b4572a7e62e34bc 100644 (file)
@@ -191,3 +191,15 @@ ext4-prevent-creation-of-files-larger-than-rlimit_fsize-using-fallocate.patch
 mm-fix-corruption-of-hibernation-caused-by-reusing-swap-during-image-saving.patch
 btrfs-kfree-correct-pointer-during-mount-option-parsing.patch
 prioritize-synchronous-signals-over-normal-signals.patch
+mm-vmscan-raise-the-bar-to-pageout_io_sync-stalls.patch
+wmi-use-memcmp-instead-of-strncmp-to-compare-guids.patch
+nohz-s390-fix-arch_needs_cpu-return-value-on-offline-cpus.patch
+genirq-fix-incorrect-proc-spurious-output.patch
+net-truncate-recvfrom-and-sendto-length-to-int_max.patch
+net-limit-socket-i-o-iovec-total-length-to-int_max.patch
+x25-patch-to-fix-bug-15678-x25-accesses-fields-beyond-end-of-packet.patch
+memory-corruption-in-x.25-facilities-parsing.patch
+can-bcm-fix-minor-heap-overflow.patch
+v4l-dvb-ivtvfb-prevent-reading-uninitialized-stack-memory.patch
+x25-prevent-crashing-when-parsing-bad-x.25-facilities.patch
+crypto-padlock-fix-aes-cbc-handling-on-odd-block-sized-input.patch
diff --git a/queue-2.6.33/v4l-dvb-ivtvfb-prevent-reading-uninitialized-stack-memory.patch b/queue-2.6.33/v4l-dvb-ivtvfb-prevent-reading-uninitialized-stack-memory.patch
new file mode 100644 (file)
index 0000000..9d76120
--- /dev/null
@@ -0,0 +1,34 @@
+From 405707985594169cfd0b1d97d29fcb4b4c6f2ac9 Mon Sep 17 00:00:00 2001
+From: Dan Rosenberg <drosenberg@vsecurity.com>
+Date: Wed, 15 Sep 2010 18:44:22 -0300
+Subject: V4L/DVB: ivtvfb: prevent reading uninitialized stack memory
+
+From: Dan Rosenberg <drosenberg@vsecurity.com>
+
+commit 405707985594169cfd0b1d97d29fcb4b4c6f2ac9 upstream.
+
+The FBIOGET_VBLANK device ioctl allows unprivileged users to read 16
+bytes of uninitialized stack memory, because the "reserved" member of
+the fb_vblank struct declared on the stack is not altered or zeroed
+before being copied back to the user.  This patch takes care of it.
+
+Signed-off-by: Dan Rosenberg <dan.j.rosenberg@gmail.com>
+Signed-off-by: Andy Walls <awalls@md.metrocast.net>
+Signed-off-by: Mauro Carvalho Chehab <mchehab@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/media/video/ivtv/ivtvfb.c |    2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/drivers/media/video/ivtv/ivtvfb.c
++++ b/drivers/media/video/ivtv/ivtvfb.c
+@@ -457,6 +457,8 @@ static int ivtvfb_ioctl(struct fb_info *
+                       struct fb_vblank vblank;
+                       u32 trace;
++                      memset(&vblank, 0, sizeof(struct fb_vblank));
++
+                       vblank.flags = FB_VBLANK_HAVE_COUNT |FB_VBLANK_HAVE_VCOUNT |
+                                       FB_VBLANK_HAVE_VSYNC;
+                       trace = read_reg(0x028c0) >> 16;
diff --git a/queue-2.6.33/wmi-use-memcmp-instead-of-strncmp-to-compare-guids.patch b/queue-2.6.33/wmi-use-memcmp-instead-of-strncmp-to-compare-guids.patch
new file mode 100644 (file)
index 0000000..36d04ae
--- /dev/null
@@ -0,0 +1,37 @@
+From 8b14d7b22c61f17ccb869e0047d9df6dd9f50a9f Mon Sep 17 00:00:00 2001
+From: Thadeu Lima de Souza Cascardo <cascardo@holoscopio.com>
+Date: Sun, 28 Nov 2010 19:46:50 -0200
+Subject: wmi: use memcmp instead of strncmp to compare GUIDs
+
+From: Thadeu Lima de Souza Cascardo <cascardo@holoscopio.com>
+
+commit 8b14d7b22c61f17ccb869e0047d9df6dd9f50a9f upstream.
+
+While looking for the duplicates in /sys/class/wmi/, I couldn't find
+them. The code that looks for duplicates uses strncmp in a binary GUID,
+which may contain zero bytes. The right function is memcmp, which is
+also used in another section of wmi code.
+
+It was finding 49142400-C6A3-40FA-BADB-8A2652834100 as a duplicate of
+39142400-C6A3-40FA-BADB-8A2652834100. Since the first byte is the fourth
+printed, they were found as equal by strncmp.
+
+Signed-off-by: Thadeu Lima de Souza Cascardo <cascardo@holoscopio.com>
+Signed-off-by: Matthew Garrett <mjg@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/platform/x86/wmi.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/platform/x86/wmi.c
++++ b/drivers/platform/x86/wmi.c
+@@ -724,7 +724,7 @@ static bool guid_already_parsed(const ch
+               wblock = list_entry(p, struct wmi_block, list);
+               gblock = &wblock->gblock;
+-              if (strncmp(gblock->guid, guid_string, 16) == 0)
++              if (memcmp(gblock->guid, guid_string, 16) == 0)
+                       return true;
+       }
+       return false;
diff --git a/queue-2.6.33/x25-patch-to-fix-bug-15678-x25-accesses-fields-beyond-end-of-packet.patch b/queue-2.6.33/x25-patch-to-fix-bug-15678-x25-accesses-fields-beyond-end-of-packet.patch
new file mode 100644 (file)
index 0000000..f1e931d
--- /dev/null
@@ -0,0 +1,181 @@
+From f5eb917b861828da18dc28854308068c66d1449a Mon Sep 17 00:00:00 2001
+From: John Hughes <john@calva.com>
+Date: Wed, 7 Apr 2010 21:29:25 -0700
+Subject: x25: Patch to fix bug 15678 - x25 accesses fields beyond end of packet.
+
+From: John Hughes <john@calva.com>
+
+commit f5eb917b861828da18dc28854308068c66d1449a upstream.
+
+Here is a patch to stop X.25 examining fields beyond the end of the packet.
+
+For example, when a simple CALL ACCEPTED was received:
+
+       10 10 0f
+
+x25_parse_facilities was attempting to decode the FACILITIES field, but this
+packet contains no facilities field.
+
+Signed-off-by: John Hughes <john@calva.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ include/net/x25.h        |    4 ++++
+ net/x25/af_x25.c         |   47 ++++++++++++++++++++++++++++++++++++++++++++++-
+ net/x25/x25_facilities.c |   12 +++++++++++-
+ net/x25/x25_in.c         |   15 +++++++++++----
+ 4 files changed, 72 insertions(+), 6 deletions(-)
+
+--- a/include/net/x25.h
++++ b/include/net/x25.h
+@@ -182,6 +182,10 @@ extern int  sysctl_x25_clear_request_tim
+ extern int  sysctl_x25_ack_holdback_timeout;
+ extern int  sysctl_x25_forward;
++extern int x25_parse_address_block(struct sk_buff *skb,
++              struct x25_address *called_addr,
++              struct x25_address *calling_addr);
++
+ extern int  x25_addr_ntoa(unsigned char *, struct x25_address *,
+                         struct x25_address *);
+ extern int  x25_addr_aton(unsigned char *, struct x25_address *,
+--- a/net/x25/af_x25.c
++++ b/net/x25/af_x25.c
+@@ -81,6 +81,41 @@ struct compat_x25_subscrip_struct {
+ };
+ #endif
++
++int x25_parse_address_block(struct sk_buff *skb,
++              struct x25_address *called_addr,
++              struct x25_address *calling_addr)
++{
++      unsigned char len;
++      int needed;
++      int rc;
++
++      if (skb->len < 1) {
++              /* packet has no address block */
++              rc = 0;
++              goto empty;
++      }
++
++      len = *skb->data;
++      needed = 1 + (len >> 4) + (len & 0x0f);
++
++      if (skb->len < needed) {
++              /* packet is too short to hold the addresses it claims
++                 to hold */
++              rc = -1;
++              goto empty;
++      }
++
++      return x25_addr_ntoa(skb->data, called_addr, calling_addr);
++
++empty:
++      *called_addr->x25_addr = 0;
++      *calling_addr->x25_addr = 0;
++
++      return rc;
++}
++
++
+ int x25_addr_ntoa(unsigned char *p, struct x25_address *called_addr,
+                 struct x25_address *calling_addr)
+ {
+@@ -907,16 +942,26 @@ int x25_rx_call_request(struct sk_buff *
+       /*
+        *      Extract the X.25 addresses and convert them to ASCII strings,
+        *      and remove them.
++       *
++       *      Address block is mandatory in call request packets
+        */
+-      addr_len = x25_addr_ntoa(skb->data, &source_addr, &dest_addr);
++      addr_len = x25_parse_address_block(skb, &source_addr, &dest_addr);
++      if (addr_len <= 0)
++              goto out_clear_request;
+       skb_pull(skb, addr_len);
+       /*
+        *      Get the length of the facilities, skip past them for the moment
+        *      get the call user data because this is needed to determine
+        *      the correct listener
++       *
++       *      Facilities length is mandatory in call request packets
+        */
++      if (skb->len < 1)
++              goto out_clear_request;
+       len = skb->data[0] + 1;
++      if (skb->len < len)
++              goto out_clear_request;
+       skb_pull(skb,len);
+       /*
+--- a/net/x25/x25_facilities.c
++++ b/net/x25/x25_facilities.c
+@@ -35,7 +35,7 @@ int x25_parse_facilities(struct sk_buff
+               struct x25_dte_facilities *dte_facs, unsigned long *vc_fac_mask)
+ {
+       unsigned char *p = skb->data;
+-      unsigned int len = *p++;
++      unsigned int len;
+       *vc_fac_mask = 0;
+@@ -50,6 +50,14 @@ int x25_parse_facilities(struct sk_buff
+       memset(dte_facs->called_ae, '\0', sizeof(dte_facs->called_ae));
+       memset(dte_facs->calling_ae, '\0', sizeof(dte_facs->calling_ae));
++      if (skb->len < 1)
++              return 0;
++
++      len = *p++;
++
++      if (len >= skb->len)
++              return -1;
++
+       while (len > 0) {
+               switch (*p & X25_FAC_CLASS_MASK) {
+               case X25_FAC_CLASS_A:
+@@ -247,6 +255,8 @@ int x25_negotiate_facilities(struct sk_b
+       memcpy(new, ours, sizeof(*new));
+       len = x25_parse_facilities(skb, &theirs, dte, &x25->vc_facil_mask);
++      if (len < 0)
++              return len;
+       /*
+        *      They want reverse charging, we won't accept it.
+--- a/net/x25/x25_in.c
++++ b/net/x25/x25_in.c
+@@ -89,6 +89,7 @@ static int x25_queue_rx_frame(struct soc
+ static int x25_state1_machine(struct sock *sk, struct sk_buff *skb, int frametype)
+ {
+       struct x25_address source_addr, dest_addr;
++      int len;
+       switch (frametype) {
+               case X25_CALL_ACCEPTED: {
+@@ -106,11 +107,17 @@ static int x25_state1_machine(struct soc
+                        *      Parse the data in the frame.
+                        */
+                       skb_pull(skb, X25_STD_MIN_LEN);
+-                      skb_pull(skb, x25_addr_ntoa(skb->data, &source_addr, &dest_addr));
+-                      skb_pull(skb,
+-                               x25_parse_facilities(skb, &x25->facilities,
++
++                      len = x25_parse_address_block(skb, &source_addr,
++                                              &dest_addr);
++                      if (len > 0)
++                              skb_pull(skb, len);
++
++                      len = x25_parse_facilities(skb, &x25->facilities,
+                                               &x25->dte_facilities,
+-                                              &x25->vc_facil_mask));
++                                              &x25->vc_facil_mask);
++                      if (len > 0)
++                              skb_pull(skb, len);
+                       /*
+                        *      Copy any Call User Data.
+                        */
diff --git a/queue-2.6.33/x25-prevent-crashing-when-parsing-bad-x.25-facilities.patch b/queue-2.6.33/x25-prevent-crashing-when-parsing-bad-x.25-facilities.patch
new file mode 100644 (file)
index 0000000..c6a1aa5
--- /dev/null
@@ -0,0 +1,75 @@
+From 5ef41308f94dcbb3b7afc56cdef1c2ba53fa5d2f Mon Sep 17 00:00:00 2001
+From: Dan Rosenberg <drosenberg@vsecurity.com>
+Date: Fri, 12 Nov 2010 12:44:42 -0800
+Subject: x25: Prevent crashing when parsing bad X.25 facilities
+
+From: Dan Rosenberg <drosenberg@vsecurity.com>
+
+commit 5ef41308f94dcbb3b7afc56cdef1c2ba53fa5d2f upstream.
+
+Now with improved comma support.
+
+On parsing malformed X.25 facilities, decrementing the remaining length
+may cause it to underflow.  Since the length is an unsigned integer,
+this will result in the loop continuing until the kernel crashes.
+
+This patch adds checks to ensure decrementing the remaining length does
+not cause it to wrap around.
+
+Signed-off-by: Dan Rosenberg <drosenberg@vsecurity.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ net/x25/x25_facilities.c |   12 +++++++++---
+ 1 file changed, 9 insertions(+), 3 deletions(-)
+
+--- a/net/x25/x25_facilities.c
++++ b/net/x25/x25_facilities.c
+@@ -61,6 +61,8 @@ int x25_parse_facilities(struct sk_buff
+       while (len > 0) {
+               switch (*p & X25_FAC_CLASS_MASK) {
+               case X25_FAC_CLASS_A:
++                      if (len < 2)
++                              return 0;
+                       switch (*p) {
+                       case X25_FAC_REVERSE:
+                               if((p[1] & 0x81) == 0x81) {
+@@ -104,6 +106,8 @@ int x25_parse_facilities(struct sk_buff
+                       len -= 2;
+                       break;
+               case X25_FAC_CLASS_B:
++                      if (len < 3)
++                              return 0;
+                       switch (*p) {
+                       case X25_FAC_PACKET_SIZE:
+                               facilities->pacsize_in  = p[1];
+@@ -125,6 +129,8 @@ int x25_parse_facilities(struct sk_buff
+                       len -= 3;
+                       break;
+               case X25_FAC_CLASS_C:
++                      if (len < 4)
++                              return 0;
+                       printk(KERN_DEBUG "X.25: unknown facility %02X, "
+                              "values %02X, %02X, %02X\n",
+                              p[0], p[1], p[2], p[3]);
+@@ -132,6 +138,8 @@ int x25_parse_facilities(struct sk_buff
+                       len -= 4;
+                       break;
+               case X25_FAC_CLASS_D:
++                      if (len < p[1] + 2)
++                              return 0;
+                       switch (*p) {
+                       case X25_FAC_CALLING_AE:
+                               if (p[1] > X25_MAX_DTE_FACIL_LEN || p[1] <= 1)
+@@ -149,9 +157,7 @@ int x25_parse_facilities(struct sk_buff
+                               break;
+                       default:
+                               printk(KERN_DEBUG "X.25: unknown facility %02X,"
+-                                      "length %d, values %02X, %02X, "
+-                                      "%02X, %02X\n",
+-                                      p[0], p[1], p[2], p[3], p[4], p[5]);
++                                      "length %d\n", p[0], p[1]);
+                               break;
+                       }
+                       len -= p[1] + 2;