--- /dev/null
+From b9f515e3e3861abbaa093359f7c6f31283695228 Mon Sep 17 00:00:00 2001
+From: Marcelo Roberto Jimenez <mroberto@cpti.cetuc.puc-rio.br>
+Date: Mon, 18 Oct 2010 22:38:08 +0100
+Subject: ARM: 6456/1: Fix for building DEBUG with sa11xx_base.c as a module.
+
+From: Marcelo Roberto Jimenez <mroberto@cpti.cetuc.puc-rio.br>
+
+commit b9f515e3e3861abbaa093359f7c6f31283695228 upstream.
+
+This patch fixes a compilation issue when compiling PCMCIA SA1100
+support as a module with PCMCIA_DEBUG enabled. The symbol
+soc_pcmcia_debug was not being exported.
+
+Signed-off-by: Marcelo Roberto Jimenez <mroberto@cpti.cetuc.puc-rio.br>
+Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/pcmcia/soc_common.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/pcmcia/soc_common.c
++++ b/drivers/pcmcia/soc_common.c
+@@ -65,6 +65,7 @@ void soc_pcmcia_debug(struct soc_pcmcia_
+ va_end(args);
+ }
+ }
++EXPORT_SYMBOL(soc_pcmcia_debug);
+
+ #endif
+
--- /dev/null
+From 44266416f786514ec43a0d15ad951c34566b99c9 Mon Sep 17 00:00:00 2001
+From: Anton Vorontsov <cbouatmailru@gmail.com>
+Date: Mon, 29 Nov 2010 18:46:22 +0300
+Subject: ARM: cns3xxx: Fix build with CONFIG_PCI=y
+
+From: Anton Vorontsov <cbouatmailru@gmail.com>
+
+commit 44266416f786514ec43a0d15ad951c34566b99c9 upstream.
+
+commit 6338a6aa7c082f11d55712251e14178c68bf5869 ("ARM: 6269/1: Add 'code'
+parameter for hook_fault_code()") breaks CNS3xxx build:
+
+ CC arch/arm/mach-cns3xxx/pcie.o
+pcie.c: In function 'cns3xxx_pcie_init':
+pcie.c:373: warning: passing argument 4 of 'hook_fault_code' makes integer from pointer without a cast
+pcie.c:373: error: too few arguments to function 'hook_fault_code'
+
+This commit fixes the small issue.
+
+Signed-off-by: Anton Vorontsov <cbouatmailru@gmail.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ arch/arm/mach-cns3xxx/pcie.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/arm/mach-cns3xxx/pcie.c
++++ b/arch/arm/mach-cns3xxx/pcie.c
+@@ -369,7 +369,7 @@ static int __init cns3xxx_pcie_init(void
+ {
+ int i;
+
+- hook_fault_code(16 + 6, cns3xxx_pcie_abort_handler, SIGBUS,
++ hook_fault_code(16 + 6, cns3xxx_pcie_abort_handler, SIGBUS, 0,
+ "imprecise external abort");
+
+ for (i = 0; i < ARRAY_SIZE(cns3xxx_pcie); i++) {
--- /dev/null
+From 0597d1b99fcfc2c0eada09a698f85ed413d4ba84 Mon Sep 17 00:00:00 2001
+From: Oliver Hartkopp <socketcan@hartkopp.net>
+Date: Wed, 10 Nov 2010 12:10:30 +0000
+Subject: can-bcm: fix minor heap overflow
+
+From: Oliver Hartkopp <socketcan@hartkopp.net>
+
+commit 0597d1b99fcfc2c0eada09a698f85ed413d4ba84 upstream.
+
+On 64-bit platforms the ASCII representation of a pointer may be up to 17
+bytes long. This patch increases the length of the buffer accordingly.
+
+http://marc.info/?l=linux-netdev&m=128872251418192&w=2
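+
+A quick userspace illustration of the sizing concern (the in-kernel %p of
+that era printed the raw pointer value, while userspace %p also prepends
+"0x", but the conclusion is the same):
+
+	#include <stdio.h>
+
+	int main(void)
+	{
+		int dummy;
+		char buf[32];
+
+		/* measure how many bytes the textual pointer needs, incl. NUL */
+		int needed = snprintf(buf, sizeof(buf), "%p", (void *)&dummy) + 1;
+
+		printf("bytes needed: %d (procname[9] was too small)\n", needed);
+		return 0;
+	}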
+
+Reported-by: Dan Rosenberg <drosenberg@vsecurity.com>
+Signed-off-by: Oliver Hartkopp <socketcan@hartkopp.net>
+CC: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ net/can/bcm.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/net/can/bcm.c
++++ b/net/can/bcm.c
+@@ -125,7 +125,7 @@ struct bcm_sock {
+ struct list_head tx_ops;
+ unsigned long dropped_usr_msgs;
+ struct proc_dir_entry *bcm_proc_read;
+- char procname [9]; /* pointer printed in ASCII with \0 */
++ char procname [20]; /* pointer printed in ASCII with \0 */
+ };
+
+ static inline struct bcm_sock *bcm_sk(const struct sock *sk)
--- /dev/null
+From 57fe93b374a6b8711995c2d466c502af9f3a08bb Mon Sep 17 00:00:00 2001
+From: David S. Miller <davem@davemloft.net>
+Date: Wed, 10 Nov 2010 10:38:24 -0800
+Subject: filter: make sure filters dont read uninitialized memory
+
+From: David S. Miller <davem@davemloft.net>
+
+commit 57fe93b374a6b8711995c2d466c502af9f3a08bb upstream.
+
+There is a possibility that malicious users can get limited information about
+the uninitialized stack mem[] array. Even though the sk_run_filter() result is
+bounded by the packet length (0 .. 65535), one could imagine this being used
+by a hostile user.
+
+Initializing the mem[] array, as Dan Rosenberg suggested in his patch, is
+expensive since most filters don't even use this array.
+
+It's hard to do this validation in sk_chk_filter(), because of
+the jumps. This might be done later.
+
+In this patch, I use a bitmap (a single long var) so that only filters
+using mem[] loads/stores pay the price of added security checks.
+
+For other filters, additional cost is a single instruction.
+
+[ Since we access fentry->k a lot now, cache it in a local variable
+ and mark filter entry pointer as const. -DaveM ]
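+
+For illustration, a minimal userspace sketch of the memvalid idea (not the
+kernel code; names and sizes are illustrative): a store sets the slot's bit,
+and a load from a slot whose bit is clear yields 0 instead of whatever
+happens to be on the stack.
+
+	#include <stdio.h>
+
+	#define MEMWORDS 16
+
+	int main(void)
+	{
+		unsigned int mem[MEMWORDS];	/* deliberately left uninitialized */
+		unsigned long memvalid = 0;
+		unsigned int k = 3;
+
+		/* ST k: mark the slot as written, then store */
+		memvalid |= 1UL << k;
+		mem[k] = 42;
+
+		/* LD k: only read slots that were actually written */
+		printf("%u\n", (memvalid & (1UL << k)) ? mem[k] : 0);	/* 42 */
+		printf("%u\n", (memvalid & (1UL << 5)) ? mem[5] : 0);	/* 0  */
+		return 0;
+	}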
+
+Reported-by: Dan Rosenberg <drosenberg@vsecurity.com>
+Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ net/core/filter.c | 64 +++++++++++++++++++++++++++++-------------------------
+ 1 file changed, 35 insertions(+), 29 deletions(-)
+
+--- a/net/core/filter.c
++++ b/net/core/filter.c
+@@ -112,39 +112,41 @@ EXPORT_SYMBOL(sk_filter);
+ */
+ unsigned int sk_run_filter(struct sk_buff *skb, struct sock_filter *filter, int flen)
+ {
+- struct sock_filter *fentry; /* We walk down these */
+ void *ptr;
+ u32 A = 0; /* Accumulator */
+ u32 X = 0; /* Index Register */
+ u32 mem[BPF_MEMWORDS]; /* Scratch Memory Store */
++ unsigned long memvalid = 0;
+ u32 tmp;
+ int k;
+ int pc;
+
++ BUILD_BUG_ON(BPF_MEMWORDS > BITS_PER_LONG);
+ /*
+ * Process array of filter instructions.
+ */
+ for (pc = 0; pc < flen; pc++) {
+- fentry = &filter[pc];
++ const struct sock_filter *fentry = &filter[pc];
++ u32 f_k = fentry->k;
+
+ switch (fentry->code) {
+ case BPF_S_ALU_ADD_X:
+ A += X;
+ continue;
+ case BPF_S_ALU_ADD_K:
+- A += fentry->k;
++ A += f_k;
+ continue;
+ case BPF_S_ALU_SUB_X:
+ A -= X;
+ continue;
+ case BPF_S_ALU_SUB_K:
+- A -= fentry->k;
++ A -= f_k;
+ continue;
+ case BPF_S_ALU_MUL_X:
+ A *= X;
+ continue;
+ case BPF_S_ALU_MUL_K:
+- A *= fentry->k;
++ A *= f_k;
+ continue;
+ case BPF_S_ALU_DIV_X:
+ if (X == 0)
+@@ -152,49 +154,49 @@ unsigned int sk_run_filter(struct sk_buf
+ A /= X;
+ continue;
+ case BPF_S_ALU_DIV_K:
+- A /= fentry->k;
++ A /= f_k;
+ continue;
+ case BPF_S_ALU_AND_X:
+ A &= X;
+ continue;
+ case BPF_S_ALU_AND_K:
+- A &= fentry->k;
++ A &= f_k;
+ continue;
+ case BPF_S_ALU_OR_X:
+ A |= X;
+ continue;
+ case BPF_S_ALU_OR_K:
+- A |= fentry->k;
++ A |= f_k;
+ continue;
+ case BPF_S_ALU_LSH_X:
+ A <<= X;
+ continue;
+ case BPF_S_ALU_LSH_K:
+- A <<= fentry->k;
++ A <<= f_k;
+ continue;
+ case BPF_S_ALU_RSH_X:
+ A >>= X;
+ continue;
+ case BPF_S_ALU_RSH_K:
+- A >>= fentry->k;
++ A >>= f_k;
+ continue;
+ case BPF_S_ALU_NEG:
+ A = -A;
+ continue;
+ case BPF_S_JMP_JA:
+- pc += fentry->k;
++ pc += f_k;
+ continue;
+ case BPF_S_JMP_JGT_K:
+- pc += (A > fentry->k) ? fentry->jt : fentry->jf;
++ pc += (A > f_k) ? fentry->jt : fentry->jf;
+ continue;
+ case BPF_S_JMP_JGE_K:
+- pc += (A >= fentry->k) ? fentry->jt : fentry->jf;
++ pc += (A >= f_k) ? fentry->jt : fentry->jf;
+ continue;
+ case BPF_S_JMP_JEQ_K:
+- pc += (A == fentry->k) ? fentry->jt : fentry->jf;
++ pc += (A == f_k) ? fentry->jt : fentry->jf;
+ continue;
+ case BPF_S_JMP_JSET_K:
+- pc += (A & fentry->k) ? fentry->jt : fentry->jf;
++ pc += (A & f_k) ? fentry->jt : fentry->jf;
+ continue;
+ case BPF_S_JMP_JGT_X:
+ pc += (A > X) ? fentry->jt : fentry->jf;
+@@ -209,7 +211,7 @@ unsigned int sk_run_filter(struct sk_buf
+ pc += (A & X) ? fentry->jt : fentry->jf;
+ continue;
+ case BPF_S_LD_W_ABS:
+- k = fentry->k;
++ k = f_k;
+ load_w:
+ ptr = load_pointer(skb, k, 4, &tmp);
+ if (ptr != NULL) {
+@@ -218,7 +220,7 @@ load_w:
+ }
+ break;
+ case BPF_S_LD_H_ABS:
+- k = fentry->k;
++ k = f_k;
+ load_h:
+ ptr = load_pointer(skb, k, 2, &tmp);
+ if (ptr != NULL) {
+@@ -227,7 +229,7 @@ load_h:
+ }
+ break;
+ case BPF_S_LD_B_ABS:
+- k = fentry->k;
++ k = f_k;
+ load_b:
+ ptr = load_pointer(skb, k, 1, &tmp);
+ if (ptr != NULL) {
+@@ -242,32 +244,34 @@ load_b:
+ X = skb->len;
+ continue;
+ case BPF_S_LD_W_IND:
+- k = X + fentry->k;
++ k = X + f_k;
+ goto load_w;
+ case BPF_S_LD_H_IND:
+- k = X + fentry->k;
++ k = X + f_k;
+ goto load_h;
+ case BPF_S_LD_B_IND:
+- k = X + fentry->k;
++ k = X + f_k;
+ goto load_b;
+ case BPF_S_LDX_B_MSH:
+- ptr = load_pointer(skb, fentry->k, 1, &tmp);
++ ptr = load_pointer(skb, f_k, 1, &tmp);
+ if (ptr != NULL) {
+ X = (*(u8 *)ptr & 0xf) << 2;
+ continue;
+ }
+ return 0;
+ case BPF_S_LD_IMM:
+- A = fentry->k;
++ A = f_k;
+ continue;
+ case BPF_S_LDX_IMM:
+- X = fentry->k;
++ X = f_k;
+ continue;
+ case BPF_S_LD_MEM:
+- A = mem[fentry->k];
++ A = (memvalid & (1UL << f_k)) ?
++ mem[f_k] : 0;
+ continue;
+ case BPF_S_LDX_MEM:
+- X = mem[fentry->k];
++ X = (memvalid & (1UL << f_k)) ?
++ mem[f_k] : 0;
+ continue;
+ case BPF_S_MISC_TAX:
+ X = A;
+@@ -276,14 +280,16 @@ load_b:
+ A = X;
+ continue;
+ case BPF_S_RET_K:
+- return fentry->k;
++ return f_k;
+ case BPF_S_RET_A:
+ return A;
+ case BPF_S_ST:
+- mem[fentry->k] = A;
++ memvalid |= 1UL << f_k;
++ mem[f_k] = A;
+ continue;
+ case BPF_S_STX:
+- mem[fentry->k] = X;
++ memvalid |= 1UL << f_k;
++ mem[f_k] = X;
+ continue;
+ default:
+ WARN_ON(1);
--- /dev/null
+From 25c9170ed64a6551beefe9315882f754e14486f4 Mon Sep 17 00:00:00 2001
+From: Kenji Kaneshige <kaneshige.kenji@jp.fujitsu.com>
+Date: Tue, 30 Nov 2010 17:36:08 +0900
+Subject: genirq: Fix incorrect proc spurious output
+
+From: Kenji Kaneshige <kaneshige.kenji@jp.fujitsu.com>
+
+commit 25c9170ed64a6551beefe9315882f754e14486f4 upstream.
+
+Since commit a1afb637 (switch /proc/irq/*/spurious to seq_file) all
+/proc/irq/XX/spurious files show the information of irq 0.
+
+Current irq_spurious_proc_open() passes on NULL as the 3rd argument,
+which is used as an IRQ number in irq_spurious_proc_show(), to
+single_open(). Because of this, all /proc/irq/XX/spurious files show
+IRQ 0 information regardless of the IRQ number.
+
+To fix the problem, irq_spurious_proc_open() must pass on the
+appropriate data (IRQ number) to single_open().
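+
+For reference, a minimal sketch of the seq_file contract involved
+(hypothetical foo_* names, not the kernel's actual functions): whatever is
+passed as the third argument of single_open() is what the show callback
+later finds in m->private.
+
+	#include <linux/proc_fs.h>
+	#include <linux/seq_file.h>
+
+	static int foo_show(struct seq_file *m, void *v)
+	{
+		long irq = (long) m->private;	/* the per-entry data */
+
+		seq_printf(m, "irq %ld\n", irq);
+		return 0;
+	}
+
+	static int foo_open(struct inode *inode, struct file *file)
+	{
+		/* PDE(inode)->data carries what was stored at proc entry creation */
+		return single_open(file, foo_show, PDE(inode)->data);
+	}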
+
+Signed-off-by: Kenji Kaneshige <kaneshige.kenji@jp.fujitsu.com>
+Reviewed-by: Yong Zhang <yong.zhang0@gmail.com>
+LKML-Reference: <4CF4B778.90604@jp.fujitsu.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ kernel/irq/proc.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/kernel/irq/proc.c
++++ b/kernel/irq/proc.c
+@@ -214,7 +214,7 @@ static int irq_spurious_proc_show(struct
+
+ static int irq_spurious_proc_open(struct inode *inode, struct file *file)
+ {
+- return single_open(file, irq_spurious_proc_show, NULL);
++ return single_open(file, irq_spurious_proc_show, PDE(inode)->data);
+ }
+
+ static const struct file_operations irq_spurious_proc_fops = {
--- /dev/null
+From dmitry.torokhov@gmail.com Tue Dec 7 11:41:18 2010
+From: Dmitry Torokhov <dmitry.torokhov@gmail.com>
+Date: Thu, 4 Nov 2010 09:12:44 -0700
+Subject: Input: i8042 - add Sony VAIO VPCZ122GX to nomux list
+To: stable@kernel.org
+Message-ID: <20101104161243.GA11168@core.coreip.homeip.net>
+Content-Disposition: inline
+
+From: Dmitry Torokhov <dmitry.torokhov@gmail.com>
+
+[Note that the mainline will not have this particular fix but rather
+will blacklist the entire VAIO line based on the DMI board name. For
+stable I am being a bit more cautious and blacklist one particular
+product.]
+
+Trying to query/activate active multiplexing mode on this VAIO makes
+both keyboard and touchpad inoperable. Future kernels will blacklist
+the entire VAIO line; here we blacklist just one particular model.
+
+Reported-by: Jesse Barnes <jbarnes@virtuousgeek.org>
+Signed-off-by: Dmitry Torokhov <dtor@mail.ru>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/input/serio/i8042-x86ia64io.h | 7 +++++++
+ 1 file changed, 7 insertions(+)
+
+--- a/drivers/input/serio/i8042-x86ia64io.h
++++ b/drivers/input/serio/i8042-x86ia64io.h
+@@ -333,6 +333,13 @@ static const struct dmi_system_id __init
+ },
+ },
+ {
++ /* Sony Vaio VPCZ122GX */
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "VPCZ122GX"),
++ },
++ },
++ {
+ /* Sony Vaio FS-115b */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
--- /dev/null
+From c94d3fb01fb6db1899cdf53ea4eb9d38e08a08fe Mon Sep 17 00:00:00 2001
+From: Arnaud Lacombe <lacombar@gmail.com>
+Date: Mon, 23 Aug 2010 12:01:24 -0400
+Subject: kbuild: use getopt_long(), not its _only() variant
+
+From: Arnaud Lacombe <lacombar@gmail.com>
+
+commit c94d3fb01fb6db1899cdf53ea4eb9d38e08a08fe upstream.
+
+NetBSD lacks getopt_long_only() whereas getopt_long() works just fine.
+
+Signed-off-by: Arnaud Lacombe <lacombar@gmail.com>
+Acked-by: Sam Ravnborg <sam@ravnborg.org>
+Signed-off-by: Michal Marek <mmarek@suse.cz>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ scripts/kconfig/conf.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/scripts/kconfig/conf.c
++++ b/scripts/kconfig/conf.c
+@@ -466,7 +466,7 @@ int main(int ac, char **av)
+ bindtextdomain(PACKAGE, LOCALEDIR);
+ textdomain(PACKAGE);
+
+- while ((opt = getopt_long_only(ac, av, "", long_opts, NULL)) != -1) {
++ while ((opt = getopt_long(ac, av, "", long_opts, NULL)) != -1) {
+ input_mode = (enum input_mode)opt;
+ switch (opt) {
+ case silentoldconfig:
--- /dev/null
+From a6331d6f9a4298173b413cf99a40cc86a9d92c37 Mon Sep 17 00:00:00 2001
+From: andrew hendry <andrew.hendry@gmail.com>
+Date: Wed, 3 Nov 2010 12:54:53 +0000
+Subject: memory corruption in X.25 facilities parsing
+
+From: andrew hendry <andrew.hendry@gmail.com>
+
+commit a6331d6f9a4298173b413cf99a40cc86a9d92c37 upstream.
+
+Signed-off-by: Andrew Hendry <andrew.hendry@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ net/x25/x25_facilities.c | 8 ++++----
+ net/x25/x25_in.c | 2 ++
+ 2 files changed, 6 insertions(+), 4 deletions(-)
+
+--- a/net/x25/x25_facilities.c
++++ b/net/x25/x25_facilities.c
+@@ -134,15 +134,15 @@ int x25_parse_facilities(struct sk_buff
+ case X25_FAC_CLASS_D:
+ switch (*p) {
+ case X25_FAC_CALLING_AE:
+- if (p[1] > X25_MAX_DTE_FACIL_LEN)
+- break;
++ if (p[1] > X25_MAX_DTE_FACIL_LEN || p[1] <= 1)
++ return 0;
+ dte_facs->calling_len = p[2];
+ memcpy(dte_facs->calling_ae, &p[3], p[1] - 1);
+ *vc_fac_mask |= X25_MASK_CALLING_AE;
+ break;
+ case X25_FAC_CALLED_AE:
+- if (p[1] > X25_MAX_DTE_FACIL_LEN)
+- break;
++ if (p[1] > X25_MAX_DTE_FACIL_LEN || p[1] <= 1)
++ return 0;
+ dte_facs->called_len = p[2];
+ memcpy(dte_facs->called_ae, &p[3], p[1] - 1);
+ *vc_fac_mask |= X25_MASK_CALLED_AE;
+--- a/net/x25/x25_in.c
++++ b/net/x25/x25_in.c
+@@ -119,6 +119,8 @@ static int x25_state1_machine(struct soc
+ &x25->vc_facil_mask);
+ if (len > 0)
+ skb_pull(skb, len);
++ else
++ return -1;
+ /*
+ * Copy any Call User Data.
+ */
--- /dev/null
+From 8acfe468b0384e834a303f08ebc4953d72fb690a Mon Sep 17 00:00:00 2001
+From: David S. Miller <davem@davemloft.net>
+Date: Thu, 28 Oct 2010 11:41:55 -0700
+Subject: net: Limit socket I/O iovec total length to INT_MAX.
+
+From: David S. Miller <davem@davemloft.net>
+
+commit 8acfe468b0384e834a303f08ebc4953d72fb690a upstream.
+
+This helps protect us from overflow issues down in the
+individual protocol sendmsg/recvmsg handlers. Once
+we hit INT_MAX we truncate out the rest of the iovec
+by setting the iov_len members to zero.
+
+This works because:
+
+1) For SOCK_STREAM and SOCK_SEQPACKET sockets, partial
+ writes are allowed and the application will just continue
+ with another write to send the rest of the data.
+
+2) For datagram oriented sockets, where there must be a
+   one-to-one correspondence between write() calls and
+ packets on the wire, INT_MAX is going to be far larger
+ than the packet size limit the protocol is going to
+ check for and signal with -EMSGSIZE.
+
+Based upon a patch by Linus Torvalds.
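+
+A condensed, self-contained illustration of the clamping idea (userspace,
+illustrative names; the lengths are only inspected, never dereferenced; the
+actual kernel change is in verify_iovec() below):
+
+	#include <limits.h>
+	#include <stdio.h>
+	#include <sys/uio.h>
+
+	static int clamp_iov(struct iovec *iov, int n)
+	{
+		int total = 0, i;
+
+		for (i = 0; i < n; i++) {
+			size_t len = iov[i].iov_len;
+
+			if (len > (size_t)(INT_MAX - total)) {
+				len = INT_MAX - total;
+				iov[i].iov_len = len;	/* trailing entries become 0 */
+			}
+			total += len;
+		}
+		return total;	/* never negative, never above INT_MAX */
+	}
+
+	int main(void)
+	{
+		char a[1], b[1];
+		struct iovec iov[2] = {
+			{ .iov_base = a, .iov_len = (size_t)INT_MAX },
+			{ .iov_base = b, .iov_len = 100 },
+		};
+
+		printf("total %d, second iov_len %zu\n",
+		       clamp_iov(iov, 2), iov[1].iov_len);
+		return 0;
+	}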
+
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ include/linux/socket.h | 2 +-
+ net/compat.c | 10 ++++++----
+ net/core/iovec.c | 20 +++++++++-----------
+ 3 files changed, 16 insertions(+), 16 deletions(-)
+
+--- a/include/linux/socket.h
++++ b/include/linux/socket.h
+@@ -322,7 +322,7 @@ extern int csum_partial_copy_fromiovecen
+ int offset,
+ unsigned int len, __wsum *csump);
+
+-extern long verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr *address, int mode);
++extern int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr *address, int mode);
+ extern int memcpy_toiovec(struct iovec *v, unsigned char *kdata, int len);
+ extern int memcpy_toiovecend(const struct iovec *v, unsigned char *kdata,
+ int offset, int len);
+--- a/net/compat.c
++++ b/net/compat.c
+@@ -41,10 +41,12 @@ static inline int iov_from_user_compat_t
+ compat_size_t len;
+
+ if (get_user(len, &uiov32->iov_len) ||
+- get_user(buf, &uiov32->iov_base)) {
+- tot_len = -EFAULT;
+- break;
+- }
++ get_user(buf, &uiov32->iov_base))
++ return -EFAULT;
++
++ if (len > INT_MAX - tot_len)
++ len = INT_MAX - tot_len;
++
+ tot_len += len;
+ kiov->iov_base = compat_ptr(buf);
+ kiov->iov_len = (__kernel_size_t) len;
+--- a/net/core/iovec.c
++++ b/net/core/iovec.c
+@@ -35,10 +35,9 @@
+ * in any case.
+ */
+
+-long verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr *address, int mode)
++int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr *address, int mode)
+ {
+- int size, ct;
+- long err;
++ int size, ct, err;
+
+ if (m->msg_namelen) {
+ if (mode == VERIFY_READ) {
+@@ -60,14 +59,13 @@ long verify_iovec(struct msghdr *m, stru
+ err = 0;
+
+ for (ct = 0; ct < m->msg_iovlen; ct++) {
+- err += iov[ct].iov_len;
+- /*
+- * Goal is not to verify user data, but to prevent returning
+- * negative value, which is interpreted as errno.
+- * Overflow is still possible, but it is harmless.
+- */
+- if (err < 0)
+- return -EMSGSIZE;
++ size_t len = iov[ct].iov_len;
++
++ if (len > INT_MAX - err) {
++ len = INT_MAX - err;
++ iov[ct].iov_len = len;
++ }
++ err += len;
+ }
+
+ return err;
--- /dev/null
+From 253eacc070b114c2ec1f81b067d2fed7305467b0 Mon Sep 17 00:00:00 2001
+From: Linus Torvalds <torvalds@linux-foundation.org>
+Date: Sat, 30 Oct 2010 16:43:10 -0700
+Subject: net: Truncate recvfrom and sendto length to INT_MAX.
+
+From: Linus Torvalds <torvalds@linux-foundation.org>
+
+commit 253eacc070b114c2ec1f81b067d2fed7305467b0 upstream.
+
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ net/socket.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+--- a/net/socket.c
++++ b/net/socket.c
+@@ -1651,6 +1651,8 @@ SYSCALL_DEFINE6(sendto, int, fd, void __
+ struct iovec iov;
+ int fput_needed;
+
++ if (len > INT_MAX)
++ len = INT_MAX;
+ sock = sockfd_lookup_light(fd, &err, &fput_needed);
+ if (!sock)
+ goto out;
+@@ -1708,6 +1710,8 @@ SYSCALL_DEFINE6(recvfrom, int, fd, void
+ int err, err2;
+ int fput_needed;
+
++ if (size > INT_MAX)
++ size = INT_MAX;
+ sock = sockfd_lookup_light(fd, &err, &fput_needed);
+ if (!sock)
+ goto out;
--- /dev/null
+From 398812159e328478ae49b4bd01f0d71efea96c39 Mon Sep 17 00:00:00 2001
+From: Heiko Carstens <heiko.carstens@de.ibm.com>
+Date: Wed, 1 Dec 2010 10:08:01 +0100
+Subject: [S390] nohz/s390: fix arch_needs_cpu() return value on offline cpus
+
+From: Heiko Carstens <heiko.carstens@de.ibm.com>
+
+commit 398812159e328478ae49b4bd01f0d71efea96c39 upstream.
+
+This fixes the same problem as described in the patch "nohz: fix
+printk_needs_cpu() return value on offline cpus" for the arch_needs_cpu()
+primitive:
+
+arch_needs_cpu() may return 1 if called on offline cpus. When a cpu gets
+offlined it schedules the idle process which, before killing its own cpu,
+will call tick_nohz_stop_sched_tick().
+That function in turn will call arch_needs_cpu() in order to check if the
+local tick can be disabled. On offline cpus this function should naturally
+return 0 since, regardless of whether the tick gets disabled or not, the cpu
+will be dead shortly after. That is besides the fact that __cpu_disable()
+should already have made sure that no interrupts on the offlined cpu will be
+delivered anyway.
+
+In this case it prevents tick_nohz_stop_sched_tick() from calling
+select_nohz_load_balancer(). No idea if that really is a problem. However what
+made me debug this is that on 2.6.32 the function get_nohz_load_balancer() is
+used within __mod_timer() to select a cpu on which a timer gets enqueued.
+If arch_needs_cpu() returns 1 then the nohz_load_balancer cpu doesn't get
+updated when a cpu gets offlined. It may contain the cpu number of an offline
+cpu. In turn timers get enqueued on an offline cpu and not very surprisingly
+they never expire and cause system hangs.
+
+This has been observed on 2.6.32 kernels. On current kernels __mod_timer()
+uses get_nohz_timer_target() which doesn't have that problem. However there
+might be other problems because of the too early exit from
+tick_nohz_stop_sched_tick() in case a cpu goes offline.
+
+This specific bug was introduced with 3c5d92a0 "nohz: Introduce
+arch_needs_cpu".
+
+In this case a cpu hotplug notifier is used to fix the issue in order to keep
+the normal/fast path small. All we need to do is to clear the condition that
+makes arch_needs_cpu() return 1 since it is just a performance improvement
+which is supposed to keep the local tick running for a short period if a cpu
+goes idle. Nothing special needs to be done except for clearing the condition.
+
+Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
+Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
+Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ arch/s390/kernel/vtime.c | 19 +++++++++++++++++++
+ 1 file changed, 19 insertions(+)
+
+--- a/arch/s390/kernel/vtime.c
++++ b/arch/s390/kernel/vtime.c
+@@ -19,6 +19,7 @@
+ #include <linux/kernel_stat.h>
+ #include <linux/rcupdate.h>
+ #include <linux/posix-timers.h>
++#include <linux/cpu.h>
+
+ #include <asm/s390_ext.h>
+ #include <asm/timer.h>
+@@ -565,6 +566,23 @@ void init_cpu_vtimer(void)
+ __ctl_set_bit(0,10);
+ }
+
++static int __cpuinit s390_nohz_notify(struct notifier_block *self,
++ unsigned long action, void *hcpu)
++{
++ struct s390_idle_data *idle;
++ long cpu = (long) hcpu;
++
++ idle = &per_cpu(s390_idle, cpu);
++ switch (action) {
++ case CPU_DYING:
++ case CPU_DYING_FROZEN:
++ idle->nohz_delay = 0;
++ default:
++ break;
++ }
++ return NOTIFY_OK;
++}
++
+ void __init vtime_init(void)
+ {
+ /* request the cpu timer external interrupt */
+@@ -573,5 +591,6 @@ void __init vtime_init(void)
+
+ /* Enable cpu timer interrupts on the boot cpu. */
+ init_cpu_vtimer();
++ cpu_notifier(s390_nohz_notify, 0);
+ }
+
--- /dev/null
+From 3e57f1626b5febe5cc99aa6870377deef3ae03cc Mon Sep 17 00:00:00 2001
+From: Jarkko Nikula <jhnikula@gmail.com>
+Date: Mon, 11 Oct 2010 14:18:45 -0700
+Subject: omap: dma: Fix buffering disable bit setting for omap24xx
+
+From: Jarkko Nikula <jhnikula@gmail.com>
+
+commit 3e57f1626b5febe5cc99aa6870377deef3ae03cc upstream.
+
+The errata workaround for omap24xx does not set the buffering disable bit 25
+as it is supposed to, but the channel enable bit 7 instead.
+
+Background for this fix is the DMA stalling issue with ASoC omap-mcbsp
+driver. Peter Ujfalusi <peter.ujfalusi@nokia.com> has found an issue in
+recording that the DMA stall could happen if there were a buffer overrun
+detected by ALSA and the DMA was stopped and restarted due that. This
+problem is known to occur on both OMAP2420 and OMAP3. It can recover on
+OMAP3 after dma free, dma request and reconfiguration cycle. However, on
+OMAP2420 it seems that only way to recover is a reset.
+
+The problem was not visible before commit c12abc0. That commit changed things
+so that the McBSP transmitter/receiver is released from reset only when
+needed. That is, merely having the McBSP transmitter enabled without
+transmission was able to prevent this DMA stall problem on the receiving side,
+so the underlying problem did not show up until now. The McBSP transmitter
+itself does not seem to be the reason, since the DMA stall does not recover by
+enabling the transmission after the stall.
+
+Debugging showed that there was a DMA write active during DMA stop time and
+it never completed even when restarting the DMA. Experimenting showed that
+the DMA buffering disable bit could be used to avoid stalling when using
+source synchronized transfers. However that could have a performance hit, and
+the OMAP3 TRM states that buffering disable is not allowed for destination
+synchronized transfers, so a subsequent patch will implement a method to
+complete DMA writes when stopping.
+
+This patch is based on the assumption that the complete lock-up on OMAP2420
+is a different but related problem. I don't have access to the OMAP2420
+errata, but I believe this old workaround was put here for a reason;
+unfortunately the wrong bit was typed and the problem showed up only now.
+
+Signed-off-by: Jarkko Nikula <jhnikula@gmail.com>
+Signed-off-by: Peter Ujfalusi <peter.ujfalusi@nokia.com>
+Acked-by: Manjunath Kondaiah G <manjugk@ti.com>
+Signed-off-by: Tony Lindgren <tony@atomide.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ arch/arm/plat-omap/dma.c | 14 ++++++++++----
+ arch/arm/plat-omap/include/plat/dma.h | 1 +
+ 2 files changed, 11 insertions(+), 4 deletions(-)
+
+--- a/arch/arm/plat-omap/dma.c
++++ b/arch/arm/plat-omap/dma.c
+@@ -996,11 +996,17 @@ void omap_start_dma(int lch)
+ l = dma_read(CCR(lch));
+
+ /*
+- * Errata: On ES2.0 BUFFERING disable must be set.
+- * This will always fail on ES1.0
++ * Errata: Inter Frame DMA buffering issue (All OMAP2420 and
++ * OMAP2430ES1.0): DMA will wrongly buffer elements if packing and
++ * bursting is enabled. This might result in data gets stalled in
++ * FIFO at the end of the block.
++ * Workaround: DMA channels must have BUFFERING_DISABLED bit set to
++ * guarantee no data will stay in the DMA FIFO in case inter frame
++ * buffering occurs.
+ */
+- if (cpu_is_omap24xx())
+- l |= OMAP_DMA_CCR_EN;
++ if (cpu_is_omap2420() ||
++ (cpu_is_omap2430() && (omap_type() == OMAP2430_REV_ES1_0)))
++ l |= OMAP_DMA_CCR_BUFFERING_DISABLE;
+
+ l |= OMAP_DMA_CCR_EN;
+ dma_write(l, CCR(lch));
+--- a/arch/arm/plat-omap/include/plat/dma.h
++++ b/arch/arm/plat-omap/include/plat/dma.h
+@@ -335,6 +335,7 @@
+ #define OMAP2_DMA_MISALIGNED_ERR_IRQ (1 << 11)
+
+ #define OMAP_DMA_CCR_EN (1 << 7)
++#define OMAP_DMA_CCR_BUFFERING_DISABLE (1 << 25)
+
+ #define OMAP_DMA_DATA_TYPE_S8 0x00
+ #define OMAP_DMA_DATA_TYPE_S16 0x01
--- /dev/null
+From 0e4905c0199d683497833be60a428c784d7575b8 Mon Sep 17 00:00:00 2001
+From: Peter Ujfalusi <peter.ujfalusi@nokia.com>
+Date: Mon, 11 Oct 2010 14:18:56 -0700
+Subject: OMAP3: DMA: Errata i541: sDMA FIFO draining does not finish
+
+From: Peter Ujfalusi <peter.ujfalusi@nokia.com>
+
+commit 0e4905c0199d683497833be60a428c784d7575b8 upstream.
+
+Implement the suggested workaround for OMAP3 regarding the sDMA draining
+issue when the channel is disabled on the fly.
+This errata affects the following configuration:
+sDMA transfer is source synchronized
+Buffering is enabled
+SmartStandby is selected.
+
+The issue can be easily reproduced by creating overrun situation while
+recording audio.
+Either introduce load to the CPU:
+nice -19 arecord -D hw:0 -M -B 10000 -F 5000 -f dat > /dev/null & \
+dd if=/dev/urandom of=/dev/null
+
+or suspending the arecord, and resuming it:
+arecord -D hw:0 -M -B 10000 -F 5000 -f dat > /dev/null
+CTRL+Z; fg; CTRL+Z; fg; ...
+
+In case of an overrun audio stops the DMA and restarts it (without resetting
+the sDMA channel). When we hit this errata in the stop case (the sDMA drain
+did not complete), at the following start the sDMA is not going to be
+operational (it is still draining).
+This leads to a DMA stall condition.
+On OMAP3 we can recover with an sDMA channel reset; it has been observed
+that introducing unrelated sDMA activity might also help (reading
+from MMC for example).
+
+The same errata exists for OMAP2, where the suggestion is to disable the
+buffering to avoid this type of error.
+On OMAP3 the suggestion is to set sDMA to NoStandby before disabling
+the channel, and wait for the drain to finish, then configure sDMA to
+SmartStandby again.
+
+Signed-off-by: Peter Ujfalusi <peter.ujfalusi@nokia.com>
+Acked-by: Jarkko Nikula <jhnikula@gmail.com>
+Acked-by : Santosh Shilimkar <santosh.shilimkar@ti.com>
+Acked-by : Manjunath Kondaiah G <manjugk@ti.com>
+Signed-off-by: Tony Lindgren <tony@atomide.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ arch/arm/plat-omap/dma.c | 36 ++++++++++++++++++++++++++++++++--
+ arch/arm/plat-omap/include/plat/dma.h | 3 ++
+ 2 files changed, 37 insertions(+), 2 deletions(-)
+
+--- a/arch/arm/plat-omap/dma.c
++++ b/arch/arm/plat-omap/dma.c
+@@ -30,6 +30,7 @@
+ #include <linux/irq.h>
+ #include <linux/io.h>
+ #include <linux/slab.h>
++#include <linux/delay.h>
+
+ #include <asm/system.h>
+ #include <mach/hardware.h>
+@@ -1024,8 +1025,39 @@ void omap_stop_dma(int lch)
+ dma_write(0, CICR(lch));
+
+ l = dma_read(CCR(lch));
+- l &= ~OMAP_DMA_CCR_EN;
+- dma_write(l, CCR(lch));
++ /* OMAP3 Errata i541: sDMA FIFO draining does not finish */
++ if (cpu_is_omap34xx() && (l & OMAP_DMA_CCR_SEL_SRC_DST_SYNC)) {
++ int i = 0;
++ u32 sys_cf;
++
++ /* Configure No-Standby */
++ l = dma_read(OCP_SYSCONFIG);
++ sys_cf = l;
++ l &= ~DMA_SYSCONFIG_MIDLEMODE_MASK;
++ l |= DMA_SYSCONFIG_MIDLEMODE(DMA_IDLEMODE_NO_IDLE);
++ dma_write(l , OCP_SYSCONFIG);
++
++ l = dma_read(CCR(lch));
++ l &= ~OMAP_DMA_CCR_EN;
++ dma_write(l, CCR(lch));
++
++ /* Wait for sDMA FIFO drain */
++ l = dma_read(CCR(lch));
++ while (i < 100 && (l & (OMAP_DMA_CCR_RD_ACTIVE |
++ OMAP_DMA_CCR_WR_ACTIVE))) {
++ udelay(5);
++ i++;
++ l = dma_read(CCR(lch));
++ }
++ if (i >= 100)
++ printk(KERN_ERR "DMA drain did not complete on "
++ "lch %d\n", lch);
++ /* Restore OCP_SYSCONFIG */
++ dma_write(sys_cf, OCP_SYSCONFIG);
++ } else {
++ l &= ~OMAP_DMA_CCR_EN;
++ dma_write(l, CCR(lch));
++ }
+
+ if (!omap_dma_in_1510_mode() && dma_chan[lch].next_lch != -1) {
+ int next_lch, cur_lch = lch;
+--- a/arch/arm/plat-omap/include/plat/dma.h
++++ b/arch/arm/plat-omap/include/plat/dma.h
+@@ -335,6 +335,9 @@
+ #define OMAP2_DMA_MISALIGNED_ERR_IRQ (1 << 11)
+
+ #define OMAP_DMA_CCR_EN (1 << 7)
++#define OMAP_DMA_CCR_RD_ACTIVE (1 << 9)
++#define OMAP_DMA_CCR_WR_ACTIVE (1 << 10)
++#define OMAP_DMA_CCR_SEL_SRC_DST_SYNC (1 << 24)
+ #define OMAP_DMA_CCR_BUFFERING_DISABLE (1 << 25)
+
+ #define OMAP_DMA_DATA_TYPE_S8 0x00
--- /dev/null
+From dddd3379a619a4cb8247bfd3c94ca9ae3797aa2e Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Wed, 24 Nov 2010 10:05:55 +0100
+Subject: perf: Fix inherit vs. context rotation bug
+
+From: Thomas Gleixner <tglx@linutronix.de>
+
+commit dddd3379a619a4cb8247bfd3c94ca9ae3797aa2e upstream.
+
+It was found that sometimes children of tasks with inherited events had
+one extra event. Eventually it turned out to be due to the list rotation
+not being exclusive with the list iteration in the inheritance code.
+
+Cure this by temporarily disabling the rotation while we inherit the events.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
+LKML-Reference: <new-submission>
+Signed-off-by: Ingo Molnar <mingo@elte.hu>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ include/linux/perf_event.h | 1 +
+ kernel/perf_event.c | 22 ++++++++++++++++++++--
+ 2 files changed, 21 insertions(+), 2 deletions(-)
+
+--- a/include/linux/perf_event.h
++++ b/include/linux/perf_event.h
+@@ -788,6 +788,7 @@ struct perf_event_context {
+ int nr_active;
+ int is_active;
+ int nr_stat;
++ int rotate_disable;
+ atomic_t refcount;
+ struct task_struct *task;
+
+--- a/kernel/perf_event.c
++++ b/kernel/perf_event.c
+@@ -1620,8 +1620,12 @@ static void rotate_ctx(struct perf_event
+ {
+ raw_spin_lock(&ctx->lock);
+
+- /* Rotate the first entry last of non-pinned groups */
+- list_rotate_left(&ctx->flexible_groups);
++ /*
++ * Rotate the first entry last of non-pinned groups. Rotation might be
++ * disabled by the inheritance code.
++ */
++ if (!ctx->rotate_disable)
++ list_rotate_left(&ctx->flexible_groups);
+
+ raw_spin_unlock(&ctx->lock);
+ }
+@@ -5622,6 +5626,7 @@ int perf_event_init_task(struct task_str
+ struct perf_event *event;
+ struct task_struct *parent = current;
+ int inherited_all = 1;
++ unsigned long flags;
+ int ret = 0;
+
+ child->perf_event_ctxp = NULL;
+@@ -5662,6 +5667,15 @@ int perf_event_init_task(struct task_str
+ break;
+ }
+
++ /*
++ * We can't hold ctx->lock when iterating the ->flexible_group list due
++ * to allocations, but we need to prevent rotation because
++ * rotate_ctx() will change the list from interrupt context.
++ */
++ raw_spin_lock_irqsave(&parent_ctx->lock, flags);
++ parent_ctx->rotate_disable = 1;
++ raw_spin_unlock_irqrestore(&parent_ctx->lock, flags);
++
+ list_for_each_entry(event, &parent_ctx->flexible_groups, group_entry) {
+ ret = inherit_task_group(event, parent, parent_ctx, child,
+ &inherited_all);
+@@ -5669,6 +5683,10 @@ int perf_event_init_task(struct task_str
+ break;
+ }
+
++ raw_spin_lock_irqsave(&parent_ctx->lock, flags);
++ parent_ctx->rotate_disable = 0;
++ raw_spin_unlock_irqrestore(&parent_ctx->lock, flags);
++
+ child_ctx = child->perf_event_ctxp;
+
+ if (child_ctx && inherited_all) {
--- /dev/null
+From c9e664f1fdf34aa8cede047b206deaa8f1945af0 Mon Sep 17 00:00:00 2001
+From: Rafael J. Wysocki <rjw@sisk.pl>
+Date: Fri, 3 Dec 2010 22:57:45 +0100
+Subject: PM / Hibernate: Fix memory corruption related to swap
+
+From: Rafael J. Wysocki <rjw@sisk.pl>
+
+commit c9e664f1fdf34aa8cede047b206deaa8f1945af0 upstream.
+
+There is a problem that swap pages allocated before the creation of
+a hibernation image can be released and used for storing the contents
+of different memory pages while the image is being saved. Since the
+kernel stored in the image doesn't know of that, it causes memory
+corruption to occur after resume from hibernation, especially on
+systems with relatively small RAM that need to swap often.
+
+This issue can be addressed by keeping the GFP_IOFS bits clear
+in gfp_allowed_mask during the entire hibernation, including the
+saving of the image, until the system is finally turned off or
+the hibernation is aborted. Unfortunately, for this purpose
+it's necessary to rework the way in which the hibernate and
+suspend code manipulates gfp_allowed_mask.
+
+This change is based on an earlier patch from Hugh Dickins.
+
+Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
+Reported-by: Ondrej Zary <linux@rainbow-software.org>
+Acked-by: Hugh Dickins <hughd@google.com>
+Reviewed-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ include/linux/gfp.h | 4 ++--
+ kernel/power/hibernate.c | 22 ++++++++++++----------
+ kernel/power/suspend.c | 5 ++---
+ kernel/power/user.c | 2 ++
+ mm/page_alloc.c | 19 ++++++++++++-------
+ 5 files changed, 30 insertions(+), 22 deletions(-)
+
+--- a/include/linux/gfp.h
++++ b/include/linux/gfp.h
+@@ -339,7 +339,7 @@ void drain_local_pages(void *dummy);
+
+ extern gfp_t gfp_allowed_mask;
+
+-extern void set_gfp_allowed_mask(gfp_t mask);
+-extern gfp_t clear_gfp_allowed_mask(gfp_t mask);
++extern void pm_restrict_gfp_mask(void);
++extern void pm_restore_gfp_mask(void);
+
+ #endif /* __LINUX_GFP_H */
+--- a/kernel/power/hibernate.c
++++ b/kernel/power/hibernate.c
+@@ -326,7 +326,6 @@ static int create_image(int platform_mod
+ int hibernation_snapshot(int platform_mode)
+ {
+ int error;
+- gfp_t saved_mask;
+
+ error = platform_begin(platform_mode);
+ if (error)
+@@ -338,7 +337,7 @@ int hibernation_snapshot(int platform_mo
+ goto Close;
+
+ suspend_console();
+- saved_mask = clear_gfp_allowed_mask(GFP_IOFS);
++ pm_restrict_gfp_mask();
+ error = dpm_suspend_start(PMSG_FREEZE);
+ if (error)
+ goto Recover_platform;
+@@ -347,7 +346,10 @@ int hibernation_snapshot(int platform_mo
+ goto Recover_platform;
+
+ error = create_image(platform_mode);
+- /* Control returns here after successful restore */
++ /*
++ * Control returns here (1) after the image has been created or the
++ * image creation has failed and (2) after a successful restore.
++ */
+
+ Resume_devices:
+ /* We may need to release the preallocated image pages here. */
+@@ -356,7 +358,10 @@ int hibernation_snapshot(int platform_mo
+
+ dpm_resume_end(in_suspend ?
+ (error ? PMSG_RECOVER : PMSG_THAW) : PMSG_RESTORE);
+- set_gfp_allowed_mask(saved_mask);
++
++ if (error || !in_suspend)
++ pm_restore_gfp_mask();
++
+ resume_console();
+ Close:
+ platform_end(platform_mode);
+@@ -451,17 +456,16 @@ static int resume_target_kernel(bool pla
+ int hibernation_restore(int platform_mode)
+ {
+ int error;
+- gfp_t saved_mask;
+
+ pm_prepare_console();
+ suspend_console();
+- saved_mask = clear_gfp_allowed_mask(GFP_IOFS);
++ pm_restrict_gfp_mask();
+ error = dpm_suspend_start(PMSG_QUIESCE);
+ if (!error) {
+ error = resume_target_kernel(platform_mode);
+ dpm_resume_end(PMSG_RECOVER);
+ }
+- set_gfp_allowed_mask(saved_mask);
++ pm_restore_gfp_mask();
+ resume_console();
+ pm_restore_console();
+ return error;
+@@ -475,7 +479,6 @@ int hibernation_restore(int platform_mod
+ int hibernation_platform_enter(void)
+ {
+ int error;
+- gfp_t saved_mask;
+
+ if (!hibernation_ops)
+ return -ENOSYS;
+@@ -491,7 +494,6 @@ int hibernation_platform_enter(void)
+
+ entering_platform_hibernation = true;
+ suspend_console();
+- saved_mask = clear_gfp_allowed_mask(GFP_IOFS);
+ error = dpm_suspend_start(PMSG_HIBERNATE);
+ if (error) {
+ if (hibernation_ops->recover)
+@@ -535,7 +537,6 @@ int hibernation_platform_enter(void)
+ Resume_devices:
+ entering_platform_hibernation = false;
+ dpm_resume_end(PMSG_RESTORE);
+- set_gfp_allowed_mask(saved_mask);
+ resume_console();
+
+ Close:
+@@ -643,6 +644,7 @@ int hibernate(void)
+ swsusp_free();
+ if (!error)
+ power_down();
++ pm_restore_gfp_mask();
+ } else {
+ pr_debug("PM: Image restored successfully.\n");
+ }
+--- a/kernel/power/suspend.c
++++ b/kernel/power/suspend.c
+@@ -197,7 +197,6 @@ static int suspend_enter(suspend_state_t
+ int suspend_devices_and_enter(suspend_state_t state)
+ {
+ int error;
+- gfp_t saved_mask;
+
+ if (!suspend_ops)
+ return -ENOSYS;
+@@ -208,7 +207,7 @@ int suspend_devices_and_enter(suspend_st
+ goto Close;
+ }
+ suspend_console();
+- saved_mask = clear_gfp_allowed_mask(GFP_IOFS);
++ pm_restrict_gfp_mask();
+ suspend_test_start();
+ error = dpm_suspend_start(PMSG_SUSPEND);
+ if (error) {
+@@ -225,7 +224,7 @@ int suspend_devices_and_enter(suspend_st
+ suspend_test_start();
+ dpm_resume_end(PMSG_RESUME);
+ suspend_test_finish("resume devices");
+- set_gfp_allowed_mask(saved_mask);
++ pm_restore_gfp_mask();
+ resume_console();
+ Close:
+ if (suspend_ops->end)
+--- a/kernel/power/user.c
++++ b/kernel/power/user.c
+@@ -263,6 +263,7 @@ static long snapshot_ioctl(struct file *
+ case SNAPSHOT_UNFREEZE:
+ if (!data->frozen || data->ready)
+ break;
++ pm_restore_gfp_mask();
+ thaw_processes();
+ usermodehelper_enable();
+ data->frozen = 0;
+@@ -275,6 +276,7 @@ static long snapshot_ioctl(struct file *
+ error = -EPERM;
+ break;
+ }
++ pm_restore_gfp_mask();
+ error = hibernation_snapshot(data->platform_support);
+ if (!error)
+ error = put_user(in_suspend, (int __user *)arg);
+--- a/mm/page_alloc.c
++++ b/mm/page_alloc.c
+@@ -103,19 +103,24 @@ gfp_t gfp_allowed_mask __read_mostly = G
+ * only be modified with pm_mutex held, unless the suspend/hibernate code is
+ * guaranteed not to run in parallel with that modification).
+ */
+-void set_gfp_allowed_mask(gfp_t mask)
++
++static gfp_t saved_gfp_mask;
++
++void pm_restore_gfp_mask(void)
+ {
+ WARN_ON(!mutex_is_locked(&pm_mutex));
+- gfp_allowed_mask = mask;
++ if (saved_gfp_mask) {
++ gfp_allowed_mask = saved_gfp_mask;
++ saved_gfp_mask = 0;
++ }
+ }
+
+-gfp_t clear_gfp_allowed_mask(gfp_t mask)
++void pm_restrict_gfp_mask(void)
+ {
+- gfp_t ret = gfp_allowed_mask;
+-
+ WARN_ON(!mutex_is_locked(&pm_mutex));
+- gfp_allowed_mask &= ~mask;
+- return ret;
++ WARN_ON(saved_gfp_mask);
++ saved_gfp_mask = gfp_allowed_mask;
++ gfp_allowed_mask &= ~GFP_IOFS;
+ }
+ #endif /* CONFIG_PM_SLEEP */
+
staging-frontier-fix-up-some-sysfs-attribute-permissions.patch
staging-rtl8187se-change-panic-to-warn-when-rf-switch-turned-off.patch
staging-batman-adv-ensure-that-eth_type_trans-gets-linear-memory.patch
+perf-fix-inherit-vs.-context-rotation-bug.patch
+arm-6456-1-fix-for-building-debug-with-sa11xx_base.c-as-a-module.patch
+arm-cns3xxx-fix-build-with-config_pci-y.patch
+pm-hibernate-fix-memory-corruption-related-to-swap.patch
+wmi-use-memcmp-instead-of-strncmp-to-compare-guids.patch
+nohz-s390-fix-arch_needs_cpu-return-value-on-offline-cpus.patch
+genirq-fix-incorrect-proc-spurious-output.patch
+net-truncate-recvfrom-and-sendto-length-to-int_max.patch
+net-limit-socket-i-o-iovec-total-length-to-int_max.patch
+input-i8042-add-sony-vaio-vpcz122gx-to-nomux-list.patch
+omap-dma-fix-buffering-disable-bit-setting-for-omap24xx.patch
+omap3-dma-errata-i541-sdma-fifo-draining-does-not-finish.patch
+memory-corruption-in-x.25-facilities-parsing.patch
+vlan-avoid-hwaccel-vlan-packets-when-vid-not-used.patch
+kbuild-use-getopt_long-not-its-_only-variant.patch
+filter-make-sure-filters-dont-read-uninitialized-memory.patch
+can-bcm-fix-minor-heap-overflow.patch
+x25-prevent-crashing-when-parsing-bad-x.25-facilities.patch
--- /dev/null
+From jesse@nicira.com Tue Dec 7 11:49:39 2010
+From: Jesse Gross <jesse@nicira.com>
+Date: Mon, 8 Nov 2010 13:23:01 -0800
+Subject: [stable] [PATCH 2.6.36 stable] vlan: Avoid hwaccel vlan packets when vid not used.
+To: stable@kernel.org
+Cc: netdev@vger.kernel.org, David Miller <davem@davemloft.net>
+Message-ID: <1289251381-6671-1-git-send-email-jesse@nicira.com>
+
+From: Jesse Gross <jesse@nicira.com>
+
+[This patch applies only to 2.6.36 stable. The problem was introduced
+in that release and is already fixed by larger changes to the vlan
+code in 2.6.37.]
+
+Normally hardware accelerated vlan packets are quickly dropped if
+there is no corresponding vlan device configured. The one exception
+is promiscuous mode, where we allow all of these packets through so
+they can be picked up by tcpdump. However, this behavior causes a
+crash if we actually try to receive these packets. This fixes that
+crash by ignoring packets with vids not corresponding to a configured
+device in the vlan hwaccel routines and then dropping them before they
+get to consumers in the network stack.
+
+
+Reported-by: Ben Greear <greearb@candelatech.com>
+Tested-by: Nikola Ciprich <extmaillist@linuxbox.cz>
+Signed-off-by: Jesse Gross <jesse@nicira.com>
+Acked-by: David Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ net/8021q/vlan_core.c | 3 +++
+ net/core/dev.c | 13 +++++++++++++
+ 2 files changed, 16 insertions(+)
+
+--- a/net/8021q/vlan_core.c
++++ b/net/8021q/vlan_core.c
+@@ -43,6 +43,9 @@ int vlan_hwaccel_do_receive(struct sk_bu
+ struct net_device *dev = skb->dev;
+ struct vlan_rx_stats *rx_stats;
+
++ if (unlikely(!is_vlan_dev(dev)))
++ return 0;
++
+ skb->dev = vlan_dev_info(dev)->real_dev;
+ netif_nit_deliver(skb);
+
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -2891,6 +2891,19 @@ static int __netif_receive_skb(struct sk
+ ncls:
+ #endif
+
++ /* If we got this far with a hardware accelerated VLAN tag, it means
++ * that we were put in promiscuous mode but nobody is interested in
++ * this vid. Drop the packet now to prevent it from getting propagated
++ * to other parts of the stack that won't know how to deal with packets
++ * tagged in this manner.
++ */
++ if (unlikely(vlan_tx_tag_present(skb))) {
++ if (pt_prev)
++ ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
++ kfree_skb(skb);
++ goto out;
++ }
++
+ /* Handle special case of bridge or macvlan */
+ rx_handler = rcu_dereference(skb->dev->rx_handler);
+ if (rx_handler) {
--- /dev/null
+From 8b14d7b22c61f17ccb869e0047d9df6dd9f50a9f Mon Sep 17 00:00:00 2001
+From: Thadeu Lima de Souza Cascardo <cascardo@holoscopio.com>
+Date: Sun, 28 Nov 2010 19:46:50 -0200
+Subject: wmi: use memcmp instead of strncmp to compare GUIDs
+
+From: Thadeu Lima de Souza Cascardo <cascardo@holoscopio.com>
+
+commit 8b14d7b22c61f17ccb869e0047d9df6dd9f50a9f upstream.
+
+While looking for the duplicates in /sys/class/wmi/, I couldn't find
+them. The code that looks for duplicates uses strncmp in a binary GUID,
+which may contain zero bytes. The right function is memcmp, which is
+also used in another section of wmi code.
+
+It was finding 49142400-C6A3-40FA-BADB-8A2652834100 as a duplicate of
+39142400-C6A3-40FA-BADB-8A2652834100. Since the first byte is the fourth
+printed, they were found as equal by strncmp.
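+
+A small userspace illustration of why strncmp() is wrong here (only the
+first four bytes of each GUID are spelled out; the remaining bytes are
+zeroed for brevity):
+
+	#include <stdio.h>
+	#include <string.h>
+
+	int main(void)
+	{
+		/* binary (mixed-endian) layout of the two GUIDs above:
+		 * both start with a 0x00 byte */
+		unsigned char a[16] = { 0x00, 0x24, 0x14, 0x49 };	/* 49142400-... */
+		unsigned char b[16] = { 0x00, 0x24, 0x14, 0x39 };	/* 39142400-... */
+
+		/* strncmp() stops at the first NUL, so these compare equal */
+		printf("strncmp: %d\n", strncmp((char *)a, (char *)b, 16));
+		/* memcmp() looks at all 16 bytes and sees the difference */
+		printf("memcmp:  %d\n", memcmp(a, b, 16));
+		return 0;
+	}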
+
+Signed-off-by: Thadeu Lima de Souza Cascardo <cascardo@holoscopio.com>
+Signed-off-by: Matthew Garrett <mjg@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/platform/x86/wmi.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/platform/x86/wmi.c
++++ b/drivers/platform/x86/wmi.c
+@@ -801,7 +801,7 @@ static bool guid_already_parsed(const ch
+ wblock = list_entry(p, struct wmi_block, list);
+ gblock = &wblock->gblock;
+
+- if (strncmp(gblock->guid, guid_string, 16) == 0)
++ if (memcmp(gblock->guid, guid_string, 16) == 0)
+ return true;
+ }
+ return false;
--- /dev/null
+From 5ef41308f94dcbb3b7afc56cdef1c2ba53fa5d2f Mon Sep 17 00:00:00 2001
+From: Dan Rosenberg <drosenberg@vsecurity.com>
+Date: Fri, 12 Nov 2010 12:44:42 -0800
+Subject: x25: Prevent crashing when parsing bad X.25 facilities
+
+From: Dan Rosenberg <drosenberg@vsecurity.com>
+
+commit 5ef41308f94dcbb3b7afc56cdef1c2ba53fa5d2f upstream.
+
+Now with improved comma support.
+
+On parsing malformed X.25 facilities, decrementing the remaining length
+may cause it to underflow. Since the length is an unsigned integer,
+this will result in the loop continuing until the kernel crashes.
+
+This patch adds checks to ensure decrementing the remaining length does
+not cause it to wrap around.
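+
+A self-contained illustration of the underflow (the variables mirror the
+parsing loop: len is the unsigned remaining length, fac_len stands in for
+the attacker-supplied p[1]):
+
+	#include <stdio.h>
+
+	int main(void)
+	{
+		unsigned int len = 2;		/* bytes left in the packet */
+		unsigned char fac_len = 200;	/* malformed facility length */
+
+		/* the old code decremented unconditionally ... */
+		unsigned int bad = len - (fac_len + 2);
+		printf("after underflow: %u\n", bad);	/* huge value, loop runs on */
+
+		/* ... the fix refuses the facility block up front */
+		if (len < (unsigned int)fac_len + 2)
+			puts("malformed facilities, stop parsing");
+		return 0;
+	}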
+
+Signed-off-by: Dan Rosenberg <drosenberg@vsecurity.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ net/x25/x25_facilities.c | 12 +++++++++---
+ 1 file changed, 9 insertions(+), 3 deletions(-)
+
+--- a/net/x25/x25_facilities.c
++++ b/net/x25/x25_facilities.c
+@@ -61,6 +61,8 @@ int x25_parse_facilities(struct sk_buff
+ while (len > 0) {
+ switch (*p & X25_FAC_CLASS_MASK) {
+ case X25_FAC_CLASS_A:
++ if (len < 2)
++ return 0;
+ switch (*p) {
+ case X25_FAC_REVERSE:
+ if((p[1] & 0x81) == 0x81) {
+@@ -104,6 +106,8 @@ int x25_parse_facilities(struct sk_buff
+ len -= 2;
+ break;
+ case X25_FAC_CLASS_B:
++ if (len < 3)
++ return 0;
+ switch (*p) {
+ case X25_FAC_PACKET_SIZE:
+ facilities->pacsize_in = p[1];
+@@ -125,6 +129,8 @@ int x25_parse_facilities(struct sk_buff
+ len -= 3;
+ break;
+ case X25_FAC_CLASS_C:
++ if (len < 4)
++ return 0;
+ printk(KERN_DEBUG "X.25: unknown facility %02X, "
+ "values %02X, %02X, %02X\n",
+ p[0], p[1], p[2], p[3]);
+@@ -132,6 +138,8 @@ int x25_parse_facilities(struct sk_buff
+ len -= 4;
+ break;
+ case X25_FAC_CLASS_D:
++ if (len < p[1] + 2)
++ return 0;
+ switch (*p) {
+ case X25_FAC_CALLING_AE:
+ if (p[1] > X25_MAX_DTE_FACIL_LEN || p[1] <= 1)
+@@ -149,9 +157,7 @@ int x25_parse_facilities(struct sk_buff
+ break;
+ default:
+ printk(KERN_DEBUG "X.25: unknown facility %02X,"
+- "length %d, values %02X, %02X, "
+- "%02X, %02X\n",
+- p[0], p[1], p[2], p[3], p[4], p[5]);
++ "length %d\n", p[0], p[1]);
+ break;
+ }
+ len -= p[1] + 2;