--- /dev/null
+From e2dbe12557d85d81f4527879499f55681c3cca4f Mon Sep 17 00:00:00 2001
+From: Amerigo Wang <amwang@redhat.com>
+Date: Wed, 1 Jul 2009 01:06:26 -0400
+Subject: elf: fix one check-after-use
+
+From: Amerigo Wang <amwang@redhat.com>
+
+commit e2dbe12557d85d81f4527879499f55681c3cca4f upstream.
+
+Check the allocation for NULL before using it.
+
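+A minimal sketch of the pattern being fixed (plain userspace C for
+illustration only, not the kernel code): the allocation has to be
+checked for NULL before it is recorded or used anywhere.
+
+  #include <stdlib.h>
+
+  struct note { void *data; size_t size; };
+
+  static void fill_note(struct note *n, void *data, size_t size)
+  {
+          n->data = data;         /* records the pointer for later use */
+          n->size = size;
+  }
+
+  int main(void)
+  {
+          struct note info;
+          void *psinfo = malloc(128);
+
+          if (psinfo == NULL)             /* check first ... */
+                  return 1;
+          fill_note(&info, psinfo, 128);  /* ... only then hand it out */
+          free(psinfo);
+          return 0;
+  }
+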
+Signed-off-by: WANG Cong <amwang@redhat.com>
+Cc: Alexander Viro <viro@zeniv.linux.org.uk>
+Cc: David Howells <dhowells@redhat.com>
+Acked-by: Roland McGrath <roland@redhat.com>
+Acked-by: James Morris <jmorris@namei.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ fs/binfmt_elf.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/fs/binfmt_elf.c
++++ b/fs/binfmt_elf.c
+@@ -1517,11 +1517,11 @@ static int fill_note_info(struct elfhdr
+ info->thread = NULL;
+
+ psinfo = kmalloc(sizeof(*psinfo), GFP_KERNEL);
+- fill_note(&info->psinfo, "CORE", NT_PRPSINFO, sizeof(*psinfo), psinfo);
+-
+ if (psinfo == NULL)
+ return 0;
+
++ fill_note(&info->psinfo, "CORE", NT_PRPSINFO, sizeof(*psinfo), psinfo);
++
+ /*
+ * Figure out how many notes we're going to need for each thread.
+ */
--- /dev/null
+From 025dc740d01f99ccba945df1f9ef9e06b1c15d96 Mon Sep 17 00:00:00 2001
+From: Jiri Slaby <jirislaby@gmail.com>
+Date: Sat, 11 Jul 2009 13:42:37 +0200
+Subject: hwmon: (max6650) Fix lock imbalance
+
+From: Jiri Slaby <jirislaby@gmail.com>
+
+commit 025dc740d01f99ccba945df1f9ef9e06b1c15d96 upstream.
+
+Add the omitted update_lock release to one switch case in set_div().
+
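+A reduced sketch of the rule being enforced (a hypothetical pthreads
+example, not the driver code): every early return taken while the lock
+is held must drop it first, including the "illegal value" error path.
+
+  #include <pthread.h>
+  #include <errno.h>
+
+  static pthread_mutex_t update_lock = PTHREAD_MUTEX_INITIALIZER;
+  static int count;
+
+  static int set_div(int div)
+  {
+          pthread_mutex_lock(&update_lock);
+          switch (div) {
+          case 1:
+                  count = 0;
+                  break;
+          case 2:
+                  count = 1;
+                  break;
+          default:
+                  /* unlock before the early return, otherwise the next
+                   * caller blocks on update_lock forever */
+                  pthread_mutex_unlock(&update_lock);
+                  return -EINVAL;
+          }
+          pthread_mutex_unlock(&update_lock);
+          return 0;
+  }
+
+  int main(void)
+  {
+          return set_div(7) == -EINVAL ? 0 : 1;   /* exercises the error path */
+  }
+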
+Signed-off-by: Jiri Slaby <jirislaby@gmail.com>
+Acked-by: Hans J. Koch <hjk@linutronix.de>
+Signed-off-by: Jean Delvare <khali@linux-fr.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/hwmon/max6650.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/hwmon/max6650.c
++++ b/drivers/hwmon/max6650.c
+@@ -407,6 +407,7 @@ static ssize_t set_div(struct device *de
+ data->count = 3;
+ break;
+ default:
++ mutex_unlock(&data->update_lock);
+ dev_err(&client->dev,
+ "illegal value for fan divider (%d)\n", div);
+ return -EINVAL;
--- /dev/null
+From b8d966efd9a46a9a35beac50cbff6e30565125ef Mon Sep 17 00:00:00 2001
+From: NeilBrown <neilb@suse.de>
+Date: Wed, 1 Jul 2009 11:14:04 +1000
+Subject: md: avoid dereferencing NULL pointer when accessing suspend_* sysfs attributes.
+
+From: NeilBrown <neilb@suse.de>
+
+commit b8d966efd9a46a9a35beac50cbff6e30565125ef upstream.
+
+If we try to modify one of the md/ sysfs files suspend_lo or suspend_hi
+while the array is not active, we dereference a NULL pointer.
+Protect against that.
+
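+A stand-alone sketch of the guard (illustrative C, not the md code):
+with an inactive array the first pointer in the chain is NULL, so the
+test on the second pointer must be short-circuited behind a test on
+the first.
+
+  #include <stddef.h>
+
+  struct pers  { void (*quiesce)(int); };
+  struct mddev { struct pers *pers; };
+
+  static int suspend_store(struct mddev *mddev)
+  {
+          /* the left-hand test runs first, so ->quiesce is never read
+           * through a NULL ->pers */
+          if (mddev->pers == NULL || mddev->pers->quiesce == NULL)
+                  return -1;      /* -EINVAL in the kernel */
+          return 0;
+  }
+
+  int main(void)
+  {
+          struct mddev inactive = { NULL };
+          return suspend_store(&inactive) == -1 ? 0 : 1;
+  }
+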
+Signed-off-by: NeilBrown <neilb@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/md/md.c | 6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+--- a/drivers/md/md.c
++++ b/drivers/md/md.c
+@@ -3281,7 +3281,8 @@ suspend_lo_store(mddev_t *mddev, const c
+ char *e;
+ unsigned long long new = simple_strtoull(buf, &e, 10);
+
+- if (mddev->pers->quiesce == NULL)
++ if (mddev->pers == NULL ||
++ mddev->pers->quiesce == NULL)
+ return -EINVAL;
+ if (buf == e || (*e && *e != '\n'))
+ return -EINVAL;
+@@ -3309,7 +3310,8 @@ suspend_hi_store(mddev_t *mddev, const c
+ char *e;
+ unsigned long long new = simple_strtoull(buf, &e, 10);
+
+- if (mddev->pers->quiesce == NULL)
++ if (mddev->pers == NULL ||
++ mddev->pers->quiesce == NULL)
+ return -EINVAL;
+ if (buf == e || (*e && *e != '\n'))
+ return -EINVAL;
--- /dev/null
+From c8236db9cd7aa492dcfcdcca702638e704abed49 Mon Sep 17 00:00:00 2001
+From: Josef Bacik <josef@redhat.com>
+Date: Sun, 5 Jul 2009 12:08:18 -0700
+Subject: mm: mark page accessed before we write_end()
+
+From: Josef Bacik <josef@redhat.com>
+
+commit c8236db9cd7aa492dcfcdcca702638e704abed49 upstream.
+
+In testing a backport of the write_begin/write_end AOPs, a 10% re-read
+regression was noticed when running iozone. This regression was
+introduced because the old AOPs would always do a mark_page_accessed(page)
+after the commit_write, but when the new AOPs were introduced, the only
+place this was kept was in pagecache_write_end().
+
+This patch does the same thing in the generic case as what is done in
+pagecache_write_end(), which is just to mark the page accessed before we
+do write_end().
+
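+As a toy model (self-contained C, nothing here is the real pagecache
+API), the ordering the patch restores looks like this: the page is
+marked accessed after the data is copied in but before the write is
+committed.
+
+  #include <stdio.h>
+
+  struct page { int accessed; long data; };
+
+  static void mark_page_accessed(struct page *page)
+  {
+          page->accessed = 1;     /* promote the page in the (toy) LRU */
+  }
+
+  static int write_end(struct page *page, unsigned int copied)
+  {
+          /* commit point: by now the accessed bit must already be set */
+          printf("committed %u bytes, accessed=%d\n", copied, page->accessed);
+          return (int)copied;
+  }
+
+  int main(void)
+  {
+          struct page page = { 0, 0 };
+
+          page.data = 42;                 /* the "copy from user" step  */
+          mark_page_accessed(&page);      /* before write_end(), as     */
+                                          /* pagecache_write_end() does */
+          return write_end(&page, sizeof(page.data)) < 0;
+  }
+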
+Signed-off-by: Josef Bacik <jbacik@redhat.com>
+Acked-by: Nick Piggin <npiggin@suse.de>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ mm/filemap.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/mm/filemap.c
++++ b/mm/filemap.c
+@@ -2441,6 +2441,7 @@ again:
+ pagefault_enable();
+ flush_dcache_page(page);
+
++ mark_page_accessed(page);
+ status = a_ops->write_end(file, mapping, pos, bytes, copied,
+ page, fsdata);
+ if (unlikely(status < 0))
alsa-ca0106-fix-the-max-capture-buffer-size.patch
alsa-hda-fix-mute-control-with-some-alc262-models.patch
hid-hiddev-fix-lock-imbalance.patch
+elf-fix-one-check-after-use.patch
+hwmon-fix-lock-imbalance.patch
+md-avoid-dereferencing-null-pointer-when-accessing-suspend_-sysfs-attributes.patch
+mm-mark-page-accessed-before-we-write_end.patch
+x86-64-fix-bad_srat-to-clear-all-state.patch
+x86-don-t-use-access_ok-as-a-range-check-in-get_user_pages_fast.patch
+sunrpc-avoid-an-unnecessary-task-reschedule-on-enotconn.patch
+sunrpc-ensure-we-set-xprt_closing-only-after-we-ve-sent-a-tcp-fin.patch
+sunrpc-don-t-disconnect-if-a-connection-is-still-in-progress.patch
--- /dev/null
+From 15f081ca8ddfe150fb639c591b18944a539da0fc Mon Sep 17 00:00:00 2001
+From: Trond Myklebust <Trond.Myklebust@netapp.com>
+Date: Wed, 11 Mar 2009 14:37:57 -0400
+Subject: SUNRPC: Avoid an unnecessary task reschedule on ENOTCONN
+
+From: Trond Myklebust <Trond.Myklebust@netapp.com>
+
+commit 15f081ca8ddfe150fb639c591b18944a539da0fc upstream.
+
+If the socket is unconnected, and xprt_transmit() returns ENOTCONN, we
+currently give up the lock on the transport channel. Doing so means that
+the lock automatically gets assigned to the next task in the xprt->sending
+queue, and so that task needs to be woken up to do the actual connect.
+
+The following patch aims to avoid that unnecessary task switch.
+
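+The reshuffled switch relies on label placement and fall-through: -EAGAIN
+keeps the transport lock and does nothing more, the connection-error cases
+skip xprt_end_transmit() (so the lock is kept for the reconnect) but still
+share the re-encode step, and every other status does both.  A compilable
+toy of that control flow (illustration only, not the RPC code):
+
+  #include <stdio.h>
+
+  enum { OK, AGAIN, CONNREFUSED };
+
+  static void transmit_status(int status)
+  {
+          switch (status) {
+          case AGAIN:
+                  break;
+          default:
+                  printf("end_transmit\n");       /* releases the lock */
+                  /* fall through into the shared tail ... */
+          case CONNREFUSED:
+                  printf("force_reencode\n");     /* ... wanted by the error
+                                                   * cases too, without
+                                                   * dropping the lock */
+          }
+  }
+
+  int main(void)
+  {
+          transmit_status(OK);            /* end_transmit + force_reencode */
+          transmit_status(AGAIN);         /* nothing */
+          transmit_status(CONNREFUSED);   /* force_reencode only */
+          return 0;
+  }
+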
+Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ net/sunrpc/clnt.c | 26 ++++++++++++++++++--------
+ 1 file changed, 18 insertions(+), 8 deletions(-)
+
+--- a/net/sunrpc/clnt.c
++++ b/net/sunrpc/clnt.c
+@@ -1089,14 +1089,24 @@ static void
+ call_transmit_status(struct rpc_task *task)
+ {
+ task->tk_action = call_status;
+- /*
+- * Special case: if we've been waiting on the socket's write_space()
+- * callback, then don't call xprt_end_transmit().
+- */
+- if (task->tk_status == -EAGAIN)
+- return;
+- xprt_end_transmit(task);
+- rpc_task_force_reencode(task);
++ switch (task->tk_status) {
++ case -EAGAIN:
++ break;
++ default:
++ xprt_end_transmit(task);
++ /*
++ * Special cases: if we've been waiting on the
++ * socket's write_space() callback, or if the
++ * socket just returned a connection error,
++ * then hold onto the transport lock.
++ */
++ case -ECONNREFUSED:
++ case -ENOTCONN:
++ case -EHOSTDOWN:
++ case -EHOSTUNREACH:
++ case -ENETUNREACH:
++ rpc_task_force_reencode(task);
++ }
+ }
+
+ /*
--- /dev/null
+From 40d2549db5f515e415894def98b49db7d4c56714 Mon Sep 17 00:00:00 2001
+From: Trond Myklebust <Trond.Myklebust@netapp.com>
+Date: Wed, 11 Mar 2009 14:37:58 -0400
+Subject: SUNRPC: Don't disconnect if a connection is still in progress.
+
+From: Trond Myklebust <Trond.Myklebust@netapp.com>
+
+commit 40d2549db5f515e415894def98b49db7d4c56714 upstream.
+
+Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ net/sunrpc/xprtsock.c | 18 ++++++++++++++----
+ 1 file changed, 14 insertions(+), 4 deletions(-)
+
+--- a/net/sunrpc/xprtsock.c
++++ b/net/sunrpc/xprtsock.c
+@@ -1560,10 +1560,9 @@ out:
+ * We need to preserve the port number so the reply cache on the server can
+ * find our cached RPC replies when we get around to reconnecting.
+ */
+-static void xs_tcp_reuse_connection(struct rpc_xprt *xprt)
++static void xs_abort_connection(struct rpc_xprt *xprt, struct sock_xprt *transport)
+ {
+ int result;
+- struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
+ struct sockaddr any;
+
+ dprintk("RPC: disconnecting xprt %p to reuse port\n", xprt);
+@@ -1580,6 +1579,17 @@ static void xs_tcp_reuse_connection(stru
+ result);
+ }
+
++static void xs_tcp_reuse_connection(struct rpc_xprt *xprt, struct sock_xprt *transport)
++{
++ unsigned int state = transport->inet->sk_state;
++
++ if (state == TCP_CLOSE && transport->sock->state == SS_UNCONNECTED)
++ return;
++ if ((1 << state) & (TCPF_ESTABLISHED|TCPF_SYN_SENT))
++ return;
++ xs_abort_connection(xprt, transport);
++}
++
+ static int xs_tcp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock)
+ {
+ struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
+@@ -1650,7 +1660,7 @@ static void xs_tcp_connect_worker4(struc
+ }
+ } else
+ /* "close" the socket, preserving the local port */
+- xs_tcp_reuse_connection(xprt);
++ xs_tcp_reuse_connection(xprt, transport);
+
+ dprintk("RPC: worker connecting xprt %p to address: %s\n",
+ xprt, xprt->address_strings[RPC_DISPLAY_ALL]);
+@@ -1710,7 +1720,7 @@ static void xs_tcp_connect_worker6(struc
+ }
+ } else
+ /* "close" the socket, preserving the local port */
+- xs_tcp_reuse_connection(xprt);
++ xs_tcp_reuse_connection(xprt, transport);
+
+ dprintk("RPC: worker connecting xprt %p to address: %s\n",
+ xprt, xprt->address_strings[RPC_DISPLAY_ALL]);
--- /dev/null
+From 670f94573104b4a25525d3fcdcd6496c678df172 Mon Sep 17 00:00:00 2001
+From: Trond Myklebust <Trond.Myklebust@netapp.com>
+Date: Wed, 11 Mar 2009 14:37:58 -0400
+Subject: SUNRPC: Ensure we set XPRT_CLOSING only after we've sent a tcp FIN...
+
+From: Trond Myklebust <Trond.Myklebust@netapp.com>
+
+commit 670f94573104b4a25525d3fcdcd6496c678df172 upstream.
+
+...so that we can distinguish between when we need to shut down and when we
+don't. Also remove the call to xs_tcp_shutdown() from xs_tcp_connect(),
+since xprt_connect() makes the same test.
+
+Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ net/sunrpc/xprtsock.c | 5 +----
+ 1 file changed, 1 insertion(+), 4 deletions(-)
+
+--- a/net/sunrpc/xprtsock.c
++++ b/net/sunrpc/xprtsock.c
+@@ -1150,7 +1150,6 @@ static void xs_tcp_state_change(struct s
+ break;
+ case TCP_CLOSE_WAIT:
+ /* The server initiated a shutdown of the socket */
+- set_bit(XPRT_CLOSING, &xprt->state);
+ xprt_force_disconnect(xprt);
+ case TCP_SYN_SENT:
+ xprt->connect_cookie++;
+@@ -1163,6 +1162,7 @@ static void xs_tcp_state_change(struct s
+ xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO;
+ break;
+ case TCP_LAST_ACK:
++ set_bit(XPRT_CLOSING, &xprt->state);
+ smp_mb__before_clear_bit();
+ clear_bit(XPRT_CONNECTED, &xprt->state);
+ smp_mb__after_clear_bit();
+@@ -1780,9 +1780,6 @@ static void xs_tcp_connect(struct rpc_ta
+ {
+ struct rpc_xprt *xprt = task->tk_xprt;
+
+- /* Initiate graceful shutdown of the socket if not already done */
+- if (test_bit(XPRT_CONNECTED, &xprt->state))
+- xs_tcp_shutdown(xprt);
+ /* Exit if we need to wait for socket shutdown to complete */
+ if (test_bit(XPRT_CLOSING, &xprt->state))
+ return;
--- /dev/null
+From 429b2b319af3987e808c18f6b81313104caf782c Mon Sep 17 00:00:00 2001
+From: Andi Kleen <andi@firstfloor.org>
+Date: Sat, 18 Jul 2009 08:56:57 +0200
+Subject: x86-64: Fix bad_srat() to clear all state
+
+From: Andi Kleen <andi@firstfloor.org>
+
+commit 429b2b319af3987e808c18f6b81313104caf782c upstream.
+
+Need to clear both nodes and nodes_add state for start/end.
+
+Signed-off-by: Andi Kleen <ak@linux.intel.com>
+LKML-Reference: <20090718065657.GA2898@basil.fritz.box>
+Signed-off-by: H. Peter Anvin <hpa@zytor.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ arch/x86/mm/srat_64.c | 6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+--- a/arch/x86/mm/srat_64.c
++++ b/arch/x86/mm/srat_64.c
+@@ -87,8 +87,10 @@ static __init void bad_srat(void)
+ found_add_area = 0;
+ for (i = 0; i < MAX_LOCAL_APIC; i++)
+ apicid_to_node[i] = NUMA_NO_NODE;
+- for (i = 0; i < MAX_NUMNODES; i++)
+- nodes_add[i].start = nodes[i].end = 0;
++ for (i = 0; i < MAX_NUMNODES; i++) {
++ nodes[i].start = nodes[i].end = 0;
++ nodes_add[i].start = nodes_add[i].end = 0;
++ }
+ remove_all_active_ranges();
+ }
+
--- /dev/null
+From torvalds@linux-foundation.org Tue Jul 28 11:13:51 2009
+From: Linus Torvalds <torvalds@linux-foundation.org>
+Date: Mon, 22 Jun 2009 10:25:25 -0700 (PDT)
+Subject: x86: don't use 'access_ok()' as a range check in get_user_pages_fast()
+To: Greg KH <gregkh@suse.de>
+Cc: Ingo Molnar <mingo@elte.hu>, Andrew Morton <akpm@linux-foundation.org>, Hugh Dickins <hugh.dickins@tiscali.co.uk>, Chris Wright <chrisw@sous-sol.org>, Nick Piggin <npiggin@suse.de>, "H. Peter Anvin" <hpa@zytor.com>, Thomas Gleixner <tglx@linutronix.de>, Alan Cox <alan@lxorguk.ukuu.org.uk>, Peter Zijlstra <a.p.zijlstra@chello.nl>
+Message-ID: <alpine.LFD.2.01.0906221024140.3240@localhost.localdomain>
+
+From: Linus Torvalds <torvalds@linux-foundation.org>
+
+[ Upstream commit 7f8189068726492950bf1a2dcfd9b51314560abf - modified
+ for stable to not use the sloppy __VIRTUAL_MASK_SHIFT ]
+
+It's really not right to use 'access_ok()', since that is meant for the
+normal "get_user()" and "copy_from/to_user()" accesses, which are done
+through the TLB, rather than through the page tables.
+
+Why? access_ok() does both too few, and too many checks. Too many,
+because it is meant for regular kernel accesses that will not honor the
+'user' bit in the page tables, and because it honors the USER_DS vs
+KERNEL_DS distinction that we shouldn't care about in GUP. And too few,
+because it doesn't do the 'canonical' check on the address on x86-64,
+since the TLB will do that for us.
+
+So instead of using a function that isn't meant for this, and does
+something else and much more complicated, just do the real rules: we
+don't want the range to overflow, and on x86-64, we want it to be a
+canonical low address (on 32-bit, all addresses are canonical).
+
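+A compilable sketch of the replacement check (userspace illustration,
+with the 47-bit user-address limit hard-coded as in this stable
+backport rather than taken from __VIRTUAL_MASK_SHIFT):
+
+  #include <stdbool.h>
+  #include <stdint.h>
+  #include <stdio.h>
+
+  static bool gup_range_ok(uint64_t start, uint64_t len, bool is_64bit)
+  {
+          uint64_t end = start + len;
+
+          if (end < start)                /* the range wrapped around */
+                  return false;
+          if (is_64bit && (end >> 47))    /* not a canonical low address */
+                  return false;
+          return true;
+  }
+
+  int main(void)
+  {
+          /* ordinary user range: accepted */
+          printf("%d\n", gup_range_ok(0x00007f0000000000ULL, 4096, true));
+          /* kernel-half address: rejected on 64-bit */
+          printf("%d\n", gup_range_ok(0xffff800000000000ULL, 4096, true));
+          /* overflowing range: rejected everywhere */
+          printf("%d\n", gup_range_ok(UINT64_MAX, 4096, true));
+          return 0;
+  }
+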
+Acked-by: Ingo Molnar <mingo@elte.hu>
+Cc: H. Peter Anvin <hpa@zytor.com>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ arch/x86/mm/gup.c | 9 +++++++--
+ 1 file changed, 7 insertions(+), 2 deletions(-)
+
+--- a/arch/x86/mm/gup.c
++++ b/arch/x86/mm/gup.c
+@@ -231,10 +231,15 @@ int get_user_pages_fast(unsigned long st
+ start &= PAGE_MASK;
+ addr = start;
+ len = (unsigned long) nr_pages << PAGE_SHIFT;
++
+ end = start + len;
+- if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
+- start, len)))
++ if (end < start)
++ goto slow_irqon;
++
++#ifdef CONFIG_X86_64
++ if (end >> 47)
+ goto slow_irqon;
++#endif
+
+ /*
+ * XXX: batch / limit 'nr', to avoid large irq off latency