target-tcm_fc-fix-the-lockdep-warning-due-to-inconsistent-lock-state.patch
mfd-only-unregister-platform-devices-allocated-by-the-mfd-core.patch
ext4-fix-memory-leak-in-ext4_xattr_set_acl-s-error-path.patch
+sunrpc-ensure-that-we-free-the-rpc_task-after-cleanups-are-done.patch
+sunrpc-ensure-we-release-the-socket-write-lock-if-the-rpc_task-exits-early.patch
--- /dev/null
+From c6567ed1402c55e19b012e66a8398baec2a726f3 Mon Sep 17 00:00:00 2001
+From: Trond Myklebust <Trond.Myklebust@netapp.com>
+Date: Fri, 4 Jan 2013 12:23:21 -0500
+Subject: SUNRPC: Ensure that we free the rpc_task after cleanups are done
+
+From: Trond Myklebust <Trond.Myklebust@netapp.com>
+
+commit c6567ed1402c55e19b012e66a8398baec2a726f3 upstream.
+
+This patch ensures that we free the rpc_task after the cleanup callbacks
+are done in order to avoid a deadlock problem that can be triggered if
+the callback needs to wait for another workqueue item to complete.
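+
+A minimal, hypothetical sketch of the false dependency Tejun describes
+in the comment added below (foo_work_fn/foo_other_fn are illustrative
+only, not code from this patch; error handling omitted):
+
+struct foo {
+	struct work_struct work;
+};
+
+static DECLARE_COMPLETION(foo_done);
+
+static void foo_other_fn(struct work_struct *work)
+{
+	complete(&foo_done);
+	kfree(work);
+}
+
+static void foo_work_fn(struct work_struct *work)
+{
+	struct foo *f = container_of(work, struct foo, work);
+	struct work_struct *other;
+
+	kfree(f);			/* this work item's memory is now free */
+
+	other = kmalloc(sizeof(*other), GFP_KERNEL);
+	INIT_WORK(other, foo_other_fn);
+	queue_work(system_wq, other);	/* may reuse the address just freed */
+
+	/*
+	 * If "other" landed at the freed address, the workqueue treats it
+	 * as this still-running item and will not start it until
+	 * foo_work_fn() returns, so the wait below never completes.
+	 */
+	wait_for_completion(&foo_done);
+}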
+
+Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
+Cc: Weston Andros Adamson <dros@netapp.com>
+Cc: Tejun Heo <tj@kernel.org>
+Cc: Bruce Fields <bfields@fieldses.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ net/sunrpc/sched.c | 27 +++++++++++++++++++++++----
+ 1 file changed, 23 insertions(+), 4 deletions(-)
+
+--- a/net/sunrpc/sched.c
++++ b/net/sunrpc/sched.c
+@@ -844,16 +844,35 @@ struct rpc_task *rpc_new_task(const stru
+ return task;
+ }
+
++/*
++ * rpc_free_task - release rpc task and perform cleanups
++ *
++ * Note that we free up the rpc_task _after_ rpc_release_calldata()
++ * in order to work around a workqueue dependency issue.
++ *
++ * Tejun Heo states:
++ * "Workqueue currently considers two work items to be the same if they're
++ * on the same address and won't execute them concurrently - ie. it
++ * makes a work item which is queued again while being executed wait
++ * for the previous execution to complete.
++ *
++ * If a work function frees the work item, and then waits for an event
++ * which should be performed by another work item and *that* work item
++ * recycles the freed work item, it can create a false dependency loop.
++ * There really is no reliable way to detect this short of verifying
++ * every memory free."
++ *
++ */
+ static void rpc_free_task(struct rpc_task *task)
+ {
+- const struct rpc_call_ops *tk_ops = task->tk_ops;
+- void *calldata = task->tk_calldata;
++ unsigned short tk_flags = task->tk_flags;
+
+- if (task->tk_flags & RPC_TASK_DYNAMIC) {
++ rpc_release_calldata(task->tk_ops, task->tk_calldata);
++
++ if (tk_flags & RPC_TASK_DYNAMIC) {
+ dprintk("RPC: %5u freeing task\n", task->tk_pid);
+ mempool_free(task, rpc_task_mempool);
+ }
+- rpc_release_calldata(tk_ops, calldata);
+ }
+
+ static void rpc_async_release(struct work_struct *work)
--- /dev/null
+From 87ed50036b866db2ec2ba16b2a7aec4a2b0b7c39 Mon Sep 17 00:00:00 2001
+From: Trond Myklebust <Trond.Myklebust@netapp.com>
+Date: Mon, 7 Jan 2013 14:30:46 -0500
+Subject: SUNRPC: Ensure we release the socket write lock if the rpc_task exits early
+
+From: Trond Myklebust <Trond.Myklebust@netapp.com>
+
+commit 87ed50036b866db2ec2ba16b2a7aec4a2b0b7c39 upstream.
+
+If the rpc_task exits while holding the socket write lock before it has
+allocated an rpc slot, then the usual mechanism for releasing the write
+lock in xprt_release() is defeated.
+
+The problem occurs if the call to xprt_lock_write() initially fails, so
+that the rpc_task is put on the xprt->sending wait queue. If the task
+exits after being assigned the lock by __xprt_lock_write_func(), but
+before it has retried the call to xprt_lock_and_alloc_slot(), then
+it calls xprt_release() while holding the write lock, but will
+immediately exit due to the test for task->tk_rqstp != NULL.
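+
+Roughly, the failing sequence is (simplified illustration):
+
+ 1. The task calls xprt_lock_and_alloc_slot(); xprt_lock_write() fails
+    because another task holds the write lock, so the task is queued on
+    xprt->sending.
+ 2. The lock holder drops the lock; __xprt_lock_write_func() assigns it
+    to the queued task (xprt->snd_task = task) and wakes it up.
+ 3. The task exits (e.g. it is killed) before it retries
+    xprt_lock_and_alloc_slot(), so task->tk_rqstp is still NULL.
+ 4. Before this patch, nothing on the exit path releases the write lock:
+    the lock is only dropped once a request slot has been allocated, so
+    xprt->snd_task is never cleared and the transport stays locked.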
+
+Reported-by: Chris Perl <chris.perl@gmail.com>
+Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ net/sunrpc/sched.c | 3 +--
+ net/sunrpc/xprt.c | 12 ++++++++++--
+ 2 files changed, 11 insertions(+), 4 deletions(-)
+
+--- a/net/sunrpc/sched.c
++++ b/net/sunrpc/sched.c
+@@ -882,8 +882,7 @@ static void rpc_async_release(struct wor
+
+ static void rpc_release_resources_task(struct rpc_task *task)
+ {
+- if (task->tk_rqstp)
+- xprt_release(task);
++ xprt_release(task);
+ if (task->tk_msg.rpc_cred) {
+ put_rpccred(task->tk_msg.rpc_cred);
+ task->tk_msg.rpc_cred = NULL;
+--- a/net/sunrpc/xprt.c
++++ b/net/sunrpc/xprt.c
+@@ -1040,10 +1040,18 @@ static void xprt_request_init(struct rpc
+ void xprt_release(struct rpc_task *task)
+ {
+ struct rpc_xprt *xprt;
+- struct rpc_rqst *req;
++ struct rpc_rqst *req = task->tk_rqstp;
+
+- if (!(req = task->tk_rqstp))
++ if (req == NULL) {
++ if (task->tk_client) {
++ rcu_read_lock();
++ xprt = rcu_dereference(task->tk_client->cl_xprt);
++ if (xprt->snd_task == task)
++ xprt_release_write(xprt, task);
++ rcu_read_unlock();
++ }
+ return;
++ }
+
+ xprt = req->rq_xprt;
+ rpc_count_iostats(task);