--- /dev/null
+From 1daaae8fa4afe3df78ca34e724ed7e8187e4eb32 Mon Sep 17 00:00:00 2001
+From: Jeff Layton <jlayton@redhat.com>
+Date: Wed, 21 Mar 2012 06:30:40 -0400
+Subject: cifs: fix issue mounting of DFS ROOT when redirecting from one domain controller to the next
+
+From: Jeff Layton <jlayton@redhat.com>
+
+commit 1daaae8fa4afe3df78ca34e724ed7e8187e4eb32 upstream.
+
+This patch fixes an issue when cifs_mount receives a
+STATUS_BAD_NETWORK_NAME error during cifs_get_tcon but is able to
+continue after a DFS ROOT referral. In this case, the return code
+variable is not reset prior to trying to mount from the system referred
+to. Thus, is_path_accessible is not executed and the final DFS referral
+is not performed causing a mount error.
+
+Use case: In DNS, example.com resolves to the secondary AD server
+ad2.example.com Our primary domain controller is ad1.example.com and has
+a DFS redirection set up from \\ad1\share\Users to \\files\share\Users.
+Mounting \\example.com\share\Users fails.
+
+Regression introduced by commit 724d9f1.
+
+Reviewed-by: Pavel Shilovsky <piastry@etersoft.ru>
+Signed-off-by: Thomas Hadig <thomas@intapp.com>
+Signed-off-by: Jeff Layton <jlayton@redhat.com>
+Signed-off-by: Steve French <sfrench@us.ibm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/cifs/connect.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/fs/cifs/connect.c
++++ b/fs/cifs/connect.c
+@@ -3004,7 +3004,7 @@ cifs_get_volume_info(char *mount_data, c
+ int
+ cifs_mount(struct cifs_sb_info *cifs_sb, struct smb_vol *volume_info)
+ {
+- int rc = 0;
++ int rc;
+ int xid;
+ struct cifs_ses *pSesInfo;
+ struct cifs_tcon *tcon;
+@@ -3033,6 +3033,7 @@ try_mount_again:
+ FreeXid(xid);
+ }
+ #endif
++ rc = 0;
+ tcon = NULL;
+ pSesInfo = NULL;
+ srvTcp = NULL;
--- /dev/null
+From a1d0b5eebc4fd6e0edb02688b35f17f67f42aea5 Mon Sep 17 00:00:00 2001
+From: Trond Myklebust <Trond.Myklebust@netapp.com>
+Date: Mon, 5 Mar 2012 19:56:44 -0500
+Subject: NFS: Properly handle the case where the delegation is revoked
+
+From: Trond Myklebust <Trond.Myklebust@netapp.com>
+
+commit a1d0b5eebc4fd6e0edb02688b35f17f67f42aea5 upstream.
+
+If we know that the delegation stateid is bad or revoked, we need to
+remove that delegation as soon as possible, and then mark all the
+stateids that relied on that delegation for recovery. We cannot use
+the delegation as part of the recovery process.
+
+Also note that NFSv4.1 uses a different error code (NFS4ERR_DELEG_REVOKED)
+to indicate that the delegation was revoked.
+
+Finally, ensure that setlk() and setattr() can both recover safely from
+a revoked delegation.
+
+Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/nfs/delegation.c | 11 +++++++++++
+ fs/nfs/delegation.h | 1 +
+ fs/nfs/nfs4_fs.h | 2 ++
+ fs/nfs/nfs4proc.c | 18 ++++++++++++++++--
+ fs/nfs/nfs4state.c | 29 +++++++++++++++++++++++++++--
+ 5 files changed, 57 insertions(+), 4 deletions(-)
+
+--- a/fs/nfs/delegation.c
++++ b/fs/nfs/delegation.c
+@@ -466,6 +466,17 @@ static void nfs_delegation_run_state_man
+ nfs4_schedule_state_manager(clp);
+ }
+
++void nfs_remove_bad_delegation(struct inode *inode)
++{
++ struct nfs_delegation *delegation;
++
++ delegation = nfs_detach_delegation(NFS_I(inode), NFS_SERVER(inode));
++ if (delegation) {
++ nfs_inode_find_state_and_recover(inode, &delegation->stateid);
++ nfs_free_delegation(delegation);
++ }
++}
++
+ /**
+ * nfs_expire_all_delegation_types
+ * @clp: client to process
+--- a/fs/nfs/delegation.h
++++ b/fs/nfs/delegation.h
+@@ -45,6 +45,7 @@ void nfs_expire_unreferenced_delegations
+ void nfs_handle_cb_pathdown(struct nfs_client *clp);
+ int nfs_client_return_marked_delegations(struct nfs_client *clp);
+ int nfs_delegations_present(struct nfs_client *clp);
++void nfs_remove_bad_delegation(struct inode *inode);
+
+ void nfs_delegation_mark_reclaim(struct nfs_client *clp);
+ void nfs_delegation_reap_unclaimed(struct nfs_client *clp);
+--- a/fs/nfs/nfs4_fs.h
++++ b/fs/nfs/nfs4_fs.h
+@@ -344,6 +344,8 @@ extern void nfs4_put_open_state(struct n
+ extern void nfs4_close_state(struct path *, struct nfs4_state *, fmode_t);
+ extern void nfs4_close_sync(struct path *, struct nfs4_state *, fmode_t);
+ extern void nfs4_state_set_mode_locked(struct nfs4_state *, fmode_t);
++extern void nfs_inode_find_state_and_recover(struct inode *inode,
++ const nfs4_stateid *stateid);
+ extern void nfs4_schedule_lease_recovery(struct nfs_client *);
+ extern void nfs4_schedule_state_manager(struct nfs_client *);
+ extern void nfs4_schedule_stateid_recovery(const struct nfs_server *, struct nfs4_state *);
+--- a/fs/nfs/nfs4proc.c
++++ b/fs/nfs/nfs4proc.c
+@@ -260,8 +260,11 @@ static int nfs4_handle_exception(struct
+ switch(errorcode) {
+ case 0:
+ return 0;
++ case -NFS4ERR_DELEG_REVOKED:
+ case -NFS4ERR_ADMIN_REVOKED:
+ case -NFS4ERR_BAD_STATEID:
++ if (state != NULL)
++ nfs_remove_bad_delegation(state->inode);
+ case -NFS4ERR_OPENMODE:
+ if (state == NULL)
+ break;
+@@ -1305,8 +1308,11 @@ int nfs4_open_delegation_recall(struct n
+ * The show must go on: exit, but mark the
+ * stateid as needing recovery.
+ */
++ case -NFS4ERR_DELEG_REVOKED:
+ case -NFS4ERR_ADMIN_REVOKED:
+ case -NFS4ERR_BAD_STATEID:
++ nfs_inode_find_state_and_recover(state->inode,
++ stateid);
+ nfs4_schedule_stateid_recovery(server, state);
+ case -EKEYEXPIRED:
+ /*
+@@ -1862,7 +1868,9 @@ static int nfs4_do_setattr(struct inode
+ struct nfs4_state *state)
+ {
+ struct nfs_server *server = NFS_SERVER(inode);
+- struct nfs4_exception exception = { };
++ struct nfs4_exception exception = {
++ .state = state,
++ };
+ int err;
+ do {
+ err = nfs4_handle_exception(server,
+@@ -3678,8 +3686,11 @@ nfs4_async_handle_error(struct rpc_task
+ if (task->tk_status >= 0)
+ return 0;
+ switch(task->tk_status) {
++ case -NFS4ERR_DELEG_REVOKED:
+ case -NFS4ERR_ADMIN_REVOKED:
+ case -NFS4ERR_BAD_STATEID:
++ if (state != NULL)
++ nfs_remove_bad_delegation(state->inode);
+ case -NFS4ERR_OPENMODE:
+ if (state == NULL)
+ break;
+@@ -4484,7 +4495,9 @@ out:
+
+ static int nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock *request)
+ {
+- struct nfs4_exception exception = { };
++ struct nfs4_exception exception = {
++ .state = state,
++ };
+ int err;
+
+ do {
+@@ -4577,6 +4590,7 @@ int nfs4_lock_delegation_recall(struct n
+ * The show must go on: exit, but mark the
+ * stateid as needing recovery.
+ */
++ case -NFS4ERR_DELEG_REVOKED:
+ case -NFS4ERR_ADMIN_REVOKED:
+ case -NFS4ERR_BAD_STATEID:
+ case -NFS4ERR_OPENMODE:
+--- a/fs/nfs/nfs4state.c
++++ b/fs/nfs/nfs4state.c
+@@ -1065,12 +1065,37 @@ void nfs4_schedule_stateid_recovery(cons
+ {
+ struct nfs_client *clp = server->nfs_client;
+
+- if (test_and_clear_bit(NFS_DELEGATED_STATE, &state->flags))
+- nfs_async_inode_return_delegation(state->inode, &state->stateid);
+ nfs4_state_mark_reclaim_nograce(clp, state);
+ nfs4_schedule_state_manager(clp);
+ }
+
++void nfs_inode_find_state_and_recover(struct inode *inode,
++ const nfs4_stateid *stateid)
++{
++ struct nfs_client *clp = NFS_SERVER(inode)->nfs_client;
++ struct nfs_inode *nfsi = NFS_I(inode);
++ struct nfs_open_context *ctx;
++ struct nfs4_state *state;
++ bool found = false;
++
++ spin_lock(&inode->i_lock);
++ list_for_each_entry(ctx, &nfsi->open_files, list) {
++ state = ctx->state;
++ if (state == NULL)
++ continue;
++ if (!test_bit(NFS_DELEGATED_STATE, &state->flags))
++ continue;
++ if (memcmp(state->stateid.data, stateid->data, sizeof(state->stateid.data)) != 0)
++ continue;
++ nfs4_state_mark_reclaim_nograce(clp, state);
++ found = true;
++ }
++ spin_unlock(&inode->i_lock);
++ if (found)
++ nfs4_schedule_state_manager(clp);
++}
++
++
+ static int nfs4_reclaim_locks(struct nfs4_state *state, const struct nfs4_state_recovery_ops *ops)
+ {
+ struct inode *inode = state->inode;
--- /dev/null
+From 3114ea7a24d3264c090556a2444fc6d2c06176d4 Mon Sep 17 00:00:00 2001
+From: Trond Myklebust <Trond.Myklebust@netapp.com>
+Date: Wed, 7 Mar 2012 16:39:06 -0500
+Subject: NFSv4: Return the delegation if the server returns NFS4ERR_OPENMODE
+
+From: Trond Myklebust <Trond.Myklebust@netapp.com>
+
+commit 3114ea7a24d3264c090556a2444fc6d2c06176d4 upstream.
+
+If a setattr() fails because of an NFS4ERR_OPENMODE error, it is
+probably due to us holding a read delegation. Ensure that the
+recovery routines return that delegation in this case.
+
+Reported-by: Miklos Szeredi <miklos@szeredi.hu>
+Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/nfs/nfs4_fs.h | 1 +
+ fs/nfs/nfs4proc.c | 13 ++++++++++++-
+ 2 files changed, 13 insertions(+), 1 deletion(-)
+
+--- a/fs/nfs/nfs4_fs.h
++++ b/fs/nfs/nfs4_fs.h
+@@ -209,6 +209,7 @@ struct nfs4_exception {
+ long timeout;
+ int retry;
+ struct nfs4_state *state;
++ struct inode *inode;
+ };
+
+ struct nfs4_state_recovery_ops {
+--- a/fs/nfs/nfs4proc.c
++++ b/fs/nfs/nfs4proc.c
+@@ -254,18 +254,28 @@ static int nfs4_handle_exception(struct
+ {
+ struct nfs_client *clp = server->nfs_client;
+ struct nfs4_state *state = exception->state;
++ struct inode *inode = exception->inode;
+ int ret = errorcode;
+
+ exception->retry = 0;
+ switch(errorcode) {
+ case 0:
+ return 0;
++ case -NFS4ERR_OPENMODE:
++ if (nfs_have_delegation(inode, FMODE_READ)) {
++ nfs_inode_return_delegation(inode);
++ exception->retry = 1;
++ return 0;
++ }
++ if (state == NULL)
++ break;
++ nfs4_schedule_stateid_recovery(server, state);
++ goto wait_on_recovery;
+ case -NFS4ERR_DELEG_REVOKED:
+ case -NFS4ERR_ADMIN_REVOKED:
+ case -NFS4ERR_BAD_STATEID:
+ if (state != NULL)
+ nfs_remove_bad_delegation(state->inode);
+- case -NFS4ERR_OPENMODE:
+ if (state == NULL)
+ break;
+ nfs4_schedule_stateid_recovery(server, state);
+@@ -1870,6 +1880,7 @@ static int nfs4_do_setattr(struct inode
+ struct nfs_server *server = NFS_SERVER(inode);
+ struct nfs4_exception exception = {
+ .state = state,
++ .inode = inode,
+ };
+ int err;
+ do {
video-uvesafb-fix-oops-that-uvesafb-try-to-execute-nx-protected-page.patch
kvm-x86-extend-struct-x86_emulate_ops-with-get_cpuid.patch
kvm-x86-fix-missing-checks-in-syscall-emulation.patch
+nfs-properly-handle-the-case-where-the-delegation-is-revoked.patch
+nfsv4-return-the-delegation-if-the-server-returns-nfs4err_openmode.patch
+xfs-fix-inode-lookup-race.patch
+cifs-fix-issue-mounting-of-dfs-root-when-redirecting-from-one-domain-controller-to-the-next.patch
+ubi-fix-error-handling-in-ubi_scan.patch
+ubi-fix-eraseblock-picking-criteria.patch
+sunrpc-we-must-not-use-list_for_each_entry_safe-in-rpc_wake_up.patch
+usbnet-increase-urb-reference-count-before-usb_unlink_urb.patch
+usbnet-don-t-clear-urb-dev-in-tx_complete.patch
--- /dev/null
+From 540a0f7584169651f485e8ab67461fcb06934e38 Mon Sep 17 00:00:00 2001
+From: Trond Myklebust <Trond.Myklebust@netapp.com>
+Date: Mon, 19 Mar 2012 13:39:35 -0400
+Subject: SUNRPC: We must not use list_for_each_entry_safe() in rpc_wake_up()
+
+From: Trond Myklebust <Trond.Myklebust@netapp.com>
+
+commit 540a0f7584169651f485e8ab67461fcb06934e38 upstream.
+
+The problem is that for the case of priority queues, we
+have to assume that __rpc_remove_wait_queue_priority will move new
+elements from the tk_wait.links lists into the queue->tasks[] list.
+We therefore cannot use list_for_each_entry_safe() on queue->tasks[],
+since that will skip these new tasks that __rpc_remove_wait_queue_priority
+is adding.
+
+Without this fix, rpc_wake_up and rpc_wake_up_status will both fail
+to wake up all functions on priority wait queues, which can result
+in some nasty hangs.
+
+Reported-by: Andy Adamson <andros@netapp.com>
+Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ net/sunrpc/sched.c | 15 +++++++++++----
+ 1 file changed, 11 insertions(+), 4 deletions(-)
+
+--- a/net/sunrpc/sched.c
++++ b/net/sunrpc/sched.c
+@@ -480,14 +480,18 @@ EXPORT_SYMBOL_GPL(rpc_wake_up_next);
+ */
+ void rpc_wake_up(struct rpc_wait_queue *queue)
+ {
+- struct rpc_task *task, *next;
+ struct list_head *head;
+
+ spin_lock_bh(&queue->lock);
+ head = &queue->tasks[queue->maxpriority];
+ for (;;) {
+- list_for_each_entry_safe(task, next, head, u.tk_wait.list)
++ while (!list_empty(head)) {
++ struct rpc_task *task;
++ task = list_first_entry(head,
++ struct rpc_task,
++ u.tk_wait.list);
+ rpc_wake_up_task_queue_locked(queue, task);
++ }
+ if (head == &queue->tasks[0])
+ break;
+ head--;
+@@ -505,13 +509,16 @@ EXPORT_SYMBOL_GPL(rpc_wake_up);
+ */
+ void rpc_wake_up_status(struct rpc_wait_queue *queue, int status)
+ {
+- struct rpc_task *task, *next;
+ struct list_head *head;
+
+ spin_lock_bh(&queue->lock);
+ head = &queue->tasks[queue->maxpriority];
+ for (;;) {
+- list_for_each_entry_safe(task, next, head, u.tk_wait.list) {
++ while (!list_empty(head)) {
++ struct rpc_task *task;
++ task = list_first_entry(head,
++ struct rpc_task,
++ u.tk_wait.list);
+ task->tk_status = status;
+ rpc_wake_up_task_queue_locked(queue, task);
+ }
--- /dev/null
+From 7eb3aa65853e1b223bfc786b023b702018cb76c0 Mon Sep 17 00:00:00 2001
+From: Artem Bityutskiy <artem.bityutskiy@linux.intel.com>
+Date: Wed, 7 Mar 2012 19:08:36 +0200
+Subject: UBI: fix eraseblock picking criteria
+
+From: Artem Bityutskiy <artem.bityutskiy@linux.intel.com>
+
+commit 7eb3aa65853e1b223bfc786b023b702018cb76c0 upstream.
+
+The 'find_wl_entry()' function expects the maximum difference as the second
+argument, not the maximum absolute value. So the "unknown" eraseblock picking
+was incorrect, as Shmulik Ladkani spotted. This patch fixes the issue.
+
+Reported-by: Shmulik Ladkani <shmulik.ladkani@gmail.com>
+Signed-off-by: Artem Bityutskiy <artem.bityutskiy@linux.intel.com>
+Reviewed-by: Shmulik Ladkani <shmulik.ladkani@gmail.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/mtd/ubi/wl.c | 10 ++++------
+ 1 file changed, 4 insertions(+), 6 deletions(-)
+
+--- a/drivers/mtd/ubi/wl.c
++++ b/drivers/mtd/ubi/wl.c
+@@ -386,7 +386,7 @@ static struct ubi_wl_entry *find_wl_entr
+ */
+ int ubi_wl_get_peb(struct ubi_device *ubi, int dtype)
+ {
+- int err, medium_ec;
++ int err;
+ struct ubi_wl_entry *e, *first, *last;
+
+ ubi_assert(dtype == UBI_LONGTERM || dtype == UBI_SHORTTERM ||
+@@ -424,7 +424,7 @@ retry:
+ * For unknown data we pick a physical eraseblock with medium
+ * erase counter. But we by no means can pick a physical
+ * eraseblock with erase counter greater or equivalent than the
+- * lowest erase counter plus %WL_FREE_MAX_DIFF.
++ * lowest erase counter plus %WL_FREE_MAX_DIFF/2.
+ */
+ first = rb_entry(rb_first(&ubi->free), struct ubi_wl_entry,
+ u.rb);
+@@ -433,10 +433,8 @@ retry:
+ if (last->ec - first->ec < WL_FREE_MAX_DIFF)
+ e = rb_entry(ubi->free.rb_node,
+ struct ubi_wl_entry, u.rb);
+- else {
+- medium_ec = (first->ec + WL_FREE_MAX_DIFF)/2;
+- e = find_wl_entry(&ubi->free, medium_ec);
+- }
++ else
++ e = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF/2);
+ break;
+ case UBI_SHORTTERM:
+ /*
--- /dev/null
+From a29852be492d61001d86c6ebf5fff9b93d7b4be9 Mon Sep 17 00:00:00 2001
+From: Richard Weinberger <richard@nod.at>
+Date: Mon, 30 Jan 2012 18:20:13 +0100
+Subject: UBI: fix error handling in ubi_scan()
+
+From: Richard Weinberger <richard@nod.at>
+
+commit a29852be492d61001d86c6ebf5fff9b93d7b4be9 upstream.
+
+Two bad things can happen in ubi_scan():
+1. If kmem_cache_create() fails we jump to out_si and call
+ ubi_scan_destroy_si() which calls kmem_cache_destroy().
+ But si->scan_leb_slab is NULL.
+2. If process_eb() fails we jump to out_vidh, call
+ kmem_cache_destroy() and ubi_scan_destroy_si() which calls
+ again kmem_cache_destroy().
+
+Signed-off-by: Richard Weinberger <richard@nod.at>
+Signed-off-by: Artem Bityutskiy <artem.bityutskiy@linux.intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/mtd/ubi/scan.c | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+--- a/drivers/mtd/ubi/scan.c
++++ b/drivers/mtd/ubi/scan.c
+@@ -1174,7 +1174,7 @@ struct ubi_scan_info *ubi_scan(struct ub
+
+ ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
+ if (!ech)
+- goto out_slab;
++ goto out_si;
+
+ vidh = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL);
+ if (!vidh)
+@@ -1235,8 +1235,6 @@ out_vidh:
+ ubi_free_vid_hdr(ubi, vidh);
+ out_ech:
+ kfree(ech);
+-out_slab:
+- kmem_cache_destroy(si->scan_leb_slab);
+ out_si:
+ ubi_scan_destroy_si(si);
+ return ERR_PTR(err);
+@@ -1325,7 +1323,9 @@ void ubi_scan_destroy_si(struct ubi_scan
+ }
+ }
+
+- kmem_cache_destroy(si->scan_leb_slab);
++ if (si->scan_leb_slab)
++ kmem_cache_destroy(si->scan_leb_slab);
++
+ kfree(si);
+ }
+
--- /dev/null
+From 5d5440a835710d09f0ef18da5000541ec98b537a Mon Sep 17 00:00:00 2001
+From: "tom.leiming@gmail.com" <tom.leiming@gmail.com>
+Date: Thu, 22 Mar 2012 03:22:38 +0000
+Subject: usbnet: don't clear urb->dev in tx_complete
+
+From: "tom.leiming@gmail.com" <tom.leiming@gmail.com>
+
+commit 5d5440a835710d09f0ef18da5000541ec98b537a upstream.
+
+URB unlinking is always racing with its completion and tx_complete
+may be called before or during running usb_unlink_urb, so tx_complete
+must not clear urb->dev since it will be used in unlink path,
+otherwise invalid memory accesses or usb device leak may be caused
+inside usb_unlink_urb.
+
+Cc: Alan Stern <stern@rowland.harvard.edu>
+Cc: Oliver Neukum <oliver@neukum.org>
+Signed-off-by: Ming Lei <tom.leiming@gmail.com>
+Acked-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/net/usb/usbnet.c | 1 -
+ 1 file changed, 1 deletion(-)
+
+--- a/drivers/net/usb/usbnet.c
++++ b/drivers/net/usb/usbnet.c
+@@ -1033,7 +1033,6 @@ static void tx_complete (struct urb *urb
+ }
+
+ usb_autopm_put_interface_async(dev->intf);
+- urb->dev = NULL;
+ entry->state = tx_done;
+ defer_bh(dev, skb, &dev->txq);
+ }
--- /dev/null
+From 0956a8c20b23d429e79ff86d4325583fc06f9eb4 Mon Sep 17 00:00:00 2001
+From: "tom.leiming@gmail.com" <tom.leiming@gmail.com>
+Date: Thu, 22 Mar 2012 03:22:18 +0000
+Subject: usbnet: increase URB reference count before usb_unlink_urb
+
+From: "tom.leiming@gmail.com" <tom.leiming@gmail.com>
+
+commit 0956a8c20b23d429e79ff86d4325583fc06f9eb4 upstream.
+
+Commit 4231d47e6fe69f061f96c98c30eaf9fb4c14b96d(net/usbnet: avoid
+recursive locking in usbnet_stop()) fixes the recursive locking
+problem by releasing the skb queue lock, but it makes usb_unlink_urb
+racing with defer_bh, and the URB to being unlinked may be freed before
+or during calling usb_unlink_urb, so use-after-free problem may be
+triggered inside usb_unlink_urb.
+
+The patch fixes the use-after-free problem by increasing URB
+reference count with skb queue lock held before calling
+usb_unlink_urb, so the URB won't be freed until return from
+usb_unlink_urb.
+
+Cc: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Cc: Alan Stern <stern@rowland.harvard.edu>
+Cc: Oliver Neukum <oliver@neukum.org>
+Reported-by: Dave Jones <davej@redhat.com>
+Signed-off-by: Ming Lei <tom.leiming@gmail.com>
+Acked-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/net/usb/usbnet.c | 9 +++++++++
+ 1 file changed, 9 insertions(+)
+
+--- a/drivers/net/usb/usbnet.c
++++ b/drivers/net/usb/usbnet.c
+@@ -585,6 +585,14 @@ static int unlink_urbs (struct usbnet *d
+ entry = (struct skb_data *) skb->cb;
+ urb = entry->urb;
+
++ /*
++ * Get reference count of the URB to avoid it to be
++ * freed during usb_unlink_urb, which may trigger
++ * use-after-free problem inside usb_unlink_urb since
++ * usb_unlink_urb is always racing with .complete
++ * handler(include defer_bh).
++ */
++ usb_get_urb(urb);
+ spin_unlock_irqrestore(&q->lock, flags);
+ // during some PM-driven resume scenarios,
+ // these (async) unlinks complete immediately
+@@ -593,6 +601,7 @@ static int unlink_urbs (struct usbnet *d
+ netdev_dbg(dev->net, "unlink urb err, %d\n", retval);
+ else
+ count++;
++ usb_put_urb(urb);
+ spin_lock_irqsave(&q->lock, flags);
+ }
+ spin_unlock_irqrestore (&q->lock, flags);
--- /dev/null
+From f30d500f809eca67a21704347ab14bb35877b5ee Mon Sep 17 00:00:00 2001
+From: Dave Chinner <dchinner@redhat.com>
+Date: Wed, 7 Mar 2012 04:50:25 +0000
+Subject: xfs: fix inode lookup race
+
+From: Dave Chinner <dchinner@redhat.com>
+
+commit f30d500f809eca67a21704347ab14bb35877b5ee upstream.
+
+When we get concurrent lookups of the same inode that is not in the
+per-AG inode cache, there is a race condition that triggers warnings
+in unlock_new_inode() indicating that we are initialising an inode
+that isn't in the correct state for a new inode.
+
+When we do an inode lookup via a file handle or a bulkstat, we don't
+serialise lookups at a higher level through the dentry cache (i.e.
+pathless lookup), and so we can get concurrent lookups of the same
+inode.
+
+The race condition is between the insertion of the inode into the
+cache in the case of a cache miss and a concurrently lookup:
+
+Thread 1 Thread 2
+xfs_iget()
+ xfs_iget_cache_miss()
+ xfs_iread()
+ lock radix tree
+ radix_tree_insert()
+ rcu_read_lock
+ radix_tree_lookup
+ lock inode flags
+ XFS_INEW not set
+ igrab()
+ unlock inode flags
+ rcu_read_unlock
+ use uninitialised inode
+ .....
+ lock inode flags
+ set XFS_INEW
+ unlock inode flags
+ unlock radix tree
+ xfs_setup_inode()
+ inode flags = I_NEW
+ unlock_new_inode()
+ WARNING as inode flags != I_NEW
+
+This can lead to inode corruption, inode list corruption, etc, and
+is generally a bad thing to occur.
+
+Fix this by setting XFS_INEW before inserting the inode into the
+radix tree. This will ensure any concurrent lookup will find the new
+inode with XFS_INEW set and that forces the lookup to wait until the
+XFS_INEW flag is removed before allowing the lookup to succeed.
+
+Signed-off-by: Dave Chinner <dchinner@redhat.com>
+Reviewed-by: Christoph Hellwig <hch@lst.de>
+Signed-off-by: Ben Myers <bpm@sgi.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/xfs/xfs_iget.c | 18 ++++++++++++------
+ 1 file changed, 12 insertions(+), 6 deletions(-)
+
+--- a/fs/xfs/xfs_iget.c
++++ b/fs/xfs/xfs_iget.c
+@@ -356,9 +356,20 @@ xfs_iget_cache_miss(
+ BUG();
+ }
+
+- spin_lock(&pag->pag_ici_lock);
++ /*
++ * These values must be set before inserting the inode into the radix
++ * tree as the moment it is inserted a concurrent lookup (allowed by the
++ * RCU locking mechanism) can find it and that lookup must see that this
++ * is an inode currently under construction (i.e. that XFS_INEW is set).
++ * The ip->i_flags_lock that protects the XFS_INEW flag forms the
++ * memory barrier that ensures this detection works correctly at lookup
++ * time.
++ */
++ ip->i_udquot = ip->i_gdquot = NULL;
++ xfs_iflags_set(ip, XFS_INEW);
+
+ /* insert the new inode */
++ spin_lock(&pag->pag_ici_lock);
+ error = radix_tree_insert(&pag->pag_ici_root, agino, ip);
+ if (unlikely(error)) {
+ WARN_ON(error != -EEXIST);
+@@ -366,11 +377,6 @@ xfs_iget_cache_miss(
+ error = EAGAIN;
+ goto out_preload_end;
+ }
+-
+- /* These values _must_ be set before releasing the radix tree lock! */
+- ip->i_udquot = ip->i_gdquot = NULL;
+- xfs_iflags_set(ip, XFS_INEW);
+-
+ spin_unlock(&pag->pag_ici_lock);
+ radix_tree_preload_end();
+