git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
3.2-stable patches
author Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Fri, 23 Mar 2012 17:58:27 +0000 (10:58 -0700)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Fri, 23 Mar 2012 17:58:27 +0000 (10:58 -0700)
added patches:
cifs-fix-a-spurious-error-in-cifs_push_posix_locks.patch
cifs-fix-issue-mounting-of-dfs-root-when-redirecting-from-one-domain-controller-to-the-next.patch
cifs-respect-negotiated-maxmpxcount.patch
nfs-properly-handle-the-case-where-the-delegation-is-revoked.patch
nfsv4-return-the-delegation-if-the-server-returns-nfs4err_openmode.patch
sunrpc-we-must-not-use-list_for_each_entry_safe-in-rpc_wake_up.patch
ubi-fix-eraseblock-picking-criteria.patch
ubi-fix-error-handling-in-ubi_scan.patch
usbnet-don-t-clear-urb-dev-in-tx_complete.patch
usbnet-increase-urb-reference-count-before-usb_unlink_urb.patch
xfs-fix-inode-lookup-race.patch

12 files changed:
queue-3.2/cifs-fix-a-spurious-error-in-cifs_push_posix_locks.patch [new file with mode: 0644]
queue-3.2/cifs-fix-issue-mounting-of-dfs-root-when-redirecting-from-one-domain-controller-to-the-next.patch [new file with mode: 0644]
queue-3.2/cifs-respect-negotiated-maxmpxcount.patch [new file with mode: 0644]
queue-3.2/nfs-properly-handle-the-case-where-the-delegation-is-revoked.patch [new file with mode: 0644]
queue-3.2/nfsv4-return-the-delegation-if-the-server-returns-nfs4err_openmode.patch [new file with mode: 0644]
queue-3.2/series
queue-3.2/sunrpc-we-must-not-use-list_for_each_entry_safe-in-rpc_wake_up.patch [new file with mode: 0644]
queue-3.2/ubi-fix-eraseblock-picking-criteria.patch [new file with mode: 0644]
queue-3.2/ubi-fix-error-handling-in-ubi_scan.patch [new file with mode: 0644]
queue-3.2/usbnet-don-t-clear-urb-dev-in-tx_complete.patch [new file with mode: 0644]
queue-3.2/usbnet-increase-urb-reference-count-before-usb_unlink_urb.patch [new file with mode: 0644]
queue-3.2/xfs-fix-inode-lookup-race.patch [new file with mode: 0644]

diff --git a/queue-3.2/cifs-fix-a-spurious-error-in-cifs_push_posix_locks.patch b/queue-3.2/cifs-fix-a-spurious-error-in-cifs_push_posix_locks.patch
new file mode 100644 (file)
index 0000000..5f92c96
--- /dev/null
@@ -0,0 +1,68 @@
+From ce85852b90a214cf577fc1b4f49d99fd7e98784a Mon Sep 17 00:00:00 2001
+From: Pavel Shilovsky <piastry@etersoft.ru>
+Date: Sat, 17 Mar 2012 09:46:55 +0300
+Subject: CIFS: Fix a spurious error in cifs_push_posix_locks
+
+From: Pavel Shilovsky <piastry@etersoft.ru>
+
+commit ce85852b90a214cf577fc1b4f49d99fd7e98784a upstream.
+
+Signed-off-by: Pavel Shilovsky <piastry@etersoft.ru>
+Reviewed-by: Jeff Layton <jlayton@redhat.com>
+Reported-by: Ben Hutchings <ben@decadent.org.uk>
+Signed-off-by: Steve French <stevef@smf-gateway.(none)>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/cifs/file.c |   19 ++++++++++---------
+ 1 file changed, 10 insertions(+), 9 deletions(-)
+
+--- a/fs/cifs/file.c
++++ b/fs/cifs/file.c
+@@ -960,9 +960,9 @@ cifs_push_posix_locks(struct cifsFileInf
+       INIT_LIST_HEAD(&locks_to_send);
+       /*
+-       * Allocating count locks is enough because no locks can be added to
+-       * the list while we are holding cinode->lock_mutex that protects
+-       * locking operations of this inode.
++       * Allocating count locks is enough because no FL_POSIX locks can be
++       * added to the list while we are holding cinode->lock_mutex that
++       * protects locking operations of this inode.
+        */
+       for (; i < count; i++) {
+               lck = kmalloc(sizeof(struct lock_to_push), GFP_KERNEL);
+@@ -973,18 +973,20 @@ cifs_push_posix_locks(struct cifsFileInf
+               list_add_tail(&lck->llist, &locks_to_send);
+       }
+-      i = 0;
+       el = locks_to_send.next;
+       lock_flocks();
+       cifs_for_each_lock(cfile->dentry->d_inode, before) {
++              flock = *before;
++              if ((flock->fl_flags & FL_POSIX) == 0)
++                      continue;
+               if (el == &locks_to_send) {
+-                      /* something is really wrong */
++                      /*
++                       * The list ended. We don't have enough allocated
++                       * structures - something is really wrong.
++                       */
+                       cERROR(1, "Can't push all brlocks!");
+                       break;
+               }
+-              flock = *before;
+-              if ((flock->fl_flags & FL_POSIX) == 0)
+-                      continue;
+               length = 1 + flock->fl_end - flock->fl_start;
+               if (flock->fl_type == F_RDLCK || flock->fl_type == F_SHLCK)
+                       type = CIFS_RDLCK;
+@@ -996,7 +998,6 @@ cifs_push_posix_locks(struct cifsFileInf
+               lck->length = length;
+               lck->type = type;
+               lck->offset = flock->fl_start;
+-              i++;
+               el = el->next;
+       }
+       unlock_flocks();
diff --git a/queue-3.2/cifs-fix-issue-mounting-of-dfs-root-when-redirecting-from-one-domain-controller-to-the-next.patch b/queue-3.2/cifs-fix-issue-mounting-of-dfs-root-when-redirecting-from-one-domain-controller-to-the-next.patch
new file mode 100644 (file)
index 0000000..d5338fe
--- /dev/null
@@ -0,0 +1,52 @@
+From 1daaae8fa4afe3df78ca34e724ed7e8187e4eb32 Mon Sep 17 00:00:00 2001
+From: Jeff Layton <jlayton@redhat.com>
+Date: Wed, 21 Mar 2012 06:30:40 -0400
+Subject: cifs: fix issue mounting of DFS ROOT when redirecting from one domain controller to the next
+
+From: Jeff Layton <jlayton@redhat.com>
+
+commit 1daaae8fa4afe3df78ca34e724ed7e8187e4eb32 upstream.
+
+This patch fixes an issue when cifs_mount receives a
+STATUS_BAD_NETWORK_NAME error during cifs_get_tcon but is able to
+continue after a DFS ROOT referral. In this case, the return code
+variable is not reset prior to trying to mount from the system referred
+to. Thus, is_path_accessible is not executed and the final DFS referral
+is not performed, causing a mount error.
+
+Use case: In DNS, example.com resolves to the secondary AD server
+ad2.example.com. Our primary domain controller is ad1.example.com and has
+a DFS redirection set up from \\ad1\share\Users to \\files\share\Users.
+Mounting \\example.com\share\Users fails.
+
+Regression introduced by commit 724d9f1.
+
+Reviewed-by: Pavel Shilovsky <piastry@etersoft.ru>
+Signed-off-by: Thomas Hadig <thomas@intapp.com>
+Signed-off-by: Jeff Layton <jlayton@redhat.com>
+Signed-off-by: Steve French <sfrench@us.ibm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/cifs/connect.c |    3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/fs/cifs/connect.c
++++ b/fs/cifs/connect.c
+@@ -3217,7 +3217,7 @@ cifs_ra_pages(struct cifs_sb_info *cifs_
+ int
+ cifs_mount(struct cifs_sb_info *cifs_sb, struct smb_vol *volume_info)
+ {
+-      int rc = 0;
++      int rc;
+       int xid;
+       struct cifs_ses *pSesInfo;
+       struct cifs_tcon *tcon;
+@@ -3244,6 +3244,7 @@ try_mount_again:
+               FreeXid(xid);
+       }
+ #endif
++      rc = 0;
+       tcon = NULL;
+       pSesInfo = NULL;
+       srvTcp = NULL;
diff --git a/queue-3.2/cifs-respect-negotiated-maxmpxcount.patch b/queue-3.2/cifs-respect-negotiated-maxmpxcount.patch
new file mode 100644 (file)
index 0000000..feca221
--- /dev/null
@@ -0,0 +1,198 @@
+From 10b9b98e41ba248a899f6175ce96ee91431b6194 Mon Sep 17 00:00:00 2001
+From: Pavel Shilovsky <piastry@etersoft.ru>
+Date: Tue, 20 Mar 2012 12:55:09 +0300
+Subject: CIFS: Respect negotiated MaxMpxCount
+
+From: Pavel Shilovsky <piastry@etersoft.ru>
+
+commit 10b9b98e41ba248a899f6175ce96ee91431b6194 upstream.
+
+Some servers set this value to less than the previously hardcoded 50,
+and we lost the connection when we exceeded this limit. Fix this by
+respecting this value - not sending more than the server allows.
+
+Reviewed-by: Jeff Layton <jlayton@samba.org>
+Signed-off-by: Pavel Shilovsky <piastry@etersoft.ru>
+Signed-off-by: Steve French <stevef@smf-gateway.(none)>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/cifs/cifsfs.c    |    8 ++++----
+ fs/cifs/cifsglob.h  |   10 +++-------
+ fs/cifs/cifssmb.c   |    9 +++++++--
+ fs/cifs/connect.c   |   11 ++++-------
+ fs/cifs/dir.c       |    6 ++++--
+ fs/cifs/file.c      |    4 ++--
+ fs/cifs/transport.c |    4 ++--
+ 7 files changed, 26 insertions(+), 26 deletions(-)
+
+--- a/fs/cifs/cifsfs.c
++++ b/fs/cifs/cifsfs.c
+@@ -76,7 +76,7 @@ MODULE_PARM_DESC(cifs_min_small, "Small
+ unsigned int cifs_max_pending = CIFS_MAX_REQ;
+ module_param(cifs_max_pending, int, 0444);
+ MODULE_PARM_DESC(cifs_max_pending, "Simultaneous requests to server. "
+-                                 "Default: 50 Range: 2 to 256");
++                                 "Default: 32767 Range: 2 to 32767.");
+ unsigned short echo_retries = 5;
+ module_param(echo_retries, ushort, 0644);
+ MODULE_PARM_DESC(echo_retries, "Number of echo attempts before giving up and "
+@@ -1116,9 +1116,9 @@ init_cifs(void)
+       if (cifs_max_pending < 2) {
+               cifs_max_pending = 2;
+               cFYI(1, "cifs_max_pending set to min of 2");
+-      } else if (cifs_max_pending > 256) {
+-              cifs_max_pending = 256;
+-              cFYI(1, "cifs_max_pending set to max of 256");
++      } else if (cifs_max_pending > CIFS_MAX_REQ) {
++              cifs_max_pending = CIFS_MAX_REQ;
++              cFYI(1, "cifs_max_pending set to max of %u", CIFS_MAX_REQ);
+       }
+       rc = cifs_fscache_register();
+--- a/fs/cifs/cifsglob.h
++++ b/fs/cifs/cifsglob.h
+@@ -55,14 +55,9 @@
+ /*
+  * MAX_REQ is the maximum number of requests that WE will send
+- * on one socket concurrently. It also matches the most common
+- * value of max multiplex returned by servers.  We may
+- * eventually want to use the negotiated value (in case
+- * future servers can handle more) when we are more confident that
+- * we will not have problems oveloading the socket with pending
+- * write data.
++ * on one socket concurrently.
+  */
+-#define CIFS_MAX_REQ 50
++#define CIFS_MAX_REQ 32767
+ #define RFC1001_NAME_LEN 15
+ #define RFC1001_NAME_LEN_WITH_NULL (RFC1001_NAME_LEN + 1)
+@@ -263,6 +258,7 @@ struct TCP_Server_Info {
+       bool session_estab; /* mark when very first sess is established */
+       u16 dialect; /* dialect index that server chose */
+       enum securityEnum secType;
++      bool oplocks:1; /* enable oplocks */
+       unsigned int maxReq;    /* Clients should submit no more */
+       /* than maxReq distinct unanswered SMBs to the server when using  */
+       /* multiplexed reads or writes */
+--- a/fs/cifs/cifssmb.c
++++ b/fs/cifs/cifssmb.c
+@@ -458,7 +458,10 @@ CIFSSMBNegotiate(unsigned int xid, struc
+                       goto neg_err_exit;
+               }
+               server->sec_mode = (__u8)le16_to_cpu(rsp->SecurityMode);
+-              server->maxReq = le16_to_cpu(rsp->MaxMpxCount);
++              server->maxReq = min_t(unsigned int,
++                                     le16_to_cpu(rsp->MaxMpxCount),
++                                     cifs_max_pending);
++              server->oplocks = server->maxReq > 1 ? enable_oplocks : false;
+               server->maxBuf = le16_to_cpu(rsp->MaxBufSize);
+               server->max_vcs = le16_to_cpu(rsp->MaxNumberVcs);
+               /* even though we do not use raw we might as well set this
+@@ -564,7 +567,9 @@ CIFSSMBNegotiate(unsigned int xid, struc
+       /* one byte, so no need to convert this or EncryptionKeyLen from
+          little endian */
+-      server->maxReq = le16_to_cpu(pSMBr->MaxMpxCount);
++      server->maxReq = min_t(unsigned int, le16_to_cpu(pSMBr->MaxMpxCount),
++                             cifs_max_pending);
++      server->oplocks = server->maxReq > 1 ? enable_oplocks : false;
+       /* probably no need to store and check maxvcs */
+       server->maxBuf = le32_to_cpu(pSMBr->MaxBufferSize);
+       server->max_rw = le32_to_cpu(pSMBr->MaxRawSize);
+--- a/fs/cifs/connect.c
++++ b/fs/cifs/connect.c
+@@ -625,14 +625,10 @@ static void clean_demultiplex_info(struc
+       spin_unlock(&GlobalMid_Lock);
+       wake_up_all(&server->response_q);
+-      /*
+-       * Check if we have blocked requests that need to free. Note that
+-       * cifs_max_pending is normally 50, but can be set at module install
+-       * time to as little as two.
+-       */
++      /* Check if we have blocked requests that need to free. */
+       spin_lock(&GlobalMid_Lock);
+-      if (atomic_read(&server->inFlight) >= cifs_max_pending)
+-              atomic_set(&server->inFlight, cifs_max_pending - 1);
++      if (atomic_read(&server->inFlight) >= server->maxReq)
++              atomic_set(&server->inFlight, server->maxReq - 1);
+       /*
+        * We do not want to set the max_pending too low or we could end up
+        * with the counter going negative.
+@@ -1890,6 +1886,7 @@ cifs_get_tcp_session(struct smb_vol *vol
+       tcp_ses->noautotune = volume_info->noautotune;
+       tcp_ses->tcp_nodelay = volume_info->sockopt_tcp_nodelay;
+       atomic_set(&tcp_ses->inFlight, 0);
++      tcp_ses->maxReq = 1; /* enough to send negotiate request */
+       init_waitqueue_head(&tcp_ses->response_q);
+       init_waitqueue_head(&tcp_ses->request_q);
+       INIT_LIST_HEAD(&tcp_ses->pending_mid_q);
+--- a/fs/cifs/dir.c
++++ b/fs/cifs/dir.c
+@@ -171,7 +171,7 @@ cifs_create(struct inode *inode, struct
+       }
+       tcon = tlink_tcon(tlink);
+-      if (enable_oplocks)
++      if (tcon->ses->server->oplocks)
+               oplock = REQ_OPLOCK;
+       if (nd)
+@@ -492,7 +492,7 @@ cifs_lookup(struct inode *parent_dir_ino
+ {
+       int xid;
+       int rc = 0; /* to get around spurious gcc warning, set to zero here */
+-      __u32 oplock = enable_oplocks ? REQ_OPLOCK : 0;
++      __u32 oplock;
+       __u16 fileHandle = 0;
+       bool posix_open = false;
+       struct cifs_sb_info *cifs_sb;
+@@ -518,6 +518,8 @@ cifs_lookup(struct inode *parent_dir_ino
+       }
+       pTcon = tlink_tcon(tlink);
++      oplock = pTcon->ses->server->oplocks ? REQ_OPLOCK : 0;
++
+       /*
+        * Don't allow the separator character in a path component.
+        * The VFS will not allow "/", but "\" is allowed by posix.
+--- a/fs/cifs/file.c
++++ b/fs/cifs/file.c
+@@ -380,7 +380,7 @@ int cifs_open(struct inode *inode, struc
+       cFYI(1, "inode = 0x%p file flags are 0x%x for %s",
+                inode, file->f_flags, full_path);
+-      if (enable_oplocks)
++      if (tcon->ses->server->oplocks)
+               oplock = REQ_OPLOCK;
+       else
+               oplock = 0;
+@@ -505,7 +505,7 @@ static int cifs_reopen_file(struct cifsF
+       cFYI(1, "inode = 0x%p file flags 0x%x for %s",
+                inode, pCifsFile->f_flags, full_path);
+-      if (enable_oplocks)
++      if (tcon->ses->server->oplocks)
+               oplock = REQ_OPLOCK;
+       else
+               oplock = 0;
+--- a/fs/cifs/transport.c
++++ b/fs/cifs/transport.c
+@@ -265,12 +265,12 @@ static int wait_for_free_request(struct
+       spin_lock(&GlobalMid_Lock);
+       while (1) {
+-              if (atomic_read(&server->inFlight) >= cifs_max_pending) {
++              if (atomic_read(&server->inFlight) >= server->maxReq) {
+                       spin_unlock(&GlobalMid_Lock);
+                       cifs_num_waiters_inc(server);
+                       wait_event(server->request_q,
+                                  atomic_read(&server->inFlight)
+-                                   < cifs_max_pending);
++                                   < server->maxReq);
+                       cifs_num_waiters_dec(server);
+                       spin_lock(&GlobalMid_Lock);
+               } else {
diff --git a/queue-3.2/nfs-properly-handle-the-case-where-the-delegation-is-revoked.patch b/queue-3.2/nfs-properly-handle-the-case-where-the-delegation-is-revoked.patch
new file mode 100644 (file)
index 0000000..5d2b2ff
--- /dev/null
@@ -0,0 +1,182 @@
+From a1d0b5eebc4fd6e0edb02688b35f17f67f42aea5 Mon Sep 17 00:00:00 2001
+From: Trond Myklebust <Trond.Myklebust@netapp.com>
+Date: Mon, 5 Mar 2012 19:56:44 -0500
+Subject: NFS: Properly handle the case where the delegation is revoked
+
+From: Trond Myklebust <Trond.Myklebust@netapp.com>
+
+commit a1d0b5eebc4fd6e0edb02688b35f17f67f42aea5 upstream.
+
+If we know that the delegation stateid is bad or revoked, we need to
+remove that delegation as soon as possible, and then mark all the
+stateids that relied on that delegation for recovery. We cannot use
+the delegation as part of the recovery process.
+
+Also note that NFSv4.1 uses a different error code (NFS4ERR_DELEG_REVOKED)
+to indicate that the delegation was revoked.
+
+Finally, ensure that setlk() and setattr() can both recover safely from
+a revoked delegation.
+
+Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/nfs/delegation.c |   11 +++++++++++
+ fs/nfs/delegation.h |    1 +
+ fs/nfs/nfs4_fs.h    |    2 ++
+ fs/nfs/nfs4proc.c   |   18 ++++++++++++++++--
+ fs/nfs/nfs4state.c  |   29 +++++++++++++++++++++++++++--
+ 5 files changed, 57 insertions(+), 4 deletions(-)
+
+--- a/fs/nfs/delegation.c
++++ b/fs/nfs/delegation.c
+@@ -466,6 +466,17 @@ static void nfs_delegation_run_state_man
+               nfs4_schedule_state_manager(clp);
+ }
++void nfs_remove_bad_delegation(struct inode *inode)
++{
++      struct nfs_delegation *delegation;
++
++      delegation = nfs_detach_delegation(NFS_I(inode), NFS_SERVER(inode));
++      if (delegation) {
++              nfs_inode_find_state_and_recover(inode, &delegation->stateid);
++              nfs_free_delegation(delegation);
++      }
++}
++
+ /**
+  * nfs_expire_all_delegation_types
+  * @clp: client to process
+--- a/fs/nfs/delegation.h
++++ b/fs/nfs/delegation.h
+@@ -45,6 +45,7 @@ void nfs_expire_unreferenced_delegations
+ void nfs_handle_cb_pathdown(struct nfs_client *clp);
+ int nfs_client_return_marked_delegations(struct nfs_client *clp);
+ int nfs_delegations_present(struct nfs_client *clp);
++void nfs_remove_bad_delegation(struct inode *inode);
+ void nfs_delegation_mark_reclaim(struct nfs_client *clp);
+ void nfs_delegation_reap_unclaimed(struct nfs_client *clp);
+--- a/fs/nfs/nfs4_fs.h
++++ b/fs/nfs/nfs4_fs.h
+@@ -324,6 +324,8 @@ extern void nfs4_put_open_state(struct n
+ extern void nfs4_close_state(struct nfs4_state *, fmode_t);
+ extern void nfs4_close_sync(struct nfs4_state *, fmode_t);
+ extern void nfs4_state_set_mode_locked(struct nfs4_state *, fmode_t);
++extern void nfs_inode_find_state_and_recover(struct inode *inode,
++              const nfs4_stateid *stateid);
+ extern void nfs4_schedule_lease_recovery(struct nfs_client *);
+ extern void nfs4_schedule_state_manager(struct nfs_client *);
+ extern void nfs4_schedule_path_down_recovery(struct nfs_client *clp);
+--- a/fs/nfs/nfs4proc.c
++++ b/fs/nfs/nfs4proc.c
+@@ -263,8 +263,11 @@ static int nfs4_handle_exception(struct
+       switch(errorcode) {
+               case 0:
+                       return 0;
++              case -NFS4ERR_DELEG_REVOKED:
+               case -NFS4ERR_ADMIN_REVOKED:
+               case -NFS4ERR_BAD_STATEID:
++                      if (state != NULL)
++                              nfs_remove_bad_delegation(state->inode);
+               case -NFS4ERR_OPENMODE:
+                       if (state == NULL)
+                               break;
+@@ -1316,8 +1319,11 @@ int nfs4_open_delegation_recall(struct n
+                                * The show must go on: exit, but mark the
+                                * stateid as needing recovery.
+                                */
++                      case -NFS4ERR_DELEG_REVOKED:
+                       case -NFS4ERR_ADMIN_REVOKED:
+                       case -NFS4ERR_BAD_STATEID:
++                              nfs_inode_find_state_and_recover(state->inode,
++                                              stateid);
+                               nfs4_schedule_stateid_recovery(server, state);
+                       case -EKEYEXPIRED:
+                               /*
+@@ -1893,7 +1899,9 @@ static int nfs4_do_setattr(struct inode
+                          struct nfs4_state *state)
+ {
+       struct nfs_server *server = NFS_SERVER(inode);
+-      struct nfs4_exception exception = { };
++      struct nfs4_exception exception = {
++              .state = state,
++      };
+       int err;
+       do {
+               err = nfs4_handle_exception(server,
+@@ -3707,8 +3715,11 @@ nfs4_async_handle_error(struct rpc_task
+       if (task->tk_status >= 0)
+               return 0;
+       switch(task->tk_status) {
++              case -NFS4ERR_DELEG_REVOKED:
+               case -NFS4ERR_ADMIN_REVOKED:
+               case -NFS4ERR_BAD_STATEID:
++                      if (state != NULL)
++                              nfs_remove_bad_delegation(state->inode);
+               case -NFS4ERR_OPENMODE:
+                       if (state == NULL)
+                               break;
+@@ -4526,7 +4537,9 @@ out:
+ static int nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock *request)
+ {
+-      struct nfs4_exception exception = { };
++      struct nfs4_exception exception = {
++              .state = state,
++      };
+       int err;
+       do {
+@@ -4619,6 +4632,7 @@ int nfs4_lock_delegation_recall(struct n
+                                * The show must go on: exit, but mark the
+                                * stateid as needing recovery.
+                                */
++                      case -NFS4ERR_DELEG_REVOKED:
+                       case -NFS4ERR_ADMIN_REVOKED:
+                       case -NFS4ERR_BAD_STATEID:
+                       case -NFS4ERR_OPENMODE:
+--- a/fs/nfs/nfs4state.c
++++ b/fs/nfs/nfs4state.c
+@@ -1071,12 +1071,37 @@ void nfs4_schedule_stateid_recovery(cons
+ {
+       struct nfs_client *clp = server->nfs_client;
+-      if (test_and_clear_bit(NFS_DELEGATED_STATE, &state->flags))
+-              nfs_async_inode_return_delegation(state->inode, &state->stateid);
+       nfs4_state_mark_reclaim_nograce(clp, state);
+       nfs4_schedule_state_manager(clp);
+ }
++void nfs_inode_find_state_and_recover(struct inode *inode,
++              const nfs4_stateid *stateid)
++{
++      struct nfs_client *clp = NFS_SERVER(inode)->nfs_client;
++      struct nfs_inode *nfsi = NFS_I(inode);
++      struct nfs_open_context *ctx;
++      struct nfs4_state *state;
++      bool found = false;
++
++      spin_lock(&inode->i_lock);
++      list_for_each_entry(ctx, &nfsi->open_files, list) {
++              state = ctx->state;
++              if (state == NULL)
++                      continue;
++              if (!test_bit(NFS_DELEGATED_STATE, &state->flags))
++                      continue;
++              if (memcmp(state->stateid.data, stateid->data, sizeof(state->stateid.data)) != 0)
++                      continue;
++              nfs4_state_mark_reclaim_nograce(clp, state);
++              found = true;
++      }
++      spin_unlock(&inode->i_lock);
++      if (found)
++              nfs4_schedule_state_manager(clp);
++}
++
++
+ static int nfs4_reclaim_locks(struct nfs4_state *state, const struct nfs4_state_recovery_ops *ops)
+ {
+       struct inode *inode = state->inode;
diff --git a/queue-3.2/nfsv4-return-the-delegation-if-the-server-returns-nfs4err_openmode.patch b/queue-3.2/nfsv4-return-the-delegation-if-the-server-returns-nfs4err_openmode.patch
new file mode 100644 (file)
index 0000000..668718d
--- /dev/null
@@ -0,0 +1,72 @@
+From 3114ea7a24d3264c090556a2444fc6d2c06176d4 Mon Sep 17 00:00:00 2001
+From: Trond Myklebust <Trond.Myklebust@netapp.com>
+Date: Wed, 7 Mar 2012 16:39:06 -0500
+Subject: NFSv4: Return the delegation if the server returns NFS4ERR_OPENMODE
+
+From: Trond Myklebust <Trond.Myklebust@netapp.com>
+
+commit 3114ea7a24d3264c090556a2444fc6d2c06176d4 upstream.
+
+If a setattr() fails because of an NFS4ERR_OPENMODE error, it is
+probably due to us holding a read delegation. Ensure that the
+recovery routines return that delegation in this case.
+
+Reported-by: Miklos Szeredi <miklos@szeredi.hu>
+Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/nfs/nfs4_fs.h  |    1 +
+ fs/nfs/nfs4proc.c |   13 ++++++++++++-
+ 2 files changed, 13 insertions(+), 1 deletion(-)
+
+--- a/fs/nfs/nfs4_fs.h
++++ b/fs/nfs/nfs4_fs.h
+@@ -191,6 +191,7 @@ struct nfs4_exception {
+       long timeout;
+       int retry;
+       struct nfs4_state *state;
++      struct inode *inode;
+ };
+ struct nfs4_state_recovery_ops {
+--- a/fs/nfs/nfs4proc.c
++++ b/fs/nfs/nfs4proc.c
+@@ -257,18 +257,28 @@ static int nfs4_handle_exception(struct
+ {
+       struct nfs_client *clp = server->nfs_client;
+       struct nfs4_state *state = exception->state;
++      struct inode *inode = exception->inode;
+       int ret = errorcode;
+       exception->retry = 0;
+       switch(errorcode) {
+               case 0:
+                       return 0;
++              case -NFS4ERR_OPENMODE:
++                      if (nfs_have_delegation(inode, FMODE_READ)) {
++                              nfs_inode_return_delegation(inode);
++                              exception->retry = 1;
++                              return 0;
++                      }
++                      if (state == NULL)
++                              break;
++                      nfs4_schedule_stateid_recovery(server, state);
++                      goto wait_on_recovery;
+               case -NFS4ERR_DELEG_REVOKED:
+               case -NFS4ERR_ADMIN_REVOKED:
+               case -NFS4ERR_BAD_STATEID:
+                       if (state != NULL)
+                               nfs_remove_bad_delegation(state->inode);
+-              case -NFS4ERR_OPENMODE:
+                       if (state == NULL)
+                               break;
+                       nfs4_schedule_stateid_recovery(server, state);
+@@ -1901,6 +1911,7 @@ static int nfs4_do_setattr(struct inode
+       struct nfs_server *server = NFS_SERVER(inode);
+       struct nfs4_exception exception = {
+               .state = state,
++              .inode = inode,
+       };
+       int err;
+       do {
diff --git a/queue-3.2/series b/queue-3.2/series
index 2840ec7d9c891837920510a87eacda4df47ebcf3..42d7c52ac37b4bf396555b9a2136c622cfb0a743 100644 (file)
@@ -89,3 +89,14 @@ firewire-ohci-fix-too-early-completion-of-ir-multichannel-buffers.patch
 video-uvesafb-fix-oops-that-uvesafb-try-to-execute-nx-protected-page.patch
 kvm-x86-extend-struct-x86_emulate_ops-with-get_cpuid.patch
 kvm-x86-fix-missing-checks-in-syscall-emulation.patch
+nfs-properly-handle-the-case-where-the-delegation-is-revoked.patch
+nfsv4-return-the-delegation-if-the-server-returns-nfs4err_openmode.patch
+xfs-fix-inode-lookup-race.patch
+cifs-respect-negotiated-maxmpxcount.patch
+cifs-fix-issue-mounting-of-dfs-root-when-redirecting-from-one-domain-controller-to-the-next.patch
+cifs-fix-a-spurious-error-in-cifs_push_posix_locks.patch
+ubi-fix-error-handling-in-ubi_scan.patch
+ubi-fix-eraseblock-picking-criteria.patch
+sunrpc-we-must-not-use-list_for_each_entry_safe-in-rpc_wake_up.patch
+usbnet-increase-urb-reference-count-before-usb_unlink_urb.patch
+usbnet-don-t-clear-urb-dev-in-tx_complete.patch
diff --git a/queue-3.2/sunrpc-we-must-not-use-list_for_each_entry_safe-in-rpc_wake_up.patch b/queue-3.2/sunrpc-we-must-not-use-list_for_each_entry_safe-in-rpc_wake_up.patch
new file mode 100644 (file)
index 0000000..fa74816
--- /dev/null
@@ -0,0 +1,70 @@
+From 540a0f7584169651f485e8ab67461fcb06934e38 Mon Sep 17 00:00:00 2001
+From: Trond Myklebust <Trond.Myklebust@netapp.com>
+Date: Mon, 19 Mar 2012 13:39:35 -0400
+Subject: SUNRPC: We must not use list_for_each_entry_safe() in rpc_wake_up()
+
+From: Trond Myklebust <Trond.Myklebust@netapp.com>
+
+commit 540a0f7584169651f485e8ab67461fcb06934e38 upstream.
+
+The problem is that for the case of priority queues, we
+have to assume that __rpc_remove_wait_queue_priority will move new
+elements from the tk_wait.links lists into the queue->tasks[] list.
+We therefore cannot use list_for_each_entry_safe() on queue->tasks[],
+since that will skip these new tasks that __rpc_remove_wait_queue_priority
+is adding.
+
+Without this fix, rpc_wake_up and rpc_wake_up_status will both fail
+to wake up all tasks on priority wait queues, which can result
+in some nasty hangs.
+
+Reported-by: Andy Adamson <andros@netapp.com>
+Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ net/sunrpc/sched.c |   15 +++++++++++----
+ 1 file changed, 11 insertions(+), 4 deletions(-)
+
+--- a/net/sunrpc/sched.c
++++ b/net/sunrpc/sched.c
+@@ -500,14 +500,18 @@ EXPORT_SYMBOL_GPL(rpc_wake_up_next);
+  */
+ void rpc_wake_up(struct rpc_wait_queue *queue)
+ {
+-      struct rpc_task *task, *next;
+       struct list_head *head;
+       spin_lock_bh(&queue->lock);
+       head = &queue->tasks[queue->maxpriority];
+       for (;;) {
+-              list_for_each_entry_safe(task, next, head, u.tk_wait.list)
++              while (!list_empty(head)) {
++                      struct rpc_task *task;
++                      task = list_first_entry(head,
++                                      struct rpc_task,
++                                      u.tk_wait.list);
+                       rpc_wake_up_task_queue_locked(queue, task);
++              }
+               if (head == &queue->tasks[0])
+                       break;
+               head--;
+@@ -525,13 +529,16 @@ EXPORT_SYMBOL_GPL(rpc_wake_up);
+  */
+ void rpc_wake_up_status(struct rpc_wait_queue *queue, int status)
+ {
+-      struct rpc_task *task, *next;
+       struct list_head *head;
+       spin_lock_bh(&queue->lock);
+       head = &queue->tasks[queue->maxpriority];
+       for (;;) {
+-              list_for_each_entry_safe(task, next, head, u.tk_wait.list) {
++              while (!list_empty(head)) {
++                      struct rpc_task *task;
++                      task = list_first_entry(head,
++                                      struct rpc_task,
++                                      u.tk_wait.list);
+                       task->tk_status = status;
+                       rpc_wake_up_task_queue_locked(queue, task);
+               }
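For context only: a minimal, standalone user-space sketch of the draining pattern the SUNRPC patch above switches to (all names and types here are made up; this is not SUNRPC code). The point is to always take the current first entry until the list is empty, rather than iterating with a cached next pointer that can skip entries moved into the list while it is being processed.

#include <stdio.h>
#include <stdlib.h>

struct task {
        int id;
        struct task *next;
};

/* Pop-and-process until empty: entries spliced into the queue while we
 * drain it are still seen, unlike a list_for_each_entry_safe()-style
 * walk that caches the next pointer up front. */
static void drain_queue(struct task **head)
{
        while (*head != NULL) {
                struct task *t = *head;   /* always the current first entry */
                *head = t->next;          /* "waking" removes it from the queue */
                printf("waking task %d\n", t->id);
                free(t);
        }
}

int main(void)
{
        struct task *head = NULL;

        for (int i = 3; i >= 1; i--) {
                struct task *t = malloc(sizeof(*t));
                t->id = i;
                t->next = head;
                head = t;
        }
        drain_queue(&head);
        return 0;
}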
diff --git a/queue-3.2/ubi-fix-eraseblock-picking-criteria.patch b/queue-3.2/ubi-fix-eraseblock-picking-criteria.patch
new file mode 100644 (file)
index 0000000..24f025b
--- /dev/null
@@ -0,0 +1,55 @@
+From 7eb3aa65853e1b223bfc786b023b702018cb76c0 Mon Sep 17 00:00:00 2001
+From: Artem Bityutskiy <artem.bityutskiy@linux.intel.com>
+Date: Wed, 7 Mar 2012 19:08:36 +0200
+Subject: UBI: fix eraseblock picking criteria
+
+From: Artem Bityutskiy <artem.bityutskiy@linux.intel.com>
+
+commit 7eb3aa65853e1b223bfc786b023b702018cb76c0 upstream.
+
+The 'find_wl_entry()' function expects the maximum difference as the second
+argument, not the maximum absolute value. So the "unknown" eraseblock picking
+was incorrect, as Shmulik Ladkani spotted. This patch fixes the issue.
+
+Reported-by: Shmulik Ladkani <shmulik.ladkani@gmail.com>
+Signed-off-by: Artem Bityutskiy <artem.bityutskiy@linux.intel.com>
+Reviewed-by: Shmulik Ladkani <shmulik.ladkani@gmail.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/mtd/ubi/wl.c |   10 ++++------
+ 1 file changed, 4 insertions(+), 6 deletions(-)
+
+--- a/drivers/mtd/ubi/wl.c
++++ b/drivers/mtd/ubi/wl.c
+@@ -389,7 +389,7 @@ static struct ubi_wl_entry *find_wl_entr
+  */
+ int ubi_wl_get_peb(struct ubi_device *ubi, int dtype)
+ {
+-      int err, medium_ec;
++      int err;
+       struct ubi_wl_entry *e, *first, *last;
+       ubi_assert(dtype == UBI_LONGTERM || dtype == UBI_SHORTTERM ||
+@@ -427,7 +427,7 @@ retry:
+                * For unknown data we pick a physical eraseblock with medium
+                * erase counter. But we by no means can pick a physical
+                * eraseblock with erase counter greater or equivalent than the
+-               * lowest erase counter plus %WL_FREE_MAX_DIFF.
++               * lowest erase counter plus %WL_FREE_MAX_DIFF/2.
+                */
+               first = rb_entry(rb_first(&ubi->free), struct ubi_wl_entry,
+                                       u.rb);
+@@ -436,10 +436,8 @@ retry:
+               if (last->ec - first->ec < WL_FREE_MAX_DIFF)
+                       e = rb_entry(ubi->free.rb_node,
+                                       struct ubi_wl_entry, u.rb);
+-              else {
+-                      medium_ec = (first->ec + WL_FREE_MAX_DIFF)/2;
+-                      e = find_wl_entry(&ubi->free, medium_ec);
+-              }
++              else
++                      e = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF/2);
+               break;
+       case UBI_SHORTTERM:
+               /*
diff --git a/queue-3.2/ubi-fix-error-handling-in-ubi_scan.patch b/queue-3.2/ubi-fix-error-handling-in-ubi_scan.patch
new file mode 100644 (file)
index 0000000..ca4aaa0
--- /dev/null
@@ -0,0 +1,56 @@
+From a29852be492d61001d86c6ebf5fff9b93d7b4be9 Mon Sep 17 00:00:00 2001
+From: Richard Weinberger <richard@nod.at>
+Date: Mon, 30 Jan 2012 18:20:13 +0100
+Subject: UBI: fix error handling in ubi_scan()
+
+From: Richard Weinberger <richard@nod.at>
+
+commit a29852be492d61001d86c6ebf5fff9b93d7b4be9 upstream.
+
+Two bad things can happen in ubi_scan():
+1. If kmem_cache_create() fails we jump to out_si and call
+   ubi_scan_destroy_si() which calls kmem_cache_destroy().
+   But si->scan_leb_slab is NULL.
+2. If process_eb() fails we jump to out_vidh, call
+   kmem_cache_destroy() and ubi_scan_destroy_si(), which calls
+   kmem_cache_destroy() again.
+
+Signed-off-by: Richard Weinberger <richard@nod.at>
+Signed-off-by: Artem Bityutskiy <artem.bityutskiy@linux.intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/mtd/ubi/scan.c |    8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+--- a/drivers/mtd/ubi/scan.c
++++ b/drivers/mtd/ubi/scan.c
+@@ -1174,7 +1174,7 @@ struct ubi_scan_info *ubi_scan(struct ub
+       ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
+       if (!ech)
+-              goto out_slab;
++              goto out_si;
+       vidh = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL);
+       if (!vidh)
+@@ -1235,8 +1235,6 @@ out_vidh:
+       ubi_free_vid_hdr(ubi, vidh);
+ out_ech:
+       kfree(ech);
+-out_slab:
+-      kmem_cache_destroy(si->scan_leb_slab);
+ out_si:
+       ubi_scan_destroy_si(si);
+       return ERR_PTR(err);
+@@ -1325,7 +1323,9 @@ void ubi_scan_destroy_si(struct ubi_scan
+               }
+       }
+-      kmem_cache_destroy(si->scan_leb_slab);
++      if (si->scan_leb_slab)
++              kmem_cache_destroy(si->scan_leb_slab);
++
+       kfree(si);
+ }
diff --git a/queue-3.2/usbnet-don-t-clear-urb-dev-in-tx_complete.patch b/queue-3.2/usbnet-don-t-clear-urb-dev-in-tx_complete.patch
new file mode 100644 (file)
index 0000000..3876c33
--- /dev/null
@@ -0,0 +1,36 @@
+From 5d5440a835710d09f0ef18da5000541ec98b537a Mon Sep 17 00:00:00 2001
+From: "tom.leiming@gmail.com" <tom.leiming@gmail.com>
+Date: Thu, 22 Mar 2012 03:22:38 +0000
+Subject: usbnet: don't clear urb->dev in tx_complete
+
+From: "tom.leiming@gmail.com" <tom.leiming@gmail.com>
+
+commit 5d5440a835710d09f0ef18da5000541ec98b537a upstream.
+
+URB unlinking is always racing with its completion, and tx_complete
+may be called before or while usb_unlink_urb is running, so tx_complete
+must not clear urb->dev since it will be used in the unlink path;
+otherwise invalid memory accesses or a USB device leak may be caused
+inside usb_unlink_urb.
+
+Cc: Alan Stern <stern@rowland.harvard.edu>
+Cc: Oliver Neukum <oliver@neukum.org>
+Signed-off-by: Ming Lei <tom.leiming@gmail.com>
+Acked-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/net/usb/usbnet.c |    1 -
+ 1 file changed, 1 deletion(-)
+
+--- a/drivers/net/usb/usbnet.c
++++ b/drivers/net/usb/usbnet.c
+@@ -1037,7 +1037,6 @@ static void tx_complete (struct urb *urb
+       }
+       usb_autopm_put_interface_async(dev->intf);
+-      urb->dev = NULL;
+       entry->state = tx_done;
+       defer_bh(dev, skb, &dev->txq);
+ }
diff --git a/queue-3.2/usbnet-increase-urb-reference-count-before-usb_unlink_urb.patch b/queue-3.2/usbnet-increase-urb-reference-count-before-usb_unlink_urb.patch
new file mode 100644 (file)
index 0000000..7b08473
--- /dev/null
@@ -0,0 +1,59 @@
+From 0956a8c20b23d429e79ff86d4325583fc06f9eb4 Mon Sep 17 00:00:00 2001
+From: "tom.leiming@gmail.com" <tom.leiming@gmail.com>
+Date: Thu, 22 Mar 2012 03:22:18 +0000
+Subject: usbnet: increase URB reference count before usb_unlink_urb
+
+From: "tom.leiming@gmail.com" <tom.leiming@gmail.com>
+
+commit 0956a8c20b23d429e79ff86d4325583fc06f9eb4 upstream.
+
+Commit 4231d47e6fe69f061f96c98c30eaf9fb4c14b96d (net/usbnet: avoid
+recursive locking in usbnet_stop()) fixes the recursive locking
+problem by releasing the skb queue lock, but it makes usb_unlink_urb
+race with defer_bh, and the URB being unlinked may be freed before
+or during the call to usb_unlink_urb, so a use-after-free problem may
+be triggered inside usb_unlink_urb.
+
+The patch fixes the use-after-free problem by increasing URB
+reference count with skb queue lock held before calling
+usb_unlink_urb, so the URB won't be freed until return from
+usb_unlink_urb.
+
+Cc: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Cc: Alan Stern <stern@rowland.harvard.edu>
+Cc: Oliver Neukum <oliver@neukum.org>
+Reported-by: Dave Jones <davej@redhat.com>
+Signed-off-by: Ming Lei <tom.leiming@gmail.com>
+Acked-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/net/usb/usbnet.c |    9 +++++++++
+ 1 file changed, 9 insertions(+)
+
+--- a/drivers/net/usb/usbnet.c
++++ b/drivers/net/usb/usbnet.c
+@@ -589,6 +589,14 @@ static int unlink_urbs (struct usbnet *d
+               entry = (struct skb_data *) skb->cb;
+               urb = entry->urb;
++              /*
++               * Get reference count of the URB to avoid it to be
++               * freed during usb_unlink_urb, which may trigger
++               * use-after-free problem inside usb_unlink_urb since
++               * usb_unlink_urb is always racing with .complete
++               * handler(include defer_bh).
++               */
++              usb_get_urb(urb);
+               spin_unlock_irqrestore(&q->lock, flags);
+               // during some PM-driven resume scenarios,
+               // these (async) unlinks complete immediately
+@@ -597,6 +605,7 @@ static int unlink_urbs (struct usbnet *d
+                       netdev_dbg(dev->net, "unlink urb err, %d\n", retval);
+               else
+                       count++;
++              usb_put_urb(urb);
+               spin_lock_irqsave(&q->lock, flags);
+       }
+       spin_unlock_irqrestore (&q->lock, flags);
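For context only: a standalone sketch of the reference-counting pattern the usbnet patch above applies (hypothetical types and names, not the usbnet or USB core API). A reference is taken while the queue lock is still held, so the object cannot be freed by a racing completion while the lock is dropped to call the cancel operation; the reference is dropped afterwards.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct request {
        atomic_int refcount;
};

static struct request *request_get(struct request *r)
{
        atomic_fetch_add(&r->refcount, 1);
        return r;
}

static void request_put(struct request *r)
{
        if (atomic_fetch_sub(&r->refcount, 1) == 1)
                free(r);                        /* last reference gone */
}

static void cancel_request(struct request *r)
{
        /* may run while a completion path concurrently drops its reference */
        printf("cancelling, refcount=%d\n", atomic_load(&r->refcount));
}

static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;

static void unlink_one(struct request *r)
{
        pthread_mutex_lock(&queue_lock);
        request_get(r);                 /* pin it while the lock is held */
        pthread_mutex_unlock(&queue_lock);

        cancel_request(r);              /* safe: we still hold a reference */

        request_put(r);                 /* may free it now */
}

int main(void)
{
        struct request *r = malloc(sizeof(*r));

        atomic_init(&r->refcount, 1);   /* initial owner's reference */
        unlink_one(r);
        request_put(r);                 /* drop the initial reference */
        return 0;
}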
diff --git a/queue-3.2/xfs-fix-inode-lookup-race.patch b/queue-3.2/xfs-fix-inode-lookup-race.patch
new file mode 100644 (file)
index 0000000..c833fa8
--- /dev/null
@@ -0,0 +1,99 @@
+From f30d500f809eca67a21704347ab14bb35877b5ee Mon Sep 17 00:00:00 2001
+From: Dave Chinner <dchinner@redhat.com>
+Date: Wed, 7 Mar 2012 04:50:25 +0000
+Subject: xfs: fix inode lookup race
+
+From: Dave Chinner <dchinner@redhat.com>
+
+commit f30d500f809eca67a21704347ab14bb35877b5ee upstream.
+
+When we get concurrent lookups of the same inode that is not in the
+per-AG inode cache, there is a race condition that triggers warnings
+in unlock_new_inode() indicating that we are initialising an inode
+that isn't in the correct state for a new inode.
+
+When we do an inode lookup via a file handle or a bulkstat, we don't
+serialise lookups at a higher level through the dentry cache (i.e.
+pathless lookup), and so we can get concurrent lookups of the same
+inode.
+
+The race condition is between the insertion of the inode into the
+cache in the case of a cache miss and a concurrent lookup:
+
+Thread 1                       Thread 2
+xfs_iget()
+  xfs_iget_cache_miss()
+    xfs_iread()
+    lock radix tree
+    radix_tree_insert()
+                               rcu_read_lock
+                               radix_tree_lookup
+                               lock inode flags
+                               XFS_INEW not set
+                               igrab()
+                               unlock inode flags
+                               rcu_read_unlock
+                               use uninitialised inode
+                               .....
+    lock inode flags
+    set XFS_INEW
+    unlock inode flags
+    unlock radix tree
+  xfs_setup_inode()
+    inode flags = I_NEW
+    unlock_new_inode()
+      WARNING as inode flags != I_NEW
+
+This can lead to inode corruption, inode list corruption, etc, and
+is generally a bad thing to occur.
+
+Fix this by setting XFS_INEW before inserting the inode into the
+radix tree. This will ensure any concurrent lookup will find the new
+inode with XFS_INEW set and that forces the lookup to wait until the
+XFS_INEW flag is removed before allowing the lookup to succeed.
+
+Signed-off-by: Dave Chinner <dchinner@redhat.com>
+Reviewed-by: Christoph Hellwig <hch@lst.de>
+Signed-off-by: Ben Myers <bpm@sgi.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/xfs/xfs_iget.c |   18 ++++++++++++------
+ 1 file changed, 12 insertions(+), 6 deletions(-)
+
+--- a/fs/xfs/xfs_iget.c
++++ b/fs/xfs/xfs_iget.c
+@@ -353,9 +353,20 @@ xfs_iget_cache_miss(
+                       BUG();
+       }
+-      spin_lock(&pag->pag_ici_lock);
++      /*
++       * These values must be set before inserting the inode into the radix
++       * tree as the moment it is inserted a concurrent lookup (allowed by the
++       * RCU locking mechanism) can find it and that lookup must see that this
++       * is an inode currently under construction (i.e. that XFS_INEW is set).
++       * The ip->i_flags_lock that protects the XFS_INEW flag forms the
++       * memory barrier that ensures this detection works correctly at lookup
++       * time.
++       */
++      ip->i_udquot = ip->i_gdquot = NULL;
++      xfs_iflags_set(ip, XFS_INEW);
+       /* insert the new inode */
++      spin_lock(&pag->pag_ici_lock);
+       error = radix_tree_insert(&pag->pag_ici_root, agino, ip);
+       if (unlikely(error)) {
+               WARN_ON(error != -EEXIST);
+@@ -363,11 +374,6 @@ xfs_iget_cache_miss(
+               error = EAGAIN;
+               goto out_preload_end;
+       }
+-
+-      /* These values _must_ be set before releasing the radix tree lock! */
+-      ip->i_udquot = ip->i_gdquot = NULL;
+-      xfs_iflags_set(ip, XFS_INEW);
+-
+       spin_unlock(&pag->pag_ici_lock);
+       radix_tree_preload_end();
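For context only: a standalone sketch of the ordering rule the XFS patch above enforces (hypothetical names, not XFS code): initialise the object and set its "new/under construction" flag before publishing it in the shared lookup structure, so a concurrent lookup either misses it entirely or sees the flag and backs off until setup is complete.

#include <stdatomic.h>
#include <stddef.h>

#define FLAG_NEW 0x1u

struct cached_inode {
        atomic_uint flags;
        int data;
};

/* Stand-in for the shared lookup structure (e.g. a radix tree slot). */
static _Atomic(struct cached_inode *) cache_slot;

static void publish(struct cached_inode *ip, int data)
{
        ip->data = data;                        /* initialise fields first    */
        atomic_store(&ip->flags, FLAG_NEW);     /* mark as under construction */
        atomic_store(&cache_slot, ip);          /* only now make it visible   */
}

static struct cached_inode *lookup(void)
{
        struct cached_inode *ip = atomic_load(&cache_slot);

        if (ip && (atomic_load(&ip->flags) & FLAG_NEW))
                return NULL;    /* caller must wait/retry until setup is done */
        return ip;
}

int main(void)
{
        static struct cached_inode ino;

        publish(&ino, 42);
        atomic_store(&ino.flags, 0);            /* construction finished */
        return lookup() == &ino ? 0 : 1;
}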