/* Locking: */
-/*
- * Currently used for the del_recall_lru and file hash table. In an
- * effort to decrease the scope of the client_mutex, this spinlock may
- * eventually cover more:
- */
-static DEFINE_SPINLOCK(state_lock);
-
enum nfsd4_st_mutex_lock_subclass {
OPEN_STATEID_MUTEX = 0,
LOCK_STATEID_MUTEX = 1,
{
struct nfs4_delegation *searchdp = NULL;
struct nfs4_client *searchclp = NULL;
+ struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
- lockdep_assert_held(&state_lock);
+ lockdep_assert_held(&nn->deleg_lock);
lockdep_assert_held(&fp->fi_lock);
list_for_each_entry(searchdp, &fp->fi_delegations, dl_perfile) {
hash_delegation_locked(struct nfs4_delegation *dp, struct nfs4_file *fp)
{
struct nfs4_client *clp = dp->dl_stid.sc_client;
+ struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
- lockdep_assert_held(&state_lock);
+ lockdep_assert_held(&nn->deleg_lock);
lockdep_assert_held(&fp->fi_lock);
lockdep_assert_held(&clp->cl_lock);
unhash_delegation_locked(struct nfs4_delegation *dp, unsigned short statusmask)
{
struct nfs4_file *fp = dp->dl_stid.sc_file;
+ struct nfsd_net *nn = net_generic(dp->dl_stid.sc_client->net,
+ nfsd_net_id);
- lockdep_assert_held(&state_lock);
+ lockdep_assert_held(&nn->deleg_lock);
if (!delegation_hashed(dp))
return false;
static void destroy_delegation(struct nfs4_delegation *dp)
{
bool unhashed;
+ /*
+ * Delegation state is tracked per network namespace: resolve the
+ * owning nfsd_net from the delegation's client so we take that
+ * net's deleg_lock (replaces the old global state_lock).
+ */
+ struct nfsd_net *nn = net_generic(dp->dl_stid.sc_client->net,
+ nfsd_net_id);
- spin_lock(&state_lock);
+ spin_lock(&nn->deleg_lock);
unhashed = unhash_delegation_locked(dp, SC_STATUS_CLOSED);
- spin_unlock(&state_lock);
+ spin_unlock(&nn->deleg_lock);
if (unhashed)
destroy_unhashed_deleg(dp);
}
case SC_TYPE_DELEG:
refcount_inc(&stid->sc_count);
dp = delegstateid(stid);
- spin_lock(&state_lock);
+ spin_lock(&nn->deleg_lock);
if (!unhash_delegation_locked(
dp, SC_STATUS_ADMIN_REVOKED))
dp = NULL;
- spin_unlock(&state_lock);
+ spin_unlock(&nn->deleg_lock);
if (dp)
revoke_delegation(dp);
break;
struct nfs4_delegation *dp;
LIST_HEAD(reaplist);
- spin_lock(&state_lock);
+ spin_lock(&nn->deleg_lock);
while (!list_empty(&clp->cl_delegations)) {
dp = list_entry(clp->cl_delegations.next, struct nfs4_delegation, dl_perclnt);
unhash_delegation_locked(dp, SC_STATUS_CLOSED);
list_add(&dp->dl_recall_lru, &reaplist);
}
- spin_unlock(&state_lock);
+ spin_unlock(&nn->deleg_lock);
while (!list_empty(&reaplist)) {
dp = list_entry(reaplist.next, struct nfs4_delegation, dl_recall_lru);
list_del_init(&dp->dl_recall_lru);
* If the dl_time != 0, then we know that it has already been
* queued for a lease break. Don't queue it again.
*/
- spin_lock(&state_lock);
+ spin_lock(&nn->deleg_lock);
if (delegation_hashed(dp) && dp->dl_time == 0) {
dp->dl_time = ktime_get_boottime_seconds();
list_add_tail(&dp->dl_recall_lru, &nn->del_recall_lru);
}
- spin_unlock(&state_lock);
+ spin_unlock(&nn->deleg_lock);
}
static int nfsd4_cb_recall_done(struct nfsd4_callback *cb,
{
bool deleg_ts = nfsd4_want_deleg_timestamps(open);
struct nfs4_client *clp = stp->st_stid.sc_client;
+ struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
struct nfs4_file *fp = stp->st_stid.sc_file;
struct nfs4_clnt_odstate *odstate = stp->st_clnt_odstate;
struct nfs4_delegation *dp;
return ERR_PTR(-EOPNOTSUPP);
}
- spin_lock(&state_lock);
+ spin_lock(&nn->deleg_lock);
spin_lock(&fp->fi_lock);
if (nfs4_delegation_exists(clp, fp))
status = -EAGAIN;
} else
fp->fi_delegees++;
spin_unlock(&fp->fi_lock);
- spin_unlock(&state_lock);
+ spin_unlock(&nn->deleg_lock);
if (nf)
nfsd_file_put(nf);
if (status)
if (fp->fi_had_conflict)
goto out_unlock;
- spin_lock(&state_lock);
+ spin_lock(&nn->deleg_lock);
spin_lock(&clp->cl_lock);
spin_lock(&fp->fi_lock);
status = hash_delegation_locked(dp, fp);
spin_unlock(&fp->fi_lock);
spin_unlock(&clp->cl_lock);
- spin_unlock(&state_lock);
+ spin_unlock(&nn->deleg_lock);
if (status)
goto out_unlock;
nfs40_clean_admin_revoked(nn, <);
- spin_lock(&state_lock);
+ spin_lock(&nn->deleg_lock);
list_for_each_safe(pos, next, &nn->del_recall_lru) {
dp = list_entry (pos, struct nfs4_delegation, dl_recall_lru);
if (!state_expired(<, dp->dl_time))
unhash_delegation_locked(dp, SC_STATUS_REVOKED);
list_add(&dp->dl_recall_lru, &reaplist);
}
- spin_unlock(&state_lock);
+ spin_unlock(&nn->deleg_lock);
while (!list_empty(&reaplist)) {
dp = list_first_entry(&reaplist, struct nfs4_delegation,
dl_recall_lru);
INIT_LIST_HEAD(&nn->client_lru);
INIT_LIST_HEAD(&nn->close_lru);
INIT_LIST_HEAD(&nn->del_recall_lru);
+ spin_lock_init(&nn->deleg_lock);
spin_lock_init(&nn->client_lock);
spin_lock_init(&nn->s2s_cp_lock);
idr_init(&nn->s2s_cp_stateids);
locks_end_grace(&nn->nfsd4_manager);
INIT_LIST_HEAD(&reaplist);
- spin_lock(&state_lock);
+ spin_lock(&nn->deleg_lock);
list_for_each_safe(pos, next, &nn->del_recall_lru) {
dp = list_entry (pos, struct nfs4_delegation, dl_recall_lru);
unhash_delegation_locked(dp, SC_STATUS_CLOSED);
list_add(&dp->dl_recall_lru, &reaplist);
}
- spin_unlock(&state_lock);
+ spin_unlock(&nn->deleg_lock);
list_for_each_safe(pos, next, &reaplist) {
dp = list_entry (pos, struct nfs4_delegation, dl_recall_lru);
list_del_init(&dp->dl_recall_lru);
struct nfsd_file *nf)
{
struct nfs4_client *clp = cstate->clp;
+ struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
struct nfs4_delegation *dp;
struct file_lease *fl;
struct nfs4_file *fp, *rfp;
}
/* if this client already has one, return that it's unavailable */
- spin_lock(&state_lock);
+ spin_lock(&nn->deleg_lock);
spin_lock(&fp->fi_lock);
/* existing delegation? */
if (nfs4_delegation_exists(clp, fp)) {
++fp->fi_delegees;
}
spin_unlock(&fp->fi_lock);
- spin_unlock(&state_lock);
+ spin_unlock(&nn->deleg_lock);
if (status) {
put_nfs4_file(fp);
* trying to set a delegation on the same file. If that happens,
* then just say UNAVAIL.
*/
- spin_lock(&state_lock);
+ spin_lock(&nn->deleg_lock);
spin_lock(&clp->cl_lock);
spin_lock(&fp->fi_lock);
status = hash_delegation_locked(dp, fp);
spin_unlock(&fp->fi_lock);
spin_unlock(&clp->cl_lock);
- spin_unlock(&state_lock);
+ spin_unlock(&nn->deleg_lock);
if (!status) {
put_nfs4_file(fp);