xchk_dqiter_init(&cursor, sc, dqtype);
while ((error = xchk_dquot_iter(&cursor, &dq)) == 1) {
error = xchk_quota_item(&sqi, dq);
- xfs_qm_dqput(dq);
+ mutex_unlock(&dq->q_qlock);
+ xfs_qm_dqrele(dq);
if (error)
break;
}
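Reviewer aside, not part of the patch: these loops assume the scrub iterator convention that xchk_dquot_iter() returns 1 with the next dquot locked and referenced, 0 once the walk completes, and a negative errno on failure, which is why every iteration pairs mutex_unlock() with xfs_qm_dqrele(). A minimal caller sketch under that assumption, with do_work() as a hypothetical per-dquot helper:

	struct xchk_dqiter	cursor = { };
	struct xfs_dquot	*dq;
	int			error;

	xchk_dqiter_init(&cursor, sc, dqtype);
	while ((error = xchk_dquot_iter(&cursor, &dq)) == 1) {
		error = do_work(dq);		/* hypothetical helper */
		mutex_unlock(&dq->q_qlock);	/* drop the dquot lock first */
		xfs_qm_dqrele(dq);		/* then drop the reference */
		if (error)
			break;
	}
	return error;	/* 0 when the walk completed, < 0 on failure */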
xchk_dqiter_init(&cursor, sc, dqtype);
while ((error = xchk_dquot_iter(&cursor, &dq)) == 1) {
error = xrep_quota_item(&rqi, dq);
- xfs_qm_dqput(dq);
+ mutex_unlock(&dq->q_qlock);
+ xfs_qm_dqrele(dq);
if (error)
break;
}
return error;
error = xqcheck_compare_dquot(xqc, dqtype, dq);
- xfs_qm_dqput(dq);
+ mutex_unlock(&dq->q_qlock);
+ xfs_qm_dqrele(dq);
if (error)
return error;
xchk_dqiter_init(&cursor, sc, dqtype);
while ((error = xchk_dquot_iter(&cursor, &dq)) == 1) {
error = xqcheck_compare_dquot(xqc, dqtype, dq);
- xfs_qm_dqput(dq);
+ mutex_unlock(&dq->q_qlock);
+ xfs_qm_dqrele(dq);
if (error)
break;
}
xchk_dqiter_init(&cursor, sc, dqtype);
while ((error = xchk_dquot_iter(&cursor, &dq)) == 1) {
error = xqcheck_commit_dquot(xqc, dqtype, dq);
- xfs_qm_dqput(dq);
+ mutex_unlock(&dq->q_qlock);
+ xfs_qm_dqrele(dq);
if (error)
break;
}
return error;
error = xqcheck_commit_dquot(xqc, dqtype, dq);
- xfs_qm_dqput(dq);
+ mutex_unlock(&dq->q_qlock);
+ xfs_qm_dqrele(dq);
if (error)
return error;
return 0;
}
- xfs_qm_dqput(dqp);
+ mutex_unlock(&dqp->q_qlock);
+ xfs_qm_dqrele(dqp);
}
return error;
}
/*
- * Release a reference to the dquot (decrement ref-count) and unlock it.
- *
- * If there is a group quota attached to this dquot, carefully release that
- * too without tripping over deadlocks'n'stuff.
+ * Release a reference to the dquot.  The dquot must not be locked.
*/
void
-xfs_qm_dqput(
+xfs_qm_dqrele(
struct xfs_dquot *dqp)
{
- ASSERT(XFS_DQ_IS_LOCKED(dqp));
+ if (!dqp)
+ return;
- trace_xfs_dqput(dqp);
+ trace_xfs_dqrele(dqp);
if (lockref_put_or_lock(&dqp->q_lockref))
- goto out_unlock;
-
+ return;
if (!--dqp->q_lockref.count) {
struct xfs_quotainfo *qi = dqp->q_mount->m_quotainfo;
- trace_xfs_dqput_free(dqp);
+ trace_xfs_dqrele_free(dqp);
if (list_lru_add_obj(&qi->qi_lru, &dqp->q_lru))
XFS_STATS_INC(dqp->q_mount, xs_qm_dquot_unused);
}
spin_unlock(&dqp->q_lockref.lock);
-out_unlock:
- mutex_unlock(&dqp->q_qlock);
-}
-
-/*
- * Release a dquot. Flush it if dirty, then dqput() it.
- * dquot must not be locked.
- */
-void
-xfs_qm_dqrele(
- struct xfs_dquot *dqp)
-{
- if (!dqp)
- return;
-
- trace_xfs_dqrele(dqp);
-
- mutex_lock(&dqp->q_qlock);
- /*
- * We don't care to flush it if the dquot is dirty here.
- * That will create stutters that we want to avoid.
- * Instead we do a delayed write when we try to reclaim
- * a dirty dquot. Also xfs_sync will take part of the burden...
- */
- xfs_qm_dqput(dqp);
}
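Note, not part of the patch: with dqput folded into dqrele, a dquot obtained from xfs_qm_dqget() (which still returns it locked) is now released in two explicit steps, because xfs_qm_dqrele() no longer drops q_qlock and instead goes through the lockref_put_or_lock() fast path. A sketch of the resulting caller convention:

	struct xfs_dquot	*dqp;
	int			error;

	error = xfs_qm_dqget(mp, id, type, false, &dqp);
	if (error)
		return error;

	/* ... operate on the locked dquot ... */

	mutex_unlock(&dqp->q_qlock);	/* step 1: drop the dquot lock */
	xfs_qm_dqrele(dqp);		/* step 2: drop the reference; the
					 * final put parks the dquot on the
					 * qi_lru list for reclaim */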
int xfs_qm_dqget_uncached(struct xfs_mount *mp,
xfs_dqid_t id, xfs_dqtype_t type,
struct xfs_dquot **dqpp);
-void xfs_qm_dqput(struct xfs_dquot *dqp);
void xfs_dqlock2(struct xfs_dquot *, struct xfs_dquot *);
void xfs_dqlockn(struct xfs_dqtrx *q);
dqp->q_flags |= XFS_DQFLAG_DIRTY;
out_unlock:
- xfs_qm_dqput(dqp);
+ mutex_unlock(&dqp->q_qlock);
+ xfs_qm_dqrele(dqp);
return error;
}
xfs_buf_delwri_queue(bp, buffer_list);
xfs_buf_relse(bp);
out_unlock:
- xfs_qm_dqput(dqp);
+ mutex_unlock(&dqp->q_qlock);
+ xfs_qm_dqrele(dqp);
return error;
}
if (!xfs_qm_dqget(mp, ip->i_projid, XFS_DQTYPE_PROJ, false, &dqp)) {
xfs_fill_statvfs_from_dquot(statp, ip, dqp);
- xfs_qm_dqput(dqp);
+ mutex_unlock(&dqp->q_qlock);
+ xfs_qm_dqrele(dqp);
}
}
xfs_qm_scall_getquota_fill_qc(mp, type, dqp, dst);
out_put:
- xfs_qm_dqput(dqp);
+ mutex_unlock(&dqp->q_qlock);
+ xfs_qm_dqrele(dqp);
return error;
}
*id = dqp->q_id;
xfs_qm_scall_getquota_fill_qc(mp, type, dqp, dst);
- xfs_qm_dqput(dqp);
+ mutex_unlock(&dqp->q_qlock);
+ xfs_qm_dqrele(dqp);
return error;
}
DEFINE_DQUOT_EVENT(xfs_dqget_miss);
DEFINE_DQUOT_EVENT(xfs_dqget_freeing);
DEFINE_DQUOT_EVENT(xfs_dqget_dup);
-DEFINE_DQUOT_EVENT(xfs_dqput);
-DEFINE_DQUOT_EVENT(xfs_dqput_free);
DEFINE_DQUOT_EVENT(xfs_dqrele);
+DEFINE_DQUOT_EVENT(xfs_dqrele_free);
DEFINE_DQUOT_EVENT(xfs_dqflush);
DEFINE_DQUOT_EVENT(xfs_dqflush_force);
DEFINE_DQUOT_EVENT(xfs_dqflush_done);