* However, dqiterate gave us a locked dquot, so drop the dquot lock to
* get the ILOCK.
*/
- xfs_dqunlock(dq);
+ mutex_unlock(&dq->q_qlock);
xchk_ilock(sc, XFS_ILOCK_SHARED);
- xfs_dqlock(dq);
+ mutex_lock(&dq->q_qlock);
/*
* dqiterate gave us a locked dquot, so drop the dquot lock to get the
* ILOCK_EXCL.
*/
- xfs_dqunlock(dq);
+ mutex_unlock(&dq->q_qlock);
xchk_ilock(sc, XFS_ILOCK_EXCL);
- xfs_dqlock(dq);
+ mutex_lock(&dq->q_qlock);
error = xrep_quota_item_bmap(sc, dq, &dirty);
xchk_iunlock(sc, XFS_ILOCK_EXCL);
}
xfs_trans_log_dquot(sc->tp, dq);
error = xfs_trans_roll(&sc->tp);
- xfs_dqlock(dq);
+ mutex_lock(&dq->q_qlock);
return error;
}
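Both hunks above follow the same discipline: for quota operations the lock order is ILOCK first, then the dquot lock, but dqiterate hands back a locked dquot, so the order has to be re-established by hand. A minimal sketch of that dance with the open-coded mutex calls; the helper name is hypothetical and not part of this patch:

/* Hypothetical helper: re-establish the ILOCK -> q_qlock order. */
static void
example_relock_for_ilock(
	struct xfs_scrub	*sc,
	struct xfs_dquot	*dq,
	unsigned int		ilock_flags)
{
	mutex_unlock(&dq->q_qlock);	/* give up the dquot lock... */
	xchk_ilock(sc, ilock_flags);	/* ...take the ILOCK first... */
	mutex_lock(&dq->q_qlock);	/* ...then retake the dquot lock */
}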
int error = 0;
/* Unlock the dquot just long enough to allocate a transaction. */
- xfs_dqunlock(dq);
+ mutex_unlock(&dq->q_qlock);
error = xchk_trans_alloc(xqc->sc, 0);
- xfs_dqlock(dq);
+ mutex_lock(&dq->q_qlock);
if (error)
return error;
* dquot).
*/
error = xrep_trans_commit(xqc->sc);
- xfs_dqlock(dq);
+ mutex_lock(&dq->q_qlock);
return error;
out_unlock:
xchk_trans_cancel(xqc->sc);
/* Re-lock the dquot so the caller can put the reference. */
- xfs_dqlock(dq);
+ mutex_lock(&dq->q_qlock);
return error;
}
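The same unlock/relock bracket wraps transaction allocation and commit: reserving log space can sleep, so q_qlock must not be held across it. A condensed sketch, assuming the caller holds q_qlock on entry and wants it back on exit; the helper is illustrative only:

/* Hypothetical helper: drop q_qlock across a sleeping allocation. */
static int
example_trans_alloc_relocked(
	struct xfs_scrub	*sc,
	struct xfs_dquot	*dq)
{
	int			error;

	mutex_unlock(&dq->q_qlock);
	error = xchk_trans_alloc(sc, 0);	/* may sleep for log space */
	mutex_lock(&dq->q_qlock);
	return error;
}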
*
 * ip->i_lock
 *   qi->qi_tree_lock
- *     dquot->q_qlock (xfs_dqlock() and friends)
+ *     dquot->q_qlock
 *       dquot->q_flush (xfs_dqflock() and friends)
 *       qi->qi_lru_lock
*
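Read top to bottom, the hierarchy says a lookup takes qi_tree_lock before q_qlock, which is exactly what the lookup path below does. A sketch of a correctly ordered cache lookup, assuming tree, id, and qi come from the surrounding function:

	mutex_lock(&qi->qi_tree_lock);
	dqp = radix_tree_lookup(tree, id);
	if (dqp) {
		/* q_qlock nests inside qi_tree_lock, per the order above */
		mutex_lock(&dqp->q_qlock);
	}
	mutex_unlock(&qi->qi_tree_lock);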
return NULL;
}
- xfs_dqlock(dqp);
+ mutex_lock(&dqp->q_qlock);
if (dqp->q_flags & XFS_DQFLAG_FREEING) {
- xfs_dqunlock(dqp);
+ mutex_unlock(&dqp->q_qlock);
mutex_unlock(&qi->qi_tree_lock);
trace_xfs_dqget_freeing(dqp);
delay(1);
}
/* Return a locked dquot to the caller, with a reference taken. */
- xfs_dqlock(dqp);
+ mutex_lock(&dqp->q_qlock);
dqp->q_nrefs = 1;
qi->qi_dquots++;
if (dqp1) {
xfs_qm_dqdestroy(dqp);
dqp = dqp1;
- xfs_dqlock(dqp);
+ mutex_lock(&dqp->q_qlock);
goto dqret;
}
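Reading a dquot in from disk happens without qi_tree_lock held, so two threads can instantiate the same id concurrently; the loser throws its copy away and takes the winner's, locked. The hunk above resolves exactly that race; annotated:

	/* dqp1: a dquot someone else inserted for the same id. */
	if (dqp1) {
		xfs_qm_dqdestroy(dqp);		/* free our losing copy */
		dqp = dqp1;
		mutex_lock(&dqp->q_qlock);	/* hand back the winner locked */
		goto dqret;
	}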
} else {
if (list_lru_add_obj(&qi->qi_lru, &dqp->q_lru))
XFS_STATS_INC(dqp->q_mount, xs_qm_dquot_unused);
}
- xfs_dqunlock(dqp);
+ mutex_unlock(&dqp->q_qlock);
}
/*
trace_xfs_dqrele(dqp);
- xfs_dqlock(dqp);
+ mutex_lock(&dqp->q_qlock);
/*
* We don't care to flush it if the dquot is dirty here.
* That will create stutters that we want to avoid.
complete(&dqp->q_flush);
}
-static inline int xfs_dqlock_nowait(struct xfs_dquot *dqp)
-{
- return mutex_trylock(&dqp->q_qlock);
-}
-
-static inline void xfs_dqlock(struct xfs_dquot *dqp)
-{
- mutex_lock(&dqp->q_qlock);
-}
-
-static inline void xfs_dqunlock(struct xfs_dquot *dqp)
-{
- mutex_unlock(&dqp->q_qlock);
-}
-
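For reference, the one-for-one mapping applied throughout this patch; no call site changes locking semantics:

/*
 * xfs_dqlock(dqp)		-> mutex_lock(&dqp->q_qlock)
 * xfs_dqunlock(dqp)		-> mutex_unlock(&dqp->q_qlock)
 * xfs_dqlock_nowait(dqp)	-> mutex_trylock(&dqp->q_qlock)
 *
 * mutex_trylock() returns 1 on success and 0 on contention, the same
 * polarity as the old wrapper, so the !...trylock tests keep their sense.
 */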
static inline int
xfs_dquot_type(const struct xfs_dquot *dqp)
{
static inline struct xfs_dquot *xfs_qm_dqhold(struct xfs_dquot *dqp)
{
- xfs_dqlock(dqp);
+ mutex_lock(&dqp->q_qlock);
dqp->q_nrefs++;
- xfs_dqunlock(dqp);
+ mutex_unlock(&dqp->q_qlock);
return dqp;
}
if (atomic_read(&dqp->q_pincount) > 0)
return XFS_ITEM_PINNED;
- if (!xfs_dqlock_nowait(dqp))
+ if (!mutex_trylock(&dqp->q_qlock))
return XFS_ITEM_LOCKED;
/*
out_relock_ail:
spin_lock(&lip->li_ailp->ail_lock);
out_unlock:
- xfs_dqunlock(dqp);
+ mutex_unlock(&dqp->q_qlock);
return rval;
}
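The push path runs under the AIL lock and must not sleep, so it probes the pin count and trylocks the dquot, reporting a status instead of blocking. A condensed, hypothetical form of those checks (XFS_ITEM_* are the existing log item push return codes):

static uint
example_dquot_push_checks(
	struct xfs_dquot	*dqp)
{
	if (atomic_read(&dqp->q_pincount) > 0)
		return XFS_ITEM_PINNED;		/* waiting on log I/O */
	if (!mutex_trylock(&dqp->q_qlock))
		return XFS_ITEM_LOCKED;		/* retry on a later push */
	return XFS_ITEM_SUCCESS;
}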
* transaction layer, within trans_commit. Hence, no LI_HOLD flag
* for the logitem.
*/
- xfs_dqunlock(dqp);
+ mutex_unlock(&dqp->q_qlock);
}
STATIC void
struct xfs_quotainfo *qi = dqp->q_mount->m_quotainfo;
int error = -EAGAIN;
- xfs_dqlock(dqp);
+ mutex_lock(&dqp->q_qlock);
if ((dqp->q_flags & XFS_DQFLAG_FREEING) || dqp->q_nrefs != 0)
goto out_unlock;
!test_bit(XFS_LI_IN_AIL, &dqp->q_logitem.qli_item.li_flags));
xfs_dqfunlock(dqp);
- xfs_dqunlock(dqp);
+ mutex_unlock(&dqp->q_qlock);
radix_tree_delete(xfs_dquot_tree(qi, xfs_dquot_type(dqp)), dqp->q_id);
qi->qi_dquots--;
return 0;
out_unlock:
- xfs_dqunlock(dqp);
+ mutex_unlock(&dqp->q_qlock);
return error;
}
* that the dquot returned is the one that should go in the inode.
*/
*IO_idqpp = dqp;
- xfs_dqunlock(dqp);
+ mutex_unlock(&dqp->q_qlock);
return 0;
}
struct xfs_qm_isolate *isol = arg;
enum lru_status ret = LRU_SKIP;
- if (!xfs_dqlock_nowait(dqp))
+ if (!mutex_trylock(&dqp->q_qlock))
goto out_miss_busy;
/*
* the freelist and try again.
*/
if (dqp->q_nrefs) {
- xfs_dqunlock(dqp);
+ mutex_unlock(&dqp->q_qlock);
XFS_STATS_INC(dqp->q_mount, xs_qm_dqwants);
trace_xfs_dqreclaim_want(dqp);
* Prevent lookups now that we are past the point of no return.
*/
dqp->q_flags |= XFS_DQFLAG_FREEING;
- xfs_dqunlock(dqp);
+ mutex_unlock(&dqp->q_qlock);
ASSERT(dqp->q_nrefs == 0);
list_lru_isolate_move(lru, &dqp->q_lru, &isol->dispose);
return LRU_REMOVED;
out_miss_unlock:
- xfs_dqunlock(dqp);
+ mutex_unlock(&dqp->q_qlock);
out_miss_busy:
trace_xfs_dqreclaim_busy(dqp);
XFS_STATS_INC(dqp->q_mount, xs_qm_dqreclaim_misses);
struct xfs_buf *bp = NULL;
int error = 0;
- xfs_dqlock(dqp);
+ mutex_lock(&dqp->q_qlock);
if (dqp->q_flags & XFS_DQFLAG_FREEING)
goto out_unlock;
if (!XFS_DQ_IS_DIRTY(dqp))
xfs_buf_delwri_queue(bp, buffer_list);
xfs_buf_relse(bp);
out_unlock:
- xfs_dqunlock(dqp);
+ mutex_unlock(&dqp->q_qlock);
return error;
}
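This writeback path has two early exits: a dquot marked XFS_DQFLAG_FREEING belongs to reclaim, and a clean dquot has nothing to flush. Since the hunk shows it piecemeal, a condensed sketch of the whole gate:

	mutex_lock(&dqp->q_qlock);
	if (dqp->q_flags & XFS_DQFLAG_FREEING)
		goto out_unlock;		/* reclaim owns this dquot */
	if (!XFS_DQ_IS_DIRTY(dqp))
		goto out_unlock;		/* nothing to write back */
	/* ... flush to bp, then batch it for delayed write ... */
	xfs_buf_delwri_queue(bp, buffer_list);
	xfs_buf_relse(bp);
out_unlock:
	mutex_unlock(&dqp->q_qlock);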
/*
* Get the ilock in the right order.
*/
- xfs_dqunlock(uq);
+ mutex_unlock(&uq->q_qlock);
lockflags = XFS_ILOCK_SHARED;
xfs_ilock(ip, lockflags);
} else {
ASSERT(error != -ENOENT);
goto error_rele;
}
- xfs_dqunlock(gq);
+ mutex_unlock(&gq->q_qlock);
lockflags = XFS_ILOCK_SHARED;
xfs_ilock(ip, lockflags);
} else {
ASSERT(error != -ENOENT);
goto error_rele;
}
- xfs_dqunlock(pq);
+ mutex_unlock(&pq->q_qlock);
lockflags = XFS_ILOCK_SHARED;
xfs_ilock(ip, lockflags);
} else {
* back now.
*/
tp->t_flags |= XFS_TRANS_DIRTY;
- xfs_dqlock(prevdq);
+ mutex_lock(&prevdq->q_qlock);
if (isrt) {
ASSERT(prevdq->q_rtb.reserved >= ip->i_delayed_blks);
prevdq->q_rtb.reserved -= ip->i_delayed_blks;
ASSERT(prevdq->q_blk.reserved >= ip->i_delayed_blks);
prevdq->q_blk.reserved -= ip->i_delayed_blks;
}
- xfs_dqunlock(prevdq);
+ mutex_unlock(&prevdq->q_qlock);
/*
* Take an extra reference, because the inode is going to keep
}
defq = xfs_get_defquota(q, xfs_dquot_type(dqp));
- xfs_dqunlock(dqp);
+ mutex_unlock(&dqp->q_qlock);
error = xfs_trans_alloc(mp, &M_RES(mp)->tr_qm_setqlim, 0, 0, 0, &tp);
if (error)
goto out_rele;
- xfs_dqlock(dqp);
+ mutex_lock(&dqp->q_qlock);
xfs_trans_dqjoin(tp, dqp);
/*
unsigned int i;
ASSERT(q[0].qt_dquot != NULL);
if (q[1].qt_dquot == NULL) {
- xfs_dqlock(q[0].qt_dquot);
+ mutex_lock(&q[0].qt_dquot->q_qlock);
xfs_trans_dqjoin(tp, q[0].qt_dquot);
} else if (q[2].qt_dquot == NULL) {
xfs_dqlock2(q[0].qt_dquot, q[1].qt_dquot);
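xfs_dqlock2() survives this patch: when two dquots must be held at once it locks them in ascending q_id order to rule out ABBA deadlocks, annotating the second mutex for lockdep. A sketch of that ordering, assuming both pointers are non-NULL and distinct and that XFS_QLOCK_NESTED is the existing lockdep subclass:

	if (d1->q_id > d2->q_id)
		swap(d1, d2);			/* lower id locks first */
	mutex_lock(&d1->q_qlock);
	mutex_lock_nested(&d2->q_qlock, XFS_QLOCK_NESTED);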
locked = already_locked;
if (qtrx->qt_blk_res) {
if (!locked) {
- xfs_dqlock(dqp);
+ mutex_lock(&dqp->q_qlock);
locked = true;
}
dqp->q_blk.reserved -=
}
if (qtrx->qt_ino_res) {
if (!locked) {
- xfs_dqlock(dqp);
+ mutex_lock(&dqp->q_qlock);
locked = true;
}
dqp->q_ino.reserved -=
if (qtrx->qt_rtblk_res) {
if (!locked) {
- xfs_dqlock(dqp);
+ mutex_lock(&dqp->q_qlock);
locked = true;
}
dqp->q_rtb.reserved -=
(xfs_qcnt_t)qtrx->qt_rtblk_res;
}
if (locked && !already_locked)
- xfs_dqunlock(dqp);
+ mutex_unlock(&dqp->q_qlock);
}
}
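The loop above takes q_qlock lazily: only if some reservation actually needs trimming, at most once per dquot, and never when the caller already holds it. The skeleton of that pattern, condensed and hypothetical:

	bool	locked = already_locked;

	if (needs_adjust && !locked) {
		mutex_lock(&dqp->q_qlock);
		locked = true;
	}
	/* ... trim dqp->q_blk/q_ino/q_rtb.reserved as needed ... */
	if (locked && !already_locked)
		mutex_unlock(&dqp->q_qlock);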
struct xfs_dquot_res *blkres;
struct xfs_quota_limits *qlim;
- xfs_dqlock(dqp);
+ mutex_lock(&dqp->q_qlock);
defq = xfs_get_defquota(q, xfs_dquot_type(dqp));
XFS_IS_CORRUPT(mp, dqp->q_ino.reserved < dqp->q_ino.count))
goto error_corrupt;
- xfs_dqunlock(dqp);
+ mutex_unlock(&dqp->q_qlock);
return 0;
error_return:
- xfs_dqunlock(dqp);
+ mutex_unlock(&dqp->q_qlock);
if (xfs_dquot_type(dqp) == XFS_DQTYPE_PROJ)
return -ENOSPC;
return -EDQUOT;
error_corrupt:
- xfs_dqunlock(dqp);
+ mutex_unlock(&dqp->q_qlock);
xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
xfs_fs_mark_sick(mp, XFS_SICK_FS_QUOTACHECK);
return -EFSCORRUPTED;
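Project quotas commonly emulate per-directory-tree filesystems, so overrunning one reports ENOSPC rather than EDQUOT, while in-core corruption shuts the filesystem down instead. A hypothetical condensed form of the three exits above:

static int
example_dqresv_error(
	struct xfs_mount	*mp,
	struct xfs_dquot	*dqp,
	bool			corrupt)
{
	mutex_unlock(&dqp->q_qlock);
	if (corrupt) {
		xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
		return -EFSCORRUPTED;
	}
	if (xfs_dquot_type(dqp) == XFS_DQTYPE_PROJ)
		return -ENOSPC;	/* tree quota looks like a full fs */
	return -EDQUOT;
}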