return NULL;
}
- mutex_lock(&dqp->q_qlock);
- if (dqp->q_flags & XFS_DQFLAG_FREEING) {
- mutex_unlock(&dqp->q_qlock);
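+ /* A dead lockref means the dquot is being freed; back off and retry. */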
+ if (!lockref_get_not_dead(&dqp->q_lockref)) {
mutex_unlock(&qi->qi_tree_lock);
trace_xfs_dqget_freeing(dqp);
delay(1);
goto restart;
}
-
- dqp->q_nrefs++;
mutex_unlock(&qi->qi_tree_lock);
trace_xfs_dqget_hit(dqp);
XFS_STATS_INC(mp, xs_qm_dqcachehits);
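+ /*
+ * The reference taken above keeps the dquot alive, so the q_qlock can
+ * now be acquired outside the tree lock.
+ */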
+ mutex_lock(&dqp->q_qlock);
return dqp;
}
/* Return a locked dquot to the caller, with a reference taken. */
mutex_lock(&dqp->q_qlock);
- dqp->q_nrefs = 1;
+ lockref_init(&dqp->q_lockref);
qi->qi_dquots++;
out_unlock:
xfs_qm_dqput(
struct xfs_dquot *dqp)
{
- ASSERT(dqp->q_nrefs > 0);
ASSERT(XFS_DQ_IS_LOCKED(dqp));
trace_xfs_dqput(dqp);
- if (--dqp->q_nrefs == 0) {
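+ /*
+ * lockref_put_or_lock() drops the reference and returns true unless it
+ * is the last one, in which case it returns false with the lockref
+ * spinlock held so the final put can be handled below.
+ */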
+ if (lockref_put_or_lock(&dqp->q_lockref))
+ goto out_unlock;
+
+ if (!--dqp->q_lockref.count) {
struct xfs_quotainfo *qi = dqp->q_mount->m_quotainfo;
trace_xfs_dqput_free(dqp);
if (list_lru_add_obj(&qi->qi_lru, &dqp->q_lru))
XFS_STATS_INC(dqp->q_mount, xs_qm_dquot_unused);
}
+ spin_unlock(&dqp->q_lockref.lock);
+out_unlock:
mutex_unlock(&dqp->q_qlock);
}
void *data)
{
struct xfs_quotainfo *qi = dqp->q_mount->m_quotainfo;
- int error = -EAGAIN;
- mutex_lock(&dqp->q_qlock);
- if ((dqp->q_flags & XFS_DQFLAG_FREEING) || dqp->q_nrefs != 0)
- goto out_unlock;
-
- dqp->q_flags |= XFS_DQFLAG_FREEING;
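+ /*
+ * Mark the lockref dead so that lockref_get_not_dead() fails and no new
+ * references can be taken while the dquot is torn down.
+ */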
+ spin_lock(&dqp->q_lockref.lock);
+ if (dqp->q_lockref.count > 0 || __lockref_is_dead(&dqp->q_lockref)) {
+ spin_unlock(&dqp->q_lockref.lock);
+ return -EAGAIN;
+ }
+ lockref_mark_dead(&dqp->q_lockref);
+ spin_unlock(&dqp->q_lockref.lock);
+ mutex_lock(&dqp->q_qlock);
xfs_qm_dqunpin_wait(dqp);
xfs_dqflock(dqp);
*/
if (XFS_DQ_IS_DIRTY(dqp)) {
struct xfs_buf *bp = NULL;
+ int error;
/*
* We don't care about getting disk errors here. We need
*/
error = xfs_dquot_use_attached_buf(dqp, &bp);
if (error == -EAGAIN) {
- xfs_dqfunlock(dqp);
- dqp->q_flags &= ~XFS_DQFLAG_FREEING;
- goto out_unlock;
+ /* Resurrect the refcount from the dead. */
+ dqp->q_lockref.count = 0;
+ goto out_funlock;
}
if (!bp)
goto out_funlock;
xfs_qm_dqdestroy(dqp);
return 0;
-
-out_unlock:
- mutex_unlock(&dqp->q_qlock);
- return error;
}
/*
struct xfs_qm_isolate *isol = arg;
enum lru_status ret = LRU_SKIP;
- if (!mutex_trylock(&dqp->q_qlock))
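+ /*
+ * Holding the lockref spinlock stabilizes the reference count and the
+ * dead state, which is all we need to decide whether to reclaim.
+ */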
+ if (!spin_trylock(&dqp->q_lockref.lock))
goto out_miss_busy;
/*
* from the LRU, leave it for the freeing task to complete the freeing
* process rather than risk it being free from under us here.
*/
- if (dqp->q_flags & XFS_DQFLAG_FREEING)
+ if (__lockref_is_dead(&dqp->q_lockref))
goto out_miss_unlock;
/*
* again.
*/
ret = LRU_ROTATE;
- if (XFS_DQ_IS_DIRTY(dqp) || atomic_read(&dqp->q_pincount) > 0) {
+ if (XFS_DQ_IS_DIRTY(dqp) || atomic_read(&dqp->q_pincount) > 0)
goto out_miss_unlock;
- }
/*
* This dquot has acquired a reference in the meantime remove it from
* the freelist and try again.
*/
- if (dqp->q_nrefs) {
- mutex_unlock(&dqp->q_qlock);
+ if (dqp->q_lockref.count) {
+ spin_unlock(&dqp->q_lockref.lock);
XFS_STATS_INC(dqp->q_mount, xs_qm_dqwants);
trace_xfs_dqreclaim_want(dqp);
/*
* Prevent lookups now that we are past the point of no return.
*/
- dqp->q_flags |= XFS_DQFLAG_FREEING;
- mutex_unlock(&dqp->q_qlock);
+ lockref_mark_dead(&dqp->q_lockref);
+ spin_unlock(&dqp->q_lockref.lock);
- ASSERT(dqp->q_nrefs == 0);
list_lru_isolate_move(lru, &dqp->q_lru, &isol->dispose);
XFS_STATS_DEC(dqp->q_mount, xs_qm_dquot_unused);
trace_xfs_dqreclaim_done(dqp);
return LRU_REMOVED;
out_miss_unlock:
- mutex_unlock(&dqp->q_qlock);
+ spin_unlock(&dqp->q_lockref.lock);
out_miss_busy:
trace_xfs_dqreclaim_busy(dqp);
XFS_STATS_INC(dqp->q_mount, xs_qm_dqreclaim_misses);
struct xfs_buf *bp = NULL;
int error = 0;
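+ /* Skip dquots being freed; otherwise hold a reference across the flush. */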
+ if (!lockref_get_not_dead(&dqp->q_lockref))
+ return 0;
+
mutex_lock(&dqp->q_qlock);
- if (dqp->q_flags & XFS_DQFLAG_FREEING)
- goto out_unlock;
if (!XFS_DQ_IS_DIRTY(dqp))
goto out_unlock;
xfs_buf_delwri_queue(bp, buffer_list);
xfs_buf_relse(bp);
out_unlock:
- mutex_unlock(&dqp->q_qlock);
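+ /* xfs_qm_dqput() unlocks the dquot and drops the reference taken above. */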
+ xfs_qm_dqput(dqp);
return error;
}