return LRU_SKIP;
/*
- * Decrement the b_lru_ref count unless the value is already
- * zero. If the value is already zero, we need to reclaim the
- * buffer, otherwise it gets another trip through the LRU.
+ * If the buffer is in use, remove it from the LRU for now. We can't
+ * free it while someone is using it, and we should also not count an
+ * eviction pass against it, just as if it hadn't been added to the LRU
+ * yet.
*/
- if (atomic_add_unless(&bp->b_lru_ref, -1, 0)) {
+ if (bp->b_lockref.count > 0) {
+ list_lru_isolate(lru, &bp->b_lru);
spin_unlock(&bp->b_lockref.lock);
- return LRU_ROTATE;
+ return LRU_REMOVED;
}
/*
- * If the buffer is in use, remove it from the LRU for now as we can't
- * free it. It will be freed when the last reference drops.
+ * Decrement the b_lru_ref count unless the value is already
+ * zero. If the value is already zero, we need to reclaim the
+ * buffer, otherwise it gets another trip through the LRU.
*/
- if (bp->b_lockref.count > 0) {
- list_lru_isolate(lru, &bp->b_lru);
+ if (atomic_add_unless(&bp->b_lru_ref, -1, 0)) {
spin_unlock(&bp->b_lockref.lock);
- return LRU_REMOVED;
+ return LRU_ROTATE;
}
lockref_mark_dead(&bp->b_lockref);