return dm_hash_locks_index(block, num_locks);
}
-static inline void cache_read_lock(struct dm_buffer_cache *bc, sector_t block)
+/* Get the buffer tree in the cache for the given block. Doesn't lock it. */
+static inline struct buffer_tree *cache_get_tree(struct dm_buffer_cache *bc,
+ sector_t block)
+{
+ return &bc->trees[cache_index(block, bc->num_locks)];
+}
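+
+/*
+ * Intended calling pattern (an illustrative sketch, not a real call site):
+ * look the tree up once per block and hand it to the helpers below, so
+ * cache_index() need not be recomputed on every operation.
+ *
+ *	struct buffer_tree *tree = cache_get_tree(bc, block);
+ *
+ *	cache_read_lock(bc, tree);
+ *	b = __cache_get(&tree->root, block);
+ *	cache_read_unlock(bc, tree);
+ */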
+
+/* Lock the given buffer tree in the cache for reading. */
+static inline void cache_read_lock(struct dm_buffer_cache *bc,
+ struct buffer_tree *tree)
{
if (static_branch_unlikely(&no_sleep_enabled) && bc->no_sleep)
- read_lock_bh(&bc->trees[cache_index(block, bc->num_locks)].u.spinlock);
+ read_lock_bh(&tree->u.spinlock);
else
- down_read(&bc->trees[cache_index(block, bc->num_locks)].u.lock);
+ down_read(&tree->u.lock);
}
-static inline void cache_read_unlock(struct dm_buffer_cache *bc, sector_t block)
+/* Release the read lock on the given buffer tree in the cache. */
+static inline void cache_read_unlock(struct dm_buffer_cache *bc,
+ struct buffer_tree *tree)
{
if (static_branch_unlikely(&no_sleep_enabled) && bc->no_sleep)
- read_unlock_bh(&bc->trees[cache_index(block, bc->num_locks)].u.spinlock);
+ read_unlock_bh(&tree->u.spinlock);
else
- up_read(&bc->trees[cache_index(block, bc->num_locks)].u.lock);
+ up_read(&tree->u.lock);
}
-static inline void cache_write_lock(struct dm_buffer_cache *bc, sector_t block)
+/* Lock the given buffer tree in the cache for writing. */
+static inline void cache_write_lock(struct dm_buffer_cache *bc,
+ struct buffer_tree *tree)
{
if (static_branch_unlikely(&no_sleep_enabled) && bc->no_sleep)
- write_lock_bh(&bc->trees[cache_index(block, bc->num_locks)].u.spinlock);
+ write_lock_bh(&tree->u.spinlock);
else
- down_write(&bc->trees[cache_index(block, bc->num_locks)].u.lock);
+ down_write(&tree->u.lock);
}
-static inline void cache_write_unlock(struct dm_buffer_cache *bc, sector_t block)
+/* Release the write lock on the given buffer tree in the cache. */
+static inline void cache_write_unlock(struct dm_buffer_cache *bc,
+ struct buffer_tree *tree)
{
if (static_branch_unlikely(&no_sleep_enabled) && bc->no_sleep)
- write_unlock_bh(&bc->trees[cache_index(block, bc->num_locks)].u.spinlock);
+ write_unlock_bh(&tree->u.spinlock);
else
- up_write(&bc->trees[cache_index(block, bc->num_locks)].u.lock);
+ up_write(&tree->u.lock);
}
/*
WRITE_ONCE(b->last_accessed, jiffies);
}
-static struct dm_buffer *cache_get(struct dm_buffer_cache *bc, sector_t block)
+static struct dm_buffer *cache_get(struct dm_buffer_cache *bc,
+ struct buffer_tree *tree, sector_t block)
{
struct dm_buffer *b;
- cache_read_lock(bc, block);
- b = __cache_get(&bc->trees[cache_index(block, bc->num_locks)].root, block);
+ /* The caller must pass tree == cache_get_tree(bc, block). */
+ cache_read_lock(bc, tree);
+ b = __cache_get(&tree->root, block);
if (b) {
lru_reference(&b->lru);
__cache_inc_buffer(b);
}
- cache_read_unlock(bc, block);
+ cache_read_unlock(bc, tree);
return b;
}
b = le_to_buffer(le);
/* __evict_pred will have locked the appropriate tree. */
- rb_erase(&b->node, &bc->trees[cache_index(b->block, bc->num_locks)].root);
+ rb_erase(&b->node, &cache_get_tree(bc, b->block)->root);
return b;
}
/*
* Mark a buffer as clean or dirty. Not threadsafe.
*/
-static void cache_mark(struct dm_buffer_cache *bc, struct dm_buffer *b, int list_mode)
+static void cache_mark(struct dm_buffer_cache *bc, struct buffer_tree *tree,
+ struct dm_buffer *b, int list_mode)
{
- cache_write_lock(bc, b->block);
+ /* The caller must pass tree == cache_get_tree(bc, b->block). */
+ cache_write_lock(bc, tree);
if (list_mode != b->list_mode) {
lru_remove(&bc->lru[b->list_mode], &b->lru);
b->list_mode = list_mode;
lru_insert(&bc->lru[b->list_mode], &b->lru);
}
- cache_write_unlock(bc, b->block);
+ cache_write_unlock(bc, tree);
}
/*--------------*/
return true;
}
-static bool cache_insert(struct dm_buffer_cache *bc, struct dm_buffer *b)
+static bool cache_insert(struct dm_buffer_cache *bc, struct buffer_tree *tree,
+ struct dm_buffer *b)
{
bool r;
if (WARN_ON_ONCE(b->list_mode >= LIST_SIZE))
return false;
- cache_write_lock(bc, b->block);
+ /* The caller must pass tree == cache_get_tree(bc, b->block). */
+ cache_write_lock(bc, tree);
BUG_ON(atomic_read(&b->hold_count) != 1);
- r = __cache_insert(&bc->trees[cache_index(b->block, bc->num_locks)].root, b);
+ r = __cache_insert(&tree->root, b);
if (r)
lru_insert(&bc->lru[b->list_mode], &b->lru);
- cache_write_unlock(bc, b->block);
+ cache_write_unlock(bc, tree);
return r;
}
*
* Not threadsafe.
*/
-static bool cache_remove(struct dm_buffer_cache *bc, struct dm_buffer *b)
+static bool cache_remove(struct dm_buffer_cache *bc, struct buffer_tree *tree,
+ struct dm_buffer *b)
{
bool r;
- cache_write_lock(bc, b->block);
+ /* The caller must pass tree == cache_get_tree(bc, b->block). */
+ cache_write_lock(bc, tree);
if (atomic_read(&b->hold_count) != 1) {
r = false;
} else {
r = true;
- rb_erase(&b->node, &bc->trees[cache_index(b->block, bc->num_locks)].root);
+ rb_erase(&b->node, &tree->root);
lru_remove(&bc->lru[b->list_mode], &b->lru);
}
- cache_write_unlock(bc, b->block);
+ cache_write_unlock(bc, tree);
return r;
}
*--------------------------------------------------------------
*/
-static void cache_put_and_wake(struct dm_bufio_client *c, struct dm_buffer *b)
+static void cache_put_and_wake(struct dm_bufio_client *c,
+ struct buffer_tree *tree, struct dm_buffer *b)
{
bool wake;
- cache_read_lock(&c->cache, b->block);
+ /* The caller must pass tree == cache_get_tree(&c->cache, b->block). */
+ cache_read_lock(&c->cache, tree);
BUG_ON(!atomic_read(&b->hold_count));
wake = atomic_dec_and_test(&b->hold_count);
- cache_read_unlock(&c->cache, b->block);
+ cache_read_unlock(&c->cache, tree);
/*
* Relying on waitqueue_active() is racey, but we sleep
* This assumes you have already checked the cache to see if the buffer
* is already present (it will recheck after dropping the lock for allocation).
*/
-static struct dm_buffer *__bufio_new(struct dm_bufio_client *c, sector_t block,
+static struct dm_buffer *__bufio_new(struct dm_bufio_client *c,
+ struct buffer_tree *tree, sector_t block,
enum new_flag nf, int *need_submit,
struct list_head *write_list)
{
* We've had a period where the mutex was unlocked, so need to
* recheck the buffer tree.
*/
- b = cache_get(&c->cache, block);
+ b = cache_get(&c->cache, tree, block);
if (b) {
__free_buffer_wake(new_b);
goto found_buffer;
* is set. Otherwise another thread could get it and use
* it before it had been read.
*/
- cache_insert(&c->cache, b);
+ cache_insert(&c->cache, tree, b);
return b;
found_buffer:
if (nf == NF_PREFETCH) {
- cache_put_and_wake(c, b);
+ cache_put_and_wake(c, tree, b);
return NULL;
}
* the same buffer, it would deadlock if we waited.
*/
if (nf == NF_GET && unlikely(test_bit_acquire(B_READING, &b->state))) {
- cache_put_and_wake(c, b);
+ cache_put_and_wake(c, tree, b);
return NULL;
}
enum new_flag nf, struct dm_buffer **bp,
unsigned short ioprio)
{
+ struct buffer_tree *tree;
int need_submit = 0;
struct dm_buffer *b;
* Fast path, hopefully the block is already in the cache. No need
* to get the client lock for this.
*/
- b = cache_get(&c->cache, block);
+ tree = cache_get_tree(&c->cache, block);
+ b = cache_get(&c->cache, tree, block);
if (b) {
if (nf == NF_PREFETCH) {
- cache_put_and_wake(c, b);
+ cache_put_and_wake(c, tree, b);
return NULL;
}
* the same buffer, it would deadlock if we waited.
*/
if (nf == NF_GET && unlikely(test_bit_acquire(B_READING, &b->state))) {
- cache_put_and_wake(c, b);
+ cache_put_and_wake(c, tree, b);
return NULL;
}
}
return NULL;
dm_bufio_lock(c);
- b = __bufio_new(c, block, nf, &need_submit, &write_list);
+ b = __bufio_new(c, tree, block, nf, &need_submit, &write_list);
dm_bufio_unlock(c);
}
blk_start_plug(&plug);
for (; n_blocks--; block++) {
- int need_submit;
+ struct buffer_tree *tree;
struct dm_buffer *b;
+ int need_submit;
- b = cache_get(&c->cache, block);
+ tree = cache_get_tree(&c->cache, block);
+ b = cache_get(&c->cache, tree, block);
if (b) {
/* already in cache */
- cache_put_and_wake(c, b);
+ cache_put_and_wake(c, tree, b);
continue;
}
dm_bufio_lock(c);
- b = __bufio_new(c, block, NF_PREFETCH, &need_submit,
+ b = __bufio_new(c, tree, block, NF_PREFETCH, &need_submit,
&write_list);
if (unlikely(!list_empty(&write_list))) {
dm_bufio_unlock(c);
void dm_bufio_release(struct dm_buffer *b)
{
struct dm_bufio_client *c = b->c;
+ struct buffer_tree *tree = cache_get_tree(&c->cache, b->block);
/*
* If there were errors on the buffer, and the buffer is not
dm_bufio_lock(c);
/* cache remove can fail if there are other holders */
- if (cache_remove(&c->cache, b)) {
+ if (cache_remove(&c->cache, tree, b)) {
__free_buffer_wake(b);
dm_bufio_unlock(c);
return;
dm_bufio_unlock(c);
}
- cache_put_and_wake(c, b);
+ cache_put_and_wake(c, tree, b);
}
EXPORT_SYMBOL_GPL(dm_bufio_release);
if (!test_and_set_bit(B_DIRTY, &b->state)) {
b->dirty_start = start;
b->dirty_end = end;
- cache_mark(&c->cache, b, LIST_DIRTY);
+ cache_mark(&c->cache, cache_get_tree(&c->cache, b->block), b,
+ LIST_DIRTY);
} else {
if (start < b->dirty_start)
b->dirty_start = start;
lru_iter_begin(&c->cache.lru[LIST_DIRTY], &it);
while ((e = lru_iter_next(&it, is_writing, c))) {
struct dm_buffer *b = le_to_buffer(e);
+ struct buffer_tree *tree;
+
__cache_inc_buffer(b);
BUG_ON(test_bit(B_READING, &b->state));
wait_on_bit_io(&b->state, B_WRITING, TASK_UNINTERRUPTIBLE);
}
+ tree = cache_get_tree(&c->cache, b->block);
+
if (!test_bit(B_DIRTY, &b->state) && !test_bit(B_WRITING, &b->state))
- cache_mark(&c->cache, b, LIST_CLEAN);
+ cache_mark(&c->cache, tree, b, LIST_CLEAN);
- cache_put_and_wake(c, b);
+ cache_put_and_wake(c, tree, b);
cond_resched();
}
static void forget_buffer(struct dm_bufio_client *c, sector_t block)
{
+ struct buffer_tree *tree = cache_get_tree(&c->cache, block);
struct dm_buffer *b;
- b = cache_get(&c->cache, block);
+ b = cache_get(&c->cache, tree, block);
if (b) {
if (likely(!smp_load_acquire(&b->state))) {
- if (cache_remove(&c->cache, b))
+ if (cache_remove(&c->cache, tree, b))
__free_buffer_wake(b);
else
- cache_put_and_wake(c, b);
+ cache_put_and_wake(c, tree, b);
} else {
- cache_put_and_wake(c, b);
+ cache_put_and_wake(c, tree, b);
}
}
}