1 /*
2 * Copyright (C) 2009-2011 Red Hat, Inc.
3 *
4 * Author: Mikulas Patocka <mpatocka@redhat.com>
5 *
6 * This file is released under the GPL.
7 */
8
9 #include "dm-bufio.h"
10
11 #include <linux/device-mapper.h>
12 #include <linux/dm-io.h>
13 #include <linux/slab.h>
14 #include <linux/jiffies.h>
15 #include <linux/vmalloc.h>
16 #include <linux/shrinker.h>
17 #include <linux/module.h>
18 #include <linux/rbtree.h>
19 #include <linux/stacktrace.h>
20
21 #define DM_MSG_PREFIX "bufio"
22
23 /*
24 * Memory management policy:
25 * Limit the number of buffers to DM_BUFIO_MEMORY_PERCENT of main memory
26 * or DM_BUFIO_VMALLOC_PERCENT of vmalloc memory (whichever is lower).
27 * Always allocate at least DM_BUFIO_MIN_BUFFERS buffers.
28 * Start background writeback when there are DM_BUFIO_WRITEBACK_PERCENT
29 * dirty buffers.
30 */
31 #define DM_BUFIO_MIN_BUFFERS 8
32
33 #define DM_BUFIO_MEMORY_PERCENT 2
34 #define DM_BUFIO_VMALLOC_PERCENT 25
35 #define DM_BUFIO_WRITEBACK_PERCENT 75
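
/*
 * Illustrative arithmetic (the figures are example assumptions): with
 * 8 GiB of low memory, the default cache size is
 * 8 GiB * DM_BUFIO_MEMORY_PERCENT / 100 ~= 164 MiB, further capped at
 * DM_BUFIO_VMALLOC_PERCENT of the vmalloc arena (see dm_bufio_init).
 * A client starts background writeback once its dirty buffers exceed
 * DM_BUFIO_WRITEBACK_PERCENT of its per-client buffer limit
 * (see __get_memory_limit).
 */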
36
37 /*
38 * Check buffer ages in this interval (seconds)
39 */
40 #define DM_BUFIO_WORK_TIMER_SECS 30
41
42 /*
43 * Free buffers when they are older than this (seconds)
44 */
45 #define DM_BUFIO_DEFAULT_AGE_SECS 300
46
47 /*
48 * The nr of bytes of cached data to keep around.
49 */
50 #define DM_BUFIO_DEFAULT_RETAIN_BYTES (256 * 1024)
51
52 /*
53 * The number of bvec entries that are embedded directly in the buffer.
54 * If the chunk size is larger, dm-io is used to do the io.
55 */
56 #define DM_BUFIO_INLINE_VECS 16
57
58 /*
59 * Don't try to use kmem_cache_alloc for blocks larger than this.
60 * For explanation, see alloc_buffer_data below.
61 */
62 #define DM_BUFIO_BLOCK_SIZE_SLAB_LIMIT (PAGE_SIZE >> 1)
63 #define DM_BUFIO_BLOCK_SIZE_GFP_LIMIT (PAGE_SIZE << (MAX_ORDER - 1))
64
65 /*
66 * dm_buffer->list_mode
67 */
68 #define LIST_CLEAN 0
69 #define LIST_DIRTY 1
70 #define LIST_SIZE 2
71
72 /*
73 * Linking of buffers:
74 * All buffers are indexed in the buffer_tree red/black tree with their node field.
75 *
76 * Clean buffers that are not being written (B_WRITING not set)
77 * are linked to lru[LIST_CLEAN] with their lru_list field.
78 *
79 * Dirty and clean buffers that are being written are linked to
80 * lru[LIST_DIRTY] with their lru_list field. When the write
81 * finishes, the buffer cannot be relinked immediately (because we
82 * are in an interrupt context and relinking requires process
83 * context), so some clean-not-writing buffers can be held on
84 * dirty_lru too. They are later added to lru in the process
85 * context.
86 */
87 struct dm_bufio_client {
88 struct mutex lock;
89
90 struct list_head lru[LIST_SIZE];
91 unsigned long n_buffers[LIST_SIZE];
92
93 struct block_device *bdev;
94 unsigned block_size;
95 unsigned char sectors_per_block_bits;
96 unsigned char pages_per_block_bits;
97 unsigned char blocks_per_page_bits;
98 unsigned aux_size;
99 void (*alloc_callback)(struct dm_buffer *);
100 void (*write_callback)(struct dm_buffer *);
101
102 struct dm_io_client *dm_io;
103
104 struct list_head reserved_buffers;
105 unsigned need_reserved_buffers;
106
107 unsigned minimum_buffers;
108
109 struct rb_root buffer_tree;
110 wait_queue_head_t free_buffer_wait;
111
112 int async_write_error;
113
114 struct list_head client_list;
115 struct shrinker shrinker;
116 };
117
118 /*
119 * Buffer state bits.
120 */
121 #define B_READING 0
122 #define B_WRITING 1
123 #define B_DIRTY 2
124
125 /*
126 * Describes how the block was allocated:
127 * kmem_cache_alloc(), __get_free_pages() or vmalloc().
128 * See the comment at alloc_buffer_data.
129 */
130 enum data_mode {
131 DATA_MODE_SLAB = 0,
132 DATA_MODE_GET_FREE_PAGES = 1,
133 DATA_MODE_VMALLOC = 2,
134 DATA_MODE_LIMIT = 3
135 };
136
137 struct dm_buffer {
138 struct rb_node node;
139 struct list_head lru_list;
140 sector_t block;
141 void *data;
142 enum data_mode data_mode;
143 unsigned char list_mode; /* LIST_* */
144 unsigned hold_count;
145 int read_error;
146 int write_error;
147 unsigned long state;
148 unsigned long last_accessed;
149 struct dm_bufio_client *c;
150 struct list_head write_list;
151 struct bio bio;
152 struct bio_vec bio_vec[DM_BUFIO_INLINE_VECS];
153 #ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
154 #define MAX_STACK 10
155 struct stack_trace stack_trace;
156 unsigned long stack_entries[MAX_STACK];
157 #endif
158 };
159
160 /*----------------------------------------------------------------*/
161
162 static struct kmem_cache *dm_bufio_caches[PAGE_SHIFT - SECTOR_SHIFT];
163 static char *dm_bufio_cache_names[PAGE_SHIFT - SECTOR_SHIFT];
164
165 static inline int dm_bufio_cache_index(struct dm_bufio_client *c)
166 {
167 unsigned ret = c->blocks_per_page_bits - 1;
168
169 BUG_ON(ret >= ARRAY_SIZE(dm_bufio_caches));
170
171 return ret;
172 }
173
174 #define DM_BUFIO_CACHE(c) (dm_bufio_caches[dm_bufio_cache_index(c)])
175 #define DM_BUFIO_CACHE_NAME(c) (dm_bufio_cache_names[dm_bufio_cache_index(c)])
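
/*
 * Worked example, assuming 4 KiB pages (the sizes are illustrative): a
 * client with 512-byte blocks has blocks_per_page_bits = 12 - 9 = 3, so
 * dm_bufio_cache_index() returns 2 and its data comes from
 * dm_bufio_caches[2]; 1 KiB blocks map to index 1 and 2 KiB blocks to
 * index 0.  Larger blocks never use these caches (see alloc_buffer_data).
 */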
176
177 #define dm_bufio_in_request() (!!current->bio_list)
178
179 static void dm_bufio_lock(struct dm_bufio_client *c)
180 {
181 mutex_lock_nested(&c->lock, dm_bufio_in_request());
182 }
183
184 static int dm_bufio_trylock(struct dm_bufio_client *c)
185 {
186 return mutex_trylock(&c->lock);
187 }
188
189 static void dm_bufio_unlock(struct dm_bufio_client *c)
190 {
191 mutex_unlock(&c->lock);
192 }
193
194 /*----------------------------------------------------------------*/
195
196 /*
197 * Default cache size: available memory divided by the ratio.
198 */
199 static unsigned long dm_bufio_default_cache_size;
200
201 /*
202 * Total cache size set by the user.
203 */
204 static unsigned long dm_bufio_cache_size;
205
206 /*
207 * A copy of dm_bufio_cache_size because dm_bufio_cache_size can change
208 * at any time. If it disagrees, the user has changed the cache size.
209 */
210 static unsigned long dm_bufio_cache_size_latch;
211
212 static DEFINE_SPINLOCK(param_spinlock);
213
214 /*
215 * Buffers are freed after this timeout
216 */
217 static unsigned dm_bufio_max_age = DM_BUFIO_DEFAULT_AGE_SECS;
218 static unsigned dm_bufio_retain_bytes = DM_BUFIO_DEFAULT_RETAIN_BYTES;
219
220 static unsigned long dm_bufio_peak_allocated;
221 static unsigned long dm_bufio_allocated_kmem_cache;
222 static unsigned long dm_bufio_allocated_get_free_pages;
223 static unsigned long dm_bufio_allocated_vmalloc;
224 static unsigned long dm_bufio_current_allocated;
225
226 /*----------------------------------------------------------------*/
227
228 /*
229 * Per-client cache: dm_bufio_cache_size / dm_bufio_client_count
230 */
231 static unsigned long dm_bufio_cache_size_per_client;
232
233 /*
234 * The current number of clients.
235 */
236 static int dm_bufio_client_count;
237
238 /*
239 * The list of all clients.
240 */
241 static LIST_HEAD(dm_bufio_all_clients);
242
243 /*
244 * This mutex protects dm_bufio_cache_size_latch,
245 * dm_bufio_cache_size_per_client and dm_bufio_client_count
246 */
247 static DEFINE_MUTEX(dm_bufio_clients_lock);
248
249 #ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
250 static void buffer_record_stack(struct dm_buffer *b)
251 {
252 b->stack_trace.nr_entries = 0;
253 b->stack_trace.max_entries = MAX_STACK;
254 b->stack_trace.entries = b->stack_entries;
255 b->stack_trace.skip = 2;
256 save_stack_trace(&b->stack_trace);
257 }
258 #endif
259
260 /*----------------------------------------------------------------
261 * A red/black tree acts as an index for all the buffers.
262 *--------------------------------------------------------------*/
263 static struct dm_buffer *__find(struct dm_bufio_client *c, sector_t block)
264 {
265 struct rb_node *n = c->buffer_tree.rb_node;
266 struct dm_buffer *b;
267
268 while (n) {
269 b = container_of(n, struct dm_buffer, node);
270
271 if (b->block == block)
272 return b;
273
274 n = (b->block < block) ? n->rb_left : n->rb_right;
275 }
276
277 return NULL;
278 }
279
280 static void __insert(struct dm_bufio_client *c, struct dm_buffer *b)
281 {
282 struct rb_node **new = &c->buffer_tree.rb_node, *parent = NULL;
283 struct dm_buffer *found;
284
285 while (*new) {
286 found = container_of(*new, struct dm_buffer, node);
287
288 if (found->block == b->block) {
289 BUG_ON(found != b);
290 return;
291 }
292
293 parent = *new;
294 new = (found->block < b->block) ?
295 &((*new)->rb_left) : &((*new)->rb_right);
296 }
297
298 rb_link_node(&b->node, parent, new);
299 rb_insert_color(&b->node, &c->buffer_tree);
300 }
301
302 static void __remove(struct dm_bufio_client *c, struct dm_buffer *b)
303 {
304 rb_erase(&b->node, &c->buffer_tree);
305 }
306
307 /*----------------------------------------------------------------*/
308
309 static void adjust_total_allocated(enum data_mode data_mode, long diff)
310 {
311 static unsigned long * const class_ptr[DATA_MODE_LIMIT] = {
312 &dm_bufio_allocated_kmem_cache,
313 &dm_bufio_allocated_get_free_pages,
314 &dm_bufio_allocated_vmalloc,
315 };
316
317 spin_lock(&param_spinlock);
318
319 *class_ptr[data_mode] += diff;
320
321 dm_bufio_current_allocated += diff;
322
323 if (dm_bufio_current_allocated > dm_bufio_peak_allocated)
324 dm_bufio_peak_allocated = dm_bufio_current_allocated;
325
326 spin_unlock(&param_spinlock);
327 }
328
329 /*
330 * Change the number of clients and recalculate per-client limit.
331 */
332 static void __cache_size_refresh(void)
333 {
334 BUG_ON(!mutex_is_locked(&dm_bufio_clients_lock));
335 BUG_ON(dm_bufio_client_count < 0);
336
337 dm_bufio_cache_size_latch = ACCESS_ONCE(dm_bufio_cache_size);
338
339 /*
340 * Use default if set to 0 and report the actual cache size used.
341 */
342 if (!dm_bufio_cache_size_latch) {
343 (void)cmpxchg(&dm_bufio_cache_size, 0,
344 dm_bufio_default_cache_size);
345 dm_bufio_cache_size_latch = dm_bufio_default_cache_size;
346 }
347
348 dm_bufio_cache_size_per_client = dm_bufio_cache_size_latch /
349 (dm_bufio_client_count ? : 1);
350 }
351
352 /*
353 * Allocating buffer data.
354 *
355 * Small buffers are allocated with kmem_cache, to use space optimally.
356 *
357 * For large buffers, we choose between get_free_pages and vmalloc.
358 * Each has advantages and disadvantages.
359 *
360 * __get_free_pages can randomly fail if the memory is fragmented.
361 * __vmalloc won't randomly fail, but vmalloc space is limited (it may be
362 * as low as 128M) so using it for caching is not appropriate.
363 *
364 * If the allocation may fail we use __get_free_pages. Memory fragmentation
365 * won't have a fatal effect here, but it just causes flushes of some other
366 * buffers and more I/O will be performed. Don't use __get_free_pages if it
367 * always fails (i.e. order >= MAX_ORDER).
368 *
369 * If the allocation shouldn't fail we use __vmalloc. This is only for the
370 * initial reserve allocation, so there's no risk of wasting all vmalloc
371 * space.
372 */
373 static void *alloc_buffer_data(struct dm_bufio_client *c, gfp_t gfp_mask,
374 enum data_mode *data_mode)
375 {
376 unsigned noio_flag;
377 void *ptr;
378
379 if (c->block_size <= DM_BUFIO_BLOCK_SIZE_SLAB_LIMIT) {
380 *data_mode = DATA_MODE_SLAB;
381 return kmem_cache_alloc(DM_BUFIO_CACHE(c), gfp_mask);
382 }
383
384 if (c->block_size <= DM_BUFIO_BLOCK_SIZE_GFP_LIMIT &&
385 gfp_mask & __GFP_NORETRY) {
386 *data_mode = DATA_MODE_GET_FREE_PAGES;
387 return (void *)__get_free_pages(gfp_mask,
388 c->pages_per_block_bits);
389 }
390
391 *data_mode = DATA_MODE_VMALLOC;
392
393 /*
394 * __vmalloc allocates the data pages and auxiliary structures with
395 * gfp_flags that were specified, but pagetables are always allocated
396 * with GFP_KERNEL, no matter what was specified as gfp_mask.
397 *
398 * Consequently, we must set per-process flag PF_MEMALLOC_NOIO so that
399 * all allocations done by this process (including pagetables) are done
400 * as if GFP_NOIO was specified.
401 */
402
403 if (gfp_mask & __GFP_NORETRY)
404 noio_flag = memalloc_noio_save();
405
406 ptr = __vmalloc(c->block_size, gfp_mask | __GFP_HIGHMEM, PAGE_KERNEL);
407
408 if (gfp_mask & __GFP_NORETRY)
409 memalloc_noio_restore(noio_flag);
410
411 return ptr;
412 }
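
/*
 * Illustrative decision table for alloc_buffer_data, assuming 4 KiB pages
 * and the common MAX_ORDER of 11 (the concrete sizes are assumptions):
 *
 *	block_size <= 2 KiB                        -> DATA_MODE_SLAB
 *	4 KiB .. 4 MiB with __GFP_NORETRY          -> DATA_MODE_GET_FREE_PAGES
 *	larger blocks, or must-not-fail (reserve)
 *	allocations                                -> DATA_MODE_VMALLOC
 */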
413
414 /*
415 * Free buffer's data.
416 */
417 static void free_buffer_data(struct dm_bufio_client *c,
418 void *data, enum data_mode data_mode)
419 {
420 switch (data_mode) {
421 case DATA_MODE_SLAB:
422 kmem_cache_free(DM_BUFIO_CACHE(c), data);
423 break;
424
425 case DATA_MODE_GET_FREE_PAGES:
426 free_pages((unsigned long)data, c->pages_per_block_bits);
427 break;
428
429 case DATA_MODE_VMALLOC:
430 vfree(data);
431 break;
432
433 default:
434 DMCRIT("dm_bufio_free_buffer_data: bad data mode: %d",
435 data_mode);
436 BUG();
437 }
438 }
439
440 /*
441 * Allocate buffer and its data.
442 */
443 static struct dm_buffer *alloc_buffer(struct dm_bufio_client *c, gfp_t gfp_mask)
444 {
445 struct dm_buffer *b = kmalloc(sizeof(struct dm_buffer) + c->aux_size,
446 gfp_mask);
447
448 if (!b)
449 return NULL;
450
451 b->c = c;
452
453 b->data = alloc_buffer_data(c, gfp_mask, &b->data_mode);
454 if (!b->data) {
455 kfree(b);
456 return NULL;
457 }
458
459 adjust_total_allocated(b->data_mode, (long)c->block_size);
460
461 #ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
462 memset(&b->stack_trace, 0, sizeof(b->stack_trace));
463 #endif
464 return b;
465 }
466
467 /*
468 * Free buffer and its data.
469 */
470 static void free_buffer(struct dm_buffer *b)
471 {
472 struct dm_bufio_client *c = b->c;
473
474 adjust_total_allocated(b->data_mode, -(long)c->block_size);
475
476 free_buffer_data(c, b->data, b->data_mode);
477 kfree(b);
478 }
479
480 /*
481 * Link buffer to the buffer tree and the clean or dirty queue.
482 */
483 static void __link_buffer(struct dm_buffer *b, sector_t block, int dirty)
484 {
485 struct dm_bufio_client *c = b->c;
486
487 c->n_buffers[dirty]++;
488 b->block = block;
489 b->list_mode = dirty;
490 list_add(&b->lru_list, &c->lru[dirty]);
491 __insert(b->c, b);
492 b->last_accessed = jiffies;
493 }
494
495 /*
496 * Unlink buffer from the buffer tree and the dirty or clean queue.
497 */
498 static void __unlink_buffer(struct dm_buffer *b)
499 {
500 struct dm_bufio_client *c = b->c;
501
502 BUG_ON(!c->n_buffers[b->list_mode]);
503
504 c->n_buffers[b->list_mode]--;
505 __remove(b->c, b);
506 list_del(&b->lru_list);
507 }
508
509 /*
510 * Place the buffer to the head of dirty or clean LRU queue.
511 */
512 static void __relink_lru(struct dm_buffer *b, int dirty)
513 {
514 struct dm_bufio_client *c = b->c;
515
516 BUG_ON(!c->n_buffers[b->list_mode]);
517
518 c->n_buffers[b->list_mode]--;
519 c->n_buffers[dirty]++;
520 b->list_mode = dirty;
521 list_move(&b->lru_list, &c->lru[dirty]);
522 b->last_accessed = jiffies;
523 }
524
525 /*----------------------------------------------------------------
526 * Submit I/O on the buffer.
527 *
528 * Bio interface is faster but it has some problems:
529 * the vector list is limited (increasing this limit increases
530 * memory-consumption per buffer, so it is not viable);
531 *
532 * the memory must be direct-mapped, not vmalloced;
533 *
534 * the I/O driver can reject requests spuriously if it thinks that
535 * the requests are too big for the device or if they cross a
536 * controller-defined memory boundary.
537 *
538 * If the buffer is small enough (up to DM_BUFIO_INLINE_VECS pages) and
539 * it is not vmalloced, try using the bio interface.
540 *
541 * If the buffer is big, if it is vmalloced or if the underlying device
542 * rejects the bio because it is too large, use dm-io layer to do the I/O.
543 * The dm-io layer splits the I/O into multiple requests, avoiding the above
544 * shortcomings.
545 *--------------------------------------------------------------*/
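
/*
 * Worked example for the choice above, assuming 4 KiB pages: the inline
 * bio carries at most DM_BUFIO_INLINE_VECS * PAGE_SIZE = 16 * 4 KiB =
 * 64 KiB, so a 64 KiB buffer that is not vmalloc-backed takes the inline
 * bio path, while a 128 KiB or vmalloc-backed buffer goes through dm-io
 * (see submit_io).
 */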
546
547 /*
548 * dm-io completion routine. It just calls b->bio.bi_end_io, pretending
549 * that the request was handled directly with bio interface.
550 */
551 static void dmio_complete(unsigned long error, void *context)
552 {
553 struct dm_buffer *b = context;
554
555 b->bio.bi_error = error ? -EIO : 0;
556 b->bio.bi_end_io(&b->bio);
557 }
558
559 static void use_dmio(struct dm_buffer *b, int rw, sector_t block,
560 bio_end_io_t *end_io)
561 {
562 int r;
563 struct dm_io_request io_req = {
564 .bi_op = rw,
565 .bi_op_flags = 0,
566 .notify.fn = dmio_complete,
567 .notify.context = b,
568 .client = b->c->dm_io,
569 };
570 struct dm_io_region region = {
571 .bdev = b->c->bdev,
572 .sector = block << b->c->sectors_per_block_bits,
573 .count = b->c->block_size >> SECTOR_SHIFT,
574 };
575
576 if (b->data_mode != DATA_MODE_VMALLOC) {
577 io_req.mem.type = DM_IO_KMEM;
578 io_req.mem.ptr.addr = b->data;
579 } else {
580 io_req.mem.type = DM_IO_VMA;
581 io_req.mem.ptr.vma = b->data;
582 }
583
584 b->bio.bi_end_io = end_io;
585
586 r = dm_io(&io_req, 1, &region, NULL);
587 if (r) {
588 b->bio.bi_error = r;
589 end_io(&b->bio);
590 }
591 }
592
593 static void inline_endio(struct bio *bio)
594 {
595 bio_end_io_t *end_fn = bio->bi_private;
596 int error = bio->bi_error;
597
598 /*
599 * Reset the bio to free any attached resources
600 * (e.g. bio integrity profiles).
601 */
602 bio_reset(bio);
603
604 bio->bi_error = error;
605 end_fn(bio);
606 }
607
608 static void use_inline_bio(struct dm_buffer *b, int rw, sector_t block,
609 bio_end_io_t *end_io)
610 {
611 char *ptr;
612 int len;
613
614 bio_init(&b->bio);
615 b->bio.bi_io_vec = b->bio_vec;
616 b->bio.bi_max_vecs = DM_BUFIO_INLINE_VECS;
617 b->bio.bi_iter.bi_sector = block << b->c->sectors_per_block_bits;
618 b->bio.bi_bdev = b->c->bdev;
619 b->bio.bi_end_io = inline_endio;
620 /*
621 * Use of .bi_private isn't a problem here because
622 * the dm_buffer's inline bio is local to bufio.
623 */
624 b->bio.bi_private = end_io;
625 bio_set_op_attrs(&b->bio, rw, 0);
626
627 /*
628 * We assume that if len >= PAGE_SIZE ptr is page-aligned.
629 * If len < PAGE_SIZE the buffer doesn't cross page boundary.
630 */
631 ptr = b->data;
632 len = b->c->block_size;
633
634 if (len >= PAGE_SIZE)
635 BUG_ON((unsigned long)ptr & (PAGE_SIZE - 1));
636 else
637 BUG_ON((unsigned long)ptr & (len - 1));
638
639 do {
640 if (!bio_add_page(&b->bio, virt_to_page(ptr),
641 len < PAGE_SIZE ? len : PAGE_SIZE,
642 offset_in_page(ptr))) {
643 BUG_ON(b->c->block_size <= PAGE_SIZE);
644 use_dmio(b, rw, block, end_io);
645 return;
646 }
647
648 len -= PAGE_SIZE;
649 ptr += PAGE_SIZE;
650 } while (len > 0);
651
652 submit_bio(&b->bio);
653 }
654
655 static void submit_io(struct dm_buffer *b, int rw, sector_t block,
656 bio_end_io_t *end_io)
657 {
658 if (rw == WRITE && b->c->write_callback)
659 b->c->write_callback(b);
660
661 if (b->c->block_size <= DM_BUFIO_INLINE_VECS * PAGE_SIZE &&
662 b->data_mode != DATA_MODE_VMALLOC)
663 use_inline_bio(b, rw, block, end_io);
664 else
665 use_dmio(b, rw, block, end_io);
666 }
667
668 /*----------------------------------------------------------------
669 * Writing dirty buffers
670 *--------------------------------------------------------------*/
671
672 /*
673 * The endio routine for write.
674 *
675 * Set the error, clear B_WRITING bit and wake anyone who was waiting on
676 * it.
677 */
678 static void write_endio(struct bio *bio)
679 {
680 struct dm_buffer *b = container_of(bio, struct dm_buffer, bio);
681
682 b->write_error = bio->bi_error;
683 if (unlikely(bio->bi_error)) {
684 struct dm_bufio_client *c = b->c;
685 int error = bio->bi_error;
686 (void)cmpxchg(&c->async_write_error, 0, error);
687 }
688
689 BUG_ON(!test_bit(B_WRITING, &b->state));
690
691 smp_mb__before_atomic();
692 clear_bit(B_WRITING, &b->state);
693 smp_mb__after_atomic();
694
695 wake_up_bit(&b->state, B_WRITING);
696 }
697
698 /*
699 * Initiate a write on a dirty buffer, but don't wait for it.
700 *
701 * - If the buffer is not dirty, exit.
702 * - If there is some previous write going on, wait for it to finish (we can't
703 * have two writes on the same buffer simultaneously).
704 * - Submit our write and don't wait on it. We set B_WRITING indicating
705 * that there is a write in progress.
706 */
707 static void __write_dirty_buffer(struct dm_buffer *b,
708 struct list_head *write_list)
709 {
710 if (!test_bit(B_DIRTY, &b->state))
711 return;
712
713 clear_bit(B_DIRTY, &b->state);
714 wait_on_bit_lock_io(&b->state, B_WRITING, TASK_UNINTERRUPTIBLE);
715
716 if (!write_list)
717 submit_io(b, WRITE, b->block, write_endio);
718 else
719 list_add_tail(&b->write_list, write_list);
720 }
721
722 static void __flush_write_list(struct list_head *write_list)
723 {
724 struct blk_plug plug;
725 blk_start_plug(&plug);
726 while (!list_empty(write_list)) {
727 struct dm_buffer *b =
728 list_entry(write_list->next, struct dm_buffer, write_list);
729 list_del(&b->write_list);
730 submit_io(b, WRITE, b->block, write_endio);
731 cond_resched();
732 }
733 blk_finish_plug(&plug);
734 }
735
736 /*
737 * Wait until any activity on the buffer finishes. Possibly write the
738 * buffer if it is dirty. When this function finishes, there is no I/O
739 * running on the buffer and the buffer is not dirty.
740 */
741 static void __make_buffer_clean(struct dm_buffer *b)
742 {
743 BUG_ON(b->hold_count);
744
745 if (!b->state) /* fast case */
746 return;
747
748 wait_on_bit_io(&b->state, B_READING, TASK_UNINTERRUPTIBLE);
749 __write_dirty_buffer(b, NULL);
750 wait_on_bit_io(&b->state, B_WRITING, TASK_UNINTERRUPTIBLE);
751 }
752
753 /*
754 * Find some buffer that is not held by anybody, clean it, unlink it and
755 * return it.
756 */
757 static struct dm_buffer *__get_unclaimed_buffer(struct dm_bufio_client *c)
758 {
759 struct dm_buffer *b;
760
761 list_for_each_entry_reverse(b, &c->lru[LIST_CLEAN], lru_list) {
762 BUG_ON(test_bit(B_WRITING, &b->state));
763 BUG_ON(test_bit(B_DIRTY, &b->state));
764
765 if (!b->hold_count) {
766 __make_buffer_clean(b);
767 __unlink_buffer(b);
768 return b;
769 }
770 cond_resched();
771 }
772
773 list_for_each_entry_reverse(b, &c->lru[LIST_DIRTY], lru_list) {
774 BUG_ON(test_bit(B_READING, &b->state));
775
776 if (!b->hold_count) {
777 __make_buffer_clean(b);
778 __unlink_buffer(b);
779 return b;
780 }
781 cond_resched();
782 }
783
784 return NULL;
785 }
786
787 /*
788 * Wait until some other thread frees a buffer or releases its hold count
789 * on some buffer.
790 *
791 * This function is entered with c->lock held, drops it and regains it
792 * before exiting.
793 */
794 static void __wait_for_free_buffer(struct dm_bufio_client *c)
795 {
796 DECLARE_WAITQUEUE(wait, current);
797
798 add_wait_queue(&c->free_buffer_wait, &wait);
799 set_task_state(current, TASK_UNINTERRUPTIBLE);
800 dm_bufio_unlock(c);
801
802 io_schedule();
803
804 remove_wait_queue(&c->free_buffer_wait, &wait);
805
806 dm_bufio_lock(c);
807 }
808
809 enum new_flag {
810 NF_FRESH = 0,
811 NF_READ = 1,
812 NF_GET = 2,
813 NF_PREFETCH = 3
814 };
815
816 /*
817 * Allocate a new buffer. If the allocation is not possible, wait until
818 * some other thread frees a buffer.
819 *
820 * May drop the lock and regain it.
821 */
822 static struct dm_buffer *__alloc_buffer_wait_no_callback(struct dm_bufio_client *c, enum new_flag nf)
823 {
824 struct dm_buffer *b;
825
826 /*
827 * dm-bufio is resistant to allocation failures (it just keeps
828 * one buffer reserved in cases all the allocations fail).
829 * So set flags to not try too hard:
830 * GFP_NOIO: don't recurse into the I/O layer
831 * __GFP_NORETRY: don't retry and rather return failure
832 * __GFP_NOMEMALLOC: don't use emergency reserves
833 * __GFP_NOWARN: don't print a warning in case of failure
834 *
835 * For debugging, if we set the cache size to 1, no new buffers will
836 * be allocated.
837 */
838 while (1) {
839 if (dm_bufio_cache_size_latch != 1) {
840 b = alloc_buffer(c, GFP_NOIO | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN);
841 if (b)
842 return b;
843 }
844
845 if (nf == NF_PREFETCH)
846 return NULL;
847
848 if (!list_empty(&c->reserved_buffers)) {
849 b = list_entry(c->reserved_buffers.next,
850 struct dm_buffer, lru_list);
851 list_del(&b->lru_list);
852 c->need_reserved_buffers++;
853
854 return b;
855 }
856
857 b = __get_unclaimed_buffer(c);
858 if (b)
859 return b;
860
861 __wait_for_free_buffer(c);
862 }
863 }
864
865 static struct dm_buffer *__alloc_buffer_wait(struct dm_bufio_client *c, enum new_flag nf)
866 {
867 struct dm_buffer *b = __alloc_buffer_wait_no_callback(c, nf);
868
869 if (!b)
870 return NULL;
871
872 if (c->alloc_callback)
873 c->alloc_callback(b);
874
875 return b;
876 }
877
878 /*
879 * Free a buffer and wake other threads waiting for free buffers.
880 */
881 static void __free_buffer_wake(struct dm_buffer *b)
882 {
883 struct dm_bufio_client *c = b->c;
884
885 if (!c->need_reserved_buffers)
886 free_buffer(b);
887 else {
888 list_add(&b->lru_list, &c->reserved_buffers);
889 c->need_reserved_buffers--;
890 }
891
892 wake_up(&c->free_buffer_wait);
893 }
894
895 static void __write_dirty_buffers_async(struct dm_bufio_client *c, int no_wait,
896 struct list_head *write_list)
897 {
898 struct dm_buffer *b, *tmp;
899
900 list_for_each_entry_safe_reverse(b, tmp, &c->lru[LIST_DIRTY], lru_list) {
901 BUG_ON(test_bit(B_READING, &b->state));
902
903 if (!test_bit(B_DIRTY, &b->state) &&
904 !test_bit(B_WRITING, &b->state)) {
905 __relink_lru(b, LIST_CLEAN);
906 continue;
907 }
908
909 if (no_wait && test_bit(B_WRITING, &b->state))
910 return;
911
912 __write_dirty_buffer(b, write_list);
913 cond_resched();
914 }
915 }
916
917 /*
918 * Get writeback threshold and buffer limit for a given client.
919 */
920 static void __get_memory_limit(struct dm_bufio_client *c,
921 unsigned long *threshold_buffers,
922 unsigned long *limit_buffers)
923 {
924 unsigned long buffers;
925
926 if (ACCESS_ONCE(dm_bufio_cache_size) != dm_bufio_cache_size_latch) {
927 mutex_lock(&dm_bufio_clients_lock);
928 __cache_size_refresh();
929 mutex_unlock(&dm_bufio_clients_lock);
930 }
931
932 buffers = dm_bufio_cache_size_per_client >>
933 (c->sectors_per_block_bits + SECTOR_SHIFT);
934
935 if (buffers < c->minimum_buffers)
936 buffers = c->minimum_buffers;
937
938 *limit_buffers = buffers;
939 *threshold_buffers = buffers * DM_BUFIO_WRITEBACK_PERCENT / 100;
940 }
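
/*
 * Worked example (the numbers are illustrative assumptions): with
 * dm_bufio_cache_size_per_client = 64 MiB and 4 KiB blocks,
 * *limit_buffers = 64 MiB / 4 KiB = 16384 and *threshold_buffers =
 * 16384 * DM_BUFIO_WRITEBACK_PERCENT / 100 = 12288, subject to the
 * c->minimum_buffers floor.
 */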
941
942 /*
943 * Check if we're over watermark.
944 * If we are over "limit_buffers", free unclaimed buffers until we get under the limit.
945 * If we are over "threshold_buffers", start writing dirty buffers back asynchronously.
946 */
947 static void __check_watermark(struct dm_bufio_client *c,
948 struct list_head *write_list)
949 {
950 unsigned long threshold_buffers, limit_buffers;
951
952 __get_memory_limit(c, &threshold_buffers, &limit_buffers);
953
954 while (c->n_buffers[LIST_CLEAN] + c->n_buffers[LIST_DIRTY] >
955 limit_buffers) {
956
957 struct dm_buffer *b = __get_unclaimed_buffer(c);
958
959 if (!b)
960 return;
961
962 __free_buffer_wake(b);
963 cond_resched();
964 }
965
966 if (c->n_buffers[LIST_DIRTY] > threshold_buffers)
967 __write_dirty_buffers_async(c, 1, write_list);
968 }
969
970 /*----------------------------------------------------------------
971 * Getting a buffer
972 *--------------------------------------------------------------*/
973
974 static struct dm_buffer *__bufio_new(struct dm_bufio_client *c, sector_t block,
975 enum new_flag nf, int *need_submit,
976 struct list_head *write_list)
977 {
978 struct dm_buffer *b, *new_b = NULL;
979
980 *need_submit = 0;
981
982 b = __find(c, block);
983 if (b)
984 goto found_buffer;
985
986 if (nf == NF_GET)
987 return NULL;
988
989 new_b = __alloc_buffer_wait(c, nf);
990 if (!new_b)
991 return NULL;
992
993 /*
994 * We've had a period where the mutex was unlocked, so need to
995 * recheck the buffer tree.
996 */
997 b = __find(c, block);
998 if (b) {
999 __free_buffer_wake(new_b);
1000 goto found_buffer;
1001 }
1002
1003 __check_watermark(c, write_list);
1004
1005 b = new_b;
1006 b->hold_count = 1;
1007 b->read_error = 0;
1008 b->write_error = 0;
1009 __link_buffer(b, block, LIST_CLEAN);
1010
1011 if (nf == NF_FRESH) {
1012 b->state = 0;
1013 return b;
1014 }
1015
1016 b->state = 1 << B_READING;
1017 *need_submit = 1;
1018
1019 return b;
1020
1021 found_buffer:
1022 if (nf == NF_PREFETCH)
1023 return NULL;
1024 /*
1025 * Note: it is essential that we don't wait for the buffer to be
1026 * read if dm_bufio_get function is used. Both dm_bufio_get and
1027 * dm_bufio_prefetch can be used in the driver request routine.
1028 * If the user called both dm_bufio_prefetch and dm_bufio_get on
1029 * the same buffer, it would deadlock if we waited.
1030 */
1031 if (nf == NF_GET && unlikely(test_bit(B_READING, &b->state)))
1032 return NULL;
1033
1034 b->hold_count++;
1035 __relink_lru(b, test_bit(B_DIRTY, &b->state) ||
1036 test_bit(B_WRITING, &b->state));
1037 return b;
1038 }
1039
1040 /*
1041 * The endio routine for reading: set the error, clear the bit and wake up
1042 * anyone waiting on the buffer.
1043 */
1044 static void read_endio(struct bio *bio)
1045 {
1046 struct dm_buffer *b = container_of(bio, struct dm_buffer, bio);
1047
1048 b->read_error = bio->bi_error;
1049
1050 BUG_ON(!test_bit(B_READING, &b->state));
1051
1052 smp_mb__before_atomic();
1053 clear_bit(B_READING, &b->state);
1054 smp_mb__after_atomic();
1055
1056 wake_up_bit(&b->state, B_READING);
1057 }
1058
1059 /*
1060 * A common routine for dm_bufio_new and dm_bufio_read. Operation of these
1061 * functions is similar except that dm_bufio_new doesn't read the
1062 * buffer from the disk (assuming that the caller overwrites all the data
1063 * and uses dm_bufio_mark_buffer_dirty to write new data back).
1064 */
1065 static void *new_read(struct dm_bufio_client *c, sector_t block,
1066 enum new_flag nf, struct dm_buffer **bp)
1067 {
1068 int need_submit;
1069 struct dm_buffer *b;
1070
1071 LIST_HEAD(write_list);
1072
1073 dm_bufio_lock(c);
1074 b = __bufio_new(c, block, nf, &need_submit, &write_list);
1075 #ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
1076 if (b && b->hold_count == 1)
1077 buffer_record_stack(b);
1078 #endif
1079 dm_bufio_unlock(c);
1080
1081 __flush_write_list(&write_list);
1082
1083 if (!b)
1084 return NULL;
1085
1086 if (need_submit)
1087 submit_io(b, READ, b->block, read_endio);
1088
1089 wait_on_bit_io(&b->state, B_READING, TASK_UNINTERRUPTIBLE);
1090
1091 if (b->read_error) {
1092 int error = b->read_error;
1093
1094 dm_bufio_release(b);
1095
1096 return ERR_PTR(error);
1097 }
1098
1099 *bp = b;
1100
1101 return b->data;
1102 }
1103
1104 void *dm_bufio_get(struct dm_bufio_client *c, sector_t block,
1105 struct dm_buffer **bp)
1106 {
1107 return new_read(c, block, NF_GET, bp);
1108 }
1109 EXPORT_SYMBOL_GPL(dm_bufio_get);
1110
1111 void *dm_bufio_read(struct dm_bufio_client *c, sector_t block,
1112 struct dm_buffer **bp)
1113 {
1114 BUG_ON(dm_bufio_in_request());
1115
1116 return new_read(c, block, NF_READ, bp);
1117 }
1118 EXPORT_SYMBOL_GPL(dm_bufio_read);
1119
1120 void *dm_bufio_new(struct dm_bufio_client *c, sector_t block,
1121 struct dm_buffer **bp)
1122 {
1123 BUG_ON(dm_bufio_in_request());
1124
1125 return new_read(c, block, NF_FRESH, bp);
1126 }
1127 EXPORT_SYMBOL_GPL(dm_bufio_new);
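
/*
 * Typical caller pattern, shown only as an illustrative sketch: read a
 * block, modify the cached data, mark the buffer dirty, drop the hold
 * count and finally write everything back.  The "struct my_node" type is
 * an assumption made for the example, not something defined in this file.
 *
 *	struct dm_buffer *buf;
 *	struct my_node *node;
 *
 *	node = dm_bufio_read(c, block, &buf);
 *	if (IS_ERR(node))
 *		return PTR_ERR(node);
 *
 *	node->counter++;
 *	dm_bufio_mark_buffer_dirty(buf);
 *	dm_bufio_release(buf);
 *
 *	return dm_bufio_write_dirty_buffers(c);
 */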
1128
1129 void dm_bufio_prefetch(struct dm_bufio_client *c,
1130 sector_t block, unsigned n_blocks)
1131 {
1132 struct blk_plug plug;
1133
1134 LIST_HEAD(write_list);
1135
1136 BUG_ON(dm_bufio_in_request());
1137
1138 blk_start_plug(&plug);
1139 dm_bufio_lock(c);
1140
1141 for (; n_blocks--; block++) {
1142 int need_submit;
1143 struct dm_buffer *b;
1144 b = __bufio_new(c, block, NF_PREFETCH, &need_submit,
1145 &write_list);
1146 if (unlikely(!list_empty(&write_list))) {
1147 dm_bufio_unlock(c);
1148 blk_finish_plug(&plug);
1149 __flush_write_list(&write_list);
1150 blk_start_plug(&plug);
1151 dm_bufio_lock(c);
1152 }
1153 if (unlikely(b != NULL)) {
1154 dm_bufio_unlock(c);
1155
1156 if (need_submit)
1157 submit_io(b, READ, b->block, read_endio);
1158 dm_bufio_release(b);
1159
1160 cond_resched();
1161
1162 if (!n_blocks)
1163 goto flush_plug;
1164 dm_bufio_lock(c);
1165 }
1166 }
1167
1168 dm_bufio_unlock(c);
1169
1170 flush_plug:
1171 blk_finish_plug(&plug);
1172 }
1173 EXPORT_SYMBOL_GPL(dm_bufio_prefetch);
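
/*
 * Illustrative sketch only (the window of 8 blocks is an arbitrary
 * assumption): a caller expecting to read a run of blocks can prefetch
 * them first, so the subsequent reads find the data cached or already in
 * flight:
 *
 *	dm_bufio_prefetch(c, block, 8);
 *	for (i = 0; i < 8; i++) {
 *		data = dm_bufio_read(c, block + i, &buf);
 *		if (IS_ERR(data))
 *			continue;
 *		dm_bufio_release(buf);
 *	}
 */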
1174
1175 void dm_bufio_release(struct dm_buffer *b)
1176 {
1177 struct dm_bufio_client *c = b->c;
1178
1179 dm_bufio_lock(c);
1180
1181 BUG_ON(!b->hold_count);
1182
1183 b->hold_count--;
1184 if (!b->hold_count) {
1185 wake_up(&c->free_buffer_wait);
1186
1187 /*
1188 * If there were errors on the buffer, and the buffer is not
1189 * to be written, free the buffer. There is no point in caching
1190 * invalid buffer.
1191 */
1192 if ((b->read_error || b->write_error) &&
1193 !test_bit(B_READING, &b->state) &&
1194 !test_bit(B_WRITING, &b->state) &&
1195 !test_bit(B_DIRTY, &b->state)) {
1196 __unlink_buffer(b);
1197 __free_buffer_wake(b);
1198 }
1199 }
1200
1201 dm_bufio_unlock(c);
1202 }
1203 EXPORT_SYMBOL_GPL(dm_bufio_release);
1204
1205 void dm_bufio_mark_buffer_dirty(struct dm_buffer *b)
1206 {
1207 struct dm_bufio_client *c = b->c;
1208
1209 dm_bufio_lock(c);
1210
1211 BUG_ON(test_bit(B_READING, &b->state));
1212
1213 if (!test_and_set_bit(B_DIRTY, &b->state))
1214 __relink_lru(b, LIST_DIRTY);
1215
1216 dm_bufio_unlock(c);
1217 }
1218 EXPORT_SYMBOL_GPL(dm_bufio_mark_buffer_dirty);
1219
1220 void dm_bufio_write_dirty_buffers_async(struct dm_bufio_client *c)
1221 {
1222 LIST_HEAD(write_list);
1223
1224 BUG_ON(dm_bufio_in_request());
1225
1226 dm_bufio_lock(c);
1227 __write_dirty_buffers_async(c, 0, &write_list);
1228 dm_bufio_unlock(c);
1229 __flush_write_list(&write_list);
1230 }
1231 EXPORT_SYMBOL_GPL(dm_bufio_write_dirty_buffers_async);
1232
1233 /*
1234 * For performance, it is essential that the buffers are written asynchronously
1235 * and simultaneously (so that the block layer can merge the writes) and then
1236 * waited upon.
1237 *
1238 * Finally, we flush hardware disk cache.
1239 */
1240 int dm_bufio_write_dirty_buffers(struct dm_bufio_client *c)
1241 {
1242 int a, f;
1243 unsigned long buffers_processed = 0;
1244 struct dm_buffer *b, *tmp;
1245
1246 LIST_HEAD(write_list);
1247
1248 dm_bufio_lock(c);
1249 __write_dirty_buffers_async(c, 0, &write_list);
1250 dm_bufio_unlock(c);
1251 __flush_write_list(&write_list);
1252 dm_bufio_lock(c);
1253
1254 again:
1255 list_for_each_entry_safe_reverse(b, tmp, &c->lru[LIST_DIRTY], lru_list) {
1256 int dropped_lock = 0;
1257
1258 if (buffers_processed < c->n_buffers[LIST_DIRTY])
1259 buffers_processed++;
1260
1261 BUG_ON(test_bit(B_READING, &b->state));
1262
1263 if (test_bit(B_WRITING, &b->state)) {
1264 if (buffers_processed < c->n_buffers[LIST_DIRTY]) {
1265 dropped_lock = 1;
1266 b->hold_count++;
1267 dm_bufio_unlock(c);
1268 wait_on_bit_io(&b->state, B_WRITING,
1269 TASK_UNINTERRUPTIBLE);
1270 dm_bufio_lock(c);
1271 b->hold_count--;
1272 } else
1273 wait_on_bit_io(&b->state, B_WRITING,
1274 TASK_UNINTERRUPTIBLE);
1275 }
1276
1277 if (!test_bit(B_DIRTY, &b->state) &&
1278 !test_bit(B_WRITING, &b->state))
1279 __relink_lru(b, LIST_CLEAN);
1280
1281 cond_resched();
1282
1283 /*
1284 * If we dropped the lock, the list is no longer consistent,
1285 * so we must restart the search.
1286 *
1287 * In the most common case, the buffer just processed is
1288 * relinked to the clean list, so we won't loop scanning the
1289 * same buffer again and again.
1290 *
1291 * This may livelock if there is another thread simultaneously
1292 * dirtying buffers, so we count the number of buffers walked
1293 * and if it exceeds the total number of buffers, it means that
1294 * someone is doing some writes simultaneously with us. In
1295 * this case, we stop dropping the lock.
1296 */
1297 if (dropped_lock)
1298 goto again;
1299 }
1300 wake_up(&c->free_buffer_wait);
1301 dm_bufio_unlock(c);
1302
1303 a = xchg(&c->async_write_error, 0);
1304 f = dm_bufio_issue_flush(c);
1305 if (a)
1306 return a;
1307
1308 return f;
1309 }
1310 EXPORT_SYMBOL_GPL(dm_bufio_write_dirty_buffers);
1311
1312 /*
1313 * Use dm-io to send an empty barrier to flush the device.
1314 */
1315 int dm_bufio_issue_flush(struct dm_bufio_client *c)
1316 {
1317 struct dm_io_request io_req = {
1318 .bi_op = REQ_OP_WRITE,
1319 .bi_op_flags = REQ_PREFLUSH,
1320 .mem.type = DM_IO_KMEM,
1321 .mem.ptr.addr = NULL,
1322 .client = c->dm_io,
1323 };
1324 struct dm_io_region io_reg = {
1325 .bdev = c->bdev,
1326 .sector = 0,
1327 .count = 0,
1328 };
1329
1330 BUG_ON(dm_bufio_in_request());
1331
1332 return dm_io(&io_req, 1, &io_reg, NULL);
1333 }
1334 EXPORT_SYMBOL_GPL(dm_bufio_issue_flush);
1335
1336 /*
1337 * We first delete any other buffer that may be at that new location.
1338 *
1339 * Then, we write the buffer to the original location if it was dirty.
1340 *
1341 * Then, if we are the only one who is holding the buffer, relink the buffer
1342 * in the hash queue for the new location.
1343 *
1344 * If there was someone else holding the buffer, we write it to the new
1345 * location but not relink it, because that other user needs to have the buffer
1346 * at the same place.
1347 */
1348 void dm_bufio_release_move(struct dm_buffer *b, sector_t new_block)
1349 {
1350 struct dm_bufio_client *c = b->c;
1351 struct dm_buffer *new;
1352
1353 BUG_ON(dm_bufio_in_request());
1354
1355 dm_bufio_lock(c);
1356
1357 retry:
1358 new = __find(c, new_block);
1359 if (new) {
1360 if (new->hold_count) {
1361 __wait_for_free_buffer(c);
1362 goto retry;
1363 }
1364
1365 /*
1366 * FIXME: Is there any point waiting for a write that's going
1367 * to be overwritten in a bit?
1368 */
1369 __make_buffer_clean(new);
1370 __unlink_buffer(new);
1371 __free_buffer_wake(new);
1372 }
1373
1374 BUG_ON(!b->hold_count);
1375 BUG_ON(test_bit(B_READING, &b->state));
1376
1377 __write_dirty_buffer(b, NULL);
1378 if (b->hold_count == 1) {
1379 wait_on_bit_io(&b->state, B_WRITING,
1380 TASK_UNINTERRUPTIBLE);
1381 set_bit(B_DIRTY, &b->state);
1382 __unlink_buffer(b);
1383 __link_buffer(b, new_block, LIST_DIRTY);
1384 } else {
1385 sector_t old_block;
1386 wait_on_bit_lock_io(&b->state, B_WRITING,
1387 TASK_UNINTERRUPTIBLE);
1388 /*
1389 * Relink buffer to "new_block" so that write_callback
1390 * sees "new_block" as a block number.
1391 * After the write, link the buffer back to old_block.
1392 * All this must be done in bufio lock, so that block number
1393 * change isn't visible to other threads.
1394 */
1395 old_block = b->block;
1396 __unlink_buffer(b);
1397 __link_buffer(b, new_block, b->list_mode);
1398 submit_io(b, WRITE, new_block, write_endio);
1399 wait_on_bit_io(&b->state, B_WRITING,
1400 TASK_UNINTERRUPTIBLE);
1401 __unlink_buffer(b);
1402 __link_buffer(b, old_block, b->list_mode);
1403 }
1404
1405 dm_bufio_unlock(c);
1406 dm_bufio_release(b);
1407 }
1408 EXPORT_SYMBOL_GPL(dm_bufio_release_move);
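
/*
 * Illustrative sketch only ("old_block" and "fresh_block" stand for block
 * numbers chosen by the caller; they are assumptions for the example):
 * relocate a block by reading the old location and handing the buffer to
 * dm_bufio_release_move, which redirects the data to the new location and
 * releases the buffer.
 *
 *	data = dm_bufio_read(c, old_block, &buf);
 *	if (IS_ERR(data))
 *		return PTR_ERR(data);
 *	dm_bufio_release_move(buf, fresh_block);
 */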
1409
1410 /*
1411 * Free the given buffer.
1412 *
1413 * This is just a hint, if the buffer is in use or dirty, this function
1414 * does nothing.
1415 */
1416 void dm_bufio_forget(struct dm_bufio_client *c, sector_t block)
1417 {
1418 struct dm_buffer *b;
1419
1420 dm_bufio_lock(c);
1421
1422 b = __find(c, block);
1423 if (b && likely(!b->hold_count) && likely(!b->state)) {
1424 __unlink_buffer(b);
1425 __free_buffer_wake(b);
1426 }
1427
1428 dm_bufio_unlock(c);
1429 }
1430 EXPORT_SYMBOL(dm_bufio_forget);
1431
1432 void dm_bufio_set_minimum_buffers(struct dm_bufio_client *c, unsigned n)
1433 {
1434 c->minimum_buffers = n;
1435 }
1436 EXPORT_SYMBOL(dm_bufio_set_minimum_buffers);
1437
1438 unsigned dm_bufio_get_block_size(struct dm_bufio_client *c)
1439 {
1440 return c->block_size;
1441 }
1442 EXPORT_SYMBOL_GPL(dm_bufio_get_block_size);
1443
1444 sector_t dm_bufio_get_device_size(struct dm_bufio_client *c)
1445 {
1446 return i_size_read(c->bdev->bd_inode) >>
1447 (SECTOR_SHIFT + c->sectors_per_block_bits);
1448 }
1449 EXPORT_SYMBOL_GPL(dm_bufio_get_device_size);
1450
1451 sector_t dm_bufio_get_block_number(struct dm_buffer *b)
1452 {
1453 return b->block;
1454 }
1455 EXPORT_SYMBOL_GPL(dm_bufio_get_block_number);
1456
1457 void *dm_bufio_get_block_data(struct dm_buffer *b)
1458 {
1459 return b->data;
1460 }
1461 EXPORT_SYMBOL_GPL(dm_bufio_get_block_data);
1462
1463 void *dm_bufio_get_aux_data(struct dm_buffer *b)
1464 {
1465 return b + 1;
1466 }
1467 EXPORT_SYMBOL_GPL(dm_bufio_get_aux_data);
1468
1469 struct dm_bufio_client *dm_bufio_get_client(struct dm_buffer *b)
1470 {
1471 return b->c;
1472 }
1473 EXPORT_SYMBOL_GPL(dm_bufio_get_client);
1474
1475 static void drop_buffers(struct dm_bufio_client *c)
1476 {
1477 struct dm_buffer *b;
1478 int i;
1479 bool warned = false;
1480
1481 BUG_ON(dm_bufio_in_request());
1482
1483 /*
1484 * An optimization so that the buffers are not written one-by-one.
1485 */
1486 dm_bufio_write_dirty_buffers_async(c);
1487
1488 dm_bufio_lock(c);
1489
1490 while ((b = __get_unclaimed_buffer(c)))
1491 __free_buffer_wake(b);
1492
1493 for (i = 0; i < LIST_SIZE; i++)
1494 list_for_each_entry(b, &c->lru[i], lru_list) {
1495 WARN_ON(!warned);
1496 warned = true;
1497 DMERR("leaked buffer %llx, hold count %u, list %d",
1498 (unsigned long long)b->block, b->hold_count, i);
1499 #ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
1500 print_stack_trace(&b->stack_trace, 1);
1501 b->hold_count = 0; /* mark unclaimed to avoid BUG_ON below */
1502 #endif
1503 }
1504
1505 #ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
1506 while ((b = __get_unclaimed_buffer(c)))
1507 __free_buffer_wake(b);
1508 #endif
1509
1510 for (i = 0; i < LIST_SIZE; i++)
1511 BUG_ON(!list_empty(&c->lru[i]));
1512
1513 dm_bufio_unlock(c);
1514 }
1515
1516 /*
1517 * We may not be able to evict this buffer if IO is pending or the client
1518 * is still using it. The caller is expected to know the buffer is too old.
1519 *
1520 * And if GFP_NOFS is used, we must not do any I/O because we hold
1521 * dm_bufio_clients_lock and we would risk deadlock if the I/O gets
1522 * rerouted to a different bufio client.
1523 */
1524 static bool __try_evict_buffer(struct dm_buffer *b, gfp_t gfp)
1525 {
1526 if (!(gfp & __GFP_FS)) {
1527 if (test_bit(B_READING, &b->state) ||
1528 test_bit(B_WRITING, &b->state) ||
1529 test_bit(B_DIRTY, &b->state))
1530 return false;
1531 }
1532
1533 if (b->hold_count)
1534 return false;
1535
1536 __make_buffer_clean(b);
1537 __unlink_buffer(b);
1538 __free_buffer_wake(b);
1539
1540 return true;
1541 }
1542
1543 static unsigned get_retain_buffers(struct dm_bufio_client *c)
1544 {
1545 unsigned retain_bytes = ACCESS_ONCE(dm_bufio_retain_bytes);
1546 return retain_bytes / c->block_size;
1547 }
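
/*
 * Worked example (illustrative): with the default
 * DM_BUFIO_DEFAULT_RETAIN_BYTES of 256 KiB and 4 KiB blocks, the shrinker
 * and the age-based cleanup try to keep at least 256 KiB / 4 KiB = 64
 * buffers cached per client.
 */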
1548
1549 static unsigned long __scan(struct dm_bufio_client *c, unsigned long nr_to_scan,
1550 gfp_t gfp_mask)
1551 {
1552 int l;
1553 struct dm_buffer *b, *tmp;
1554 unsigned long freed = 0;
1555 unsigned long count = nr_to_scan;
1556 unsigned retain_target = get_retain_buffers(c);
1557
1558 for (l = 0; l < LIST_SIZE; l++) {
1559 list_for_each_entry_safe_reverse(b, tmp, &c->lru[l], lru_list) {
1560 if (__try_evict_buffer(b, gfp_mask))
1561 freed++;
1562 if (!--nr_to_scan || ((count - freed) <= retain_target))
1563 return freed;
1564 cond_resched();
1565 }
1566 }
1567 return freed;
1568 }
1569
1570 static unsigned long
1571 dm_bufio_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
1572 {
1573 struct dm_bufio_client *c;
1574 unsigned long freed;
1575
1576 c = container_of(shrink, struct dm_bufio_client, shrinker);
1577 if (sc->gfp_mask & __GFP_FS)
1578 dm_bufio_lock(c);
1579 else if (!dm_bufio_trylock(c))
1580 return SHRINK_STOP;
1581
1582 freed = __scan(c, sc->nr_to_scan, sc->gfp_mask);
1583 dm_bufio_unlock(c);
1584 return freed;
1585 }
1586
1587 static unsigned long
1588 dm_bufio_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
1589 {
1590 struct dm_bufio_client *c;
1591 unsigned long count;
1592
1593 c = container_of(shrink, struct dm_bufio_client, shrinker);
1594 if (sc->gfp_mask & __GFP_FS)
1595 dm_bufio_lock(c);
1596 else if (!dm_bufio_trylock(c))
1597 return 0;
1598
1599 count = c->n_buffers[LIST_CLEAN] + c->n_buffers[LIST_DIRTY];
1600 dm_bufio_unlock(c);
1601 return count;
1602 }
1603
1604 /*
1605 * Create the buffering interface
1606 */
1607 struct dm_bufio_client *dm_bufio_client_create(struct block_device *bdev, unsigned block_size,
1608 unsigned reserved_buffers, unsigned aux_size,
1609 void (*alloc_callback)(struct dm_buffer *),
1610 void (*write_callback)(struct dm_buffer *))
1611 {
1612 int r;
1613 struct dm_bufio_client *c;
1614 unsigned i;
1615
1616 BUG_ON(block_size < 1 << SECTOR_SHIFT ||
1617 (block_size & (block_size - 1)));
1618
1619 c = kzalloc(sizeof(*c), GFP_KERNEL);
1620 if (!c) {
1621 r = -ENOMEM;
1622 goto bad_client;
1623 }
1624 c->buffer_tree = RB_ROOT;
1625
1626 c->bdev = bdev;
1627 c->block_size = block_size;
1628 c->sectors_per_block_bits = __ffs(block_size) - SECTOR_SHIFT;
1629 c->pages_per_block_bits = (__ffs(block_size) >= PAGE_SHIFT) ?
1630 __ffs(block_size) - PAGE_SHIFT : 0;
1631 c->blocks_per_page_bits = (__ffs(block_size) < PAGE_SHIFT ?
1632 PAGE_SHIFT - __ffs(block_size) : 0);
1633
1634 c->aux_size = aux_size;
1635 c->alloc_callback = alloc_callback;
1636 c->write_callback = write_callback;
1637
1638 for (i = 0; i < LIST_SIZE; i++) {
1639 INIT_LIST_HEAD(&c->lru[i]);
1640 c->n_buffers[i] = 0;
1641 }
1642
1643 mutex_init(&c->lock);
1644 INIT_LIST_HEAD(&c->reserved_buffers);
1645 c->need_reserved_buffers = reserved_buffers;
1646
1647 c->minimum_buffers = DM_BUFIO_MIN_BUFFERS;
1648
1649 init_waitqueue_head(&c->free_buffer_wait);
1650 c->async_write_error = 0;
1651
1652 c->dm_io = dm_io_client_create();
1653 if (IS_ERR(c->dm_io)) {
1654 r = PTR_ERR(c->dm_io);
1655 goto bad_dm_io;
1656 }
1657
1658 mutex_lock(&dm_bufio_clients_lock);
1659 if (c->blocks_per_page_bits) {
1660 if (!DM_BUFIO_CACHE_NAME(c)) {
1661 DM_BUFIO_CACHE_NAME(c) = kasprintf(GFP_KERNEL, "dm_bufio_cache-%u", c->block_size);
1662 if (!DM_BUFIO_CACHE_NAME(c)) {
1663 r = -ENOMEM;
1664 mutex_unlock(&dm_bufio_clients_lock);
1665 goto bad_cache;
1666 }
1667 }
1668
1669 if (!DM_BUFIO_CACHE(c)) {
1670 DM_BUFIO_CACHE(c) = kmem_cache_create(DM_BUFIO_CACHE_NAME(c),
1671 c->block_size,
1672 c->block_size, 0, NULL);
1673 if (!DM_BUFIO_CACHE(c)) {
1674 r = -ENOMEM;
1675 mutex_unlock(&dm_bufio_clients_lock);
1676 goto bad_cache;
1677 }
1678 }
1679 }
1680 mutex_unlock(&dm_bufio_clients_lock);
1681
1682 while (c->need_reserved_buffers) {
1683 struct dm_buffer *b = alloc_buffer(c, GFP_KERNEL);
1684
1685 if (!b) {
1686 r = -ENOMEM;
1687 goto bad_buffer;
1688 }
1689 __free_buffer_wake(b);
1690 }
1691
1692 mutex_lock(&dm_bufio_clients_lock);
1693 dm_bufio_client_count++;
1694 list_add(&c->client_list, &dm_bufio_all_clients);
1695 __cache_size_refresh();
1696 mutex_unlock(&dm_bufio_clients_lock);
1697
1698 c->shrinker.count_objects = dm_bufio_shrink_count;
1699 c->shrinker.scan_objects = dm_bufio_shrink_scan;
1700 c->shrinker.seeks = 1;
1701 c->shrinker.batch = 0;
1702 register_shrinker(&c->shrinker);
1703
1704 return c;
1705
1706 bad_buffer:
1707 bad_cache:
1708 while (!list_empty(&c->reserved_buffers)) {
1709 struct dm_buffer *b = list_entry(c->reserved_buffers.next,
1710 struct dm_buffer, lru_list);
1711 list_del(&b->lru_list);
1712 free_buffer(b);
1713 }
1714 dm_io_client_destroy(c->dm_io);
1715 bad_dm_io:
1716 kfree(c);
1717 bad_client:
1718 return ERR_PTR(r);
1719 }
1720 EXPORT_SYMBOL_GPL(dm_bufio_client_create);
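
/*
 * Illustrative lifecycle sketch (the 4096-byte block size, the single
 * reserved buffer and the "dev->bdev" device are assumptions made for the
 * example):
 *
 *	struct dm_bufio_client *c;
 *
 *	c = dm_bufio_client_create(dev->bdev, 4096, 1, 0, NULL, NULL);
 *	if (IS_ERR(c))
 *		return PTR_ERR(c);
 *
 *	... dm_bufio_read / dm_bufio_new / dm_bufio_release ...
 *
 *	dm_bufio_client_destroy(c);
 */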
1721
1722 /*
1723 * Free the buffering interface.
1724 * It is required that there are no references on any buffers.
1725 */
1726 void dm_bufio_client_destroy(struct dm_bufio_client *c)
1727 {
1728 unsigned i;
1729
1730 drop_buffers(c);
1731
1732 unregister_shrinker(&c->shrinker);
1733
1734 mutex_lock(&dm_bufio_clients_lock);
1735
1736 list_del(&c->client_list);
1737 dm_bufio_client_count--;
1738 __cache_size_refresh();
1739
1740 mutex_unlock(&dm_bufio_clients_lock);
1741
1742 BUG_ON(!RB_EMPTY_ROOT(&c->buffer_tree));
1743 BUG_ON(c->need_reserved_buffers);
1744
1745 while (!list_empty(&c->reserved_buffers)) {
1746 struct dm_buffer *b = list_entry(c->reserved_buffers.next,
1747 struct dm_buffer, lru_list);
1748 list_del(&b->lru_list);
1749 free_buffer(b);
1750 }
1751
1752 for (i = 0; i < LIST_SIZE; i++)
1753 if (c->n_buffers[i])
1754 DMERR("leaked buffer count %d: %ld", i, c->n_buffers[i]);
1755
1756 for (i = 0; i < LIST_SIZE; i++)
1757 BUG_ON(c->n_buffers[i]);
1758
1759 dm_io_client_destroy(c->dm_io);
1760 kfree(c);
1761 }
1762 EXPORT_SYMBOL_GPL(dm_bufio_client_destroy);
1763
1764 static unsigned get_max_age_hz(void)
1765 {
1766 unsigned max_age = ACCESS_ONCE(dm_bufio_max_age);
1767
1768 if (max_age > UINT_MAX / HZ)
1769 max_age = UINT_MAX / HZ;
1770
1771 return max_age * HZ;
1772 }
1773
1774 static bool older_than(struct dm_buffer *b, unsigned long age_hz)
1775 {
1776 return time_after_eq(jiffies, b->last_accessed + age_hz);
1777 }
1778
1779 static void __evict_old_buffers(struct dm_bufio_client *c, unsigned long age_hz)
1780 {
1781 struct dm_buffer *b, *tmp;
1782 unsigned retain_target = get_retain_buffers(c);
1783 unsigned count;
1784
1785 dm_bufio_lock(c);
1786
1787 count = c->n_buffers[LIST_CLEAN] + c->n_buffers[LIST_DIRTY];
1788 list_for_each_entry_safe_reverse(b, tmp, &c->lru[LIST_CLEAN], lru_list) {
1789 if (count <= retain_target)
1790 break;
1791
1792 if (!older_than(b, age_hz))
1793 break;
1794
1795 if (__try_evict_buffer(b, 0))
1796 count--;
1797
1798 cond_resched();
1799 }
1800
1801 dm_bufio_unlock(c);
1802 }
1803
1804 static void cleanup_old_buffers(void)
1805 {
1806 unsigned long max_age_hz = get_max_age_hz();
1807 struct dm_bufio_client *c;
1808
1809 mutex_lock(&dm_bufio_clients_lock);
1810
1811 list_for_each_entry(c, &dm_bufio_all_clients, client_list)
1812 __evict_old_buffers(c, max_age_hz);
1813
1814 mutex_unlock(&dm_bufio_clients_lock);
1815 }
1816
1817 static struct workqueue_struct *dm_bufio_wq;
1818 static struct delayed_work dm_bufio_work;
1819
1820 static void work_fn(struct work_struct *w)
1821 {
1822 cleanup_old_buffers();
1823
1824 queue_delayed_work(dm_bufio_wq, &dm_bufio_work,
1825 DM_BUFIO_WORK_TIMER_SECS * HZ);
1826 }
1827
1828 /*----------------------------------------------------------------
1829 * Module setup
1830 *--------------------------------------------------------------*/
1831
1832 /*
1833 * This is called only once for the whole dm_bufio module.
1834 * It initializes the memory limit.
1835 */
1836 static int __init dm_bufio_init(void)
1837 {
1838 __u64 mem;
1839
1840 dm_bufio_allocated_kmem_cache = 0;
1841 dm_bufio_allocated_get_free_pages = 0;
1842 dm_bufio_allocated_vmalloc = 0;
1843 dm_bufio_current_allocated = 0;
1844
1845 memset(&dm_bufio_caches, 0, sizeof dm_bufio_caches);
1846 memset(&dm_bufio_cache_names, 0, sizeof dm_bufio_cache_names);
1847
1848 mem = (__u64)((totalram_pages - totalhigh_pages) *
1849 DM_BUFIO_MEMORY_PERCENT / 100) << PAGE_SHIFT;
1850
1851 if (mem > ULONG_MAX)
1852 mem = ULONG_MAX;
1853
1854 #ifdef CONFIG_MMU
1855 /*
1856 * Get the size of vmalloc space the same way as VMALLOC_TOTAL
1857 * in fs/proc/internal.h
1858 */
1859 if (mem > (VMALLOC_END - VMALLOC_START) * DM_BUFIO_VMALLOC_PERCENT / 100)
1860 mem = (VMALLOC_END - VMALLOC_START) * DM_BUFIO_VMALLOC_PERCENT / 100;
1861 #endif
1862
1863 dm_bufio_default_cache_size = mem;
1864
1865 mutex_lock(&dm_bufio_clients_lock);
1866 __cache_size_refresh();
1867 mutex_unlock(&dm_bufio_clients_lock);
1868
1869 dm_bufio_wq = alloc_workqueue("dm_bufio_cache", WQ_MEM_RECLAIM, 0);
1870 if (!dm_bufio_wq)
1871 return -ENOMEM;
1872
1873 INIT_DELAYED_WORK(&dm_bufio_work, work_fn);
1874 queue_delayed_work(dm_bufio_wq, &dm_bufio_work,
1875 DM_BUFIO_WORK_TIMER_SECS * HZ);
1876
1877 return 0;
1878 }
1879
1880 /*
1881 * This is called once when unloading the dm_bufio module.
1882 */
1883 static void __exit dm_bufio_exit(void)
1884 {
1885 int bug = 0;
1886 int i;
1887
1888 cancel_delayed_work_sync(&dm_bufio_work);
1889 destroy_workqueue(dm_bufio_wq);
1890
1891 for (i = 0; i < ARRAY_SIZE(dm_bufio_caches); i++)
1892 kmem_cache_destroy(dm_bufio_caches[i]);
1893
1894 for (i = 0; i < ARRAY_SIZE(dm_bufio_cache_names); i++)
1895 kfree(dm_bufio_cache_names[i]);
1896
1897 if (dm_bufio_client_count) {
1898 DMCRIT("%s: dm_bufio_client_count leaked: %d",
1899 __func__, dm_bufio_client_count);
1900 bug = 1;
1901 }
1902
1903 if (dm_bufio_current_allocated) {
1904 DMCRIT("%s: dm_bufio_current_allocated leaked: %lu",
1905 __func__, dm_bufio_current_allocated);
1906 bug = 1;
1907 }
1908
1909 if (dm_bufio_allocated_get_free_pages) {
1910 DMCRIT("%s: dm_bufio_allocated_get_free_pages leaked: %lu",
1911 __func__, dm_bufio_allocated_get_free_pages);
1912 bug = 1;
1913 }
1914
1915 if (dm_bufio_allocated_vmalloc) {
1916 DMCRIT("%s: dm_bufio_vmalloc leaked: %lu",
1917 __func__, dm_bufio_allocated_vmalloc);
1918 bug = 1;
1919 }
1920
1921 BUG_ON(bug);
1922 }
1923
1924 module_init(dm_bufio_init)
1925 module_exit(dm_bufio_exit)
1926
1927 module_param_named(max_cache_size_bytes, dm_bufio_cache_size, ulong, S_IRUGO | S_IWUSR);
1928 MODULE_PARM_DESC(max_cache_size_bytes, "Size of metadata cache");
1929
1930 module_param_named(max_age_seconds, dm_bufio_max_age, uint, S_IRUGO | S_IWUSR);
1931 MODULE_PARM_DESC(max_age_seconds, "Max age of a buffer in seconds");
1932
1933 module_param_named(retain_bytes, dm_bufio_retain_bytes, uint, S_IRUGO | S_IWUSR);
1934 MODULE_PARM_DESC(retain_bytes, "Try to keep at least this many bytes cached in memory");
1935
1936 module_param_named(peak_allocated_bytes, dm_bufio_peak_allocated, ulong, S_IRUGO | S_IWUSR);
1937 MODULE_PARM_DESC(peak_allocated_bytes, "Tracks the maximum allocated memory");
1938
1939 module_param_named(allocated_kmem_cache_bytes, dm_bufio_allocated_kmem_cache, ulong, S_IRUGO);
1940 MODULE_PARM_DESC(allocated_kmem_cache_bytes, "Memory allocated with kmem_cache_alloc");
1941
1942 module_param_named(allocated_get_free_pages_bytes, dm_bufio_allocated_get_free_pages, ulong, S_IRUGO);
1943 MODULE_PARM_DESC(allocated_get_free_pages_bytes, "Memory allocated with get_free_pages");
1944
1945 module_param_named(allocated_vmalloc_bytes, dm_bufio_allocated_vmalloc, ulong, S_IRUGO);
1946 MODULE_PARM_DESC(allocated_vmalloc_bytes, "Memory allocated with vmalloc");
1947
1948 module_param_named(current_allocated_bytes, dm_bufio_current_allocated, ulong, S_IRUGO);
1949 MODULE_PARM_DESC(current_allocated_bytes, "Memory currently used by the cache");
1950
1951 MODULE_AUTHOR("Mikulas Patocka <dm-devel@redhat.com>");
1952 MODULE_DESCRIPTION(DM_NAME " buffered I/O library");
1953 MODULE_LICENSE("GPL");