// SPDX-License-Identifier: GPL-2.0-only

#include <linux/blkdev.h>
#include <linux/wait.h>
#include <linux/rbtree.h>
#include <linux/kthread.h>
#include <linux/backing-dev.h>
#include <linux/blk-cgroup.h>
#include <linux/freezer.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/mm.h>
#include <linux/sched/mm.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/writeback.h>
#include <linux/device.h>
#include <trace/events/writeback.h>

struct backing_dev_info noop_backing_dev_info;
EXPORT_SYMBOL_GPL(noop_backing_dev_info);

static struct class *bdi_class;
static const char *bdi_unknown_name = "(unknown)";

/*
 * bdi_lock protects bdi_tree and updates to bdi_list. bdi_list has RCU
 * reader side locking.
 */
DEFINE_SPINLOCK(bdi_lock);
static u64 bdi_id_cursor;
static struct rb_root bdi_tree = RB_ROOT;
LIST_HEAD(bdi_list);

/* bdi_wq serves all asynchronous writeback tasks */
struct workqueue_struct *bdi_wq;

#define K(x) ((x) << (PAGE_SHIFT - 10))

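/*
 * Worked example (illustrative only): with 4 KiB pages, PAGE_SHIFT is 12,
 * so K(x) == x << 2, i.e. K(25) pages == 100 kB. The macro simply rescales
 * a page count into kilobytes for the reporting below.
 */
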
#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>
#include <linux/seq_file.h>

static struct dentry *bdi_debug_root;

static void bdi_debug_init(void)
{
	bdi_debug_root = debugfs_create_dir("bdi", NULL);
}

static int bdi_debug_stats_show(struct seq_file *m, void *v)
{
	struct backing_dev_info *bdi = m->private;
	struct bdi_writeback *wb = &bdi->wb;
	unsigned long background_thresh;
	unsigned long dirty_thresh;
	unsigned long wb_thresh;
	unsigned long nr_dirty, nr_io, nr_more_io, nr_dirty_time;
	struct inode *inode;

	nr_dirty = nr_io = nr_more_io = nr_dirty_time = 0;
	spin_lock(&wb->list_lock);
	list_for_each_entry(inode, &wb->b_dirty, i_io_list)
		nr_dirty++;
	list_for_each_entry(inode, &wb->b_io, i_io_list)
		nr_io++;
	list_for_each_entry(inode, &wb->b_more_io, i_io_list)
		nr_more_io++;
	list_for_each_entry(inode, &wb->b_dirty_time, i_io_list)
		if (inode->i_state & I_DIRTY_TIME)
			nr_dirty_time++;
	spin_unlock(&wb->list_lock);

	global_dirty_limits(&background_thresh, &dirty_thresh);
	wb_thresh = wb_calc_thresh(wb, dirty_thresh);

	seq_printf(m,
		   "BdiWriteback:       %10lu kB\n"
		   "BdiReclaimable:     %10lu kB\n"
		   "BdiDirtyThresh:     %10lu kB\n"
		   "DirtyThresh:        %10lu kB\n"
		   "BackgroundThresh:   %10lu kB\n"
		   "BdiDirtied:         %10lu kB\n"
		   "BdiWritten:         %10lu kB\n"
		   "BdiWriteBandwidth:  %10lu kBps\n"
		   "b_dirty:            %10lu\n"
		   "b_io:               %10lu\n"
		   "b_more_io:          %10lu\n"
		   "b_dirty_time:       %10lu\n"
		   "bdi_list:           %10u\n"
		   "state:              %10lx\n",
		   (unsigned long) K(wb_stat(wb, WB_WRITEBACK)),
		   (unsigned long) K(wb_stat(wb, WB_RECLAIMABLE)),
		   K(wb_thresh),
		   K(dirty_thresh),
		   K(background_thresh),
		   (unsigned long) K(wb_stat(wb, WB_DIRTIED)),
		   (unsigned long) K(wb_stat(wb, WB_WRITTEN)),
		   (unsigned long) K(wb->write_bandwidth),
		   nr_dirty,
		   nr_io,
		   nr_more_io,
		   nr_dirty_time,
		   !list_empty(&bdi->bdi_list), bdi->wb.state);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(bdi_debug_stats);

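/*
 * Sketch of the resulting debugfs output (the numbers below are made up,
 * purely for illustration; the file appears as "stats" under the per-bdi
 * debugfs directory created in bdi_debug_register() below):
 *
 *	BdiWriteback:            0 kB
 *	BdiReclaimable:        232 kB
 *	...
 *	BdiWriteBandwidth:  102400 kBps
 *	b_dirty:                 3
 *	state:                   1
 */
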
static void bdi_debug_register(struct backing_dev_info *bdi, const char *name)
{
	bdi->debug_dir = debugfs_create_dir(name, bdi_debug_root);

	debugfs_create_file("stats", 0444, bdi->debug_dir, bdi,
			    &bdi_debug_stats_fops);
}

static void bdi_debug_unregister(struct backing_dev_info *bdi)
{
	debugfs_remove_recursive(bdi->debug_dir);
}
#else
static inline void bdi_debug_init(void)
{
}
static inline void bdi_debug_register(struct backing_dev_info *bdi,
				      const char *name)
{
}
static inline void bdi_debug_unregister(struct backing_dev_info *bdi)
{
}
#endif

static ssize_t read_ahead_kb_store(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t count)
{
	struct backing_dev_info *bdi = dev_get_drvdata(dev);
	unsigned long read_ahead_kb;
	ssize_t ret;

	ret = kstrtoul(buf, 10, &read_ahead_kb);
	if (ret < 0)
		return ret;

	bdi->ra_pages = read_ahead_kb >> (PAGE_SHIFT - 10);

	return count;
}

#define BDI_SHOW(name, expr)						\
static ssize_t name##_show(struct device *dev,				\
			   struct device_attribute *attr, char *buf)	\
{									\
	struct backing_dev_info *bdi = dev_get_drvdata(dev);		\
									\
	return sysfs_emit(buf, "%lld\n", (long long)expr);		\
}									\
static DEVICE_ATTR_RW(name);

BDI_SHOW(read_ahead_kb, K(bdi->ra_pages))

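/*
 * For reference, BDI_SHOW(read_ahead_kb, K(bdi->ra_pages)) above expands
 * to roughly the following (hand-expanded sketch, not compiled here):
 *
 *	static ssize_t read_ahead_kb_show(struct device *dev,
 *			struct device_attribute *attr, char *buf)
 *	{
 *		struct backing_dev_info *bdi = dev_get_drvdata(dev);
 *
 *		return sysfs_emit(buf, "%lld\n",
 *				  (long long)K(bdi->ra_pages));
 *	}
 *	static DEVICE_ATTR_RW(read_ahead_kb);
 */
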
static ssize_t min_ratio_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct backing_dev_info *bdi = dev_get_drvdata(dev);
	unsigned int ratio;
	ssize_t ret;

	ret = kstrtouint(buf, 10, &ratio);
	if (ret < 0)
		return ret;

	ret = bdi_set_min_ratio(bdi, ratio);
	if (!ret)
		ret = count;

	return ret;
}
BDI_SHOW(min_ratio, bdi->min_ratio / BDI_RATIO_SCALE)

static ssize_t min_ratio_fine_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct backing_dev_info *bdi = dev_get_drvdata(dev);
	unsigned int ratio;
	ssize_t ret;

	ret = kstrtouint(buf, 10, &ratio);
	if (ret < 0)
		return ret;

	ret = bdi_set_min_ratio_no_scale(bdi, ratio);
	if (!ret)
		ret = count;

	return ret;
}
BDI_SHOW(min_ratio_fine, bdi->min_ratio)

static ssize_t max_ratio_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct backing_dev_info *bdi = dev_get_drvdata(dev);
	unsigned int ratio;
	ssize_t ret;

	ret = kstrtouint(buf, 10, &ratio);
	if (ret < 0)
		return ret;

	ret = bdi_set_max_ratio(bdi, ratio);
	if (!ret)
		ret = count;

	return ret;
}
BDI_SHOW(max_ratio, bdi->max_ratio / BDI_RATIO_SCALE)

static ssize_t max_ratio_fine_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct backing_dev_info *bdi = dev_get_drvdata(dev);
	unsigned int ratio;
	ssize_t ret;

	ret = kstrtouint(buf, 10, &ratio);
	if (ret < 0)
		return ret;

	ret = bdi_set_max_ratio_no_scale(bdi, ratio);
	if (!ret)
		ret = count;

	return ret;
}
BDI_SHOW(max_ratio_fine, bdi->max_ratio)

static ssize_t min_bytes_show(struct device *dev,
			      struct device_attribute *attr,
			      char *buf)
{
	struct backing_dev_info *bdi = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%llu\n", bdi_get_min_bytes(bdi));
}

static ssize_t min_bytes_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct backing_dev_info *bdi = dev_get_drvdata(dev);
	u64 bytes;
	ssize_t ret;

	ret = kstrtoull(buf, 10, &bytes);
	if (ret < 0)
		return ret;

	ret = bdi_set_min_bytes(bdi, bytes);
	if (!ret)
		ret = count;

	return ret;
}
static DEVICE_ATTR_RW(min_bytes);

static ssize_t max_bytes_show(struct device *dev,
			      struct device_attribute *attr,
			      char *buf)
{
	struct backing_dev_info *bdi = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%llu\n", bdi_get_max_bytes(bdi));
}

static ssize_t max_bytes_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct backing_dev_info *bdi = dev_get_drvdata(dev);
	u64 bytes;
	ssize_t ret;

	ret = kstrtoull(buf, 10, &bytes);
	if (ret < 0)
		return ret;

	ret = bdi_set_max_bytes(bdi, bytes);
	if (!ret)
		ret = count;

	return ret;
}
static DEVICE_ATTR_RW(max_bytes);

static ssize_t stable_pages_required_show(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	dev_warn_once(dev,
		"the stable_pages_required attribute has been removed. Use the stable_writes queue attribute instead.\n");
	return sysfs_emit(buf, "%d\n", 0);
}
static DEVICE_ATTR_RO(stable_pages_required);

static ssize_t strict_limit_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct backing_dev_info *bdi = dev_get_drvdata(dev);
	unsigned int strict_limit;
	ssize_t ret;

	ret = kstrtouint(buf, 10, &strict_limit);
	if (ret < 0)
		return ret;

	ret = bdi_set_strict_limit(bdi, strict_limit);
	if (!ret)
		ret = count;

	return ret;
}

static ssize_t strict_limit_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct backing_dev_info *bdi = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%d\n",
			!!(bdi->capabilities & BDI_CAP_STRICTLIMIT));
}
static DEVICE_ATTR_RW(strict_limit);

static struct attribute *bdi_dev_attrs[] = {
	&dev_attr_read_ahead_kb.attr,
	&dev_attr_min_ratio.attr,
	&dev_attr_min_ratio_fine.attr,
	&dev_attr_max_ratio.attr,
	&dev_attr_max_ratio_fine.attr,
	&dev_attr_min_bytes.attr,
	&dev_attr_max_bytes.attr,
	&dev_attr_stable_pages_required.attr,
	&dev_attr_strict_limit.attr,
	NULL,
};
ATTRIBUTE_GROUPS(bdi_dev);

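/*
 * These attributes surface under /sys/class/bdi/<bdi name>/, e.g.
 * /sys/class/bdi/8:0/read_ahead_kb (the "8:0" device name here is only
 * an example). Writing "1" to strict_limit, for instance, ends up setting
 * BDI_CAP_STRICTLIMIT via bdi_set_strict_limit() above.
 */
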
static __init int bdi_class_init(void)
{
	bdi_class = class_create("bdi");
	if (IS_ERR(bdi_class))
		return PTR_ERR(bdi_class);

	bdi_class->dev_groups = bdi_dev_groups;
	bdi_debug_init();

	return 0;
}
postcore_initcall(bdi_class_init);

static int __init default_bdi_init(void)
{
	bdi_wq = alloc_workqueue("writeback", WQ_MEM_RECLAIM | WQ_UNBOUND |
				 WQ_SYSFS, 0);
	if (!bdi_wq)
		return -ENOMEM;
	return 0;
}
subsys_initcall(default_bdi_init);

/*
 * This function is used when the first inode for this wb is marked dirty. It
 * wakes up the corresponding bdi thread which should then take care of the
 * periodic background write-out of dirty inodes. Since the write-out would
 * start only 'dirty_writeback_interval' centisecs from now anyway, we just
 * set up a timer which wakes the bdi thread up later.
 *
 * Note, we wouldn't bother setting up the timer, but this function is on the
 * fast path (used by '__mark_inode_dirty()'), so we save a few context
 * switches by delaying the wake-up.
 *
 * We have to be careful not to postpone flush work if it is scheduled for
 * earlier. Thus we use queue_delayed_work().
 */
void wb_wakeup_delayed(struct bdi_writeback *wb)
{
	unsigned long timeout;

	timeout = msecs_to_jiffies(dirty_writeback_interval * 10);
	spin_lock_irq(&wb->work_lock);
	if (test_bit(WB_registered, &wb->state))
		queue_delayed_work(bdi_wq, &wb->dwork, timeout);
	spin_unlock_irq(&wb->work_lock);
}

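/*
 * Worked example of the conversion above: dirty_writeback_interval
 * defaults to 500 centisecs, so timeout = msecs_to_jiffies(500 * 10),
 * i.e. five seconds' worth of jiffies before the delayed work fires.
 */
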
static void wb_update_bandwidth_workfn(struct work_struct *work)
{
	struct bdi_writeback *wb = container_of(to_delayed_work(work),
						struct bdi_writeback, bw_dwork);

	wb_update_bandwidth(wb);
}

/*
 * Initial write bandwidth: 100 MB/s
 */
#define INIT_BW		(100 << (20 - PAGE_SHIFT))

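/*
 * Worked example (assuming 4 KiB pages, PAGE_SHIFT == 12): INIT_BW ==
 * 100 << (20 - 12) == 25600 pages per second, which is 100 MB/s expressed
 * as a page count.
 */
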
static int wb_init(struct bdi_writeback *wb, struct backing_dev_info *bdi,
		   gfp_t gfp)
{
	int i, err;

	memset(wb, 0, sizeof(*wb));

	wb->bdi = bdi;
	wb->last_old_flush = jiffies;
	INIT_LIST_HEAD(&wb->b_dirty);
	INIT_LIST_HEAD(&wb->b_io);
	INIT_LIST_HEAD(&wb->b_more_io);
	INIT_LIST_HEAD(&wb->b_dirty_time);
	spin_lock_init(&wb->list_lock);

	atomic_set(&wb->writeback_inodes, 0);
	wb->bw_time_stamp = jiffies;
	wb->balanced_dirty_ratelimit = INIT_BW;
	wb->dirty_ratelimit = INIT_BW;
	wb->write_bandwidth = INIT_BW;
	wb->avg_write_bandwidth = INIT_BW;

	spin_lock_init(&wb->work_lock);
	INIT_LIST_HEAD(&wb->work_list);
	INIT_DELAYED_WORK(&wb->dwork, wb_workfn);
	INIT_DELAYED_WORK(&wb->bw_dwork, wb_update_bandwidth_workfn);
	wb->dirty_sleep = jiffies;

	err = fprop_local_init_percpu(&wb->completions, gfp);
	if (err)
		return err;

	for (i = 0; i < NR_WB_STAT_ITEMS; i++) {
		err = percpu_counter_init(&wb->stat[i], 0, gfp);
		if (err)
			goto out_destroy_stat;
	}

	return 0;

out_destroy_stat:
	while (i--)
		percpu_counter_destroy(&wb->stat[i]);
	fprop_local_destroy_percpu(&wb->completions);
	return err;
}

static void cgwb_remove_from_bdi_list(struct bdi_writeback *wb);

/*
 * Remove bdi from the global list and shutdown any threads we have running
 */
static void wb_shutdown(struct bdi_writeback *wb)
{
	/* Make sure nobody queues further work */
	spin_lock_irq(&wb->work_lock);
	if (!test_and_clear_bit(WB_registered, &wb->state)) {
		spin_unlock_irq(&wb->work_lock);
		return;
	}
	spin_unlock_irq(&wb->work_lock);

	cgwb_remove_from_bdi_list(wb);
	/*
	 * Drain work list and shutdown the delayed_work. !WB_registered
	 * tells wb_workfn() that @wb is dying and its work_list needs to
	 * be drained no matter what.
	 */
	mod_delayed_work(bdi_wq, &wb->dwork, 0);
	flush_delayed_work(&wb->dwork);
	WARN_ON(!list_empty(&wb->work_list));
	flush_delayed_work(&wb->bw_dwork);
}

static void wb_exit(struct bdi_writeback *wb)
{
	int i;

	WARN_ON(delayed_work_pending(&wb->dwork));

	for (i = 0; i < NR_WB_STAT_ITEMS; i++)
		percpu_counter_destroy(&wb->stat[i]);

	fprop_local_destroy_percpu(&wb->completions);
}

#ifdef CONFIG_CGROUP_WRITEBACK

#include <linux/memcontrol.h>

/*
 * cgwb_lock protects bdi->cgwb_tree, blkcg->cgwb_list, offline_cgwbs and
 * memcg->cgwb_list. bdi->cgwb_tree is also RCU protected.
 */
static DEFINE_SPINLOCK(cgwb_lock);
static struct workqueue_struct *cgwb_release_wq;

static LIST_HEAD(offline_cgwbs);
static void cleanup_offline_cgwbs_workfn(struct work_struct *work);
static DECLARE_WORK(cleanup_offline_cgwbs_work, cleanup_offline_cgwbs_workfn);

static void cgwb_free_rcu(struct rcu_head *rcu_head)
{
	struct bdi_writeback *wb = container_of(rcu_head,
			struct bdi_writeback, rcu);

	percpu_ref_exit(&wb->refcnt);
	wb_exit(wb);
	kfree(wb);
}

static void cgwb_release_workfn(struct work_struct *work)
{
	struct bdi_writeback *wb = container_of(work, struct bdi_writeback,
						release_work);
	struct backing_dev_info *bdi = wb->bdi;

	mutex_lock(&wb->bdi->cgwb_release_mutex);
	wb_shutdown(wb);

	css_put(wb->memcg_css);
	css_put(wb->blkcg_css);
	mutex_unlock(&wb->bdi->cgwb_release_mutex);

	/* triggers blkg destruction if no online users left */
	blkcg_unpin_online(wb->blkcg_css);

	fprop_local_destroy_percpu(&wb->memcg_completions);

	spin_lock_irq(&cgwb_lock);
	list_del(&wb->offline_node);
	spin_unlock_irq(&cgwb_lock);

	WARN_ON_ONCE(!list_empty(&wb->b_attached));
	call_rcu(&wb->rcu, cgwb_free_rcu);
	bdi_put(bdi);
}

static void cgwb_release(struct percpu_ref *refcnt)
{
	struct bdi_writeback *wb = container_of(refcnt, struct bdi_writeback,
						refcnt);

	queue_work(cgwb_release_wq, &wb->release_work);
}

static void cgwb_kill(struct bdi_writeback *wb)
{
	lockdep_assert_held(&cgwb_lock);

	WARN_ON(!radix_tree_delete(&wb->bdi->cgwb_tree, wb->memcg_css->id));
	list_del(&wb->memcg_node);
	list_del(&wb->blkcg_node);
	list_add(&wb->offline_node, &offline_cgwbs);
	percpu_ref_kill(&wb->refcnt);
}

static void cgwb_remove_from_bdi_list(struct bdi_writeback *wb)
{
	spin_lock_irq(&cgwb_lock);
	list_del_rcu(&wb->bdi_node);
	spin_unlock_irq(&cgwb_lock);
}

static int cgwb_create(struct backing_dev_info *bdi,
		       struct cgroup_subsys_state *memcg_css, gfp_t gfp)
{
	struct mem_cgroup *memcg;
	struct cgroup_subsys_state *blkcg_css;
	struct list_head *memcg_cgwb_list, *blkcg_cgwb_list;
	struct bdi_writeback *wb;
	unsigned long flags;
	int ret = 0;

	memcg = mem_cgroup_from_css(memcg_css);
	blkcg_css = cgroup_get_e_css(memcg_css->cgroup, &io_cgrp_subsys);
	memcg_cgwb_list = &memcg->cgwb_list;
	blkcg_cgwb_list = blkcg_get_cgwb_list(blkcg_css);

	/* look up again under lock and discard on blkcg mismatch */
	spin_lock_irqsave(&cgwb_lock, flags);
	wb = radix_tree_lookup(&bdi->cgwb_tree, memcg_css->id);
	if (wb && wb->blkcg_css != blkcg_css) {
		cgwb_kill(wb);
		wb = NULL;
	}
	spin_unlock_irqrestore(&cgwb_lock, flags);
	if (wb)
		goto out_put;

	/* need to create a new one */
	wb = kmalloc(sizeof(*wb), gfp);
	if (!wb) {
		ret = -ENOMEM;
		goto out_put;
	}

	ret = wb_init(wb, bdi, gfp);
	if (ret)
		goto err_free;

	ret = percpu_ref_init(&wb->refcnt, cgwb_release, 0, gfp);
	if (ret)
		goto err_wb_exit;

	ret = fprop_local_init_percpu(&wb->memcg_completions, gfp);
	if (ret)
		goto err_ref_exit;

	wb->memcg_css = memcg_css;
	wb->blkcg_css = blkcg_css;
	INIT_LIST_HEAD(&wb->b_attached);
	INIT_WORK(&wb->release_work, cgwb_release_workfn);
	set_bit(WB_registered, &wb->state);
	bdi_get(bdi);

	/*
	 * The root wb determines the registered state of the whole bdi and
	 * memcg_cgwb_list and blkcg_cgwb_list's next pointers indicate
	 * whether they're still online. Don't link @wb if any is dead.
	 * See wb_memcg_offline() and wb_blkcg_offline().
	 */
	ret = -ENODEV;
	spin_lock_irqsave(&cgwb_lock, flags);
	if (test_bit(WB_registered, &bdi->wb.state) &&
	    blkcg_cgwb_list->next && memcg_cgwb_list->next) {
		/* we might have raced another instance of this function */
		ret = radix_tree_insert(&bdi->cgwb_tree, memcg_css->id, wb);
		if (!ret) {
			list_add_tail_rcu(&wb->bdi_node, &bdi->wb_list);
			list_add(&wb->memcg_node, memcg_cgwb_list);
			list_add(&wb->blkcg_node, blkcg_cgwb_list);
			blkcg_pin_online(blkcg_css);
			css_get(memcg_css);
			css_get(blkcg_css);
		}
	}
	spin_unlock_irqrestore(&cgwb_lock, flags);
	if (ret) {
		if (ret == -EEXIST)
			ret = 0;
		goto err_fprop_exit;
	}
	goto out_put;

err_fprop_exit:
	bdi_put(bdi);
	fprop_local_destroy_percpu(&wb->memcg_completions);
err_ref_exit:
	percpu_ref_exit(&wb->refcnt);
err_wb_exit:
	wb_exit(wb);
err_free:
	kfree(wb);
out_put:
	css_put(blkcg_css);
	return ret;
}

/**
 * wb_get_lookup - get wb for a given memcg
 * @bdi: target bdi
 * @memcg_css: cgroup_subsys_state of the target memcg (must have positive ref)
 *
 * Try to get the wb for @memcg_css on @bdi. The returned wb has its
 * refcount incremented.
 *
 * This function uses css_get() on @memcg_css and thus expects its refcnt
 * to be positive on invocation. IOW, rcu_read_lock() protection on
 * @memcg_css isn't enough. try_get it before calling this function.
 *
 * A wb is keyed by its associated memcg. As blkcg implicitly enables
 * memcg on the default hierarchy, memcg association is guaranteed to be
 * more specific (equal or descendant to the associated blkcg) and thus can
 * identify both the memcg and blkcg associations.
 *
 * Because the blkcg associated with a memcg may change as blkcg is enabled
 * and disabled closer to root in the hierarchy, each wb keeps track of
 * both the memcg and blkcg associated with it and verifies the blkcg on
 * each lookup. On mismatch, the existing wb is discarded and a new one is
 * created.
 */
struct bdi_writeback *wb_get_lookup(struct backing_dev_info *bdi,
				    struct cgroup_subsys_state *memcg_css)
{
	struct bdi_writeback *wb;

	if (!memcg_css->parent)
		return &bdi->wb;

	rcu_read_lock();
	wb = radix_tree_lookup(&bdi->cgwb_tree, memcg_css->id);
	if (wb) {
		struct cgroup_subsys_state *blkcg_css;

		/* see whether the blkcg association has changed */
		blkcg_css = cgroup_get_e_css(memcg_css->cgroup,
					     &io_cgrp_subsys);
		if (unlikely(wb->blkcg_css != blkcg_css || !wb_tryget(wb)))
			wb = NULL;
		css_put(blkcg_css);
	}
	rcu_read_unlock();

	return wb;
}

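/*
 * Hedged usage sketch (hypothetical caller, not part of this file): a
 * caller already holding a reference on memcg_css could do
 *
 *	wb = wb_get_lookup(bdi, memcg_css);
 *	if (wb) {
 *		... use wb ...
 *		wb_put(wb);
 *	}
 *
 * dropping the wb reference with wb_put() when done.
 */
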
/**
 * wb_get_create - get wb for a given memcg, create if necessary
 * @bdi: target bdi
 * @memcg_css: cgroup_subsys_state of the target memcg (must have positive ref)
 * @gfp: allocation mask to use
 *
 * Try to get the wb for @memcg_css on @bdi. If it doesn't exist, try to
 * create one. See wb_get_lookup() for more details.
 */
struct bdi_writeback *wb_get_create(struct backing_dev_info *bdi,
				    struct cgroup_subsys_state *memcg_css,
				    gfp_t gfp)
{
	struct bdi_writeback *wb;

	might_alloc(gfp);

	if (!memcg_css->parent)
		return &bdi->wb;

	do {
		wb = wb_get_lookup(bdi, memcg_css);
	} while (!wb && !cgwb_create(bdi, memcg_css, gfp));

	return wb;
}

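/*
 * The lookup/create loop above is the usual optimistic pattern: retry
 * wb_get_lookup() until either it succeeds or cgwb_create() reports a
 * real error. cgwb_create() returning 0 on a lost creation race (-EEXIST
 * is rewritten to 0 in its error path) is what makes the retry safe.
 */
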
static int cgwb_bdi_init(struct backing_dev_info *bdi)
{
	int ret;

	INIT_RADIX_TREE(&bdi->cgwb_tree, GFP_ATOMIC);
	mutex_init(&bdi->cgwb_release_mutex);
	init_rwsem(&bdi->wb_switch_rwsem);

	ret = wb_init(&bdi->wb, bdi, GFP_KERNEL);
	if (!ret) {
		bdi->wb.memcg_css = &root_mem_cgroup->css;
		bdi->wb.blkcg_css = blkcg_root_css;
	}
	return ret;
}

static void cgwb_bdi_unregister(struct backing_dev_info *bdi)
{
	struct radix_tree_iter iter;
	void **slot;
	struct bdi_writeback *wb;

	WARN_ON(test_bit(WB_registered, &bdi->wb.state));

	spin_lock_irq(&cgwb_lock);
	radix_tree_for_each_slot(slot, &bdi->cgwb_tree, &iter, 0)
		cgwb_kill(*slot);
	spin_unlock_irq(&cgwb_lock);

	mutex_lock(&bdi->cgwb_release_mutex);
	spin_lock_irq(&cgwb_lock);
	while (!list_empty(&bdi->wb_list)) {
		wb = list_first_entry(&bdi->wb_list, struct bdi_writeback,
				      bdi_node);
		spin_unlock_irq(&cgwb_lock);
		wb_shutdown(wb);
		spin_lock_irq(&cgwb_lock);
	}
	spin_unlock_irq(&cgwb_lock);
	mutex_unlock(&bdi->cgwb_release_mutex);
}

/*
 * cleanup_offline_cgwbs_workfn - try to release dying cgwbs
 *
 * Try to release dying cgwbs by switching attached inodes to the nearest
 * living ancestor's writeback. Processed wbs are placed at the end of the
 * list to guarantee forward progress.
 */
static void cleanup_offline_cgwbs_workfn(struct work_struct *work)
{
	struct bdi_writeback *wb;
	LIST_HEAD(processed);

	spin_lock_irq(&cgwb_lock);

	while (!list_empty(&offline_cgwbs)) {
		wb = list_first_entry(&offline_cgwbs, struct bdi_writeback,
				      offline_node);
		list_move(&wb->offline_node, &processed);

		/*
		 * If wb is dirty, cleaning up the writeback by switching
		 * attached inodes will result in an effective removal of any
		 * bandwidth restrictions, which isn't the goal. Instead,
		 * it can be postponed until the next time, when all I/O
		 * will likely have completed. If in the meantime some inodes
		 * get re-dirtied, they should eventually be switched to a
		 * new wb.
		 */
		if (wb_has_dirty_io(wb))
			continue;

		if (!wb_tryget(wb))
			continue;

		spin_unlock_irq(&cgwb_lock);
		while (cleanup_offline_cgwb(wb))
			cond_resched();
		spin_lock_irq(&cgwb_lock);

		wb_put(wb);
	}

	if (!list_empty(&processed))
		list_splice_tail(&processed, &offline_cgwbs);

	spin_unlock_irq(&cgwb_lock);
}

/**
 * wb_memcg_offline - kill all wb's associated with a memcg being offlined
 * @memcg: memcg being offlined
 *
 * Also prevents creation of any new wb's associated with @memcg.
 */
void wb_memcg_offline(struct mem_cgroup *memcg)
{
	struct list_head *memcg_cgwb_list = &memcg->cgwb_list;
	struct bdi_writeback *wb, *next;

	spin_lock_irq(&cgwb_lock);
	list_for_each_entry_safe(wb, next, memcg_cgwb_list, memcg_node)
		cgwb_kill(wb);
	memcg_cgwb_list->next = NULL;	/* prevent new wb's */
	spin_unlock_irq(&cgwb_lock);

	queue_work(system_unbound_wq, &cleanup_offline_cgwbs_work);
}

/**
 * wb_blkcg_offline - kill all wb's associated with a blkcg being offlined
 * @css: blkcg being offlined
 *
 * Also prevents creation of any new wb's associated with @css.
 */
void wb_blkcg_offline(struct cgroup_subsys_state *css)
{
	struct bdi_writeback *wb, *next;
	struct list_head *list = blkcg_get_cgwb_list(css);

	spin_lock_irq(&cgwb_lock);
	list_for_each_entry_safe(wb, next, list, blkcg_node)
		cgwb_kill(wb);
	list->next = NULL;	/* prevent new wb's */
	spin_unlock_irq(&cgwb_lock);
}

static void cgwb_bdi_register(struct backing_dev_info *bdi)
{
	spin_lock_irq(&cgwb_lock);
	list_add_tail_rcu(&bdi->wb.bdi_node, &bdi->wb_list);
	spin_unlock_irq(&cgwb_lock);
}

static int __init cgwb_init(void)
{
	/*
	 * There can be many concurrent release work items overwhelming
	 * system_wq. Put them in a separate wq and limit concurrency.
	 * There's no point in executing many of these in parallel.
	 */
	cgwb_release_wq = alloc_workqueue("cgwb_release", 0, 1);
	if (!cgwb_release_wq)
		return -ENOMEM;

	return 0;
}
subsys_initcall(cgwb_init);

#else	/* CONFIG_CGROUP_WRITEBACK */

static int cgwb_bdi_init(struct backing_dev_info *bdi)
{
	return wb_init(&bdi->wb, bdi, GFP_KERNEL);
}

static void cgwb_bdi_unregister(struct backing_dev_info *bdi) { }

static void cgwb_bdi_register(struct backing_dev_info *bdi)
{
	list_add_tail_rcu(&bdi->wb.bdi_node, &bdi->wb_list);
}

static void cgwb_remove_from_bdi_list(struct bdi_writeback *wb)
{
	list_del_rcu(&wb->bdi_node);
}

#endif	/* CONFIG_CGROUP_WRITEBACK */

int bdi_init(struct backing_dev_info *bdi)
{
	bdi->dev = NULL;

	kref_init(&bdi->refcnt);
	bdi->min_ratio = 0;
	bdi->max_ratio = 100 * BDI_RATIO_SCALE;
	bdi->max_prop_frac = FPROP_FRAC_BASE;
	INIT_LIST_HEAD(&bdi->bdi_list);
	INIT_LIST_HEAD(&bdi->wb_list);
	init_waitqueue_head(&bdi->wb_waitq);

	return cgwb_bdi_init(bdi);
}

struct backing_dev_info *bdi_alloc(int node_id)
{
	struct backing_dev_info *bdi;

	bdi = kzalloc_node(sizeof(*bdi), GFP_KERNEL, node_id);
	if (!bdi)
		return NULL;

	if (bdi_init(bdi)) {
		kfree(bdi);
		return NULL;
	}
	bdi->capabilities = BDI_CAP_WRITEBACK | BDI_CAP_WRITEBACK_ACCT;
	bdi->ra_pages = VM_READAHEAD_PAGES;
	bdi->io_pages = VM_READAHEAD_PAGES;
	timer_setup(&bdi->laptop_mode_wb_timer, laptop_mode_timer_fn, 0);
	return bdi;
}
EXPORT_SYMBOL(bdi_alloc);

static struct rb_node **bdi_lookup_rb_node(u64 id, struct rb_node **parentp)
{
	struct rb_node **p = &bdi_tree.rb_node;
	struct rb_node *parent = NULL;
	struct backing_dev_info *bdi;

	lockdep_assert_held(&bdi_lock);

	while (*p) {
		parent = *p;
		bdi = rb_entry(parent, struct backing_dev_info, rb_node);

		if (bdi->id > id)
			p = &(*p)->rb_left;
		else if (bdi->id < id)
			p = &(*p)->rb_right;
		else
			break;
	}

	if (parentp)
		*parentp = parent;
	return p;
}

/**
 * bdi_get_by_id - lookup and get bdi from its id
 * @id: bdi id to lookup
 *
 * Find bdi matching @id and get it. Returns NULL if the matching bdi
 * doesn't exist or is already unregistered.
 */
struct backing_dev_info *bdi_get_by_id(u64 id)
{
	struct backing_dev_info *bdi = NULL;
	struct rb_node **p;

	spin_lock_bh(&bdi_lock);
	p = bdi_lookup_rb_node(id, NULL);
	if (*p) {
		bdi = rb_entry(*p, struct backing_dev_info, rb_node);
		bdi_get(bdi);
	}
	spin_unlock_bh(&bdi_lock);

	return bdi;
}

int bdi_register_va(struct backing_dev_info *bdi, const char *fmt, va_list args)
{
	struct device *dev;
	struct rb_node *parent, **p;

	if (bdi->dev)	/* The driver needs to use separate queues per device */
		return 0;

	vsnprintf(bdi->dev_name, sizeof(bdi->dev_name), fmt, args);
	dev = device_create(bdi_class, NULL, MKDEV(0, 0), bdi, bdi->dev_name);
	if (IS_ERR(dev))
		return PTR_ERR(dev);

	cgwb_bdi_register(bdi);
	bdi->dev = dev;

	bdi_debug_register(bdi, dev_name(dev));
	set_bit(WB_registered, &bdi->wb.state);

	spin_lock_bh(&bdi_lock);

	bdi->id = ++bdi_id_cursor;

	p = bdi_lookup_rb_node(bdi->id, &parent);
	rb_link_node(&bdi->rb_node, parent, p);
	rb_insert_color(&bdi->rb_node, &bdi_tree);

	list_add_tail_rcu(&bdi->bdi_list, &bdi_list);

	spin_unlock_bh(&bdi_lock);

	trace_writeback_bdi_register(bdi);
	return 0;
}

int bdi_register(struct backing_dev_info *bdi, const char *fmt, ...)
{
	va_list args;
	int ret;

	va_start(args, fmt);
	ret = bdi_register_va(bdi, fmt, args);
	va_end(args);
	return ret;
}
EXPORT_SYMBOL(bdi_register);

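/*
 * Hedged usage sketch (hypothetical driver code, not part of this file):
 * a driver that manages its own bdi would typically do something like
 *
 *	struct backing_dev_info *bdi = bdi_alloc(NUMA_NO_NODE);
 *
 *	if (!bdi)
 *		return -ENOMEM;
 *	if (bdi_register(bdi, "mydev%d", instance)) {
 *		bdi_put(bdi);
 *		return -ENODEV;
 *	}
 *	...
 *	bdi_unregister(bdi);
 *	bdi_put(bdi);
 *
 * where "mydev%d" and "instance" are purely illustrative names.
 */
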
void bdi_set_owner(struct backing_dev_info *bdi, struct device *owner)
{
	WARN_ON_ONCE(bdi->owner);
	bdi->owner = owner;
	get_device(owner);
}

/*
 * Remove bdi from bdi_list, and ensure that it is no longer visible
 */
static void bdi_remove_from_list(struct backing_dev_info *bdi)
{
	spin_lock_bh(&bdi_lock);
	rb_erase(&bdi->rb_node, &bdi_tree);
	list_del_rcu(&bdi->bdi_list);
	spin_unlock_bh(&bdi_lock);

	synchronize_rcu_expedited();
}

void bdi_unregister(struct backing_dev_info *bdi)
{
	del_timer_sync(&bdi->laptop_mode_wb_timer);

	/* make sure nobody finds us on the bdi_list anymore */
	bdi_remove_from_list(bdi);
	wb_shutdown(&bdi->wb);
	cgwb_bdi_unregister(bdi);

	/*
	 * If this BDI's min ratio has been set, use bdi_set_min_ratio() to
	 * update the global bdi_min_ratio.
	 */
	if (bdi->min_ratio)
		bdi_set_min_ratio(bdi, 0);

	if (bdi->dev) {
		bdi_debug_unregister(bdi);
		device_unregister(bdi->dev);
		bdi->dev = NULL;
	}

	if (bdi->owner) {
		put_device(bdi->owner);
		bdi->owner = NULL;
	}
}
EXPORT_SYMBOL(bdi_unregister);

static void release_bdi(struct kref *ref)
{
	struct backing_dev_info *bdi =
		container_of(ref, struct backing_dev_info, refcnt);

	WARN_ON_ONCE(test_bit(WB_registered, &bdi->wb.state));
	WARN_ON_ONCE(bdi->dev);
	wb_exit(&bdi->wb);
	kfree(bdi);
}

void bdi_put(struct backing_dev_info *bdi)
{
	kref_put(&bdi->refcnt, release_bdi);
}
EXPORT_SYMBOL(bdi_put);

struct backing_dev_info *inode_to_bdi(struct inode *inode)
{
	struct super_block *sb;

	if (!inode)
		return &noop_backing_dev_info;

	sb = inode->i_sb;
#ifdef CONFIG_BLOCK
	if (sb_is_blkdev_sb(sb))
		return I_BDEV(inode)->bd_disk->bdi;
#endif
	return sb->s_bdi;
}
EXPORT_SYMBOL(inode_to_bdi);

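/*
 * Illustrative caller (hypothetical, not from this file): writeback code
 * typically fetches the bdi like
 *
 *	struct backing_dev_info *bdi = inode_to_bdi(inode);
 *
 *	if (bdi == &noop_backing_dev_info)
 *		return;		(no real backing device)
 *
 * A NULL inode safely maps to &noop_backing_dev_info as handled above.
 */
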
const char *bdi_dev_name(struct backing_dev_info *bdi)
{
	if (!bdi || !bdi->dev)
		return bdi_unknown_name;
	return bdi->dev_name;
}
EXPORT_SYMBOL_GPL(bdi_dev_name);