/* SPDX-License-Identifier: GPL-2.0 */
/*
 * include/linux/backing-dev.h
 *
 * low-level device information and state which is propagated up through
 * to high-level code.
 */

#ifndef _LINUX_BACKING_DEV_H
#define _LINUX_BACKING_DEV_H

#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/sched.h>
#include <linux/blkdev.h>
#include <linux/device.h>
#include <linux/writeback.h>
#include <linux/blk-cgroup.h>
#include <linux/backing-dev-defs.h>
#include <linux/slab.h>

static inline struct backing_dev_info *bdi_get(struct backing_dev_info *bdi)
{
	kref_get(&bdi->refcnt);
	return bdi;
}

struct backing_dev_info *bdi_get_by_id(u64 id);
void bdi_put(struct backing_dev_info *bdi);

int bdi_register(struct backing_dev_info *bdi, const char *fmt, ...);
int bdi_register_va(struct backing_dev_info *bdi, const char *fmt,
		    va_list args);
int bdi_register_owner(struct backing_dev_info *bdi, struct device *owner);
void bdi_unregister(struct backing_dev_info *bdi);

struct backing_dev_info *bdi_alloc_node(gfp_t gfp_mask, int node_id);
static inline struct backing_dev_info *bdi_alloc(gfp_t gfp_mask)
{
	return bdi_alloc_node(gfp_mask, NUMA_NO_NODE);
}
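
/*
 * Illustrative sketch, not part of the kernel API: a driver that owns its
 * own bdi typically pairs allocation and registration with unregistration
 * and a final reference drop.  The "mydev-%d" name below is made up.
 *
 *	struct backing_dev_info *bdi;
 *	int err;
 *
 *	bdi = bdi_alloc(GFP_KERNEL);
 *	if (!bdi)
 *		return -ENOMEM;
 *	err = bdi_register(bdi, "mydev-%d", 0);
 *	if (err) {
 *		bdi_put(bdi);
 *		return err;
 *	}
 *	...
 *	bdi_unregister(bdi);
 *	bdi_put(bdi);
 */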

void wb_start_background_writeback(struct bdi_writeback *wb);
void wb_workfn(struct work_struct *work);
void wb_wakeup_delayed(struct bdi_writeback *wb);

void wb_wait_for_completion(struct wb_completion *done);

extern spinlock_t bdi_lock;
extern struct list_head bdi_list;

extern struct workqueue_struct *bdi_wq;
extern struct workqueue_struct *bdi_async_bio_wq;

static inline bool wb_has_dirty_io(struct bdi_writeback *wb)
{
	return test_bit(WB_has_dirty_io, &wb->state);
}

static inline bool bdi_has_dirty_io(struct backing_dev_info *bdi)
{
	/*
	 * @bdi->tot_write_bandwidth is guaranteed to be > 0 if there are
	 * any dirty wbs.  See wb_update_write_bandwidth().
	 */
	return atomic_long_read(&bdi->tot_write_bandwidth);
}

static inline void __add_wb_stat(struct bdi_writeback *wb,
				 enum wb_stat_item item, s64 amount)
{
	percpu_counter_add_batch(&wb->stat[item], amount, WB_STAT_BATCH);
}

static inline void inc_wb_stat(struct bdi_writeback *wb, enum wb_stat_item item)
{
	__add_wb_stat(wb, item, 1);
}

static inline void dec_wb_stat(struct bdi_writeback *wb, enum wb_stat_item item)
{
	__add_wb_stat(wb, item, -1);
}

static inline s64 wb_stat(struct bdi_writeback *wb, enum wb_stat_item item)
{
	return percpu_counter_read_positive(&wb->stat[item]);
}

static inline s64 wb_stat_sum(struct bdi_writeback *wb, enum wb_stat_item item)
{
	return percpu_counter_sum_positive(&wb->stat[item]);
}

extern void wb_writeout_inc(struct bdi_writeback *wb);

/*
 * maximal error of a stat counter.
 */
static inline unsigned long wb_stat_error(void)
{
#ifdef CONFIG_SMP
	return nr_cpu_ids * WB_STAT_BATCH;
#else
	return 1;
#endif
}
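
/*
 * Illustrative sketch, not kernel code: since per-cpu wb stats are folded
 * in WB_STAT_BATCH-sized steps, threshold comparisons should leave
 * wb_stat_error() worth of slack, e.g.:
 *
 *	if (wb_stat(wb, WB_RECLAIMABLE) > thresh - wb_stat_error())
 *		... treat @wb as having reached @thresh ...
 */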

int bdi_set_min_ratio(struct backing_dev_info *bdi, unsigned int min_ratio);
int bdi_set_max_ratio(struct backing_dev_info *bdi, unsigned int max_ratio);

/*
 * Flags in backing_dev_info::capability
 *
 * The first three flags control whether dirty pages will contribute to the
 * VM's accounting and whether writepages() should be called for dirty pages
 * (something that would not, for example, be appropriate for ramfs)
 *
 * WARNING: these flags are closely related and should not normally be
 * used separately.  The BDI_CAP_NO_ACCT_AND_WRITEBACK combines these
 * three flags into a single convenience macro.
 *
 * BDI_CAP_NO_ACCT_DIRTY:  Dirty pages shouldn't contribute to accounting
 * BDI_CAP_NO_WRITEBACK:   Don't write pages back
 * BDI_CAP_NO_ACCT_WB:     Don't automatically account writeback pages
 * BDI_CAP_STRICTLIMIT:    Keep number of dirty pages below bdi threshold.
 *
 * BDI_CAP_CGROUP_WRITEBACK: Supports cgroup-aware writeback.
 * BDI_CAP_SYNCHRONOUS_IO: Device is so fast that asynchronous IO would be
 *			   inefficient.
 */
#define BDI_CAP_NO_ACCT_DIRTY	0x00000001
#define BDI_CAP_NO_WRITEBACK	0x00000002
#define BDI_CAP_NO_ACCT_WB	0x00000004
#define BDI_CAP_STABLE_WRITES	0x00000008
#define BDI_CAP_STRICTLIMIT	0x00000010
#define BDI_CAP_CGROUP_WRITEBACK 0x00000020
#define BDI_CAP_SYNCHRONOUS_IO	0x00000040

#define BDI_CAP_NO_ACCT_AND_WRITEBACK \
	(BDI_CAP_NO_WRITEBACK | BDI_CAP_NO_ACCT_DIRTY | BDI_CAP_NO_ACCT_WB)
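
/*
 * Illustrative sketch, not kernel code: a hypothetical RAM-backed
 * filesystem that must keep its pages out of dirty/writeback accounting
 * would use the combined convenience macro, while a device supporting
 * cgroup-aware writeback advertises that capability separately:
 *
 *	ramfs_like_bdi->capabilities = BDI_CAP_NO_ACCT_AND_WRITEBACK;
 *	blockdev_like_bdi->capabilities = BDI_CAP_CGROUP_WRITEBACK;
 */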

extern struct backing_dev_info noop_backing_dev_info;

/**
 * writeback_in_progress - determine whether there is writeback in progress
 * @wb: bdi_writeback of interest
 *
 * Determine whether there is writeback waiting to be handled against a
 * bdi_writeback.
 */
static inline bool writeback_in_progress(struct bdi_writeback *wb)
{
	return test_bit(WB_writeback_running, &wb->state);
}

static inline struct backing_dev_info *inode_to_bdi(struct inode *inode)
{
	struct super_block *sb;

	if (!inode)
		return &noop_backing_dev_info;

	sb = inode->i_sb;
#ifdef CONFIG_BLOCK
	if (sb_is_blkdev_sb(sb))
		return I_BDEV(inode)->bd_bdi;
#endif
	return sb->s_bdi;
}

static inline int wb_congested(struct bdi_writeback *wb, int cong_bits)
{
	struct backing_dev_info *bdi = wb->bdi;

	if (bdi->congested_fn)
		return bdi->congested_fn(bdi->congested_data, cong_bits);
	return wb->congested->state & cong_bits;
}

long congestion_wait(int sync, long timeout);
long wait_iff_congested(int sync, long timeout);

static inline bool bdi_cap_synchronous_io(struct backing_dev_info *bdi)
{
	return bdi->capabilities & BDI_CAP_SYNCHRONOUS_IO;
}

static inline bool bdi_cap_stable_pages_required(struct backing_dev_info *bdi)
{
	return bdi->capabilities & BDI_CAP_STABLE_WRITES;
}

static inline bool bdi_cap_writeback_dirty(struct backing_dev_info *bdi)
{
	return !(bdi->capabilities & BDI_CAP_NO_WRITEBACK);
}

static inline bool bdi_cap_account_dirty(struct backing_dev_info *bdi)
{
	return !(bdi->capabilities & BDI_CAP_NO_ACCT_DIRTY);
}

static inline bool bdi_cap_account_writeback(struct backing_dev_info *bdi)
{
	/* Paranoia: BDI_CAP_NO_WRITEBACK implies BDI_CAP_NO_ACCT_WB */
	return !(bdi->capabilities & (BDI_CAP_NO_ACCT_WB |
				      BDI_CAP_NO_WRITEBACK));
}

static inline bool mapping_cap_writeback_dirty(struct address_space *mapping)
{
	return bdi_cap_writeback_dirty(inode_to_bdi(mapping->host));
}

static inline bool mapping_cap_account_dirty(struct address_space *mapping)
{
	return bdi_cap_account_dirty(inode_to_bdi(mapping->host));
}

static inline int bdi_sched_wait(void *word)
{
	schedule();
	return 0;
}

#ifdef CONFIG_CGROUP_WRITEBACK

struct bdi_writeback_congested *
wb_congested_get_create(struct backing_dev_info *bdi, int blkcg_id, gfp_t gfp);
void wb_congested_put(struct bdi_writeback_congested *congested);
struct bdi_writeback *wb_get_lookup(struct backing_dev_info *bdi,
				    struct cgroup_subsys_state *memcg_css);
struct bdi_writeback *wb_get_create(struct backing_dev_info *bdi,
				    struct cgroup_subsys_state *memcg_css,
				    gfp_t gfp);
void wb_memcg_offline(struct mem_cgroup *memcg);
void wb_blkcg_offline(struct blkcg *blkcg);
int inode_congested(struct inode *inode, int cong_bits);

/**
 * inode_cgwb_enabled - test whether cgroup writeback is enabled on an inode
 * @inode: inode of interest
 *
 * cgroup writeback requires support from both the bdi and filesystem.
 * Also, both memcg and iocg have to be on the default hierarchy.  Test
 * whether all conditions are met.
 *
 * Note that the test result may change dynamically on the same inode
 * depending on how memcg and iocg are configured.
 */
static inline bool inode_cgwb_enabled(struct inode *inode)
{
	struct backing_dev_info *bdi = inode_to_bdi(inode);

	return cgroup_subsys_on_dfl(memory_cgrp_subsys) &&
		cgroup_subsys_on_dfl(io_cgrp_subsys) &&
		bdi_cap_account_dirty(bdi) &&
		(bdi->capabilities & BDI_CAP_CGROUP_WRITEBACK) &&
		(inode->i_sb->s_iflags & SB_I_CGROUPWB);
}

/**
 * wb_find_current - find wb for %current on a bdi
 * @bdi: bdi of interest
 *
 * Find the wb of @bdi which matches both the memcg and blkcg of %current.
 * Must be called under rcu_read_lock() which protects the returned wb.
 */
static inline struct bdi_writeback *wb_find_current(struct backing_dev_info *bdi)
{
	struct cgroup_subsys_state *memcg_css;
	struct bdi_writeback *wb;

	memcg_css = task_css(current, memory_cgrp_id);
	if (!memcg_css->parent)
		return &bdi->wb;

	wb = radix_tree_lookup(&bdi->cgwb_tree, memcg_css->id);

	/*
	 * %current's blkcg equals the effective blkcg of its memcg.  No
	 * need to use the relatively expensive cgroup_get_e_css().
	 */
	if (likely(wb && wb->blkcg_css == task_css(current, io_cgrp_id)))
		return wb;
	return NULL;
}
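
/*
 * Illustrative sketch, not kernel code: callers must wrap the lookup in
 * rcu_read_lock()/rcu_read_unlock(), which is what keeps the returned wb
 * from being freed underneath them:
 *
 *	struct bdi_writeback *wb;
 *
 *	rcu_read_lock();
 *	wb = wb_find_current(bdi);
 *	if (wb)
 *		... inspect wb, no sleeping ...
 *	rcu_read_unlock();
 */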

/**
 * wb_get_create_current - get or create wb for %current on a bdi
 * @bdi: bdi of interest
 * @gfp: allocation mask
 *
 * Equivalent to wb_get_create() on %current's memcg.  This function is
 * called from a relatively hot path and optimizes the common cases using
 * wb_find_current().
 */
static inline struct bdi_writeback *
wb_get_create_current(struct backing_dev_info *bdi, gfp_t gfp)
{
	struct bdi_writeback *wb;

	rcu_read_lock();
	wb = wb_find_current(bdi);
	if (wb && unlikely(!wb_tryget(wb)))
		wb = NULL;
	rcu_read_unlock();

	if (unlikely(!wb)) {
		struct cgroup_subsys_state *memcg_css;

		memcg_css = task_get_css(current, memory_cgrp_id);
		wb = wb_get_create(bdi, memcg_css, gfp);
		css_put(memcg_css);
	}
	return wb;
}

/**
 * inode_to_wb_is_valid - test whether an inode has a wb associated
 * @inode: inode of interest
 *
 * Returns %true if @inode has a wb associated.  May be called without any
 * locking.
 */
static inline bool inode_to_wb_is_valid(struct inode *inode)
{
	return inode->i_wb;
}

/**
 * inode_to_wb - determine the wb of an inode
 * @inode: inode of interest
 *
 * Returns the wb @inode is currently associated with.  The caller must be
 * holding either @inode->i_lock, the i_pages lock, or the
 * associated wb's list_lock.
 */
static inline struct bdi_writeback *inode_to_wb(const struct inode *inode)
{
#ifdef CONFIG_LOCKDEP
	WARN_ON_ONCE(debug_locks &&
		     (!lockdep_is_held(&inode->i_lock) &&
		      !lockdep_is_held(&inode->i_mapping->i_pages.xa_lock) &&
		      !lockdep_is_held(&inode->i_wb->list_lock)));
#endif
	return inode->i_wb;
}
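
/*
 * Illustrative sketch, not kernel code: holding @inode->i_lock is one way
 * to satisfy the locking rule above:
 *
 *	spin_lock(&inode->i_lock);
 *	wb = inode_to_wb(inode);
 *	... use wb while i_lock is held ...
 *	spin_unlock(&inode->i_lock);
 */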

/**
 * unlocked_inode_to_wb_begin - begin unlocked inode wb access transaction
 * @inode: target inode
 * @cookie: output param, to be passed to the end function
 *
 * The caller wants to access the wb associated with @inode but isn't
 * holding inode->i_lock, the i_pages lock or wb->list_lock.  This
 * function determines the wb associated with @inode and ensures that the
 * association doesn't change until the transaction is finished with
 * unlocked_inode_to_wb_end().
 *
 * The caller must call unlocked_inode_to_wb_end() with *@cookie afterwards and
 * can't sleep during the transaction.  IRQs may or may not be disabled on
 * return.
 */
static inline struct bdi_writeback *
unlocked_inode_to_wb_begin(struct inode *inode, struct wb_lock_cookie *cookie)
{
	rcu_read_lock();

	/*
	 * Paired with store_release in inode_switch_wbs_work_fn() and
	 * ensures that we see the new wb if we see cleared I_WB_SWITCH.
	 */
	cookie->locked = smp_load_acquire(&inode->i_state) & I_WB_SWITCH;

	if (unlikely(cookie->locked))
		xa_lock_irqsave(&inode->i_mapping->i_pages, cookie->flags);

	/*
	 * Protected by either !I_WB_SWITCH + rcu_read_lock() or the i_pages
	 * lock.  inode_to_wb() will bark.  Deref directly.
	 */
	return inode->i_wb;
}

/**
 * unlocked_inode_to_wb_end - end inode wb access transaction
 * @inode: target inode
 * @cookie: @cookie from unlocked_inode_to_wb_begin()
 */
static inline void unlocked_inode_to_wb_end(struct inode *inode,
					    struct wb_lock_cookie *cookie)
{
	if (unlikely(cookie->locked))
		xa_unlock_irqrestore(&inode->i_mapping->i_pages, cookie->flags);

	rcu_read_unlock();
}
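
/*
 * Illustrative sketch, not kernel code: the begin/end pair brackets a
 * short, non-sleeping access to the inode's wb:
 *
 *	struct wb_lock_cookie cookie = {};
 *	struct bdi_writeback *wb;
 *
 *	wb = unlocked_inode_to_wb_begin(inode, &cookie);
 *	... use wb, no sleeping ...
 *	unlocked_inode_to_wb_end(inode, &cookie);
 */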

#else	/* CONFIG_CGROUP_WRITEBACK */

static inline bool inode_cgwb_enabled(struct inode *inode)
{
	return false;
}

static inline struct bdi_writeback_congested *
wb_congested_get_create(struct backing_dev_info *bdi, int blkcg_id, gfp_t gfp)
{
	refcount_inc(&bdi->wb_congested->refcnt);
	return bdi->wb_congested;
}

static inline void wb_congested_put(struct bdi_writeback_congested *congested)
{
	if (refcount_dec_and_test(&congested->refcnt))
		kfree(congested);
}

static inline struct bdi_writeback *wb_find_current(struct backing_dev_info *bdi)
{
	return &bdi->wb;
}

static inline struct bdi_writeback *
wb_get_create_current(struct backing_dev_info *bdi, gfp_t gfp)
{
	return &bdi->wb;
}

static inline bool inode_to_wb_is_valid(struct inode *inode)
{
	return true;
}

static inline struct bdi_writeback *inode_to_wb(struct inode *inode)
{
	return &inode_to_bdi(inode)->wb;
}

static inline struct bdi_writeback *
unlocked_inode_to_wb_begin(struct inode *inode, struct wb_lock_cookie *cookie)
{
	return inode_to_wb(inode);
}

static inline void unlocked_inode_to_wb_end(struct inode *inode,
					    struct wb_lock_cookie *cookie)
{
}

static inline void wb_memcg_offline(struct mem_cgroup *memcg)
{
}

static inline void wb_blkcg_offline(struct blkcg *blkcg)
{
}

static inline int inode_congested(struct inode *inode, int cong_bits)
{
	return wb_congested(&inode_to_bdi(inode)->wb, cong_bits);
}

#endif	/* CONFIG_CGROUP_WRITEBACK */

static inline int inode_read_congested(struct inode *inode)
{
	return inode_congested(inode, 1 << WB_sync_congested);
}

static inline int inode_write_congested(struct inode *inode)
{
	return inode_congested(inode, 1 << WB_async_congested);
}

static inline int inode_rw_congested(struct inode *inode)
{
	return inode_congested(inode, (1 << WB_sync_congested) |
				      (1 << WB_async_congested));
}

static inline int bdi_congested(struct backing_dev_info *bdi, int cong_bits)
{
	return wb_congested(&bdi->wb, cong_bits);
}

static inline int bdi_read_congested(struct backing_dev_info *bdi)
{
	return bdi_congested(bdi, 1 << WB_sync_congested);
}

static inline int bdi_write_congested(struct backing_dev_info *bdi)
{
	return bdi_congested(bdi, 1 << WB_async_congested);
}

static inline int bdi_rw_congested(struct backing_dev_info *bdi)
{
	return bdi_congested(bdi, (1 << WB_sync_congested) |
				  (1 << WB_async_congested));
}
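
/*
 * Illustrative sketch, not kernel code: a writer that finds the backing
 * device congested typically backs off briefly before retrying:
 *
 *	while (bdi_write_congested(bdi))
 *		congestion_wait(BLK_RW_ASYNC, HZ / 50);
 */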

extern const char *bdi_unknown_name;

static inline const char *bdi_dev_name(struct backing_dev_info *bdi)
{
	if (!bdi || !bdi->dev)
		return bdi_unknown_name;
	return dev_name(bdi->dev);
}

#endif	/* _LINUX_BACKING_DEV_H */