/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_BACKING_DEV_DEFS_H
#define __LINUX_BACKING_DEV_DEFS_H

#include <linux/list.h>
#include <linux/radix-tree.h>
#include <linux/rbtree.h>
#include <linux/spinlock.h>
#include <linux/percpu_counter.h>
#include <linux/percpu-refcount.h>
#include <linux/flex_proportions.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/kref.h>
#include <linux/refcount.h>

struct page;
struct device;
struct dentry;

/*
 * Bits in bdi_writeback.state
 */
enum wb_state {
	WB_registered,		/* bdi_register() was done */
	WB_writeback_running,	/* Writeback is in progress */
	WB_has_dirty_io,	/* Dirty inodes on ->b_{dirty|io|more_io} */
	WB_start_all,		/* nr_pages == 0 (all) work pending */
};
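
/*
 * Example (a sketch, not part of the original header): these bits are only
 * safe to manipulate with atomic bitops.  This mirrors what wb_wakeup() in
 * fs/fs-writeback.c does; bdi_wq is declared in include/linux/backing-dev.h:
 *
 *	spin_lock_bh(&wb->work_lock);
 *	if (test_bit(WB_registered, &wb->state))
 *		mod_delayed_work(bdi_wq, &wb->dwork, 0);
 *	spin_unlock_bh(&wb->work_lock);
 */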

enum wb_congested_state {
	WB_async_congested,	/* The async (write) queue is getting full */
	WB_sync_congested,	/* The sync queue is getting full */
};

typedef int (congested_fn)(void *, int);
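
/*
 * A congested_fn lets stacked drivers (md/dm) report congestion for the
 * devices they sit on.  A minimal sketch of such a callback; the names
 * my_congested/my_device/my_lower_devices_congested are hypothetical
 * (the real instances live in drivers/md/):
 *
 *	static int my_congested(void *data, int bits)
 *	{
 *		struct my_device *dev = data;
 *
 *		return my_lower_devices_congested(dev, bits);
 *	}
 *
 * The return value is the subset of the queried bits
 * (1 << WB_async_congested, 1 << WB_sync_congested) that are congested.
 */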

enum wb_stat_item {
	WB_RECLAIMABLE,
	WB_WRITEBACK,
	WB_DIRTIED,
	WB_WRITTEN,
	NR_WB_STAT_ITEMS
};

#define WB_STAT_BATCH (8*(1+ilog2(nr_cpu_ids)))
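
/*
 * The per-wb stats are plain percpu counters batched with WB_STAT_BATCH.
 * A sketch of how they are updated, mirroring the __add_wb_stat() helper
 * in include/linux/backing-dev.h:
 *
 *	static inline void __add_wb_stat(struct bdi_writeback *wb,
 *					 enum wb_stat_item item, s64 amount)
 *	{
 *		percpu_counter_add_batch(&wb->stat[item], amount,
 *					 WB_STAT_BATCH);
 *	}
 */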

/*
 * why some writeback work was initiated
 */
enum wb_reason {
	WB_REASON_BACKGROUND,
	WB_REASON_VMSCAN,
	WB_REASON_SYNC,
	WB_REASON_PERIODIC,
	WB_REASON_LAPTOP_TIMER,
	WB_REASON_FS_FREE_SPACE,
	/*
	 * There is no bdi forker thread any more and works are done
	 * by emergency workers.  However, this name is visible to
	 * userland through tracepoints, and we keep exposing exactly
	 * the same information, so the mismatched name stays.
	 */
	WB_REASON_FORKER_THREAD,
	WB_REASON_FOREIGN_FLUSH,

	WB_REASON_MAX,
};

struct wb_completion {
	atomic_t		cnt;
	wait_queue_head_t	*waitq;
};

#define __WB_COMPLETION_INIT(_waitq)	\
	(struct wb_completion){ .cnt = ATOMIC_INIT(1), .waitq = (_waitq) }

/*
 * If one wants to wait for one or more wb_writeback_works, each work's
 * ->done should be set to a wb_completion defined using the following
 * macro.  Once all work items are issued with wb_queue_work(), the caller
 * can wait for the completion of all using wb_wait_for_completion().  Work
 * items which are waited upon aren't freed automatically on completion.
 */
#define WB_COMPLETION_INIT(bdi)		__WB_COMPLETION_INIT(&(bdi)->wb_waitq)

#define DEFINE_WB_COMPLETION(cmpl, bdi)	\
	struct wb_completion cmpl = WB_COMPLETION_INIT(bdi)
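
/*
 * Usage sketch of the pattern the comment above describes, modeled on
 * fs/fs-writeback.c (wb_queue_work() is internal to that file;
 * wb_wait_for_completion() is declared in include/linux/backing-dev.h):
 *
 *	DEFINE_WB_COMPLETION(done, bdi);
 *
 *	work->done = &done;
 *	wb_queue_work(wb, work);
 *	...
 *	wb_wait_for_completion(&done);
 */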

/*
 * For cgroup writeback, multiple wb's may map to the same blkcg.  Those
 * wb's can operate mostly independently but should share the congested
 * state.  To facilitate such sharing, the congested state is tracked using
 * the following struct, which is created on demand, indexed by blkcg ID on
 * its bdi, and refcounted.
 */
struct bdi_writeback_congested {
	unsigned long state;		/* WB_[a]sync_congested flags */
	refcount_t refcnt;		/* nr of attached wb's and blkg */

#ifdef CONFIG_CGROUP_WRITEBACK
	struct backing_dev_info *__bdi;	/* the associated bdi, set to NULL
					 * on bdi unregistration.  For memcg-wb
					 * internal use only! */
	int blkcg_id;			/* ID of the associated blkcg */
	struct rb_node rb_node;		/* on bdi->cgwb_congested_tree */
#endif
};

/*
 * Each wb (bdi_writeback) can perform writeback operations and is
 * measured and throttled independently.  Without cgroup writeback, each
 * bdi (bdi_writeback) is served by its embedded bdi->wb.
 *
 * On the default hierarchy, blkcg implicitly enables memcg.  This allows
 * using memcg's page ownership for attributing writeback IOs, and every
 * memcg - blkcg combination can be served by its own wb by assigning a
 * dedicated wb to each memcg, which enables isolation across different
 * cgroups and propagation of IO back pressure down from the IO layer up
 * to the tasks which are generating the dirty pages to be written back.
 *
 * A cgroup wb is indexed on its bdi by the ID of the associated memcg,
 * refcounted with the number of inodes attached to it, and pins the memcg
 * and the corresponding blkcg.  As the corresponding blkcg for a memcg may
 * change as blkcg is disabled and enabled higher up in the hierarchy, a wb
 * is tested for blkcg after lookup and removed from index on mismatch so
 * that a new wb for the combination can be created.
 */
struct bdi_writeback {
	struct backing_dev_info *bdi;	/* our parent bdi */

	unsigned long state;		/* Always use atomic bitops on this */
	unsigned long last_old_flush;	/* last old data flush */

	struct list_head b_dirty;	/* dirty inodes */
	struct list_head b_io;		/* parked for writeback */
	struct list_head b_more_io;	/* parked for more writeback */
	struct list_head b_dirty_time;	/* time stamps are dirty */
	spinlock_t list_lock;		/* protects the b_* lists */

	struct percpu_counter stat[NR_WB_STAT_ITEMS];

	struct bdi_writeback_congested *congested;

	unsigned long bw_time_stamp;	/* last time write bw is updated */
	unsigned long dirtied_stamp;
	unsigned long written_stamp;	/* pages written at bw_time_stamp */
	unsigned long write_bandwidth;	/* the estimated write bandwidth */
	unsigned long avg_write_bandwidth; /* further smoothed write bw, > 0 */

	/*
	 * The base dirty throttle rate, re-calculated every 200ms.
	 * All the bdi tasks' dirty rate will be curbed under it.
	 * @dirty_ratelimit tracks the estimated @balanced_dirty_ratelimit
	 * in small steps and is much more smooth/stable than the latter.
	 */
	unsigned long dirty_ratelimit;
	unsigned long balanced_dirty_ratelimit;

	struct fprop_local_percpu completions;
	int dirty_exceeded;
	enum wb_reason start_all_reason;

	spinlock_t work_lock;		/* protects work_list & dwork scheduling */
	struct list_head work_list;
	struct delayed_work dwork;	/* work item used for writeback */

	unsigned long dirty_sleep;	/* last wait */

	struct list_head bdi_node;	/* anchored at bdi->wb_list */

#ifdef CONFIG_CGROUP_WRITEBACK
	struct percpu_ref refcnt;	/* used only for !root wb's */
	struct fprop_local_percpu memcg_completions;
	struct cgroup_subsys_state *memcg_css;	/* the associated memcg */
	struct cgroup_subsys_state *blkcg_css;	/* and blkcg */
	struct list_head memcg_node;	/* anchored at memcg->cgwb_list */
	struct list_head blkcg_node;	/* anchored at blkcg->cgwb_list */

	union {
		struct work_struct release_work;
		struct rcu_head rcu;
	};
#endif
};

struct backing_dev_info {
	u64 id;
	struct rb_node rb_node;		/* keyed by ->id */
	struct list_head bdi_list;
	unsigned long ra_pages;		/* max readahead in PAGE_SIZE units */
	unsigned long io_pages;		/* max allowed IO size */
	congested_fn *congested_fn;	/* Function pointer if device is md/dm */
	void *congested_data;		/* Pointer to aux data for congested func */

	const char *name;

	struct kref refcnt;		/* Reference counter for the structure */
	unsigned int capabilities;	/* Device capabilities */
	unsigned int min_ratio;
	unsigned int max_ratio, max_prop_frac;

	/*
	 * Sum of avg_write_bandwidth of wbs with dirty inodes.  > 0 if
	 * there are any dirty wbs, which is depended upon by bdi_has_dirty().
	 */
	atomic_long_t tot_write_bandwidth;

	struct bdi_writeback wb;	/* the root writeback info for this bdi */
	struct list_head wb_list;	/* list of all wbs */
#ifdef CONFIG_CGROUP_WRITEBACK
	struct radix_tree_root cgwb_tree; /* radix tree of active cgroup wbs */
	struct rb_root cgwb_congested_tree; /* their congested states */
	struct mutex cgwb_release_mutex;  /* protect shutdown of wb structs */
	struct rw_semaphore wb_switch_rwsem; /* no cgwb switch while syncing */
#else
	struct bdi_writeback_congested *wb_congested;
#endif
	wait_queue_head_t wb_waitq;

	struct device *dev;
	char dev_name[64];
	struct device *owner;

	struct timer_list laptop_mode_wb_timer;

#ifdef CONFIG_DEBUG_FS
	struct dentry *debug_dir;
#endif
};
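
/*
 * Lifetime sketch: a bdi is allocated refcounted, registered against a
 * device name, and dropped with bdi_put(), which frees it once ->refcnt
 * (the kref above) falls to zero.  bdi_alloc()/bdi_register()/bdi_put()
 * are declared in include/linux/backing-dev.h; "my-device-%d" and id are
 * hypothetical:
 *
 *	struct backing_dev_info *bdi;
 *
 *	bdi = bdi_alloc(GFP_KERNEL);
 *	if (!bdi)
 *		return -ENOMEM;
 *	err = bdi_register(bdi, "my-device-%d", id);
 *	...
 *	bdi_put(bdi);
 */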

enum {
	BLK_RW_ASYNC = 0,
	BLK_RW_SYNC = 1,
};

void clear_wb_congested(struct bdi_writeback_congested *congested, int sync);
void set_wb_congested(struct bdi_writeback_congested *congested, int sync);

static inline void clear_bdi_congested(struct backing_dev_info *bdi, int sync)
{
	clear_wb_congested(bdi->wb.congested, sync);
}

static inline void set_bdi_congested(struct backing_dev_info *bdi, int sync)
{
	set_wb_congested(bdi->wb.congested, sync);
}
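
/*
 * Usage sketch: a driver that notices its queue filling up could flip the
 * root wb's congested bits via the helpers above (queue_nearly_full is a
 * hypothetical condition).  The sync argument selects WB_sync_congested
 * vs WB_async_congested:
 *
 *	if (queue_nearly_full)
 *		set_bdi_congested(bdi, BLK_RW_ASYNC);
 *	else
 *		clear_bdi_congested(bdi, BLK_RW_ASYNC);
 */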

struct wb_lock_cookie {
	bool locked;
	unsigned long flags;
};
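
/*
 * A wb_lock_cookie is filled in by unlocked_inode_to_wb_begin() and handed
 * back to unlocked_inode_to_wb_end() (both declared in
 * include/linux/backing-dev.h), e.g. the pattern mm/page-writeback.c uses:
 *
 *	struct wb_lock_cookie cookie = {};
 *	struct bdi_writeback *wb;
 *
 *	wb = unlocked_inode_to_wb_begin(inode, &cookie);
 *	... operate on wb ...
 *	unlocked_inode_to_wb_end(inode, &cookie);
 */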

#ifdef CONFIG_CGROUP_WRITEBACK

/**
 * wb_tryget - try to increment a wb's refcount
 * @wb: bdi_writeback to get
 */
static inline bool wb_tryget(struct bdi_writeback *wb)
{
	if (wb != &wb->bdi->wb)
		return percpu_ref_tryget(&wb->refcnt);
	return true;
}

/**
 * wb_get - increment a wb's refcount
 * @wb: bdi_writeback to get
 */
static inline void wb_get(struct bdi_writeback *wb)
{
	if (wb != &wb->bdi->wb)
		percpu_ref_get(&wb->refcnt);
}

/**
 * wb_put - decrement a wb's refcount
 * @wb: bdi_writeback to put
 */
static inline void wb_put(struct bdi_writeback *wb)
{
	if (WARN_ON_ONCE(!wb->bdi)) {
		/*
		 * A driver bug might cause a file to be removed before bdi
		 * was initialized.
		 */
		return;
	}

	if (wb != &wb->bdi->wb)
		percpu_ref_put(&wb->refcnt);
}

/**
 * wb_dying - is a wb dying?
 * @wb: bdi_writeback of interest
 *
 * Returns whether @wb is unlinked and being drained.
 */
static inline bool wb_dying(struct bdi_writeback *wb)
{
	return percpu_ref_is_dying(&wb->refcnt);
}
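
/*
 * Typical lookup pattern, a simplified sketch of what the cgwb code in
 * mm/backing-dev.c does (the real wb_get_create() also revalidates the
 * blkcg association): a cgroup wb found under RCU is only safe to use
 * after a successful wb_tryget() and must be released with wb_put():
 *
 *	rcu_read_lock();
 *	wb = radix_tree_lookup(&bdi->cgwb_tree, memcg_css->id);
 *	if (wb && !wb_tryget(wb))
 *		wb = NULL;
 *	rcu_read_unlock();
 *	...
 *	if (wb)
 *		wb_put(wb);
 */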

#else	/* CONFIG_CGROUP_WRITEBACK */

static inline bool wb_tryget(struct bdi_writeback *wb)
{
	return true;
}

static inline void wb_get(struct bdi_writeback *wb)
{
}

static inline void wb_put(struct bdi_writeback *wb)
{
}

static inline bool wb_dying(struct bdi_writeback *wb)
{
	return false;
}

#endif	/* CONFIG_CGROUP_WRITEBACK */

#endif	/* __LINUX_BACKING_DEV_DEFS_H */